import gradio as gr
from transformers import pipeline

# The two selectable wav2vec2 sentiment models; load each once and reuse it across requests.
MODELS = [
    "DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11",
    "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd",
]
pipelines = {name: pipeline(model=name) for name in MODELS}

def classify_sentiment(audio, model_name):
    # Run the model chosen in the dropdown and return a {label: score} dict, the format gr.outputs.Label expects.
    predictions = pipelines[model_name](audio)
    return {p["label"]: p["score"] for p in predictions}

input_audio = [
    gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio"),
    gr.inputs.Dropdown(MODELS, label="Model Name"),
]
label = gr.outputs.Label(num_top_classes=5)
gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    examples=[["test1.wav", MODELS[0]], ["test2.wav", MODELS[1]]],
    theme="grass",
).launch()