|
|
|
import gradio as gr |
|
from transformers import pipeline |
|
|
|
|
|
def classify_sentiment(audio, model):
    """Run audio sentiment classification and return label->score mapping.

    Args:
        audio: Filepath to the recorded/uploaded audio clip (Gradio passes a
            path string because the Audio component uses type="filepath").
        model: Hugging Face model id to use for the "audio-classification"
            pipeline.

    Returns:
        dict mapping each predicted class label (str) to its score (float),
        in the shape the Gradio Label output component expects.
    """
    # Cache pipelines on the function object so repeated calls with the same
    # model id do not re-download / re-load the weights (pipeline construction
    # is by far the most expensive step; the original rebuilt it every call).
    cache = getattr(classify_sentiment, "_pipe_cache", None)
    if cache is None:
        cache = classify_sentiment._pipe_cache = {}
    if model not in cache:
        cache[model] = pipeline("audio-classification", model=model)

    preds = cache[model](audio)
    return {pred["label"]: pred["score"] for pred in preds}
|
|
|
# UI wiring. Migrated off the gr.inputs / gr.outputs namespaces and the
# `source=` kwarg, which were removed in Gradio 3.x/4.x; components now live
# directly on the `gr` module and Audio takes a `sources` list.
input_audio = [
    # "Record/ Drop audio" implies both mic recording and file drop, so allow
    # both sources (the old `source="microphone"` kwarg no longer exists).
    gr.Audio(sources=["microphone", "upload"], type="filepath", label="Record/ Drop audio"),
    gr.Dropdown(
        [
            "DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11",
            "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd",
        ],
        label="Model Name",
    ),
]
label = gr.Label(num_top_classes=5)

# NOTE(review): the string theme "grass" was removed in modern Gradio; the
# default theme is used instead (pass a gr.themes.* object to customize).
gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
).launch()
|
|
|
|