import gradio as gr
from transformers import pipeline

# Audio sentiment-classification model fine-tuned on the Spanish MESD dataset.
pipe = pipeline(model="hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd")


def classify_sentiment(audio):
    """Classify the sentiment of a recorded audio clip.

    Parameters
    ----------
    audio : str | None
        Filesystem path to the recorded audio (the input component uses
        type="filepath"); None when the optional input was left empty.

    Returns
    -------
    dict
        Mapping of sentiment label -> confidence score, the format
        gr.outputs.Label expects. Empty dict when no audio was provided.
    """
    # The microphone input is optional; guard against a missing recording,
    # which would otherwise crash the pipeline.
    if audio is None:
        return {}
    # The pipeline returns a list of {"label": ..., "score": ...} dicts;
    # convert it to the {label: score} mapping the Label output renders.
    predictions = pipe(audio)
    return {p["label"]: p["score"] for p in predictions}


input_audio = gr.inputs.Audio(
    source="microphone",
    type="filepath",
    optional=True,
    label="Please record your voice",
)
label = gr.outputs.Label(num_top_classes=5)

# NOTE: the original passed image-file examples (images/cheetah1.jpg, ...)
# to this audio interface — a leftover from an image demo that would error
# when clicked — so no examples are supplied here.
gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    theme="grass",
).launch()