|
from reader import get_article |
|
import gradio as gr |
|
from transformers import pipeline |
|
|
|
# Page content (title, article HTML, CSS, description) loaded from the local
# reader module; keys 'article', 'css', 'title', 'description' are consumed by
# the Interface below. NOTE(review): exact schema defined in reader.py — confirm there.
info = get_article()
|
|
|
|
|
|
|
|
|
|
|
|
|
def classify_sentiment(audio):
    """Classify the sentiment of a speech recording.

    Parameters
    ----------
    audio : str
        Path to an audio file on disk (the Gradio input below uses
        type="filepath", so this is a filename, not a waveform).

    Returns
    -------
    dict
        Mapping of sentiment label -> confidence score, as expected by
        gr.outputs.Label.
    """
    # Bug fix: the original re-created the pipeline (a full model load) on
    # every call. Cache it on the function object so the model loads once.
    pipe = getattr(classify_sentiment, "_pipe", None)
    if pipe is None:
        pipe = pipeline("audio-classification", model="hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD")
        classify_sentiment._pipe = pipe
    pred = pipe(audio)
    # pipe() returns a list of {"label": ..., "score": ...} dicts.
    return {dic["label"]: dic["score"] for dic in pred}
|
|
|
# Input widget: records from the microphone (or accepts a dropped file) and
# hands the model a path on disk, matching classify_sentiment's expectation.
_audio_widget = gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio")
input_audio = [_audio_widget]

# Output widget: show the five highest-scoring sentiment labels.
label = gr.outputs.Label(num_top_classes=5)
|
|
|
|
|
|
|
|
|
|
|
# Fallback HTML blurb for the app.
# NOTE(review): this module-level variable appears unused — the Interface below
# passes description=info['description'] instead; kept for compatibility.
# Bug fix: the original prefixed lines inside this string with '#' intending to
# comment them out, but '#' inside a string literal is literal text, so broken
# closing tags and dead <img> tags (pointing at HTML /tree/main/ pages, not raw
# images) rendered as garbage. The disabled lines are removed and the markup closed.
description = """
<p>
<center>
This application classifies the sentiment of the audio input provided by the user.
</center>
</p>
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Assemble the Gradio app: wire the classifier to the audio input and label
# output, with page content (title/article/css/description) coming from reader.
interface = gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    examples=[
        ["basta_neutral.wav"],
        ["detras_disgust.wav"],
        ["mortal_sadness.wav"],
        ["respiracion_happiness.wav"],
        ["robo_fear.wav"],
    ],
    article=info['article'],
    css=info['css'],
    theme='huggingface',
    title=info['title'],
    allow_flagging='never',
    description=info['description'],
)

interface.launch()