import gradio as gr
from transformers import pipeline


def classify_sentiment(audio, model):
    # Build an audio-classification pipeline for the selected checkpoint.
    # The model is (re)loaded on every call; see the caching note below.
    pipe = pipeline("audio-classification", model=model)
    pred = pipe(audio)
    # Map each predicted label to its score, as expected by gr.outputs.Label.
    return {dic["label"]: dic["score"] for dic in pred}
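
# Rebuilding the pipeline on every request is simple but slow. A minimal
# caching sketch (assuming only functools from the standard library; not
# part of the original app) could replace the direct pipeline(...) call:
#
#   from functools import lru_cache
#
#   @lru_cache(maxsize=None)
#   def get_pipe(model):
#       return pipeline("audio-classification", model=model)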

input_audio = [
    gr.inputs.Audio(source="microphone", type="filepath", label="Record / Drop audio"),
    gr.inputs.Dropdown(
        [
            "DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11",
            "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd",
        ],
        label="Model Name",
    ),
]
label = gr.outputs.Label(num_top_classes=5)

################### Gradio Web APP ################################

title = "Audio Sentiment Classifier"

description = """
<p>
<center>
This application classifies the sentiment of the audio input provided by the user.
</center>
</p>
<center>
<img src="https://huggingface.co./spaces/hackathon-pln-es/Audio-Sentiment-Classifier/resolve/main/sentiment.PNG" alt="logo" width="550"/>
</center>
"""



gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    title=title,
    description=description,
    #examples=[["test1.wav", "DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11"], ["test2.wav", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd"]],
    theme="grass",
).launch()