DrishtiSharma committed
Commit 23ac7a2
1 Parent(s): 08891ee

Update app.py

Files changed (1)
  1. app.py +35 -0
app.py CHANGED
@@ -11,6 +11,41 @@ def classify_sentiment(audio, model):
 input_audio = [gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio"), gr.inputs.Dropdown(["DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd"], label="Model Name")]
 label = gr.outputs.Label(num_top_classes=5)

+################### Gradio Web APP ################################
+
+title = "Audio Sentiment Classifier"
+
+description = """
+<p>
+<center>
+This application classifies the sentiment of the audio input provided by the user.
+</center>
+</p>
+<center>
+<img src="https://huggingface.co/spaces/hackathon-pln-es/Audio-Sentiment-Classifier/tree/main/sentiment.PNG" alt="logo" width="550"/>
+</center>
+"""
+
+
+
+gr.Interface(
+    fn=transcribe,
+    inputs=[
+        gr.inputs.Audio(source="microphone", type="filepath"),
+        "state"
+    ],
+    outputs=[
+        "textbox",
+        "state"
+    ],
+    title=title,
+    description=description,
+    article=article,
+    live=True).launch()
+
+
+
+
 gr.Interface(
     fn = classify_sentiment,
     inputs = input_audio,
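
Note: the added gr.Interface block references a transcribe function and an article string, neither of which is defined anywhere in this hunk. A minimal sketch of definitions that would satisfy those references, assuming a Hugging Face transformers ASR pipeline (the model name, wording, and helper below are placeholders, not part of this commit):

from transformers import pipeline

# Hypothetical: the commit does not show where `transcribe` or `article` are defined.
asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

article = "<p style='text-align: center'>Streaming speech-to-text demo.</p>"

def transcribe(audio, state=""):
    # Transcribe the recorded clip and append it to the running transcript in `state`.
    text = asr(audio)["text"] if audio else ""
    state = (state + " " + text).strip()
    # The interface declares two outputs ("textbox", "state"), so both are returned.
    return state, state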