DrishtiSharma committed on
Commit 52deb65
1 Parent(s): 2e740d1

Update app.py

Files changed (1)
  1. app.py +11 -2
app.py CHANGED
@@ -3,14 +3,23 @@ from transformers import pipeline
 
 
 
+#def classify_sentiment(audio, model):
+    #pipe = pipeline("audio-classification", model=model)
+    #sentiment_classifier = pipe(audio)
+    #return sentiment_classifier
+
 def classify_sentiment(audio, model):
     pipe = pipeline("audio-classification", model=model)
     sentiment_classifier = pipe(audio)
-    return sentiment_classifier
+
+    preds_dict={}
+    for sentiment_classifier in preds[0]:
+        preds_dict[pred['label']] = pred['score']
 
+    return preds_dict
 
 input_audio = [gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio"), gr.inputs.Dropdown(["DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd"], label="Model Name")]
-label = gr.outputs(num_top_classes=5)
+label = gr.outputs.Label(num_top_classes=5)
 
 gr.Interface(
     fn = classify_sentiment,
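Note that the loop added in this commit would fail at runtime: `preds` is never defined, the loop variable is named `sentiment_classifier` but the body reads from `pred`, so the first inference would raise a NameError. Below is a minimal corrected sketch of app.py, not the committed code. It assumes the usual output of the transformers audio-classification pipeline (a list of {"label", "score"} dicts), the Gradio 2.x input/output classes already used in this file, and an inputs/outputs/.launch() wiring for gr.Interface that the hunk does not show.

# Minimal sketch (not the committed code): assumes the standard output of
# transformers' audio-classification pipeline and Gradio 2.x input/output classes.
import gradio as gr
from transformers import pipeline

def classify_sentiment(audio, model):
    # Load the selected model and run it on the recorded/uploaded audio file.
    pipe = pipeline("audio-classification", model=model)
    preds = pipe(audio)  # list of {"label": ..., "score": ...} dicts
    # gr.outputs.Label expects a label -> confidence mapping.
    return {pred["label"]: pred["score"] for pred in preds}

input_audio = [
    gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio"),
    gr.inputs.Dropdown(
        ["DrishtiSharma/wav2vec2-base-finetuned-sentiment-mesd-v11",
         "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd"],
        label="Model Name",
    ),
]
label = gr.outputs.Label(num_top_classes=5)

# The hunk cuts off after fn = classify_sentiment; the wiring and .launch()
# below are assumed, not shown in the commit.
gr.Interface(fn=classify_sentiment, inputs=input_audio, outputs=label).launch()

The dict comprehension is equivalent to the preds_dict loop the commit appears to intend, and a label-to-score dict is the format gr.outputs.Label renders as ranked confidences.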