ananyachavan committed
Commit 3ec3c4a · verified · 1 Parent(s): 5741a4f

Update app.py

Files changed (1):
app.py (+42 -12)
app.py CHANGED
@@ -1,23 +1,53 @@
 import torch
 from transformers import pipeline
 import gradio as gr
+# Import the necessary libraries
+import torch
+from transformers import pipeline
+import gradio as gr
+import whisper
+
+# Load the Whisper model for transcription
+whisper_model = whisper.load_model("base")
+
+# Load the emotion recognition pipeline
+emotion_recognition = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)

-# Load the sentiment analysis pipeline with a specified model
-sentiment_analysis = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
+# Function to transcribe audio
+def transcribe_audio(audio_file):
+    result = whisper_model.transcribe(audio_file)
+    return result["text"]

-# Define a simple Gradio interface function
-def simple_sentiment_analysis(text):
-    sentiment = sentiment_analysis(text)
-    return sentiment[0]['label']
+# Function to transcribe audio and recognize emotions
+def transcribe_and_recognize_emotions(audio_file):
+    # Transcribe audio
+    transcription = transcribe_audio(audio_file)
+
+    # Recognize emotions of the transcribed text
+    emotions = emotion_recognition(transcription)
+
+    # Extract the emotion with the highest score
+    dominant_emotion = max(emotions[0], key=lambda x: x['score'])['label']
+
+    return transcription, dominant_emotion
+
+# Define the Gradio interface function
+def gradio_transcription_emotion_interface(audio):
+    transcription, emotion = transcribe_and_recognize_emotions(audio)
+    return transcription, emotion

 # Set up Gradio Interface
 iface = gr.Interface(
-    fn=simple_sentiment_analysis,
-    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
-    outputs=gr.Label(label="Sentiment"),
-    title="Sentiment Analysis",
-    description="Enter text to analyze its sentiment."
+    fn=gradio_transcription_emotion_interface,
+    inputs=gr.Audio(type="filepath"),
+    outputs=[
+        gr.Textbox(label="Transcription"),
+        gr.Label(label="Dominant Emotion")
+    ],
+    title="Audio Transcription and Emotion Recognition",
+    description="Upload or record an audio file to get the transcription and recognize its dominant emotion."
 )

-# Launch the interface with share=True for public access
+# Deploy the interface
 iface.launch(debug=True)
+
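
A note on the emotion-extraction step in the new code: with return_all_scores=True, the transformers text-classification pipeline returns one list of {label, score} dicts per input string, so emotions[0] selects the score list for the single transcription before taking the highest-scoring label. (Recent transformers releases deprecate return_all_scores in favor of top_k=None; the behavior here is the same.) A minimal, self-contained sketch of that extraction logic, with made-up scores for illustration:

# Hypothetical pipeline output for one input when return_all_scores=True:
# a list with one entry per input; each entry is a list of {label, score} dicts.
emotions = [[
    {"label": "joy", "score": 0.91},      # illustrative values only
    {"label": "anger", "score": 0.03},
    {"label": "sadness", "score": 0.06},
]]

# Same extraction as in the commit: keep the label with the highest score.
dominant_emotion = max(emotions[0], key=lambda x: x["score"])["label"]
assert dominant_emotion == "joy"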
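One deployment caveat: app.py now does import whisper and calls whisper.load_model("base"), which matches OpenAI's Whisper package (published on PyPI as openai-whisper; it also needs ffmpeg available on the host to decode audio). This commit does not touch requirements.txt, so the sketch below is an assumption about what the Space would need, not part of this change:

# requirements.txt (hypothetical; not included in this commit)
torch
transformers
gradio
openai-whisper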