Adding summarization
app.py CHANGED

@@ -2,7 +2,7 @@ from transformers import pipeline
 import gradio as gr
 
 asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
-
+summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
 classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")
 
 def transcribe(audio):
@@ -13,6 +13,10 @@ def speech_to_text(speech):
     text = asr(speech)["text"]
     return text
 
+def summarize_text(text):
+    stext = summarizer(text)
+    return stext
+
 def text_to_sentiment(text):
     sentiment = classifier(text)[0]["label"]
     return sentiment
@@ -26,8 +30,12 @@ with demo:
     text = gr.Textbox()
     b1.click(speech_to_text, inputs=audio_file, outputs=text)
 
-
+    stext = gr.Textbox()
+    b2 = gr.Button("Summarize Text")
+    b3.click(summarize_text, inputs=text, outputs=stext)
+
+    b3 = gr.Button("Classify Sentiment")
     label = gr.Label()
-
+    b3.click(text_to_sentiment, inputs=stext, outputs=label)
 
 demo.launch(share=True)
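For reference, below is a minimal sketch of what the full app.py might look like after this commit. The lines the diff does not show (the gr.Audio input, the b1 button, the gr.Blocks setup; the transcribe helper is omitted since its body is not visible) are assumptions filled in for illustration. The sketch also wires the summarize handler to b2 rather than b3, since in the committed code b3.click(summarize_text, ...) runs before b3 is defined, and it extracts [0]["summary_text"] from the summarization pipeline output, which otherwise returns a list of dicts rather than a plain string.

from transformers import pipeline
import gradio as gr

asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")

def speech_to_text(speech):
    # The ASR pipeline returns a dict with a "text" key holding the transcription.
    text = asr(speech)["text"]
    return text

def summarize_text(text):
    # The summarization pipeline returns a list of dicts; take the summary string.
    return summarizer(text)[0]["summary_text"]

def text_to_sentiment(text):
    # The emotion classifier returns a list of {"label", "score"} dicts.
    return classifier(text)[0]["label"]

demo = gr.Blocks()

with demo:
    # Assumed component names for the parts of the file the diff does not show.
    audio_file = gr.Audio(type="filepath")
    text = gr.Textbox()
    b1 = gr.Button("Recognize Speech")
    b1.click(speech_to_text, inputs=audio_file, outputs=text)

    stext = gr.Textbox()
    b2 = gr.Button("Summarize Text")
    # Wired to b2 here; the commit attaches this handler to b3 before b3 exists.
    b2.click(summarize_text, inputs=text, outputs=stext)

    b3 = gr.Button("Classify Sentiment")
    label = gr.Label()
    b3.click(text_to_sentiment, inputs=stext, outputs=label)

demo.launch(share=True)

With this wiring, "Summarize Text" feeds the transcription box into the summarizer and writes the result into stext, and "Classify Sentiment" then classifies that summary, matching the inputs=stext choice in the committed diff.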