Liusuthu commited on
Commit
ad7940b
·
verified ·
1 Parent(s): c14f75f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -3
app.py CHANGED
@@ -6,7 +6,7 @@ import soundfile as sf
6
  import torchaudio
7
  from speechbrain.pretrained.interfaces import foreign_class
8
 
9
- from app_utils import preprocess_video_and_rank
10
  from authors import AUTHORS
11
 
12
  # Importing necessary components for the Gradio app
@@ -19,6 +19,8 @@ from gradio_client import Client
19
  client = Client("Liusuthu/TextDepression")
20
 
21
  os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
 
 
22
  ###########################语音部分######################################
23
  classifier = foreign_class(
24
  source="pretrained_models/local-speechbrain/emotion-recognition-wav2vec2-IEMOCAP", # ".\\emotion-recognition-wav2vec2-IEMOCAP"
@@ -79,9 +81,15 @@ def clear_dynamic_info():
79
  gr.Textbox(""),
80
  )
81
 
82
-
 
 
 
 
 
 
83
  ##################################设置各自的app类####################
84
- with gr.Blocks(css="app.css") as video:
85
  with gr.Tab("Dynamic App"):
86
  gr.Markdown(value=DESCRIPTION_DYNAMIC)
87
  with gr.Row():
@@ -154,6 +162,8 @@ with gr.Blocks(css="app.css") as video:
154
  inputs=out1,
155
  outputs=text_result,
156
  )
 
 
157
  ####################################
158
  speech = gr.Interface(
159
  classify_continuous,
@@ -164,11 +174,47 @@ speech = gr.Interface(
164
  gr.Text(label="音频情感识别2"),
165
  ],
166
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
 
168
  with gr.Blocks() as app:
169
  with gr.Tab("语音"):
170
  speech.render()
171
  with gr.Tab("视频"):
172
  video.render()
 
 
 
173
 
 
174
  app.launch()
 
6
  import torchaudio
7
  from speechbrain.pretrained.interfaces import foreign_class
8
 
9
+ from app_utils import preprocess_video_and_rank, video_score
10
  from authors import AUTHORS
11
 
12
  # Importing necessary components for the Gradio app
 
19
  client = Client("Liusuthu/TextDepression")
20
 
21
  os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
22
+
23
+
24
  ###########################语音部分######################################
25
  classifier = foreign_class(
26
  source="pretrained_models/local-speechbrain/emotion-recognition-wav2vec2-IEMOCAP", # ".\\emotion-recognition-wav2vec2-IEMOCAP"
 
81
  gr.Textbox(""),
82
  )
83
 
84
+ def clear_video():
85
+ return (
86
+ gr.Video(value=None),
87
+ gr.Number(value=None),
88
+ gr.Number(value=None),
89
+ gr.Textbox("")
90
+ )
91
  ##################################设置各自的app类####################
92
+ with gr.Blocks(css="app.css") as video:
93
  with gr.Tab("Dynamic App"):
94
  gr.Markdown(value=DESCRIPTION_DYNAMIC)
95
  with gr.Row():
 
162
  inputs=out1,
163
  outputs=text_result,
164
  )
165
+
166
+
167
  ####################################
168
  speech = gr.Interface(
169
  classify_continuous,
 
174
  gr.Text(label="音频情感识别2"),
175
  ],
176
  )
177
+ ############################################################
178
+ with gr.Blocks() as video_score_app:
179
+ with gr.Row():
180
+ with gr.Column(scale=2):
181
+ input_video = gr.Video(
182
+ sources=["upload"], elem_classes="video1", format='mp4'
183
+ )
184
+ with gr.Row():
185
+ clear_1 = gr.Button(
186
+ value="Clear", interactive=True, scale=1
187
+ )
188
+ submit_1 = gr.Button(
189
+ value="Score", interactive=True, scale=1, elem_classes="submit"
190
+ )
191
+ with gr.Column(scale=2):
192
+ with gr.Row():
193
+ score1=gr.Number(interactive=False,label="score1")
194
+ score2=gr.Number(interactive=False,label="score2")
195
+ result3=gr.Textbox(interactive=False)
196
+
197
+ clear_1.click(
198
+ fn=clear_video,
199
+ inputs=[],
200
+ outputs=[input_video,score1,score2,result3]
201
+ )
202
+ submit_1.click(
203
+ fn=video_score,
204
+ inputs=[input_video],
205
+ outputs=[score1,score2,result3]
206
+ )
207
+
208
+
209
 
210
  with gr.Blocks() as app:
211
  with gr.Tab("语音"):
212
  speech.render()
213
  with gr.Tab("视频"):
214
  video.render()
215
+ with gr.Tab("视频集成打分"):
216
+ video_score_app.render()
217
+
218
 
219
+
220
  app.launch()