yerang committed on
Commit
bfe9005
·
verified ·
1 Parent(s): 8c2358b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -16
app.py CHANGED
@@ -185,12 +185,12 @@ elevenlabs_pipeline = ElevenLabsPipeline()
185
  stf_pipeline = STFPipeline()
186
  driving_video_path=gr.Video()
187
 
188
- @spaces.GPU(duration=120)
189
  def gpu_wrapped_stf_pipeline_execute(audio_path):
190
  return stf_pipeline.execute(audio_path)
191
 
192
 
193
- @spaces.GPU(duration=200)
194
  def gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice):
195
  return elevenlabs_pipeline.generate_voice(text, voice)
196
 
@@ -224,6 +224,7 @@ def txt_to_driving_video(text):
224
  # assets
225
  title_md = "assets/gradio_title.md"
226
  example_portrait_dir = "assets/examples/source"
 
227
  example_video_dir = "assets/examples/driving"
228
  data_examples = [
229
  [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
@@ -256,13 +257,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
256
  with gr.Row():
257
  with gr.Column():
258
  script_txt = gr.Text()
 
 
 
259
  with gr.Column():
260
- txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
 
 
 
 
261
 
262
- # with gr.Column():
263
- # audio_gen_button = gr.Button("Audio generation", variant="primary")
264
- # with gr.Row():
265
- # video_input = gr.Audio(label="Generated video", type="filepath")
266
 
267
  gr.Markdown(load_description("assets/gradio_description_upload.md"))
268
  with gr.Row():
@@ -270,13 +275,14 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
270
  image_input = gr.Image(type="filepath")
271
  gr.Examples(
272
  examples=[
273
- [osp.join(example_portrait_dir, "s9.jpg")],
274
- [osp.join(example_portrait_dir, "s6.jpg")],
275
- [osp.join(example_portrait_dir, "s10.jpg")],
276
- [osp.join(example_portrait_dir, "s5.jpg")],
277
- [osp.join(example_portrait_dir, "s7.jpg")],
278
- [osp.join(example_portrait_dir, "s12.jpg")],
279
- [osp.join(example_portrait_dir, "s22.jpg")],
 
280
  ],
281
  inputs=[image_input],
282
  cache_examples=False,
@@ -345,11 +351,28 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
345
  outputs=[output_video, output_video_concat],
346
  show_progress=True
347
  )
348
- txt2video_gen_button.click(
349
- fn=txt_to_driving_video,
 
 
 
 
 
 
 
 
350
  inputs=[
351
  script_txt
352
  ],
 
 
 
 
 
 
 
 
 
353
  outputs=[video_input],
354
  show_progress=True
355
  )
 
185
  stf_pipeline = STFPipeline()
186
  driving_video_path=gr.Video()
187
 
188
+ @spaces.GPU(duration=240)
189
  def gpu_wrapped_stf_pipeline_execute(audio_path):
190
  return stf_pipeline.execute(audio_path)
191
 
192
 
193
+ @spaces.GPU(duration=240)
194
  def gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice):
195
  return elevenlabs_pipeline.generate_voice(text, voice)
196
 
 
224
  # assets
225
  title_md = "assets/gradio_title.md"
226
  example_portrait_dir = "assets/examples/source"
227
+ example_portrait_dir_custom = "assets/examples/source"
228
  example_video_dir = "assets/examples/driving"
229
  data_examples = [
230
  [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
 
257
  with gr.Row():
258
  with gr.Column():
259
  script_txt = gr.Text()
260
+ # with gr.Column():
261
+ # txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
262
+
263
  with gr.Column():
264
+ audio_gen_button = gr.Button("Audio generation", variant="primary")
265
+ with gr.Row():
266
+ output_audio = gr.Audio(label="Generated audio", type="filepath")
267
+ with gr.Row():
268
+ video_gen_button = gr.Button("Audio to Video generation", variant="primary")
269
 
270
+
 
 
 
271
 
272
  gr.Markdown(load_description("assets/gradio_description_upload.md"))
273
  with gr.Row():
 
275
  image_input = gr.Image(type="filepath")
276
  gr.Examples(
277
  examples=[
278
+ [osp.join(example_portrait_dir, "01.webp")],
279
+ [osp.join(example_portrait_dir, "02.webp")],
280
+ [osp.join(example_portrait_dir, "03.jpg")],
281
+ [osp.join(example_portrait_dir, "04.jpg")],
282
+ [osp.join(example_portrait_dir, "05.jpg")],
283
+ [osp.join(example_portrait_dir, "06.jpg")],
284
+ [osp.join(example_portrait_dir, "07.jpg")],
285
+ [osp.join(example_portrait_dir, "08.jpg")],
286
  ],
287
  inputs=[image_input],
288
  cache_examples=False,
 
351
  outputs=[output_video, output_video_concat],
352
  show_progress=True
353
  )
354
+ # txt2video_gen_button.click(
355
+ # fn=txt_to_driving_video,
356
+ # inputs=[
357
+ # script_txt
358
+ # ],
359
+ # outputs=[video_input],
360
+ # show_progress=True
361
+ # )
362
+ audio_gen_button.click(
363
+ fn=gpu_wrapped_elevenlabs_pipeline_generate_voice,
364
  inputs=[
365
  script_txt
366
  ],
367
+ outputs=[output_audio],
368
+ show_progress=True
369
+ )
370
+
371
+ video_gen_button.click(
372
+ fn=gpu_wrapped_stf_pipeline_execute,
373
+ inputs=[
374
+ output_audio
375
+ ],
376
  outputs=[video_input],
377
  show_progress=True
378
  )