dongyh20 committed on
Commit 746c925 · 1 Parent(s): 89950c8

update space

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -10,7 +10,8 @@ os.environ['REGIONAL_POOL'] = '2x'
 os.environ['FORCE_NO_DOWNSAMPLE'] = '1'
 os.environ['LOAD_VISION_EARLY'] = '1'
 os.environ['SKIP_LOAD_VIT'] = '1'
-
+
+import spaces
 
 import gradio as gr
 import torch
@@ -146,6 +147,7 @@ def extract_audio(videos_file_path):
     my_clip = mp.VideoFileClip(videos_file_path)
     return my_clip.audio
 
+@spaces.GPU(duration=120)
 def ola_inference(multimodal, audio_path):
     visual, text = multimodal["files"][0], multimodal["text"]
     if visual.endswith("image2.png"):
@@ -353,4 +355,4 @@ demo = gr.Interface(
    article=bibtext,
 )
 # Launch the Gradio app
-demo.launch(server_name="0.0.0.0",server_port=80)
+demo.launch()
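The change adopts Hugging Face's ZeroGPU pattern: import the spaces package, decorate the GPU-bound handler with @spaces.GPU(duration=120) so a GPU is attached only for the duration of each call, and let demo.launch() use the platform's default host and port instead of the hard-coded server_name="0.0.0.0", server_port=80. Below is a minimal sketch of that pattern; the infer handler and the text-only interface are placeholders standing in for ola_inference and the real multimodal app, not part of the commit.

import spaces
import gradio as gr

@spaces.GPU(duration=120)  # request a GPU for up to 120 seconds per call
def infer(text):
    # Sketch only: the GPU-dependent work (model forward pass) would run here.
    return f"echo: {text}"

demo = gr.Interface(fn=infer, inputs="text", outputs="text")

# On Spaces, omit server_name/server_port and let the platform route traffic.
demo.launch()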