lanbogao committed on
Commit 3424983
1 Parent(s): 60c9382

Update app.py

Files changed (1):
  1. app.py +39 -31
app.py CHANGED
@@ -1,23 +1,27 @@
import gradio as gr
import whisper
- import subprocess
- import os
from pytube import YouTube
from fastapi import FastAPI, Response, Request
import yt_dlp
+ import uvicorn
+
+ CUSTOM_PATH = "/"
+
+ app = FastAPI()

langs = ["None"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))
model_size = list(whisper._MODELS.keys())


- @app.get("/subtitle")
- async def get_subtitle(url: str):
+
+ #async def get_subtitle(url: str):
    # Download the subtitle with download_subtitle()
-     subtitle_url = download_subtitle(url)
+     #subtitle_url = download_subtitle(url)
    # Stream the subtitle as a response
-     return StreamingResponse(requests.get(subtitle_url, stream=True).iter_content(chunk_size=1024))
+     #return StreamingResponse(requests.get(subtitle_url, stream=True).iter_content(chunk_size=1024))

- def get_subtitle(url, lang='en'):
+ @app.get("/subtitle")
+ async def get_subtitle(url, lang='en'):
    # Download subtitles if available
    ydl_opts = {
        'writesubtitles': True,
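
The new /subtitle handler in this hunk builds on yt_dlp's subtitle options (the 'writesubtitles' key and its neighbours in ydl_opts). Below is a rough, self-contained sketch of how those options are typically used to locate a caption track without downloading the video; the function name fetch_subtitle_url and its return convention are illustrative, not code from app.py.

import yt_dlp

def fetch_subtitle_url(url, lang="en"):
    # Probe the video only; ask yt_dlp to report available subtitle tracks.
    ydl_opts = {
        "skip_download": True,
        "writesubtitles": True,      # uploaded subtitles
        "writeautomaticsub": True,   # fall back to auto-generated captions
        "subtitleslangs": [lang],
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(url, download=False)
    # Prefer real subtitles over automatic captions for the requested language.
    tracks = (info.get("subtitles", {}).get(lang)
              or info.get("automatic_captions", {}).get(lang)
              or [])
    # Each track dict carries a direct "url" plus its format ("ext"), e.g. vtt.
    return tracks[0]["url"] if tracks else None
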
@@ -98,29 +102,33 @@ def format_timestamp(t):
    mi = (t - int(t))*1000
    return f"{int(hh):02d}:{int(mm):02d}:{int(ss):02d},{int(mi):03d}"

+ def gradio_interface():
+     with gr.Blocks() as demo:
+
+         with gr.Row():
+
+             with gr.Column():
+
+                 with gr.Row():
+                     url = gr.Textbox(placeholder='Youtube video URL', label='URL')
+
+                 with gr.Row():
+
+                     model_size = gr.Dropdown(choices=model_size, value='tiny', label="Model")
+                     lang = gr.Dropdown(choices=langs, value="None", label="Language (Optional)")
+                     format = gr.Dropdown(choices=["None", ".srt"], value="None", label="Timestamps? (Optional)")
+
+                 with gr.Row():
+                     gr.Markdown("Larger models are more accurate, but slower. For 1min video, it'll take ~30s (tiny), ~1min (base), ~3min (small), ~5min (medium), etc.")
+                     transcribe_btn = gr.Button('Transcribe')
+
+             with gr.Column():
+                 outputs = gr.Textbox(placeholder='Transcription of the video', label='Transcription')
+
+         transcribe_btn.click(get_transcript, inputs=[url, model_size, lang, format], outputs=outputs)

- with gr.Blocks() as demo:
-
-     with gr.Row():
-
-         with gr.Column():
-
-             with gr.Row():
-                 url = gr.Textbox(placeholder='Youtube video URL', label='URL')
-
-             with gr.Row():
-
-                 model_size = gr.Dropdown(choices=model_size, value='tiny', label="Model")
-                 lang = gr.Dropdown(choices=langs, value="None", label="Language (Optional)")
-                 format = gr.Dropdown(choices=["None", ".srt"], value="None", label="Timestamps? (Optional)")
-
-             with gr.Row():
-                 gr.Markdown("Larger models are more accurate, but slower. For 1min video, it'll take ~30s (tiny), ~1min (base), ~3min (small), ~5min (medium), etc.")
-                 transcribe_btn = gr.Button('Transcribe')
-
-         with gr.Column():
-             outputs = gr.Textbox(placeholder='Transcription of the video', label='Transcription')
-
-     transcribe_btn.click(get_transcript, inputs=[url, model_size, lang, format], outputs=outputs)
+ #demo.launch(debug=True)
+ io = gr.Interface(gradio_interface)

- demo.launch(debug=True)
+ app = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)
+ uvicorn.run(app, host="0.0.0.0", port=7860)
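
The new entry point replaces demo.launch() with a Gradio UI mounted inside a FastAPI app and served by uvicorn (the gr.mount_gradio_app and uvicorn.run lines above). For reference, the sketch below shows that mounting pattern in isolation; the /health route and the build_demo helper are placeholders, not part of this Space.

import gradio as gr
import uvicorn
from fastapi import FastAPI

app = FastAPI()

@app.get("/health")
def health():
    # Plain FastAPI routes keep working alongside the mounted UI.
    return {"status": "ok"}

def build_demo() -> gr.Blocks:
    # Build the Blocks UI and return the object; mount_gradio_app expects a
    # gr.Blocks instance rather than a function that builds one.
    with gr.Blocks() as demo:
        url = gr.Textbox(label="URL")
        out = gr.Textbox(label="Echo")
        gr.Button("Run").click(lambda u: u, inputs=url, outputs=out)
    return demo

# Mount the UI at "/" on the same FastAPI app, then serve both with uvicorn.
app = gr.mount_gradio_app(app, build_demo(), path="/")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)

Registering plain routes such as /health before the mount keeps them reachable even though the UI sits at the root path.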