Xabi Ezpeleta committed · Commit 58ef98c · 1 Parent(s): 0ef4951
Initial basque whisper demo

Files changed:
- app.py +65 -0
- packages.txt +1 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,65 @@
+
+import gradio as gr
+from AinaTheme import theme
+from faster_whisper import WhisperModel
+import torch
+
+device, torch_dtype = ("cuda", "float32") if torch.cuda.is_available() else ("cpu", "int8")  # float32 on GPU, int8 quantization on CPU
+
+MODEL_NAME = "xezpeleta/whisper-large-v3-eu"
+print("Loading model ...")
+model = WhisperModel(MODEL_NAME, device=device, compute_type=torch_dtype)
+print("Loading model done.")
+
+def transcribe(inputs):
+    print("transcribe()")
+    if inputs is None:
+        raise gr.Error("Ez da audio fitxategirik aukeratu. Mesedez, igo audio fitxategi bat "\
+                       "edo grabatu zure ahotsa mikrofono bidez")
+
+    segments, _ = model.transcribe(
+        inputs,
+        chunk_length=30,
+        task="transcribe",
+        word_timestamps=True,
+        repetition_penalty=1.1,
+        temperature=[0.0, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0],  # fallback temperature schedule
+    )
+
+    text = ""
+    for segment in segments:
+        text += " " + segment.text.strip()
+    return text
+
+
+description_string = "Mikrofono grabazioaren edo audio fitxategi baten transkripzio automatikoa\n Demo hau hurrengo eredu hauek erabiliz"\
+                     " sortua izan da: "\
+                     f"[{MODEL_NAME}](https://huggingface.co/{MODEL_NAME})"
+
+
+def clear():
+    return None
+
+
+with gr.Blocks(theme=theme) as demo:
+    gr.Markdown(description_string)
+    with gr.Row():
+        with gr.Column(scale=1):
+            input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio")
+
+        with gr.Column(scale=1):
+            output = gr.Textbox(label="Output", lines=8)
+
+    with gr.Row(variant="panel"):
+        clear_btn = gr.Button("Clear")
+        submit_btn = gr.Button("Submit", variant="primary")
+
+
+    submit_btn.click(fn=transcribe, inputs=[input], outputs=[output])
+    clear_btn.click(fn=clear, inputs=[], outputs=[input], queue=False)
+
+
+if __name__ == "__main__":
+    demo.launch()
+
+
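As a quick sanity check of the model call above (a sketch, not part of this commit), the same faster-whisper settings can be exercised without the Gradio UI; "sample.wav" below is a hypothetical local audio file:

from faster_whisper import WhisperModel

# Same model and decoding options as app.py; the audio path is a placeholder.
model = WhisperModel("xezpeleta/whisper-large-v3-eu", compute_type="int8")
segments, info = model.transcribe(
    "sample.wav",
    chunk_length=30,
    task="transcribe",
    word_timestamps=True,
    repetition_penalty=1.1,
    temperature=[0.0, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0],
)
print(info.language, info.language_probability)  # detected language and its confidence
print(" ".join(segment.text.strip() for segment in segments))  # segments is a lazy generator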
packages.txt
ADDED
@@ -0,0 +1 @@
+ffmpeg
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+faster_whisper
+torch
+gradio==4.44.0
+aina-gradio-theme==2.3
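On Hugging Face Spaces, packages.txt is installed with apt (supplying ffmpeg for audio handling) and requirements.txt with pip. For a local run, a minimal environment check after `pip install -r requirements.txt` could look like this (a sketch, not part of the commit):

import torch
import gradio
import faster_whisper
from AinaTheme import theme  # provided by the aina-gradio-theme package

print("gradio", gradio.__version__)                   # pinned to 4.44.0 in requirements.txt
print("cuda available:", torch.cuda.is_available())   # decides float32 vs int8 in app.py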