Spaces:
Running
on
Zero
Running
on
Zero
Upload app.py
Browse files
app.py
CHANGED
@@ -197,7 +197,7 @@ def forward(tokens, voice, speed, device='cpu'):
|
|
197 |
def forward_gpu(tokens, voice, speed):
|
198 |
return forward(tokens, voice, speed, device='cuda')
|
199 |
|
200 |
-
def generate(text, voice, ps=None, speed=1
|
201 |
if voice not in VOICES:
|
202 |
# Ensure stability for https://huggingface.co/spaces/Pendrokar/TTS-Spaces-Arena
|
203 |
voice = 'af'
|
@@ -263,19 +263,19 @@ with gr.Blocks() as basic_tts:
|
|
263 |
)
|
264 |
with gr.Accordion('Audio Settings', open=False):
|
265 |
with gr.Row():
|
266 |
-
speed = gr.Slider(minimum=0.5, maximum=2
|
267 |
with gr.Row():
|
268 |
with gr.Column():
|
269 |
-
opening_cut = gr.Slider(minimum=0, maximum=24000, value=4000, step=1000, label='✂️ Opening Cut', info='Cut
|
270 |
with gr.Column():
|
271 |
-
closing_cut = gr.Slider(minimum=0, maximum=24000, value=2000, step=1000, label='🎬 Closing Cut', info='Cut
|
272 |
with gr.Row():
|
273 |
with gr.Column():
|
274 |
-
ease_in = gr.Slider(minimum=0, maximum=24000, value=3000, step=1000, label='🎢 Ease In', info='Ease in
|
275 |
with gr.Column():
|
276 |
-
ease_out = gr.Slider(minimum=0, maximum=24000, value=1000, step=1000, label='🛝 Ease Out', info='Ease out
|
277 |
-
text.submit(generate, inputs=[text, voice, in_ps, speed, opening_cut, closing_cut, ease_in, ease_out, use_gpu], outputs=[audio, out_ps])
|
278 |
-
generate_btn.click(generate, inputs=[text, voice, in_ps, speed, opening_cut, closing_cut, ease_in, ease_out, use_gpu], outputs=[audio, out_ps])
|
279 |
|
280 |
@torch.no_grad()
|
281 |
def lf_forward(token_lists, voice, speed, device='cpu'):
|
@@ -436,7 +436,7 @@ with gr.Blocks() as lf_tts:
|
|
436 |
)
|
437 |
with gr.Accordion('Audio Settings', open=False):
|
438 |
with gr.Row():
|
439 |
-
speed = gr.Slider(minimum=0.5, maximum=2
|
440 |
with gr.Row():
|
441 |
with gr.Column():
|
442 |
opening_cut = gr.Slider(minimum=0, maximum=24000, value=4000, step=1000, label='✂️ Opening Cut', info='Cut this many samples from the start')
|
@@ -461,12 +461,6 @@ Kokoro is a frontier TTS model for its size. It has 80 million parameters,<sup>[
|
|
461 |
|
462 |
The weights are currently private, but a free public demo is hosted here, at `https://hf.co/spaces/hexgrad/Kokoro-TTS`
|
463 |
|
464 |
-
### Will this be open sourced?
|
465 |
-
There currently isn't a release date scheduled for the weights. The inference code in this space is MIT licensed. The architecture was already published by Li et al, with MIT licensed code and pretrained weights.<sup>[2]</sup>
|
466 |
-
|
467 |
-
### What does it mean if a voice is unstable?
|
468 |
-
An unstable voice is more likely to stumble or produce unnatural artifacts, especially on short or strange texts.
|
469 |
-
|
470 |
### Compute
|
471 |
The model was trained on 1x A100-class 80GB instances rented from [Vast.ai](https://cloud.vast.ai/?ref_id=79907).<sup>[3]</sup><br/>
|
472 |
Vast was chosen over other compute providers due to its competitive on-demand hourly rates.<br/>
|
@@ -490,6 +484,22 @@ Random Japanese texts: CC0 public domain<sup>[6]</sup>
|
|
490 |
@rzvzn on Discord
|
491 |
""")
|
492 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
493 |
with gr.Blocks() as api_info:
|
494 |
gr.Markdown("""
|
495 |
This Space can be used via API. The following code block can be copied and run in one Google Colab cell.
|
@@ -527,8 +537,8 @@ with gr.Blocks() as version_info:
|
|
527 |
|
528 |
with gr.Blocks() as app:
|
529 |
gr.TabbedInterface(
|
530 |
-
[basic_tts, lf_tts, about, api_info, version_info],
|
531 |
-
['🗣️ Basic TTS', '📖 Long-Form', 'ℹ️ About', '🚀 Gradio API', '📝 Version History'],
|
532 |
)
|
533 |
|
534 |
if __name__ == '__main__':
|
|
|
197 |
def forward_gpu(tokens, voice, speed):
|
198 |
return forward(tokens, voice, speed, device='cuda')
|
199 |
|
200 |
+
def generate(text, voice, ps=None, speed=1, reduce_noise=None, opening_cut=4000, closing_cut=2000, ease_in=3000, ease_out=1000, pad_before=None, pad_after=None, use_gpu=None):
|
201 |
if voice not in VOICES:
|
202 |
# Ensure stability for https://huggingface.co/spaces/Pendrokar/TTS-Spaces-Arena
|
203 |
voice = 'af'
|
|
|
263 |
)
|
264 |
with gr.Accordion('Audio Settings', open=False):
|
265 |
with gr.Row():
|
266 |
+
speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='⚡️ Speed', info='Adjust the speed of the audio; the settings below are auto-scaled by speed')
|
267 |
with gr.Row():
|
268 |
with gr.Column():
|
269 |
+
opening_cut = gr.Slider(minimum=0, maximum=24000, value=4000, step=1000, label='✂️ Opening Cut', info='Cut samples from the start')
|
270 |
with gr.Column():
|
271 |
+
closing_cut = gr.Slider(minimum=0, maximum=24000, value=2000, step=1000, label='🎬 Closing Cut', info='Cut samples from the end')
|
272 |
with gr.Row():
|
273 |
with gr.Column():
|
274 |
+
ease_in = gr.Slider(minimum=0, maximum=24000, value=3000, step=1000, label='🎢 Ease In', info='Ease in samples, after opening cut')
|
275 |
with gr.Column():
|
276 |
+
ease_out = gr.Slider(minimum=0, maximum=24000, value=1000, step=1000, label='🛝 Ease Out', info='Ease out samples, before closing cut')
|
277 |
+
text.submit(generate, inputs=[text, voice, in_ps, None, speed, opening_cut, closing_cut, ease_in, ease_out, None, None, use_gpu], outputs=[audio, out_ps])
|
278 |
+
generate_btn.click(generate, inputs=[text, voice, in_ps, None, speed, opening_cut, closing_cut, ease_in, ease_out, None, None, use_gpu], outputs=[audio, out_ps])
|
279 |
|
280 |
@torch.no_grad()
|
281 |
def lf_forward(token_lists, voice, speed, device='cpu'):
|
|
|
436 |
)
|
437 |
with gr.Accordion('Audio Settings', open=False):
|
438 |
with gr.Row():
|
439 |
+
speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='⚡️ Speed', info='Adjust the speed of the audio; the settings below are auto-scaled by speed')
|
440 |
with gr.Row():
|
441 |
with gr.Column():
|
442 |
opening_cut = gr.Slider(minimum=0, maximum=24000, value=4000, step=1000, label='✂️ Opening Cut', info='Cut this many samples from the start')
|
|
|
461 |
|
462 |
The weights are currently private, but a free public demo is hosted here, at `https://hf.co/spaces/hexgrad/Kokoro-TTS`
|
463 |
|
|
|
|
|
|
|
|
|
|
|
|
|
464 |
### Compute
|
465 |
The model was trained on 1x A100-class 80GB instances rented from [Vast.ai](https://cloud.vast.ai/?ref_id=79907).<sup>[3]</sup><br/>
|
466 |
Vast was chosen over other compute providers due to its competitive on-demand hourly rates.<br/>
|
|
|
484 |
@rzvzn on Discord
|
485 |
""")
|
486 |
|
487 |
+
with gr.Blocks() as faq:
|
488 |
+
gr.Markdown("""
|
489 |
+
### Will this be open sourced?
|
490 |
+
There currently isn't a release date scheduled for the weights. The inference code in this space is MIT licensed. The StyleTTS 2 architecture was already published by Li et al., with MIT licensed code and pretrained weights.
|
491 |
+
|
492 |
+
### What does it mean for a voice to be unstable?
|
493 |
+
An unstable voice is more likely to stumble or produce unnatural artifacts, especially on short or strange texts.
|
494 |
+
|
495 |
+
### CPU faster than ZeroGPU? How?
|
496 |
+
The CPU seems to be a dedicated resource for this Space, whereas the ZeroGPU pool is shared and dynamically allocated across all of HF. Obviously the latter demands some kind of queue & allocator system, which inevitably must add latency.
|
497 |
+
|
498 |
+
For Basic TTS under 100 tokens (~characters), only a few seconds of audio needs to be generated, so the actual compute is not that heavy. For these short bursts, the dedicated CPU can often compute the result faster than the total time it takes for you to: enter the ZeroGPU queue, wait to get allocated, and have a GPU compute and deliver the result.
|
499 |
+
|
500 |
+
As you move beyond 100 tokens and especially closer to the ~500 token context window, the GPU catches up. And for Long-Form, since batches of 100 segments are processed at a time, the GPU should outspeed the CPU by 1-2 orders of magnitude.
|
501 |
+
""")
|
502 |
+
|
503 |
with gr.Blocks() as api_info:
|
504 |
gr.Markdown("""
|
505 |
This Space can be used via API. The following code block can be copied and run in one Google Colab cell.
|
|
|
537 |
|
538 |
with gr.Blocks() as app:
|
539 |
gr.TabbedInterface(
|
540 |
+
[basic_tts, lf_tts, about, faq, api_info, version_info],
|
541 |
+
['🗣️ Basic TTS', '📖 Long-Form', 'ℹ️ About', '❓ FAQ', '🚀 Gradio API', '📝 Version History'],
|
542 |
)
|
543 |
|
544 |
if __name__ == '__main__':
|