barghavani committed on
Commit
0ad35bc
·
1 Parent(s): 4b46221

Update app.py

Files changed (1)
  1. app.py +155 -703
app.py CHANGED
@@ -1,721 +1,173 @@
1
- import sys
2
- import io, os, stat
3
- import subprocess
4
- import random
5
- from zipfile import ZipFile
6
- import uuid
7
- import time
8
- import torch
9
- import torchaudio
10
-
11
- # Download the UniDic dictionary for MeCab
12
- os.system('python -m unidic download')
13
-
14
- # By using XTTS you agree to CPML license https://coqui.ai/cpml
15
- os.environ["COQUI_TOS_AGREED"] = "1"
16
-
17
- # langid is used to detect language for longer text
18
- # Most users expect the text to be in their own language; there is a checkbox to disable detection
19
- import langid
20
- import base64
21
- import csv
22
- from io import StringIO
23
- import datetime
24
- import re
25
-
26
  import gradio as gr
27
- from scipy.io.wavfile import write
28
- from pydub import AudioSegment
29
-
30
  from TTS.api import TTS
31
- from TTS.tts.configs.xtts_config import XttsConfig
32
- from TTS.tts.models.xtts import Xtts
33
- from TTS.utils.generic_utils import get_user_data_dir
34
-
35
- HF_TOKEN = os.environ.get("HF_TOKEN")
36
-
37
- from huggingface_hub import HfApi
38
  from huggingface_hub import hf_hub_download
39
 
40
- # will use the API to restart the space on an unrecoverable error
41
- api = HfApi(token=HF_TOKEN)
42
- repo_id = "saillab/xtts-base"
43
-
44
- # Use a newer ffmpeg binary for Ubuntu 20 to enable denoising of microphone input
45
- print("Export newer ffmpeg binary for denoise filter")
46
- ZipFile("ffmpeg.zip").extractall()
47
- print("Make ffmpeg binary executable")
48
- st = os.stat("ffmpeg")
49
- os.chmod("ffmpeg", st.st_mode | stat.S_IEXEC)
50
-
51
- # This will trigger downloading model
52
- print("Downloading if not downloaded Coqui XTTS V2")
53
- ### Vahid's modification - 12/9/2023 ###
54
- #!mkdir SAIL_XTTS
55
- SAIL_file = "checkpoint_30000.pth" #checkpoint file name
56
- SAIL_repo = "saillab/xtts_v2_fa_revision1"
57
- model_file = hf_hub_download(repo_id=SAIL_repo, filename=SAIL_file, local_dir="SAIL_XTTS", use_auth_token=HF_TOKEN)
58
- config_file = hf_hub_download(repo_id=SAIL_repo, filename='config.json', local_dir="SAIL_XTTS", use_auth_token=HF_TOKEN)
59
- vocab_file = hf_hub_download(repo_id=SAIL_repo, filename='vocab.json', local_dir="SAIL_XTTS", use_auth_token=HF_TOKEN)
60
-
61
-
62
- #from TTS.utils.manage import ModelManager
63
-
64
- #model_name = "saillab/xtts_v2_fa_revision1"
65
- #ModelManager().download_model(model_name)
66
- #model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
67
- print("XTTS downloaded")
68
-
69
- config = XttsConfig()
70
- #config.load_json(os.path.join(model_path, "config.json"))
71
- config.load_json(config_file)
72
-
73
- model = Xtts.init_from_config(config)
74
- model.load_checkpoint(
75
- config,
76
- checkpoint_path=model_file,#os.path.join(model_path, "model.pth"),
77
- vocab_path=vocab_file,#os.path.join(model_path, "vocab.json"),
78
- eval=True,
79
- use_deepspeed=True,
80
- )
81
- model.cuda()
82
-
83
- # This is for debugging purposes only
84
- DEVICE_ASSERT_DETECTED = 0
85
- DEVICE_ASSERT_PROMPT = None
86
- DEVICE_ASSERT_LANG = None
87
-
88
- supported_languages = config.languages
89
-
90
- def predict(
91
- prompt,
92
- language,
93
- audio_file_pth,
94
- mic_file_path,
95
- use_mic,
96
- voice_cleanup,
97
- no_lang_auto_detect,
98
- agree,
99
- ):
100
- if agree == True:
101
- if language not in supported_languages:
102
- gr.Warning(
103
- f"Language you put {language} in is not in is not in our Supported Languages, please choose from dropdown"
104
- )
105
-
106
- return (
107
- None,
108
- None,
109
- None,
110
- None,
111
- )
112
-
113
- language_predicted = langid.classify(prompt)[
114
- 0
115
- ].strip() # strip needed as there is a space at the end!
116
-
117
- # tts expects chinese as zh-cn
118
- if language_predicted == "zh":
119
- # we use zh-cn
120
- language_predicted = "zh-cn"
121
-
122
- print(f"Detected language:{language_predicted}, Chosen language:{language}")
123
-
124
- # Trigger language detection for text longer than 15 characters
125
- if len(prompt) > 15:
126
- # allow any language for short text as some may be common
127
- # If user unchecks language autodetection it will not trigger
128
- # You may remove this completely for own use
129
- if language_predicted != language and not no_lang_auto_detect:
130
- # Please duplicate and remove this check if you really want this
131
- # Or the auto-detector may fail to identify the language (which it can on pretty short or mixed text)
132
- gr.Warning(
133
- f"It looks like your text isn’t the language you chose , if you’re sure the text is the same language you chose, please check disable language auto-detection checkbox"
134
- )
135
 
136
- return (
137
- None,
138
- None,
139
- None,
140
- None,
141
- )
142
 
143
- if use_mic == True:
144
- if mic_file_path is not None:
145
- speaker_wav = mic_file_path
146
- else:
147
- gr.Warning(
148
- "Please record your voice with Microphone, or uncheck Use Microphone to use reference audios"
149
- )
150
- return (
151
- None,
152
- None,
153
- None,
154
- None,
155
- )
156
 
157
- else:
158
- speaker_wav = audio_file_pth
159
 
160
- # Filter the microphone input, as it may have background noise and silence at the beginning and end
161
- # This is fast filtering, not perfect
162
 
163
- # Apply all on demand
164
- lowpassfilter = denoise = trim = loudness = True
165
 
166
- if lowpassfilter:
167
- lowpass_highpass = "lowpass=8000,highpass=75,"
168
- else:
169
- lowpass_highpass = ""
170
 
171
- if trim:
172
- # better to remove silence in beginning and end for microphone
173
- trim_silence = "areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,"
174
- else:
175
- trim_silence = ""
176
 
177
- if voice_cleanup:
178
- try:
179
- out_filename = (
180
- speaker_wav + str(uuid.uuid4()) + ".wav"
181
- ) # the extension lets ffmpeg know the output format
182
 
183
- # we will use the newer ffmpeg as it has the afftdn denoise filter
184
- shell_command = f"./ffmpeg -y -i {speaker_wav} -af {lowpass_highpass}{trim_silence} {out_filename}".split(
185
- " "
186
- )
187
 
188
- command_result = subprocess.run(
189
- [item for item in shell_command],
190
- capture_output=False,
191
- text=True,
192
- check=True,
193
- )
194
- speaker_wav = out_filename
195
- print("Filtered microphone input")
196
- except subprocess.CalledProcessError:
197
- # There was an error - command exited with non-zero code
198
- print("Error: failed filtering, use original microphone input")
199
  else:
200
- speaker_wav = speaker_wav
201
-
202
- if len(prompt) < 2:
203
- gr.Warning("Please give a longer prompt text")
204
- return (
205
- None,
206
- None,
207
- None,
208
- None,
209
- )
210
- if len(prompt) > 200:
211
- gr.Warning(
212
- "Text length limited to 200 characters for this demo, please try shorter text. You can clone this space and edit code for your own usage"
213
- )
214
- return (
215
- None,
216
- None,
217
- None,
218
- None,
219
- )
220
- global DEVICE_ASSERT_DETECTED
221
- if DEVICE_ASSERT_DETECTED:
222
- global DEVICE_ASSERT_PROMPT
223
- global DEVICE_ASSERT_LANG
224
- # It will likely never come here as we restart space on first unrecoverable error now
225
- print(
226
- f"Unrecoverable exception caused by language:{DEVICE_ASSERT_LANG} prompt:{DEVICE_ASSERT_PROMPT}"
227
- )
228
-
229
- # HF Space specific: this error is unrecoverable, need to restart the space
230
- space = api.get_space_runtime(repo_id=repo_id)
231
- if space.stage!="BUILDING":
232
- api.restart_space(repo_id=repo_id)
233
- else:
234
- print("TRIED TO RESTART but space is building")
235
-
236
- try:
237
- metrics_text = ""
238
- t_latent = time.time()
239
-
240
- # note: diffusion_conditioning is not used with hifigan (default mode); it will be empty but still needs to be passed to model.inference
241
- try:
242
- (
243
- gpt_cond_latent,
244
- speaker_embedding,
245
- ) = model.get_conditioning_latents(audio_path=speaker_wav, gpt_cond_len=30, gpt_cond_chunk_len=4, max_ref_length=60)
246
- except Exception as e:
247
- print("Speaker encoding error", str(e))
248
- gr.Warning(
249
- "It appears something wrong with reference, did you unmute your microphone?"
250
- )
251
- return (
252
- None,
253
- None,
254
- None,
255
- None,
256
- )
257
-
258
- latent_calculation_time = time.time() - t_latent
259
- # metrics_text=f"Embedding calculation time: {latent_calculation_time:.2f} seconds\n"
260
-
261
- # temporary comma fix
262
- prompt= re.sub("([^\x00-\x7F]|\w)(\.|\。|\?)",r"\1 \2\2",prompt)
263
-
264
- wav_chunks = []
265
- ## Direct mode
266
-
267
- print("I: Generating new audio...")
268
- t0 = time.time()
269
- out = model.inference(
270
- prompt,
271
- language,
272
- gpt_cond_latent,
273
- speaker_embedding,
274
- repetition_penalty=5.0,
275
- temperature=0.75,
276
- )
277
- inference_time = time.time() - t0
278
- print(f"I: Time to generate audio: {round(inference_time*1000)} milliseconds")
279
- metrics_text+=f"Time to generate audio: {round(inference_time*1000)} milliseconds\n"
280
- real_time_factor= (time.time() - t0) / out['wav'].shape[-1] * 24000
281
- print(f"Real-time factor (RTF): {real_time_factor}")
282
- metrics_text+=f"Real-time factor (RTF): {real_time_factor:.2f}\n"
283
- torchaudio.save("output.wav", torch.tensor(out["wav"]).unsqueeze(0), 24000)
284
-
285
-
286
- """
287
- print("I: Generating new audio in streaming mode...")
288
- t0 = time.time()
289
- chunks = model.inference_stream(
290
- prompt,
291
- language,
292
- gpt_cond_latent,
293
- speaker_embedding,
294
- repetition_penalty=7.0,
295
- temperature=0.85,
296
- )
297
- first_chunk = True
298
- for i, chunk in enumerate(chunks):
299
- if first_chunk:
300
- first_chunk_time = time.time() - t0
301
- metrics_text += f"Latency to first audio chunk: {round(first_chunk_time*1000)} milliseconds\n"
302
- first_chunk = False
303
- wav_chunks.append(chunk)
304
- print(f"Received chunk {i} of audio length {chunk.shape[-1]}")
305
- inference_time = time.time() - t0
306
- print(
307
- f"I: Time to generate audio: {round(inference_time*1000)} milliseconds"
308
- )
309
- #metrics_text += (
310
- # f"Time to generate audio: {round(inference_time*1000)} milliseconds\n"
311
- #)
312
- wav = torch.cat(wav_chunks, dim=0)
313
- print(wav.shape)
314
- real_time_factor = (time.time() - t0) / wav.shape[0] * 24000
315
- print(f"Real-time factor (RTF): {real_time_factor}")
316
- metrics_text += f"Real-time factor (RTF): {real_time_factor:.2f}\n"
317
- torchaudio.save("output.wav", wav.squeeze().unsqueeze(0).cpu(), 24000)
318
- """
319
-
320
- except RuntimeError as e:
321
- if "device-side assert" in str(e):
322
- # cannot do anything about a CUDA device-side error, need to restart
323
- print(
324
- f"Exit due to: Unrecoverable exception caused by language:{language} prompt:{prompt}",
325
- flush=True,
326
- )
327
- gr.Warning("Unhandled Exception encounter, please retry in a minute")
328
- print("Cuda device-assert Runtime encountered need restart")
329
- if not DEVICE_ASSERT_DETECTED:
330
- DEVICE_ASSERT_DETECTED = 1
331
- DEVICE_ASSERT_PROMPT = prompt
332
- DEVICE_ASSERT_LANG = language
333
-
334
- # just before restarting save what caused the issue so we can handle it in future
335
- # Uploading error data only happens for unrecoverable errors
336
- error_time = datetime.datetime.now().strftime("%d-%m-%Y-%H:%M:%S")
337
- error_data = [
338
- error_time,
339
- prompt,
340
- language,
341
- audio_file_pth,
342
- mic_file_path,
343
- use_mic,
344
- voice_cleanup,
345
- no_lang_auto_detect,
346
- agree,
347
- ]
348
- error_data = [str(e) if type(e) != str else e for e in error_data]
349
- print(error_data)
350
- print(speaker_wav)
351
- write_io = StringIO()
352
- csv.writer(write_io).writerows([error_data])
353
- csv_upload = write_io.getvalue().encode()
354
-
355
- filename = error_time + "_" + str(uuid.uuid4()) + ".csv"
356
- print("Writing error csv")
357
- error_api = HfApi()
358
- error_api.upload_file(
359
- path_or_fileobj=csv_upload,
360
- path_in_repo=filename,
361
- repo_id="coqui/xtts-flagged-dataset",
362
- repo_type="dataset",
363
- )
364
-
365
- # speaker_wav
366
- print("Writing error reference audio")
367
- speaker_filename = (
368
- error_time + "_reference_" + str(uuid.uuid4()) + ".wav"
369
- )
370
- error_api = HfApi()
371
- error_api.upload_file(
372
- path_or_fileobj=speaker_wav,
373
- path_in_repo=speaker_filename,
374
- repo_id="coqui/xtts-flagged-dataset",
375
- repo_type="dataset",
376
- )
377
-
378
- # HF Space specific: this error is unrecoverable, need to restart the space
379
- space = api.get_space_runtime(repo_id=repo_id)
380
- if space.stage!="BUILDING":
381
- api.restart_space(repo_id=repo_id)
382
- else:
383
- print("TRIED TO RESTART but space is building")
384
-
385
- else:
386
- if "Failed to decode" in str(e):
387
- print("Speaker encoding error", str(e))
388
- gr.Warning(
389
- "It appears something wrong with reference, did you unmute your microphone?"
390
- )
391
- else:
392
- print("RuntimeError: non device-side assert error:", str(e))
393
- gr.Warning("Something unexpected happened please retry again.")
394
- return (
395
- None,
396
- None,
397
- None,
398
- None,
399
- )
400
- return (
401
- gr.make_waveform(
402
- audio="output.wav",
403
- ),
404
- "output.wav",
405
- metrics_text,
406
- speaker_wav,
407
- )
408
  else:
409
- gr.Warning("Please accept the Terms & Condition!")
410
- return (
411
- None,
412
- None,
413
- None,
414
- None,
415
- )
416
-
417
-
418
- title = "Coqui🐸 XTTS"
419
-
420
- description = """
421
- <br/>
422
- This demo is currently running **XTTS v2.0.3**
423
- <br/>
424
- <a href="https://huggingface.co/coqui/XTTS-v2">XTTS</a> is a text-to-speech model that lets you clone voices into different languages.
425
- <br/>
426
- This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
427
- <br/>
428
- There are 17 languages.
429
- <p>
430
- Arabic: ar, Brazilian Portuguese: pt , Chinese: zh-cn, Czech: cs, Dutch: nl, English: en, French: fr, German: de, Italian: it, Polish: pl, Russian: ru, Spanish: es, Turkish: tr, Japanese: ja, Korean: ko, Hungarian: hu, Hindi: hi <br/>
431
- </p>
432
- <br/>
433
- Leave a star 🌟 on GitHub <a href="https://github.com/coqui-ai/TTS">🐸TTS</a>, where our open-source inference and training code lives.
434
- <br/>
435
- """
436
-
437
- links = """
438
- <img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=0d00920c-8cc9-4bf3-90f2-a615797e5f59" />
439
- | | |
440
- | ------------------------------- | --------------------------------------- |
441
- | 🐸💬 **CoquiTTS** | <a style="display:inline-block" href='https://github.com/coqui-ai/TTS'><img src='https://img.shields.io/github/stars/coqui-ai/TTS?style=social' /></a>|
442
- | 💼 **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/)
443
- | 👩‍💻 **Questions** | [GitHub Discussions](https://github.com/coqui-ai/TTS/discussions) |
444
- | 🗯 **Community** | [![Discord](https://img.shields.io/discord/1037326658807533628?color=%239B59B6&label=chat%20on%20discord)](https://discord.gg/5eXr5seRrv) |
445
- """
446
-
447
- article = """
448
- <div style='margin:20px auto;'>
449
- <p>By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml</p>
450
- <p>We collect data only for error cases for improvement.</p>
451
- </div>
452
- """
453
- examples = [
454
- [
455
- "Once when I was six years old I saw a magnificent picture",
456
- "en",
457
- "examples/female.wav",
458
- None,
459
- False,
460
- False,
461
- False,
462
- True,
463
- ],
464
- [
465
- "Lorsque j'avais six ans j'ai vu, une fois, une magnifique image",
466
- "fr",
467
- "examples/male.wav",
468
- None,
469
- False,
470
- False,
471
- False,
472
- True,
473
- ],
474
- [
475
- "Als ich sechs war, sah ich einmal ein wunderbares Bild",
476
- "de",
477
- "examples/female.wav",
478
- None,
479
- False,
480
- False,
481
- False,
482
- True,
483
- ],
484
- [
485
- "Cuando tenía seis años, vi una vez una imagen magnífica",
486
- "es",
487
- "examples/male.wav",
488
- None,
489
- False,
490
- False,
491
- False,
492
- True,
493
- ],
494
- [
495
- "Quando eu tinha seis anos eu vi, uma vez, uma imagem magnífica",
496
- "pt",
497
- "examples/female.wav",
498
- None,
499
- False,
500
- False,
501
- False,
502
- True,
503
- ],
504
- [
505
- "Kiedy miałem sześć lat, zobaczyłem pewnego razu wspaniały obrazek",
506
- "pl",
507
- "examples/male.wav",
508
- None,
509
- False,
510
- False,
511
- False,
512
- True,
513
- ],
514
- [
515
- "Un tempo lontano, quando avevo sei anni, vidi un magnifico disegno",
516
- "it",
517
- "examples/female.wav",
518
- None,
519
- False,
520
- False,
521
- False,
522
- True,
523
- ],
524
- [
525
- "Bir zamanlar, altı yaşındayken, muhteşem bir resim gördüm",
526
- "tr",
527
- "examples/female.wav",
528
- None,
529
- False,
530
- False,
531
- False,
532
- True,
533
- ],
534
- [
535
- "Когда мне было шесть лет, я увидел однажды удивительную картинку",
536
- "ru",
537
- "examples/female.wav",
538
- None,
539
- False,
540
- False,
541
- False,
542
- True,
543
- ],
544
- [
545
- "Toen ik een jaar of zes was, zag ik op een keer een prachtige plaat",
546
- "nl",
547
- "examples/male.wav",
548
- None,
549
- False,
550
- False,
551
- False,
552
- True,
553
- ],
554
- [
555
- "Když mi bylo šest let, viděl jsem jednou nádherný obrázek",
556
- "cs",
557
- "examples/female.wav",
558
- None,
559
- False,
560
- False,
561
- False,
562
- True,
563
- ],
564
- [
565
- "当我还只有六岁的时候, 看到了一副精彩的插画",
566
- "zh-cn",
567
- "examples/female.wav",
568
- None,
569
- False,
570
- False,
571
- False,
572
- True,
573
- ],
574
- [
575
- "かつて 六歳のとき、素晴らしい絵を見ました",
576
- "ja",
577
- "examples/female.wav",
578
- None,
579
- False,
580
- True,
581
- False,
582
- True,
583
  ],
584
- [
585
- "한번은 내가 여섯 살이었을 멋진 그림을 보았습니다.",
586
- "ko",
587
- "examples/female.wav",
588
- None,
589
- False,
590
- True,
591
- False,
592
- True,
593
- ],
594
- [
595
- "Egyszer hat éves koromban láttam egy csodálatos képet",
596
- "hu",
597
- "examples/male.wav",
598
- None,
599
- False,
600
- True,
601
- False,
602
- True,
603
- ], [
604
- "سلام صبح بخیز بزخیز و نخور غم جهان گذرا",
605
- "fa",
606
- "examples/male.wav",
607
- None,
608
- False,
609
- True,
610
- False,
611
- True,
612
- ],
613
- ]
614
-
615
-
616
-
617
- with gr.Blocks(analytics_enabled=False) as demo:
618
- with gr.Row():
619
- with gr.Column():
620
- gr.Markdown(
621
- """
622
- ## <img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/coqui-log-green-TTS.png" height="56"/>
623
- """
624
- )
625
- with gr.Column():
626
- # placeholder to align the image
627
- pass
628
-
629
- with gr.Row():
630
- with gr.Column():
631
- gr.Markdown(description)
632
- with gr.Column():
633
- gr.Markdown(links)
634
-
635
- with gr.Row():
636
- with gr.Column():
637
- input_text_gr = gr.Textbox(
638
- label="Text Prompt",
639
- info="One or two sentences at a time is better. Up to 200 text characters.",
640
- value="Hi there, I'm your new voice clone. Try your best to upload quality audio.",
641
- )
642
- language_gr = gr.Dropdown(
643
- label="Language",
644
- info="Select an output language for the synthesised speech",
645
- choices=[
646
- "en",
647
- "es",
648
- "fr",
649
- "de",
650
- "it",
651
- "pt",
652
- "pl",
653
- "tr",
654
- "ru",
655
- "nl",
656
- "cs",
657
- "ar",
658
- "zh-cn",
659
- "hu",
660
- "ko",
661
- "ja",
662
- "hi",
663
- "fa"
664
- ],
665
- max_choices=1,
666
- value="en",
667
- )
668
- ref_gr = gr.Audio(
669
- label="Reference Audio",
670
- info="Click on the ✎ button to upload your own target speaker audio",
671
- type="filepath",
672
- value="examples/female.wav",
673
- )
674
- mic_gr = gr.Audio(
675
- source="microphone",
676
- type="filepath",
677
- info="Use your microphone to record audio",
678
- label="Use Microphone for Reference",
679
- )
680
- use_mic_gr = gr.Checkbox(
681
- label="Use Microphone",
682
- value=False,
683
- info="Notice: Microphone input may not work properly under traffic",
684
- )
685
- clean_ref_gr = gr.Checkbox(
686
- label="Cleanup Reference Voice",
687
- value=False,
688
- info="This check can improve output if your microphone or reference voice is noisy",
689
- )
690
- auto_det_lang_gr = gr.Checkbox(
691
- label="Do not use language auto-detect",
692
- value=False,
693
- info="Check to disable language auto-detection",
694
- )
695
- tos_gr = gr.Checkbox(
696
- label="Agree",
697
- value=False,
698
- info="I have purchased a commercial license from Coqui: [email protected]\nOtherwise, I agree to the terms of the non-commercial CPML: https://coqui.ai/cpml",
699
- )
700
-
701
- tts_button = gr.Button("Send", elem_id="send-btn", visible=True)
702
-
703
-
704
- with gr.Column():
705
- video_gr = gr.Video(label="Waveform Visual")
706
- audio_gr = gr.Audio(label="Synthesised Audio", autoplay=True)
707
- out_text_gr = gr.Text(label="Metrics")
708
- ref_audio_gr = gr.Audio(label="Reference Audio Used")
709
-
710
- with gr.Row():
711
- gr.Examples(examples,
712
- label="Examples",
713
- inputs=[input_text_gr, language_gr, ref_gr, mic_gr, use_mic_gr, clean_ref_gr, auto_det_lang_gr, tos_gr],
714
- outputs=[video_gr, audio_gr, out_text_gr, ref_audio_gr],
715
- fn=predict,
716
- cache_examples=False,)
717
-
718
- tts_button.click(predict, [input_text_gr, language_gr, ref_gr, mic_gr, use_mic_gr, clean_ref_gr, auto_det_lang_gr, tos_gr], outputs=[video_gr, audio_gr, out_text_gr, ref_audio_gr])
719
 
720
- demo.queue()
721
- demo.launch(debug=True, show_api=True)
 
1
+ import os
2
+ import tempfile
3
  import gradio as gr
 
 
 
4
  from TTS.api import TTS
5
+ from TTS.utils.synthesizer import Synthesizer
6
  from huggingface_hub import hf_hub_download
7
+ import json
8
+
9
+ # Define constants
10
+ MODEL_INFO = [
11
+ #["vits checkpoint 57000", "checkpoint_57000.pth", "config.json", "mhrahmani/persian-tts-vits-0"],
12
+ # ["VITS Grapheme Multispeaker CV15(reduct)(best at 17864)", "best_model_17864.pth", "config.json",
13
+ # "saillab/persian-tts-cv15-reduct-grapheme-multispeaker"],
14
+ ["Single speaker (best)VITS Grapheme Azure (61000)", "checkpoint_61000.pth", "config.json", "saillab/persian-tts-azure-grapheme-60K"],
15
+
16
+ #["VITS Grapheme ARM24 Fine-Tuned on 1 (66651)", "best_model_66651.pth", "config.json","saillab/persian-tts-grapheme-arm24-finetuned-on1"],
17
+ ["Single speaker female best VITS Grapheme CV-Azure_male-Azure_female","best_model_15397.pth","config.json","saillab/female_cv_azure_male_azure_female","speakers1.pth"],
18
+ #["Multi Speaker Vits Grapheme CV+Azure in one set ","best_model_358320.pth","config.json","saillab/Multi_Speaker_Cv_plus_Azure_female_in_one_set","speakers.pth"],
19
+ ["Multispeaker VITS Grapheme CV15(reduct)(22000)", "checkpoint_22000.pth", "config.json", "saillab/persian-tts-cv15-reduct-grapheme-multispeaker", "speakers.pth"],
20
+ ["Multispeaker VITS Grapheme CV15(reduct)(26000)", "checkpoint_25000.pth", "config.json", "saillab/persian-tts-cv15-reduct-grapheme-multispeaker", "speakers.pth"],
21
+ ["Multispeaker VITS Grapheme CV15(90K)", "best_model_56960.pth", "config.json", "saillab/multi_speaker", "speakers.pth"],
22
+ ["Single speaker female best VITS Grapheme CV-Azure_male-Azure_female","best_model_15397.pth","config.json","saillab/female_cv_azure_male_azure_female","speakers.pth"],
23
+
24
+
25
+ # ["VITS Grapheme Azure (best at 15934)", "best_model_15934.pth", "config.json",
26
+ # "saillab/persian-tts-azure-grapheme-60K"],
27
+
28
+
29
+ ["Single speaker VITS Grapheme ARM24 Fine-Tuned on 1 (66651)", "best_model_66651.pth", "config.json","saillab/persian-tts-grapheme-arm24-finetuned-on1"],
30
+ ["Single speaker VITS Grapheme ARM24 Fine-Tuned on 1 (120000)", "checkpoint_120000.pth", "config.json","saillab/persian-tts-grapheme-arm24-finetuned-on1"],
31
+
32
+
33
+
34
+ # ... Add other models similarly
35
+ ]
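+ # Each MODEL_INFO entry is [display name, checkpoint file, config file, repo id],
+ # with an optional fifth element naming a speakers.pth file for multi-speaker models.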
36
 
37
+ # Extract model names from MODEL_INFO
38
+ MODEL_NAMES = [info[0] for info in MODEL_INFO]
39
+
40
+ MAX_TXT_LEN = 400
41
+ TOKEN = os.getenv('HUGGING_FACE_HUB_TOKEN')
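+ # The token is forwarded to hf_hub_download below; it is only needed if the
+ # checkpoint repositories are private.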
42
+
43
+ model_files = {}
44
+ config_files = {}
45
+ speaker_files = {}
46
+
47
+ # Create a dictionary to store synthesizer objects for each model
48
+ synthesizers = {}
49
+
50
+ def update_config_speakers_file_recursive(config_dict, speakers_path):
51
+ """Recursively update speakers_file keys in a dictionary."""
52
+ if "speakers_file" in config_dict:
53
+ config_dict["speakers_file"] = speakers_path
54
+ for key, value in config_dict.items():
55
+ if isinstance(value, dict):
56
+ update_config_speakers_file_recursive(value, speakers_path)
57
+
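+ # Recursion is used because a "speakers_file" key may also appear in nested
+ # sections of the config (for example inside "model_args" in some Coqui VITS configs).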
58
+ def update_config_speakers_file(config_path, speakers_path):
59
+ """Update the config.json file to point to the correct speakers.pth file."""
60
+
61
+ # Load the existing config
62
+ with open(config_path, 'r') as f:
63
+ config = json.load(f)
64
+
65
+ # Modify the speakers_file entry
66
+ update_config_speakers_file_recursive(config, speakers_path)
67
+
68
+ # Save the modified config
69
+ with open(config_path, 'w') as f:
70
+ json.dump(config, f, indent=4)
71
+
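+ # A minimal sketch of the intended effect (paths here are hypothetical):
+ #   config = {"speakers_file": "speakers.pth",
+ #             "model_args": {"speakers_file": "speakers.pth"}}
+ #   update_config_speakers_file_recursive(config, "/data/speakers.pth")
+ #   # -> every "speakers_file" entry now points to "/data/speakers.pth"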
72
+ # Download models and initialize synthesizers
73
+ for info in MODEL_INFO:
74
+ model_name, model_file, config_file, repo_name = info[:4]
75
+ speaker_file = info[4] if len(info) == 5 else None # Check if speakers.pth is defined for the model
76
+
77
+ print(f"|> Downloading: {model_name}")
78
+
79
+ # Download model and config files
80
+ model_files[model_name] = hf_hub_download(repo_id=repo_name, filename=model_file, use_auth_token=TOKEN)
81
+ config_files[model_name] = hf_hub_download(repo_id=repo_name, filename=config_file, use_auth_token=TOKEN)
82
+
83
+ # Download speakers.pth if it exists
84
+ if speaker_file:
85
+ speaker_files[model_name] = hf_hub_download(repo_id=repo_name, filename=speaker_file, use_auth_token=TOKEN)
86
+ update_config_speakers_file(config_files[model_name], speaker_files[model_name]) # Update the config file
87
+ print(speaker_files[model_name])
88
+ # Initialize synthesizer for the model
89
+ synthesizer = Synthesizer(
90
+ tts_checkpoint=model_files[model_name],
91
+ tts_config_path=config_files[model_name],
92
+ tts_speakers_file=speaker_files[model_name], # Pass the speakers.pth file if it exists
93
+ use_cuda=False # Assuming you don't want to use GPU, adjust if needed
94
+ )
95
+
96
+ else:
97
+
98
+ # Initialize synthesizer for the model
99
+ synthesizer = Synthesizer(
100
+ tts_checkpoint=model_files[model_name],
101
+ tts_config_path=config_files[model_name],
102
+ # tts_speakers_file=speaker_files.get(model_name, None), # Pass the speakers.pth file if it exists
103
+ use_cuda=False # Assuming you don't want to use GPU, adjust if needed
104
+ )
105
 
106
+ synthesizers[model_name] = synthesizer
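+ # Note: the display name is the dictionary key, so two MODEL_INFO entries with the
+ # same display name (as with the duplicated single-speaker female entry above)
+ # overwrite each other and only the last one is kept.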
107
 
108
 
 
 
109
 
 
 
110
 
111
+ #def synthesize(text: str, model_name: str, speaker_name="speaker-0") -> str:
112
+ def synthesize(text: str, model_name: str, speaker_name=None) -> str:
113
+ """Synthesize speech using the selected model."""
114
+ if len(text) > MAX_TXT_LEN:
115
+ text = text[:MAX_TXT_LEN]
116
+ print(f"Input text was cut off as it exceeded the {MAX_TXT_LEN} character limit.")
117
 
118
+ # Use the synthesizer object for the selected model
119
+ synthesizer = synthesizers[model_name]
 
 
120
 
121
 
122
+ if synthesizer is None:
123
+ raise NameError("Model not found")
 
 
 
124
 
125
+ if not synthesizer.tts_speakers_file:
126
+ wavs = synthesizer.tts(text)
 
 
127
 
128
+ else:
129
+ if not speaker_name:
130
+ #wavs = synthesizer.tts(text, speaker_name="speaker-0") ## should change, better if gradio conditions are figure out.
131
+ wavs = synthesizer.tts(text, speaker_name=None)
132
  else:
133
+ wavs = synthesizer.tts(text, speaker_name=speaker_name)
134
+
135
+ with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
136
+ synthesizer.save_wav(wavs, fp)
137
+ return fp.name
138
+
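+ # Example usage (a sketch; the indices and speaker name are assumptions based on the
+ # MODEL_INFO list above):
+ #   wav_path = synthesize("سلام", MODEL_NAMES[0])                            # single-speaker
+ #   wav_path = synthesize("سلام", MODEL_NAMES[2], speaker_name="speaker-0")  # multi-speaker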
139
+ # Callback function to update UI based on the selected model
140
+ def update_options(model_name):
141
+ synthesizer = synthesizers[model_name]
142
+ # if synthesizer.tts.is_multi_speaker:
143
+ if model_name == MODEL_NAMES[1]:
144
+ speakers = synthesizer.tts_model.speaker_manager.speaker_names
145
+ # return options for the dropdown
146
+ return speakers
147
  else:
148
+ # return empty options if not multi-speaker
149
+ return []
150
+
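+ # In the Interface below, update_options() is evaluated once at build time to fill the
+ # speaker dropdown for MODEL_NAMES[1]; it is not wired as a live callback. A rough
+ # sketch of live wiring, assuming the Gradio 3.x Blocks API:
+ #   with gr.Blocks() as demo:
+ #       model_radio = gr.Radio(choices=MODEL_NAMES, value=MODEL_NAMES[0])
+ #       speaker_dd = gr.Dropdown(choices=[])
+ #       model_radio.change(lambda m: gr.Dropdown.update(choices=update_options(m)),
+ #                          inputs=model_radio, outputs=speaker_dd)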
151
+ # Create Gradio interface
152
+ iface = gr.Interface(
153
+ fn=synthesize,
154
+ inputs=[
155
+ gr.Textbox(label="Enter Text to Synthesize:", value="زین همرهان سست عناصر، دلم گرفت."),
156
+ gr.Radio(label="Pick a Model", choices=MODEL_NAMES, value=MODEL_NAMES[0], type="value"),
157
+ #gr.Dropdown(label="Select Speaker", choices=update_options(MODEL_NAMES[1]), type="value", default="speaker-0")
158
+ gr.Dropdown(label="Select Speaker", choices=update_options(MODEL_NAMES[1]), type="value", value=None)
159
  ],
160
+ outputs=gr.Audio(label="Output", type='filepath'),
161
+ examples=[["زین همرهان سست عناصر، دلم گرفت.", MODEL_NAMES[0], ""]], # Example should include a speaker name for multispeaker models
162
+ title='Persian TTS Playground',
163
+ description="""
164
+ ### Persian text-to-speech model demo.
165
+
166
+
167
+ #### Pick a speaker for multi-speaker models. (For single-speaker models, use speaker-0.)
168
+ """,
169
+ article="",
170
+ live=False
171
+ )
172
 
173
+ iface.launch()