Add log
app.py (CHANGED)
@@ -27,6 +27,7 @@ from feature_extractor import cnhubert
 
 cnhubert.cnhubert_base_path = cnhubert_base_path
 from time import time as ttime
+import datetime
 
 from AR.models.t2s_lightning_module import Text2SemanticLightningModule
 from module.mel_processing import spectrogram_torch
@@ -91,7 +92,7 @@ vq_model = SynthesizerTrn(
     hps.data.filter_length // 2 + 1,
     hps.train.segment_size // hps.data.hop_length,
     n_speakers=hps.data.n_speakers,
-    **hps.model
+    **hps.model,
 )
 if is_half == True:
     vq_model = vq_model.half().to(device)
@@ -132,6 +133,13 @@ dict_language = {"Chinese": "zh", "English": "en", "Japanese": "ja"}
 
 
 def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language):
+    print(f"---{datetime.datetime.now()}---")
+    print(f"ref_wav_path: {ref_wav_path}")
+    print(f"prompt_text: {prompt_text}")
+    print(f"prompt_language: {prompt_language}")
+    print(f"text: {text}")
+    print(f"text_language: {text_language}")
+
    if len(prompt_text) > 100 or len(text) > 100:
        return
    t0 = ttime()
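The added print calls give a timestamped trace of every get_tts_wav request. As a point of comparison only, here is a minimal sketch of the same trace built on Python's standard logging module instead of print; the logger name "gpt_sovits_tts", the helper log_tts_request, and the format string are illustrative assumptions, not part of app.py.

import logging

# Timestamps and log levels come from the logging framework itself,
# so no explicit datetime.datetime.now() call is needed.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
)
logger = logging.getLogger("gpt_sovits_tts")


def log_tts_request(ref_wav_path, prompt_text, prompt_language, text, text_language):
    # One structured log line per request instead of six separate prints.
    logger.info(
        "tts request: ref_wav_path=%s prompt_text=%r prompt_language=%s text=%r text_language=%s",
        ref_wav_path, prompt_text, prompt_language, text, text_language,
    )

Under that assumption, get_tts_wav would call log_tts_request(...) once at the top; whether that trade-off is worth it over plain print in a Gradio Space is a style choice, not something the commit itself implies.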