update_gradio
Files changed:
- AR/__pycache__/__init__.cpython-39.pyc +0 -0
- AR/models/__pycache__/__init__.cpython-39.pyc +0 -0
- AR/models/__pycache__/t2s_lightning_module.cpython-39.pyc +0 -0
- AR/models/__pycache__/t2s_model.cpython-39.pyc +0 -0
- AR/models/__pycache__/utils.cpython-39.pyc +0 -0
- AR/modules/__pycache__/__init__.cpython-39.pyc +0 -0
- AR/modules/__pycache__/activation.cpython-39.pyc +0 -0
- AR/modules/__pycache__/embedding.cpython-39.pyc +0 -0
- AR/modules/__pycache__/lr_schedulers.cpython-39.pyc +0 -0
- AR/modules/__pycache__/optim.cpython-39.pyc +0 -0
- AR/modules/__pycache__/patched_mha_with_cache.cpython-39.pyc +0 -0
- AR/modules/__pycache__/scaling.cpython-39.pyc +0 -0
- AR/modules/__pycache__/transformer.cpython-39.pyc +0 -0
- README.md +1 -1
- __pycache__/utils.cpython-39.pyc +0 -0
- feature_extractor/__pycache__/__init__.cpython-39.pyc +0 -0
- feature_extractor/__pycache__/cnhubert.cpython-39.pyc +0 -0
- feature_extractor/__pycache__/whisper_enc.cpython-39.pyc +0 -0
- inference_webui.py +10 -6
- module/__pycache__/__init__.cpython-39.pyc +0 -0
- module/__pycache__/attentions.cpython-39.pyc +0 -0
- module/__pycache__/commons.cpython-39.pyc +0 -0
- module/__pycache__/core_vq.cpython-39.pyc +0 -0
- module/__pycache__/mel_processing.cpython-39.pyc +0 -0
- module/__pycache__/models.cpython-39.pyc +0 -0
- module/__pycache__/modules.cpython-39.pyc +0 -0
- module/__pycache__/mrte_model.cpython-39.pyc +0 -0
- module/__pycache__/quantize.cpython-39.pyc +0 -0
- module/__pycache__/transforms.cpython-39.pyc +0 -0
- requirements.txt +1 -1
- text/g2pw/onnx_api.py +2 -1
- tools/__pycache__/__init__.cpython-39.pyc +0 -0
- tools/__pycache__/my_utils.cpython-39.pyc +0 -0
- tools/i18n/__pycache__/i18n.cpython-39.pyc +0 -0
AR/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/AR/__pycache__/__init__.cpython-39.pyc and b/AR/__pycache__/__init__.cpython-39.pyc differ

AR/models/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/AR/models/__pycache__/__init__.cpython-39.pyc and b/AR/models/__pycache__/__init__.cpython-39.pyc differ

AR/models/__pycache__/t2s_lightning_module.cpython-39.pyc
CHANGED
Binary files a/AR/models/__pycache__/t2s_lightning_module.cpython-39.pyc and b/AR/models/__pycache__/t2s_lightning_module.cpython-39.pyc differ

AR/models/__pycache__/t2s_model.cpython-39.pyc
CHANGED
Binary files a/AR/models/__pycache__/t2s_model.cpython-39.pyc and b/AR/models/__pycache__/t2s_model.cpython-39.pyc differ

AR/models/__pycache__/utils.cpython-39.pyc
CHANGED
Binary files a/AR/models/__pycache__/utils.cpython-39.pyc and b/AR/models/__pycache__/utils.cpython-39.pyc differ

AR/modules/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/__init__.cpython-39.pyc and b/AR/modules/__pycache__/__init__.cpython-39.pyc differ

AR/modules/__pycache__/activation.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/activation.cpython-39.pyc and b/AR/modules/__pycache__/activation.cpython-39.pyc differ

AR/modules/__pycache__/embedding.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/embedding.cpython-39.pyc and b/AR/modules/__pycache__/embedding.cpython-39.pyc differ

AR/modules/__pycache__/lr_schedulers.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/lr_schedulers.cpython-39.pyc and b/AR/modules/__pycache__/lr_schedulers.cpython-39.pyc differ

AR/modules/__pycache__/optim.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/optim.cpython-39.pyc and b/AR/modules/__pycache__/optim.cpython-39.pyc differ

AR/modules/__pycache__/patched_mha_with_cache.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/patched_mha_with_cache.cpython-39.pyc and b/AR/modules/__pycache__/patched_mha_with_cache.cpython-39.pyc differ

AR/modules/__pycache__/scaling.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/scaling.cpython-39.pyc and b/AR/modules/__pycache__/scaling.cpython-39.pyc differ

AR/modules/__pycache__/transformer.cpython-39.pyc
CHANGED
Binary files a/AR/modules/__pycache__/transformer.cpython-39.pyc and b/AR/modules/__pycache__/transformer.cpython-39.pyc differ
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🤗
 colorFrom: indigo
 colorTo: red
 sdk: gradio
-sdk_version:
+sdk_version: 4.24.0
 app_file: inference_webui.py
 pinned: false
 license: mit
__pycache__/utils.cpython-39.pyc
ADDED
Binary file (11.4 kB)
feature_extractor/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/feature_extractor/__pycache__/__init__.cpython-39.pyc and b/feature_extractor/__pycache__/__init__.cpython-39.pyc differ

feature_extractor/__pycache__/cnhubert.cpython-39.pyc
CHANGED
Binary files a/feature_extractor/__pycache__/cnhubert.cpython-39.pyc and b/feature_extractor/__pycache__/cnhubert.cpython-39.pyc differ

feature_extractor/__pycache__/whisper_enc.cpython-39.pyc
CHANGED
Binary files a/feature_extractor/__pycache__/whisper_enc.cpython-39.pyc and b/feature_extractor/__pycache__/whisper_enc.cpython-39.pyc differ
inference_webui.py
CHANGED
@@ -17,9 +17,12 @@ logging.getLogger("asyncio").setLevel(logging.ERROR)
 logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
 logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
 logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
+import gradio.analytics as analytics
+analytics.version_check = lambda:None
+analytics.get_local_ip_address= lambda :"127.0.0.1"##不干掉本地联不通亚马逊的get_local_ip服务器
 import LangSegment, os, re, sys, json
 import pdb
-import spaces
+# import spaces
 import torch
 
 version="v2"#os.environ.get("version","v2")
@@ -343,7 +346,7 @@ def merge_short_text_in_array(texts, threshold):
 # cache_tokens={}#暂未实现清理机制
 cache= {}
 @torch.inference_mode()
-@spaces.GPU
+# @spaces.GPU
 def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=123):
 global cache
 if ref_wav_path:pass
@@ -611,8 +614,8 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
 gr.Markdown(
 value="""# GPT-SoVITS-v2 Zero-shot TTS demo
 ## https://github.com/RVC-Boss/GPT-SoVITS
-Input 3
-输入3
+Input 3 to 10s reference audio to guide the time-bre, speed, emotion of voice, and generate the speech you want by input the inference text. <br>
+输入3至10秒的参考音频来引导待合成语音的音色、语速和情感,然后输入待合成目标文本,生成目标语音. <br>
 Cross-lingual Support: Inference in languages different from the training dataset, currently supporting English, Japanese, Korean and Cantonese.<br>
 目前支持中日英韩粤跨语种合成。<br>
 This demo is open source under the MIT license. The author does not have any control over it. Users who use the software and distribute the sounds exported by the software are solely responsible. If you do not agree with this clause, you cannot use or reference any codes and files within this demo. <br>
@@ -630,7 +633,7 @@ This demo is open source under the MIT license. The author does not have any con
 prompt_language = gr.Dropdown(
 label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文")
 )
-inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。"),file_count="
+inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。"),file_count="multiple")
 gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
 with gr.Row():
 with gr.Column():
@@ -663,7 +666,8 @@ This demo is open source under the MIT license. The author does not have any con
 )
 
 if __name__ == '__main__':
-app.queue(concurrency_count=511, max_size=1022).launch(
+# app.queue(concurrency_count=511, max_size=1022).launch(
+app.queue().launch(
 server_name="0.0.0.0",
 inbrowser=True,
 # share=True,
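Note on the change above: it patches out Gradio's analytics hooks, comments out the Hugging Face ZeroGPU spaces decorator, and replaces the old app.queue(concurrency_count=511, max_size=1022) call, since concurrency_count was removed from queue() in Gradio 4.x. A minimal standalone sketch of the same pattern, assuming gradio==4.24.0 (where gradio.analytics exposes version_check and get_local_ip_address, as the diff shows); the Blocks UI below is a placeholder, not the GPT-SoVITS interface:

import gradio.analytics as analytics

# Patch out the PyPI version check and the external IP lookup before building
# any UI; both make outbound requests that can hang in a sandboxed Space.
analytics.version_check = lambda: None
analytics.get_local_ip_address = lambda: "127.0.0.1"

import gradio as gr

with gr.Blocks(title="placeholder demo") as app:
    gr.Markdown("analytics patched before the UI is built")

if __name__ == "__main__":
    # Gradio 4.x dropped concurrency_count from queue() in favour of per-event
    # concurrency_limit, hence the fallback to queue() defaults in this commit.
    app.queue().launch(server_name="0.0.0.0")

Recent Gradio versions also honor the GRADIO_ANALYTICS_ENABLED environment variable, which may be a less invasive way to get the same effect than monkey-patching.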
module/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/__init__.cpython-39.pyc and b/module/__pycache__/__init__.cpython-39.pyc differ

module/__pycache__/attentions.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/attentions.cpython-39.pyc and b/module/__pycache__/attentions.cpython-39.pyc differ

module/__pycache__/commons.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/commons.cpython-39.pyc and b/module/__pycache__/commons.cpython-39.pyc differ

module/__pycache__/core_vq.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/core_vq.cpython-39.pyc and b/module/__pycache__/core_vq.cpython-39.pyc differ

module/__pycache__/mel_processing.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/mel_processing.cpython-39.pyc and b/module/__pycache__/mel_processing.cpython-39.pyc differ

module/__pycache__/models.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/models.cpython-39.pyc and b/module/__pycache__/models.cpython-39.pyc differ

module/__pycache__/modules.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/modules.cpython-39.pyc and b/module/__pycache__/modules.cpython-39.pyc differ

module/__pycache__/mrte_model.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/mrte_model.cpython-39.pyc and b/module/__pycache__/mrte_model.cpython-39.pyc differ

module/__pycache__/quantize.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/quantize.cpython-39.pyc and b/module/__pycache__/quantize.cpython-39.pyc differ

module/__pycache__/transforms.cpython-39.pyc
CHANGED
Binary files a/module/__pycache__/transforms.cpython-39.pyc and b/module/__pycache__/transforms.cpython-39.pyc differ
requirements.txt
CHANGED
@@ -4,7 +4,7 @@ tensorboard==2.15.1
 librosa==0.9.2
 numba==0.56.4
 pytorch-lightning==2.1.3
-gradio==
+gradio==4.24.0
 ffmpeg-python==0.2.0
 onnxruntime-gpu
 tqdm==4.66.4
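The gradio pin above matches the sdk_version bump in README.md; on Hugging Face Spaces the installed SDK version comes from the README front matter, so the two should not drift apart. A purely hypothetical consistency check, not part of this commit (the file names refer to the repo's own README.md and requirements.txt):

import re
from pathlib import Path

# Compare the Space front matter with the requirements pin; the regexes assume
# the plain "sdk_version: X.Y.Z" and "gradio==X.Y.Z" forms used in this repo.
readme = Path("README.md").read_text(encoding="utf-8")
reqs = Path("requirements.txt").read_text(encoding="utf-8")

sdk_version = re.search(r"^sdk_version:\s*([\w.]+)", readme, re.M).group(1)
gradio_pin = re.search(r"^gradio==([\w.]+)", reqs, re.M).group(1)

assert sdk_version == gradio_pin, f"sdk_version {sdk_version} != gradio pin {gradio_pin}"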
text/g2pw/onnx_api.py
CHANGED
@@ -86,7 +86,8 @@ class G2PWOnnxConverter:
 sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
 sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
 sess_options.intra_op_num_threads = 2
-self.session_g2pW = onnxruntime.InferenceSession(os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options, providers=['CPUExecutionProvider'])
+# self.session_g2pW = onnxruntime.InferenceSession(os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options, providers=['CPUExecutionProvider'])
+self.session_g2pW = onnxruntime.InferenceSession(os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options, providers=['CUDAExecutionProvider','CPUExecutionProvider'])
 
 self.config = load_config(
 config_path=os.path.join(uncompress_path, 'config.py'),
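The provider list above asks ONNX Runtime to try CUDA first and fall back to CPU. A minimal sketch of the same session setup, assuming the onnxruntime-gpu build from requirements.txt is installed; model_path below is a placeholder rather than a file shipped with this commit:

import onnxruntime

sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
sess_options.intra_op_num_threads = 2

# Providers are tried in order; keeping only those the installed build reports
# avoids passing a provider name this onnxruntime build does not recognize.
preferred = ["CUDAExecutionProvider", "CPUExecutionProvider"]
providers = [p for p in preferred if p in onnxruntime.get_available_providers()]

model_path = "g2pW.onnx"  # placeholder path
session = onnxruntime.InferenceSession(model_path, sess_options=sess_options, providers=providers)
print(session.get_providers())  # reports which providers were actually selected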
tools/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/tools/__pycache__/__init__.cpython-39.pyc and b/tools/__pycache__/__init__.cpython-39.pyc differ

tools/__pycache__/my_utils.cpython-39.pyc
CHANGED
Binary files a/tools/__pycache__/my_utils.cpython-39.pyc and b/tools/__pycache__/my_utils.cpython-39.pyc differ

tools/i18n/__pycache__/i18n.cpython-39.pyc
CHANGED
Binary files a/tools/i18n/__pycache__/i18n.cpython-39.pyc and b/tools/i18n/__pycache__/i18n.cpython-39.pyc differ