# NOTE(review): the original paste began with HuggingFace Spaces page residue
# ("Spaces:" / "Build error" lines) that is not Python — replaced with this comment.
import os
import torch
import gradio as gr
from openvoice import se_extractor
from openvoice.api import BaseSpeakerTTS, ToneColorConverter
import multiprocessing
# Function to initialize the models and run the TTS + voice-conversion task.
def initialize_and_run(input_text: str, save_path: str) -> None:
    """Synthesize ``input_text`` with the Chinese base speaker and convert its
    tone color to the reference speaker's voice, writing audio to ``save_path``.

    Intended to run in a subprocess so GPU/CUDA state is isolated per request
    (see ``audio_io``).

    Args:
        input_text: Text to synthesize (Chinese base speaker).
        save_path: Output path for the converted WAV file.

    Raises:
        Exception: any failure is logged and re-raised so the parent process
            observes a non-zero exit code.
    """
    try:
        ckpt_base = 'checkpoints/base_speakers/ZH'
        ckpt_converter = 'checkpoints/converter'
        # Bug fix: fall back to CPU when no GPU is present instead of
        # crashing on a hard-coded "cuda:0" device.
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        print(device)
        output_dir = 'outputs'

        base_speaker_tts = BaseSpeakerTTS(f'{ckpt_base}/config.json', device=device)
        base_speaker_tts.load_ckpt(f'{ckpt_base}/checkpoint.pth')

        tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
        tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')

        # Source speaker embedding shipped with the base checkpoint.
        source_se = torch.load(f'{ckpt_base}/zh_default_se.pth').to(device)
        os.makedirs(output_dir, exist_ok=True)

        reference_speaker = './resources/demo_speaker0.mp3'  # This is the voice you want to clone
        target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir='processed', vad=True)

        # Run the base speaker tts
        src_path = f'{output_dir}/tmp.wav'
        base_speaker_tts.tts(input_text, src_path, speaker='default', language='Chinese', speed=1.0)

        # Run the tone color converter; the message is watermarked into the audio.
        encode_message = "@MyShell"
        tone_color_converter.convert(
            audio_src_path=src_path,
            src_se=source_se,
            tgt_se=target_se,
            output_path=save_path,
            message=encode_message)
    except Exception as e:
        print(f"Error during GPU task execution: {e}")
        raise
def audio_io(input_text: str):
    """Gradio handler: synthesize ``input_text`` in a worker subprocess and
    return the path of the generated audio file.

    Running the GPU work in a child process keeps CUDA state out of the
    long-lived server process.
    """
    out_dir = 'outputs'
    result_path = f'{out_dir}/output_chinese.wav'
    worker = multiprocessing.Process(
        target=initialize_and_run,
        args=(input_text, result_path),
    )
    worker.start()
    worker.join()
    # A non-zero exit code means the child raised before writing the file.
    if worker.exitcode != 0:
        raise RuntimeError("Subprocess for GPU task failed.")
    return result_path
# Minimal Gradio UI: one text box in, one audio player out.
demo = gr.Interface(fn=audio_io, inputs=["text"], outputs=["audio"])
if __name__ == "__main__":
    # Bug fix: the original built torch.Generator(...).manual_seed(seed)
    # here, but `seed` was never defined anywhere in the file (guaranteed
    # NameError at startup) and the resulting generator was never used,
    # so that dead code is removed.
    demo.launch(share=True)