Blane187 committed on
Commit 316049c
1 Parent(s): 47f83e6

Update app.py

Files changed (1)
  1. app.py +171 -162
app.py CHANGED
@@ -1,162 +1,171 @@
- import gradio as gr
- from rvc_infer import infer_audio, get_current_models
- import os
- import re
- import random
- from scipy.io.wavfile import write
- from scipy.io.wavfile import read
- import numpy as np
- import yt_dlp
- import subprocess
- import zipfile
- import shutil
- import urllib.request
-
- print("downloading RVC models")
- os.system("python dowoad_param.py")
-
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
- rvc_models_dir = os.path.join(BASE_DIR, 'models')
-
- def update_models_list():
-     models_l = get_current_models(rvc_models_dir)
-     return gr.update(choices=models_l)
-
- def extract_zip(extraction_folder, zip_name):
-     os.makedirs(extraction_folder)
-     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
-         zip_ref.extractall(extraction_folder)
-     os.remove(zip_name)
-
-     index_filepath, model_filepath = None, None
-     for root, dirs, files in os.walk(extraction_folder):
-         for name in files:
-             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
-                 index_filepath = os.path.join(root, name)
-
-             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
-                 model_filepath = os.path.join(root, name)
-
-     if not model_filepath:
-         raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
-
-     # move model and index file to extraction folder
-     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
-     if index_filepath:
-         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
-
-     # remove any unnecessary nested folders
-     for filepath in os.listdir(extraction_folder):
-         if os.path.isdir(os.path.join(extraction_folder, filepath)):
-             shutil.rmtree(os.path.join(extraction_folder, filepath))
-
- def download_online_model(url, dir_name, progress=gr.Progress()):
-     try:
-         progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
-         zip_name = url.split('/')[-1]
-         extraction_folder = os.path.join(rvc_models_dir, dir_name)
-         if os.path.exists(extraction_folder):
-             raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
-         if 'pixeldrain.com' in url:
-             url = f'https://pixeldrain.com/api/file/{zip_name}'
-
-         urllib.request.urlretrieve(url, zip_name)
-
-         progress(0.5, desc='[~] Extracting zip...')
-         extract_zip(extraction_folder, zip_name)
-         return f'[+] {dir_name} Model successfully downloaded!'
-
-     except Exception as e:
-         raise gr.Error(str(e))
-
- def download_audio(url):
-     ydl_opts = {
-         'format': 'bestaudio/best',
-         'outtmpl': 'ytdl/%(title)s.%(ext)s',
-         'postprocessors': [{
-             'key': 'FFmpegExtractAudio',
-             'preferredcodec': 'wav',
-             'preferredquality': '192',
-         }],
-     }
-
-     with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-         info_dict = ydl.extract_info(url, download=True)
-         file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
-         sample_rate, audio_data = read(file_path)
-         audio_array = np.asarray(audio_data, dtype=np.int16)
-
-     return sample_rate, audio_array
-
-
- CSS = """
- """
-
- with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
-     gr.Markdown("# RVC INFER DEMOS")
-     gr.Markdown("# Recommended: use the Colab version for more features!<br> [![Open in Colab](https://img.shields.io/badge/google_colab-F9AB00?style=flat-square&logo=googlecolab&logoColor=white)](https://colab.research.google.com/drive/1bM1LB2__WNFxX8pyZmUPQZYq7dg58YWG?usp=sharing)")
-     with gr.Tab("Inference"):
-         gr.Markdown("in progress")
-         model_name = gr.Dropdown(label='Voice Models', info='Models folder: "rvc_infer --> models". After adding new models to this folder, click the refresh button')
-         ref_btn = gr.Button('Refresh Models', variant='primary')
-         input_audio = gr.Audio(label="Input Audio", type="filepath")
-         with gr.Accordion("Settings", open=False):
-             f0_change = gr.Slider(label="f0 change", minimum=-12, maximum=12, step=1, value=0)
-             f0_method = gr.Dropdown(label="f0 method", choices=["rmvpe+", "rmvpe", "fcpe", "hybrid[rmvpe+fcpe]"], value="rmvpe+")
-             min_pitch = gr.Textbox(label="min pitch", lines=1, value="-12")
-             max_pitch = gr.Textbox(label="max pitch", lines=1, value="12")
-             crepe_hop_length = gr.Slider(label="crepe_hop_length", minimum=0, maximum=256, step=1, value=128)
-             index_rate = gr.Slider(label="index_rate", minimum=0, maximum=1.0, step=0.01, value=0.75)
-             filter_radius = gr.Slider(label="filter_radius", minimum=0, maximum=10.0, step=0.01, value=3)
-             rms_mix_rate = gr.Slider(label="rms_mix_rate", minimum=0, maximum=1.0, step=0.01, value=0.25)
-             protect = gr.Slider(label="protect", minimum=0, maximum=1.0, step=0.01, value=0.33)
-         with gr.Accordion("Advanced Settings", open=False):
-             split_infer = gr.Checkbox(label="split_infer", value=False)
-             min_silence = gr.Slider(label="min_silence", minimum=0, maximum=1000, step=1, value=500)
-             silence_threshold = gr.Slider(label="silence_threshold", minimum=-1000, maximum=1000, step=1, value=-50)
-             seek_step = gr.Slider(label="seek_step", minimum=0, maximum=100, step=1, value=0)
-             keep_silence = gr.Slider(label="keep_silence", minimum=-1000, maximum=1000, step=1, value=100)
-             do_formant = gr.Checkbox(label="do_formant", value=False)
-             quefrency = gr.Slider(label="quefrency", minimum=0, maximum=100, step=1, value=0)
-             timbre = gr.Slider(label="timbre", minimum=0, maximum=100, step=1, value=1)
-             f0_autotune = gr.Checkbox(label="f0_autotune", value=False)
-             audio_format = gr.Dropdown(label="audio_format", choices=["wav"], value="wav", visible=False)
-             resample_sr = gr.Slider(label="resample_sr", minimum=0, maximum=100, step=1, value=0)
-             hubert_model_path = gr.Textbox(label="hubert_model_path", lines=1, value="hubert_base.pt", visible=False)
-             rmvpe_model_path = gr.Textbox(label="rmvpe_model_path", lines=1, value="rmvpe.pt", visible=False)
-             fcpe_model_path = gr.Textbox(label="fcpe_model_path", lines=1, value="fcpe.pt", visible=False)
-         submit_inference = gr.Button('Inference', variant='primary')
-         result_audio = gr.Audio(label="Output Audio")
-
-     with gr.Tab("Download Model"):
-         gr.Markdown("## Download Model for inference")
-         url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
-         dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
-         output = gr.Textbox(label="Output Models")
-         download_button = gr.Button("Download Model")
-         download_button.click(download_online_model, inputs=[url_input, dir_name_input], outputs=output)
-
-     with gr.Tab("Credits"):
-         gr.Markdown(
-             """
-             This project was made by [Blane187](https://huggingface.co/Blane187) with improvements by [John6666](https://huggingface.co/John6666)
-             """)
-
-     ref_btn.click(update_models_list, None, outputs=model_name)
-     gr.on(
-         triggers=[submit_inference.click],
-         fn=infer_audio,
-         inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
-                 filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
-                 keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
-                 hubert_model_path, rmvpe_model_path, fcpe_model_path],
-         outputs=[result_audio],
-         queue=True,
-         show_api=True,
-         show_progress="full",
-     )
-
- demo.queue()
- demo.launch(debug=True, share=True, show_api=False)
+ import gradio as gr
+ from rvc_infer import infer_audio
+ import os
+ import re
+ import random
+ from scipy.io.wavfile import write
+ from scipy.io.wavfile import read
+ import numpy as np
+ import yt_dlp
+ import subprocess
+ import zipfile
+ import shutil
+ import urllib.request
+
+ print("downloading RVC models")
+ os.system("python dowoad_param.py")
+
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+ rvc_models_dir = os.path.join(BASE_DIR, 'models')
+
+
+ def get_current_models(models_dir):
+     models_list = os.listdir(models_dir)
+     items_to_remove = ['hubert_base.pt', 'MODELS.txt', 'public_models.json', 'rmvpe.pt']
+     return [item for item in models_list if item not in items_to_remove]
+
+
+
+
+ def update_models_list():
+     models_l = get_current_models(rvc_models_dir)
+     return gr.update(choices=models_l)
+
+ def extract_zip(extraction_folder, zip_name):
+     os.makedirs(extraction_folder)
+     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
+         zip_ref.extractall(extraction_folder)
+     os.remove(zip_name)
+
+     index_filepath, model_filepath = None, None
+     for root, dirs, files in os.walk(extraction_folder):
+         for name in files:
+             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
+                 index_filepath = os.path.join(root, name)
+
+             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
+                 model_filepath = os.path.join(root, name)
+
+     if not model_filepath:
+         raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
+
+     # move model and index file to extraction folder
+     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
+     if index_filepath:
+         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
+
+     # remove any unnecessary nested folders
+     for filepath in os.listdir(extraction_folder):
+         if os.path.isdir(os.path.join(extraction_folder, filepath)):
+             shutil.rmtree(os.path.join(extraction_folder, filepath))
+
+ def download_online_model(url, dir_name, progress=gr.Progress()):
+     try:
+         progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
+         zip_name = url.split('/')[-1]
+         extraction_folder = os.path.join(rvc_models_dir, dir_name)
+         if os.path.exists(extraction_folder):
+             raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
+
+         if 'pixeldrain.com' in url:
+             url = f'https://pixeldrain.com/api/file/{zip_name}'
+
+         urllib.request.urlretrieve(url, zip_name)
+
+         progress(0.5, desc='[~] Extracting zip...')
+         extract_zip(extraction_folder, zip_name)
+         return f'[+] {dir_name} Model successfully downloaded!'
+
+     except Exception as e:
+         raise gr.Error(str(e))
+
+ def download_audio(url):
+     ydl_opts = {
+         'format': 'bestaudio/best',
+         'outtmpl': 'ytdl/%(title)s.%(ext)s',
+         'postprocessors': [{
+             'key': 'FFmpegExtractAudio',
+             'preferredcodec': 'wav',
+             'preferredquality': '192',
+         }],
+     }
+
+     with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+         info_dict = ydl.extract_info(url, download=True)
+         file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
+         sample_rate, audio_data = read(file_path)
+         audio_array = np.asarray(audio_data, dtype=np.int16)
+
+     return sample_rate, audio_array
+
+
+ CSS = """
+ """
+
+ with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
+     gr.Markdown("# RVC INFER DEMOS")
+     gr.Markdown("# Recommended: use the Colab version for more features!<br> [![Open in Colab](https://img.shields.io/badge/google_colab-F9AB00?style=flat-square&logo=googlecolab&logoColor=white)](https://colab.research.google.com/drive/1bM1LB2__WNFxX8pyZmUPQZYq7dg58YWG?usp=sharing)")
+     with gr.Tab("Inference"):
+         gr.Markdown("in progress")
+         model_name = gr.Dropdown(label='Voice Models', info='Models folder: "rvc_infer --> models". After adding new models to this folder, click the refresh button')
+         ref_btn = gr.Button('Refresh Models', variant='primary')
+         input_audio = gr.Audio(label="Input Audio", type="filepath")
+         with gr.Accordion("Settings", open=False):
+             f0_change = gr.Slider(label="f0 change", minimum=-12, maximum=12, step=1, value=0)
+             f0_method = gr.Dropdown(label="f0 method", choices=["rmvpe+", "rmvpe", "fcpe", "hybrid[rmvpe+fcpe]"], value="rmvpe+")
+             min_pitch = gr.Textbox(label="min pitch", lines=1, value="-12")
+             max_pitch = gr.Textbox(label="max pitch", lines=1, value="12")
+             crepe_hop_length = gr.Slider(label="crepe_hop_length", minimum=0, maximum=256, step=1, value=128)
+             index_rate = gr.Slider(label="index_rate", minimum=0, maximum=1.0, step=0.01, value=0.75)
+             filter_radius = gr.Slider(label="filter_radius", minimum=0, maximum=10.0, step=0.01, value=3)
+             rms_mix_rate = gr.Slider(label="rms_mix_rate", minimum=0, maximum=1.0, step=0.01, value=0.25)
+             protect = gr.Slider(label="protect", minimum=0, maximum=1.0, step=0.01, value=0.33)
+         with gr.Accordion("Advanced Settings", open=False):
+             split_infer = gr.Checkbox(label="split_infer", value=False)
+             min_silence = gr.Slider(label="min_silence", minimum=0, maximum=1000, step=1, value=500)
+             silence_threshold = gr.Slider(label="silence_threshold", minimum=-1000, maximum=1000, step=1, value=-50)
+             seek_step = gr.Slider(label="seek_step", minimum=0, maximum=100, step=1, value=0)
+             keep_silence = gr.Slider(label="keep_silence", minimum=-1000, maximum=1000, step=1, value=100)
+             do_formant = gr.Checkbox(label="do_formant", value=False)
+             quefrency = gr.Slider(label="quefrency", minimum=0, maximum=100, step=1, value=0)
+             timbre = gr.Slider(label="timbre", minimum=0, maximum=100, step=1, value=1)
+             f0_autotune = gr.Checkbox(label="f0_autotune", value=False)
+             audio_format = gr.Dropdown(label="audio_format", choices=["wav"], value="wav", visible=False)
+             resample_sr = gr.Slider(label="resample_sr", minimum=0, maximum=100, step=1, value=0)
+             hubert_model_path = gr.Textbox(label="hubert_model_path", lines=1, value="hubert_base.pt", visible=False)
+             rmvpe_model_path = gr.Textbox(label="rmvpe_model_path", lines=1, value="rmvpe.pt", visible=False)
+             fcpe_model_path = gr.Textbox(label="fcpe_model_path", lines=1, value="fcpe.pt", visible=False)
+         submit_inference = gr.Button('Inference', variant='primary')
+         result_audio = gr.Audio(label="Output Audio")
+
+     with gr.Tab("Download Model"):
+         gr.Markdown("## Download Model for inference")
+         url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
+         dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
+         output = gr.Textbox(label="Output Models")
+         download_button = gr.Button("Download Model")
+         download_button.click(download_online_model, inputs=[url_input, dir_name_input], outputs=output)
+
+     with gr.Tab("Credits"):
+         gr.Markdown(
+             """
+             This project was made by [Blane187](https://huggingface.co/Blane187) with improvements by [John6666](https://huggingface.co/John6666)
+             """)
+
+     ref_btn.click(update_models_list, None, outputs=model_name)
+     gr.on(
+         triggers=[submit_inference.click],
+         fn=infer_audio,
+         inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
+                 filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
+                 keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
+                 hubert_model_path, rmvpe_model_path, fcpe_model_path],
+         outputs=[result_audio],
+         queue=True,
+         show_api=True,
+         show_progress="full",
+     )
+
+ demo.queue()
+ demo.launch(debug=True, share=True, show_api=False)