Update src/covergen.py
src/covergen.py  CHANGED  (+44 -35)
@@ -87,6 +87,20 @@ def inference(audio, model_name):
     return vocals, inst


+# Sample processing functions for audio and text inputs
+def process_audio(audio_path):
+    return f"Audio file received: {audio_path}"
+
+def process_text(text_input):
+    return f"Text received: {text_input}"
+
+# Function to handle input type dynamically
+def dynamic_input(input_type):
+    if input_type == "Audio Upload":
+        return gr.Audio(label='π΅ Upload Audio', interactive=True, type="filepath")
+    else:
+        return gr.Textbox(label="π Enter Text", placeholder="Type your text here...", interactive=True)
+


 # Define the Gradio interface
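Note: this hunk adds process_audio, process_text, and dynamic_input, but none of the hunks shown here contain a call site for them. If the intent is to let the user switch between an audio upload and a text box, a common Gradio pattern is to render both components and toggle their visibility from a selector's .change event. The sketch below is only a hypothetical wiring under that assumption; it presumes "import gradio as gr" and placement inside the existing gr.Blocks context, and the input_type_selector name and radio choices are not part of this commit.

    # Hypothetical wiring (not in this diff): toggle between audio and text inputs
    # from a radio selector instead of swapping component types at runtime.
    input_type_selector = gr.Radio(["Audio Upload", "Text Input"], value="Audio Upload", label="Input Type")
    audio_in = gr.Audio(label='π΅ Upload Audio', interactive=True, type="filepath", visible=True)
    text_in = gr.Textbox(label="π Enter Text", placeholder="Type your text here...", visible=False)

    def toggle_inputs(choice):
        # Show the audio widget for "Audio Upload", otherwise the textbox.
        return gr.update(visible=choice == "Audio Upload"), gr.update(visible=choice != "Audio Upload")

    input_type_selector.change(toggle_inputs, inputs=input_type_selector, outputs=[audio_in, text_in])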
@@ -108,11 +122,38 @@ with gr.Blocks(title="π€ RVC Inference", css="footer{display:none !important}"



-    with gr.Tab("πΆ Voice Conversion"):
-
+    with gr.Tab("πΆ Voice Conversion"):
         with gr.Column():
             audio_input = gr.Audio(label='π΅ Upload Audio', interactive=True, type="filepath")
+
+
+            with gr.Accordion(" π§ Vocal Separator (UVR)"):
+                gr.Markdown("β Separate vocals and instruments from an audio file using UVR models.")
+
+                with gr.Accordion("π Separation by Link", open=False):
+                    with gr.Row():
+                        mdx23c_link = gr.Textbox(label="π Link",placeholder="π Paste the link here",interactive=True)
+                    with gr.Row():
+                        gr.Markdown("π‘ You can paste the link to the video/audio from many sites, check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
+                    with gr.Row():
+                        mdx23c_download_button = gr.Button("β¬οΈ Download!",variant="primary")
+
+                uvr5_audio_file = gr.Audio(label=" π² Audio File",type="filepath")

+                with gr.Row():
+                    uvr5_model = gr.Dropdown(label="γ½οΈ Model", choices=[model["model_name"] for model in UVR_5_MODELS])
+                    uvr5_button = gr.Button("π§ Separate Vocals", variant="primary")
+                uvr5_output_inst = gr.Audio(type="filepath", label="Output 2",)
+
+
+
+
+                mdx23c_download_button.click(download_audio, [mdx23c_link], [uvr5_audio_file])
+
+                uvr5_button.click(inference, [uvr5_audio_file, uvr5_model], [audio_input, uvr5_output_inst])
+
+
+
             with gr.Accordion('βοΈ Voice Conversion Settings', open=False):
                 use_hybrid_methods = gr.Checkbox(label="𧬠Use Hybrid Methods", value=False)
                 f0_method = gr.Dropdown(['rmvpe+', 'fcpe', 'rmvpe', 'mangio-crepe', 'crepe'], value='rmvpe+', label='π§ F0 Method')
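The behavioral change in this hunk is the output list of uvr5_button.click: the separated vocals are routed straight into the Voice Conversion tab's audio_input rather than into a dedicated uvr5_output_voc component (which is removed in the next hunk). That wiring relies on inference returning (vocals, inst) in that order, presumably as file paths since both output components use type="filepath". A minimal sketch of the assumed contract, with the actual separation logic elided and run_uvr_separation as a purely hypothetical placeholder:

    def inference(audio, model_name):
        # Hypothetical body: run the selected UVR model on the uploaded file.
        vocals, inst = run_uvr_separation(audio, model_name)  # placeholder, not a real helper in this repo
        # First return value fills audio_input (vocals), second fills uvr5_output_inst.
        return vocals, inst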
@@ -134,39 +175,7 @@ with gr.Blocks(title="π€ RVC Inference", css="footer{display:none !important}"
         generate_btn.click(song_cover_pipeline, inputs=[audio_input, rvc_model, pitch, f0_method, crepe_hop_length, index_rate, filter_radius, rms_mix_rate, protect, output_format], outputs=[converted_audio])


-
-
-
-    with gr.Tab(" π§ Vocal Separator (UVR)"):
-        gr.Markdown("β Separate vocals and instruments from an audio file using UVR models.")
-
-
-        with gr.Accordion("π Separation by Link", open=False):
-            with gr.Row():
-                mdx23c_link = gr.Textbox(label="π Link",placeholder="π Paste the link here",interactive=True)
-            with gr.Row():
-                gr.Markdown("π‘ You can paste the link to the video/audio from many sites, check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
-            with gr.Row():
-                mdx23c_download_button = gr.Button("β¬οΈ Download!",variant="primary")
-
-
-
-
-        uvr5_audio_file = gr.Audio(label=" π² Audio File",type="filepath")
-
-        with gr.Row():
-            uvr5_model = gr.Dropdown(label="γ½οΈ Model", choices=[model["model_name"] for model in UVR_5_MODELS])
-            uvr5_button = gr.Button("π§ Separate Vocals", variant="primary",)
-
-        uvr5_output_voc = gr.Audio(type="filepath", label="Output 1",)
-        uvr5_output_inst = gr.Audio(type="filepath", label="Output 2",)
-
-
-
-
-        mdx23c_download_button.click(download_audio, [mdx23c_link], [uvr5_audio_file])
-
-        uvr5_button.click(inference, [uvr5_audio_file, uvr5_model], [uvr5_output_voc, uvr5_output_inst])
+



|