Aitron Emper committed on
Commit
601bb2e
·
1 Parent(s): cf968a5

Upload easy_infer.py

Browse files
Files changed (1) hide show
  1. easy_infer.py +1468 -0
easy_infer.py ADDED
@@ -0,0 +1,1468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+ import os
3
+ import sys
4
+ import errno
5
+ import shutil
6
+ import yt_dlp
7
+ from mega import Mega
8
+ import datetime
9
+ import unicodedata
10
+ import torch
11
+ import glob
12
+ import gradio as gr
13
+ import gdown
14
+ import zipfile
15
+ import traceback
16
+ import json
17
+ import requests
18
+ import wget
19
+ import ffmpeg
20
+ import hashlib
21
+ now_dir = os.getcwd()
22
+ sys.path.append(now_dir)
23
+ from unidecode import unidecode
24
+ import re
25
+ import time
26
+ from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
27
+ from vc_infer_pipeline import VC
28
+ from lib.infer_pack.models import (
29
+ SynthesizerTrnMs256NSFsid,
30
+ SynthesizerTrnMs256NSFsid_nono,
31
+ SynthesizerTrnMs768NSFsid,
32
+ SynthesizerTrnMs768NSFsid_nono,
33
+ )
34
+ from MDXNet import MDXNetDereverb
35
+ from config import Config
36
+ from infer_uvr5 import _audio_pre_, _audio_pre_new
37
+ from huggingface_hub import HfApi, list_models
38
+ from huggingface_hub import login
39
+ from i18n import I18nAuto
40
+ i18n = I18nAuto()
41
+ from bs4 import BeautifulSoup
42
+ from sklearn.cluster import MiniBatchKMeans
43
+
44
# --- Module-level setup: working dirs and model/index/audio discovery ---
config = Config()
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
# Recreate TEMP after wiping it so the exported env var points at a real directory.
os.makedirs(tmp, exist_ok=True)
os.environ["TEMP"] = tmp
weight_root = "weights"
weight_uvr5_root = "uvr5_weights"
index_root = "./logs/"
audio_root = "audios"

# Voice-model checkpoints (*.pth) available for inference.
names = []
for name in os.listdir(weight_root):
    if name.endswith(".pth"):
        names.append(name)
index_paths = []

global indexes_list  # no-op at module level; kept for compatibility with the original layout
indexes_list = []

audio_paths = []
# Feature-index files produced by training ("trained" snapshots are excluded;
# only the merged "added_*" indexes are usable for retrieval).
for root, dirs, files in os.walk(index_root, topdown=False):
    for name in files:
        if name.endswith(".index") and "trained" not in name:
            # os.path.join instead of a hard-coded "\\" so the paths are
            # valid on Linux/macOS as well as Windows.
            index_paths.append(os.path.join(root, name))

for root, dirs, files in os.walk(audio_root, topdown=False):
    for name in files:
        audio_paths.append("%s/%s" % (root, name))

# UVR5 vocal-separation models (.pth checkpoints or ONNX variants).
uvr5_names = []
for name in os.listdir(weight_uvr5_root):
    if name.endswith(".pth") or "onnx" in name:
        uvr5_names.append(name.replace(".pth", ""))
75
+
76
def calculate_md5(file_path):
    """Return the hex MD5 digest of the file at *file_path*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(file_path, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
82
+
83
def silentremove(filename):
    """Delete *filename*; a missing file is silently ignored.

    Any OSError other than "no such file" still propagates
    (FileNotFoundError is exactly the ENOENT case the original checked for).
    """
    try:
        os.remove(filename)
    except FileNotFoundError:
        pass
89
def get_md5(temp_folder):
    """Return the MD5 of the first inference checkpoint (*.pth) under *temp_folder*.

    Training checkpoints (G_/D_ prefixed or containing _G_/_D_) are skipped.
    Returns None when no matching file exists.
    """
    for base, _subfolders, filenames in os.walk(temp_folder):
        for fname in filenames:
            if not fname.endswith(".pth"):
                continue
            if fname.startswith("G_") or fname.startswith("D_"):
                continue
            if "_G_" in fname or "_D_" in fname:
                continue
            return calculate_md5(os.path.join(base, fname))
    return None
97
+
98
def find_parent(search_dir, file_name):
    """Return the absolute path of the directory under *search_dir* that
    contains a file named *file_name*, or None if it is not found."""
    for current_dir, _dirnames, contained_files in os.walk(search_dir):
        if file_name in contained_files:
            return os.path.abspath(current_dir)
    return None
103
+
104
def find_folder_parent(search_dir, folder_name):
    """Return the absolute path of the directory under *search_dir* that
    contains a subdirectory named *folder_name*, or None if not found."""
    for current_dir, contained_dirs, _filenames in os.walk(search_dir):
        if folder_name in contained_dirs:
            return os.path.abspath(current_dir)
    return None
109
+
110
def get_drive_folder_id(url):
    """Extract the file id from a Google Drive share link.

    Supports both link shapes: ".../file/d/<id>/..." and "...?id=<id>&...".
    Returns the id string, or None for non-Drive URLs or unrecognized formats.

    Fix: the original extracted ``file_id`` but never returned it, so the
    function always fell through and returned None.
    """
    if "drive.google.com" not in url:
        return None
    if "file/d/" in url:
        return url.split("file/d/")[1].split("/")[0]
    if "id=" in url:
        return url.split("id=")[1].split("&")[0]
    return None
118
+
119
def download_from_url(url):
    """Download *url* into <project root>/zips, dispatching on the hosting service.

    Returns "downloaded" on success, "demasiado uso" when Google Drive's
    download quota is exceeded, "link privado" for a private Drive file, and
    None for an empty or unrecognized URL.

    NOTE(review): several branches os.chdir('./zips') relative to the current
    working directory and only the success path chdirs back — callers wrap this
    in try/finally with their own os.chdir(parent_path) to compensate.
    """
    # Project root = the folder that contains "pretrained_v2".
    parent_path = find_folder_parent(".", "pretrained_v2")
    zips_path = os.path.join(parent_path, 'zips')

    if url != '':
        print(i18n("下载文件:") + f"{url}")
        if "drive.google.com" in url:
            # Google Drive: pull the file id out of either share-link shape.
            if "file/d/" in url:
                file_id = url.split("file/d/")[1].split("/")[0]
            elif "id=" in url:
                file_id = url.split("id=")[1].split("&")[0]
            else:
                return None

            if file_id:
                os.chdir('./zips')
                result = subprocess.run(["gdown", f"https://drive.google.com/uc?id={file_id}", "--fuzzy"], capture_output=True, text=True, encoding='utf-8')
                # Translate gdown's stderr messages into sentinel return values
                # that the callers pattern-match on.
                if "Too many users have viewed or downloaded this file recently" in str(result.stderr):
                    return "demasiado uso"
                if "Cannot retrieve the public link of the file." in str(result.stderr):
                    return "link privado"
                print(result.stderr)

        elif "/blob/" in url:
            # Hugging Face file page: rewrite to the direct-download endpoint.
            os.chdir('./zips')
            url = url.replace("blob", "resolve")
            # print("Resolved URL:", url) # Print the resolved URL
            wget.download(url)
        elif "mega.nz" in url:
            # Mega: both the legacy "#!<id>!" and the newer "file/<id>" formats.
            if "#!" in url:
                file_id = url.split("#!")[1].split("!")[0]
            elif "file/" in url:
                file_id = url.split("file/")[1].split("/")[0]
            else:
                return None
            if file_id:
                m = Mega()
                m.download_url(url, zips_path)
        elif "/tree/main" in url:
            # Hugging Face repository page: scrape it for the first .zip link.
            response = requests.get(url)
            soup = BeautifulSoup(response.content, 'html.parser')
            temp_url = ''
            for link in soup.find_all('a', href=True):
                if link['href'].endswith('.zip'):
                    temp_url = link['href']
                    break
            if temp_url:
                url = temp_url
                # print("Updated URL:", url) # Print the updated URL
                url = url.replace("blob", "resolve")
                # print("Resolved URL:", url) # Print the resolved URL

                # The scraped href may be site-relative; make it absolute.
                if "huggingface.co" not in url:
                    url = "https://huggingface.co" + url

                wget.download(url)
            else:
                print("No .zip file found on the page.")
                # Handle the case when no .zip file is found
        else:
            # Any other host: plain wget download into ./zips.
            os.chdir('./zips')
            wget.download(url)

        os.chdir(parent_path)
        print(i18n("完整下载"))
        return "downloaded"
    else:
        return None
187
+
188
class error_message(Exception):
    """Exception that keeps its user-facing text in the ``mensaje`` attribute."""

    def __init__(self, mensaje):
        super().__init__(mensaje)
        self.mensaje = mensaje
192
+
193
# Only one voice model can be active per tab.
def get_vc(sid, to_return_protect0, to_return_protect1):
    """Load voice model *sid* from the weights folder into the module globals,
    or unload everything when *sid* is empty.

    Returns three Gradio update dicts: the speaker-id slider (visible with
    ``maximum=n_spk``) and the two "protect" sliders (hidden for non-f0 models).
    """
    global n_spk, tgt_sr, net_g, vc, cpt, version
    if sid == "" or sid == []:
        global hubert_model
        # Polling may switch from a loaded model to none; check whether we
        # actually have something to free.
        if hubert_model is not None:
            print("clean_empty_cache")
            del net_g, n_spk, vc, hubert_model, tgt_sr  # ,cpt
            hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            # Without the re-instantiate/delete dance below, the CUDA cache is
            # not fully released.
            if_f0 = cpt.get("f0", 1)
            version = cpt.get("version", "v1")
            if version == "v1":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs256NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
            elif version == "v2":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs768NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
            del net_g, cpt
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            cpt = None
        # Hide all three controls when no model is selected.
        return (
            {"visible": False, "__type__": "update"},
            {"visible": False, "__type__": "update"},
            {"visible": False, "__type__": "update"},
        )
    person = "%s/%s" % (weight_root, sid)
    print("loading %s" % person)
    cpt = torch.load(person, map_location="cpu")
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)
    if if_f0 == 0:
        # Non-f0 models have no "protect" knobs; hide them with a neutral value.
        to_return_protect0 = to_return_protect1 = {
            "visible": False,
            "value": 0.5,
            "__type__": "update",
        }
    else:
        to_return_protect0 = {
            "visible": True,
            "value": to_return_protect0,
            "__type__": "update",
        }
        to_return_protect1 = {
            "visible": True,
            "value": to_return_protect1,
            "__type__": "update",
        }
    version = cpt.get("version", "v1")
    # Pick the synthesizer class matching the checkpoint's version/f0 combo.
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    # enc_q is only needed for training; drop it before loading weights.
    del net_g.enc_q
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(config.device)
    if config.is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    n_spk = cpt["config"][-3]
    return (
        {"visible": True, "maximum": n_spk, "__type__": "update"},
        to_return_protect0,
        to_return_protect1,
    )
278
+
279
def load_downloaded_model(url):
    """Generator: download a model archive from *url*, unpack it, and install
    the .pth into weights/ and the added_*.index into logs/<model>/.

    Yields progress strings for the Gradio textbox after each step.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    try:
        infos = []
        logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
        zips_path = os.path.join(parent_path, 'zips')
        unzips_path = os.path.join(parent_path, 'unzips')
        weights_path = os.path.join(parent_path, 'weights')
        logs_dir = ""

        # Start from clean staging folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        os.mkdir(zips_path)
        os.mkdir(unzips_path)

        download_file = download_from_url(url)
        if not download_file:
            print(i18n("无法下载模型。"))
            infos.append(i18n("无法下载模型。"))
            yield "\n".join(infos)
        elif download_file == "downloaded":
            print(i18n("模型下载成功。"))
            infos.append(i18n("模型下载成功。"))
            yield "\n".join(infos)
        elif download_file == "demasiado uso":
            # Sentinel from download_from_url: Drive quota exceeded.
            raise Exception(i18n("最近查看或下载此文件的用户过多"))
        elif download_file == "link privado":
            # Sentinel from download_from_url: file not publicly shared.
            raise Exception(i18n("无法从该私人链接获取文件"))

        # Unpack the downloaded archives.
        for filename in os.listdir(zips_path):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join(zips_path,filename)
                print(i18n("继续提取..."))
                infos.append(i18n("继续提取..."))
                shutil.unpack_archive(zipfile_path, unzips_path, 'zip')
                model_name = os.path.basename(zipfile_path)
                logs_dir = os.path.join(parent_path,'logs', os.path.normpath(str(model_name).replace(".zip","")))
                yield "\n".join(infos)
            else:
                print(i18n("解压缩出错。"))
                infos.append(i18n("解压缩出错。"))
                yield "\n".join(infos)

        index_file = False
        model_file = False
        D_file = False
        G_file = False

        # Move the small inference checkpoint (.pth, not a G_/D_ training
        # checkpoint) into weights/ and prepare logs/<model>/.
        for path, subdirs, files in os.walk(unzips_path):
            for item in files:
                item_path = os.path.join(path, item)
                if not 'G_' in item and not 'D_' in item and item.endswith('.pth'):
                    model_file = True
                    model_name = item.replace(".pth","")
                    logs_dir = os.path.join(parent_path,'logs', model_name)
                    if os.path.exists(logs_dir):
                        shutil.rmtree(logs_dir)
                    os.mkdir(logs_dir)
                    if not os.path.exists(weights_path):
                        os.mkdir(weights_path)
                    if os.path.exists(os.path.join(weights_path, item)):
                        os.remove(os.path.join(weights_path, item))
                    if os.path.exists(item_path):
                        shutil.move(item_path, weights_path)

        if not model_file and not os.path.exists(logs_dir):
            os.mkdir(logs_dir)
        # Move the feature index (and stray training artifacts) into logs/<model>/.
        for path, subdirs, files in os.walk(unzips_path):
            for item in files:
                item_path = os.path.join(path, item)
                if item.startswith('added_') and item.endswith('.index'):
                    index_file = True
                    if os.path.exists(item_path):
                        if os.path.exists(os.path.join(logs_dir, item)):
                            os.remove(os.path.join(logs_dir, item))
                        shutil.move(item_path, logs_dir)
                if item.startswith('total_fea.npy') or item.startswith('events.'):
                    if os.path.exists(item_path):
                        if os.path.exists(os.path.join(logs_dir, item)):
                            os.remove(os.path.join(logs_dir, item))
                        shutil.move(item_path, logs_dir)

        result = ""
        if model_file:
            if index_file:
                print(i18n("该模型可用于推理,并有 .index 文件。"))
                infos.append("\n" + i18n("该模型可用于推理,并有 .index 文件。"))
                yield "\n".join(infos)
            else:
                print(i18n("该模型可用于推理,但没有 .index 文件。"))
                infos.append("\n" + i18n("该模型可用于推理,但没有 .index 文件。"))
                yield "\n".join(infos)

        if not index_file and not model_file:
            print(i18n("未找到可上传的相关文件"))
            infos.append(i18n("未找到可上传的相关文件"))
            yield "\n".join(infos)

        # Clean up the staging folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)
        os.chdir(parent_path)
        return result
    except Exception as e:
        os.chdir(parent_path)
        # download_from_url's sentinel strings were re-raised as exceptions;
        # match on their text to show a specific message.
        if "demasiado uso" in str(e):
            print(i18n("最近查看或下载此文件的用户过多"))
            yield i18n("最近查看或下载此文件的用户过多")
        elif "link privado" in str(e):
            print(i18n("无法从该私人链接获取文件"))
            yield i18n("无法从该私人链接获取文件")
        else:
            print(e)
            yield i18n("下载模型时发生错误。")
    finally:
        os.chdir(parent_path)
403
+
404
def load_dowloaded_dataset(url):
    """Generator: download a dataset archive from *url*, unpack it, and move
    every .flac/.wav file into datasets/<archive name>/.

    Yields progress strings for the Gradio textbox after each step.
    (Name typo "dowloaded" kept — callers reference it.)
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    infos = []
    try:
        zips_path = os.path.join(parent_path, 'zips')
        unzips_path = os.path.join(parent_path, 'unzips')
        datasets_path = os.path.join(parent_path, 'datasets')
        audio_extenions =["flac","wav"]

        # Start from clean staging folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        if not os.path.exists(datasets_path):
            os.mkdir(datasets_path)

        os.mkdir(zips_path)
        os.mkdir(unzips_path)

        download_file = download_from_url(url)

        if not download_file:
            print(i18n("下载模型时发生错误。"))
            infos.append(i18n("下载模型时发生错误。"))
            yield "\n".join(infos)
            raise Exception(i18n("下载模型时发生错误。"))
        elif download_file == "downloaded":
            print(i18n("模型下载成功。"))
            infos.append(i18n("模型下载成功。"))
            yield "\n".join(infos)
        elif download_file == "demasiado uso":
            # Sentinel from download_from_url: Drive quota exceeded.
            raise Exception(i18n("最近查看或下载此文件的用户过多"))
        elif download_file == "link privado":
            # Sentinel from download_from_url: file not publicly shared.
            raise Exception(i18n("无法从该私人链接获取文件"))

        zip_path = os.listdir(zips_path)
        foldername = ""
        for file in zip_path:
            if file.endswith('.zip'):
                file_path = os.path.join(zips_path, file)
                print("....")
                # Dataset folder name derived from the archive name, sanitized.
                foldername = file.replace(".zip","").replace(" ","").replace("-","_")
                dataset_path = os.path.join(datasets_path, foldername)
                print(i18n("继续提取..."))
                infos.append(i18n("继续提取..."))
                yield "\n".join(infos)
                shutil.unpack_archive(file_path, unzips_path, 'zip')
                if os.path.exists(dataset_path):
                    shutil.rmtree(dataset_path)

                os.mkdir(dataset_path)

                # Flatten: collect every audio file from the archive, at any depth.
                for root, subfolders, songs in os.walk(unzips_path):
                    for song in songs:
                        song_path = os.path.join(root, song)
                        if song.endswith(tuple(audio_extenions)):
                            shutil.move(song_path, dataset_path)
            else:
                print(i18n("解压缩出错。"))
                infos.append(i18n("解压缩出错。"))
                yield "\n".join(infos)

        # Clean up the staging folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        print(i18n("数据集加载成功。"))
        infos.append(i18n("数据集加载成功。"))
        yield "\n".join(infos)
    except Exception as e:
        os.chdir(parent_path)
        if "demasiado uso" in str(e):
            print(i18n("最近查看或下载此文件的用户过多"))
            yield i18n("最近查看或下载此文件的用户过多")
        elif "link privado" in str(e):
            print(i18n("无法从该私人链接获取文件"))
            yield i18n("无法从该私人链接获取文件")
        else:
            print(e)
            yield i18n("下载模型时发生错误。")
    finally:
        os.chdir(parent_path)
490
+
491
def save_model(modelname, save_action):
    """Generator: zip up the trained model *modelname* according to
    *save_action* (all files / D+G checkpoints / voice only) and move the
    archive into the backup folder (Google Drive when running on Colab).

    Yields progress strings for the Gradio textbox.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    zips_path = os.path.join(parent_path, 'zips')
    dst = os.path.join(zips_path,modelname)
    logs_path = os.path.join(parent_path, 'logs', modelname)
    weights_path = os.path.join(parent_path, 'weights', f"{modelname}.pth")
    save_folder = parent_path
    infos = []

    try:
        if not os.path.exists(logs_path):
            raise Exception("No model found.")

        # On Colab the path contains "content"; back up to Google Drive there,
        # otherwise to a local RVC_Backup folder.
        if not 'content' in parent_path:
            save_folder = os.path.join(parent_path, 'RVC_Backup')
        else:
            save_folder = '/content/drive/MyDrive/RVC_Backup'

        infos.append(i18n("保存模型..."))
        yield "\n".join(infos)

        # Create the backup folder structure if it does not exist yet.
        if not os.path.exists(save_folder):
            os.mkdir(save_folder)
        if not os.path.exists(os.path.join(save_folder, 'ManualTrainingBackup')):
            os.mkdir(os.path.join(save_folder, 'ManualTrainingBackup'))
        if not os.path.exists(os.path.join(save_folder, 'Finished')):
            os.mkdir(os.path.join(save_folder, 'Finished'))

        # Wipe any stale content of the zips staging folder.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)

        os.mkdir(zips_path)
        added_file = glob.glob(os.path.join(logs_path, "added_*.index"))
        d_file = glob.glob(os.path.join(logs_path, "D_*.pth"))
        g_file = glob.glob(os.path.join(logs_path, "G_*.pth"))

        if save_action == i18n("选择模型保存方法"):
            raise Exception("No method choosen.")

        if save_action == i18n("保存所有"):
            print(i18n("保存所有"))
            save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
            shutil.copytree(logs_path, dst)
        else:
            # Create the staging folder the model will be zipped from.
            if not os.path.exists(dst):
                os.mkdir(dst)

        if save_action == i18n("保存 D 和 G"):
            print(i18n("保存 D 和 G"))
            save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
            if len(d_file) > 0:
                shutil.copy(d_file[0], dst)
            if len(g_file) > 0:
                shutil.copy(g_file[0], dst)

            if len(added_file) > 0:
                shutil.copy(added_file[0], dst)
            else:
                infos.append(i18n("保存时未编制索引..."))

        if save_action == i18n("保存声音"):
            print(i18n("保存声音"))
            save_folder = os.path.join(save_folder, 'Finished')
            if len(added_file) > 0:
                shutil.copy(added_file[0], dst)
            else:
                infos.append(i18n("保存时未编制索引..."))
                #raise gr.Error("¡No ha generado el archivo added_*.index!")

        yield "\n".join(infos)
        # Skip the small checkpoint if it was never generated.
        if not os.path.exists(weights_path):
            infos.append(i18n("无模型保存(PTH)"))
            #raise gr.Error("¡No ha generado el modelo pequeño!")
        else:
            shutil.copy(weights_path, dst)

        yield "\n".join(infos)
        infos.append("\n" + i18n("这可能需要几分钟时间,请稍候..."))
        yield "\n".join(infos)

        shutil.make_archive(os.path.join(zips_path,f"{modelname}"), 'zip', zips_path)
        shutil.move(os.path.join(zips_path,f"{modelname}.zip"), os.path.join(save_folder, f'{modelname}.zip'))

        shutil.rmtree(zips_path)
        #shutil.rmtree(zips_path)

        infos.append("\n" + i18n("正确存储模型"))
        yield "\n".join(infos)

    except Exception as e:
        print(e)
        if "No model found." in str(e):
            infos.append(i18n("您要保存的模型不存在,请确保输入的名称正确。"))
        else:
            infos.append(i18n("保存模型时发生错误"))

        yield "\n".join(infos)
593
+
594
def load_downloaded_backup(url):
    """Generator: download a training-backup archive from *url* and restore it
    into logs/<archive name>/ so training can resume.

    Yields progress strings for the Gradio textbox after each step.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    try:
        infos = []
        logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
        zips_path = os.path.join(parent_path, 'zips')
        unzips_path = os.path.join(parent_path, 'unzips')
        weights_path = os.path.join(parent_path, 'weights')
        logs_dir = os.path.join(parent_path, 'logs')

        # Start from clean staging folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        os.mkdir(zips_path)
        os.mkdir(unzips_path)

        download_file = download_from_url(url)
        if not download_file:
            print(i18n("无法下载模型。"))
            infos.append(i18n("无法下载模型。"))
            yield "\n".join(infos)
        elif download_file == "downloaded":
            print(i18n("模型下载成功。"))
            infos.append(i18n("模型下载成功。"))
            yield "\n".join(infos)
        elif download_file == "demasiado uso":
            # Sentinel from download_from_url: Drive quota exceeded.
            raise Exception(i18n("最近查看或下载此文件的用户过多"))
        elif download_file == "link privado":
            # Sentinel from download_from_url: file not publicly shared.
            raise Exception(i18n("无法从该私人链接获取文件"))

        # Unpack the downloaded archives into logs/<archive name>/.
        for filename in os.listdir(zips_path):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join(zips_path,filename)
                zip_dir_name = os.path.splitext(filename)[0]
                unzip_dir = unzips_path
                print(i18n("继续提取..."))
                infos.append(i18n("继续提取..."))
                shutil.unpack_archive(zipfile_path, unzip_dir, 'zip')

                if os.path.exists(os.path.join(unzip_dir, zip_dir_name)):
                    # The archive contained a top-level folder with the same
                    # name — move that folder directly.
                    shutil.move(os.path.join(unzip_dir, zip_dir_name), logs_dir)
                else:
                    # Flat archive — create a folder with the archive's name
                    # and move everything into it.
                    new_folder_path = os.path.join(logs_dir, zip_dir_name)
                    os.mkdir(new_folder_path)
                    for item_name in os.listdir(unzip_dir):
                        item_path = os.path.join(unzip_dir, item_name)
                        if os.path.isfile(item_path):
                            shutil.move(item_path, new_folder_path)
                        elif os.path.isdir(item_path):
                            shutil.move(item_path, new_folder_path)

                yield "\n".join(infos)
            else:
                print(i18n("解压缩出错。"))
                infos.append(i18n("解压缩出错。"))
                yield "\n".join(infos)

        result = ""

        for filename in os.listdir(unzips_path):
            if filename.endswith(".zip"):
                silentremove(filename)

        # Clean up the staging folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(os.path.join(parent_path, 'unzips')):
            shutil.rmtree(os.path.join(parent_path, 'unzips'))
        print(i18n("备份已成功上传。"))
        infos.append("\n" + i18n("备份已成功上传。"))
        yield "\n".join(infos)
        os.chdir(parent_path)
        return result
    except Exception as e:
        os.chdir(parent_path)
        if "demasiado uso" in str(e):
            print(i18n("最近查看或下载此文件的用户过多"))
            yield i18n("最近查看或下载此文件的用户过多")
        elif "link privado" in str(e):
            print(i18n("无法从该私人链接获取文件"))
            yield i18n("无法从该私人链接获取文件")
        else:
            print(e)
            yield i18n("下载模型时发生错误。")
    finally:
        os.chdir(parent_path)
684
+
685
def save_to_wav(record_button):
    """Move a freshly recorded clip into ./audios under a timestamped name.

    Returns the new file name, or None when nothing was recorded.
    """
    if record_button is None:
        return None
    stamped_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '.wav'
    shutil.move(record_button, './audios/' + stamped_name)
    return stamped_name
694
+
695
def save_to_wav2(dropbox):
    """Move an uploaded file (Gradio file object with a .name path) into
    ./audios, replacing any previous upload of the same name.

    Returns the destination path.
    """
    source_path = dropbox.name
    destination = os.path.join('./audios', os.path.basename(source_path))

    if os.path.exists(destination):
        # Replace a previously uploaded file of the same name.
        os.remove(destination)

    shutil.move(source_path, destination)
    return destination
705
+
706
def change_choices2():
    """Rescan ./audios and return two Gradio update dicts: the sorted list of
    playable files for a dropdown, and a bare refresh for its companion."""
    playable_suffixes = ('wav', 'mp3', 'flac', 'ogg', 'opus',
                         'm4a', 'mp4', 'aac', 'alac', 'wma',
                         'aiff', 'webm', 'ac3')
    found = [
        os.path.join('./audios', entry).replace('\\', '/')
        for entry in os.listdir("./audios")
        if entry.endswith(playable_suffixes)
    ]
    return {"choices": sorted(found), "__type__": "update"}, {"__type__": "update"}
714
+
715
def get_models_by_name(modelname):
    """Query the community model index for *modelname* and build Gradio updates.

    Returns a flat list of 101 update dicts: for each of the 20 result slots,
    five updates in fixed order [name markdown, hidden url textbox, load
    button, separator line, row container], then one final update for the
    status textbox. Slots beyond the number of hits are hidden.
    """
    url = "https://script.google.com/macros/s/AKfycbzyrdLZzUww9qbjxnbnI08budD4yxbmRPHkWbp3UEJ9h3Id5cnNNVg0UtfFAnqqX5Rr/exec"

    response = requests.post(url, json={
        'type': 'search_by_filename',
        'filename': unidecode(modelname.strip().lower())
    })

    response_json = response.json()
    models = response_json['ocurrences']

    result = []
    message = "Busqueda realizada"
    if len(models) == 0:
        message = "No se han encontrado resultados."
    else:
        message = f"Se han encontrado {len(models)} resultados para {modelname}"

    for i in range(20):
        if i < len(models):
            urls = models[i].get('url')
            # SECURITY: eval() runs a string received from the remote service —
            # a compromised endpoint could execute arbitrary code here.
            # Should be ast.literal_eval; flagged for review.
            url = eval(urls)[0]
            name = str(models[i].get('name'))
            # Prefer the human-readable name; fall back to the filename.
            filename = str(models[i].get('filename')) if not name or name.strip() == "" else name
            # Name
            result.append(
                {
                    "visible": True,
                    "value": str("### ") + filename,
                    "__type__": "update",
                })
            # Url
            result.append(
                {
                    "visible": False,
                    "value": url,
                    "__type__": "update",
                })
            # Button
            result.append({
                "visible": True,
                "__type__": "update",
            })

            # Separator line (hidden after the last hit)
            if i == len(models) - 1:
                result.append({
                    "visible": False,
                    "__type__": "update",
                })
            else:
                result.append({
                    "visible": True,
                    "__type__": "update",
                })

            # Row
            result.append(
                {
                    "visible": True,
                    "__type__": "update",
                })
        else:
            # Unused slot: hide every component in the row.
            # Name
            result.append(
                {
                    "visible": False,
                    "__type__": "update",
                })
            # Url
            result.append(
                {
                    "visible": False,
                    "value": False,
                    "__type__": "update",
                })
            # Button
            result.append({
                "visible": False,
                "__type__": "update",
            })
            # Line
            result.append({
                "visible": False,
                "__type__": "update",
            })
            # Row
            result.append(
                {
                    "visible": False,
                    "__type__": "update",
                })
    # Result (status textbox)
    result.append(
        {
            "value": message,
            "__type__": "update",
        }
    )

    return result
816
+
817
+ def search_model():
818
+ gr.Markdown(value="# Buscar un modelo")
819
+ with gr.Row():
820
+ model_name = gr.inputs.Textbox(lines=1, label="Término de búsqueda")
821
+ search_model_button=gr.Button("Buscar modelo")
822
+
823
+ models = []
824
+ results = gr.Textbox(label="Resultado", value="", max_lines=20)
825
+ with gr.Row(visible=False) as row1:
826
+ l1 = gr.Markdown(value="", visible=False)
827
+ l1_url = gr.Textbox("Label 1", visible=False)
828
+ b1 = gr.Button("Cargar modelo", visible=False)
829
+
830
+ mk1 = gr.Markdown(value="---", visible=False)
831
+ b1.click(fn=load_downloaded_model, inputs=l1_url, outputs=results)
832
+
833
+ with gr.Row(visible=False) as row2:
834
+ l2 = gr.Markdown(value="", visible=False)
835
+ l2_url = gr.Textbox("Label 1", visible=False)
836
+ b2 = gr.Button("Cargar modelo", visible=False)
837
+
838
+ mk2 = gr.Markdown(value="---", visible=False)
839
+ b2.click(fn=load_downloaded_model, inputs=l2_url, outputs=results)
840
+
841
+ with gr.Row(visible=False) as row3:
842
+ l3 = gr.Markdown(value="", visible=False)
843
+ l3_url = gr.Textbox("Label 1", visible=False)
844
+ b3 = gr.Button("Cargar modelo", visible=False)
845
+
846
+ mk3 = gr.Markdown(value="---", visible=False)
847
+ b3.click(fn=load_downloaded_model, inputs=l3_url, outputs=results)
848
+
849
+ with gr.Row(visible=False) as row4:
850
+ l4 = gr.Markdown(value="", visible=False)
851
+ l4_url = gr.Textbox("Label 1", visible=False)
852
+ b4 = gr.Button("Cargar modelo", visible=False)
853
+ mk4 = gr.Markdown(value="---", visible=False)
854
+ b4.click(fn=load_downloaded_model, inputs=l4_url, outputs=results)
855
+
856
+ with gr.Row(visible=False) as row5:
857
+ l5 = gr.Markdown(value="", visible=False)
858
+ l5_url = gr.Textbox("Label 1", visible=False)
859
+ b5 = gr.Button("Cargar modelo", visible=False)
860
+
861
+ mk5 = gr.Markdown(value="---", visible=False)
862
+ b5.click(fn=load_downloaded_model, inputs=l5_url, outputs=results)
863
+
864
+ with gr.Row(visible=False) as row6:
865
+ l6 = gr.Markdown(value="", visible=False)
866
+ l6_url = gr.Textbox("Label 1", visible=False)
867
+ b6 = gr.Button("Cargar modelo", visible=False)
868
+
869
+ mk6 = gr.Markdown(value="---", visible=False)
870
+ b6.click(fn=load_downloaded_model, inputs=l6_url, outputs=results)
871
+
872
+ with gr.Row(visible=False) as row7:
873
+ l7 = gr.Markdown(value="", visible=False)
874
+ l7_url = gr.Textbox("Label 1", visible=False)
875
+ b7 = gr.Button("Cargar modelo", visible=False)
876
+
877
+ mk7 = gr.Markdown(value="---", visible=False)
878
+ b7.click(fn=load_downloaded_model, inputs=l7_url, outputs=results)
879
+
880
+ with gr.Row(visible=False) as row8:
881
+ l8 = gr.Markdown(value="", visible=False)
882
+ l8_url = gr.Textbox("Label 1", visible=False)
883
+ b8 = gr.Button("Cargar modelo", visible=False)
884
+
885
+ mk8 = gr.Markdown(value="---", visible=False)
886
+ b8.click(fn=load_downloaded_model, inputs=l8_url, outputs=results)
887
+
888
+ with gr.Row(visible=False) as row9:
889
+ l9 = gr.Markdown(value="", visible=False)
890
+ l9_url = gr.Textbox("Label 1", visible=False)
891
+ b9 = gr.Button("Cargar modelo", visible=False)
892
+
893
+ mk9 = gr.Markdown(value="---", visible=False)
894
+ b9.click(fn=load_downloaded_model, inputs=l9_url, outputs=results)
895
+
896
+ with gr.Row(visible=False) as row10:
897
+ l10 = gr.Markdown(value="", visible=False)
898
+ l10_url = gr.Textbox("Label 1", visible=False)
899
+ b10 = gr.Button("Cargar modelo", visible=False)
900
+
901
+ mk10 = gr.Markdown(value="---", visible=False)
902
+ b10.click(fn=load_downloaded_model, inputs=l10_url, outputs=results)
903
+
904
+ with gr.Row(visible=False) as row11:
905
+ l11 = gr.Markdown(value="", visible=False)
906
+ l11_url = gr.Textbox("Label 1", visible=False)
907
+ b11 = gr.Button("Cargar modelo", visible=False)
908
+
909
+ mk11 = gr.Markdown(value="---", visible=False)
910
+ b11.click(fn=load_downloaded_model, inputs=l11_url, outputs=results)
911
+
912
+ with gr.Row(visible=False) as row12:
913
+ l12 = gr.Markdown(value="", visible=False)
914
+ l12_url = gr.Textbox("Label 1", visible=False)
915
+ b12 = gr.Button("Cargar modelo", visible=False)
916
+
917
+ mk12 = gr.Markdown(value="---", visible=False)
918
+ b12.click(fn=load_downloaded_model, inputs=l12_url, outputs=results)
919
+
920
+ with gr.Row(visible=False) as row13:
921
+ l13 = gr.Markdown(value="", visible=False)
922
+ l13_url = gr.Textbox("Label 1", visible=False)
923
+ b13 = gr.Button("Cargar modelo", visible=False)
924
+
925
+ mk13 = gr.Markdown(value="---", visible=False)
926
+ b13.click(fn=load_downloaded_model, inputs=l13_url, outputs=results)
927
+
928
+ with gr.Row(visible=False) as row14:
929
+ l14 = gr.Markdown(value="", visible=False)
930
+ l14_url = gr.Textbox("Label 1", visible=False)
931
+ b14 = gr.Button("Cargar modelo", visible=False)
932
+
933
+ mk14 = gr.Markdown(value="---", visible=False)
934
+ b14.click(fn=load_downloaded_model, inputs=l14_url, outputs=results)
935
+
936
+ with gr.Row(visible=False) as row15:
937
+ l15 = gr.Markdown(value="", visible=False)
938
+ l15_url = gr.Textbox("Label 1", visible=False)
939
+ b15 = gr.Button("Cargar modelo", visible=False)
940
+
941
+ mk15 = gr.Markdown(value="---", visible=False)
942
+ b15.click(fn=load_downloaded_model, inputs=l15_url, outputs=results)
943
+
944
+ with gr.Row(visible=False) as row16:
945
+ l16 = gr.Markdown(value="", visible=False)
946
+ l16_url = gr.Textbox("Label 1", visible=False)
947
+ b16 = gr.Button("Cargar modelo", visible=False)
948
+
949
+ mk16 = gr.Markdown(value="---", visible=False)
950
+ b16.click(fn=load_downloaded_model, inputs=l16_url, outputs=results)
951
+
952
+ with gr.Row(visible=False) as row17:
953
+ l17 = gr.Markdown(value="", visible=False)
954
+ l17_url = gr.Textbox("Label 1", visible=False)
955
+ b17 = gr.Button("Cargar modelo", visible=False)
956
+
957
+ mk17 = gr.Markdown(value="---", visible=False)
958
+ b17.click(fn=load_downloaded_model, inputs=l17_url, outputs=results)
959
+
960
+ with gr.Row(visible=False) as row18:
961
+ l18 = gr.Markdown(value="", visible=False)
962
+ l18_url = gr.Textbox("Label 1", visible=False)
963
+ b18 = gr.Button("Cargar modelo", visible=False)
964
+
965
+ mk18 = gr.Markdown(value="---", visible=False)
966
+ b18.click(fn=load_downloaded_model, inputs=l18_url, outputs=results)
967
+
968
+ with gr.Row(visible=False) as row19:
969
+ l19 = gr.Markdown(value="", visible=False)
970
+ l19_url = gr.Textbox("Label 1", visible=False)
971
+ b19 = gr.Button("Cargar modelo", visible=False)
972
+
973
+ mk19 = gr.Markdown(value="---", visible=False)
974
+ b19.click(fn=load_downloaded_model, inputs=l19_url, outputs=results)
975
+
976
+ with gr.Row(visible=False) as row20:
977
+ l20 = gr.Markdown(value="", visible=False)
978
+ l20_url = gr.Textbox("Label 1", visible=False)
979
+ b20 = gr.Button("Cargar modelo", visible=False)
980
+
981
+ mk20 = gr.Markdown(value="---", visible=False)
982
+ b20.click(fn=load_downloaded_model, inputs=l20_url, outputs=results)
983
+
984
+ # to_return_protect1 =
985
+
986
+ search_model_button.click(fn=get_models_by_name, inputs=model_name, outputs=[l1,l1_url, b1, mk1, row1,
987
+ l2,l2_url, b2, mk2, row2,
988
+ l3,l3_url, b3, mk3, row3,
989
+ l4,l4_url, b4, mk4, row4,
990
+ l5,l5_url, b5, mk5, row5,
991
+ l6,l6_url, b6, mk6, row6,
992
+ l7,l7_url, b7, mk7, row7,
993
+ l8,l8_url, b8, mk8, row8,
994
+ l9,l9_url, b9, mk9, row9,
995
+ l10,l10_url, b10, mk10, row10,
996
+ l11,l11_url, b11, mk11, row11,
997
+ l12,l12_url, b12, mk12, row12,
998
+ l13,l13_url, b13, mk13, row13,
999
+ l14,l14_url, b14, mk14, row14,
1000
+ l15,l15_url, b15, mk15, row15,
1001
+ l16,l16_url, b16, mk16, row16,
1002
+ l17,l17_url, b17, mk17, row17,
1003
+ l18,l18_url, b18, mk18, row18,
1004
+ l19,l19_url, b19, mk19, row19,
1005
+ l20,l20_url, b20, mk20, row20,
1006
+ results
1007
+ ])
1008
+
1009
+
1010
def descargar_desde_drive(url, name, output_file):
    """Fetch a Google Drive share link with gdown.

    Returns the local path of the downloaded file, or None when the
    download attempt fails for any reason.
    """
    print(f"Descargando {name} de drive")
    try:
        # fuzzy=True lets gdown extract the file id from a full share URL.
        return gdown.download(url, output=output_file, fuzzy=True)
    except:
        print("El intento de descargar con drive no funcionó")
        return None
1020
+
1021
def descargar_desde_mega(url, name, output_file=None):
    """Download a file shared through mega.nz.

    Args:
        url: mega.nz share link, either the legacy ``#!`` style or the
            ``/file/`` style.
        name: Human-readable model name, used only for log output.
        output_file: Accepted for call-site compatibility with the other
            downloaders (``publish_model_clicked`` passes it); mega.py
            chooses its own destination, so the value is not used here.

    Returns:
        The path of the downloaded file, or None when the URL carries no
        recognizable file id or the download fails.
    """
    try:
        # Extract the file id only to validate that the link looks like a
        # real mega.nz file URL before opening a session.
        if "#!" in url:
            file_id = url.split("#!")[1].split("!")[0]
        elif "file/" in url:
            file_id = url.split("file/")[1].split("/")[0]
        else:
            file_id = None

        if file_id:
            mega = Mega()
            m = mega.login()  # anonymous session

            print(f"Descargando {name} de mega")
            downloaded_file = m.download_url(url)

            return downloaded_file
        else:
            return None

    except Exception as e:
        print("Error**")
        print(e)
        return None
1048
+
1049
def descargar_desde_url_basica(url, name, output_file):
    """Fetch a plain HTTP(S) URL with wget.

    Returns the downloaded filename, or None (implicitly) when the
    download raises.
    """
    try:
        print(f"Descargando {name} de URL BASICA")
        return wget.download(url=url, out=output_file)
    except Exception as e:
        print(f"Error al descargar el archivo: {str(e)}")
1056
+
1057
def is_valid_model(name):
    """Inspect an unzipped model folder and report what it contains.

    Walks ``unzips/<name>`` looking for a voice-model weights file (a
    ``.pth`` that is not a G_/D_ training checkpoint) and an
    ``added_*.index`` feature index.

    Returns a list of content tags: ``".index"`` and/or ``".pth"``
    (index tag first), empty when neither is found.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    model_dir = os.path.join(os.path.join(parent_path, 'unzips'), name)

    found_weights = False
    found_index = False

    for walk_root, _subdirs, filenames in os.walk(model_dir):
        for fname in filenames:
            # Training checkpoints (G_/D_ prefixed or *_G_*/*_D_*) are not
            # the small inference model we are looking for.
            is_weight = (
                fname.endswith(".pth")
                and not fname.startswith("G_")
                and not fname.startswith("D_")
                and "_G_" not in fname
                and "_D_" not in fname
            )
            if is_weight:
                found_weights = True
            if fname.startswith('added_') and fname.endswith('.index'):
                found_index = True

    tags = []
    if found_index:
        tags.append(".index")
    if found_weights:
        tags.append(".pth")
    return tags
1083
+
1084
+
1085
def create_zip(new_name):
    """Stage a model's essential files and compress them into a zip.

    Copies the inference ``.pth`` (excluding G_/D_ training checkpoints)
    and the ``added_*.index`` file from ``unzips/<new_name>`` into
    ``temp_models/<new_name>``, then archives that folder as
    ``zips/<new_name>.zip``.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    temp_folder_path = os.path.join(parent_path, 'temp_models')
    unzips_path = os.path.join(parent_path, 'unzips')
    zips_path = os.path.join(parent_path, 'zips')

    source_dir = os.path.join(unzips_path, new_name)
    staging_dir = os.path.join(temp_folder_path, new_name)

    if not os.path.exists(zips_path):
        os.mkdir(zips_path)

    # Always start from a clean staging folder.
    if os.path.exists(staging_dir):
        shutil.rmtree(staging_dir)
    os.mkdir(staging_dir)

    while not os.path.exists(staging_dir):
        time.sleep(1)

    for walk_root, _subdirs, filenames in os.walk(source_dir):
        for fname in filenames:
            src = os.path.join(walk_root, fname)
            is_weight = (
                fname.endswith(".pth")
                and not fname.startswith("G_")
                and not fname.startswith("D_")
                and "_G_" not in fname
                and "_D_" not in fname
            )
            if is_weight:
                print(f'Copiando {src} a {os.path.join(temp_folder_path, new_name)}')
                shutil.copy(src, staging_dir)
            if fname.startswith('added_') and fname.endswith('.index'):
                print(f'Copiando {src} a {os.path.join(temp_folder_path, new_name)}')
                shutil.copy(src, staging_dir)

    print("Comprimiendo modelo")
    archive_base = os.path.join(zips_path, new_name)

    print(f"Comprimiendo {staging_dir} en {archive_base}")
    shutil.make_archive(archive_base, 'zip', staging_dir)
1121
+
1122
def upload_to_huggingface(file_path, new_filename):
    """Upload *file_path* to the juuxn/RVCModels Hugging Face repo.

    Args:
        file_path: Local path of the zip to upload.
        new_filename: Name the file gets inside the repository.

    Returns:
        The public ``resolve`` URL of the uploaded file.
    """
    # SECURITY: the auth token used to be hard-coded in this file. A token
    # committed to a public repo is compromised and must be rotated; read
    # it from the HF_TOKEN environment variable instead.
    api = HfApi()
    login(token=os.getenv("HF_TOKEN"))
    api.upload_file(
        path_or_fileobj=file_path,
        path_in_repo=new_filename,
        repo_id="juuxn/RVCModels",
        repo_type="model",
    )
    return f"https://huggingface.co/juuxn/RVCModels/resolve/main/{new_filename}"
1132
+
1133
+
1134
def publish_model_clicked(model_name, model_url, model_version, model_creator):
    """Download a model from *model_url*, validate it, and publish it.

    Flow: download the archive (Drive / mega / plain URL) -> unpack under
    unzips/ -> compute its md5 via get_md5 -> ask the spreadsheet-backed
    web service whether that hash is already registered -> if new and the
    archive contains a .pth, re-zip just the essential files, upload the
    zip to Hugging Face and register the metadata.

    Returns a status string on the duplicate/published paths, otherwise
    None (implicit).
    """

    # Google Apps Script endpoint acting as a spreadsheet-backed model registry.
    web_service_url = "https://script.google.com/macros/s/AKfycbzyrdLZzUww9qbjxnbnI08budD4yxbmRPHkWbp3UEJ9h3Id5cnNNVg0UtfFAnqqX5Rr/exec"
    # Normalize the display name to ASCII and a filesystem-safe variant.
    name = unidecode(model_name)
    new_name = unidecode(name.strip().replace(" ","_").replace("'",""))

    downloaded_path = ""
    url = model_url
    version = model_version
    creator = model_creator
    parent_path = find_folder_parent(".", "pretrained_v2")
    output_folder = os.path.join(parent_path, 'archivos_descargados')
    output_file = os.path.join(output_folder, f'{new_name}.zip')
    unzips_path = os.path.join(parent_path, 'unzips')
    zips_path = os.path.join(parent_path, 'zips')
    temp_folder_path = os.path.join(parent_path, 'temp_models')

    # Start every publish from clean download/staging folders.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)
    os.mkdir(output_folder)

    if os.path.exists(temp_folder_path):
        shutil.rmtree(temp_folder_path)
    os.mkdir(temp_folder_path)


    if url and 'drive.google.com' in url:
        # Download the item when the URL is a Google Drive link.
        downloaded_path = descargar_desde_drive(url, new_name, output_file)
    elif url and 'mega.nz' in url:
        # NOTE(review): descargar_desde_mega is declared with two parameters
        # (url, name); this three-argument call raises TypeError as written —
        # confirm the intended signature.
        downloaded_path = descargar_desde_mega(url, new_name, output_file)
    elif url and 'pixeldrain' in url:
        print("No se puede descargar de pixeldrain")
    else:
        downloaded_path = descargar_desde_url_basica(url, new_name, output_file)

    if not downloaded_path:
        print(f"No se pudo descargar: {name}")
    else:
        filename = name.strip().replace(" ","_")
        # NOTE(review): this literal looks like a mangled placeholder — it was
        # presumably an f-string built from the model name; verify upstream.
        dst =f'(unknown).zip'
        shutil.unpack_archive(downloaded_path, os.path.join(unzips_path, filename))
        md5_hash = get_md5(os.path.join(unzips_path, filename))

        if not md5_hash:
            # get_md5 found no small inference model inside the archive.
            print("No tiene modelo pequeño")
            return

        # Ask the registry whether this exact archive was already published.
        md5_response_raw = requests.post(web_service_url, json={
            'type': 'check_md5',
            'md5_hash': md5_hash
        })

        md5_response = md5_response_raw.json()
        ok = md5_response["ok"]
        exists = md5_response["exists"]
        message = md5_response["message"]

        # Content tags for the unpacked archive: ".pth" and/or ".index".
        is_valid = is_valid_model(filename)

        if md5_hash and exists:
            # Duplicate — already registered. NOTE(review): returning here
            # skips the cleanup at the bottom of this function.
            print(f"El archivo ya se ha publicado en spreadsheet con md5: {md5_hash}")
            return f"El archivo ya se ha publicado con md5: {md5_hash}"

        if ".pth" in is_valid and not exists:

            create_zip(filename)
            huggingface_url = upload_to_huggingface(os.path.join(zips_path,dst), dst)

            # Register the model metadata with the web service.
            response = requests.post(web_service_url, json={
                'type': 'save_model',
                'elements': [{
                    'name': name,
                    'filename': filename,
                    'url': [huggingface_url],
                    'version': version,
                    'creator': creator,
                    'md5_hash': md5_hash,
                    'content': is_valid
                }]})

            response_data = response.json()
            ok = response_data["ok"]
            message = response_data["message"]

            print({
                'name': name,
                'filename': filename,
                'url': [huggingface_url],
                'version': version,
                'creator': creator,
                'md5_hash': md5_hash,
                'content': is_valid
            })

            if ok:
                # NOTE(review): this return also skips the cleanup below.
                return f"El archivo se ha publicado con md5: {md5_hash}"
            else:
                print(message)
                return message

    # Remove the folder where the model zip was downloaded
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)

    # Remove the folder where the downloaded model was unpacked
    if os.path.exists(unzips_path):
        shutil.rmtree(unzips_path)

    # Remove the folder the essential model files were copied to
    if os.path.exists(temp_folder_path):
        shutil.rmtree(temp_folder_path)

    # Remove the folder where the model was zipped for the Hugging Face upload
    if os.path.exists(zips_path):
        shutil.rmtree(zips_path)
1250
+
1251
def uvr(input_url, output_path, model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
    """Download a YouTube video's audio and separate vocals/instrumental with UVR5.

    Generator: yields the accumulated status log ("file->Success" /
    "file->traceback") after each processed file and once more at the end.

    NOTE(review): the ``youtube_separator`` click handler wires only 7
    inputs into this 9-parameter function — confirm the intended argument
    mapping before relying on *agg*/*format0* positions.
    NOTE(review): *agg* is ignored; the separator below is built with a
    hard-coded agg=10.
    Relies on module-level names not defined here: weight_uvr5_root,
    config, tmp, MDXNetDereverb, _audio_pre_, _audio_pre_new.
    """
    carpeta_a_eliminar = "yt_downloads"
    if os.path.exists(carpeta_a_eliminar) and os.path.isdir(carpeta_a_eliminar):
        # Empty the download folder before fetching a new video.
        for archivo in os.listdir(carpeta_a_eliminar):
            ruta_archivo = os.path.join(carpeta_a_eliminar, archivo)
            if os.path.isfile(ruta_archivo):
                os.remove(ruta_archivo)
            elif os.path.isdir(ruta_archivo):
                shutil.rmtree(ruta_archivo)  # remove subfolders recursively

    def format_title(title):
        # Drop non-alphanumeric characters except whitespace and hyphens,
        # then replace spaces with underscores (filesystem-safe title).
        formatted_title = re.sub(r'[^\w\s-]', '', title)
        formatted_title = formatted_title.replace(" ", "_")
        return formatted_title

    ydl_opts = {
        'no-windows-filenames': True,
        'restrict-filenames': True,
        'extract_audio': True,
        'format': 'bestaudio',
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        # First pass: metadata only, to build a sanitized output template;
        # second pass (new YoutubeDL instance) performs the actual download.
        info_dict = ydl.extract_info(input_url, download=False)
        formatted_title = format_title(info_dict.get('title', 'default_title'))
        formatted_outtmpl = output_path + '/' + formatted_title + '.wav'
        ydl_opts['outtmpl'] = formatted_outtmpl
        ydl = yt_dlp.YoutubeDL(ydl_opts)
        ydl.download([input_url])

    infos = []
    pre_fun = None
    try:
        # Strip wrapping quotes/whitespace users paste into the path boxes.
        inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") if isinstance(x, str) else x for x in [inp_root, save_root_vocal, save_root_ins]]
        if model_name == "onnx_dereverb_By_FoxJoy":
            pre_fun = MDXNetDereverb(15)
        else:
            # DeEcho models need the newer pre-processing class.
            func = _audio_pre_ if "DeEcho" not in model_name else _audio_pre_new
            pre_fun = func(
                agg=10,
                model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
                device=config.device,
                is_half=config.is_half,
            )
        if inp_root != "":
            # A folder was given: process every file inside it.
            paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
        else:
            paths = [path.name for path in paths]
        for path in paths:
            inp_path = os.path.join(inp_root, path)
            need_reformat = 1
            done = 0
            try:
                # Stereo 44.1 kHz input can be fed to the separator directly.
                info = ffmpeg.probe(inp_path, cmd="ffprobe")
                if (
                    info["streams"][0]["channels"] == 2
                    and info["streams"][0]["sample_rate"] == "44100"
                ):
                    need_reformat = 0
                    pre_fun._path_audio_(
                        inp_path, save_root_ins, save_root_vocal, format0
                    )
                    done = 1
            except:
                need_reformat = 1
                traceback.print_exc()
            if need_reformat == 1:
                # Re-encode to 2-channel 44.1 kHz PCM before separating.
                tmp_path = "%s/%s.reformatted.wav" % (tmp, os.path.basename(inp_path))
                os.system(
                    "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"
                    % (inp_path, tmp_path)
                )
                inp_path = tmp_path
            try:
                if done == 0:
                    pre_fun._path_audio_(
                        inp_path, save_root_ins, save_root_vocal, format0
                    )
                infos.append("%s->Success" % (os.path.basename(inp_path)))
                yield "\n".join(infos)
            except:
                infos.append(
                    "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
                )
                yield "\n".join(infos)
    except:
        infos.append(traceback.format_exc())
        yield "\n".join(infos)
    finally:
        # Free model memory even when separation failed part-way through.
        try:
            if pre_fun is not None:  # check pre_fun exists before deleting it
                if model_name == "onnx_dereverb_By_FoxJoy":
                    del pre_fun.pred.model
                    del pre_fun.pred.model_
                else:
                    del pre_fun.model
                del pre_fun
        except:
            traceback.print_exc()
        print("clean_empty_cache")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        yield "\n".join(infos)
1357
+
1358
def publish_models():
    """Render the "publish a model" tab and wire it to publish_model_clicked."""
    with gr.Column():
        gr.Markdown("# Publicar un modelo en la comunidad")
        gr.Markdown("El modelo se va a verificar antes de publicarse. Importante que contenga el archivo **.pth** del modelo para que no sea rechazado.")

        name_box = gr.inputs.Textbox(lines=1, label="Nombre descriptivo del modelo Ej: (Ben 10 [Latino] - RVC V2 - 250 Epoch)")
        link_box = gr.inputs.Textbox(lines=1, label="Enlace del modelo")
        version_radio = gr.Radio(
            label="Versión",
            choices=["RVC v1", "RVC v2"],
            value="RVC v1",
            interactive=True,
        )
        creator_box = gr.inputs.Textbox(lines=1, label="ID de discord del creador del modelo Ej: <@123455656>")
        submit_btn = gr.Button("Publicar modelo")
        outcome_box = gr.Textbox(label="Resultado", value="", max_lines=20)

        submit_btn.click(
            fn=publish_model_clicked,
            inputs=[name_box, link_box, version_radio, creator_box],
            outputs=outcome_box,
        )
1376
+
1377
def download_model():
    """Render the inference-model download tab: URL box, status box, button."""
    gr.Markdown(value="# " + i18n("下载模型"))
    gr.Markdown(value=i18n("它用于下载您的推理模型。"))
    with gr.Row():
        url_box = gr.Textbox(label=i18n("网址"))
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        fetch_btn = gr.Button(i18n("下载"))
        fetch_btn.click(fn=load_downloaded_model, inputs=[url_box], outputs=[status_box])
1387
+
1388
def download_backup():
    """Render the training-backup download tab: URL box, status box, button."""
    gr.Markdown(value="# " + i18n("下载备份"))
    gr.Markdown(value=i18n("它用于下载您的训练备份。"))
    with gr.Row():
        url_box = gr.Textbox(label=i18n("网址"))
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        fetch_btn = gr.Button(i18n("下载"))
        fetch_btn.click(fn=load_downloaded_backup, inputs=[url_box], outputs=[status_box])
1398
+
1399
def update_dataset_list(name):
    """Rescan ./datasets and refresh the dataset dropdown choices.

    Entries containing a dot are skipped (treated as files, not dataset
    folders). The *name* argument is unused; it exists to fit the Gradio
    change-event signature.
    """
    refreshed = [
        os.path.join(find_folder_parent(".", "pretrained"), "datasets", entry)
        for entry in os.listdir("./datasets")
        if "." not in entry
    ]
    return gr.Dropdown.update(choices=refreshed)
1405
+
1406
def download_dataset(trainset_dir4):
    """Render the dataset download tab; refresh *trainset_dir4* afterwards."""
    gr.Markdown(value="# " + i18n("下载数据集"))
    gr.Markdown(value=i18n("下载兼容格式(.wav/.flac)的音频数据集以训练模型。"))
    with gr.Row():
        url_box = gr.Textbox(label=i18n("网址"))
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        fetch_btn = gr.Button(i18n("下载"))
        fetch_btn.click(fn=load_dowloaded_dataset, inputs=[url_box], outputs=[status_box])
    # Once the status text changes, rescan the datasets folder into the dropdown.
    status_box.change(update_dataset_list, url_box, trainset_dir4)
1417
+
1418
def youtube_separator():
    """Render the YouTube download + vocal/instrumental separation tab.

    Most components are hidden presets; only the URL box, status box and
    run button are visible. The run button feeds everything into uvr().
    """
    gr.Markdown(value="# " + i18n("单独的 YouTube 曲目"))
    gr.Markdown(value=i18n("下载 YouTube 视频的音频并自动分离声音和伴奏轨道"))
    with gr.Row():
        yt_url_box = gr.inputs.Textbox(label=i18n("粘贴 YouTube 链接"))
        # Hidden preset: where the YouTube audio gets downloaded.
        download_dir_box = gr.Textbox(
            label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
            value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/yt_downloads",
            visible=False,
        )
        # Hidden preset: the folder uvr() scans for input audio.
        input_dir_box = gr.Textbox(
            label=i18n("输入待处理音频文件夹路径"),
            value=((os.getcwd()).replace('\\', '/') + "/yt_downloads"),
            visible=False,
        )
        # Hidden preset: UVR5 model path.
        uvr_model_box = gr.Textbox(
            value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/uvr5_weights/HP5_only_main_vocal",
            visible=False,
        )
        vocals_dir_box = gr.Textbox(
            label=i18n("指定输出主人声文件夹"), value="audios",
            visible=False,
        )
        inst_dir_box = gr.Textbox(
            label=i18n("指定输出非主人声文件夹"), value="opt",
            visible=False,
        )
        export_format = gr.Radio(
            label=i18n("导出文件格式"),
            choices=["wav", "flac", "mp3", "m4a"],
            value="wav",
            interactive=True,
            visible=False,
        )
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        run_btn = gr.Button(i18n("下载并分离"))
        run_btn.click(
            uvr,
            [
                yt_url_box,
                download_dir_box,
                uvr_model_box,
                input_dir_box,
                vocals_dir_box,
                inst_dir_box,
                export_format,
            ],
            [status_box],
        )