Upload 6 files
Browse files- .gitignore +12 -0
- app.py +95 -0
- assets/i18n/i18n.py +50 -0
- requirements.txt +37 -0
- rvc/lib/tools/model_download.py +342 -0
- rvc/lib/tools/prerequisites_download.py +54 -0
.gitignore
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.exe
|
2 |
+
*.pt
|
3 |
+
*.onnx
|
4 |
+
*.pth
|
5 |
+
*.pyc
|
6 |
+
*.pth
|
7 |
+
*.index
|
8 |
+
*.wav
|
9 |
+
|
10 |
+
logs
|
11 |
+
env
|
12 |
+
venv
|
app.py
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
import sys
import os
import logging

# Make the project root importable before pulling in the tab modules.
now_dir = os.getcwd()
sys.path.append(now_dir)

# Tabs
from tabs.inference.inference import inference_tab
from tabs.train.train import train_tab
from tabs.extra.extra import extra_tab
from tabs.report.report import report_tab
from tabs.download.download import download_tab
from tabs.tts.tts import tts_tab
from tabs.settings.presence import presence_tab, load_config_presence
from tabs.settings.flask_server import flask_server_tab
from tabs.settings.themes import theme_tab
from tabs.plugins.plugins import plugins_tab
from tabs.settings.version import version_tab
from tabs.settings.lang import lang_tab
from tabs.settings.restart import restart_tab

# Assets
import assets.themes.loadThemes as loadThemes
from assets.i18n.i18n import I18nAuto
import assets.installation_checker as installation_checker
from assets.discord_presence import RPCManager
from assets.flask.server import start_flask, load_config_flask

i18n = I18nAuto()

# Start Discord Rich Presence only when enabled in the user config.
# (Was `== True`; truthiness is the idiomatic check.)
if load_config_presence():
    RPCManager.start_presence()

installation_checker.check_installation()

# Silence noisy third-party loggers in the console.
logging.getLogger("uvicorn").disabled = True
logging.getLogger("fairseq").disabled = True

if load_config_flask():
    print("Starting Flask server")
    start_flask()

# Use the configured theme, falling back to the default when none is set.
# (Replaces the `if my_applio: pass / else: ...` anti-pattern.)
my_applio = loadThemes.load_json() or "ParityError/Interstellar"

# Assemble the Gradio UI: one tab per feature plus a Settings tab.
with gr.Blocks(theme=my_applio, title="Applio & KanoyoTweaks") as Applio:
    gr.Markdown("# Applio & KanoyoTweaks")
    gr.Markdown(
        i18n(
            "Ultimate voice cloning tool, meticulously optimized for unrivaled power, modularity, and user-friendly experience."
        )
    )
    gr.Markdown(
        i18n(
            "[Support](https://discord.gg/IAHispano) — [Discord Bot](https://discord.com/oauth2/authorize?client_id=1144714449563955302&permissions=1376674695271&scope=bot%20applications.commands) — [Find Voices](https://applio.org/models) — [GitHub](https://github.com/IAHispano/Applio)"
        )
    )
    with gr.Tab(i18n("Inference")):
        inference_tab()

    with gr.Tab(i18n("Train")):
        train_tab()

    with gr.Tab(i18n("TTS")):
        tts_tab()

    with gr.Tab(i18n("Extra")):
        extra_tab()

    with gr.Tab(i18n("Plugins")):
        plugins_tab()

    with gr.Tab(i18n("Download")):
        download_tab()

    with gr.Tab(i18n("Report a Bug")):
        report_tab()

    with gr.Tab(i18n("Settings")):
        presence_tab()
        flask_server_tab()
        theme_tab()
        version_tab()
        lang_tab()
        restart_tab()


if __name__ == "__main__":
    # `--share` / `--open` are plain CLI flags forwarded to Gradio.
    Applio.launch(
        favicon_path="assets/ICON.ico",
        share="--share" in sys.argv,
        inbrowser="--open" in sys.argv,
        server_port=6969,
    )
|
assets/i18n/i18n.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
import json
from pathlib import Path
from locale import getdefaultlocale

now_dir = os.getcwd()
sys.path.append(now_dir)


class I18nAuto:
    """Minimal i18n helper.

    Loads a ``{key: translation}`` map for the selected language and
    translates via ``__call__``, falling back to the key itself when no
    translation exists.
    """

    # Directory holding one ``<lang>.json`` translation file per language.
    LANGUAGE_PATH = os.path.join(now_dir, "assets", "i18n", "languages")

    def __init__(self, language=None):
        # The UI language is normally taken from assets/config.json; when
        # `override` is false, auto-detection kicks in below.
        with open(os.path.join(now_dir, "assets", "config.json"), "r") as f:
            config = json.load(f)
            override = config["lang"]["override"]
            lang_prefix = config["lang"]["selected_lang"]

        self.language = lang_prefix

        if not override:
            # Fix: honour the explicit `language` argument, then fall back to
            # the system locale. The previous code read
            # `'ru_RU' or getdefaultlocale()[0]`, which always evaluated to
            # 'ru_RU' and made both the argument and the locale lookup dead
            # code (apparently a debug leftover).
            # NOTE(review): locale.getdefaultlocale() is deprecated since
            # Python 3.11 — kept here for compatibility with the rest of the
            # codebase.
            language = language or getdefaultlocale()[0]
            lang_prefix = language[:2] if language is not None else "en"
            available_languages = self._get_available_languages()
            matching_languages = [
                lang for lang in available_languages if lang.startswith(lang_prefix)
            ]
            self.language = matching_languages[0] if matching_languages else "en_US"

        self.language_map = self._load_language_list()

    def _load_language_list(self):
        """Load the translation map for ``self.language`` from its JSON file."""
        try:
            file_path = Path(self.LANGUAGE_PATH) / f"{self.language}.json"
            with open(file_path, "r", encoding="utf-8") as f:
                return json.load(f)
        except FileNotFoundError:
            raise FileNotFoundError(
                f"Failed to load language file for {self.language}. Check if the correct .json file exists."
            )

    def _get_available_languages(self):
        """Return the stems (e.g. 'en_US') of every bundled language file."""
        language_files = [path.stem for path in Path(self.LANGUAGE_PATH).glob("*.json")]
        return language_files

    def _language_exists(self, language):
        """Return True when a translation file exists for *language*."""
        return (Path(self.LANGUAGE_PATH) / f"{language}.json").exists()

    def __call__(self, key):
        """Translate *key*; return the key unchanged when untranslated."""
        return self.language_map.get(key, key)
|
requirements.txt
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Machine learning
|
2 |
+
fairseq==0.12.2
|
3 |
+
numba; sys_platform == 'linux'
|
4 |
+
numba==0.56.4
|
5 |
+
torch==2.1.1
|
6 |
+
torchcrepe==0.0.21
|
7 |
+
torchvision==0.16.1
|
8 |
+
einops
|
9 |
+
|
10 |
+
# General dependencies
|
11 |
+
ffmpeg-python>=0.2.0
|
12 |
+
numpy==1.23.5
|
13 |
+
requests==2.31.0
|
14 |
+
tqdm
|
15 |
+
wget
|
16 |
+
|
17 |
+
# Audio processing
|
18 |
+
faiss-cpu==1.7.3
|
19 |
+
librosa==0.9.1
|
20 |
+
pyworld==0.3.4
|
21 |
+
scipy==1.11.1
|
22 |
+
soundfile==0.12.1
|
23 |
+
praat-parselmouth
|
24 |
+
|
25 |
+
# Visualization
|
26 |
+
matplotlib==3.7.2
|
27 |
+
tensorboard
|
28 |
+
gradio==4.14.0
|
29 |
+
|
30 |
+
# Miscellaneous
|
31 |
+
ffmpy==0.3.1
|
32 |
+
tensorboardX
|
33 |
+
edge-tts==6.1.9
|
34 |
+
pypresence
|
35 |
+
beautifulsoup4
|
36 |
+
flask
|
37 |
+
local-attention
|
rvc/lib/tools/model_download.py
ADDED
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import wget
|
4 |
+
import zipfile
|
5 |
+
from bs4 import BeautifulSoup
|
6 |
+
import requests
|
7 |
+
from urllib.parse import unquote, urlencode, parse_qs, urlparse
|
8 |
+
import re
|
9 |
+
import shutil
|
10 |
+
|
11 |
+
|
12 |
+
def find_folder_parent(search_dir, folder_name):
    """Walk *search_dir* recursively and return the absolute path of the first
    directory that directly contains a subfolder named *folder_name*.

    Returns None when no such directory is found.
    """
    matches = (
        os.path.abspath(parent)
        for parent, subdirs, _ in os.walk(search_dir)
        if folder_name in subdirs
    )
    return next(matches, None)
|
17 |
+
|
18 |
+
|
19 |
+
now_dir = os.getcwd()
|
20 |
+
sys.path.append(now_dir)
|
21 |
+
|
22 |
+
from rvc.lib.utils import format_title
|
23 |
+
|
24 |
+
import rvc.lib.tools.gdown as gdown
|
25 |
+
|
26 |
+
file_path = find_folder_parent(now_dir, "logs")
|
27 |
+
|
28 |
+
zips_path = os.getcwd() + "/logs/zips"
|
29 |
+
|
30 |
+
|
31 |
+
def search_pth_index(folder):
    """Collect model weight and index file paths directly inside *folder*.

    Returns a tuple ``(pth_paths, index_paths)`` of full paths to regular
    files ending in ``.pth`` and ``.index`` respectively (subdirectories are
    not recursed into).

    Improvement: the original scanned the directory twice (one listdir +
    stat pass per extension); a single pass gathers both lists.
    """
    pth_paths = []
    index_paths = []
    for file in os.listdir(folder):
        full_path = os.path.join(folder, file)
        if not os.path.isfile(full_path):
            continue
        if file.endswith(".pth"):
            pth_paths.append(full_path)
        elif file.endswith(".index"):
            index_paths.append(full_path)
    return pth_paths, index_paths
|
44 |
+
|
45 |
+
|
46 |
+
def get_mediafire_download_link(url):
    """Scrape the direct download href from a MediaFire file page.

    Raises for non-2xx HTTP responses; returns None when the download
    button cannot be located in the page markup.
    """
    page = requests.get(url)
    page.raise_for_status()
    parsed = BeautifulSoup(page.text, "html.parser")
    button = parsed.find(
        "a", {"class": "input popsok", "aria-label": "Download file"}
    )
    return button.get("href") if button else None
|
58 |
+
|
59 |
+
|
60 |
+
def download_from_url(url):
    """Download a model archive from *url* into ``zips_path``.

    Supports Google Drive, Yandex Disk, pixeldrain, Discord CDN,
    Hugging Face blob/resolve links, Hugging Face ``/tree/main`` pages
    (first .zip link found), and a generic ``wget`` fallback.

    Returns "downloaded" on success, "too much use" / "private link" for
    the corresponding Google Drive errors, or None on failure.

    Side effects: creates ``zips_path``, changes the process working
    directory while downloading (restored to ``now_dir`` on most exit
    paths — the pixeldrain branch restores to ``file_path`` instead),
    and renames downloaded files to replace inner dots with underscores.
    """
    os.makedirs(zips_path, exist_ok=True)
    if url != "":
        if "drive.google.com" in url:
            # Extract the file id from either URL form.
            if "file/d/" in url:
                file_id = url.split("file/d/")[1].split("/")[0]
            elif "id=" in url:
                file_id = url.split("id=")[1].split("&")[0]
            else:
                return None

            if file_id:
                os.chdir(zips_path)
                try:
                    gdown.download(
                        f"https://drive.google.com/uc?id={file_id}",
                        quiet=False,
                        fuzzy=True,
                    )
                except Exception as error:
                    # Map known gdown error messages to sentinel return values
                    # that the caller (and UI) can present to the user.
                    error_message = str(error)
                    if (
                        "Too many users have viewed or downloaded this file recently"
                        in error_message
                    ):
                        os.chdir(now_dir)
                        return "too much use"
                    elif (
                        "Cannot retrieve the public link of the file." in error_message
                    ):
                        os.chdir(now_dir)
                        return "private link"
                    else:
                        print(error_message)
                        os.chdir(now_dir)
                        return None

        elif "disk.yandex.ru" in url:
            # Resolve the public share link to a direct download URL via the
            # Yandex Disk cloud API, then fetch the file body in one request.
            base_url = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?'
            public_key = url
            final_url = base_url + urlencode(dict(public_key=public_key))
            response = requests.get(final_url)
            download_url = response.json()['href']
            download_response = requests.get(download_url)

            if download_response.status_code == 200:
                # The real filename travels in the download URL's query string.
                filename = parse_qs(urlparse(unquote(download_url)).query).get('filename', [''])[0]
                if filename:
                    os.chdir(zips_path)
                    with open(filename, 'wb') as f:
                        f.write(download_response.content)
                else:
                    print("Failed to get filename from URL.")
                    return None

        elif "pixeldrain.com" in url:
            try:
                file_id = url.split("pixeldrain.com/u/")[1]
                os.chdir(zips_path)
                print(file_id)
                response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
                if response.status_code == 200:
                    # Filename comes from the Content-Disposition header.
                    file_name = (
                        response.headers.get("Content-Disposition")
                        .split("filename=")[-1]
                        .strip('";')
                    )
                    os.makedirs(zips_path, exist_ok=True)
                    with open(os.path.join(zips_path, file_name), "wb") as newfile:
                        newfile.write(response.content)
                    # NOTE(review): this branch restores cwd to `file_path`
                    # rather than `now_dir` like the others — confirm intended.
                    os.chdir(file_path)
                    return "downloaded"
                else:
                    os.chdir(file_path)
                    return None
            except Exception as e:
                print(e)
                os.chdir(file_path)
                return None

        elif "cdn.discordapp.com" in url:
            file = requests.get(url)
            os.chdir(zips_path)
            if file.status_code == 200:
                # Save under the last URL path segment.
                name = url.split("/")
                with open(os.path.join(name[-1]), "wb") as newfile:
                    newfile.write(file.content)
            else:
                return None

        elif "/blob/" in url or "/resolve/" in url:
            # Hugging Face file links: normalize /blob/ to the direct
            # /resolve/ form, then stream with a console progress bar.
            os.chdir(zips_path)
            if "/blob/" in url:
                url = url.replace("/blob/", "/resolve/")

            response = requests.get(url, stream=True)
            if response.status_code == 200:
                file_name = url.split("/")[-1]
                file_name = unquote(file_name)

                # Sanitize the name for the local filesystem.
                file_name = re.sub(r"[^a-zA-Z0-9_.-]", "_", file_name)

                total_size_in_bytes = int(response.headers.get("content-length", 0))
                block_size = 1024
                progress_bar_length = 50
                progress = 0

                with open(os.path.join(zips_path, file_name), "wb") as file:
                    for data in response.iter_content(block_size):
                        file.write(data)
                        progress += len(data)
                        # NOTE(review): divides by total_size_in_bytes, which
                        # is 0 when the server omits Content-Length — would
                        # raise ZeroDivisionError here.
                        progress_percent = int((progress / total_size_in_bytes) * 100)
                        num_dots = int(
                            (progress / total_size_in_bytes) * progress_bar_length
                        )
                        progress_bar = (
                            "["
                            + "." * num_dots
                            + " " * (progress_bar_length - num_dots)
                            + "]"
                        )
                        print(
                            f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ",
                            end="\r",
                        )
                        if progress_percent == 100:
                            print("\n")

            else:
                os.chdir(now_dir)
                return None
        elif "/tree/main" in url:
            # Hugging Face repo page: scrape the first .zip link and fetch it.
            os.chdir(zips_path)
            response = requests.get(url)
            soup = BeautifulSoup(response.content, "html.parser")
            temp_url = ""
            for link in soup.find_all("a", href=True):
                if link["href"].endswith(".zip"):
                    temp_url = link["href"]
                    break
            if temp_url:
                url = temp_url
                url = url.replace("blob", "resolve")
                if "huggingface.co" not in url:
                    url = "https://huggingface.co" + url

                wget.download(url)
            else:
                os.chdir(now_dir)
                return None
        else:
            # Generic fallback for any other host.
            try:
                os.chdir(zips_path)
                wget.download(url)
            except Exception as error:
                os.chdir(now_dir)
                print(error)
                return None

        # Normalize downloaded filenames: join all name parts before the
        # extension with underscores (e.g. "a.b.zip" -> "a_b.zip").
        for currentPath, _, zipFiles in os.walk(zips_path):
            for Files in zipFiles:
                filePart = Files.split(".")
                extensionFile = filePart[len(filePart) - 1]
                filePart.pop()
                nameFile = "_".join(filePart)
                realPath = os.path.join(currentPath, Files)
                os.rename(realPath, nameFile + "." + extensionFile)

        os.chdir(now_dir)
        return "downloaded"

    os.chdir(now_dir)
    return None
|
233 |
+
|
234 |
+
|
235 |
+
def extract_and_show_progress(zipfile_path, unzips_path):
    """Extract every member of *zipfile_path* into *unzips_path*, then delete
    the archive.

    Returns True on success, False on any error (the error is printed).
    """
    try:
        with zipfile.ZipFile(zipfile_path, "r") as archive:
            for member in archive.infolist():
                archive.extract(member, unzips_path)
        os.remove(zipfile_path)
        return True
    except Exception as error:
        print(error)
        return False
|
245 |
+
|
246 |
+
|
247 |
+
def unzip_file(zip_path, zip_file_name):
    """Extract ``<zip_file_name>.zip`` found in *zip_path* into
    ``file_path/<zip_file_name>`` and delete the archive afterwards."""
    archive = os.path.join(zip_path, f"{zip_file_name}.zip")
    destination = os.path.join(file_path, zip_file_name)
    with zipfile.ZipFile(archive, "r") as bundle:
        bundle.extractall(destination)
    os.remove(archive)
|
253 |
+
|
254 |
+
|
255 |
+
# --- Script entry point: download a model archive, extract it under logs/,
# and normalize the .pth / .index filenames to match the model name. ---

url = sys.argv[1]

# Strip Hugging Face's optional download flag so the URL resolves cleanly.
if "?download=true" in url:
    url = url.replace("?download=true", "")

verify = download_from_url(url)

if verify == "downloaded":
    extract_folder_path = ""
    for filename in os.listdir(zips_path):
        if filename.endswith(".zip"):
            zipfile_path = os.path.join(zips_path, filename)
            print("Proceeding with the extraction...")

            # Derive the target folder name from the zip's base name.
            model_zip = os.path.basename(zipfile_path)
            model_name = format_title(model_zip.split(".zip")[0])
            extract_folder_path = os.path.join(
                "logs",
                os.path.normpath(model_name),
            )

            success = extract_and_show_progress(zipfile_path, extract_folder_path)

            # If the archive wrapped everything in a single subfolder,
            # flatten it into extract_folder_path.
            subfolders = [
                f
                for f in os.listdir(extract_folder_path)
                if os.path.isdir(os.path.join(extract_folder_path, f))
            ]
            if len(subfolders) == 1:
                subfolder_path = os.path.join(extract_folder_path, subfolders[0])
                for item in os.listdir(subfolder_path):
                    s = os.path.join(subfolder_path, item)
                    d = os.path.join(extract_folder_path, item)
                    shutil.move(s, d)
                os.rmdir(subfolder_path)

            # Rename extracted files so both the .pth and the .index carry
            # the model name.
            for item in os.listdir(extract_folder_path):
                if ".pth" in item:
                    file_name = item.split(".pth")[0]
                    if file_name != model_name:
                        os.rename(
                            os.path.join(extract_folder_path, item),
                            os.path.join(extract_folder_path, model_name + ".pth"),
                        )
                else:
                    # NOTE(review): every non-.pth file is assumed to be an
                    # RVC index named "<x>_nprobe_1_<name>_v1/_v2.index";
                    # any other file would raise IndexError on the split
                    # below — confirm archives never contain extra files.
                    if "v2" not in item:
                        file_name = item.split("_nprobe_1_")[1].split("_v1")[0]
                        if file_name != model_name:
                            new_file_name = (
                                item.split("_nprobe_1_")[0]
                                + "_nprobe_1_"
                                + model_name
                                + "_v1"
                            )
                            os.rename(
                                os.path.join(extract_folder_path, item),
                                os.path.join(
                                    extract_folder_path, new_file_name + ".index"
                                ),
                            )
                    else:
                        file_name = item.split("_nprobe_1_")[1].split("_v2")[0]
                        if file_name != model_name:
                            new_file_name = (
                                item.split("_nprobe_1_")[0]
                                + "_nprobe_1_"
                                + model_name
                                + "_v2"
                            )
                            os.rename(
                                os.path.join(extract_folder_path, item),
                                os.path.join(
                                    extract_folder_path, new_file_name + ".index"
                                ),
                            )

            if success:
                print(f"Model {model_name} downloaded!")
            else:
                print(f"Error downloading {model_name}")
                sys.exit()
    # No .zip was found in zips_path: nothing was extracted.
    if extract_folder_path == "":
        print("Zip file was not found.")
        sys.exit()
    result = search_pth_index(extract_folder_path)
else:
    message = "Error"
    sys.exit()
|
rvc/lib/tools/prerequisites_download.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import wget
import sys

# Base URL of the Hugging Face repo hosting all prerequisite files.
url_base = "https://huggingface.co/kanoyo/0v2Super/resolve/main"

# (remote folder, files) pairs for pretrained model weights.
models_download = [
    (
        "pretrained_v2/",
        [
            "f0D40k.pth",
            "f0G40k.pth",
        ],
    ),
]

# Single files fetched from the repository root into matching local paths.
models_file = [
    "hubert_base.pt",
    "rmvpe.pt",
    "fcpe.pt",
    # "rmvpe.onnx"
]

# Windows-only executables (currently none).
executables_file = []

# Maps a remote folder to the local directory its files land in.
folder_mapping = {
    "pretrained_v2/": "rvc/pretraineds/pretrained_v2/",
}


def _download_if_missing(url, destination_path):
    """Download *url* to *destination_path* unless the file already exists,
    creating parent directories as needed. (Shared helper — the original
    script repeated this block verbatim three times.)"""
    if not os.path.exists(destination_path):
        os.makedirs(os.path.dirname(destination_path) or ".", exist_ok=True)
        print(f"\nDownloading {url} to {destination_path}...")
        wget.download(url, out=destination_path)


for file_name in models_file:
    _download_if_missing(f"{url_base}/{file_name}", os.path.join(file_name))

# Executables are only relevant on Windows; the platform check is hoisted
# out of the loop since it is invariant across iterations.
if sys.platform == "win32":
    for file_name in executables_file:
        _download_if_missing(f"{url_base}/{file_name}", os.path.join(file_name))

for remote_folder, file_list in models_download:
    local_folder = folder_mapping.get(remote_folder, "")
    for file in file_list:
        _download_if_missing(
            f"{url_base}/{remote_folder}{file}",
            os.path.join(local_folder, file),
        )
|