NeuralFalcon committed
Commit 41cb666 · verified · 1 Parent(s): 9aec169

Create app.py

Files changed (1)
  app.py +303 -0
app.py ADDED
@@ -0,0 +1,303 @@
# Initialize a pipeline
from kokoro import KPipeline
# from IPython.display import display, Audio
# import soundfile as sf
import os
from huggingface_hub import list_repo_files
import uuid
import re
import gradio as gr


# Language mapping dictionary
language_map = {
    "American English": "a",
    "British English": "b",
    "Hindi": "h",
    "Spanish": "e",
    "French": "f",
    "Italian": "i",
    "Brazilian Portuguese": "p",
    "Japanese": "j",
    "Mandarin Chinese": "z"
}

# Extra install commands required for some languages
install_messages = {
    "Japanese": "pip install misaki[ja]",
    "Mandarin Chinese": "pip install misaki[zh]"
}

def update_pipeline(Language):
    """Update the global pipeline only if the language has changed."""
    global pipeline, last_used_language

    # Warn the user if the selected language needs an extra install
    if Language in install_messages:
        # raise gr.Error(f"To use {Language}, install: {install_messages[Language]}", duration=10)
        gr.Warning(f"To use {Language}, install: {install_messages[Language]}", duration=10)
        # gr.Warning("Reverting to default English pipeline...", duration=5)

        # Revert to default English and return immediately
        pipeline = KPipeline(lang_code="a")
        last_used_language = "a"
        return

    # Get language code, default to 'a' if not found
    new_lang = language_map.get(Language, "a")

    # Only update if the language is different
    if new_lang != last_used_language:
        try:
            pipeline = KPipeline(lang_code=new_lang)
            last_used_language = new_lang  # Update last used language
            print(f"Pipeline updated to {Language} ({new_lang})")
        except Exception as e:
            print(f"Error initializing KPipeline: {e}\nRetrying with default language...")
            pipeline = KPipeline(lang_code="a")  # Fall back to English
            last_used_language = "a"

def get_voice_names(repo_id):
    """Fetch and return a list of voice names (without extensions) from the given Hugging Face repository."""
    return [os.path.splitext(file.replace("voices/", ""))[0] for file in list_repo_files(repo_id) if file.startswith("voices/")]

def create_audio_dir():
    """Create the 'kokoro_audio' directory in the root folder if it doesn't exist."""
    root_dir = os.getcwd()  # Use the current working directory instead of __file__
    audio_dir = os.path.join(root_dir, "kokoro_audio")

    if not os.path.exists(audio_dir):
        os.makedirs(audio_dir)
        print(f"Created directory: {audio_dir}")
    else:
        print(f"Directory already exists: {audio_dir}")
    return audio_dir

def clean_text(text):
    # Define replacement rules
    replacements = {
        "–": " ",   # Replace en-dash with space
        "-": " ",   # Replace hyphen with space
        "**": " ",  # Replace double asterisks with space
        "*": " ",   # Replace single asterisk with space
        "#": " ",   # Replace hash with space
    }

    # Apply replacements
    for old, new in replacements.items():
        text = text.replace(old, new)

    # Remove emojis using regex (covering a wide range of Unicode characters)
    emoji_pattern = re.compile(
        r'[\U0001F600-\U0001F64F]|'  # Emoticons
        r'[\U0001F300-\U0001F5FF]|'  # Miscellaneous symbols and pictographs
        r'[\U0001F680-\U0001F6FF]|'  # Transport and map symbols
        r'[\U0001F700-\U0001F77F]|'  # Alchemical symbols
        r'[\U0001F780-\U0001F7FF]|'  # Geometric shapes extended
        r'[\U0001F800-\U0001F8FF]|'  # Supplemental arrows-C
        r'[\U0001F900-\U0001F9FF]|'  # Supplemental symbols and pictographs
        r'[\U0001FA00-\U0001FA6F]|'  # Chess symbols
        r'[\U0001FA70-\U0001FAFF]|'  # Symbols and pictographs extended-A
        r'[\U00002702-\U000027B0]|'  # Dingbats
        r'[\U0001F1E0-\U0001F1FF]'   # Flags (iOS)
        r'', flags=re.UNICODE)

    text = emoji_pattern.sub(r'', text)

    # Collapse multiple spaces and extra line breaks
    text = re.sub(r'\s+', ' ', text).strip()

    return text

def tts_file_name(text):
    global temp_folder
    # Keep only letters and spaces, lowercase, and replace spaces with underscores
    text = re.sub(r'[^a-zA-Z\s]', '', text)
    text = text.lower().strip()
    text = text.replace(" ", "_")

    # Truncate, or fall back to "empty" if nothing is left
    truncated_text = text[:20] if text else "empty"

    # Generate a random string for uniqueness
    random_string = uuid.uuid4().hex[:8].upper()

    # Construct the file name
    file_name = f"{temp_folder}/{truncated_text}_{random_string}.wav"
    return file_name


# import soundfile as sf
import numpy as np
import wave
from pydub import AudioSegment
from pydub.silence import split_on_silence

def remove_silence_function(file_path, minimum_silence=50):
    # Derive the output path from the input path
    output_path = file_path.replace(".wav", "_no_silence.wav")
    audio_format = "wav"
    # Read the audio file and split it into chunks on silence
    sound = AudioSegment.from_file(file_path, format=audio_format)
    audio_chunks = split_on_silence(sound,
                                    min_silence_len=100,
                                    silence_thresh=-45,
                                    keep_silence=minimum_silence)
    # Put the file back together
    combined = AudioSegment.empty()
    for chunk in audio_chunks:
        combined += chunk
    combined.export(output_path, format=audio_format)
    return output_path

def generate_and_save_audio(text, Language="American English", voice="af_bella", speed=1, remove_silence=False, keep_silence_up_to=0.05):
    text = clean_text(text)
    update_pipeline(Language)
    generator = pipeline(text, voice=voice, speed=speed, split_pattern=r'\n+')
    save_path = tts_file_name(text)
    # Open the WAV file for writing
    with wave.open(save_path, 'wb') as wav_file:
        # Set the WAV file parameters
        wav_file.setnchannels(1)      # Mono audio
        wav_file.setsampwidth(2)      # 2 bytes per sample (16-bit audio)
        wav_file.setframerate(24000)  # Sample rate

        # Process each audio chunk
        for i, (gs, ps, audio) in enumerate(generator):
            # print(f"{i}. {gs}")
            # print(f"Phonetic Transcription: {ps}")
            # display(Audio(data=audio, rate=24000, autoplay=i==0))

            # Convert the Tensor to a NumPy array and scale to 16-bit range
            audio_np = audio.numpy()
            audio_int16 = (audio_np * 32767).astype(np.int16)
            audio_bytes = audio_int16.tobytes()

            # Write the audio chunk to the WAV file
            wav_file.writeframes(audio_bytes)
    if remove_silence:
        keep_silence = int(keep_silence_up_to * 1000)
        new_wave_file = remove_silence_function(save_path, minimum_silence=keep_silence)
        return new_wave_file, new_wave_file
    return save_path, save_path


def ui():
    def toggle_autoplay(autoplay):
        return gr.Audio(interactive=False, label='Output Audio', autoplay=autoplay)

    # Example prompts for each supported language
    dummy_examples = [
        ["Hey, y'all, let’s grab some coffee and catch up!", "American English", "af_bella"],
        ["I'd like a large coffee, please.", "British English", "bf_isabella"],
        ["नमस्ते, कैसे हो?", "Hindi", "hf_alpha"],
        ["Hola, ¿cómo estás?", "Spanish", "ef_dora"],
        ["Bonjour, comment ça va?", "French", "ff_siwis"],
        ["Ciao, come stai?", "Italian", "if_sara"],
        ["Olá, como você está?", "Brazilian Portuguese", "pf_dora"],
        ["こんにちは、お元気ですか?", "Japanese", "jf_nezumi"],
        ["你好,你怎么样?", "Mandarin Chinese", "zf_xiaoni"]
    ]

    with gr.Blocks() as demo:
        # gr.Markdown("<center><h1 style='font-size: 40px;'>KOKORO TTS</h1></center>")  # Larger title with CSS
        lang_list = ['American English', 'British English', 'Hindi', 'Spanish', 'French', 'Italian', 'Brazilian Portuguese', 'Japanese', 'Mandarin Chinese']
        voice_names = get_voice_names("hexgrad/Kokoro-82M")

        with gr.Row():
            with gr.Column():
                text = gr.Textbox(label='Enter Text', lines=3)

                with gr.Row():
                    language_name = gr.Dropdown(lang_list, label="Select Language", value=lang_list[0])

                with gr.Row():
                    voice_name = gr.Dropdown(voice_names, label="Choose VoicePack", value=voice_names[0])

                with gr.Row():
                    generate_btn = gr.Button('Generate', variant='primary')

                with gr.Accordion('Audio Settings', open=False):
                    speed = gr.Slider(minimum=0.25, maximum=2, value=1, step=0.1, label='⚡️Speed', info='Adjust the speaking speed')
                    remove_silence = gr.Checkbox(value=False, label='✂️ Remove Silence From TTS')

            with gr.Column():
                audio = gr.Audio(interactive=False, label='Output Audio', autoplay=True)
                audio_file = gr.File(label='Download Audio')

                with gr.Accordion('Enable Autoplay', open=False):
                    autoplay = gr.Checkbox(value=True, label='Autoplay')
                    autoplay.change(toggle_autoplay, inputs=[autoplay], outputs=[audio])

        text.submit(generate_and_save_audio, inputs=[text, language_name, voice_name, speed, remove_silence], outputs=[audio, audio_file])
        generate_btn.click(generate_and_save_audio, inputs=[text, language_name, voice_name, speed, remove_silence], outputs=[audio, audio_file])

        # Add examples to the interface
        gr.Examples(examples=dummy_examples, inputs=[text, language_name, voice_name])

    return demo

def tutorial():
    # Markdown explanation of the voice naming convention
    explanation = """
    ## Language Code Explanation:
    Example: `'af_bella'`
    - **'a'** stands for **American English**.
    - **'f_'** stands for **Female** (if it were 'm_', it would mean Male).
    - **'bella'** refers to the specific voice.

    The first character in the voice code stands for the language:
    - **"a"**: American English
    - **"b"**: British English
    - **"h"**: Hindi
    - **"e"**: Spanish
    - **"f"**: French
    - **"i"**: Italian
    - **"p"**: Brazilian Portuguese
    - **"j"**: Japanese
    - **"z"**: Mandarin Chinese

    The second character stands for gender:
    - **"f_"**: Female
    - **"m_"**: Male
    """
    with gr.Blocks() as demo2:
        gr.Markdown(explanation)  # Display the explanation
    return demo2


import click

@click.command()
@click.option("--debug", is_flag=True, default=False, help="Enable debug mode.")
@click.option("--share", is_flag=True, default=False, help="Enable sharing of the interface.")
def main(debug, share):
    demo1 = ui()
    demo2 = tutorial()
    demo = gr.TabbedInterface([demo1, demo2], ["Multilingual TTS", "VoicePack Explanation"], title="Kokoro TTS", theme='JohnSmith9982/small_and_pretty')
    demo.queue().launch(debug=debug, share=share)
    # Run on a local network instead:
    # laptop_ip = "192.168.0.30"
    # port = 8080
    # demo.queue().launch(debug=debug, share=share, server_name=laptop_ip, server_port=port)


# Initialize the default pipeline
last_used_language = "a"
pipeline = KPipeline(lang_code=last_used_language)
temp_folder = create_audio_dir()

if __name__ == "__main__":
    main()
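
The click options defined on main mean the app is normally launched with `python app.py`, optionally adding `--debug` or `--share`. For scripting the synthesizer without the Gradio UI, a minimal sketch is shown below. It assumes the file above is saved as app.py, that the kokoro, gradio, huggingface_hub, numpy, pydub, and click packages are installed, and that importing app is acceptable even though it builds the default English pipeline and creates the kokoro_audio folder at import time; the input text is illustrative only.

# Minimal usage sketch (assumption: the module above is saved as app.py).
# Importing it initializes the default English pipeline and creates kokoro_audio/.
from app import generate_and_save_audio

# Synthesize a short sentence with the default American English voice pack.
wav_path, _ = generate_and_save_audio(
    "Hello from Kokoro!",         # input text (cleaned internally by clean_text)
    Language="American English",  # any key of language_map
    voice="af_bella",             # a voice pack name from hexgrad/Kokoro-82M
    speed=1,                      # speaking speed multiplier
    remove_silence=False,         # set True to trim long pauses via pydub
)
print("Saved audio to:", wav_path)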