from TTS.api import TTS
import json
import gradio as gr
from gradio import Dropdown
from share_btn import community_icon_html, loading_icon_html, share_js
import os
import shutil
import re
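
# MAX_NUMBER_SENTENCES caps how many sentences of a prompt are synthesized. It is
# used in infer() below but is not defined in this excerpt; 10 is an assumed value
# matching the "first 10 sentences" message shown to users.
MAX_NUMBER_SENTENCES = 10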

with open("characters.json", "r") as file:
    data = json.load(file)
    characters = [
        {
            "image": item["image"],
            "title": item["title"],
            "speaker": item["speaker"]
        }
        for item in data
    ]

tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
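

def split_process(audio, stem):
    # NOTE: assumed sketch. The original split_process is not included in this
    # excerpt; judging by its use in infer(), it should isolate the requested stem
    # ("vocals") from the uploaded recording, typically with a source-separation
    # model such as Demucs. This placeholder simply writes the raw audio coming
    # from the hidden gr.Audio(type="numpy") component to disk and returns the
    # resulting path so the rest of the pipeline can run unchanged.
    from scipy.io import wavfile  # assumed to be available in this environment

    sample_rate, data = audio
    out_dir = "processed"
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, f"{stem}.wav")
    wavfile.write(out_path, sample_rate, data)
    return out_path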


def update_selection(selected_state: gr.SelectData):
    # Gallery selection callback for the characters loaded above; only the title and
    # the selection state are needed downstream.
    c_title = characters[selected_state.index]["title"]

    return c_title, selected_state
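

def wipe_npz_file(folder_path):
    # NOTE: assumed sketch. wipe_npz_file is wired to audio_in.change below, but its
    # definition is not part of this excerpt. The likely intent is to delete the
    # cached Bark .npz voice prompt so a newly uploaded sample is re-embedded rather
    # than silently reusing the previous speaker file.
    if folder_path and os.path.isdir(folder_path):
        for f in os.listdir(folder_path):
            if f.endswith(".npz"):
                os.remove(os.path.join(folder_path, f))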


def infer(prompt, input_wav_file, clean_audio, hidden_numpy_audio):
    print("""
—————
NEW INFERENCE:
———————
    """)
    if prompt == "":
        gr.Warning("Don't forget to provide a TTS prompt!")

    if clean_audio:
        print("Cleaning the audio sample")
        new_name = os.path.splitext(os.path.basename(input_wav_file))[0]
        cleaned_dir = os.path.join("bark_voices", f"{new_name}_cleaned")
        if os.path.exists(cleaned_dir):
            # Reuse the previously cleaned version of this file.
            print("This file has already been cleaned")
            source_path = os.path.join(cleaned_dir, f"{new_name}_cleaned.wav")
        else:
            # Isolate the vocals, then rename the result to mark it as cleaned.
            source_path = split_process(hidden_numpy_audio, "vocals")
            new_path = os.path.join(os.path.dirname(source_path), f"{new_name}_cleaned.wav")
            os.rename(source_path, new_path)
            source_path = new_path
    else:
        source_path = input_wav_file

    destination_directory = "bark_voices"

    file_name = os.path.splitext(os.path.basename(source_path))[0]

    destination_path = os.path.join(destination_directory, file_name)

    os.makedirs(destination_path, exist_ok=True)

    shutil.move(source_path, os.path.join(
        destination_path, f"{file_name}.wav"))

    # Split the prompt into sentences and keep at most MAX_NUMBER_SENTENCES of them.
    sentences = re.split(r'(?<=[.!?])\s+', prompt)

    if len(sentences) > MAX_NUMBER_SENTENCES:
        gr.Info("Your text is too long. To keep this demo enjoyable for everyone, we only kept the first 10 sentences :) Duplicate this space and set MAX_NUMBER_SENTENCES for longer texts ;)")
        prompt = ' '.join(sentences[:MAX_NUMBER_SENTENCES])

    gr.Info("Generating audio from prompt")
    tts.tts_to_file(text=prompt,
                    file_path="output.wav",
                    voice_dir="bark_voices/",
                    speaker=f"{file_name}")

    # The speaker folder now holds the source wav plus the .npz voice prompt Bark generated.
    contents = os.listdir(f"bark_voices/{file_name}")
    for item in contents:
        print(item)

    print("Preparing final waveform video ...")
    tts_video = gr.make_waveform(audio="output.wav")
    print(tts_video)
    print("FINISHED")

    # Pick the .npz explicitly (os.listdir() order is not guaranteed) and return one
    # value per output component wired to submit_btn below.
    npz_name = next((f for f in contents if f.endswith(".npz")), contents[-1])
    return (gr.update(value="output.wav", visible=True),
            tts_video,
            gr.update(value=f"bark_voices/{file_name}/{npz_name}", visible=True),
            destination_path)


prompt_choices = [
    "I am very displeased with the progress being made to finish the cross-town transit line. This has been an embarrassing use of taxpayer dollars.",
    "Yes, John is my friend, but he was never at my house watching the baseball game.",
    "We are expecting a double digit increase in profits by the end of the fiscal year.",
    "Hi Grandma, just calling to ask for money, or I can't see you over the holidays."
]

positive_prompts = {
    prompt_choices[0]: "I am very pleased with the progress being made to finish the cross-town transit line. This has been an excellent use of taxpayer dollars.",
    prompt_choices[1]: "Yes, John is my friend. He was at my house watching the baseball game all night.",
    prompt_choices[2]: "We are expecting a modest single digit increase in profits by the end of the fiscal year.",
    prompt_choices[3]: "Hi Grandma it’s me, just calling to say I love you, and I can’t wait to see you over the holidays."
}

# Helper that maps a selected (negative) prompt to the suggested positive rephrasing
# shown next to the dropdown. The dropdown is created inside the Blocks layout below,
# and the .change listener is attached there as well: event listeners must be
# registered inside a gr.Blocks context, and the "texts_samples" output referenced at
# module level in the original is not defined anywhere in this excerpt.
def update_helper_text(prompt_choice):
    return positive_prompts.get(prompt_choice, '')

css = """
#col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.mic-wrap > button {
    width: 100%;
    height: 60px;
    font-size: 1.4em!important;
}
.record-icon.svelte-1thnwz {
    display: flex;
    position: relative;
    margin-right: var(--size-2);
    width: unset;
    height: unset;
}
span.record-icon > span.dot.svelte-1thnwz {
    width: 20px!important;
    height: 20px!important;
}
.animate-spin {
  animation: spin 1s linear infinite;
}
@keyframes spin {
  from {
      transform: rotate(0deg);
  }
  to {
      transform: rotate(360deg);
  }
}
#share-btn-container {
  display: flex; 
  padding-left: 0.5rem !important; 
  padding-right: 0.5rem !important; 
  background-color: #000000; 
  justify-content: center; 
  align-items: center; 
  border-radius: 9999px !important; 
  max-width: 15rem;
  height: 36px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column():

                prompt = Dropdown(
                    label="Text to speech prompt",
                    choices=prompt_choices,
                    elem_id="tts-prompt"
                )
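
                # Assumed helper component (not present in the original excerpt):
                # shows the positive rephrasing returned by update_helper_text().
                texts_samples = gr.Textbox(
                    label="Suggested positive rephrasing",
                    interactive=False,
                    elem_id="texts-samples"
                )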

                audio_in = gr.Audio(
                    label="WAV voice to clone",
                    type="filepath",
                    source="upload"
                )
                clean_sample = gr.Checkbox(
                    label="Clean sample?", value=False)
                hidden_audio_numpy = gr.Audio(
                    type="numpy", visible=False)
                submit_btn = gr.Button("Submit")
            with gr.Column():

                cloned_out = gr.Audio(
                    label="Text to speech output",
                    visible=False
                )

                video_out = gr.Video(
                    label="Waveform video",
                    elem_id="voice-video-out"
                )

                npz_file = gr.File(
                    label=".npz file",
                    visible=False
                )

                folder_path = gr.Textbox(visible=False)

    # Event listeners are attached inside the gr.Blocks context, as Gradio requires.
    prompt.change(
        fn=update_helper_text,
        inputs=[prompt],
        outputs=[texts_samples],
        queue=False
    )
    audio_in.change(fn=wipe_npz_file, inputs=[folder_path], queue=False)
    submit_btn.click(
        fn=infer,
        inputs=[
            prompt,
            audio_in,
            clean_sample,
            hidden_audio_numpy
        ],
        outputs=[
            cloned_out,
            video_out,
            npz_file,
            folder_path
        ]
    )

demo.queue(api_open=False, max_size=10).launch()