import gradio as gr
import os
import subprocess
import tempfile
import spaces
from typing import Dict, Iterator, List, Optional, Tuple
from pydub import AudioSegment
from rich.console import Console
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeRemainingColumn
from rich.text import Text
import time

console = Console()

# Request a GPU allocation on Hugging Face Spaces for the duration of this call
@spaces.GPU
def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Iterator[Tuple[Optional[str], Optional[List[str]], gr.HTML]]:
    log_messages = []

    def stream_log(message, style=""):
        # Accumulate formatted lines so the full log stays visible in the UI across yields
        formatted_message = f"<pre style='margin-bottom: 0;{style}'>[{model_name}] {message}</pre>"
        log_messages.append(formatted_message)
        return gr.HTML("".join(log_messages))

    yield None, None, stream_log("Initializing Demucs...", "color: #4CAF50; font-weight: bold;")
    time.sleep(1)  # Simulate initialization time

    yield None, None, stream_log("Loading audio file...", "color: #2196F3;")
    time.sleep(0.5)  # Simulate loading time
    
    if audio_file is None:
        yield None, None, stream_log("Error: No audio file provided", "color: #F44336;")
        raise gr.Error("Please upload an audio file")

    # Use absolute paths
    base_output_dir = os.path.abspath("separated")
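    # Mirror Demucs's output layout: <out>/<model_name>/<track_name>/<stem>.wav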
    output_dir = os.path.join(base_output_dir, model_name, os.path.splitext(os.path.basename(audio_file))[0])
    os.makedirs(output_dir, exist_ok=True)

    # Construct the Demucs command with full paths
    cmd = [
        "python", "-m", "demucs", 
        "--out", base_output_dir,
        "-n", model_name,
        audio_file
    ]

    yield None, None, stream_log("Preparing separation process...", "color: #FF9800;")
    time.sleep(0.5)  # Simulate preparation time

    try:
        # Run the Demucs CLI; capture its (very chatty) stderr in a temp file so an
        # unread pipe buffer cannot fill up and stall the process while we poll it below
        stderr_log = tempfile.TemporaryFile(mode="w+")
        process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=stderr_log)
        
        # Simulate a loading animation
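        # Note: this Rich progress bar renders in the server console, not in the Gradio UI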
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
            TimeRemainingColumn(),
            console=console
        ) as progress:
            task = progress.add_task("[cyan]Separating stems...", total=100)
            while process.poll() is None:
                progress.update(task, advance=1)
                time.sleep(0.1)
            progress.update(task, completed=100)

        if process.returncode != 0:
            stderr_log.seek(0)
            error_output = stderr_log.read()
            yield None, None, stream_log(f"Error: separation failed\n{error_output}", "color: #F44336;")
            raise gr.Error("Demucs separation failed. Check the logs for details.")

    except gr.Error:
        # Re-raise the error from the returncode check without re-wrapping it
        raise
    except Exception as e:
        yield None, None, stream_log(f"Unexpected error: {str(e)}", "color: #F44336;")
        raise gr.Error(f"An unexpected error occurred: {str(e)}")

    yield None, None, stream_log("Separation completed successfully!", "color: #4CAF50; font-weight: bold;")
    time.sleep(0.5)  # Pause for effect

    yield None, None, stream_log("Processing stems...", "color: #9C27B0;")
    time.sleep(0.5)  # Simulate processing time

    # The stems live in output_dir (see the layout note above)
    stem_search_dir = output_dir
    yield None, None, stream_log(f"Searching for stems in: {stem_search_dir}")

    stems: Dict[str, str] = {}
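    # Demucs names each stem file after its source: vocals.wav, drums.wav, bass.wav, other.wav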
    for stem in ["vocals", "drums", "bass", "other"]:
        stem_path = os.path.join(stem_search_dir, f"{stem}.wav")
        yield None, None, stream_log(f"Checking for {stem} stem at: {stem_path}")
        if os.path.exists(stem_path):
            stems[stem] = stem_path
            yield None, None, stream_log(f"Found {stem} stem")
        else:
            yield None, None, stream_log(f"Warning: {stem} stem not found")

    if not stems:
        yield None, None, stream_log("Error: No stems found. Checking alternative directory...")
        stem_search_dir = os.path.join(base_output_dir, model_name)
        for stem in ["vocals", "drums", "bass", "other"]:
            stem_path = os.path.join(stem_search_dir, f"{stem}.wav")
            yield None, None, stream_log(f"Checking for {stem} stem at: {stem_path}")
            if os.path.exists(stem_path):
                stems[stem] = stem_path
                yield None, None, stream_log(f"Found {stem} stem")
            else:
                yield None, None, stream_log(f"Warning: {stem} stem not found")

    yield None, None, stream_log(f"All found stems: {list(stems.keys())}")

    selected_stems: List[str] = []
    for stem, selected in zip(["vocals", "drums", "bass", "other"], [vocals, drums, bass, other]):
        if selected:
            yield None, None, stream_log(f"{stem} is selected by user")
            if stem in stems:
                selected_stems.append(stems[stem])
                yield None, None, stream_log(f"Selected {stem} stem for mixing")
            else:
                yield None, None, stream_log(f"Warning: {stem} was selected but not found")

    yield None, None, stream_log(f"Final selected stems: {selected_stems}")

    if not selected_stems:
        yield None, None, stream_log("Error: No stems selected for mixing", "color: #F44336;")
        raise gr.Error("Please select at least one stem to mix and ensure it was successfully separated.")

    output_file: str = os.path.join(output_dir, "mixed.wav")
    yield None, None, stream_log("Mixing selected stems...", "color: #FF5722;")
    time.sleep(0.5)  # Simulate mixing time
    
    # pydub's "+" concatenates segments end to end, so overlay the stems to mix them instead
    mixed_audio: AudioSegment = AudioSegment.from_wav(selected_stems[0])
    for stem_path in selected_stems[1:]:
        mixed_audio = mixed_audio.overlay(AudioSegment.from_wav(stem_path))
    mixed_audio.export(output_file, format="wav")

    if mp3:
        yield None, None, stream_log("Converting to MP3...", "color: #795548;")
        time.sleep(0.5)  # Simulate conversion time
        mp3_output_file: str = os.path.splitext(output_file)[0] + ".mp3"
        # Coerce the slider value to int so the bitrate string is always of the form "192k"
        mixed_audio.export(mp3_output_file, format="mp3", bitrate=f"{int(mp3_bitrate)}k")
        output_file = mp3_output_file

    yield None, None, stream_log("Process completed successfully!", "color: #4CAF50; font-weight: bold;")
    # Rich renders ANSI for the terminal, not HTML, so print the success panel to the
    # server console and send plain HTML to the Gradio log component
    console.print(
        Panel.fit(
            Text("Separation and mixing completed successfully!", style="bold green"),
            title="Demucs Result",
            border_style="green",
        )
    )
    yield output_file, list(stems.values()), gr.HTML(
        "<div style='color: #4CAF50; font-weight: bold;'>"
        "Separation and mixing completed successfully!</div>"
    )

# Define the Gradio interface
with gr.Blocks() as iface:
    gr.Markdown("# Demucs Music Source Separation and Mixing")
    gr.Markdown("Separate vocals, drums, bass, and other instruments from your music using Demucs and mix the selected stems.")
    
    with gr.Row():
        with gr.Column(scale=1):
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            model_dropdown = gr.Dropdown(
                ["htdemucs", "htdemucs_ft", "htdemucs_6s", "hdemucs_mmi", "mdx", "mdx_extra", "mdx_q", "mdx_extra_q"],
                label="Model Name",
                value="htdemucs_ft"
            )
            with gr.Row():
                vocals_checkbox = gr.Checkbox(label="Vocals", value=True)
                drums_checkbox = gr.Checkbox(label="Drums", value=True)
            with gr.Row():
                bass_checkbox = gr.Checkbox(label="Bass", value=True)
                other_checkbox = gr.Checkbox(label="Other", value=True)
            mp3_checkbox = gr.Checkbox(label="Save as MP3", value=False)
            mp3_bitrate = gr.Slider(128, 320, step=32, label="MP3 Bitrate", visible=False)
            submit_btn = gr.Button("Process", variant="primary")

        with gr.Column(scale=1):
            output_audio = gr.Audio(type="filepath", label="Processed Audio (Mixed)")
            stems_output = gr.File(label="Individual Stems", file_count="multiple")
            separation_log = gr.HTML()

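    # inference is a generator, so Gradio streams each yielded log update to the UI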
    submit_btn.click(
        fn=inference,
        inputs=[audio_input, model_dropdown, vocals_checkbox, drums_checkbox, bass_checkbox, other_checkbox, mp3_checkbox, mp3_bitrate],
        outputs=[output_audio, stems_output, separation_log]
    )
    
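    # Show the MP3 bitrate slider only when MP3 export is enabled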
    mp3_checkbox.change(
        fn=lambda mp3: gr.update(visible=mp3),
        inputs=mp3_checkbox,
        outputs=mp3_bitrate
    )

# Launch the Gradio interface
iface.launch()