import gradio as gr
import os
import subprocess
import spaces
import torch
from typing import Tuple, List, Dict, Generator
from pydub import AudioSegment
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeRemainingColumn
import time
import shutil

# Get the terminal width, or use a default if not available
terminal_width = shutil.get_terminal_size((80, 20)).columns

# Create a console with a specific width
console = Console(width=min(terminal_width, 100))  # Limit to 100 columns max

def fade_text(text, duration=0.5):
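    # Yield HTML frames with gradually increasing opacity so the text appears to fade in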
    for i in range(10):
        opacity = i / 10
        yield f"<div style='opacity: {opacity}; transition: opacity 0.05s;'>{text}</div>"
        time.sleep(duration / 10)

@spaces.GPU
def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Generator[Tuple, None, None]:
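    # Separate the uploaded track with Demucs, streaming progress to the UI,
    # then mix the user-selected stems into a single downloadable file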
    log_messages = []
    
    def stream_log(message, style=""):
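        # Tag each message with the model name, keep it in the log, and yield it as a fading HTML frame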
        formatted_message = f"[{model_name}] {message}"
        log_messages.append(formatted_message)
        for frame in fade_text(f"<pre style='margin-bottom: 0;{style}'>{formatted_message}</pre>"):
            yield None, None, gr.HTML(frame)

    yield from stream_log("Initializing Demucs...", "color: #4CAF50; font-weight: bold;")
    time.sleep(1)  # Simulate initialization time

    yield from stream_log("Loading audio file...", "color: #2196F3;")
    time.sleep(0.5)  # Simulate loading time
    
    if audio_file is None:
        yield from stream_log("Error: No audio file provided", "color: #F44336;")
        raise gr.Error("Please upload an audio file")

    # Use absolute paths
    base_output_dir = os.path.abspath("separated")
    output_dir = os.path.join(base_output_dir, model_name, os.path.splitext(os.path.basename(audio_file))[0])
    os.makedirs(output_dir, exist_ok=True)

    # Check if CUDA is available
    cuda_available = torch.cuda.is_available()
    device = "cuda" if cuda_available else "cpu"
    yield from stream_log(f"Using device: {device}", "color: #4CAF50; font-weight: bold;")

    # Construct the Demucs command with full paths and GPU flag
    cmd = [
        "python", "-m", "demucs", 
        "--out", base_output_dir,
        "-n", model_name,
        "--device", device,
        audio_file
    ]

    yield from stream_log("Preparing separation process...", "color: #FF9800;")
    time.sleep(0.5)  # Simulate preparation time

    try:
        # Set CUDA_VISIBLE_DEVICES environment variable
        env = os.environ.copy()
        if cuda_available:
            env["CUDA_VISIBLE_DEVICES"] = "0"  # Use the first GPU
        
        # Run the Demucs command
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=env)
        
        # Simulate a loading animation with adjusted width
        progress_width = min(terminal_width - 20, 60)  # Adjust the width of the progress bar
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            BarColumn(bar_width=progress_width),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
            TimeRemainingColumn(),
            console=console
        ) as progress:
            task = progress.add_task("[cyan]Separating stems...", total=100)
            while process.poll() is None:
                progress.update(task, advance=1)
                time.sleep(0.1)
            progress.update(task, completed=100)

        if process.returncode != 0:
            error_output = process.stderr.read()
            yield from stream_log(f"Error: Separation failed: {error_output}", "color: #F44336;")
            raise gr.Error("Demucs separation failed. Check the logs for details.")

    except Exception as e:
        yield from stream_log(f"Unexpected error: {str(e)}", "color: #F44336;")
        raise gr.Error(f"An unexpected error occurred: {str(e)}")

    yield from stream_log("Separation completed successfully!", "color: #4CAF50; font-weight: bold;")
    time.sleep(0.5)  # Pause for effect

    yield from stream_log("Processing stems...", "color: #9C27B0;")
    time.sleep(0.5)  # Simulate processing time

    # Change the stem search directory using full path
    stem_search_dir = os.path.join(base_output_dir, model_name, os.path.splitext(os.path.basename(audio_file))[0])
    yield from stream_log(f"Searching for stems in: {stem_search_dir}")

    stems: Dict[str, str] = {}
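    # Collect the four standard stems that Demucs writes; extra stems produced by
    # htdemucs_6s (guitar, piano) are not handled here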
    for stem in ["vocals", "drums", "bass", "other"]:
        stem_path = os.path.join(stem_search_dir, f"{stem}.wav")
        yield from stream_log(f"Checking for {stem} stem at: {stem_path}")
        if os.path.exists(stem_path):
            stems[stem] = stem_path
            yield from stream_log(f"Found {stem} stem")
        else:
            yield from stream_log(f"Warning: {stem} stem not found")

    if not stems:
        yield from stream_log("Error: No stems found. Checking alternative directory...")
        stem_search_dir = os.path.join(base_output_dir, model_name)
        for stem in ["vocals", "drums", "bass", "other"]:
            stem_path = os.path.join(stem_search_dir, f"{stem}.wav")
            yield from stream_log(f"Checking for {stem} stem at: {stem_path}")
            if os.path.exists(stem_path):
                stems[stem] = stem_path
                yield from stream_log(f"Found {stem} stem")
            else:
                yield from stream_log(f"Warning: {stem} stem not found")

    yield from stream_log(f"All found stems: {list(stems.keys())}")

    selected_stems: List[str] = []
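    # Keep only the stems that the user ticked and that were actually found on disk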
    for stem, selected in zip(["vocals", "drums", "bass", "other"], [vocals, drums, bass, other]):
        if selected:
            yield from stream_log(f"{stem} is selected by user")
            if stem in stems:
                selected_stems.append(stems[stem])
                yield from stream_log(f"Selected {stem} stem for mixing")
            else:
                yield from stream_log(f"Warning: {stem} was selected but not found")

    yield from stream_log(f"Final selected stems: {selected_stems}")

    if not selected_stems:
        yield from stream_log("Error: No stems selected for mixing", "color: #F44336;")
        raise gr.Error("Please select at least one stem to mix and ensure it was successfully separated.")

    output_file: str = os.path.join(output_dir, "mixed.wav")
    yield from stream_log("Mixing selected stems...", "color: #FF5722;")
    time.sleep(0.5)  # Simulate mixing time
    
    if selected_stems:
        # Load the first stem as the base
        mixed_audio: AudioSegment = AudioSegment.from_wav(selected_stems[0])
        
        # Overlay the remaining stems
        for stem_path in selected_stems[1:]:
            overlay_audio = AudioSegment.from_wav(stem_path)
            
            # Ensure both segments have the same duration
            max_length = max(len(mixed_audio), len(overlay_audio))
            if len(mixed_audio) < max_length:
                mixed_audio += AudioSegment.silent(duration=max_length - len(mixed_audio))
            if len(overlay_audio) < max_length:
                overlay_audio += AudioSegment.silent(duration=max_length - len(overlay_audio))
            
            # Overlay the audio
            mixed_audio = mixed_audio.overlay(overlay_audio)
        
        # Export the mixed audio
        mixed_audio.export(output_file, format="wav")
    else:
        yield from stream_log("Error: No stems to mix", "color: #F44336;")
        raise gr.Error("No stems were selected or found for mixing.")

    if mp3:
        yield from stream_log("Converting to MP3...", "color: #795548;")
        time.sleep(0.5)  # Simulate conversion time
        mp3_output_file: str = os.path.splitext(output_file)[0] + ".mp3"
        mixed_audio.export(mp3_output_file, format="mp3", bitrate=f"{int(mp3_bitrate)}k")
        output_file = mp3_output_file

    yield from stream_log("Process completed successfully!", "color: #4CAF50; font-weight: bold;")
    yield output_file, list(stems.values()), gr.HTML("<pre style='color: green; font-weight: bold;'>Separation and mixing completed successfully!</pre>")

# Define the Gradio interface
with gr.Blocks() as iface:
    gr.Markdown("# Demucs Music Source Separation and Mixing")
    gr.Markdown("Separate vocals, drums, bass, and other instruments from your music using Demucs and mix the selected stems.")
    
    with gr.Row():
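        # Two-column layout: inputs and options on the left; mixed output, stems, and streamed log on the right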
        with gr.Column(scale=1):
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            model_dropdown = gr.Dropdown(
                ["htdemucs", "htdemucs_ft", "htdemucs_6s", "hdemucs_mmi", "mdx", "mdx_extra", "mdx_q", "mdx_extra_q"],
                label="Model Name",
                value="htdemucs_ft"
            )
            with gr.Row():
                vocals_checkbox = gr.Checkbox(label="Vocals", value=True)
                drums_checkbox = gr.Checkbox(label="Drums", value=True)
            with gr.Row():
                bass_checkbox = gr.Checkbox(label="Bass", value=True)
                other_checkbox = gr.Checkbox(label="Other", value=True)
            mp3_checkbox = gr.Checkbox(label="Save as MP3", value=False)
            mp3_bitrate = gr.Slider(128, 320, step=32, label="MP3 Bitrate", visible=False)
            submit_btn = gr.Button("Process", variant="primary")

        with gr.Column(scale=1):
            output_audio = gr.Audio(type="filepath", label="Processed Audio (Mixed)")
            stems_output = gr.File(label="Individual Stems", file_count="multiple")
            separation_log = gr.HTML()

    submit_btn.click(
        fn=inference,
        inputs=[audio_input, model_dropdown, vocals_checkbox, drums_checkbox, bass_checkbox, other_checkbox, mp3_checkbox, mp3_bitrate],
        outputs=[output_audio, stems_output, separation_log]
    )
    
    # Show the MP3 bitrate slider only when "Save as MP3" is checked
    mp3_checkbox.change(
        fn=lambda mp3: gr.update(visible=mp3),
        inputs=mp3_checkbox,
        outputs=mp3_bitrate
    )

# Launch the Gradio interface
iface.launch()