HawkEye098432 committed on
Commit
fdfbb51
1 Parent(s): 6d30d13

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -31
app.py CHANGED
@@ -1,43 +1,23 @@
1
- import os
2
  import gradio as gr
3
  from scipy.io.wavfile import write
4
 
5
 
6
  def inference(audio):
7
- os.makedirs("out", exist_ok=True)
8
- vocals_files = []
9
- instrumental_files = []
10
- for audio_file in audio:
11
- write('test.wav', audio_file[0], audio_file[1])
12
- os.system("python3 -m demucs.separate -n htdemucs --two-stems=vocals -d cpu test.wav -o out")
13
- vocals_files.append("./out/htdemucs/test/vocals.wav")
14
- instrumental_files.append("./out/htdemucs/test/no_vocals.wav")
15
- return vocals_files, instrumental_files
16
-
17
-
18
- def download_file(filepath):
19
- with open(filepath, "rb") as f:
20
- file_bytes = f.read()
21
- return file_bytes
22
-
23
-
24
  title = "Source Separation (v4)"
25
- description = "This is the latest 'bleeding edge version' which enables the new v4 Hybrid Transformer model. <br> for this space, 2 stem separation (Karaoke Mode) is enabled and CPU mode which has been optimized for best quality & processing time. <p>| Gradio demo for Demucs(v4): Music Source Separation in the Waveform Domain. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below.</p>"
26
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1911.13254' target='_blank'>Music Source Separation in the Waveform Domain</a> | <a href='https://github.com/facebookresearch/demucs' target='_blank'>Github Repo</a> | <a href='https://www.thafx.com' target='_blank'>//THAFX</a></p>"
27
-
28
- audio_input = gr.inputs.Audio(label="Input", type="file", multiple=True)
29
- vocals_output = gr.outputs.Audio(label="Vocals", type="file", process=download_file)
30
- instrumental_output = gr.outputs.Audio(label="No Vocals / Instrumental", type="file", process=download_file)
31
-
32
- examples = [['test.mp3']]
33
 
34
- # Create the Gradio interface
35
  gr.Interface(
36
- fn=inference,
37
- inputs=audio_input,
38
- outputs=[vocals_output, instrumental_output],
39
  title=title,
40
  description=description,
41
  article=article,
42
  examples=examples
43
- ).launch(enable_queue=True, share=True)
 
import os
import subprocess

import gradio as gr
from scipy.io.wavfile import write
4
 
5
 
6
def inference(audio):
    """Run Demucs v4 (htdemucs) two-stem separation on an uploaded track.

    Parameters:
        audio: Gradio "numpy"-type audio input, a ``(sample_rate, data)``
            tuple (rate first — matches the argument order scipy's
            ``write`` expects).

    Returns:
        Tuple of file paths ``(vocals_wav, no_vocals_wav)`` produced by
        Demucs under ``out/htdemucs/test/``.
    """
    os.makedirs("out", exist_ok=True)
    # scipy signature is write(filename, rate, data); gradio numpy audio
    # supplies (rate, data) in that same order.
    write('test.wav', audio[0], audio[1])
    # Invoke demucs via an argument list instead of os.system's shell
    # string: no shell interpretation, same exit-status-ignoring behavior
    # (check=False mirrors os.system, which never raises on failure).
    subprocess.run(
        ["python3", "-m", "demucs.separate", "-n", "htdemucs",
         "--two-stems=vocals", "-d", "cpu", "test.wav", "-o", "out"],
        check=False,
    )
    return "./out/htdemucs/test/vocals.wav", "./out/htdemucs/test/no_vocals.wav"
 
 
 
 
 
 
 
 
 
 
 
 
title = "Source Separation (v4)"
# BUG FIX: the Interface call below passes description= and article=, but
# this commit deleted their definitions, so the module raised NameError at
# import time. Restore both strings exactly as in the previous revision.
description = "This is the latest 'bleeding edge version' which enables the new v4 Hybrid Transformer model. <br> for this space, 2 stem separation (Karaoke Mode) is enabled and CPU mode which has been optimized for best quality & processing time. <p>| Gradio demo for Demucs(v4): Music Source Separation in the Waveform Domain. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below.</p>"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1911.13254' target='_blank'>Music Source Separation in the Waveform Domain</a> | <a href='https://github.com/facebookresearch/demucs' target='_blank'>Github Repo</a> | <a href='https://www.thafx.com' target='_blank'>//THAFX</a></p>"

examples = [['test.mp3']]

# Build the demo UI: one numpy audio input, two filepath audio outputs
# (vocals / instrumental), then launch with request queueing enabled.
gr.Interface(
    inference,
    gr.Audio(type="numpy", label="Input"),
    [gr.Audio(type="filepath", label="Vocals"),
     gr.Audio(type="filepath", label="No Vocals / Instrumental")],
    title=title,
    description=description,
    article=article,
    examples=examples
).launch(enable_queue=True)