owiedotch committed
Commit
87ca550
1 Parent(s): 2a231ba

Update app.py

Files changed (1)
  1. app.py +11 -4
app.py CHANGED
@@ -15,9 +15,11 @@ device: str = "cuda" if torch.cuda.is_available() else "cpu"
 @spaces.GPU
 def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Tuple[str, gr.HTML]:
     separator = get_model(name=model_name)
+    log_messages = []
 
     def stream_log(message):
-        return f"<pre style='margin-bottom: 0;'>[{model_name}] {message}</pre>"
+        log_messages.append(f"[{model_name}] {message}")
+        return gr.HTML("<pre style='margin-bottom: 0;'>" + "<br>".join(log_messages) + "</pre>")
 
     yield None, stream_log("Starting separation process...")
     yield None, stream_log(f"Loading audio file: {audio_file}")
@@ -47,13 +49,18 @@ def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass:
     ref = wav.mean(0)
     wav = (wav - ref.view(1, -1))
     yield None, stream_log("Audio loaded successfully. Applying model...")
-
+
     # Use apply_model as a standalone function
     try:
         sources = apply_model(separator, wav.to(device), device=device)
-    except Exception as e:
+    except ValueError as e:
         yield None, stream_log(f"Error applying model: {str(e)}")
-        raise gr.Error(f"Failed to apply model: {str(e)}")
+        yield None, stream_log(f"Separator sources: {separator.sources}")
+        yield None, stream_log(f"WAV shape: {wav.shape}")
+        raise gr.Error(f"Failed to apply model: {str(e)}. This might be due to incompatible audio format or model configuration.")
+    except Exception as e:
+        yield None, stream_log(f"Unexpected error applying model: {str(e)}")
+        raise gr.Error(f"An unexpected error occurred while applying the model: {str(e)}")
 
     # Process the sources
     sources = [source * ref.view(1, -1) + ref.view(1, -1) for source in sources]
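
The core of the change: stream_log now appends every message to log_messages and returns the whole accumulated log wrapped in gr.HTML, so each yield from the inference generator re-renders the full history rather than only the latest line, and failures in apply_model are split into a ValueError path (with extra diagnostics) and a generic fallback. Below is a minimal, self-contained sketch of that accumulated-log streaming pattern; demo_process and the Blocks wiring are illustrative assumptions, not part of app.py:

# Illustrative sketch only; not part of this commit.
import gradio as gr

def demo_process(audio_file: str):
    log_messages = []  # persists across the whole generator run

    def stream_log(message: str) -> gr.HTML:
        log_messages.append(f"[demo] {message}")
        # Render every message so far; <pre> keeps one monospace line per entry.
        return gr.HTML("<pre style='margin-bottom: 0;'>" + "<br>".join(log_messages) + "</pre>")

    # Each yield updates both outputs; the HTML log grows with each step.
    yield None, stream_log(f"Loading audio file: {audio_file}")
    yield None, stream_log("Processing...")
    yield audio_file, stream_log("Done.")

with gr.Blocks() as demo:
    inp = gr.Audio(type="filepath")
    out_audio = gr.Audio()
    out_log = gr.HTML()
    gr.Button("Run").click(demo_process, inputs=inp, outputs=[out_audio, out_log])

# demo.launch()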