owiedotch committed on
Commit
5920386
1 Parent(s): 555a678

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -12
app.py CHANGED
@@ -11,14 +11,6 @@ from typing import Generator
11
  import asyncio # Import asyncio for cancellation
12
  import traceback # Import traceback for error handling
13
 
14
- # Attempt to use GPU, fallback to CPU
15
- try:
16
- torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
17
- print(f"Using device: {torch_device}")
18
- except Exception as e:
19
- print(f"Error detecting GPU. Using CPU. Error: {e}")
20
- torch_device = torch.device("cpu")
21
-
22
  # Load the SemantiCodec model without specifying a device
23
  semanticodec = SemantiCodec(token_rate=100, semantic_vocab_size=32768)
24
 
@@ -103,10 +95,16 @@ def decode_audio(encoded_file_path):
103
  tokens_numpy_bytes = lz4.frame.decompress(compressed_data)
104
  tokens_numpy = np.frombuffer(tokens_numpy_bytes, dtype=np.int64).reshape(shape)
105
 
106
- # Move the tensor to the same device as the model
107
  tokens = torch.from_numpy(tokens_numpy)
 
 
 
 
 
 
 
108
  print(f"Tokens device: {tokens.device}")
109
- print(f"Model device: {next(semanticodec.parameters()).device}")
110
 
111
  # Decode the audio
112
  with torch.no_grad():
@@ -145,10 +143,16 @@ async def stream_decode_audio(encoded_file_path) -> Generator[tuple, None, None]
145
  tokens_numpy_bytes = lz4.frame.decompress(compressed_data)
146
  tokens_numpy = np.frombuffer(tokens_numpy_bytes, dtype=np.int64).reshape(shape)
147
 
148
- # Move the tensor to the same device as the model
149
  tokens = torch.from_numpy(tokens_numpy)
 
 
 
 
 
 
 
150
  print(f"Streaming tokens device: {tokens.device}")
151
- print(f"Model device: {next(semanticodec.parameters()).device}")
152
 
153
  # Decode the audio in chunks
154
  chunk_size = sample_rate * 2 # Adjust chunk size as needed
 
11
  import asyncio # Import asyncio for cancellation
12
  import traceback # Import traceback for error handling
13
 
 
 
 
 
 
 
 
 
14
  # Load the SemantiCodec model without specifying a device
15
  semanticodec = SemantiCodec(token_rate=100, semantic_vocab_size=32768)
16
 
 
95
  tokens_numpy_bytes = lz4.frame.decompress(compressed_data)
96
  tokens_numpy = np.frombuffer(tokens_numpy_bytes, dtype=np.int64).reshape(shape)
97
 
98
+ # Create a tensor from the numpy array
99
  tokens = torch.from_numpy(tokens_numpy)
100
+
101
+ # Determine the device of the model
102
+ model_device = next(semanticodec.parameters()).device
103
+ print(f"Model device: {model_device}")
104
+
105
+ # Move the tokens to the same device as the model
106
+ tokens = tokens.to(model_device)
107
  print(f"Tokens device: {tokens.device}")
 
108
 
109
  # Decode the audio
110
  with torch.no_grad():
 
143
  tokens_numpy_bytes = lz4.frame.decompress(compressed_data)
144
  tokens_numpy = np.frombuffer(tokens_numpy_bytes, dtype=np.int64).reshape(shape)
145
 
146
+ # Create a tensor from the numpy array
147
  tokens = torch.from_numpy(tokens_numpy)
148
+
149
+ # Determine the device of the model
150
+ model_device = next(semanticodec.parameters()).device
151
+ print(f"Model device: {model_device}")
152
+
153
+ # Move the tokens to the same device as the model
154
+ tokens = tokens.to(model_device)
155
  print(f"Streaming tokens device: {tokens.device}")
 
156
 
157
  # Decode the audio in chunks
158
  chunk_size = sample_rate * 2 # Adjust chunk size as needed