Spaces: Running on Zero
Update app.py

Build and load the model once at module scope instead of on every request: the checkpoint is now fetched from the Hub with hf_hub_download (repo asigalov61/Score-2-Performance-Transformer, replacing the locally referenced Guided_Rpck_Music_Transformer file), loaded on CPU with weights_only=True, and compiled with torch.compile(mode='max-autotune'). Only model.to(DEVICE) and model.eval() remain inside the @spaces.GPU handler. The architecture also shrinks from depth 16 / heads 16 with SEQ_LEN 4096 to depth 8 / heads 8 with SEQ_LEN 1802.
app.py CHANGED
@@ -24,6 +24,50 @@ from midi_to_colab_audio import midi_to_colab_audio
 
 from huggingface_hub import hf_hub_download
 
+# =================================================================================================
+
+print('Loading model...')
+
+SEQ_LEN = 1802
+PAD_IDX = 771
+DEVICE = 'cuda' # 'cpu'
+
+# instantiate the model
+
+model = TransformerWrapper(
+    num_tokens = PAD_IDX+1,
+    max_seq_len = SEQ_LEN,
+    attn_layers = Decoder(dim = 1024,
+                          depth = 8,
+                          heads = 8,
+                          rotary_pos_emb=True,
+                          attn_flash = True
+                          )
+)
+
+model = AutoregressiveWrapper(model, ignore_index = PAD_IDX)
+
+
+print('=' * 70)
+
+print('Loading model checkpoint...')
+
+model_checkpoint = hf_hub_download(repo_id='asigalov61/Score-2-Performance-Transformer',
+                                   filename='Score_2_Performance_Transformer_Final_Small_Trained_Model_4496_steps_1.5185_loss_0.5589_acc.pth'
+                                   )
+
+model.load_state_dict(torch.load(model_checkpoint, map_location='cpu', weights_only=True))
+
+model = torch.compile(model, mode='max-autotune')
+
+dtype = torch.bfloat16
+
+ctx = torch.amp.autocast(device_type=DEVICE, dtype=dtype)
+
+print('=' * 70)
+print('Done!')
+print('=' * 70)
+
 # =================================================================================================
 
 @spaces.GPU
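The module-level ctx autocast context is created here but only consumed later, inside the generation code of the handler. A minimal sketch of how the pieces would fit together at sampling time; it assumes an x_transformers-style AutoregressiveWrapper.generate(prompt, seq_len, ...) call and illustrative prime-token handling, neither of which is shown in this diff:

import torch

# Sketch only: assumes the module-level objects defined above
# (model, ctx, DEVICE) and an x_transformers-style generate API.
def sample_performance(prime_tokens, num_gen_tokens, temperature=0.9):
    # Seed the model with the encoded score tokens (hypothetical input).
    x = torch.LongTensor([prime_tokens]).to(DEVICE)

    # Run sampling under the bfloat16 autocast context set up at import.
    with ctx:
        out = model.generate(x, num_gen_tokens, temperature=temperature)

    return out[0].tolist()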
@@ -66,45 +110,7 @@ def Convert_Score_to_Performance(input_midi,
     print('Model sampling top p value:', input_model_top_p)
     print('=' * 70)
 
-    #===============================================================================
-
-    print('Loading model...')
 
-    SEQ_LEN = 4096
-    PAD_IDX = 673
-    DEVICE = 'cuda' # 'cpu'
-
-    # instantiate the model
-
-    model = TransformerWrapper(
-        num_tokens = PAD_IDX+1,
-        max_seq_len = SEQ_LEN,
-        attn_layers = Decoder(dim = 1024, depth = 16, heads = 16, rotary_pos_emb=True, attn_flash = True)
-    )
-
-    model = AutoregressiveWrapper(model, ignore_index = PAD_IDX)
-
-    model.to(DEVICE)
-    print('=' * 70)
-
-    print('Loading model checkpoint...')
-
-    model.load_state_dict(
-        torch.load('Guided_Rpck_Music_Transformer_Trained_Model_12081_steps_0.4113_loss_0.8747_acc.pth',
-                   map_location=DEVICE))
-    print('=' * 70)
-
-    model.eval()
-
-    if DEVICE == 'cpu':
-        dtype = torch.bfloat16
-    else:
-        dtype = torch.bfloat16
-
-    ctx = torch.amp.autocast(device_type=DEVICE, dtype=dtype)
-
-    print('Done!')
-    print('=' * 70)
 
     #===============================================================================
 
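This deletion mirrors the first hunk: the same setup now runs once at import instead of on every request. A side note on the removed block: its if DEVICE == 'cpu' check assigned torch.bfloat16 in both branches, so nothing is lost by the new module-level code collapsing it to a single dtype = torch.bfloat16 assignment.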
@@ -204,6 +210,9 @@ def Convert_Score_to_Performance(input_midi,
     print('=' * 70)
     print('Generating...')
 
+    model.to(DEVICE)
+    model.eval()
+
     #==================================================================
 
     def generate_continuation(num_prime_tokens, num_gen_tokens):
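Taken together with the first hunk, this is the usual ZeroGPU pattern: CUDA is only available while a @spaces.GPU function is executing, so everything that can run on CPU (model construction, state-dict load, torch.compile) happens once at import, and only the device transfer and eval switch happen per call. A condensed, hypothetical sketch of the resulting structure; nn.Linear and run are stand-ins, not names from the diff:

import spaces
import torch
import torch.nn as nn

# Module scope: runs once at startup, on CPU (ZeroGPU grants CUDA only
# inside @spaces.GPU calls). nn.Linear stands in for the real
# TransformerWrapper / AutoregressiveWrapper stack.
model = nn.Linear(8, 8)
model = torch.compile(model, mode='max-autotune')

@spaces.GPU
def run(x):
    # GPU scope: move the weights over and switch to eval mode per call,
    # exactly as the commit now does right before generating.
    model.to('cuda')
    model.eval()
    with torch.no_grad():
        return model(x.to('cuda')).cpu()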