keshavbhandari committed
Commit 1514a4e · 1 Parent(s): 60e9e83

Add model and resource file downloads

Files changed (1): app.py (+8 -3)
app.py CHANGED
@@ -17,6 +17,10 @@ from huggingface_hub import hf_hub_download
 repo_id = "amaai-lab/text2midi"
 # Download the model.bin file
 model_path = hf_hub_download(repo_id=repo_id, filename="pytorch_model.bin")
+# Download the vocab_remi.pkl file
+tokenizer_path = hf_hub_download(repo_id=repo_id, filename="vocab_remi.pkl")
+# Download the soundfont file
+soundfont_path = hf_hub_download(repo_id=repo_id, filename="soundfont.sf2")
 
 
 def save_wav(filepath):
@@ -29,8 +33,9 @@ def save_wav(filepath):
     wav_filepath = os.path.join(directory, f"{stem}.wav")
 
     # Run the fluidsynth command to convert MIDI to WAV
+    # f"fluidsynth -r 16000 soundfont.sf2 -g 1.0 --quiet --no-shell {midi_filepath} -T wav -F {wav_filepath} > /dev/null",
     process = subprocess.Popen(
-        f"fluidsynth -r 16000 soundfont.sf2 -g 1.0 --quiet --no-shell {midi_filepath} -T wav -F {wav_filepath} > /dev/null",
+        f"fluidsynth -r 16000 {soundfont_path} -g 1.0 --quiet --no-shell {midi_filepath} -T wav -F {wav_filepath} > /dev/null",
         shell=True
     )
     process.wait()
@@ -77,9 +82,9 @@ def generate_midi(caption, temperature=0.9, max_len=500):
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
     artifact_folder = 'artifacts'
 
-    tokenizer_filepath = os.path.join(artifact_folder, "vocab_remi.pkl")
+    # tokenizer_filepath = os.path.join(artifact_folder, "vocab_remi.pkl")
     # Load the tokenizer dictionary
-    with open(tokenizer_filepath, "rb") as f:
+    with open(tokenizer_path, "rb") as f:
         r_tokenizer = pickle.load(f)
 
     # Get the vocab size
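
For context, a minimal sketch of how the three downloaded files are meant to be used together after this commit. The hf_hub_download calls, the tokenizer pickle load, and the fluidsynth invocation mirror the diff above; the example MIDI/WAV filenames are placeholders, not lines from app.py.

import pickle
import subprocess

from huggingface_hub import hf_hub_download

repo_id = "amaai-lab/text2midi"

# hf_hub_download fetches each file into the local Hub cache and returns its path.
# model_path is later used to load the text2midi checkpoint (not shown in this diff).
model_path = hf_hub_download(repo_id=repo_id, filename="pytorch_model.bin")
tokenizer_path = hf_hub_download(repo_id=repo_id, filename="vocab_remi.pkl")
soundfont_path = hf_hub_download(repo_id=repo_id, filename="soundfont.sf2")

# Load the REMI tokenizer dictionary from the downloaded pickle.
with open(tokenizer_path, "rb") as f:
    r_tokenizer = pickle.load(f)

# Render a MIDI file to WAV with fluidsynth, pointing at the downloaded soundfont
# rather than a hard-coded local soundfont.sf2. The filenames below are placeholders.
midi_filepath = "output.mid"
wav_filepath = "output.wav"
process = subprocess.Popen(
    f"fluidsynth -r 16000 {soundfont_path} -g 1.0 --quiet --no-shell "
    f"{midi_filepath} -T wav -F {wav_filepath} > /dev/null",
    shell=True,
)
process.wait()

Relying on the paths returned by hf_hub_download means the Space no longer needs the soundfont or tokenizer committed alongside the code, and the Hub cache avoids re-downloading the files on subsequent runs.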