DrishtiSharma committed on
Commit
aa69b98
1 Parent(s): 5036648

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -89,10 +89,10 @@ def predict_and_ctc_lm_decode(input_file, speaker_idx: str=None):
89
  # length_penalty=1.0,
90
  early_stopping=True# disable sampling to test if batching affects output
91
  )
92
- preds = postproc(transcribed_text,
93
  preds=tokenizer.decode(output_sequence[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
94
- if len(preds) > MAX_TXT_LEN:
95
- text = preds[:MAX_TXT_LEN]
96
  print(f"Input text was cutoff since it went over the {MAX_TXT_LEN} character limit.")
97
  print(text, model_name)
98
  # download model
@@ -110,7 +110,7 @@ def predict_and_ctc_lm_decode(input_file, speaker_idx: str=None):
110
  # synthesize
111
  if synthesizer is None:
112
  raise NameError("model not found")
113
- wavs = synthesizer.tts(preds, speaker_idx)
114
  # return output
115
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
116
  synthesizer.save_wav(wavs, fp)
 
89
  # length_penalty=1.0,
90
  early_stopping=True# disable sampling to test if batching affects output
91
  )
92
+ text = postproc(transcribed_text,
93
  preds=tokenizer.decode(output_sequence[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
94
+ if len(text) > MAX_TXT_LEN:
95
+ text = text[:MAX_TXT_LEN]
96
  print(f"Input text was cutoff since it went over the {MAX_TXT_LEN} character limit.")
97
  print(text, model_name)
98
  # download model
 
110
  # synthesize
111
  if synthesizer is None:
112
  raise NameError("model not found")
113
+ wavs = synthesizer.tts(text, speaker_idx)
114
  # return output
115
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
116
  synthesizer.save_wav(wavs, fp)