Text Generation
Transformers
PyTorch
English
llama
text-generation-inference
Inference Endpoints
ksych committed · Commit c875f14 (verified)
1 Parent(s): d5c8126

Update special tokens

Files changed (1):
inference.py +3 -3
inference.py CHANGED
@@ -113,9 +113,9 @@ device = "cuda"
 n_codebooks_tts = 3
 n_codebooks_asr = 1
 
-start_audio_token = "<soa>"
-end_audio_token = "<eoa>"
-end_sequence_token = "<eos>"
+start_audio_token = "<|start_of_audio|>"
+end_audio_token = "<|end_of_audio|>"
+end_sequence_token = "<|end_of_text|>"
 
 base_model = "Vikhrmodels/salt-asr_speech_1_wav_1_tts_speech_3_text-10k"
 
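
For context, a minimal sketch (not part of this commit or of inference.py) of how the renamed delimiter tokens could be registered with the model's tokenizer before inference; it relies only on the standard transformers tokenizer API, and the prompt wrapping at the end is purely illustrative:

# Minimal sketch, assuming the standard transformers tokenizer API;
# not taken from inference.py, only an illustration of using the
# updated special tokens shown in the diff above.
from transformers import AutoTokenizer

start_audio_token = "<|start_of_audio|>"
end_audio_token = "<|end_of_audio|>"
end_sequence_token = "<|end_of_text|>"

base_model = "Vikhrmodels/salt-asr_speech_1_wav_1_tts_speech_3_text-10k"
tokenizer = AutoTokenizer.from_pretrained(base_model)

# Register the audio delimiters as additional special tokens so each one
# maps to a single id and is never split into sub-word pieces.
tokenizer.add_special_tokens(
    {"additional_special_tokens": [start_audio_token, end_audio_token]}
)

# Illustrative prompt wrapping: mark where generated audio codes should begin.
prompt = f"Convert text to speech: hello {start_audio_token}"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids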