# fairseq HuBERT decoding config — examples/hubert/config/decode/infer_kenlm.yaml
# Provenance: copied from HF Space "aliabd / full working demo",
# commit d5175d3 (raw / history / blame view, 817 bytes).
# @package _group_
# Hydra defaults list: `model: null` means no model config group is merged
# by default; a concrete model config is selected/overridden at launch time.
defaults:
- model: null
# Hydra output-directory layout. The run/sweep directory names encode the
# main decoder hyperparameters so different settings never collide on disk.
# NOTE(review): nesting restored — in the extracted source these keys were
# all flush-left, which made `dir:` a duplicated top-level key.
# NOTE(review): interpolations use ${decoding.decoder.*}; confirm the
# decoding schema actually exposes a `decoder` sub-node (see `decoding:` below).
hydra:
  run:
    # Single-run output dir, e.g. <results_path>/beam500_lmw2_wrd-1_sil0
    dir: ${common_eval.results_path}/beam${decoding.decoder.beam}_lmw${decoding.decoder.lmweight}_wrd${decoding.decoder.wordscore}_sil${decoding.decoder.silweight}
  sweep:
    # Multirun (sweep) root; each job gets its own parameter-tagged subdir.
    dir: ${common_eval.results_path}
    subdir: beam${decoding.decoder.beam}_th${decoding.decoder.beamthreshold}_lmw${decoding.decoder.lmweight}_wrd${decoding.decoder.wordscore}_sil${decoding.decoder.silweight}
# Task settings for loading the (fine-tuned, single-target) HuBERT model.
# NOTE(review): nesting restored — these keys were flush-left in the
# extracted source and would not have been read as part of `task`.
task:
  _name: hubert_pretraining
  single_target: true
  # `???` marks a mandatory value that must be supplied on the command line.
  data: ???
  normalize: ???
# KenLM shallow-fusion beam-search decoding parameters.
# NOTE(review): nesting restored (keys were flush-left in the extracted
# source), and a duplicate `beam: 500` entry was removed — the extracted
# file listed `beam` twice with the same value; YAML 1.2 forbids duplicate
# keys and most parsers silently keep only the last one.
# NOTE(review): the hydra dir interpolations reference ${decoding.decoder.beam}
# etc.; if the consuming schema expects these under a `decoder:` sub-node,
# this stanza may need restructuring — confirm against the InferConfig schema.
decoding:
  type: kenlm
  lexicon: ???          # mandatory: word -> letter-sequence lexicon file
  lmpath: ???           # mandatory: path to the KenLM binary/arpa model
  beamthreshold: 100
  beam: 500
  lmweight: 2
  wordscore: -1
  silweight: 0
  unique_wer_file: true
# Common evaluation settings shared by the decode scripts.
# NOTE(review): nesting restored — keys were flush-left in the extracted source.
common_eval:
  results_path: ???     # mandatory: output root (also used by hydra dirs above)
  path: ???             # mandatory: model checkpoint to evaluate
  post_process: letter
# Dataset/batching settings for inference.
# NOTE(review): nesting restored — keys were flush-left in the extracted source.
dataset:
  max_tokens: 1100000
  gen_subset: ???       # mandatory: which split to decode (e.g. a test subset)