# examples/hubert/config/decode/infer_viterbi.yaml
# @package _group_
defaults:
  - model: null
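# The run/sweep directories below encode the decoder beam, beam threshold, LM
# weight, word score, and silence weight in their names, so each decoding
# configuration writes its results to a distinct folder.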
hydra:
  run:
    dir: ${common_eval.results_path}/beam${decoding.decoder.beam}_lmw${decoding.decoder.lmweight}_wrd${decoding.decoder.wordscore}_sil${decoding.decoder.silweight}
  sweep:
    dir: ${common_eval.results_path}
    subdir: beam${decoding.decoder.beam}_th${decoding.decoder.beamthreshold}_lmw${decoding.decoder.lmweight}_wrd${decoding.decoder.wordscore}_sil${decoding.decoder.silweight}
task:
  _name: hubert_pretraining
  single_target: true
  data: ???
  normalize: ???

decoding:
  type: viterbi
  unique_wer_file: true

common_eval:
  results_path: ???
  path: ???
  post_process: letter

generation:
  nbest: 1
  beam: 500

dataset:
  max_tokens: 1100000
  gen_subset: ???
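# The ??? entries are mandatory Hydra values that must be supplied at launch
# time. A minimal invocation sketch (the entry point and all paths below are
# assumptions, not part of this file):
#
#   python examples/speech_recognition/new/infer.py \
#     --config-dir examples/hubert/config/decode \
#     --config-name infer_viterbi \
#     task.data=/path/to/data \
#     task.normalize=false \
#     common_eval.path=/path/to/checkpoint.pt \
#     common_eval.results_path=/path/to/results \
#     dataset.gen_subset=test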