---
# Tacotron2 training configuration (LJSpeech).
# NOTE(review): this file had been collapsed onto a single line, which is not
# parseable YAML (plain scalars may not contain ": "). Reformatted to block
# style; every key and value is unchanged.

# --- Data loading ---
allow_cache: true
batch_size: 1
config: ./ttsexamples/tacotron2/conf/tacotron2.v1.yaml
dev_dir: /content/dump_ljspeech/valid
format: npy
hop_size: 256
is_shuffle: true
max_char_length: 128
max_mel_length: 257
mel_length_threshold: 32
remove_short_samples: true
train_dir: /content/dump_ljspeech/train
use_fixed_shapes: true
use_norm: true

# --- Training schedule ---
end_ratio_value: 0.0
eval_interval_steps: 50
gradient_accumulation_steps: 1
log_interval_steps: 20
mixed_precision: false
num_save_intermediate_results: 1
save_interval_steps: 20
schedule_decay_steps: 50000
start_ratio_value: 0.5
# presumably > train_max_steps so scheduled teacher forcing never starts
# in a default run — TODO confirm against the trainer's schedule logic
start_schedule_teacher_forcing: 200001
train_max_steps: 200000
use_fal: false

# --- Optimizer ---
optimizer_params:
  decay_steps: 150000
  end_learning_rate: 1.0e-05
  initial_learning_rate: 0.001
  warmup_proportion: 0.02
  weight_decay: 0.001

# --- Model ---
model_type: tacotron2
tacotron2_params:
  attention_dim: 128
  attention_filters: 32
  attention_kernel: 31
  attention_type: lsa
  dataset: ljspeech
  decoder_lstm_units: 1024
  embedding_dropout_prob: 0.1
  embedding_hidden_size: 512
  encoder_conv_activation: relu
  encoder_conv_dropout_rate: 0.5
  encoder_conv_filters: 512
  encoder_conv_kernel_sizes: 5
  encoder_lstm_units: 256
  initializer_range: 0.02
  n_conv_encoder: 5
  n_conv_postnet: 5
  n_lstm_decoder: 1
  n_mels: 80
  n_prenet_layers: 2
  n_speakers: 1
  postnet_conv_filters: 512
  postnet_conv_kernel_sizes: 5
  postnet_dropout_rate: 0.1
  prenet_activation: relu
  prenet_dropout_rate: 0.5
  prenet_units: 256
  reduction_factor: 1

# --- Checkpointing / misc ---
outdir: ./ttsexamples/tacotron2/exp/train.tacotron2.v1/
pretrained: ''
resume: ''
var_train_expr: null
verbose: 1
# quoted so it stays the string "0.0", not the float 0.0
version: '0.0'