---
# Hyperparameter configuration for a `unified_metric` model — presumably an
# Unbabel COMET-style MT quality/evaluation metric trained on top of
# XLM-RoBERTa (TODO confirm against the consuming trainer's schema).
# NOTE(review): the original file had been collapsed onto one line, which is
# not valid YAML; restored to block style with every value unchanged.

activations: Tanh                        # activation used inside the estimator head
batch_size: 4
class_identifier: unified_metric         # selects the model class in the consuming framework
dropout: 0.1
encoder_learning_rate: 5.0e-06           # separate (smaller) LR for the pretrained encoder
encoder_model: XLM-RoBERTa
final_activation: null                   # no activation on the output layer
hidden_sizes:                            # feed-forward layer widths of the estimator
  - 3072
  - 1024
input_segments:                          # segments fed to the model: source, hypothesis, reference
  - src
  - mt
  - ref
keep_embeddings_frozen: true
layer: mix                               # presumably a learned mix of encoder layers — verify
layerwise_decay: 0.95                    # per-layer LR decay factor for the encoder
learning_rate: 1.5e-05                   # LR for the non-encoder (estimator) parameters
load_weights_from_checkpoint: null       # train from scratch; no checkpoint restore
nr_frozen_epochs: 0.3                    # fraction of an epoch with the encoder frozen
optimizer: AdamW
pool: cls                                # sentence embedding taken from the CLS token
pretrained_model: xlm-roberta-large
train_data: data/1719-da.csv
validation_data:
  - data/qad-ende-newstest2020.csv
  - data/qad-enru-newstest2020.csv
  - data/wmt-ende-newstest2021.csv
  - data/wmt-zhen-newstest2021.csv
  - data/wmt-enru-newstest2021.csv