{
    "training_arguments": {
        "output_dir": "./models/protgpt2-distilled-t10.0-a0.1-l4-h4-e512-p0.1.uniprot_trainset",
        "overwrite_output_dir": false,
        "do_train": false,
        "do_eval": false,
        "do_predict": false,
        "evaluation_strategy": "no",
        "prediction_loss_only": false,
        "per_device_train_batch_size": 1,
        "per_device_eval_batch_size": 8,
        "per_gpu_train_batch_size": null,
        "per_gpu_eval_batch_size": null,
        "gradient_accumulation_steps": 32,
        "eval_accumulation_steps": null,
        "eval_delay": 0,
        "learning_rate": 0.001,
        "weight_decay": 0.01,
        "adam_beta1": 0.9,
        "adam_beta2": 0.999,
        "adam_epsilon": 1e-08,
        "max_grad_norm": 1.0,
        "num_train_epochs": 3,
        "max_steps": -1,
        "lr_scheduler_type": "linear",
        "lr_scheduler_kwargs": {},
        "warmup_ratio": 0.0,
        "warmup_steps": 0,
        "log_level": "passive",
        "log_level_replica": "warning",
        "log_on_each_node": true,
        "logging_dir": "./models/protgpt2-distilled-t10.0-a0.1-l4-h4-e512-p0.1.uniprot_trainset/runs/Apr24_19-17-17_ip-172-31-34-182",
        "logging_strategy": "steps",
        "logging_first_step": false,
        "logging_steps": 10,
        "logging_nan_inf_filter": true,
        "save_strategy": "no",
        "save_steps": 500,
        "save_total_limit": 1,
        "save_safetensors": true,
        "save_on_each_node": false,
        "save_only_model": false,
        "no_cuda": false,
        "use_cpu": false,
        "use_mps_device": false,
        "seed": 42,
        "data_seed": null,
        "jit_mode_eval": false,
        "use_ipex": false,
        "bf16": false,
        "fp16": true,
        "fp16_opt_level": "O1",
        "half_precision_backend": "auto",
        "bf16_full_eval": false,
        "fp16_full_eval": false,
        "tf32": null,
        "local_rank": 0,
        "ddp_backend": null,
        "tpu_num_cores": null,
        "tpu_metrics_debug": false,
        "debug": [],
        "dataloader_drop_last": false,
        "eval_steps": null,
        "dataloader_num_workers": 0,
        "dataloader_prefetch_factor": null,
        "past_index": -1,
        "run_name": "./models/protgpt2-distilled-t10.0-a0.1-l4-h4-e512-p0.1.uniprot_trainset",
        "disable_tqdm": false,
        "remove_unused_columns": true,
        "label_names": null,
        "load_best_model_at_end": false,
        "metric_for_best_model": null,
        "greater_is_better": null,
        "ignore_data_skip": false,
        "fsdp": [],
        "fsdp_min_num_params": 0,
        "fsdp_config": {
            "min_num_params": 0,
            "xla": false,
            "xla_fsdp_v2": false,
            "xla_fsdp_grad_ckpt": false
        },
        "fsdp_transformer_layer_cls_to_wrap": null,
        "accelerator_config": {
            "split_batches": false,
            "dispatch_batches": null,
            "even_batches": true,
            "use_seedable_sampler": true
        },
        "deepspeed": null,
        "label_smoothing_factor": 0.0,
        "optim": "adamw_torch",
        "optim_args": null,
        "adafactor": false,
        "group_by_length": false,
        "length_column_name": "length",
        "report_to": [
            "wandb"
        ],
        "ddp_find_unused_parameters": null,
        "ddp_bucket_cap_mb": null,
        "ddp_broadcast_buffers": null,
        "dataloader_pin_memory": true,
        "dataloader_persistent_workers": false,
        "skip_memory_metrics": true,
        "use_legacy_prediction_loop": false,
        "push_to_hub": false,
        "resume_from_checkpoint": null,
        "hub_model_id": null,
        "hub_strategy": "every_save",
        "hub_token": "<HUB_TOKEN>",
        "hub_private_repo": false,
        "hub_always_push": false,
        "gradient_checkpointing": false,
        "gradient_checkpointing_kwargs": null,
        "include_inputs_for_metrics": false,
        "fp16_backend": "auto",
        "push_to_hub_model_id": null,
        "push_to_hub_organization": null,
        "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
        "mp_parameters": "",
        "auto_find_batch_size": false,
        "full_determinism": false,
        "torchdynamo": null,
        "ray_scope": "last",
        "ddp_timeout": 1800,
        "torch_compile": false,
        "torch_compile_backend": null,
        "torch_compile_mode": null,
        "dispatch_batches": null,
        "split_batches": null,
        "include_tokens_per_second": false,
        "include_num_input_tokens_seen": false,
        "neftune_noise_alpha": null,
        "optim_target_modules": null
    },
    "distillation_temperature": 10.0,
    "distillation_alpha": 0.1,
    "model_architecture": {
        "n_embd": 512,
        "n_layer": 4,
        "n_head": 4
    }
}
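Below is a minimal sketch (not part of the repository) of how a config like this could be consumed: rebuilding the 4-layer / 4-head / 512-dim student from "model_architecture", passing a subset of "training_arguments" to Hugging Face TrainingArguments, and applying a soft-target distillation loss with temperature 10.0 and alpha 0.1. The filename "distillation_config.json", the subset of arguments shown, the vocabulary size, and the exact alpha weighting convention are all assumptions, not the project's confirmed implementation.

import json

import torch.nn.functional as F
from transformers import GPT2Config, GPT2LMHeadModel, TrainingArguments

# Assumed filename for the JSON shown above.
with open("distillation_config.json") as f:
    cfg = json.load(f)

# Student architecture from "model_architecture": n_layer=4, n_head=4, n_embd=512.
# vocab_size must match the teacher's (ProtGPT2) tokenizer; 50257 is GPT-2's default
# and is only a placeholder here.
arch = cfg["model_architecture"]
student = GPT2LMHeadModel(
    GPT2Config(n_layer=arch["n_layer"], n_head=arch["n_head"],
               n_embd=arch["n_embd"], vocab_size=50257)
)

# A subset of "training_arguments"; the full dict also contains derived/runtime keys
# (e.g. "fsdp_config", "logging_dir") that TrainingArguments would recompute itself.
ta = cfg["training_arguments"]
args = TrainingArguments(
    output_dir=ta["output_dir"],
    per_device_train_batch_size=ta["per_device_train_batch_size"],  # 1
    gradient_accumulation_steps=ta["gradient_accumulation_steps"],  # 32
    learning_rate=ta["learning_rate"],                              # 1e-3
    num_train_epochs=ta["num_train_epochs"],                        # 3
    fp16=ta["fp16"],
    logging_steps=ta["logging_steps"],
    report_to=ta["report_to"],                                      # ["wandb"]
)

def distillation_loss(student_logits, teacher_logits, labels, T=10.0, alpha=0.1):
    """Knowledge-distillation loss using the config's temperature and alpha.

    Assumed convention: alpha weights the hard-label cross-entropy and (1 - alpha)
    weights the temperature-scaled KL term (scaled by T^2 to keep gradient
    magnitudes comparable).
    """
    ce = F.cross_entropy(student_logits.view(-1, student_logits.size(-1)),
                         labels.view(-1), ignore_index=-100)
    kd = F.kl_div(F.log_softmax(student_logits / T, dim=-1),
                  F.softmax(teacher_logits / T, dim=-1),
                  reduction="batchmean") * (T ** 2)
    return alpha * ce + (1.0 - alpha) * kd

In practice the student, the loss, and a frozen ProtGPT2 teacher would be wired into a Trainer subclass that overrides compute_loss; that wrapper is omitted here since the repository's own training entry point is not shown.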