{ "best_metric": 0.887685239315033, "best_model_checkpoint": "outputs/checkpoint-185", "epoch": 4.987854251012146, "eval_steps": 500, "global_step": 231, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.4318488529014845, "grad_norm": 0.5924658179283142, "learning_rate": 4e-05, "loss": 1.9539, "step": 20 }, { "epoch": 0.863697705802969, "grad_norm": 0.495579332113266, "learning_rate": 8e-05, "loss": 1.8186, "step": 40 }, { "epoch": 0.9932523616734144, "eval_loss": 1.5013270378112793, "eval_runtime": 13.4628, "eval_samples_per_second": 27.632, "eval_steps_per_second": 3.491, "step": 46 }, { "epoch": 1.2955465587044535, "grad_norm": 0.6942113041877747, "learning_rate": 0.00012, "loss": 1.6882, "step": 60 }, { "epoch": 1.7273954116059378, "grad_norm": 0.8643310070037842, "learning_rate": 0.00016, "loss": 1.4623, "step": 80 }, { "epoch": 1.9865047233468287, "eval_loss": 1.1028813123703003, "eval_runtime": 13.4743, "eval_samples_per_second": 27.608, "eval_steps_per_second": 3.488, "step": 92 }, { "epoch": 2.1592442645074224, "grad_norm": 1.187238097190857, "learning_rate": 0.0002, "loss": 1.3448, "step": 100 }, { "epoch": 2.591093117408907, "grad_norm": 1.2552473545074463, "learning_rate": 0.00019943348002101371, "loss": 1.1681, "step": 120 }, { "epoch": 2.979757085020243, "eval_loss": 0.9362145066261292, "eval_runtime": 13.4657, "eval_samples_per_second": 27.626, "eval_steps_per_second": 3.49, "step": 138 }, { "epoch": 3.0229419703103915, "grad_norm": 1.0589548349380493, "learning_rate": 0.00019774033898178667, "loss": 1.1426, "step": 140 }, { "epoch": 3.454790823211876, "grad_norm": 1.1629831790924072, "learning_rate": 0.00019493976084683813, "loss": 0.9296, "step": 160 }, { "epoch": 3.8866396761133606, "grad_norm": 0.9937772154808044, "learning_rate": 0.00019106347728549135, "loss": 0.9304, "step": 180 }, { "epoch": 3.9946018893387314, "eval_loss": 0.887685239315033, "eval_runtime": 13.4691, "eval_samples_per_second": 27.619, "eval_steps_per_second": 3.489, "step": 185 }, { "epoch": 4.318488529014845, "grad_norm": 1.299687385559082, "learning_rate": 0.0001861554081393806, "loss": 0.7665, "step": 200 }, { "epoch": 4.75033738191633, "grad_norm": 1.375931978225708, "learning_rate": 0.00018027116379309638, "loss": 0.7884, "step": 220 }, { "epoch": 4.987854251012146, "eval_loss": 0.8908663392066956, "eval_runtime": 13.466, "eval_samples_per_second": 27.625, "eval_steps_per_second": 3.49, "step": 231 } ], "logging_steps": 20, "max_steps": 690, "num_input_tokens_seen": 0, "num_train_epochs": 15, "save_steps": 500, "total_flos": 1.779081708100608e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }