|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7335075413744098,
  "eval_steps": 200,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "eval_bertscore": 0.7401605248451233,
      "eval_loss": 1.9530484676361084,
      "eval_rouge1": 0.6562857460474375,
      "eval_rouge2": 0.3640670727106235,
      "eval_rougeL": 0.5655212336424695,
      "eval_rougeLsum": 0.6414840198810386,
      "eval_runtime": 21.7196,
      "eval_samples_per_second": 1.381,
      "eval_steps_per_second": 0.691,
      "step": 200
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.25105270743370056,
      "learning_rate": 0.00019771674842969145,
      "loss": 1.7353,
      "step": 250
    },
    {
      "epoch": 0.04,
      "eval_bertscore": 0.7432050108909607,
      "eval_loss": 1.9583823680877686,
      "eval_rouge1": 0.6554226269617707,
      "eval_rouge2": 0.36661086995296877,
      "eval_rougeL": 0.5637448790342183,
      "eval_rougeLsum": 0.6419796784912521,
      "eval_runtime": 21.9623,
      "eval_samples_per_second": 1.366,
      "eval_steps_per_second": 0.683,
      "step": 400
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.26550447940826416,
      "learning_rate": 0.00019542432717436156,
      "loss": 1.7786,
      "step": 500
    },
    {
      "epoch": 0.06,
      "eval_bertscore": 0.7469045519828796,
      "eval_loss": 1.9245686531066895,
      "eval_rouge1": 0.6662431635890791,
      "eval_rouge2": 0.3735263724826765,
      "eval_rougeL": 0.5755071616151013,
      "eval_rougeLsum": 0.6538383087686117,
      "eval_runtime": 21.5302,
      "eval_samples_per_second": 1.393,
      "eval_steps_per_second": 0.697,
      "step": 600
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.1538015753030777,
      "learning_rate": 0.0001931319059190317,
      "loss": 1.8851,
      "step": 750
    },
    {
      "epoch": 0.07,
      "eval_bertscore": 0.7442477941513062,
      "eval_loss": 1.9187489748001099,
      "eval_rouge1": 0.6606221897489035,
      "eval_rouge2": 0.368654563659435,
      "eval_rougeL": 0.5731546210408094,
      "eval_rougeLsum": 0.6470590823125606,
      "eval_runtime": 21.9831,
      "eval_samples_per_second": 1.365,
      "eval_steps_per_second": 0.682,
      "step": 800
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.1681252270936966,
      "learning_rate": 0.0001908394846637018,
      "loss": 1.8919,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "eval_bertscore": 0.7458053231239319,
      "eval_loss": 1.9159075021743774,
      "eval_rouge1": 0.6621259186456026,
      "eval_rouge2": 0.372024043683234,
      "eval_rougeL": 0.5743354509339939,
      "eval_rougeLsum": 0.6491550893780276,
      "eval_runtime": 21.7159,
      "eval_samples_per_second": 1.381,
      "eval_steps_per_second": 0.691,
      "step": 1000
    },
    {
      "epoch": 0.11,
      "eval_bertscore": 0.7468854784965515,
      "eval_loss": 1.9140182733535767,
      "eval_rouge1": 0.6626581781149132,
      "eval_rouge2": 0.37318557504782157,
      "eval_rougeL": 0.5759264203594217,
      "eval_rougeLsum": 0.6490702446275723,
      "eval_runtime": 21.6486,
      "eval_samples_per_second": 1.386,
      "eval_steps_per_second": 0.693,
      "step": 1200
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.1552441120147705,
      "learning_rate": 0.00018854706340837193,
      "loss": 1.9052,
      "step": 1250
    },
    {
      "epoch": 0.13,
      "eval_bertscore": 0.7475314736366272,
      "eval_loss": 1.913794755935669,
      "eval_rouge1": 0.6648687174353192,
      "eval_rouge2": 0.3760379232448734,
      "eval_rougeL": 0.5784915488164926,
      "eval_rougeLsum": 0.6513864520108938,
      "eval_runtime": 21.664,
      "eval_samples_per_second": 1.385,
      "eval_steps_per_second": 0.692,
      "step": 1400
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.14638397097587585,
      "learning_rate": 0.00018625464215304204,
      "loss": 1.8843,
      "step": 1500
    },
    {
      "epoch": 0.15,
      "eval_bertscore": 0.747238039970398,
      "eval_loss": 1.9117029905319214,
      "eval_rouge1": 0.6638085237198453,
      "eval_rouge2": 0.3742779818055127,
      "eval_rougeL": 0.5754209460423059,
      "eval_rougeLsum": 0.6506476155592722,
      "eval_runtime": 21.9308,
      "eval_samples_per_second": 1.368,
      "eval_steps_per_second": 0.684,
      "step": 1600
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.15738993883132935,
      "learning_rate": 0.00018396222089771218,
      "loss": 1.8964,
      "step": 1750
    },
    {
      "epoch": 0.17,
      "eval_bertscore": 0.7473016381263733,
      "eval_loss": 1.9117563962936401,
      "eval_rouge1": 0.6620053151663765,
      "eval_rouge2": 0.37406692119411245,
      "eval_rougeL": 0.5758911607323577,
      "eval_rougeLsum": 0.6494070575604445,
      "eval_runtime": 21.6727,
      "eval_samples_per_second": 1.384,
      "eval_steps_per_second": 0.692,
      "step": 1800
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.1588907092809677,
      "learning_rate": 0.00018166979964238228,
      "loss": 1.8827,
      "step": 2000
    },
    {
      "epoch": 0.18,
      "eval_bertscore": 0.7485987544059753,
      "eval_loss": 1.9126006364822388,
      "eval_rouge1": 0.6641836156334741,
      "eval_rouge2": 0.37320215574735827,
      "eval_rougeL": 0.5783015040447993,
      "eval_rougeLsum": 0.6522235940423647,
      "eval_runtime": 21.9759,
      "eval_samples_per_second": 1.365,
      "eval_steps_per_second": 0.683,
      "step": 2000
    },
    {
      "epoch": 0.2,
      "eval_bertscore": 0.7482583522796631,
      "eval_loss": 1.9075205326080322,
      "eval_rouge1": 0.6658219484766166,
      "eval_rouge2": 0.37723364952258465,
      "eval_rougeL": 0.5769040785174693,
      "eval_rougeLsum": 0.6511328888044219,
      "eval_runtime": 21.5892,
      "eval_samples_per_second": 1.39,
      "eval_steps_per_second": 0.695,
      "step": 2200
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.15247465670108795,
      "learning_rate": 0.00017937737838705242,
      "loss": 1.8831,
      "step": 2250
    },
    {
      "epoch": 0.22,
      "eval_bertscore": 0.7460805177688599,
      "eval_loss": 1.9088668823242188,
      "eval_rouge1": 0.6627321043292516,
      "eval_rouge2": 0.3696581195003696,
      "eval_rougeL": 0.5740988544467178,
      "eval_rougeLsum": 0.6478729042661874,
      "eval_runtime": 21.9221,
      "eval_samples_per_second": 1.368,
      "eval_steps_per_second": 0.684,
      "step": 2400
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.1587379276752472,
      "learning_rate": 0.00017708495713172253,
      "loss": 1.8829,
      "step": 2500
    },
    {
      "epoch": 0.24,
      "eval_bertscore": 0.7472203373908997,
      "eval_loss": 1.906219482421875,
      "eval_rouge1": 0.6637415370426804,
      "eval_rouge2": 0.37565276875837994,
      "eval_rougeL": 0.5773879369079004,
      "eval_rougeLsum": 0.6488719947518645,
      "eval_runtime": 21.8112,
      "eval_samples_per_second": 1.375,
      "eval_steps_per_second": 0.688,
      "step": 2600
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.1558646410703659,
      "learning_rate": 0.00017479253587639266,
      "loss": 1.8978,
      "step": 2750
    },
    {
      "epoch": 0.26,
      "eval_bertscore": 0.7466126680374146,
      "eval_loss": 1.9045982360839844,
      "eval_rouge1": 0.6616225540296956,
      "eval_rouge2": 0.37370762164745913,
      "eval_rougeL": 0.5759418528371097,
      "eval_rougeLsum": 0.6479977636906877,
      "eval_runtime": 21.8772,
      "eval_samples_per_second": 1.371,
      "eval_steps_per_second": 0.686,
      "step": 2800
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.14783035218715668,
      "learning_rate": 0.00017250011462106277,
      "loss": 1.8978,
      "step": 3000
    },
    {
      "epoch": 0.28,
      "eval_bertscore": 0.7485571503639221,
      "eval_loss": 1.9035439491271973,
      "eval_rouge1": 0.6664050030501707,
      "eval_rouge2": 0.379492440917784,
      "eval_rougeL": 0.5806973731221475,
      "eval_rougeLsum": 0.6524346156604702,
      "eval_runtime": 21.9217,
      "eval_samples_per_second": 1.369,
      "eval_steps_per_second": 0.684,
      "step": 3000
    },
    {
      "epoch": 0.29,
      "eval_bertscore": 0.7483461499214172,
      "eval_loss": 1.9022458791732788,
      "eval_rouge1": 0.6618989733136488,
      "eval_rouge2": 0.37377379177271053,
      "eval_rougeL": 0.5780989082173933,
      "eval_rougeLsum": 0.6490379362631586,
      "eval_runtime": 21.7847,
      "eval_samples_per_second": 1.377,
      "eval_steps_per_second": 0.689,
      "step": 3200
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.16484151780605316,
      "learning_rate": 0.0001702076933657329,
      "loss": 1.8715,
      "step": 3250
    },
    {
      "epoch": 0.31,
      "eval_bertscore": 0.7490711212158203,
      "eval_loss": 1.9013088941574097,
      "eval_rouge1": 0.6638141306545007,
      "eval_rouge2": 0.37356255553691553,
      "eval_rougeL": 0.577975450251653,
      "eval_rougeLsum": 0.6492478632295806,
      "eval_runtime": 21.8807,
      "eval_samples_per_second": 1.371,
      "eval_steps_per_second": 0.686,
      "step": 3400
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.14130128920078278,
      "learning_rate": 0.000167915272110403,
      "loss": 1.8819,
      "step": 3500
    },
    {
      "epoch": 0.33,
      "eval_bertscore": 0.7475283741950989,
      "eval_loss": 1.9002223014831543,
      "eval_rouge1": 0.6628836314413511,
      "eval_rouge2": 0.37179988805094977,
      "eval_rougeL": 0.5764222388923268,
      "eval_rougeLsum": 0.649864229310889,
      "eval_runtime": 22.124,
      "eval_samples_per_second": 1.356,
      "eval_steps_per_second": 0.678,
      "step": 3600
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.1494186818599701,
      "learning_rate": 0.00016562285085507315,
      "loss": 1.8828,
      "step": 3750
    },
    {
      "epoch": 0.35,
      "eval_bertscore": 0.7486498951911926,
      "eval_loss": 1.9011151790618896,
      "eval_rouge1": 0.6669673680023924,
      "eval_rouge2": 0.3771780440183751,
      "eval_rougeL": 0.5792518624130161,
      "eval_rougeLsum": 0.6534484242953056,
      "eval_runtime": 21.813,
      "eval_samples_per_second": 1.375,
      "eval_steps_per_second": 0.688,
      "step": 3800
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.14803479611873627,
      "learning_rate": 0.00016333042959974325,
      "loss": 1.8761,
      "step": 4000
    },
    {
      "epoch": 0.37,
      "eval_bertscore": 0.7471507787704468,
      "eval_loss": 1.9001713991165161,
      "eval_rouge1": 0.6651735220672027,
      "eval_rouge2": 0.3736698451416937,
      "eval_rougeL": 0.5779938808281732,
      "eval_rougeLsum": 0.6509815118131576,
      "eval_runtime": 21.5004,
      "eval_samples_per_second": 1.395,
      "eval_steps_per_second": 0.698,
      "step": 4000
    },
    {
      "epoch": 0.39,
      "eval_bertscore": 0.7485501766204834,
      "eval_loss": 1.8993827104568481,
      "eval_rouge1": 0.6646424082737133,
      "eval_rouge2": 0.37318485364862475,
      "eval_rougeL": 0.5773338159759467,
      "eval_rougeLsum": 0.6507594353103527,
      "eval_runtime": 21.2963,
      "eval_samples_per_second": 1.409,
      "eval_steps_per_second": 0.704,
      "step": 4200
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.15562959015369415,
      "learning_rate": 0.0001610380083444134,
      "loss": 1.8672,
      "step": 4250
    },
    {
      "epoch": 0.4,
      "eval_bertscore": 0.7469989061355591,
      "eval_loss": 1.900540828704834,
      "eval_rouge1": 0.6620664558691891,
      "eval_rouge2": 0.37299419371215703,
      "eval_rougeL": 0.5765442194831125,
      "eval_rougeLsum": 0.6472642385429858,
      "eval_runtime": 21.9086,
      "eval_samples_per_second": 1.369,
      "eval_steps_per_second": 0.685,
      "step": 4400
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.15420928597450256,
      "learning_rate": 0.0001587455870890835,
      "loss": 1.8754,
      "step": 4500
    },
    {
      "epoch": 0.42,
      "eval_bertscore": 0.7475299835205078,
      "eval_loss": 1.8988685607910156,
      "eval_rouge1": 0.6656661780424216,
      "eval_rouge2": 0.37467258880478527,
      "eval_rougeL": 0.5770800519970718,
      "eval_rougeLsum": 0.6522703864288166,
      "eval_runtime": 22.063,
      "eval_samples_per_second": 1.36,
      "eval_steps_per_second": 0.68,
      "step": 4600
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.15809176862239838,
      "learning_rate": 0.00015645316583375363,
      "loss": 1.8848,
      "step": 4750
    },
    {
      "epoch": 0.44,
      "eval_bertscore": 0.7490234375,
      "eval_loss": 1.8991097211837769,
      "eval_rouge1": 0.6651730257289085,
      "eval_rouge2": 0.3778893043274054,
      "eval_rougeL": 0.5782673838033503,
      "eval_rougeLsum": 0.6516865674488727,
      "eval_runtime": 22.0202,
      "eval_samples_per_second": 1.362,
      "eval_steps_per_second": 0.681,
      "step": 4800
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.17979757487773895,
      "learning_rate": 0.00015416074457842374,
      "loss": 1.8851,
      "step": 5000
    },
    {
      "epoch": 0.46,
      "eval_bertscore": 0.7492111325263977,
      "eval_loss": 1.897339940071106,
      "eval_rouge1": 0.665920573890169,
      "eval_rouge2": 0.37917993898535385,
      "eval_rougeL": 0.5800236892888617,
      "eval_rougeLsum": 0.6529131688355863,
      "eval_runtime": 21.6103,
      "eval_samples_per_second": 1.388,
      "eval_steps_per_second": 0.694,
      "step": 5000
    },
    {
      "epoch": 0.48,
      "eval_bertscore": 0.7491253614425659,
      "eval_loss": 1.897528052330017,
      "eval_rouge1": 0.6653452054219615,
      "eval_rouge2": 0.3759208437918665,
      "eval_rougeL": 0.5776757077854651,
      "eval_rougeLsum": 0.6511876484723524,
      "eval_runtime": 21.3101,
      "eval_samples_per_second": 1.408,
      "eval_steps_per_second": 0.704,
      "step": 5200
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.16869671642780304,
      "learning_rate": 0.00015186832332309387,
      "loss": 1.8783,
      "step": 5250
    },
    {
      "epoch": 0.5,
      "eval_bertscore": 0.7494469881057739,
      "eval_loss": 1.895969271659851,
      "eval_rouge1": 0.6660951369469854,
      "eval_rouge2": 0.3764077134133328,
      "eval_rougeL": 0.578785826234568,
      "eval_rougeLsum": 0.6525967284041656,
      "eval_runtime": 21.7955,
      "eval_samples_per_second": 1.376,
      "eval_steps_per_second": 0.688,
      "step": 5400
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.15996231138706207,
      "learning_rate": 0.00014957590206776398,
      "loss": 1.8805,
      "step": 5500
    },
    {
      "epoch": 0.51,
      "eval_bertscore": 0.7486470341682434,
      "eval_loss": 1.8955131769180298,
      "eval_rouge1": 0.6670292173522965,
      "eval_rouge2": 0.37457018529010144,
      "eval_rougeL": 0.5775243235432015,
      "eval_rougeLsum": 0.652574079807632,
      "eval_runtime": 21.7576,
      "eval_samples_per_second": 1.379,
      "eval_steps_per_second": 0.689,
      "step": 5600
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.17192547023296356,
      "learning_rate": 0.00014728348081243412,
      "loss": 1.8884,
      "step": 5750
    },
    {
      "epoch": 0.53,
      "eval_bertscore": 0.7483081817626953,
      "eval_loss": 1.895763874053955,
      "eval_rouge1": 0.6659275328276997,
      "eval_rouge2": 0.3778666475350364,
      "eval_rougeL": 0.579425140056643,
      "eval_rougeLsum": 0.6515870828784887,
      "eval_runtime": 21.6648,
      "eval_samples_per_second": 1.385,
      "eval_steps_per_second": 0.692,
      "step": 5800
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.15838442742824554,
      "learning_rate": 0.00014499105955710422,
      "loss": 1.8913,
      "step": 6000
    },
    {
      "epoch": 0.55,
      "eval_bertscore": 0.7493732571601868,
      "eval_loss": 1.8914682865142822,
      "eval_rouge1": 0.6669695240447069,
      "eval_rouge2": 0.3769441114214874,
      "eval_rougeL": 0.5798986667152066,
      "eval_rougeLsum": 0.6534527583592111,
      "eval_runtime": 21.4686,
      "eval_samples_per_second": 1.397,
      "eval_steps_per_second": 0.699,
      "step": 6000
    },
    {
      "epoch": 0.57,
      "eval_bertscore": 0.7510559558868408,
      "eval_loss": 1.8923884630203247,
      "eval_rouge1": 0.6677938121282943,
      "eval_rouge2": 0.37854575387307554,
      "eval_rougeL": 0.5817052753830161,
      "eval_rougeLsum": 0.6534737907551461,
      "eval_runtime": 21.593,
      "eval_samples_per_second": 1.389,
      "eval_steps_per_second": 0.695,
      "step": 6200
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.15312573313713074,
      "learning_rate": 0.00014269863830177433,
      "loss": 1.8705,
      "step": 6250
    },
    {
      "epoch": 0.59,
      "eval_bertscore": 0.7479371428489685,
      "eval_loss": 1.891802430152893,
      "eval_rouge1": 0.6658674357402252,
      "eval_rouge2": 0.3757712649269345,
      "eval_rougeL": 0.5791817270712349,
      "eval_rougeLsum": 0.6509960265397259,
      "eval_runtime": 21.8726,
      "eval_samples_per_second": 1.372,
      "eval_steps_per_second": 0.686,
      "step": 6400
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.15844614803791046,
      "learning_rate": 0.00014040621704644447,
      "loss": 1.8643,
      "step": 6500
    },
    {
      "epoch": 0.61,
      "eval_bertscore": 0.7484550476074219,
      "eval_loss": 1.8903728723526,
      "eval_rouge1": 0.6683828816523312,
      "eval_rouge2": 0.37811618722345436,
      "eval_rougeL": 0.5802581730590705,
      "eval_rougeLsum": 0.6534402764651661,
      "eval_runtime": 21.8343,
      "eval_samples_per_second": 1.374,
      "eval_steps_per_second": 0.687,
      "step": 6600
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.1661410629749298,
      "learning_rate": 0.00013811379579111458,
      "loss": 1.877,
      "step": 6750
    },
    {
      "epoch": 0.62,
      "eval_bertscore": 0.747416615486145,
      "eval_loss": 1.8915189504623413,
      "eval_rouge1": 0.6644777881148224,
      "eval_rouge2": 0.3747657029706615,
      "eval_rougeL": 0.5793454557198501,
      "eval_rougeLsum": 0.6521716611395593,
      "eval_runtime": 21.523,
      "eval_samples_per_second": 1.394,
      "eval_steps_per_second": 0.697,
      "step": 6800
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.16483080387115479,
      "learning_rate": 0.00013582137453578468,
      "loss": 1.8792,
      "step": 7000
    },
    {
      "epoch": 0.64,
      "eval_bertscore": 0.7480576634407043,
      "eval_loss": 1.8913365602493286,
      "eval_rouge1": 0.6655764268912302,
      "eval_rouge2": 0.3757671289735428,
      "eval_rougeL": 0.577951380212153,
      "eval_rougeLsum": 0.6507587412359694,
      "eval_runtime": 21.3067,
      "eval_samples_per_second": 1.408,
      "eval_steps_per_second": 0.704,
      "step": 7000
    },
    {
      "epoch": 0.66,
      "eval_bertscore": 0.7505319714546204,
      "eval_loss": 1.889721155166626,
      "eval_rouge1": 0.6706532239207523,
      "eval_rouge2": 0.37986537729431724,
      "eval_rougeL": 0.5824624008038861,
      "eval_rougeLsum": 0.6571986550416876,
      "eval_runtime": 21.8193,
      "eval_samples_per_second": 1.375,
      "eval_steps_per_second": 0.687,
      "step": 7200
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.1685444712638855,
      "learning_rate": 0.00013352895328045482,
      "loss": 1.8748,
      "step": 7250
    },
    {
      "epoch": 0.68,
      "eval_bertscore": 0.7472131252288818,
      "eval_loss": 1.889514684677124,
      "eval_rouge1": 0.6647481520892182,
      "eval_rouge2": 0.3727968089505218,
      "eval_rougeL": 0.5772333167389081,
      "eval_rougeLsum": 0.6503920840351167,
      "eval_runtime": 21.5794,
      "eval_samples_per_second": 1.39,
      "eval_steps_per_second": 0.695,
      "step": 7400
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.16196218132972717,
      "learning_rate": 0.00013123653202512493,
      "loss": 1.8958,
      "step": 7500
    },
    {
      "epoch": 0.7,
      "eval_bertscore": 0.7467525005340576,
      "eval_loss": 1.8874704837799072,
      "eval_rouge1": 0.6652789954777591,
      "eval_rouge2": 0.3747211875622626,
      "eval_rougeL": 0.5781018250975862,
      "eval_rougeLsum": 0.6512065884264598,
      "eval_runtime": 21.6436,
      "eval_samples_per_second": 1.386,
      "eval_steps_per_second": 0.693,
      "step": 7600
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.17379231750965118,
      "learning_rate": 0.00012894411076979506,
      "loss": 1.8655,
      "step": 7750
    },
    {
      "epoch": 0.72,
      "eval_bertscore": 0.7478018403053284,
      "eval_loss": 1.8879252672195435,
      "eval_rouge1": 0.6676077444849423,
      "eval_rouge2": 0.37550824667101645,
      "eval_rougeL": 0.5792625587400696,
      "eval_rougeLsum": 0.6537654224373248,
      "eval_runtime": 21.8026,
      "eval_samples_per_second": 1.376,
      "eval_steps_per_second": 0.688,
      "step": 7800
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.17975503206253052,
      "learning_rate": 0.00012665168951446517,
      "loss": 1.8593,
      "step": 8000
    },
    {
      "epoch": 0.73,
      "eval_bertscore": 0.7490061521530151,
      "eval_loss": 1.8872514963150024,
      "eval_rouge1": 0.6677074837057098,
      "eval_rouge2": 0.37723681410973775,
      "eval_rougeL": 0.5806554105436175,
      "eval_rougeLsum": 0.6531691046113964,
      "eval_runtime": 21.2682,
      "eval_samples_per_second": 1.411,
      "eval_steps_per_second": 0.705,
      "step": 8000
    }
  ],
  "logging_steps": 250,
  "max_steps": 21812,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 800,
  "total_flos": 5.3929253535744e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}