{
  "best_global_step": 100,
  "best_metric": 0.268216073513031,
  "best_model_checkpoint": "/content/models/gemma_qlora_lmh/checkpoint-100",
  "epoch": 1.9607843137254903,
  "eval_steps": 20,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 2.652500593662262,
      "epoch": 0.39215686274509803,
      "grad_norm": 28.916894912719727,
      "learning_rate": 8.137254901960784e-06,
      "loss": 0.68,
      "mean_token_accuracy": 0.71015625,
      "num_tokens": 48092.0,
      "step": 20
    },
    {
      "epoch": 0.39215686274509803,
      "eval_entropy": 2.5002221510960507,
      "eval_loss": 0.31822389364242554,
      "eval_mean_token_accuracy": 0.8417832163664011,
      "eval_num_tokens": 48092.0,
      "eval_runtime": 2.4841,
      "eval_samples_per_second": 81.719,
      "eval_steps_per_second": 5.233,
      "step": 20
    },
    {
      "entropy": 2.523895502090454,
      "epoch": 0.7843137254901961,
      "grad_norm": 2.8356525897979736,
      "learning_rate": 6.176470588235295e-06,
      "loss": 0.3528,
      "mean_token_accuracy": 0.79765625,
      "num_tokens": 98284.0,
      "step": 40
    },
    {
      "epoch": 0.7843137254901961,
      "eval_entropy": 2.480310696821946,
      "eval_loss": 0.3085189759731293,
      "eval_mean_token_accuracy": 0.831075173157912,
      "eval_num_tokens": 98284.0,
      "eval_runtime": 2.4982,
      "eval_samples_per_second": 81.258,
      "eval_steps_per_second": 5.204,
      "step": 40
    },
    {
      "entropy": 2.519091600179672,
      "epoch": 1.1764705882352942,
      "grad_norm": 8.396736145019531,
      "learning_rate": 4.215686274509805e-06,
      "loss": 0.2831,
      "mean_token_accuracy": 0.8506696432828903,
      "num_tokens": 147466.0,
      "step": 60
    },
    {
      "epoch": 1.1764705882352942,
      "eval_entropy": 2.4424293774824877,
      "eval_loss": 0.2718758285045624,
      "eval_mean_token_accuracy": 0.8610139855971704,
      "eval_num_tokens": 147466.0,
      "eval_runtime": 2.5004,
      "eval_samples_per_second": 81.187,
      "eval_steps_per_second": 5.199,
      "step": 60
    },
    {
      "entropy": 2.479978394508362,
      "epoch": 1.5686274509803921,
      "grad_norm": 9.535552978515625,
      "learning_rate": 2.254901960784314e-06,
      "loss": 0.2497,
      "mean_token_accuracy": 0.8796875,
      "num_tokens": 197917.0,
      "step": 80
    },
    {
      "epoch": 1.5686274509803921,
      "eval_entropy": 2.43264233149015,
      "eval_loss": 0.2887224853038788,
      "eval_mean_token_accuracy": 0.8634178317510165,
      "eval_num_tokens": 197917.0,
      "eval_runtime": 2.5098,
      "eval_samples_per_second": 80.883,
      "eval_steps_per_second": 5.18,
      "step": 80
    },
    {
      "entropy": 2.4772088170051574,
      "epoch": 1.9607843137254903,
      "grad_norm": 3.218280076980591,
      "learning_rate": 2.9411764705882356e-07,
      "loss": 0.2359,
      "mean_token_accuracy": 0.89140625,
      "num_tokens": 247245.0,
      "step": 100
    },
    {
      "epoch": 1.9607843137254903,
      "eval_entropy": 2.4411074198209324,
      "eval_loss": 0.268216073513031,
      "eval_mean_token_accuracy": 0.8658216779048626,
      "eval_num_tokens": 247245.0,
      "eval_runtime": 2.5076,
      "eval_samples_per_second": 80.953,
      "eval_steps_per_second": 5.184,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 102,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6148418735678976.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}