rakhman-llm committed (verified)
Commit efb0fad · 1 Parent(s): 13030b2

Training in progress, step 6500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:595cdcb5015115c6cbe2e6edbc30ee6705f67aa46b94c988f719a34cfbffd2cf
+oid sha256:a6312a7d18158015b6ca3368ce985616c02baba57e749583a73698e9311365a8
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:413d3093ca25037d97577232c37b28c1f8751cf177fef08c1cb404465db4cfd4
+oid sha256:811f77f1e7a4d53f0449c64f994693035f7345177e8f1d35540ff8163935e513
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6703b98a291d6e4043156fd5230a9d97acbef451284cebbec159795253cbebaf
+oid sha256:5ed5fba27a3a8a24d10b87fd853e1ccad54f52ff4b84d894d5cce98569dfa342
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f222e945c4954eb35a2a25c9f653bde2dc7b8763271375e010f0439b14074e8d
+oid sha256:a7a7b1b30309b1c3b11ce15f113db2c6e3df169af496faf041634b692afa8937
 size 1064
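
All four files above are Git LFS pointers rather than the binaries themselves: each pointer's oid sha256 is the digest of the actual checkpoint blob and size is its length in bytes (the sizes are unchanged here; only the contents, and therefore the digests, change). A minimal sketch of checking a locally downloaded copy against these pointers — the local paths are assumptions, and the expected digests are copied from the updated pointers in this commit:

```python
import hashlib
from pathlib import Path

# Expected digests, copied from the "+" lines of the LFS pointers in this commit.
EXPECTED = {
    "last-checkpoint/model.safetensors": "a6312a7d18158015b6ca3368ce985616c02baba57e749583a73698e9311365a8",
    "last-checkpoint/optimizer.pt": "811f77f1e7a4d53f0449c64f994693035f7345177e8f1d35540ff8163935e513",
    "last-checkpoint/rng_state.pth": "5ed5fba27a3a8a24d10b87fd853e1ccad54f52ff4b84d894d5cce98569dfa342",
    "last-checkpoint/scheduler.pt": "a7a7b1b30309b1c3b11ce15f113db2c6e3df169af496faf041634b692afa8937",
}

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash in 1 MiB chunks so the ~0.9 GB and ~1.8 GB shards never sit in memory at once."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

for name, oid in EXPECTED.items():
    actual = sha256_of(Path(name))  # assumes the repo was cloned and `git lfs pull` has run
    print(f"{name}: {'OK' if actual == oid else 'MISMATCH ' + actual}")
```
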
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.08321517705917358,
   "best_model_checkpoint": "./fine-tuned/checkpoint-6000",
-  "epoch": 0.96,
+  "epoch": 1.04,
   "eval_steps": 500,
-  "global_step": 6000,
+  "global_step": 6500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -943,6 +943,84 @@
       "eval_samples_per_second": 17.142,
       "eval_steps_per_second": 2.143,
       "step": 6000
+    },
+    {
+      "epoch": 0.968,
+      "grad_norm": 5835.19189453125,
+      "learning_rate": 2.0320000000000002e-05,
+      "loss": 0.0643,
+      "step": 6050
+    },
+    {
+      "epoch": 0.976,
+      "grad_norm": 5896.76318359375,
+      "learning_rate": 2.024e-05,
+      "loss": 0.0625,
+      "step": 6100
+    },
+    {
+      "epoch": 0.984,
+      "grad_norm": 6958.45751953125,
+      "learning_rate": 2.016e-05,
+      "loss": 0.0657,
+      "step": 6150
+    },
+    {
+      "epoch": 0.992,
+      "grad_norm": 4680.04736328125,
+      "learning_rate": 2.008e-05,
+      "loss": 0.0632,
+      "step": 6200
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 8230.8056640625,
+      "learning_rate": 1.9999999999999998e-05,
+      "loss": 0.0603,
+      "step": 6250
+    },
+    {
+      "epoch": 1.008,
+      "grad_norm": 5693.77001953125,
+      "learning_rate": 1.9920000000000002e-05,
+      "loss": 0.0574,
+      "step": 6300
+    },
+    {
+      "epoch": 1.016,
+      "grad_norm": 14030.3583984375,
+      "learning_rate": 1.984e-05,
+      "loss": 0.0563,
+      "step": 6350
+    },
+    {
+      "epoch": 1.024,
+      "grad_norm": 11693.09375,
+      "learning_rate": 1.976e-05,
+      "loss": 0.0558,
+      "step": 6400
+    },
+    {
+      "epoch": 1.032,
+      "grad_norm": 5772.1845703125,
+      "learning_rate": 1.968e-05,
+      "loss": 0.0544,
+      "step": 6450
+    },
+    {
+      "epoch": 1.04,
+      "grad_norm": 8641.919921875,
+      "learning_rate": 1.96e-05,
+      "loss": 0.0606,
+      "step": 6500
+    },
+    {
+      "epoch": 1.04,
+      "eval_loss": 0.08356834203004837,
+      "eval_runtime": 116.7914,
+      "eval_samples_per_second": 17.125,
+      "eval_steps_per_second": 2.141,
+      "step": 6500
     }
   ],
   "logging_steps": 50,
@@ -962,7 +1040,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.922997874688e+16,
+  "total_flos": 3.166581030912e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null