rakhman-llm committed
Commit 03b53d5 · verified · 1 Parent(s): 053b9f3

Training in progress, step 7500, checkpoint
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b6beb99d53dc85acda2f8ace3d229350875cf63a734ef325761a0e457e7f612
+oid sha256:3d943046e9cee201d90914fe8b465ff2a60ed77f63734520ab719ff090af0b77
 size 891558696
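The files above are Git LFS pointers: the oid is the SHA-256 of the stored object and size is its byte count. A minimal sketch (not part of this repo) for checking a locally downloaded last-checkpoint/model.safetensors against the new pointer; the hash and size are taken from this diff, the path and everything else are illustrative assumptions:

import hashlib
import os

# Values taken from the updated LFS pointer in this commit.
EXPECTED_OID = "3d943046e9cee201d90914fe8b465ff2a60ed77f63734520ab719ff090af0b77"
EXPECTED_SIZE = 891558696
PATH = "last-checkpoint/model.safetensors"  # assumed local download location

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the ~850 MB checkpoint is not loaded into memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(PATH) == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("model.safetensors matches the LFS pointer")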
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2701e97535a87882ab713841abd901bf0568af17cf0a339a8c12431670adb4b
+oid sha256:78ae5dc18c5c42b1ab70202f0fb9bb247ed17fda7375648f017acb36f68f1f78
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f110c3c4fe22448ff8783649fbef7b54b070a7c45b4469624f986b9a7cd74b3
+oid sha256:9e6d24d84d7824f21eaa5899dfd03019f712068506f4aab4dbb4d9b68c233cb1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ccfed333de7ee72e49042ff43701fa36052369ab2407e1f957a0a822d1a798f
+oid sha256:551dce9770f5878295b9620e9ba43823a7a814027d76d3160674655baa064b21
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.08321517705917358,
-  "best_model_checkpoint": "./fine-tuned/checkpoint-6000",
-  "epoch": 1.12,
+  "best_metric": 0.08285799622535706,
+  "best_model_checkpoint": "./fine-tuned/checkpoint-7500",
+  "epoch": 1.2,
   "eval_steps": 500,
-  "global_step": 7000,
+  "global_step": 7500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1099,6 +1099,84 @@
       "eval_samples_per_second": 17.139,
       "eval_steps_per_second": 2.142,
       "step": 7000
+    },
+    {
+      "epoch": 1.1280000000000001,
+      "grad_norm": 5663.18603515625,
+      "learning_rate": 1.872e-05,
+      "loss": 0.0537,
+      "step": 7050
+    },
+    {
+      "epoch": 1.1360000000000001,
+      "grad_norm": 9569.765625,
+      "learning_rate": 1.8639999999999998e-05,
+      "loss": 0.0607,
+      "step": 7100
+    },
+    {
+      "epoch": 1.144,
+      "grad_norm": 7370.98046875,
+      "learning_rate": 1.8560000000000002e-05,
+      "loss": 0.0607,
+      "step": 7150
+    },
+    {
+      "epoch": 1.152,
+      "grad_norm": 5951.6533203125,
+      "learning_rate": 1.848e-05,
+      "loss": 0.0547,
+      "step": 7200
+    },
+    {
+      "epoch": 1.16,
+      "grad_norm": 8285.0830078125,
+      "learning_rate": 1.84e-05,
+      "loss": 0.0589,
+      "step": 7250
+    },
+    {
+      "epoch": 1.168,
+      "grad_norm": 7549.8271484375,
+      "learning_rate": 1.832e-05,
+      "loss": 0.0587,
+      "step": 7300
+    },
+    {
+      "epoch": 1.176,
+      "grad_norm": 7480.25927734375,
+      "learning_rate": 1.824e-05,
+      "loss": 0.058,
+      "step": 7350
+    },
+    {
+      "epoch": 1.184,
+      "grad_norm": 35994.15234375,
+      "learning_rate": 1.816e-05,
+      "loss": 0.0585,
+      "step": 7400
+    },
+    {
+      "epoch": 1.192,
+      "grad_norm": 7489.05859375,
+      "learning_rate": 1.808e-05,
+      "loss": 0.0616,
+      "step": 7450
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 6134.80126953125,
+      "learning_rate": 1.8e-05,
+      "loss": 0.0572,
+      "step": 7500
+    },
+    {
+      "epoch": 1.2,
+      "eval_loss": 0.08285799622535706,
+      "eval_runtime": 116.9169,
+      "eval_samples_per_second": 17.106,
+      "eval_steps_per_second": 2.138,
+      "step": 7500
     }
   ],
   "logging_steps": 50,
@@ -1118,7 +1196,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.410164187136e+16,
+  "total_flos": 3.65374734336e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null