AsmaaElnagger committed
Commit 61a582f · verified · 1 Parent(s): 9d1f653

Diabetic_RetinoPathy_detection

README.md ADDED
@@ -0,0 +1,93 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: facebook/dinov2-base
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: dinov2-base-finetuned-dia_eye
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: imagefolder
+       type: imagefolder
+       config: default
+       split: train
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.88
+     - name: F1
+       type: f1
+       value: 0.8802333980766498
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # dinov2-base-finetuned-dia_eye
+
+ This model is a fine-tuned version of [facebook/dinov2-base](https://huggingface.co/facebook/dinov2-base) on the imagefolder dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4215
+ - Accuracy: 0.88
+ - F1: 0.8802
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08 and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
+ | 1.2812 | 1.0 | 250 | 1.1659 | 0.494 | 0.4460 |
+ | 1.0885 | 2.0 | 500 | 1.1570 | 0.46 | 0.4098 |
+ | 1.0673 | 3.0 | 750 | 1.1951 | 0.51 | 0.4777 |
+ | 0.9863 | 4.0 | 1000 | 0.8480 | 0.622 | 0.6316 |
+ | 0.8241 | 5.0 | 1250 | 0.7028 | 0.706 | 0.7049 |
+ | 0.7208 | 6.0 | 1500 | 0.7059 | 0.712 | 0.6910 |
+ | 0.5563 | 7.0 | 1750 | 0.5728 | 0.782 | 0.7813 |
+ | 0.3988 | 8.0 | 2000 | 0.5472 | 0.826 | 0.8239 |
+ | 0.4643 | 9.0 | 2250 | 0.4736 | 0.85 | 0.8503 |
+ | 0.2625 | 10.0 | 2500 | 0.4215 | 0.88 | 0.8802 |
+
+
+ ### Framework versions
+
+ - Transformers 4.48.3
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.4.0
+ - Tokenizers 0.21.0
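The hyperparameters listed in the model card above map onto `transformers.TrainingArguments` roughly as sketched below. This is a hedged reconstruction, not code from the commit: `output_dir`, the epoch-level evaluation strategy, and `logging_steps` are assumptions inferred from `trainer_state.json`, everything else comes from the card.

```python
# Hedged reconstruction of the training setup described in README.md.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="dinov2-base-finetuned-dia_eye",  # assumed; matches the checkpoint path in trainer_state.json
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,   # 4 x 4 = total_train_batch_size 16
    num_train_epochs=10,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    optim="adamw_torch",             # AdamW with betas=(0.9, 0.999), epsilon=1e-08
    seed=42,
    eval_strategy="epoch",           # inferred from the per-epoch eval rows above
    logging_steps=10,                # matches "logging_steps": 10 in trainer_state.json
)
```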
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.88,
+ "eval_f1": 0.8802333980766498,
+ "eval_loss": 0.4214743971824646,
+ "eval_runtime": 12.3028,
+ "eval_samples_per_second": 40.641,
+ "eval_steps_per_second": 10.16,
+ "total_flos": 4.0857422266368e+18,
+ "train_loss": 0.7867561736106873,
+ "train_runtime": 2885.7234,
+ "train_samples_per_second": 13.861,
+ "train_steps_per_second": 0.866
+ }
config.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "_name_or_path": "facebook/dinov2-base",
+ "apply_layernorm": true,
+ "architectures": [
+ "Dinov2ForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "drop_path_rate": 0.0,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "0",
+ "1": "1",
+ "2": "2",
+ "3": "3",
+ "4": "4"
+ },
+ "image_size": 518,
+ "initializer_range": 0.02,
+ "label2id": {
+ "0": 0,
+ "1": 1,
+ "2": 2,
+ "3": 3,
+ "4": 4
+ },
+ "layer_norm_eps": 1e-06,
+ "layerscale_value": 1.0,
+ "mlp_ratio": 4,
+ "model_type": "dinov2",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "out_features": [
+ "stage12"
+ ],
+ "out_indices": [
+ 12
+ ],
+ "patch_size": 14,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "reshape_hidden_states": true,
+ "stage_names": [
+ "stem",
+ "stage1",
+ "stage2",
+ "stage3",
+ "stage4",
+ "stage5",
+ "stage6",
+ "stage7",
+ "stage8",
+ "stage9",
+ "stage10",
+ "stage11",
+ "stage12"
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.48.3",
+ "use_swiglu_ffn": false
+ }
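For reference, a minimal sketch (assumed usage, not part of the commit) of how this file is consumed: `AutoConfig` parses config.json, and `id2label` carries the five class ids used by the classification head. The local directory name is an assumption.

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("dinov2-base-finetuned-dia_eye")  # assumed local clone of this repo
print(config.model_type)    # "dinov2"
print(config.num_labels)    # 5
print(config.id2label)      # {0: "0", 1: "1", 2: "2", 3: "3", 4: "4"}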
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.88,
+ "eval_f1": 0.8802333980766498,
+ "eval_loss": 0.4214743971824646,
+ "eval_runtime": 12.3028,
+ "eval_samples_per_second": 40.641,
+ "eval_steps_per_second": 10.16
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f350755357572290115bb4943f759779842e1804bee525a3654f11686f63a61
+ size 346378372
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "crop_size": {
+ "height": 224,
+ "width": 224
+ },
+ "do_center_crop": true,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.485,
+ 0.456,
+ 0.406
+ ],
+ "image_processor_type": "BitImageProcessor",
+ "image_std": [
+ 0.229,
+ 0.224,
+ 0.225
+ ],
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 256
+ }
+ }
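With config.json, model.safetensors, and this preprocessor_config.json together, inference looks roughly like the sketch below. The local directory name and the input file `fundus.jpg` are assumptions, not part of the commit.

```python
# Hedged inference sketch. AutoImageProcessor picks up preprocessor_config.json
# (resize shortest edge to 256, center-crop to 224x224, rescale by 1/255,
# normalize with ImageNet mean/std); the model loads config.json + model.safetensors.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_dir = "dinov2-base-finetuned-dia_eye"           # assumed local clone of this repo
processor = AutoImageProcessor.from_pretrained(model_dir)
model = AutoModelForImageClassification.from_pretrained(model_dir)
model.eval()

image = Image.open("fundus.jpg").convert("RGB")       # hypothetical retina image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits                   # shape (1, 5)

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])                    # one of the class ids "0".."4"
```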
runs/Mar17_11-24-51_8156a47f4473/events.out.tfevents.1742210722.8156a47f4473.351.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5f6926c5945840fef669662a12fd78d4018f9a178f66e9a64ecf7b8aa014399
+ size 62282
runs/Mar17_11-24-51_8156a47f4473/events.out.tfevents.1742213635.8156a47f4473.351.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07e8a4e177f769dd635bc073a3cc5da155be70f3923e7151afede8d409555cd7
+ size 457
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 10.0,
+ "total_flos": 4.0857422266368e+18,
+ "train_loss": 0.7867561736106873,
+ "train_runtime": 2885.7234,
+ "train_samples_per_second": 13.861,
+ "train_steps_per_second": 0.866
+ }
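As a quick sanity check, the throughput figures above are mutually consistent; the per-epoch dataset size in the sketch below is inferred, not stated anywhere in the commit.

```python
# Consistency check of train_results.json (a sketch; dataset size is inferred).
train_runtime = 2885.7234            # seconds
samples_per_second = 13.861
steps_per_second = 0.866

total_samples = train_runtime * samples_per_second    # ~40,000 samples over 10 epochs
total_steps = train_runtime * steps_per_second        # ~2,500 optimizer steps
print(round(total_samples / 10))                      # ~4,000 images seen per epoch
print(round(total_samples / total_steps))             # ~16, the effective batch size from the README
```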
trainer_state.json ADDED
@@ -0,0 +1,1892 @@
1
+ {
2
+ "best_metric": 0.88,
3
+ "best_model_checkpoint": "dinov2-base-finetuned-dia_eye/checkpoint-2500",
4
+ "epoch": 10.0,
5
+ "eval_steps": 500,
6
+ "global_step": 2500,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.04,
13
+ "grad_norm": 90.73234558105469,
14
+ "learning_rate": 8.000000000000001e-07,
15
+ "loss": 1.7164,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 0.08,
20
+ "grad_norm": 120.8720932006836,
21
+ "learning_rate": 1.6000000000000001e-06,
22
+ "loss": 1.5765,
23
+ "step": 20
24
+ },
25
+ {
26
+ "epoch": 0.12,
27
+ "grad_norm": 83.37552642822266,
28
+ "learning_rate": 2.4000000000000003e-06,
29
+ "loss": 1.4828,
30
+ "step": 30
31
+ },
32
+ {
33
+ "epoch": 0.16,
34
+ "grad_norm": 72.08551025390625,
35
+ "learning_rate": 3.2000000000000003e-06,
36
+ "loss": 1.4564,
37
+ "step": 40
38
+ },
39
+ {
40
+ "epoch": 0.2,
41
+ "grad_norm": 95.60829162597656,
42
+ "learning_rate": 4.000000000000001e-06,
43
+ "loss": 1.3918,
44
+ "step": 50
45
+ },
46
+ {
47
+ "epoch": 0.24,
48
+ "grad_norm": 113.41104888916016,
49
+ "learning_rate": 4.800000000000001e-06,
50
+ "loss": 1.3633,
51
+ "step": 60
52
+ },
53
+ {
54
+ "epoch": 0.28,
55
+ "grad_norm": 113.49620819091797,
56
+ "learning_rate": 5.600000000000001e-06,
57
+ "loss": 1.4582,
58
+ "step": 70
59
+ },
60
+ {
61
+ "epoch": 0.32,
62
+ "grad_norm": 60.3448486328125,
63
+ "learning_rate": 6.4000000000000006e-06,
64
+ "loss": 1.3945,
65
+ "step": 80
66
+ },
67
+ {
68
+ "epoch": 0.36,
69
+ "grad_norm": 87.17510223388672,
70
+ "learning_rate": 7.2000000000000005e-06,
71
+ "loss": 1.2236,
72
+ "step": 90
73
+ },
74
+ {
75
+ "epoch": 0.4,
76
+ "grad_norm": 116.16291809082031,
77
+ "learning_rate": 8.000000000000001e-06,
78
+ "loss": 1.2932,
79
+ "step": 100
80
+ },
81
+ {
82
+ "epoch": 0.44,
83
+ "grad_norm": 91.94257354736328,
84
+ "learning_rate": 8.8e-06,
85
+ "loss": 1.2999,
86
+ "step": 110
87
+ },
88
+ {
89
+ "epoch": 0.48,
90
+ "grad_norm": 85.25334167480469,
91
+ "learning_rate": 9.600000000000001e-06,
92
+ "loss": 1.4004,
93
+ "step": 120
94
+ },
95
+ {
96
+ "epoch": 0.52,
97
+ "grad_norm": 82.13865661621094,
98
+ "learning_rate": 1.04e-05,
99
+ "loss": 1.5986,
100
+ "step": 130
101
+ },
102
+ {
103
+ "epoch": 0.56,
104
+ "grad_norm": 86.27275085449219,
105
+ "learning_rate": 1.1200000000000001e-05,
106
+ "loss": 1.3174,
107
+ "step": 140
108
+ },
109
+ {
110
+ "epoch": 0.6,
111
+ "grad_norm": 120.41253662109375,
112
+ "learning_rate": 1.2e-05,
113
+ "loss": 1.3869,
114
+ "step": 150
115
+ },
116
+ {
117
+ "epoch": 0.64,
118
+ "grad_norm": 61.761695861816406,
119
+ "learning_rate": 1.2800000000000001e-05,
120
+ "loss": 1.3498,
121
+ "step": 160
122
+ },
123
+ {
124
+ "epoch": 0.68,
125
+ "grad_norm": 53.90617752075195,
126
+ "learning_rate": 1.3600000000000002e-05,
127
+ "loss": 1.1202,
128
+ "step": 170
129
+ },
130
+ {
131
+ "epoch": 0.72,
132
+ "grad_norm": 65.96098327636719,
133
+ "learning_rate": 1.4400000000000001e-05,
134
+ "loss": 1.3588,
135
+ "step": 180
136
+ },
137
+ {
138
+ "epoch": 0.76,
139
+ "grad_norm": 60.957847595214844,
140
+ "learning_rate": 1.5200000000000002e-05,
141
+ "loss": 1.4875,
142
+ "step": 190
143
+ },
144
+ {
145
+ "epoch": 0.8,
146
+ "grad_norm": 31.72243309020996,
147
+ "learning_rate": 1.6000000000000003e-05,
148
+ "loss": 1.3045,
149
+ "step": 200
150
+ },
151
+ {
152
+ "epoch": 0.84,
153
+ "grad_norm": 81.10260772705078,
154
+ "learning_rate": 1.6800000000000002e-05,
155
+ "loss": 1.2088,
156
+ "step": 210
157
+ },
158
+ {
159
+ "epoch": 0.88,
160
+ "grad_norm": 51.385196685791016,
161
+ "learning_rate": 1.76e-05,
162
+ "loss": 1.352,
163
+ "step": 220
164
+ },
165
+ {
166
+ "epoch": 0.92,
167
+ "grad_norm": 43.19595718383789,
168
+ "learning_rate": 1.8400000000000003e-05,
169
+ "loss": 1.2643,
170
+ "step": 230
171
+ },
172
+ {
173
+ "epoch": 0.96,
174
+ "grad_norm": 69.54638671875,
175
+ "learning_rate": 1.9200000000000003e-05,
176
+ "loss": 1.3476,
177
+ "step": 240
178
+ },
179
+ {
180
+ "epoch": 1.0,
181
+ "grad_norm": 39.397499084472656,
182
+ "learning_rate": 2e-05,
183
+ "loss": 1.2812,
184
+ "step": 250
185
+ },
186
+ {
187
+ "epoch": 1.0,
188
+ "eval_accuracy": 0.494,
189
+ "eval_f1": 0.4460326515069461,
190
+ "eval_loss": 1.1658940315246582,
191
+ "eval_runtime": 12.2613,
192
+ "eval_samples_per_second": 40.779,
193
+ "eval_steps_per_second": 10.195,
194
+ "step": 250
195
+ },
196
+ {
197
+ "epoch": 1.04,
198
+ "grad_norm": 29.106040954589844,
199
+ "learning_rate": 1.9911111111111112e-05,
200
+ "loss": 1.1481,
201
+ "step": 260
202
+ },
203
+ {
204
+ "epoch": 1.08,
205
+ "grad_norm": 48.481876373291016,
206
+ "learning_rate": 1.9822222222222226e-05,
207
+ "loss": 1.2368,
208
+ "step": 270
209
+ },
210
+ {
211
+ "epoch": 1.12,
212
+ "grad_norm": 36.711673736572266,
213
+ "learning_rate": 1.9733333333333336e-05,
214
+ "loss": 1.325,
215
+ "step": 280
216
+ },
217
+ {
218
+ "epoch": 1.16,
219
+ "grad_norm": 70.87812805175781,
220
+ "learning_rate": 1.9644444444444447e-05,
221
+ "loss": 1.1004,
222
+ "step": 290
223
+ },
224
+ {
225
+ "epoch": 1.2,
226
+ "grad_norm": 45.34326171875,
227
+ "learning_rate": 1.9555555555555557e-05,
228
+ "loss": 1.2207,
229
+ "step": 300
230
+ },
231
+ {
232
+ "epoch": 1.24,
233
+ "grad_norm": 39.800941467285156,
234
+ "learning_rate": 1.9466666666666668e-05,
235
+ "loss": 1.1775,
236
+ "step": 310
237
+ },
238
+ {
239
+ "epoch": 1.28,
240
+ "grad_norm": 33.798614501953125,
241
+ "learning_rate": 1.9377777777777778e-05,
242
+ "loss": 1.097,
243
+ "step": 320
244
+ },
245
+ {
246
+ "epoch": 1.32,
247
+ "grad_norm": 95.97491455078125,
248
+ "learning_rate": 1.928888888888889e-05,
249
+ "loss": 1.1752,
250
+ "step": 330
251
+ },
252
+ {
253
+ "epoch": 1.3599999999999999,
254
+ "grad_norm": 49.4578971862793,
255
+ "learning_rate": 1.9200000000000003e-05,
256
+ "loss": 1.2785,
257
+ "step": 340
258
+ },
259
+ {
260
+ "epoch": 1.4,
261
+ "grad_norm": 48.13294219970703,
262
+ "learning_rate": 1.9111111111111113e-05,
263
+ "loss": 1.3751,
264
+ "step": 350
265
+ },
266
+ {
267
+ "epoch": 1.44,
268
+ "grad_norm": 27.46150779724121,
269
+ "learning_rate": 1.9022222222222223e-05,
270
+ "loss": 1.2267,
271
+ "step": 360
272
+ },
273
+ {
274
+ "epoch": 1.48,
275
+ "grad_norm": 83.89068603515625,
276
+ "learning_rate": 1.8933333333333334e-05,
277
+ "loss": 1.1817,
278
+ "step": 370
279
+ },
280
+ {
281
+ "epoch": 1.52,
282
+ "grad_norm": 36.93865203857422,
283
+ "learning_rate": 1.8844444444444444e-05,
284
+ "loss": 1.0889,
285
+ "step": 380
286
+ },
287
+ {
288
+ "epoch": 1.56,
289
+ "grad_norm": 36.16452407836914,
290
+ "learning_rate": 1.8755555555555558e-05,
291
+ "loss": 1.1369,
292
+ "step": 390
293
+ },
294
+ {
295
+ "epoch": 1.6,
296
+ "grad_norm": 30.880001068115234,
297
+ "learning_rate": 1.866666666666667e-05,
298
+ "loss": 1.1892,
299
+ "step": 400
300
+ },
301
+ {
302
+ "epoch": 1.6400000000000001,
303
+ "grad_norm": 35.53660202026367,
304
+ "learning_rate": 1.857777777777778e-05,
305
+ "loss": 1.0495,
306
+ "step": 410
307
+ },
308
+ {
309
+ "epoch": 1.6800000000000002,
310
+ "grad_norm": 125.27716827392578,
311
+ "learning_rate": 1.848888888888889e-05,
312
+ "loss": 1.0685,
313
+ "step": 420
314
+ },
315
+ {
316
+ "epoch": 1.72,
317
+ "grad_norm": 45.547950744628906,
318
+ "learning_rate": 1.8400000000000003e-05,
319
+ "loss": 1.1031,
320
+ "step": 430
321
+ },
322
+ {
323
+ "epoch": 1.76,
324
+ "grad_norm": 54.839542388916016,
325
+ "learning_rate": 1.8311111111111114e-05,
326
+ "loss": 1.1888,
327
+ "step": 440
328
+ },
329
+ {
330
+ "epoch": 1.8,
331
+ "grad_norm": 20.931964874267578,
332
+ "learning_rate": 1.8222222222222224e-05,
333
+ "loss": 1.2119,
334
+ "step": 450
335
+ },
336
+ {
337
+ "epoch": 1.8399999999999999,
338
+ "grad_norm": 28.963083267211914,
339
+ "learning_rate": 1.8133333333333335e-05,
340
+ "loss": 1.0865,
341
+ "step": 460
342
+ },
343
+ {
344
+ "epoch": 1.88,
345
+ "grad_norm": 59.61734390258789,
346
+ "learning_rate": 1.8044444444444445e-05,
347
+ "loss": 1.1195,
348
+ "step": 470
349
+ },
350
+ {
351
+ "epoch": 1.92,
352
+ "grad_norm": 43.75231170654297,
353
+ "learning_rate": 1.7955555555555556e-05,
354
+ "loss": 1.1171,
355
+ "step": 480
356
+ },
357
+ {
358
+ "epoch": 1.96,
359
+ "grad_norm": 27.184715270996094,
360
+ "learning_rate": 1.7866666666666666e-05,
361
+ "loss": 1.1224,
362
+ "step": 490
363
+ },
364
+ {
365
+ "epoch": 2.0,
366
+ "grad_norm": 38.968082427978516,
367
+ "learning_rate": 1.7777777777777777e-05,
368
+ "loss": 1.0885,
369
+ "step": 500
370
+ },
371
+ {
372
+ "epoch": 2.0,
373
+ "eval_accuracy": 0.46,
374
+ "eval_f1": 0.40981236448175834,
375
+ "eval_loss": 1.1569879055023193,
376
+ "eval_runtime": 12.149,
377
+ "eval_samples_per_second": 41.156,
378
+ "eval_steps_per_second": 10.289,
379
+ "step": 500
380
+ },
381
+ {
382
+ "epoch": 2.04,
383
+ "grad_norm": 50.790748596191406,
384
+ "learning_rate": 1.768888888888889e-05,
385
+ "loss": 1.1199,
386
+ "step": 510
387
+ },
388
+ {
389
+ "epoch": 2.08,
390
+ "grad_norm": 53.71720886230469,
391
+ "learning_rate": 1.76e-05,
392
+ "loss": 0.9691,
393
+ "step": 520
394
+ },
395
+ {
396
+ "epoch": 2.12,
397
+ "grad_norm": 62.33308792114258,
398
+ "learning_rate": 1.751111111111111e-05,
399
+ "loss": 1.0077,
400
+ "step": 530
401
+ },
402
+ {
403
+ "epoch": 2.16,
404
+ "grad_norm": 33.90275192260742,
405
+ "learning_rate": 1.7422222222222222e-05,
406
+ "loss": 1.0222,
407
+ "step": 540
408
+ },
409
+ {
410
+ "epoch": 2.2,
411
+ "grad_norm": 71.48577880859375,
412
+ "learning_rate": 1.7333333333333336e-05,
413
+ "loss": 1.1158,
414
+ "step": 550
415
+ },
416
+ {
417
+ "epoch": 2.24,
418
+ "grad_norm": 25.986026763916016,
419
+ "learning_rate": 1.7244444444444446e-05,
420
+ "loss": 0.9958,
421
+ "step": 560
422
+ },
423
+ {
424
+ "epoch": 2.2800000000000002,
425
+ "grad_norm": 47.359107971191406,
426
+ "learning_rate": 1.7155555555555557e-05,
427
+ "loss": 1.0862,
428
+ "step": 570
429
+ },
430
+ {
431
+ "epoch": 2.32,
432
+ "grad_norm": 40.83256530761719,
433
+ "learning_rate": 1.706666666666667e-05,
434
+ "loss": 1.0912,
435
+ "step": 580
436
+ },
437
+ {
438
+ "epoch": 2.36,
439
+ "grad_norm": 63.868675231933594,
440
+ "learning_rate": 1.697777777777778e-05,
441
+ "loss": 1.1017,
442
+ "step": 590
443
+ },
444
+ {
445
+ "epoch": 2.4,
446
+ "grad_norm": 45.01089859008789,
447
+ "learning_rate": 1.688888888888889e-05,
448
+ "loss": 1.0942,
449
+ "step": 600
450
+ },
451
+ {
452
+ "epoch": 2.44,
453
+ "grad_norm": 46.86573791503906,
454
+ "learning_rate": 1.6800000000000002e-05,
455
+ "loss": 1.0188,
456
+ "step": 610
457
+ },
458
+ {
459
+ "epoch": 2.48,
460
+ "grad_norm": 59.64995574951172,
461
+ "learning_rate": 1.6711111111111112e-05,
462
+ "loss": 1.035,
463
+ "step": 620
464
+ },
465
+ {
466
+ "epoch": 2.52,
467
+ "grad_norm": 33.47834014892578,
468
+ "learning_rate": 1.6622222222222223e-05,
469
+ "loss": 0.9173,
470
+ "step": 630
471
+ },
472
+ {
473
+ "epoch": 2.56,
474
+ "grad_norm": 59.513633728027344,
475
+ "learning_rate": 1.6533333333333333e-05,
476
+ "loss": 1.0721,
477
+ "step": 640
478
+ },
479
+ {
480
+ "epoch": 2.6,
481
+ "grad_norm": 33.897315979003906,
482
+ "learning_rate": 1.6444444444444444e-05,
483
+ "loss": 1.0167,
484
+ "step": 650
485
+ },
486
+ {
487
+ "epoch": 2.64,
488
+ "grad_norm": 55.43059158325195,
489
+ "learning_rate": 1.6355555555555557e-05,
490
+ "loss": 1.0183,
491
+ "step": 660
492
+ },
493
+ {
494
+ "epoch": 2.68,
495
+ "grad_norm": 36.83810043334961,
496
+ "learning_rate": 1.6266666666666668e-05,
497
+ "loss": 1.0661,
498
+ "step": 670
499
+ },
500
+ {
501
+ "epoch": 2.7199999999999998,
502
+ "grad_norm": 27.871374130249023,
503
+ "learning_rate": 1.617777777777778e-05,
504
+ "loss": 0.986,
505
+ "step": 680
506
+ },
507
+ {
508
+ "epoch": 2.76,
509
+ "grad_norm": 26.579404830932617,
510
+ "learning_rate": 1.608888888888889e-05,
511
+ "loss": 1.0217,
512
+ "step": 690
513
+ },
514
+ {
515
+ "epoch": 2.8,
516
+ "grad_norm": 23.805009841918945,
517
+ "learning_rate": 1.6000000000000003e-05,
518
+ "loss": 1.015,
519
+ "step": 700
520
+ },
521
+ {
522
+ "epoch": 2.84,
523
+ "grad_norm": 35.254974365234375,
524
+ "learning_rate": 1.5911111111111113e-05,
525
+ "loss": 1.0762,
526
+ "step": 710
527
+ },
528
+ {
529
+ "epoch": 2.88,
530
+ "grad_norm": 20.483610153198242,
531
+ "learning_rate": 1.5822222222222224e-05,
532
+ "loss": 1.0583,
533
+ "step": 720
534
+ },
535
+ {
536
+ "epoch": 2.92,
537
+ "grad_norm": 26.34144401550293,
538
+ "learning_rate": 1.5733333333333334e-05,
539
+ "loss": 1.1008,
540
+ "step": 730
541
+ },
542
+ {
543
+ "epoch": 2.96,
544
+ "grad_norm": 54.002681732177734,
545
+ "learning_rate": 1.5644444444444448e-05,
546
+ "loss": 0.9295,
547
+ "step": 740
548
+ },
549
+ {
550
+ "epoch": 3.0,
551
+ "grad_norm": 36.5385627746582,
552
+ "learning_rate": 1.555555555555556e-05,
553
+ "loss": 1.0673,
554
+ "step": 750
555
+ },
556
+ {
557
+ "epoch": 3.0,
558
+ "eval_accuracy": 0.51,
559
+ "eval_f1": 0.4777248337252371,
560
+ "eval_loss": 1.1951327323913574,
561
+ "eval_runtime": 12.1837,
562
+ "eval_samples_per_second": 41.038,
563
+ "eval_steps_per_second": 10.26,
564
+ "step": 750
565
+ },
566
+ {
567
+ "epoch": 3.04,
568
+ "grad_norm": 58.73692321777344,
569
+ "learning_rate": 1.546666666666667e-05,
570
+ "loss": 0.9637,
571
+ "step": 760
572
+ },
573
+ {
574
+ "epoch": 3.08,
575
+ "grad_norm": 18.000825881958008,
576
+ "learning_rate": 1.537777777777778e-05,
577
+ "loss": 0.9251,
578
+ "step": 770
579
+ },
580
+ {
581
+ "epoch": 3.12,
582
+ "grad_norm": 93.39592742919922,
583
+ "learning_rate": 1.528888888888889e-05,
584
+ "loss": 1.0542,
585
+ "step": 780
586
+ },
587
+ {
588
+ "epoch": 3.16,
589
+ "grad_norm": 41.7457389831543,
590
+ "learning_rate": 1.5200000000000002e-05,
591
+ "loss": 0.9525,
592
+ "step": 790
593
+ },
594
+ {
595
+ "epoch": 3.2,
596
+ "grad_norm": 25.752269744873047,
597
+ "learning_rate": 1.5111111111111112e-05,
598
+ "loss": 0.8786,
599
+ "step": 800
600
+ },
601
+ {
602
+ "epoch": 3.24,
603
+ "grad_norm": 58.843475341796875,
604
+ "learning_rate": 1.5022222222222223e-05,
605
+ "loss": 1.015,
606
+ "step": 810
607
+ },
608
+ {
609
+ "epoch": 3.2800000000000002,
610
+ "grad_norm": 21.79439353942871,
611
+ "learning_rate": 1.4933333333333335e-05,
612
+ "loss": 0.9858,
613
+ "step": 820
614
+ },
615
+ {
616
+ "epoch": 3.32,
617
+ "grad_norm": 29.008590698242188,
618
+ "learning_rate": 1.4844444444444445e-05,
619
+ "loss": 0.9522,
620
+ "step": 830
621
+ },
622
+ {
623
+ "epoch": 3.36,
624
+ "grad_norm": 25.020465850830078,
625
+ "learning_rate": 1.4755555555555556e-05,
626
+ "loss": 1.0351,
627
+ "step": 840
628
+ },
629
+ {
630
+ "epoch": 3.4,
631
+ "grad_norm": 30.38283920288086,
632
+ "learning_rate": 1.4666666666666666e-05,
633
+ "loss": 1.0174,
634
+ "step": 850
635
+ },
636
+ {
637
+ "epoch": 3.44,
638
+ "grad_norm": 21.29317855834961,
639
+ "learning_rate": 1.457777777777778e-05,
640
+ "loss": 1.0674,
641
+ "step": 860
642
+ },
643
+ {
644
+ "epoch": 3.48,
645
+ "grad_norm": 50.27965545654297,
646
+ "learning_rate": 1.448888888888889e-05,
647
+ "loss": 1.0266,
648
+ "step": 870
649
+ },
650
+ {
651
+ "epoch": 3.52,
652
+ "grad_norm": 66.33875274658203,
653
+ "learning_rate": 1.4400000000000001e-05,
654
+ "loss": 1.001,
655
+ "step": 880
656
+ },
657
+ {
658
+ "epoch": 3.56,
659
+ "grad_norm": 34.711116790771484,
660
+ "learning_rate": 1.4311111111111111e-05,
661
+ "loss": 0.9511,
662
+ "step": 890
663
+ },
664
+ {
665
+ "epoch": 3.6,
666
+ "grad_norm": 50.2099494934082,
667
+ "learning_rate": 1.4222222222222224e-05,
668
+ "loss": 0.9778,
669
+ "step": 900
670
+ },
671
+ {
672
+ "epoch": 3.64,
673
+ "grad_norm": 20.47673988342285,
674
+ "learning_rate": 1.4133333333333334e-05,
675
+ "loss": 0.8044,
676
+ "step": 910
677
+ },
678
+ {
679
+ "epoch": 3.68,
680
+ "grad_norm": 27.520601272583008,
681
+ "learning_rate": 1.4044444444444445e-05,
682
+ "loss": 0.8282,
683
+ "step": 920
684
+ },
685
+ {
686
+ "epoch": 3.7199999999999998,
687
+ "grad_norm": 30.060382843017578,
688
+ "learning_rate": 1.3955555555555558e-05,
689
+ "loss": 0.6744,
690
+ "step": 930
691
+ },
692
+ {
693
+ "epoch": 3.76,
694
+ "grad_norm": 51.914154052734375,
695
+ "learning_rate": 1.3866666666666669e-05,
696
+ "loss": 0.8913,
697
+ "step": 940
698
+ },
699
+ {
700
+ "epoch": 3.8,
701
+ "grad_norm": 32.7239990234375,
702
+ "learning_rate": 1.377777777777778e-05,
703
+ "loss": 0.9128,
704
+ "step": 950
705
+ },
706
+ {
707
+ "epoch": 3.84,
708
+ "grad_norm": 40.4760856628418,
709
+ "learning_rate": 1.368888888888889e-05,
710
+ "loss": 0.8142,
711
+ "step": 960
712
+ },
713
+ {
714
+ "epoch": 3.88,
715
+ "grad_norm": 49.8121223449707,
716
+ "learning_rate": 1.3600000000000002e-05,
717
+ "loss": 0.8092,
718
+ "step": 970
719
+ },
720
+ {
721
+ "epoch": 3.92,
722
+ "grad_norm": 35.05157470703125,
723
+ "learning_rate": 1.3511111111111112e-05,
724
+ "loss": 0.7544,
725
+ "step": 980
726
+ },
727
+ {
728
+ "epoch": 3.96,
729
+ "grad_norm": 46.630287170410156,
730
+ "learning_rate": 1.3422222222222223e-05,
731
+ "loss": 1.1021,
732
+ "step": 990
733
+ },
734
+ {
735
+ "epoch": 4.0,
736
+ "grad_norm": 20.62359046936035,
737
+ "learning_rate": 1.3333333333333333e-05,
738
+ "loss": 0.9863,
739
+ "step": 1000
740
+ },
741
+ {
742
+ "epoch": 4.0,
743
+ "eval_accuracy": 0.622,
744
+ "eval_f1": 0.6315847689471261,
745
+ "eval_loss": 0.8479958176612854,
746
+ "eval_runtime": 12.0373,
747
+ "eval_samples_per_second": 41.537,
748
+ "eval_steps_per_second": 10.384,
749
+ "step": 1000
750
+ },
751
+ {
752
+ "epoch": 4.04,
753
+ "grad_norm": 33.65226364135742,
754
+ "learning_rate": 1.3244444444444447e-05,
755
+ "loss": 0.9395,
756
+ "step": 1010
757
+ },
758
+ {
759
+ "epoch": 4.08,
760
+ "grad_norm": 21.537878036499023,
761
+ "learning_rate": 1.3155555555555558e-05,
762
+ "loss": 0.7315,
763
+ "step": 1020
764
+ },
765
+ {
766
+ "epoch": 4.12,
767
+ "grad_norm": 72.42523956298828,
768
+ "learning_rate": 1.3066666666666668e-05,
769
+ "loss": 0.9017,
770
+ "step": 1030
771
+ },
772
+ {
773
+ "epoch": 4.16,
774
+ "grad_norm": 41.7869873046875,
775
+ "learning_rate": 1.2977777777777779e-05,
776
+ "loss": 0.9716,
777
+ "step": 1040
778
+ },
779
+ {
780
+ "epoch": 4.2,
781
+ "grad_norm": 52.05732727050781,
782
+ "learning_rate": 1.288888888888889e-05,
783
+ "loss": 0.8553,
784
+ "step": 1050
785
+ },
786
+ {
787
+ "epoch": 4.24,
788
+ "grad_norm": 45.54288864135742,
789
+ "learning_rate": 1.2800000000000001e-05,
790
+ "loss": 0.8538,
791
+ "step": 1060
792
+ },
793
+ {
794
+ "epoch": 4.28,
795
+ "grad_norm": 37.879661560058594,
796
+ "learning_rate": 1.2711111111111112e-05,
797
+ "loss": 0.8716,
798
+ "step": 1070
799
+ },
800
+ {
801
+ "epoch": 4.32,
802
+ "grad_norm": 42.977272033691406,
803
+ "learning_rate": 1.2622222222222222e-05,
804
+ "loss": 0.8166,
805
+ "step": 1080
806
+ },
807
+ {
808
+ "epoch": 4.36,
809
+ "grad_norm": 36.236854553222656,
810
+ "learning_rate": 1.2533333333333336e-05,
811
+ "loss": 0.7672,
812
+ "step": 1090
813
+ },
814
+ {
815
+ "epoch": 4.4,
816
+ "grad_norm": 46.707763671875,
817
+ "learning_rate": 1.2444444444444446e-05,
818
+ "loss": 0.7979,
819
+ "step": 1100
820
+ },
821
+ {
822
+ "epoch": 4.44,
823
+ "grad_norm": 28.657617568969727,
824
+ "learning_rate": 1.2355555555555557e-05,
825
+ "loss": 0.7771,
826
+ "step": 1110
827
+ },
828
+ {
829
+ "epoch": 4.48,
830
+ "grad_norm": 54.733009338378906,
831
+ "learning_rate": 1.2266666666666667e-05,
832
+ "loss": 0.8463,
833
+ "step": 1120
834
+ },
835
+ {
836
+ "epoch": 4.52,
837
+ "grad_norm": 49.474613189697266,
838
+ "learning_rate": 1.217777777777778e-05,
839
+ "loss": 0.759,
840
+ "step": 1130
841
+ },
842
+ {
843
+ "epoch": 4.5600000000000005,
844
+ "grad_norm": 31.721797943115234,
845
+ "learning_rate": 1.208888888888889e-05,
846
+ "loss": 0.8087,
847
+ "step": 1140
848
+ },
849
+ {
850
+ "epoch": 4.6,
851
+ "grad_norm": 32.5080680847168,
852
+ "learning_rate": 1.2e-05,
853
+ "loss": 0.8189,
854
+ "step": 1150
855
+ },
856
+ {
857
+ "epoch": 4.64,
858
+ "grad_norm": 29.60678482055664,
859
+ "learning_rate": 1.191111111111111e-05,
860
+ "loss": 0.7827,
861
+ "step": 1160
862
+ },
863
+ {
864
+ "epoch": 4.68,
865
+ "grad_norm": 27.952041625976562,
866
+ "learning_rate": 1.1822222222222225e-05,
867
+ "loss": 0.7894,
868
+ "step": 1170
869
+ },
870
+ {
871
+ "epoch": 4.72,
872
+ "grad_norm": 39.61127471923828,
873
+ "learning_rate": 1.1733333333333335e-05,
874
+ "loss": 0.8418,
875
+ "step": 1180
876
+ },
877
+ {
878
+ "epoch": 4.76,
879
+ "grad_norm": 29.20393180847168,
880
+ "learning_rate": 1.1644444444444446e-05,
881
+ "loss": 0.6856,
882
+ "step": 1190
883
+ },
884
+ {
885
+ "epoch": 4.8,
886
+ "grad_norm": 40.39738082885742,
887
+ "learning_rate": 1.1555555555555556e-05,
888
+ "loss": 0.7397,
889
+ "step": 1200
890
+ },
891
+ {
892
+ "epoch": 4.84,
893
+ "grad_norm": 30.21686363220215,
894
+ "learning_rate": 1.1466666666666668e-05,
895
+ "loss": 0.8024,
896
+ "step": 1210
897
+ },
898
+ {
899
+ "epoch": 4.88,
900
+ "grad_norm": 41.96994400024414,
901
+ "learning_rate": 1.1377777777777779e-05,
902
+ "loss": 0.8058,
903
+ "step": 1220
904
+ },
905
+ {
906
+ "epoch": 4.92,
907
+ "grad_norm": 24.6497802734375,
908
+ "learning_rate": 1.1288888888888889e-05,
909
+ "loss": 0.7722,
910
+ "step": 1230
911
+ },
912
+ {
913
+ "epoch": 4.96,
914
+ "grad_norm": 61.19656753540039,
915
+ "learning_rate": 1.1200000000000001e-05,
916
+ "loss": 0.8071,
917
+ "step": 1240
918
+ },
919
+ {
920
+ "epoch": 5.0,
921
+ "grad_norm": 28.335107803344727,
922
+ "learning_rate": 1.1111111111111113e-05,
923
+ "loss": 0.8241,
924
+ "step": 1250
925
+ },
926
+ {
927
+ "epoch": 5.0,
928
+ "eval_accuracy": 0.706,
929
+ "eval_f1": 0.7049416744459689,
930
+ "eval_loss": 0.7028206586837769,
931
+ "eval_runtime": 12.1037,
932
+ "eval_samples_per_second": 41.31,
933
+ "eval_steps_per_second": 10.327,
934
+ "step": 1250
935
+ },
936
+ {
937
+ "epoch": 5.04,
938
+ "grad_norm": 35.011070251464844,
939
+ "learning_rate": 1.1022222222222224e-05,
940
+ "loss": 0.6987,
941
+ "step": 1260
942
+ },
943
+ {
944
+ "epoch": 5.08,
945
+ "grad_norm": 69.44827270507812,
946
+ "learning_rate": 1.0933333333333334e-05,
947
+ "loss": 0.7744,
948
+ "step": 1270
949
+ },
950
+ {
951
+ "epoch": 5.12,
952
+ "grad_norm": 56.46673583984375,
953
+ "learning_rate": 1.0844444444444446e-05,
954
+ "loss": 0.7145,
955
+ "step": 1280
956
+ },
957
+ {
958
+ "epoch": 5.16,
959
+ "grad_norm": 31.053302764892578,
960
+ "learning_rate": 1.0755555555555557e-05,
961
+ "loss": 0.8252,
962
+ "step": 1290
963
+ },
964
+ {
965
+ "epoch": 5.2,
966
+ "grad_norm": 43.00981140136719,
967
+ "learning_rate": 1.0666666666666667e-05,
968
+ "loss": 0.6484,
969
+ "step": 1300
970
+ },
971
+ {
972
+ "epoch": 5.24,
973
+ "grad_norm": 19.58731460571289,
974
+ "learning_rate": 1.0577777777777778e-05,
975
+ "loss": 0.671,
976
+ "step": 1310
977
+ },
978
+ {
979
+ "epoch": 5.28,
980
+ "grad_norm": 61.77705001831055,
981
+ "learning_rate": 1.048888888888889e-05,
982
+ "loss": 0.6228,
983
+ "step": 1320
984
+ },
985
+ {
986
+ "epoch": 5.32,
987
+ "grad_norm": 39.46787643432617,
988
+ "learning_rate": 1.04e-05,
989
+ "loss": 0.604,
990
+ "step": 1330
991
+ },
992
+ {
993
+ "epoch": 5.36,
994
+ "grad_norm": 37.76319885253906,
995
+ "learning_rate": 1.0311111111111113e-05,
996
+ "loss": 0.7439,
997
+ "step": 1340
998
+ },
999
+ {
1000
+ "epoch": 5.4,
1001
+ "grad_norm": 86.55498504638672,
1002
+ "learning_rate": 1.0222222222222223e-05,
1003
+ "loss": 0.6356,
1004
+ "step": 1350
1005
+ },
1006
+ {
1007
+ "epoch": 5.44,
1008
+ "grad_norm": 50.998966217041016,
1009
+ "learning_rate": 1.0133333333333335e-05,
1010
+ "loss": 0.8106,
1011
+ "step": 1360
1012
+ },
1013
+ {
1014
+ "epoch": 5.48,
1015
+ "grad_norm": 41.33028793334961,
1016
+ "learning_rate": 1.0044444444444446e-05,
1017
+ "loss": 0.6859,
1018
+ "step": 1370
1019
+ },
1020
+ {
1021
+ "epoch": 5.52,
1022
+ "grad_norm": 27.738656997680664,
1023
+ "learning_rate": 9.955555555555556e-06,
1024
+ "loss": 0.6726,
1025
+ "step": 1380
1026
+ },
1027
+ {
1028
+ "epoch": 5.5600000000000005,
1029
+ "grad_norm": 65.7751235961914,
1030
+ "learning_rate": 9.866666666666668e-06,
1031
+ "loss": 0.7502,
1032
+ "step": 1390
1033
+ },
1034
+ {
1035
+ "epoch": 5.6,
1036
+ "grad_norm": 35.19927978515625,
1037
+ "learning_rate": 9.777777777777779e-06,
1038
+ "loss": 0.8105,
1039
+ "step": 1400
1040
+ },
1041
+ {
1042
+ "epoch": 5.64,
1043
+ "grad_norm": 49.49024200439453,
1044
+ "learning_rate": 9.688888888888889e-06,
1045
+ "loss": 0.6238,
1046
+ "step": 1410
1047
+ },
1048
+ {
1049
+ "epoch": 5.68,
1050
+ "grad_norm": 69.92552947998047,
1051
+ "learning_rate": 9.600000000000001e-06,
1052
+ "loss": 0.6782,
1053
+ "step": 1420
1054
+ },
1055
+ {
1056
+ "epoch": 5.72,
1057
+ "grad_norm": 35.6678352355957,
1058
+ "learning_rate": 9.511111111111112e-06,
1059
+ "loss": 0.8168,
1060
+ "step": 1430
1061
+ },
1062
+ {
1063
+ "epoch": 5.76,
1064
+ "grad_norm": 26.826770782470703,
1065
+ "learning_rate": 9.422222222222222e-06,
1066
+ "loss": 0.6466,
1067
+ "step": 1440
1068
+ },
1069
+ {
1070
+ "epoch": 5.8,
1071
+ "grad_norm": 44.43244934082031,
1072
+ "learning_rate": 9.333333333333334e-06,
1073
+ "loss": 0.7103,
1074
+ "step": 1450
1075
+ },
1076
+ {
1077
+ "epoch": 5.84,
1078
+ "grad_norm": 29.49979591369629,
1079
+ "learning_rate": 9.244444444444445e-06,
1080
+ "loss": 0.7882,
1081
+ "step": 1460
1082
+ },
1083
+ {
1084
+ "epoch": 5.88,
1085
+ "grad_norm": 68.5856704711914,
1086
+ "learning_rate": 9.155555555555557e-06,
1087
+ "loss": 0.7308,
1088
+ "step": 1470
1089
+ },
1090
+ {
1091
+ "epoch": 5.92,
1092
+ "grad_norm": 59.66977310180664,
1093
+ "learning_rate": 9.066666666666667e-06,
1094
+ "loss": 0.6081,
1095
+ "step": 1480
1096
+ },
1097
+ {
1098
+ "epoch": 5.96,
1099
+ "grad_norm": 44.2071418762207,
1100
+ "learning_rate": 8.977777777777778e-06,
1101
+ "loss": 0.6688,
1102
+ "step": 1490
1103
+ },
1104
+ {
1105
+ "epoch": 6.0,
1106
+ "grad_norm": 35.35416030883789,
1107
+ "learning_rate": 8.888888888888888e-06,
1108
+ "loss": 0.7208,
1109
+ "step": 1500
1110
+ },
1111
+ {
1112
+ "epoch": 6.0,
1113
+ "eval_accuracy": 0.712,
1114
+ "eval_f1": 0.6909848104243247,
1115
+ "eval_loss": 0.7059077024459839,
1116
+ "eval_runtime": 12.0866,
1117
+ "eval_samples_per_second": 41.368,
1118
+ "eval_steps_per_second": 10.342,
1119
+ "step": 1500
1120
+ },
1121
+ {
1122
+ "epoch": 6.04,
1123
+ "grad_norm": 28.48759651184082,
1124
+ "learning_rate": 8.8e-06,
1125
+ "loss": 0.7302,
1126
+ "step": 1510
1127
+ },
1128
+ {
1129
+ "epoch": 6.08,
1130
+ "grad_norm": 24.124494552612305,
1131
+ "learning_rate": 8.711111111111111e-06,
1132
+ "loss": 0.5639,
1133
+ "step": 1520
1134
+ },
1135
+ {
1136
+ "epoch": 6.12,
1137
+ "grad_norm": 46.50981521606445,
1138
+ "learning_rate": 8.622222222222223e-06,
1139
+ "loss": 0.7587,
1140
+ "step": 1530
1141
+ },
1142
+ {
1143
+ "epoch": 6.16,
1144
+ "grad_norm": 31.084308624267578,
1145
+ "learning_rate": 8.533333333333335e-06,
1146
+ "loss": 0.662,
1147
+ "step": 1540
1148
+ },
1149
+ {
1150
+ "epoch": 6.2,
1151
+ "grad_norm": 45.10621643066406,
1152
+ "learning_rate": 8.444444444444446e-06,
1153
+ "loss": 0.7906,
1154
+ "step": 1550
1155
+ },
1156
+ {
1157
+ "epoch": 6.24,
1158
+ "grad_norm": 34.53160095214844,
1159
+ "learning_rate": 8.355555555555556e-06,
1160
+ "loss": 0.6335,
1161
+ "step": 1560
1162
+ },
1163
+ {
1164
+ "epoch": 6.28,
1165
+ "grad_norm": 50.21031951904297,
1166
+ "learning_rate": 8.266666666666667e-06,
1167
+ "loss": 0.6376,
1168
+ "step": 1570
1169
+ },
1170
+ {
1171
+ "epoch": 6.32,
1172
+ "grad_norm": 40.00431823730469,
1173
+ "learning_rate": 8.177777777777779e-06,
1174
+ "loss": 0.5273,
1175
+ "step": 1580
1176
+ },
1177
+ {
1178
+ "epoch": 6.36,
1179
+ "grad_norm": 44.98348617553711,
1180
+ "learning_rate": 8.08888888888889e-06,
1181
+ "loss": 0.6106,
1182
+ "step": 1590
1183
+ },
1184
+ {
1185
+ "epoch": 6.4,
1186
+ "grad_norm": 77.24087524414062,
1187
+ "learning_rate": 8.000000000000001e-06,
1188
+ "loss": 0.5021,
1189
+ "step": 1600
1190
+ },
1191
+ {
1192
+ "epoch": 6.44,
1193
+ "grad_norm": 43.97039031982422,
1194
+ "learning_rate": 7.911111111111112e-06,
1195
+ "loss": 0.721,
1196
+ "step": 1610
1197
+ },
1198
+ {
1199
+ "epoch": 6.48,
1200
+ "grad_norm": 63.44462203979492,
1201
+ "learning_rate": 7.822222222222224e-06,
1202
+ "loss": 0.7287,
1203
+ "step": 1620
1204
+ },
1205
+ {
1206
+ "epoch": 6.52,
1207
+ "grad_norm": 34.81692123413086,
1208
+ "learning_rate": 7.733333333333334e-06,
1209
+ "loss": 0.6739,
1210
+ "step": 1630
1211
+ },
1212
+ {
1213
+ "epoch": 6.5600000000000005,
1214
+ "grad_norm": 60.13438034057617,
1215
+ "learning_rate": 7.644444444444445e-06,
1216
+ "loss": 0.5785,
1217
+ "step": 1640
1218
+ },
1219
+ {
1220
+ "epoch": 6.6,
1221
+ "grad_norm": 55.27112579345703,
1222
+ "learning_rate": 7.555555555555556e-06,
1223
+ "loss": 0.5858,
1224
+ "step": 1650
1225
+ },
1226
+ {
1227
+ "epoch": 6.64,
1228
+ "grad_norm": 35.41032028198242,
1229
+ "learning_rate": 7.4666666666666675e-06,
1230
+ "loss": 0.5187,
1231
+ "step": 1660
1232
+ },
1233
+ {
1234
+ "epoch": 6.68,
1235
+ "grad_norm": 43.339637756347656,
1236
+ "learning_rate": 7.377777777777778e-06,
1237
+ "loss": 0.6068,
1238
+ "step": 1670
1239
+ },
1240
+ {
1241
+ "epoch": 6.72,
1242
+ "grad_norm": 47.03825759887695,
1243
+ "learning_rate": 7.28888888888889e-06,
1244
+ "loss": 0.627,
1245
+ "step": 1680
1246
+ },
1247
+ {
1248
+ "epoch": 6.76,
1249
+ "grad_norm": 65.18019104003906,
1250
+ "learning_rate": 7.2000000000000005e-06,
1251
+ "loss": 0.4992,
1252
+ "step": 1690
1253
+ },
1254
+ {
1255
+ "epoch": 6.8,
1256
+ "grad_norm": 31.7502384185791,
1257
+ "learning_rate": 7.111111111111112e-06,
1258
+ "loss": 0.4831,
1259
+ "step": 1700
1260
+ },
1261
+ {
1262
+ "epoch": 6.84,
1263
+ "grad_norm": 58.455284118652344,
1264
+ "learning_rate": 7.022222222222222e-06,
1265
+ "loss": 0.6985,
1266
+ "step": 1710
1267
+ },
1268
+ {
1269
+ "epoch": 6.88,
1270
+ "grad_norm": 39.89744186401367,
1271
+ "learning_rate": 6.9333333333333344e-06,
1272
+ "loss": 0.6184,
1273
+ "step": 1720
1274
+ },
1275
+ {
1276
+ "epoch": 6.92,
1277
+ "grad_norm": 47.30400466918945,
1278
+ "learning_rate": 6.844444444444445e-06,
1279
+ "loss": 0.544,
1280
+ "step": 1730
1281
+ },
1282
+ {
1283
+ "epoch": 6.96,
1284
+ "grad_norm": 35.126983642578125,
1285
+ "learning_rate": 6.755555555555556e-06,
1286
+ "loss": 0.6523,
1287
+ "step": 1740
1288
+ },
1289
+ {
1290
+ "epoch": 7.0,
1291
+ "grad_norm": 34.56584930419922,
1292
+ "learning_rate": 6.666666666666667e-06,
1293
+ "loss": 0.5563,
1294
+ "step": 1750
1295
+ },
1296
+ {
1297
+ "epoch": 7.0,
1298
+ "eval_accuracy": 0.782,
1299
+ "eval_f1": 0.7813244154636514,
1300
+ "eval_loss": 0.5727885365486145,
1301
+ "eval_runtime": 12.134,
1302
+ "eval_samples_per_second": 41.206,
1303
+ "eval_steps_per_second": 10.302,
1304
+ "step": 1750
1305
+ },
1306
+ {
1307
+ "epoch": 7.04,
1308
+ "grad_norm": 41.27728271484375,
1309
+ "learning_rate": 6.577777777777779e-06,
1310
+ "loss": 0.5345,
1311
+ "step": 1760
1312
+ },
1313
+ {
1314
+ "epoch": 7.08,
1315
+ "grad_norm": 45.002113342285156,
1316
+ "learning_rate": 6.488888888888889e-06,
1317
+ "loss": 0.4454,
1318
+ "step": 1770
1319
+ },
1320
+ {
1321
+ "epoch": 7.12,
1322
+ "grad_norm": 54.88307571411133,
1323
+ "learning_rate": 6.4000000000000006e-06,
1324
+ "loss": 0.4505,
1325
+ "step": 1780
1326
+ },
1327
+ {
1328
+ "epoch": 7.16,
1329
+ "grad_norm": 61.16643524169922,
1330
+ "learning_rate": 6.311111111111111e-06,
1331
+ "loss": 0.4595,
1332
+ "step": 1790
1333
+ },
1334
+ {
1335
+ "epoch": 7.2,
1336
+ "grad_norm": 40.985477447509766,
1337
+ "learning_rate": 6.222222222222223e-06,
1338
+ "loss": 0.394,
1339
+ "step": 1800
1340
+ },
1341
+ {
1342
+ "epoch": 7.24,
1343
+ "grad_norm": 47.477718353271484,
1344
+ "learning_rate": 6.133333333333334e-06,
1345
+ "loss": 0.5319,
1346
+ "step": 1810
1347
+ },
1348
+ {
1349
+ "epoch": 7.28,
1350
+ "grad_norm": 36.49656677246094,
1351
+ "learning_rate": 6.044444444444445e-06,
1352
+ "loss": 0.4503,
1353
+ "step": 1820
1354
+ },
1355
+ {
1356
+ "epoch": 7.32,
1357
+ "grad_norm": 58.11835479736328,
1358
+ "learning_rate": 5.955555555555555e-06,
1359
+ "loss": 0.4832,
1360
+ "step": 1830
1361
+ },
1362
+ {
1363
+ "epoch": 7.36,
1364
+ "grad_norm": 12.562129020690918,
1365
+ "learning_rate": 5.8666666666666675e-06,
1366
+ "loss": 0.5398,
1367
+ "step": 1840
1368
+ },
1369
+ {
1370
+ "epoch": 7.4,
1371
+ "grad_norm": 55.433841705322266,
1372
+ "learning_rate": 5.777777777777778e-06,
1373
+ "loss": 0.4846,
1374
+ "step": 1850
1375
+ },
1376
+ {
1377
+ "epoch": 7.44,
1378
+ "grad_norm": 45.45614242553711,
1379
+ "learning_rate": 5.688888888888889e-06,
1380
+ "loss": 0.5762,
1381
+ "step": 1860
1382
+ },
1383
+ {
1384
+ "epoch": 7.48,
1385
+ "grad_norm": 56.408843994140625,
1386
+ "learning_rate": 5.600000000000001e-06,
1387
+ "loss": 0.5523,
1388
+ "step": 1870
1389
+ },
1390
+ {
1391
+ "epoch": 7.52,
1392
+ "grad_norm": 51.10346984863281,
1393
+ "learning_rate": 5.511111111111112e-06,
1394
+ "loss": 0.5049,
1395
+ "step": 1880
1396
+ },
1397
+ {
1398
+ "epoch": 7.5600000000000005,
1399
+ "grad_norm": 66.00965881347656,
1400
+ "learning_rate": 5.422222222222223e-06,
1401
+ "loss": 0.4715,
1402
+ "step": 1890
1403
+ },
1404
+ {
1405
+ "epoch": 7.6,
1406
+ "grad_norm": 76.69068908691406,
1407
+ "learning_rate": 5.333333333333334e-06,
1408
+ "loss": 0.5237,
1409
+ "step": 1900
1410
+ },
1411
+ {
1412
+ "epoch": 7.64,
1413
+ "grad_norm": 57.46429443359375,
1414
+ "learning_rate": 5.244444444444445e-06,
1415
+ "loss": 0.4419,
1416
+ "step": 1910
1417
+ },
1418
+ {
1419
+ "epoch": 7.68,
1420
+ "grad_norm": 47.936641693115234,
1421
+ "learning_rate": 5.155555555555556e-06,
1422
+ "loss": 0.6135,
1423
+ "step": 1920
1424
+ },
1425
+ {
1426
+ "epoch": 7.72,
1427
+ "grad_norm": 56.27345275878906,
1428
+ "learning_rate": 5.0666666666666676e-06,
1429
+ "loss": 0.4293,
1430
+ "step": 1930
1431
+ },
1432
+ {
1433
+ "epoch": 7.76,
1434
+ "grad_norm": 66.12334442138672,
1435
+ "learning_rate": 4.977777777777778e-06,
1436
+ "loss": 0.5966,
1437
+ "step": 1940
1438
+ },
1439
+ {
1440
+ "epoch": 7.8,
1441
+ "grad_norm": 52.354122161865234,
1442
+ "learning_rate": 4.888888888888889e-06,
1443
+ "loss": 0.5202,
1444
+ "step": 1950
1445
+ },
1446
+ {
1447
+ "epoch": 7.84,
1448
+ "grad_norm": 34.89662170410156,
1449
+ "learning_rate": 4.800000000000001e-06,
1450
+ "loss": 0.4621,
1451
+ "step": 1960
1452
+ },
1453
+ {
1454
+ "epoch": 7.88,
1455
+ "grad_norm": 67.30014038085938,
1456
+ "learning_rate": 4.711111111111111e-06,
1457
+ "loss": 0.4718,
1458
+ "step": 1970
1459
+ },
1460
+ {
1461
+ "epoch": 7.92,
1462
+ "grad_norm": 59.92790603637695,
1463
+ "learning_rate": 4.622222222222222e-06,
1464
+ "loss": 0.4111,
1465
+ "step": 1980
1466
+ },
1467
+ {
1468
+ "epoch": 7.96,
1469
+ "grad_norm": 60.730255126953125,
1470
+ "learning_rate": 4.533333333333334e-06,
1471
+ "loss": 0.4406,
1472
+ "step": 1990
1473
+ },
1474
+ {
1475
+ "epoch": 8.0,
1476
+ "grad_norm": 25.637128829956055,
1477
+ "learning_rate": 4.444444444444444e-06,
1478
+ "loss": 0.3988,
1479
+ "step": 2000
1480
+ },
1481
+ {
1482
+ "epoch": 8.0,
1483
+ "eval_accuracy": 0.826,
1484
+ "eval_f1": 0.8239121231432465,
1485
+ "eval_loss": 0.5472010970115662,
1486
+ "eval_runtime": 12.1827,
1487
+ "eval_samples_per_second": 41.042,
1488
+ "eval_steps_per_second": 10.26,
1489
+ "step": 2000
1490
+ },
1491
+ {
1492
+ "epoch": 8.04,
1493
+ "grad_norm": 36.69926834106445,
1494
+ "learning_rate": 4.3555555555555555e-06,
1495
+ "loss": 0.4001,
1496
+ "step": 2010
1497
+ },
1498
+ {
1499
+ "epoch": 8.08,
1500
+ "grad_norm": 40.46434783935547,
1501
+ "learning_rate": 4.266666666666668e-06,
1502
+ "loss": 0.3608,
1503
+ "step": 2020
1504
+ },
1505
+ {
1506
+ "epoch": 8.12,
1507
+ "grad_norm": 41.12105178833008,
1508
+ "learning_rate": 4.177777777777778e-06,
1509
+ "loss": 0.3893,
1510
+ "step": 2030
1511
+ },
1512
+ {
1513
+ "epoch": 8.16,
1514
+ "grad_norm": 84.14956665039062,
1515
+ "learning_rate": 4.088888888888889e-06,
1516
+ "loss": 0.4152,
1517
+ "step": 2040
1518
+ },
1519
+ {
1520
+ "epoch": 8.2,
1521
+ "grad_norm": 49.22280502319336,
1522
+ "learning_rate": 4.000000000000001e-06,
1523
+ "loss": 0.4308,
1524
+ "step": 2050
1525
+ },
1526
+ {
1527
+ "epoch": 8.24,
1528
+ "grad_norm": 45.23503112792969,
1529
+ "learning_rate": 3.911111111111112e-06,
1530
+ "loss": 0.3673,
1531
+ "step": 2060
1532
+ },
1533
+ {
1534
+ "epoch": 8.28,
1535
+ "grad_norm": 47.49519348144531,
1536
+ "learning_rate": 3.8222222222222224e-06,
1537
+ "loss": 0.3846,
1538
+ "step": 2070
1539
+ },
1540
+ {
1541
+ "epoch": 8.32,
1542
+ "grad_norm": 68.24031829833984,
1543
+ "learning_rate": 3.7333333333333337e-06,
1544
+ "loss": 0.3908,
1545
+ "step": 2080
1546
+ },
1547
+ {
1548
+ "epoch": 8.36,
1549
+ "grad_norm": 23.122114181518555,
1550
+ "learning_rate": 3.644444444444445e-06,
1551
+ "loss": 0.3358,
1552
+ "step": 2090
1553
+ },
1554
+ {
1555
+ "epoch": 8.4,
1556
+ "grad_norm": 59.9205436706543,
1557
+ "learning_rate": 3.555555555555556e-06,
1558
+ "loss": 0.3517,
1559
+ "step": 2100
1560
+ },
1561
+ {
1562
+ "epoch": 8.44,
1563
+ "grad_norm": 38.14185333251953,
1564
+ "learning_rate": 3.4666666666666672e-06,
1565
+ "loss": 0.4558,
1566
+ "step": 2110
1567
+ },
1568
+ {
1569
+ "epoch": 8.48,
1570
+ "grad_norm": 50.6385498046875,
1571
+ "learning_rate": 3.377777777777778e-06,
1572
+ "loss": 0.3912,
1573
+ "step": 2120
1574
+ },
1575
+ {
1576
+ "epoch": 8.52,
1577
+ "grad_norm": 30.214555740356445,
1578
+ "learning_rate": 3.2888888888888894e-06,
1579
+ "loss": 0.4873,
1580
+ "step": 2130
1581
+ },
1582
+ {
1583
+ "epoch": 8.56,
1584
+ "grad_norm": 53.54391860961914,
1585
+ "learning_rate": 3.2000000000000003e-06,
1586
+ "loss": 0.3847,
1587
+ "step": 2140
1588
+ },
1589
+ {
1590
+ "epoch": 8.6,
1591
+ "grad_norm": 29.186870574951172,
1592
+ "learning_rate": 3.1111111111111116e-06,
1593
+ "loss": 0.3381,
1594
+ "step": 2150
1595
+ },
1596
+ {
1597
+ "epoch": 8.64,
1598
+ "grad_norm": 31.920259475708008,
1599
+ "learning_rate": 3.0222222222222225e-06,
1600
+ "loss": 0.3559,
1601
+ "step": 2160
1602
+ },
1603
+ {
1604
+ "epoch": 8.68,
1605
+ "grad_norm": 86.72305297851562,
1606
+ "learning_rate": 2.9333333333333338e-06,
1607
+ "loss": 0.3753,
1608
+ "step": 2170
1609
+ },
1610
+ {
1611
+ "epoch": 8.72,
1612
+ "grad_norm": 68.44404602050781,
1613
+ "learning_rate": 2.8444444444444446e-06,
1614
+ "loss": 0.4498,
1615
+ "step": 2180
1616
+ },
1617
+ {
1618
+ "epoch": 8.76,
1619
+ "grad_norm": 29.52780532836914,
1620
+ "learning_rate": 2.755555555555556e-06,
1621
+ "loss": 0.4367,
1622
+ "step": 2190
1623
+ },
1624
+ {
1625
+ "epoch": 8.8,
1626
+ "grad_norm": 57.7866096496582,
1627
+ "learning_rate": 2.666666666666667e-06,
1628
+ "loss": 0.4172,
1629
+ "step": 2200
1630
+ },
1631
+ {
1632
+ "epoch": 8.84,
1633
+ "grad_norm": 21.049448013305664,
1634
+ "learning_rate": 2.577777777777778e-06,
1635
+ "loss": 0.3221,
1636
+ "step": 2210
1637
+ },
1638
+ {
1639
+ "epoch": 8.88,
1640
+ "grad_norm": 96.43756103515625,
1641
+ "learning_rate": 2.488888888888889e-06,
1642
+ "loss": 0.3575,
1643
+ "step": 2220
1644
+ },
1645
+ {
1646
+ "epoch": 8.92,
1647
+ "grad_norm": 42.55648422241211,
1648
+ "learning_rate": 2.4000000000000003e-06,
1649
+ "loss": 0.3055,
1650
+ "step": 2230
1651
+ },
1652
+ {
1653
+ "epoch": 8.96,
1654
+ "grad_norm": 44.407386779785156,
1655
+ "learning_rate": 2.311111111111111e-06,
1656
+ "loss": 0.3991,
1657
+ "step": 2240
1658
+ },
1659
+ {
1660
+ "epoch": 9.0,
1661
+ "grad_norm": 23.83012580871582,
1662
+ "learning_rate": 2.222222222222222e-06,
1663
+ "loss": 0.4643,
1664
+ "step": 2250
1665
+ },
1666
+ {
1667
+ "epoch": 9.0,
1668
+ "eval_accuracy": 0.85,
1669
+ "eval_f1": 0.8502768157768157,
1670
+ "eval_loss": 0.47364383935928345,
1671
+ "eval_runtime": 12.1944,
1672
+ "eval_samples_per_second": 41.002,
1673
+ "eval_steps_per_second": 10.251,
1674
+ "step": 2250
1675
+ },
1676
+ {
1677
+ "epoch": 9.04,
1678
+ "grad_norm": 33.42547607421875,
1679
+ "learning_rate": 2.133333333333334e-06,
1680
+ "loss": 0.3178,
1681
+ "step": 2260
1682
+ },
1683
+ {
1684
+ "epoch": 9.08,
1685
+ "grad_norm": 33.77496337890625,
1686
+ "learning_rate": 2.0444444444444447e-06,
1687
+ "loss": 0.2743,
1688
+ "step": 2270
1689
+ },
1690
+ {
1691
+ "epoch": 9.12,
1692
+ "grad_norm": 73.40355682373047,
1693
+ "learning_rate": 1.955555555555556e-06,
1694
+ "loss": 0.4977,
1695
+ "step": 2280
1696
+ },
1697
+ {
1698
+ "epoch": 9.16,
1699
+ "grad_norm": 51.72673034667969,
1700
+ "learning_rate": 1.8666666666666669e-06,
1701
+ "loss": 0.294,
1702
+ "step": 2290
1703
+ },
1704
+ {
1705
+ "epoch": 9.2,
1706
+ "grad_norm": 25.48830795288086,
1707
+ "learning_rate": 1.777777777777778e-06,
1708
+ "loss": 0.2909,
1709
+ "step": 2300
1710
+ },
1711
+ {
1712
+ "epoch": 9.24,
1713
+ "grad_norm": 32.16721725463867,
1714
+ "learning_rate": 1.688888888888889e-06,
1715
+ "loss": 0.3173,
1716
+ "step": 2310
1717
+ },
1718
+ {
1719
+ "epoch": 9.28,
1720
+ "grad_norm": 31.670467376708984,
1721
+ "learning_rate": 1.6000000000000001e-06,
1722
+ "loss": 0.3557,
1723
+ "step": 2320
1724
+ },
1725
+ {
1726
+ "epoch": 9.32,
1727
+ "grad_norm": 26.01582145690918,
1728
+ "learning_rate": 1.5111111111111112e-06,
1729
+ "loss": 0.2405,
1730
+ "step": 2330
1731
+ },
1732
+ {
1733
+ "epoch": 9.36,
1734
+ "grad_norm": 27.004608154296875,
1735
+ "learning_rate": 1.4222222222222223e-06,
1736
+ "loss": 0.3557,
1737
+ "step": 2340
1738
+ },
1739
+ {
1740
+ "epoch": 9.4,
1741
+ "grad_norm": 57.54291915893555,
1742
+ "learning_rate": 1.3333333333333334e-06,
1743
+ "loss": 0.411,
1744
+ "step": 2350
1745
+ },
1746
+ {
1747
+ "epoch": 9.44,
1748
+ "grad_norm": 57.62574005126953,
1749
+ "learning_rate": 1.2444444444444445e-06,
1750
+ "loss": 0.3942,
1751
+ "step": 2360
1752
+ },
1753
+ {
1754
+ "epoch": 9.48,
1755
+ "grad_norm": 60.00413131713867,
1756
+ "learning_rate": 1.1555555555555556e-06,
1757
+ "loss": 0.2951,
1758
+ "step": 2370
1759
+ },
1760
+ {
1761
+ "epoch": 9.52,
1762
+ "grad_norm": 54.96599197387695,
1763
+ "learning_rate": 1.066666666666667e-06,
1764
+ "loss": 0.368,
1765
+ "step": 2380
1766
+ },
1767
+ {
1768
+ "epoch": 9.56,
1769
+ "grad_norm": 22.52992057800293,
1770
+ "learning_rate": 9.77777777777778e-07,
1771
+ "loss": 0.4472,
1772
+ "step": 2390
1773
+ },
1774
+ {
1775
+ "epoch": 9.6,
1776
+ "grad_norm": 42.04111862182617,
1777
+ "learning_rate": 8.88888888888889e-07,
1778
+ "loss": 0.3283,
1779
+ "step": 2400
1780
+ },
1781
+ {
1782
+ "epoch": 9.64,
1783
+ "grad_norm": 72.67633056640625,
1784
+ "learning_rate": 8.000000000000001e-07,
1785
+ "loss": 0.4713,
1786
+ "step": 2410
1787
+ },
1788
+ {
1789
+ "epoch": 9.68,
1790
+ "grad_norm": 32.135921478271484,
1791
+ "learning_rate": 7.111111111111112e-07,
1792
+ "loss": 0.2854,
1793
+ "step": 2420
1794
+ },
1795
+ {
1796
+ "epoch": 9.72,
1797
+ "grad_norm": 95.6978759765625,
1798
+ "learning_rate": 6.222222222222223e-07,
1799
+ "loss": 0.3126,
1800
+ "step": 2430
1801
+ },
1802
+ {
1803
+ "epoch": 9.76,
1804
+ "grad_norm": 26.696413040161133,
1805
+ "learning_rate": 5.333333333333335e-07,
1806
+ "loss": 0.2417,
1807
+ "step": 2440
1808
+ },
1809
+ {
1810
+ "epoch": 9.8,
1811
+ "grad_norm": 32.3356819152832,
1812
+ "learning_rate": 4.444444444444445e-07,
1813
+ "loss": 0.326,
1814
+ "step": 2450
1815
+ },
1816
+ {
1817
+ "epoch": 9.84,
1818
+ "grad_norm": 51.74174499511719,
1819
+ "learning_rate": 3.555555555555556e-07,
1820
+ "loss": 0.3181,
1821
+ "step": 2460
1822
+ },
1823
+ {
1824
+ "epoch": 9.88,
1825
+ "grad_norm": 44.073734283447266,
1826
+ "learning_rate": 2.666666666666667e-07,
1827
+ "loss": 0.2574,
1828
+ "step": 2470
1829
+ },
1830
+ {
1831
+ "epoch": 9.92,
1832
+ "grad_norm": 48.75944519042969,
1833
+ "learning_rate": 1.777777777777778e-07,
1834
+ "loss": 0.3372,
1835
+ "step": 2480
1836
+ },
1837
+ {
1838
+ "epoch": 9.96,
1839
+ "grad_norm": 35.41460037231445,
1840
+ "learning_rate": 8.88888888888889e-08,
1841
+ "loss": 0.2653,
1842
+ "step": 2490
1843
+ },
1844
+ {
1845
+ "epoch": 10.0,
1846
+ "grad_norm": 27.246013641357422,
1847
+ "learning_rate": 0.0,
1848
+ "loss": 0.2625,
1849
+ "step": 2500
1850
+ },
1851
+ {
1852
+ "epoch": 10.0,
1853
+ "eval_accuracy": 0.88,
1854
+ "eval_f1": 0.8802333980766498,
1855
+ "eval_loss": 0.4214743971824646,
1856
+ "eval_runtime": 12.0989,
1857
+ "eval_samples_per_second": 41.326,
1858
+ "eval_steps_per_second": 10.331,
1859
+ "step": 2500
1860
+ },
1861
+ {
1862
+ "epoch": 10.0,
1863
+ "step": 2500,
1864
+ "total_flos": 4.0857422266368e+18,
1865
+ "train_loss": 0.7867561736106873,
1866
+ "train_runtime": 2885.7234,
1867
+ "train_samples_per_second": 13.861,
1868
+ "train_steps_per_second": 0.866
1869
+ }
1870
+ ],
1871
+ "logging_steps": 10,
1872
+ "max_steps": 2500,
1873
+ "num_input_tokens_seen": 0,
1874
+ "num_train_epochs": 10,
1875
+ "save_steps": 500,
1876
+ "stateful_callbacks": {
1877
+ "TrainerControl": {
1878
+ "args": {
1879
+ "should_epoch_stop": false,
1880
+ "should_evaluate": false,
1881
+ "should_log": false,
1882
+ "should_save": true,
1883
+ "should_training_stop": true
1884
+ },
1885
+ "attributes": {}
1886
+ }
1887
+ },
1888
+ "total_flos": 4.0857422266368e+18,
1889
+ "train_batch_size": 4,
1890
+ "trial_name": null,
1891
+ "trial_params": null
1892
+ }
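The `log_history` above interleaves per-10-step training entries with per-epoch evaluation entries; a short sketch for pulling out the evaluation curve follows (the file path is assumed relative to a local clone of this repo).

```python
# Sketch: extract the per-epoch eval metrics from trainer_state.json.
import json

with open("trainer_state.json") as f:                 # assumed local path
    state = json.load(f)

# Evaluation entries are the ones carrying "eval_accuracy".
eval_rows = [row for row in state["log_history"] if "eval_accuracy" in row]
for row in eval_rows:
    print(f'epoch {row["epoch"]:>4}: '
          f'loss={row["eval_loss"]:.4f}  '
          f'acc={row["eval_accuracy"]:.3f}  '
          f'f1={row["eval_f1"]:.4f}')
```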
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdd0bbe71d7593ca08b430240c21112841c64a4209ee6b39f185fcf182d272d9
+ size 5368