vovadevico committed
Commit 3c71df4 · verified · 1 Parent(s): 9533153

End of training

Files changed (5):
  1. README.md +7 -5
  2. all_results.json +13 -0
  3. eval_results.json +8 -0
  4. train_results.json +8 -0
  5. trainer_state.json +199 -0
README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: google/vit-large-patch16-384
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -15,7 +17,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: imagefolder
+      name: touchtech/fashion-images-pack-types
       type: imagefolder
       config: default
       split: train
@@ -23,7 +25,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.991652754590985
+      value: 0.989983305509182
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -31,10 +33,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-pack-types-vit-large-patch16-384-v1
 
-This model is a fine-tuned version of [google/vit-large-patch16-384](https://huggingface.co/google/vit-large-patch16-384) on the imagefolder dataset.
+This model is a fine-tuned version of [google/vit-large-patch16-384](https://huggingface.co/google/vit-large-patch16-384) on the touchtech/fashion-images-pack-types dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0485
-- Accuracy: 0.9917
+- Loss: 0.0446
+- Accuracy: 0.9900
 
 ## Model description
 
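The card above describes an image-classification checkpoint fine-tuned from ViT-Large/16 at 384×384. A minimal inference sketch, assuming the checkpoint is published under the committer's namespace as vovadevico/fashion-images-pack-types-vit-large-patch16-384-v1 (only the model name appears in this commit; the exact hub repo id is an assumption):

```python
# Minimal sketch: run the fine-tuned classifier with the transformers pipeline.
# The repo id is an assumption pieced together from the commit author and the
# model name in the card; substitute the actual hub id of this repository.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="vovadevico/fashion-images-pack-types-vit-large-patch16-384-v1",
)

# vit-large-patch16-384 expects 384x384 inputs; the pipeline's image processor
# resizes and normalizes automatically.
predictions = classifier("pack_shot.jpg")  # local path or URL to a test image
print(predictions)  # e.g. [{"label": ..., "score": ...}, ...]
```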
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.989983305509182,
+    "eval_loss": 0.044622376561164856,
+    "eval_runtime": 212.2261,
+    "eval_samples_per_second": 11.29,
+    "eval_steps_per_second": 1.414,
+    "total_flos": 5.471324673450394e+19,
+    "train_loss": 0.02982461175216723,
+    "train_runtime": 10814.9254,
+    "train_samples_per_second": 6.276,
+    "train_steps_per_second": 0.785
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.989983305509182,
+    "eval_loss": 0.044622376561164856,
+    "eval_runtime": 212.2261,
+    "eval_samples_per_second": 11.29,
+    "eval_steps_per_second": 1.414
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "total_flos": 5.471324673450394e+19,
+    "train_loss": 0.02982461175216723,
+    "train_runtime": 10814.9254,
+    "train_samples_per_second": 6.276,
+    "train_steps_per_second": 0.785
+}
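As a quick sanity check, the throughput figures above are consistent with the trainer state that follows: 8485 optimizer steps at the recorded per-device batch size of 8 account for roughly train_samples_per_second × train_runtime (a single-device run with no gradient accumulation is assumed; neither is stated in this commit):

```python
# Consistency check on the training stats above (single-device run and no
# gradient accumulation assumed; both are assumptions, not stated here).
steps, batch_size = 8485, 8            # global_step, train_batch_size (below)
runtime, sps = 10814.9254, 6.276       # train_runtime, train_samples_per_second

print(steps * batch_size)              # 67880 samples processed in total
print(round(runtime * sps))            # 67875 -> matches within rounding
```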
trainer_state.json ADDED
@@ -0,0 +1,199 @@
+{
+  "best_metric": 0.044622376561164856,
+  "best_model_checkpoint": "/training_output/fashion-images-pack-types-vit-large-patch16-384-v1/checkpoint-5091",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 8485,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2946375957572186,
+      "grad_norm": 0.6524115204811096,
+      "learning_rate": 1.8821449616971128e-05,
+      "loss": 0.1857,
+      "step": 500
+    },
+    {
+      "epoch": 0.5892751915144372,
+      "grad_norm": 0.0360204242169857,
+      "learning_rate": 1.764289923394225e-05,
+      "loss": 0.0812,
+      "step": 1000
+    },
+    {
+      "epoch": 0.8839127872716559,
+      "grad_norm": 0.011747363954782486,
+      "learning_rate": 1.646434885091338e-05,
+      "loss": 0.0823,
+      "step": 1500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9837228714524207,
+      "eval_loss": 0.0661115050315857,
+      "eval_runtime": 214.9346,
+      "eval_samples_per_second": 11.148,
+      "eval_steps_per_second": 1.396,
+      "step": 1697
+    },
+    {
+      "epoch": 1.1785503830288744,
+      "grad_norm": 2.4793760776519775,
+      "learning_rate": 1.5285798467884503e-05,
+      "loss": 0.0427,
+      "step": 2000
+    },
+    {
+      "epoch": 1.473187978786093,
+      "grad_norm": 0.018900904804468155,
+      "learning_rate": 1.410724808485563e-05,
+      "loss": 0.0208,
+      "step": 2500
+    },
+    {
+      "epoch": 1.7678255745433118,
+      "grad_norm": 0.002555049257352948,
+      "learning_rate": 1.2928697701826754e-05,
+      "loss": 0.0197,
+      "step": 3000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9812186978297162,
+      "eval_loss": 0.08999926596879959,
+      "eval_runtime": 212.7124,
+      "eval_samples_per_second": 11.264,
+      "eval_steps_per_second": 1.41,
+      "step": 3394
+    },
+    {
+      "epoch": 2.0624631703005303,
+      "grad_norm": 0.0002872613840736449,
+      "learning_rate": 1.175014731879788e-05,
+      "loss": 0.0238,
+      "step": 3500
+    },
+    {
+      "epoch": 2.3571007660577488,
+      "grad_norm": 0.060405392199754715,
+      "learning_rate": 1.0571596935769004e-05,
+      "loss": 0.0075,
+      "step": 4000
+    },
+    {
+      "epoch": 2.6517383618149677,
+      "grad_norm": 0.001587436068803072,
+      "learning_rate": 9.39304655274013e-06,
+      "loss": 0.011,
+      "step": 4500
+    },
+    {
+      "epoch": 2.946375957572186,
+      "grad_norm": 0.013891604728996754,
+      "learning_rate": 8.214496169711255e-06,
+      "loss": 0.015,
+      "step": 5000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.989983305509182,
+      "eval_loss": 0.044622376561164856,
+      "eval_runtime": 211.8864,
+      "eval_samples_per_second": 11.308,
+      "eval_steps_per_second": 1.416,
+      "step": 5091
+    },
+    {
+      "epoch": 3.2410135533294047,
+      "grad_norm": 0.0014336752938106656,
+      "learning_rate": 7.035945786682381e-06,
+      "loss": 0.0036,
+      "step": 5500
+    },
+    {
+      "epoch": 3.5356511490866236,
+      "grad_norm": 6.203103112056851e-05,
+      "learning_rate": 5.857395403653507e-06,
+      "loss": 0.0069,
+      "step": 6000
+    },
+    {
+      "epoch": 3.830288744843842,
+      "grad_norm": 0.0005701961345039308,
+      "learning_rate": 4.678845020624632e-06,
+      "loss": 0.0017,
+      "step": 6500
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9912353923205343,
+      "eval_loss": 0.051807090640068054,
+      "eval_runtime": 210.707,
+      "eval_samples_per_second": 11.371,
+      "eval_steps_per_second": 1.424,
+      "step": 6788
+    },
+    {
+      "epoch": 4.124926340601061,
+      "grad_norm": 0.00022500261547975242,
+      "learning_rate": 3.5002946375957573e-06,
+      "loss": 0.002,
+      "step": 7000
+    },
+    {
+      "epoch": 4.419563936358279,
+      "grad_norm": 5.4514002840733156e-05,
+      "learning_rate": 2.3217442545668827e-06,
+      "loss": 0.0021,
+      "step": 7500
+    },
+    {
+      "epoch": 4.7142015321154975,
+      "grad_norm": 0.00018313823966309428,
+      "learning_rate": 1.1431938715380085e-06,
+      "loss": 0.0,
+      "step": 8000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.991652754590985,
+      "eval_loss": 0.048458877950906754,
+      "eval_runtime": 210.4591,
+      "eval_samples_per_second": 11.385,
+      "eval_steps_per_second": 1.425,
+      "step": 8485
+    },
+    {
+      "epoch": 5.0,
+      "step": 8485,
+      "total_flos": 5.471324673450394e+19,
+      "train_loss": 0.02982461175216723,
+      "train_runtime": 10814.9254,
+      "train_samples_per_second": 6.276,
+      "train_steps_per_second": 0.785
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 8485,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 5.471324673450394e+19,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
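Note that best_model_checkpoint points at step 5091 (epoch 3), whose eval_loss of 0.0446 is what eval_results.json and the updated card report, rather than the final epoch's 0.0485; the run evidently kept the best checkpoint. A small sketch for extracting that record from a trainer_state.json of this shape (standard library only):

```python
# Sketch: find the best evaluation record in a trainer_state.json shaped like
# the file added above. Uses only the standard library.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-epoch evaluation entries are the ones carrying an "eval_loss" key.
evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])
print(f"best step {best['step']} (epoch {best['epoch']:.0f}): "
      f"loss={best['eval_loss']:.4f}, accuracy={best['eval_accuracy']:.4f}")
# For this run: best step 5091 (epoch 3), matching best_model_checkpoint.
```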