Maliru committed on
Commit dd3c0ea · verified · 1 Parent(s): 7e268ac

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,26 @@
+
+ ---
+ tags:
+ - autotrain
+ - text-classification
+ base_model: MoritzLaurer/mDeBERTa-v3-base-mnli-xnli
+ widget:
+ - text: "I love AutoTrain"
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Text Classification
+
+ ## Validation Metrics
+ loss: 0.40547820925712585
+
+ f1: 0.8509544787077826
+
+ precision: 0.8474287106994882
+
+ recall: 0.8545097075448513
+
+ auc: 0.9024178348450937
+
+ accuracy: 0.8176919622810956
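The card above only lists the base model, labels, and validation metrics. As a minimal usage sketch — assuming the final weights are published under the Hub id derived from `username` and `project_name` in training_params.json, i.e. `Maliru/mDeBERTa-v3-base-mnli-xnli-dnd`, which is an assumption and may differ from the actual repo id — the classifier can be called through the standard `text-classification` pipeline:

```python
# Hedged sketch: the repo id below is inferred from "username" and
# "project_name" in training_params.json and may not be the real location.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="Maliru/mDeBERTa-v3-base-mnli-xnli-dnd",  # assumption
)

# config.json maps label 0 -> "in-character" and 1 -> "out-of-character".
print(classifier("I love AutoTrain"))
# e.g. [{'label': 'in-character', 'score': 0.97}]  (illustrative output only)
```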
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "[MASK]": 250101
+ }
checkpoint-3341/config.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "_name_or_path": "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli",
+ "_num_labels": 2,
+ "architectures": [
+ "DebertaV2ForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "in-character",
+ "1": "out-of-character"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "in-character": 0,
+ "out-of-character": 1
+ },
+ "layer_norm_eps": 1e-07,
+ "legacy": true,
+ "max_position_embeddings": 512,
+ "max_relative_positions": -1,
+ "model_type": "deberta-v2",
+ "norm_rel_ebd": "layer_norm",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "pooler_dropout": 0,
+ "pooler_hidden_act": "gelu",
+ "pooler_hidden_size": 768,
+ "pos_att_type": [
+ "p2c",
+ "c2p"
+ ],
+ "position_biased_input": false,
+ "position_buckets": 256,
+ "relative_attention": true,
+ "share_att_key": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "type_vocab_size": 0,
+ "vocab_size": 251000
+ }
checkpoint-3341/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2db3c56c71229589e5cb6b92fbac028b4ac4418342c591dd1cf5b4ec55ad0990
+ size 1115268200
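model.safetensors and the other large artifacts in this commit are stored as Git LFS pointers (`version` / `oid` / `size`), so only the SHA-256 and byte count appear in the diff. A small hedged sketch for checking a locally downloaded copy against the pointer above — the local path is a placeholder:

```python
# Hedged sketch: verify a downloaded artifact against its Git LFS pointer.
# "model.safetensors" is a placeholder path for wherever the file lives locally.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB weights never need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the pointer above
expected = "2db3c56c71229589e5cb6b92fbac028b4ac4418342c591dd1cf5b4ec55ad0990"
assert sha256_of("model.safetensors") == expected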
checkpoint-3341/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25473caab367f83e9d8ddd3ece1d0524f3c220add11f9d0ad8f39118823deaaf
+ size 2230650362
checkpoint-3341/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0c2f7bf4888c9640fe86145b5fd8fc8152e18ee97089aeaf29d199a2b1e960a
+ size 13990
checkpoint-3341/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1d38a546d89e80d65205a3317378d5850a248f277d5319207ec6487b4395cb1
+ size 1064
checkpoint-3341/trainer_state.json ADDED
@@ -0,0 +1,986 @@
1
+ {
2
+ "best_metric": 0.40547820925712585,
3
+ "best_model_checkpoint": "mDeBERTa-v3-base-mnli-xnli-dnd/checkpoint-3341",
4
+ "epoch": 1.0,
5
+ "eval_steps": 500,
6
+ "global_step": 3341,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0074827895839568994,
13
+ "grad_norm": 4.059116363525391,
14
+ "learning_rate": 3.7313432835820893e-06,
15
+ "loss": 0.6987,
16
+ "step": 25
17
+ },
18
+ {
19
+ "epoch": 0.014965579167913799,
20
+ "grad_norm": 3.396561861038208,
21
+ "learning_rate": 7.4626865671641785e-06,
22
+ "loss": 0.6773,
23
+ "step": 50
24
+ },
25
+ {
26
+ "epoch": 0.022448368751870697,
27
+ "grad_norm": 4.667275905609131,
28
+ "learning_rate": 1.119402985074627e-05,
29
+ "loss": 0.6562,
30
+ "step": 75
31
+ },
32
+ {
33
+ "epoch": 0.029931158335827598,
34
+ "grad_norm": 4.270143508911133,
35
+ "learning_rate": 1.4925373134328357e-05,
36
+ "loss": 0.6451,
37
+ "step": 100
38
+ },
39
+ {
40
+ "epoch": 0.037413947919784495,
41
+ "grad_norm": 6.245508193969727,
42
+ "learning_rate": 1.865671641791045e-05,
43
+ "loss": 0.6372,
44
+ "step": 125
45
+ },
46
+ {
47
+ "epoch": 0.044896737503741395,
48
+ "grad_norm": 8.381564140319824,
49
+ "learning_rate": 2.238805970149254e-05,
50
+ "loss": 0.6065,
51
+ "step": 150
52
+ },
53
+ {
54
+ "epoch": 0.052379527087698295,
55
+ "grad_norm": 5.698436260223389,
56
+ "learning_rate": 2.6119402985074626e-05,
57
+ "loss": 0.5725,
58
+ "step": 175
59
+ },
60
+ {
61
+ "epoch": 0.059862316671655195,
62
+ "grad_norm": 6.78079080581665,
63
+ "learning_rate": 2.9850746268656714e-05,
64
+ "loss": 0.5438,
65
+ "step": 200
66
+ },
67
+ {
68
+ "epoch": 0.06734510625561209,
69
+ "grad_norm": 4.647417068481445,
70
+ "learning_rate": 3.358208955223881e-05,
71
+ "loss": 0.5975,
72
+ "step": 225
73
+ },
74
+ {
75
+ "epoch": 0.07482789583956899,
76
+ "grad_norm": 5.508735179901123,
77
+ "learning_rate": 3.73134328358209e-05,
78
+ "loss": 0.5774,
79
+ "step": 250
80
+ },
81
+ {
82
+ "epoch": 0.08231068542352589,
83
+ "grad_norm": 6.2550458908081055,
84
+ "learning_rate": 4.104477611940299e-05,
85
+ "loss": 0.5779,
86
+ "step": 275
87
+ },
88
+ {
89
+ "epoch": 0.08979347500748279,
90
+ "grad_norm": 6.34486198425293,
91
+ "learning_rate": 4.477611940298508e-05,
92
+ "loss": 0.5835,
93
+ "step": 300
94
+ },
95
+ {
96
+ "epoch": 0.09727626459143969,
97
+ "grad_norm": 11.040050506591797,
98
+ "learning_rate": 4.850746268656717e-05,
99
+ "loss": 0.5475,
100
+ "step": 325
101
+ },
102
+ {
103
+ "epoch": 0.10475905417539659,
104
+ "grad_norm": 8.249756813049316,
105
+ "learning_rate": 4.975049900199601e-05,
106
+ "loss": 0.5557,
107
+ "step": 350
108
+ },
109
+ {
110
+ "epoch": 0.11224184375935349,
111
+ "grad_norm": 3.8277997970581055,
112
+ "learning_rate": 4.933466400532269e-05,
113
+ "loss": 0.5059,
114
+ "step": 375
115
+ },
116
+ {
117
+ "epoch": 0.11972463334331039,
118
+ "grad_norm": 3.1715078353881836,
119
+ "learning_rate": 4.891882900864937e-05,
120
+ "loss": 0.5417,
121
+ "step": 400
122
+ },
123
+ {
124
+ "epoch": 0.12720742292726728,
125
+ "grad_norm": 7.743518829345703,
126
+ "learning_rate": 4.8502994011976046e-05,
127
+ "loss": 0.5032,
128
+ "step": 425
129
+ },
130
+ {
131
+ "epoch": 0.13469021251122418,
132
+ "grad_norm": 5.770236015319824,
133
+ "learning_rate": 4.808715901530273e-05,
134
+ "loss": 0.4911,
135
+ "step": 450
136
+ },
137
+ {
138
+ "epoch": 0.14217300209518108,
139
+ "grad_norm": 4.398015022277832,
140
+ "learning_rate": 4.767132401862941e-05,
141
+ "loss": 0.5252,
142
+ "step": 475
143
+ },
144
+ {
145
+ "epoch": 0.14965579167913798,
146
+ "grad_norm": 23.583471298217773,
147
+ "learning_rate": 4.725548902195609e-05,
148
+ "loss": 0.5005,
149
+ "step": 500
150
+ },
151
+ {
152
+ "epoch": 0.15713858126309488,
153
+ "grad_norm": 2.395005226135254,
154
+ "learning_rate": 4.683965402528277e-05,
155
+ "loss": 0.562,
156
+ "step": 525
157
+ },
158
+ {
159
+ "epoch": 0.16462137084705178,
160
+ "grad_norm": 2.9066038131713867,
161
+ "learning_rate": 4.642381902860945e-05,
162
+ "loss": 0.4656,
163
+ "step": 550
164
+ },
165
+ {
166
+ "epoch": 0.17210416043100868,
167
+ "grad_norm": 9.273233413696289,
168
+ "learning_rate": 4.600798403193613e-05,
169
+ "loss": 0.6357,
170
+ "step": 575
171
+ },
172
+ {
173
+ "epoch": 0.17958695001496558,
174
+ "grad_norm": 2.02755069732666,
175
+ "learning_rate": 4.559214903526281e-05,
176
+ "loss": 0.6515,
177
+ "step": 600
178
+ },
179
+ {
180
+ "epoch": 0.18706973959892248,
181
+ "grad_norm": 6.606990814208984,
182
+ "learning_rate": 4.517631403858949e-05,
183
+ "loss": 0.5818,
184
+ "step": 625
185
+ },
186
+ {
187
+ "epoch": 0.19455252918287938,
188
+ "grad_norm": 22.710664749145508,
189
+ "learning_rate": 4.476047904191617e-05,
190
+ "loss": 0.5306,
191
+ "step": 650
192
+ },
193
+ {
194
+ "epoch": 0.20203531876683628,
195
+ "grad_norm": 3.1198155879974365,
196
+ "learning_rate": 4.434464404524285e-05,
197
+ "loss": 0.5635,
198
+ "step": 675
199
+ },
200
+ {
201
+ "epoch": 0.20951810835079318,
202
+ "grad_norm": 2.4390244483947754,
203
+ "learning_rate": 4.392880904856953e-05,
204
+ "loss": 0.4779,
205
+ "step": 700
206
+ },
207
+ {
208
+ "epoch": 0.21700089793475008,
209
+ "grad_norm": 7.443247318267822,
210
+ "learning_rate": 4.351297405189621e-05,
211
+ "loss": 0.5732,
212
+ "step": 725
213
+ },
214
+ {
215
+ "epoch": 0.22448368751870698,
216
+ "grad_norm": 7.480808258056641,
217
+ "learning_rate": 4.3097139055222893e-05,
218
+ "loss": 0.5574,
219
+ "step": 750
220
+ },
221
+ {
222
+ "epoch": 0.23196647710266388,
223
+ "grad_norm": 3.3201472759246826,
224
+ "learning_rate": 4.268130405854957e-05,
225
+ "loss": 0.4929,
226
+ "step": 775
227
+ },
228
+ {
229
+ "epoch": 0.23944926668662078,
230
+ "grad_norm": 20.688968658447266,
231
+ "learning_rate": 4.226546906187625e-05,
232
+ "loss": 0.596,
233
+ "step": 800
234
+ },
235
+ {
236
+ "epoch": 0.24693205627057768,
237
+ "grad_norm": 4.053366661071777,
238
+ "learning_rate": 4.184963406520293e-05,
239
+ "loss": 0.4967,
240
+ "step": 825
241
+ },
242
+ {
243
+ "epoch": 0.25441484585453455,
244
+ "grad_norm": 5.196917533874512,
245
+ "learning_rate": 4.143379906852961e-05,
246
+ "loss": 0.4415,
247
+ "step": 850
248
+ },
249
+ {
250
+ "epoch": 0.2618976354384915,
251
+ "grad_norm": 6.801918029785156,
252
+ "learning_rate": 4.101796407185629e-05,
253
+ "loss": 0.4998,
254
+ "step": 875
255
+ },
256
+ {
257
+ "epoch": 0.26938042502244836,
258
+ "grad_norm": 3.020432949066162,
259
+ "learning_rate": 4.060212907518297e-05,
260
+ "loss": 0.5615,
261
+ "step": 900
262
+ },
263
+ {
264
+ "epoch": 0.2768632146064053,
265
+ "grad_norm": 1.862509846687317,
266
+ "learning_rate": 4.018629407850965e-05,
267
+ "loss": 0.4653,
268
+ "step": 925
269
+ },
270
+ {
271
+ "epoch": 0.28434600419036216,
272
+ "grad_norm": 8.588277816772461,
273
+ "learning_rate": 3.977045908183633e-05,
274
+ "loss": 0.5572,
275
+ "step": 950
276
+ },
277
+ {
278
+ "epoch": 0.2918287937743191,
279
+ "grad_norm": 5.132936954498291,
280
+ "learning_rate": 3.935462408516301e-05,
281
+ "loss": 0.5228,
282
+ "step": 975
283
+ },
284
+ {
285
+ "epoch": 0.29931158335827596,
286
+ "grad_norm": 5.957391738891602,
287
+ "learning_rate": 3.893878908848969e-05,
288
+ "loss": 0.5093,
289
+ "step": 1000
290
+ },
291
+ {
292
+ "epoch": 0.3067943729422329,
293
+ "grad_norm": 6.140280723571777,
294
+ "learning_rate": 3.852295409181637e-05,
295
+ "loss": 0.4869,
296
+ "step": 1025
297
+ },
298
+ {
299
+ "epoch": 0.31427716252618976,
300
+ "grad_norm": 4.803741455078125,
301
+ "learning_rate": 3.810711909514305e-05,
302
+ "loss": 0.5162,
303
+ "step": 1050
304
+ },
305
+ {
306
+ "epoch": 0.3217599521101467,
307
+ "grad_norm": 4.811578750610352,
308
+ "learning_rate": 3.7691284098469734e-05,
309
+ "loss": 0.5407,
310
+ "step": 1075
311
+ },
312
+ {
313
+ "epoch": 0.32924274169410356,
314
+ "grad_norm": 18.00532341003418,
315
+ "learning_rate": 3.727544910179641e-05,
316
+ "loss": 0.4616,
317
+ "step": 1100
318
+ },
319
+ {
320
+ "epoch": 0.3367255312780605,
321
+ "grad_norm": 6.0717010498046875,
322
+ "learning_rate": 3.685961410512309e-05,
323
+ "loss": 0.4624,
324
+ "step": 1125
325
+ },
326
+ {
327
+ "epoch": 0.34420832086201736,
328
+ "grad_norm": 3.2662813663482666,
329
+ "learning_rate": 3.644377910844977e-05,
330
+ "loss": 0.5201,
331
+ "step": 1150
332
+ },
333
+ {
334
+ "epoch": 0.3516911104459743,
335
+ "grad_norm": 3.59017276763916,
336
+ "learning_rate": 3.6027944111776455e-05,
337
+ "loss": 0.4812,
338
+ "step": 1175
339
+ },
340
+ {
341
+ "epoch": 0.35917390002993116,
342
+ "grad_norm": 7.663816928863525,
343
+ "learning_rate": 3.5612109115103126e-05,
344
+ "loss": 0.4407,
345
+ "step": 1200
346
+ },
347
+ {
348
+ "epoch": 0.36665668961388803,
349
+ "grad_norm": 5.2263407707214355,
350
+ "learning_rate": 3.5196274118429805e-05,
351
+ "loss": 0.4543,
352
+ "step": 1225
353
+ },
354
+ {
355
+ "epoch": 0.37413947919784496,
356
+ "grad_norm": 15.237629890441895,
357
+ "learning_rate": 3.478043912175648e-05,
358
+ "loss": 0.5069,
359
+ "step": 1250
360
+ },
361
+ {
362
+ "epoch": 0.38162226878180183,
363
+ "grad_norm": 16.1485538482666,
364
+ "learning_rate": 3.436460412508317e-05,
365
+ "loss": 0.4123,
366
+ "step": 1275
367
+ },
368
+ {
369
+ "epoch": 0.38910505836575876,
370
+ "grad_norm": 2.711836576461792,
371
+ "learning_rate": 3.394876912840985e-05,
372
+ "loss": 0.5798,
373
+ "step": 1300
374
+ },
375
+ {
376
+ "epoch": 0.39658784794971563,
377
+ "grad_norm": 9.17324447631836,
378
+ "learning_rate": 3.3532934131736525e-05,
379
+ "loss": 0.4668,
380
+ "step": 1325
381
+ },
382
+ {
383
+ "epoch": 0.40407063753367256,
384
+ "grad_norm": 5.9319047927856445,
385
+ "learning_rate": 3.3117099135063204e-05,
386
+ "loss": 0.4833,
387
+ "step": 1350
388
+ },
389
+ {
390
+ "epoch": 0.41155342711762943,
391
+ "grad_norm": 1.7995569705963135,
392
+ "learning_rate": 3.270126413838989e-05,
393
+ "loss": 0.499,
394
+ "step": 1375
395
+ },
396
+ {
397
+ "epoch": 0.41903621670158636,
398
+ "grad_norm": 3.604038953781128,
399
+ "learning_rate": 3.228542914171657e-05,
400
+ "loss": 0.4182,
401
+ "step": 1400
402
+ },
403
+ {
404
+ "epoch": 0.42651900628554323,
405
+ "grad_norm": 20.409942626953125,
406
+ "learning_rate": 3.1869594145043246e-05,
407
+ "loss": 0.4604,
408
+ "step": 1425
409
+ },
410
+ {
411
+ "epoch": 0.43400179586950016,
412
+ "grad_norm": 3.52787184715271,
413
+ "learning_rate": 3.1453759148369925e-05,
414
+ "loss": 0.5126,
415
+ "step": 1450
416
+ },
417
+ {
418
+ "epoch": 0.44148458545345703,
419
+ "grad_norm": 5.209261894226074,
420
+ "learning_rate": 3.103792415169661e-05,
421
+ "loss": 0.5798,
422
+ "step": 1475
423
+ },
424
+ {
425
+ "epoch": 0.44896737503741396,
426
+ "grad_norm": 2.947472095489502,
427
+ "learning_rate": 3.062208915502329e-05,
428
+ "loss": 0.5215,
429
+ "step": 1500
430
+ },
431
+ {
432
+ "epoch": 0.45645016462137084,
433
+ "grad_norm": 7.958189964294434,
434
+ "learning_rate": 3.0206254158349967e-05,
435
+ "loss": 0.472,
436
+ "step": 1525
437
+ },
438
+ {
439
+ "epoch": 0.46393295420532776,
440
+ "grad_norm": 2.645322322845459,
441
+ "learning_rate": 2.979041916167665e-05,
442
+ "loss": 0.4566,
443
+ "step": 1550
444
+ },
445
+ {
446
+ "epoch": 0.47141574378928464,
447
+ "grad_norm": 7.029107570648193,
448
+ "learning_rate": 2.9374584165003327e-05,
449
+ "loss": 0.4619,
450
+ "step": 1575
451
+ },
452
+ {
453
+ "epoch": 0.47889853337324156,
454
+ "grad_norm": 5.63282585144043,
455
+ "learning_rate": 2.8958749168330006e-05,
456
+ "loss": 0.4959,
457
+ "step": 1600
458
+ },
459
+ {
460
+ "epoch": 0.48638132295719844,
461
+ "grad_norm": 2.9673337936401367,
462
+ "learning_rate": 2.8542914171656687e-05,
463
+ "loss": 0.39,
464
+ "step": 1625
465
+ },
466
+ {
467
+ "epoch": 0.49386411254115536,
468
+ "grad_norm": 24.8010311126709,
469
+ "learning_rate": 2.8127079174983366e-05,
470
+ "loss": 0.3776,
471
+ "step": 1650
472
+ },
473
+ {
474
+ "epoch": 0.5013469021251122,
475
+ "grad_norm": 2.339782238006592,
476
+ "learning_rate": 2.7711244178310048e-05,
477
+ "loss": 0.4797,
478
+ "step": 1675
479
+ },
480
+ {
481
+ "epoch": 0.5088296917090691,
482
+ "grad_norm": 5.149988651275635,
483
+ "learning_rate": 2.7295409181636726e-05,
484
+ "loss": 0.4535,
485
+ "step": 1700
486
+ },
487
+ {
488
+ "epoch": 0.5163124812930261,
489
+ "grad_norm": 8.524362564086914,
490
+ "learning_rate": 2.6879574184963408e-05,
491
+ "loss": 0.4421,
492
+ "step": 1725
493
+ },
494
+ {
495
+ "epoch": 0.523795270876983,
496
+ "grad_norm": 4.499845504760742,
497
+ "learning_rate": 2.6463739188290087e-05,
498
+ "loss": 0.3448,
499
+ "step": 1750
500
+ },
501
+ {
502
+ "epoch": 0.5312780604609398,
503
+ "grad_norm": 2.887631416320801,
504
+ "learning_rate": 2.604790419161677e-05,
505
+ "loss": 0.583,
506
+ "step": 1775
507
+ },
508
+ {
509
+ "epoch": 0.5387608500448967,
510
+ "grad_norm": 1.4689120054244995,
511
+ "learning_rate": 2.5632069194943447e-05,
512
+ "loss": 0.4272,
513
+ "step": 1800
514
+ },
515
+ {
516
+ "epoch": 0.5462436396288536,
517
+ "grad_norm": 6.976527690887451,
518
+ "learning_rate": 2.521623419827013e-05,
519
+ "loss": 0.5061,
520
+ "step": 1825
521
+ },
522
+ {
523
+ "epoch": 0.5537264292128106,
524
+ "grad_norm": 5.614429473876953,
525
+ "learning_rate": 2.4800399201596807e-05,
526
+ "loss": 0.3377,
527
+ "step": 1850
528
+ },
529
+ {
530
+ "epoch": 0.5612092187967674,
531
+ "grad_norm": 5.337092876434326,
532
+ "learning_rate": 2.438456420492349e-05,
533
+ "loss": 0.497,
534
+ "step": 1875
535
+ },
536
+ {
537
+ "epoch": 0.5686920083807243,
538
+ "grad_norm": 8.761651039123535,
539
+ "learning_rate": 2.3968729208250168e-05,
540
+ "loss": 0.4626,
541
+ "step": 1900
542
+ },
543
+ {
544
+ "epoch": 0.5761747979646812,
545
+ "grad_norm": 4.3848466873168945,
546
+ "learning_rate": 2.355289421157685e-05,
547
+ "loss": 0.5425,
548
+ "step": 1925
549
+ },
550
+ {
551
+ "epoch": 0.5836575875486382,
552
+ "grad_norm": 2.7267236709594727,
553
+ "learning_rate": 2.3137059214903528e-05,
554
+ "loss": 0.3931,
555
+ "step": 1950
556
+ },
557
+ {
558
+ "epoch": 0.591140377132595,
559
+ "grad_norm": 4.476986885070801,
560
+ "learning_rate": 2.272122421823021e-05,
561
+ "loss": 0.421,
562
+ "step": 1975
563
+ },
564
+ {
565
+ "epoch": 0.5986231667165519,
566
+ "grad_norm": 5.569231033325195,
567
+ "learning_rate": 2.2305389221556888e-05,
568
+ "loss": 0.388,
569
+ "step": 2000
570
+ },
571
+ {
572
+ "epoch": 0.6061059563005088,
573
+ "grad_norm": 4.156082630157471,
574
+ "learning_rate": 2.1889554224883567e-05,
575
+ "loss": 0.4566,
576
+ "step": 2025
577
+ },
578
+ {
579
+ "epoch": 0.6135887458844658,
580
+ "grad_norm": 4.638250827789307,
581
+ "learning_rate": 2.147371922821025e-05,
582
+ "loss": 0.4129,
583
+ "step": 2050
584
+ },
585
+ {
586
+ "epoch": 0.6210715354684226,
587
+ "grad_norm": 3.4790878295898438,
588
+ "learning_rate": 2.1057884231536927e-05,
589
+ "loss": 0.3975,
590
+ "step": 2075
591
+ },
592
+ {
593
+ "epoch": 0.6285543250523795,
594
+ "grad_norm": 13.76499080657959,
595
+ "learning_rate": 2.0642049234863606e-05,
596
+ "loss": 0.4153,
597
+ "step": 2100
598
+ },
599
+ {
600
+ "epoch": 0.6360371146363364,
601
+ "grad_norm": 4.2070488929748535,
602
+ "learning_rate": 2.0226214238190287e-05,
603
+ "loss": 0.4893,
604
+ "step": 2125
605
+ },
606
+ {
607
+ "epoch": 0.6435199042202934,
608
+ "grad_norm": 1.6636909246444702,
609
+ "learning_rate": 1.9810379241516966e-05,
610
+ "loss": 0.5545,
611
+ "step": 2150
612
+ },
613
+ {
614
+ "epoch": 0.6510026938042502,
615
+ "grad_norm": 2.6627988815307617,
616
+ "learning_rate": 1.9394544244843644e-05,
617
+ "loss": 0.4123,
618
+ "step": 2175
619
+ },
620
+ {
621
+ "epoch": 0.6584854833882071,
622
+ "grad_norm": 2.606734275817871,
623
+ "learning_rate": 1.8978709248170326e-05,
624
+ "loss": 0.4147,
625
+ "step": 2200
626
+ },
627
+ {
628
+ "epoch": 0.665968272972164,
629
+ "grad_norm": 4.407214164733887,
630
+ "learning_rate": 1.8562874251497005e-05,
631
+ "loss": 0.3227,
632
+ "step": 2225
633
+ },
634
+ {
635
+ "epoch": 0.673451062556121,
636
+ "grad_norm": 10.648530960083008,
637
+ "learning_rate": 1.8147039254823687e-05,
638
+ "loss": 0.5225,
639
+ "step": 2250
640
+ },
641
+ {
642
+ "epoch": 0.6809338521400778,
643
+ "grad_norm": 4.558552265167236,
644
+ "learning_rate": 1.7731204258150365e-05,
645
+ "loss": 0.4262,
646
+ "step": 2275
647
+ },
648
+ {
649
+ "epoch": 0.6884166417240347,
650
+ "grad_norm": 10.879168510437012,
651
+ "learning_rate": 1.7315369261477047e-05,
652
+ "loss": 0.4286,
653
+ "step": 2300
654
+ },
655
+ {
656
+ "epoch": 0.6958994313079916,
657
+ "grad_norm": 3.295769453048706,
658
+ "learning_rate": 1.6899534264803725e-05,
659
+ "loss": 0.4639,
660
+ "step": 2325
661
+ },
662
+ {
663
+ "epoch": 0.7033822208919486,
664
+ "grad_norm": 5.799977779388428,
665
+ "learning_rate": 1.6483699268130407e-05,
666
+ "loss": 0.4348,
667
+ "step": 2350
668
+ },
669
+ {
670
+ "epoch": 0.7108650104759054,
671
+ "grad_norm": 3.625521659851074,
672
+ "learning_rate": 1.6067864271457086e-05,
673
+ "loss": 0.4088,
674
+ "step": 2375
675
+ },
676
+ {
677
+ "epoch": 0.7183478000598623,
678
+ "grad_norm": 4.514614105224609,
679
+ "learning_rate": 1.5652029274783768e-05,
680
+ "loss": 0.4301,
681
+ "step": 2400
682
+ },
683
+ {
684
+ "epoch": 0.7258305896438192,
685
+ "grad_norm": 11.377728462219238,
686
+ "learning_rate": 1.5236194278110446e-05,
687
+ "loss": 0.4906,
688
+ "step": 2425
689
+ },
690
+ {
691
+ "epoch": 0.7333133792277761,
692
+ "grad_norm": 1.329950213432312,
693
+ "learning_rate": 1.4820359281437126e-05,
694
+ "loss": 0.4753,
695
+ "step": 2450
696
+ },
697
+ {
698
+ "epoch": 0.740796168811733,
699
+ "grad_norm": 4.047940254211426,
700
+ "learning_rate": 1.4404524284763806e-05,
701
+ "loss": 0.3542,
702
+ "step": 2475
703
+ },
704
+ {
705
+ "epoch": 0.7482789583956899,
706
+ "grad_norm": 6.413780689239502,
707
+ "learning_rate": 1.3988689288090487e-05,
708
+ "loss": 0.5139,
709
+ "step": 2500
710
+ },
711
+ {
712
+ "epoch": 0.7557617479796468,
713
+ "grad_norm": 1.494303584098816,
714
+ "learning_rate": 1.3572854291417167e-05,
715
+ "loss": 0.3883,
716
+ "step": 2525
717
+ },
718
+ {
719
+ "epoch": 0.7632445375636037,
720
+ "grad_norm": 6.560792446136475,
721
+ "learning_rate": 1.3157019294743847e-05,
722
+ "loss": 0.4551,
723
+ "step": 2550
724
+ },
725
+ {
726
+ "epoch": 0.7707273271475606,
727
+ "grad_norm": 3.7073848247528076,
728
+ "learning_rate": 1.2741184298070527e-05,
729
+ "loss": 0.4342,
730
+ "step": 2575
731
+ },
732
+ {
733
+ "epoch": 0.7782101167315175,
734
+ "grad_norm": 6.4910359382629395,
735
+ "learning_rate": 1.2325349301397205e-05,
736
+ "loss": 0.3286,
737
+ "step": 2600
738
+ },
739
+ {
740
+ "epoch": 0.7856929063154744,
741
+ "grad_norm": 7.595745086669922,
742
+ "learning_rate": 1.1909514304723886e-05,
743
+ "loss": 0.4118,
744
+ "step": 2625
745
+ },
746
+ {
747
+ "epoch": 0.7931756958994313,
748
+ "grad_norm": 5.437306880950928,
749
+ "learning_rate": 1.1493679308050566e-05,
750
+ "loss": 0.4666,
751
+ "step": 2650
752
+ },
753
+ {
754
+ "epoch": 0.8006584854833882,
755
+ "grad_norm": 2.1218721866607666,
756
+ "learning_rate": 1.1077844311377246e-05,
757
+ "loss": 0.4041,
758
+ "step": 2675
759
+ },
760
+ {
761
+ "epoch": 0.8081412750673451,
762
+ "grad_norm": 2.611057996749878,
763
+ "learning_rate": 1.0662009314703926e-05,
764
+ "loss": 0.3704,
765
+ "step": 2700
766
+ },
767
+ {
768
+ "epoch": 0.815624064651302,
769
+ "grad_norm": 3.661681652069092,
770
+ "learning_rate": 1.0246174318030606e-05,
771
+ "loss": 0.3822,
772
+ "step": 2725
773
+ },
774
+ {
775
+ "epoch": 0.8231068542352589,
776
+ "grad_norm": 4.607560157775879,
777
+ "learning_rate": 9.830339321357286e-06,
778
+ "loss": 0.4149,
779
+ "step": 2750
780
+ },
781
+ {
782
+ "epoch": 0.8305896438192159,
783
+ "grad_norm": 4.618942737579346,
784
+ "learning_rate": 9.414504324683967e-06,
785
+ "loss": 0.4174,
786
+ "step": 2775
787
+ },
788
+ {
789
+ "epoch": 0.8380724334031727,
790
+ "grad_norm": 5.718959808349609,
791
+ "learning_rate": 8.998669328010647e-06,
792
+ "loss": 0.4027,
793
+ "step": 2800
794
+ },
795
+ {
796
+ "epoch": 0.8455552229871296,
797
+ "grad_norm": 3.5075976848602295,
798
+ "learning_rate": 8.582834331337327e-06,
799
+ "loss": 0.4384,
800
+ "step": 2825
801
+ },
802
+ {
803
+ "epoch": 0.8530380125710865,
804
+ "grad_norm": 8.008058547973633,
805
+ "learning_rate": 8.166999334664007e-06,
806
+ "loss": 0.3897,
807
+ "step": 2850
808
+ },
809
+ {
810
+ "epoch": 0.8605208021550435,
811
+ "grad_norm": 3.166041851043701,
812
+ "learning_rate": 7.751164337990686e-06,
813
+ "loss": 0.339,
814
+ "step": 2875
815
+ },
816
+ {
817
+ "epoch": 0.8680035917390003,
818
+ "grad_norm": 10.711910247802734,
819
+ "learning_rate": 7.335329341317365e-06,
820
+ "loss": 0.4284,
821
+ "step": 2900
822
+ },
823
+ {
824
+ "epoch": 0.8754863813229572,
825
+ "grad_norm": 1.658119559288025,
826
+ "learning_rate": 6.919494344644045e-06,
827
+ "loss": 0.3585,
828
+ "step": 2925
829
+ },
830
+ {
831
+ "epoch": 0.8829691709069141,
832
+ "grad_norm": 11.725804328918457,
833
+ "learning_rate": 6.503659347970725e-06,
834
+ "loss": 0.4542,
835
+ "step": 2950
836
+ },
837
+ {
838
+ "epoch": 0.8904519604908709,
839
+ "grad_norm": 13.12930679321289,
840
+ "learning_rate": 6.0878243512974054e-06,
841
+ "loss": 0.3438,
842
+ "step": 2975
843
+ },
844
+ {
845
+ "epoch": 0.8979347500748279,
846
+ "grad_norm": 12.93932819366455,
847
+ "learning_rate": 5.671989354624086e-06,
848
+ "loss": 0.3635,
849
+ "step": 3000
850
+ },
851
+ {
852
+ "epoch": 0.9054175396587848,
853
+ "grad_norm": 0.6720038652420044,
854
+ "learning_rate": 5.256154357950766e-06,
855
+ "loss": 0.4396,
856
+ "step": 3025
857
+ },
858
+ {
859
+ "epoch": 0.9129003292427417,
860
+ "grad_norm": 5.34990930557251,
861
+ "learning_rate": 4.840319361277446e-06,
862
+ "loss": 0.4032,
863
+ "step": 3050
864
+ },
865
+ {
866
+ "epoch": 0.9203831188266985,
867
+ "grad_norm": 1.8587225675582886,
868
+ "learning_rate": 4.424484364604125e-06,
869
+ "loss": 0.4679,
870
+ "step": 3075
871
+ },
872
+ {
873
+ "epoch": 0.9278659084106555,
874
+ "grad_norm": 6.022444725036621,
875
+ "learning_rate": 4.008649367930805e-06,
876
+ "loss": 0.3566,
877
+ "step": 3100
878
+ },
879
+ {
880
+ "epoch": 0.9353486979946124,
881
+ "grad_norm": 8.346709251403809,
882
+ "learning_rate": 3.592814371257485e-06,
883
+ "loss": 0.4395,
884
+ "step": 3125
885
+ },
886
+ {
887
+ "epoch": 0.9428314875785693,
888
+ "grad_norm": 1.5148768424987793,
889
+ "learning_rate": 3.1769793745841653e-06,
890
+ "loss": 0.3577,
891
+ "step": 3150
892
+ },
893
+ {
894
+ "epoch": 0.9503142771625261,
895
+ "grad_norm": 9.207695007324219,
896
+ "learning_rate": 2.761144377910845e-06,
897
+ "loss": 0.3368,
898
+ "step": 3175
899
+ },
900
+ {
901
+ "epoch": 0.9577970667464831,
902
+ "grad_norm": 7.755685329437256,
903
+ "learning_rate": 2.345309381237525e-06,
904
+ "loss": 0.4368,
905
+ "step": 3200
906
+ },
907
+ {
908
+ "epoch": 0.96527985633044,
909
+ "grad_norm": 9.233366966247559,
910
+ "learning_rate": 1.929474384564205e-06,
911
+ "loss": 0.339,
912
+ "step": 3225
913
+ },
914
+ {
915
+ "epoch": 0.9727626459143969,
916
+ "grad_norm": 4.844126224517822,
917
+ "learning_rate": 1.513639387890885e-06,
918
+ "loss": 0.4115,
919
+ "step": 3250
920
+ },
921
+ {
922
+ "epoch": 0.9802454354983537,
923
+ "grad_norm": 5.265417575836182,
924
+ "learning_rate": 1.097804391217565e-06,
925
+ "loss": 0.3768,
926
+ "step": 3275
927
+ },
928
+ {
929
+ "epoch": 0.9877282250823107,
930
+ "grad_norm": 4.222898960113525,
931
+ "learning_rate": 6.819693945442449e-07,
932
+ "loss": 0.452,
933
+ "step": 3300
934
+ },
935
+ {
936
+ "epoch": 0.9952110146662676,
937
+ "grad_norm": 8.413681030273438,
938
+ "learning_rate": 2.661343978709248e-07,
939
+ "loss": 0.375,
940
+ "step": 3325
941
+ },
942
+ {
943
+ "epoch": 1.0,
944
+ "eval_accuracy": 0.8176919622810956,
945
+ "eval_auc": 0.9024178348450937,
946
+ "eval_f1": 0.8509544787077826,
947
+ "eval_loss": 0.40547820925712585,
948
+ "eval_precision": 0.8474287106994882,
949
+ "eval_recall": 0.8545097075448513,
950
+ "eval_runtime": 1668.8106,
951
+ "eval_samples_per_second": 4.003,
952
+ "eval_steps_per_second": 0.25,
953
+ "step": 3341
954
+ }
955
+ ],
956
+ "logging_steps": 25,
957
+ "max_steps": 3341,
958
+ "num_input_tokens_seen": 0,
959
+ "num_train_epochs": 1,
960
+ "save_steps": 500,
961
+ "stateful_callbacks": {
962
+ "EarlyStoppingCallback": {
963
+ "args": {
964
+ "early_stopping_patience": 5,
965
+ "early_stopping_threshold": 0.01
966
+ },
967
+ "attributes": {
968
+ "early_stopping_patience_counter": 0
969
+ }
970
+ },
971
+ "TrainerControl": {
972
+ "args": {
973
+ "should_epoch_stop": false,
974
+ "should_evaluate": false,
975
+ "should_log": false,
976
+ "should_save": true,
977
+ "should_training_stop": true
978
+ },
979
+ "attributes": {}
980
+ }
981
+ },
982
+ "total_flos": 1757876485773312.0,
983
+ "train_batch_size": 8,
984
+ "trial_name": null,
985
+ "trial_params": null
986
+ }
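The trainer_state.json above records a training-loss entry every 25 steps plus a single end-of-epoch eval block. A minimal sketch for pulling those numbers back out of a local copy of the file (the path is assumed to match the checkpoint layout in this commit):

```python
# Hedged sketch: read the Trainer log above from a local copy of
# checkpoint-3341/trainer_state.json and split it into train/eval records.
import json

with open("checkpoint-3341/trainer_state.json") as f:
    state = json.load(f)

train_log = [e for e in state["log_history"] if "loss" in e]      # every 25 steps
eval_log = [e for e in state["log_history"] if "eval_loss" in e]  # once per epoch

print("best metric:", state["best_metric"])                        # 0.4054...
print("final train loss @ step", train_log[-1]["step"], ":", train_log[-1]["loss"])
print(f"eval f1: {eval_log[-1]['eval_f1']:.4f}")
```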
checkpoint-3341/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:befff7270d1a6c6686f743f6a23693911730b33978c93c96cd90536086e6df65
+ size 5368
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "_name_or_path": "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli",
+ "_num_labels": 2,
+ "architectures": [
+ "DebertaV2ForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "in-character",
+ "1": "out-of-character"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "in-character": 0,
+ "out-of-character": 1
+ },
+ "layer_norm_eps": 1e-07,
+ "legacy": true,
+ "max_position_embeddings": 512,
+ "max_relative_positions": -1,
+ "model_type": "deberta-v2",
+ "norm_rel_ebd": "layer_norm",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "pooler_dropout": 0,
+ "pooler_hidden_act": "gelu",
+ "pooler_hidden_size": 768,
+ "pos_att_type": [
+ "p2c",
+ "c2p"
+ ],
+ "position_biased_input": false,
+ "position_buckets": 256,
+ "relative_attention": true,
+ "share_att_key": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "type_vocab_size": 0,
+ "vocab_size": 251000
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2db3c56c71229589e5cb6b92fbac028b4ac4418342c591dd1cf5b4ec55ad0990
+ size 1115268200
runs/Jan14_14-08-43_r-maliru-dnd-autotrain-qwc498pd-27eb3-4nsxw/events.out.tfevents.1736863725.r-maliru-dnd-autotrain-qwc498pd-27eb3-4nsxw.108.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0395e5e4768286e7a6b6a0a43cde9041d9c031fd28f553baa444c9d10ae11ff6
- size 33375
+ oid sha256:a15de790667499c57d34bf854a5250bd1dcfa9d7bcfebe4950824509dad2383e
+ size 34459
runs/Jan14_14-08-43_r-maliru-dnd-autotrain-qwc498pd-27eb3-4nsxw/events.out.tfevents.1736897047.r-maliru-dnd-autotrain-qwc498pd-27eb3-4nsxw.108.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50c29a790b87f765dadd87730a0583ca26ae9cb96277d3a033263dd4f91a07d2
+ size 607
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "bos_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13c8d666d62a7bc4ac8f040aab68e942c861f93303156cc28f5c7e885d86d6e3
+ size 4305025
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07bf017c59c05b71d2f3de3ca420eeae57572049bc2903e829a041e6a1a25df9
+ size 16316391
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250101": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "[CLS]",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_lower_case": false,
+ "eos_token": "[SEP]",
+ "extra_special_tokens": {},
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "sp_model_kwargs": {},
+ "split_by_punct": false,
+ "tokenizer_class": "DebertaV2Tokenizer",
+ "unk_token": "[UNK]",
+ "vocab_type": "spm"
+ }
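A short hedged check that the tokenizer files in this commit load as declared — a `DebertaV2Tokenizer` with `model_max_length` 512 and the added `[MASK]` token at id 250101 from added_tokens.json. The local path `"."` is an assumption; a Hub repo id works the same way:

```python
# Hedged sketch: load the tokenizer files above and confirm the declared
# special-token setup. "." assumes the repo is cloned/downloaded locally.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")

assert tok.mask_token == "[MASK]"
assert tok.convert_tokens_to_ids("[MASK]") == 250101
print(tok("I love AutoTrain", truncation=True, max_length=512))
```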
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:befff7270d1a6c6686f743f6a23693911730b33978c93c96cd90536086e6df65
+ size 5368
training_params.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "data_path": "mDeBERTa-v3-base-mnli-xnli-dnd/autotrain-data",
+ "model": "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli",
+ "lr": 5e-05,
+ "epochs": 1,
+ "max_seq_length": 128,
+ "batch_size": 8,
+ "warmup_ratio": 0.1,
+ "gradient_accumulation": 1,
+ "optimizer": "adamw_torch",
+ "scheduler": "linear",
+ "weight_decay": 0.0,
+ "max_grad_norm": 1.0,
+ "seed": 42,
+ "train_split": "train",
+ "valid_split": "validation",
+ "text_column": "autotrain_text",
+ "target_column": "autotrain_label",
+ "logging_steps": -1,
+ "project_name": "mDeBERTa-v3-base-mnli-xnli-dnd",
+ "auto_find_batch_size": false,
+ "mixed_precision": "fp16",
+ "save_total_limit": 1,
+ "push_to_hub": true,
+ "eval_strategy": "epoch",
+ "username": "Maliru",
+ "log": "tensorboard",
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.01
+ }
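training_params.json holds the AutoTrain-level parameters, while the serialized training_args.bin is not human-readable in this diff. As a rough, hedged reconstruction — not the exact arguments AutoTrain built; `output_dir` and any defaults not listed above are assumptions — the equivalent `transformers.TrainingArguments` would look roughly like:

```python
# Hedged sketch: approximate transformers equivalent of the AutoTrain
# parameters above. NOT the exact content of training_args.bin.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="mDeBERTa-v3-base-mnli-xnli-dnd",  # matches project_name; assumption
    learning_rate=5e-5,
    num_train_epochs=1,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=1,
    warmup_ratio=0.1,
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    fp16=True,                    # "mixed_precision": "fp16"
    lr_scheduler_type="linear",
    optim="adamw_torch",
    eval_strategy="epoch",
    save_total_limit=1,
    report_to="tensorboard",
)
```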