3v324v23 committed on
Commit
bc5b2dd
1 Parent(s): c3a5cca

140 epoch 555 ds

README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
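
For context, the quantization settings listed above map onto a `BitsAndBytesConfig` in `transformers`. The snippet below is a minimal sketch of how the base model and this adapter could be loaded for inference under those settings; the adapter repo id is a placeholder, not the actual repository name.

```python
# Minimal sketch: load CodeLlama-7b in 8-bit (matching the bitsandbytes
# settings in the README above) and attach this LoRA adapter with PEFT.
# `adapter_id` is a placeholder, not the real repository name.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "codellama/CodeLlama-7b-hf"
adapter_id = "your-username/your-lora-adapter"  # placeholder

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,              # load_in_8bit: True
    llm_int8_threshold=6.0,         # llm_int8_threshold: 6.0
    llm_int8_has_fp16_weight=False, # llm_int8_has_fp16_weight: False
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_id)
model.eval()
```

Loading in 8-bit mirrors `load_in_8bit: True` above; the remaining 4-bit fields in the README are defaults and only apply when 4-bit loading is enabled.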
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "codellama/CodeLlama-7b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj",
+ "v_proj",
+ "down_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
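
If one prefers to recreate this adapter configuration in code rather than editing the JSON directly, it corresponds roughly to the following `peft.LoraConfig`. This is a sketch that only restates the values shown above.

```python
# Sketch of the LoraConfig equivalent to adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "up_proj", "o_proj", "q_proj", "k_proj",
        "v_proj", "down_proj", "gate_proj",
    ],
)
```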
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4bd1b0848329afc323724cb5f5c5c2e8242b4530d3c2fbf9f731c37ecec5f5e
+ size 319977229
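
The three lines above are a Git LFS pointer: the repository tracks only the object's sha256 and size, while the weights themselves live in LFS storage. A quick integrity check of a downloaded copy could look like the sketch below; the local path is an assumption.

```python
# Sketch: verify a downloaded LFS object against the sha256 oid in its pointer.
# The file path is a placeholder for wherever adapter_model.bin was saved.
import hashlib

expected = "b4bd1b0848329afc323724cb5f5c5c2e8242b4530d3c2fbf9f731c37ecec5f5e"

h = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("ok" if h.hexdigest() == expected else "hash mismatch")
```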
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b88eae799662f074f9ac9b776c08827f05d50a1563db15bc68cb9cdd9cbba67
+ size 160735647
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1a9eb3da843c7fb5628d7dfeface97e0eeb5a7219b46b61289345f983637f40
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0900a6fba1d6009e7b51215cdc66f0d705cb3e1d22579b7333dd467a9a360671
+ size 627
trainer_state.json ADDED
@@ -0,0 +1,971 @@
+ {
+ "best_metric": 0.8675475120544434,
+ "best_model_checkpoint": "./lora-out/checkpoint-60",
+ "epoch": 93.33333333333333,
+ "eval_steps": 10,
+ "global_step": 140,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "learning_rate": 2e-05,
+ "loss": 1.9263,
+ "step": 1
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 4e-05,
+ "loss": 0.6362,
+ "step": 2
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 6e-05,
+ "loss": 1.2827,
+ "step": 3
+ },
+ {
+ "epoch": 2.67,
+ "learning_rate": 8e-05,
+ "loss": 1.271,
+ "step": 4
+ },
+ {
+ "epoch": 3.0,
+ "learning_rate": 0.0001,
+ "loss": 0.634,
+ "step": 5
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 0.00012,
+ "loss": 1.8734,
+ "step": 6
+ },
+ {
+ "epoch": 5.0,
+ "learning_rate": 0.00014,
+ "loss": 1.815,
+ "step": 7
+ },
+ {
+ "epoch": 5.33,
+ "learning_rate": 0.00016,
+ "loss": 0.5571,
+ "step": 8
+ },
+ {
+ "epoch": 6.0,
+ "learning_rate": 0.00018,
+ "loss": 1.0428,
+ "step": 9
+ },
+ {
+ "epoch": 6.67,
+ "learning_rate": 0.0002,
+ "loss": 0.953,
+ "step": 10
+ },
+ {
+ "epoch": 6.67,
+ "eval_loss": 1.6378157138824463,
+ "eval_runtime": 0.8713,
+ "eval_samples_per_second": 6.886,
+ "eval_steps_per_second": 3.443,
+ "step": 10
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 0.00019997482349425066,
+ "loss": 0.4287,
+ "step": 11
+ },
+ {
+ "epoch": 8.0,
+ "learning_rate": 0.00019989930665413147,
+ "loss": 1.2133,
+ "step": 12
+ },
+ {
+ "epoch": 9.0,
+ "learning_rate": 0.0001997734875046456,
+ "loss": 1.1351,
+ "step": 13
+ },
+ {
+ "epoch": 9.33,
+ "learning_rate": 0.00019959742939952392,
+ "loss": 0.355,
+ "step": 14
+ },
+ {
+ "epoch": 10.0,
+ "learning_rate": 0.00019937122098932428,
+ "loss": 0.68,
+ "step": 15
+ },
+ {
+ "epoch": 10.67,
+ "learning_rate": 0.00019909497617679348,
+ "loss": 0.6661,
+ "step": 16
+ },
+ {
+ "epoch": 11.0,
+ "learning_rate": 0.00019876883405951377,
+ "loss": 0.3162,
+ "step": 17
+ },
+ {
+ "epoch": 12.0,
+ "learning_rate": 0.00019839295885986296,
+ "loss": 0.9141,
+ "step": 18
+ },
+ {
+ "epoch": 13.0,
+ "learning_rate": 0.00019796753984232358,
+ "loss": 0.8809,
+ "step": 19
+ },
+ {
+ "epoch": 13.33,
+ "learning_rate": 0.00019749279121818235,
+ "loss": 0.2831,
+ "step": 20
+ },
+ {
+ "epoch": 13.33,
+ "eval_loss": 1.0763869285583496,
+ "eval_runtime": 0.8722,
+ "eval_samples_per_second": 6.879,
+ "eval_steps_per_second": 3.44,
+ "step": 20
+ },
+ {
+ "epoch": 14.0,
+ "learning_rate": 0.0001969689520376687,
+ "loss": 0.5415,
+ "step": 21
+ },
+ {
+ "epoch": 14.67,
+ "learning_rate": 0.00019639628606958533,
+ "loss": 0.5414,
+ "step": 22
+ },
+ {
+ "epoch": 15.0,
+ "learning_rate": 0.00019577508166849304,
+ "loss": 0.2592,
+ "step": 23
+ },
+ {
+ "epoch": 16.0,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 0.7754,
+ "step": 24
+ },
+ {
+ "epoch": 17.0,
+ "learning_rate": 0.00019438833303083678,
+ "loss": 0.7689,
+ "step": 25
+ },
+ {
+ "epoch": 17.33,
+ "learning_rate": 0.00019362348706397373,
+ "loss": 0.2523,
+ "step": 26
+ },
+ {
+ "epoch": 18.0,
+ "learning_rate": 0.0001928114988519039,
+ "loss": 0.5015,
+ "step": 27
+ },
+ {
+ "epoch": 18.67,
+ "learning_rate": 0.0001919527772551451,
+ "loss": 0.5023,
+ "step": 28
+ },
+ {
+ "epoch": 19.0,
+ "learning_rate": 0.00019104775466588161,
+ "loss": 0.2493,
+ "step": 29
+ },
+ {
+ "epoch": 20.0,
+ "learning_rate": 0.0001900968867902419,
+ "loss": 0.74,
+ "step": 30
+ },
+ {
+ "epoch": 20.0,
+ "eval_loss": 1.0189049243927002,
+ "eval_runtime": 0.8726,
+ "eval_samples_per_second": 6.876,
+ "eval_steps_per_second": 3.438,
+ "step": 30
+ },
+ {
+ "epoch": 21.0,
+ "learning_rate": 0.0001891006524188368,
+ "loss": 0.7203,
+ "step": 31
+ },
+ {
+ "epoch": 21.33,
+ "learning_rate": 0.0001880595531856738,
+ "loss": 0.2385,
+ "step": 32
+ },
+ {
+ "epoch": 22.0,
+ "learning_rate": 0.00018697411331556956,
+ "loss": 0.4841,
+ "step": 33
+ },
+ {
+ "epoch": 22.67,
+ "learning_rate": 0.00018584487936018661,
+ "loss": 0.4746,
+ "step": 34
+ },
+ {
+ "epoch": 23.0,
+ "learning_rate": 0.00018467241992282843,
+ "loss": 0.2257,
+ "step": 35
+ },
+ {
+ "epoch": 24.0,
+ "learning_rate": 0.00018345732537213027,
+ "loss": 0.6933,
+ "step": 36
+ },
+ {
+ "epoch": 25.0,
+ "learning_rate": 0.00018220020754479102,
+ "loss": 0.6819,
+ "step": 37
+ },
+ {
+ "epoch": 25.33,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.2184,
+ "step": 38
+ },
+ {
+ "epoch": 26.0,
+ "learning_rate": 0.00017956245488817812,
+ "loss": 0.4415,
+ "step": 39
+ },
+ {
+ "epoch": 26.67,
+ "learning_rate": 0.000178183148246803,
+ "loss": 0.4336,
+ "step": 40
+ },
+ {
+ "epoch": 26.67,
+ "eval_loss": 0.9619529843330383,
+ "eval_runtime": 0.874,
+ "eval_samples_per_second": 6.865,
+ "eval_steps_per_second": 3.433,
+ "step": 40
+ },
+ {
+ "epoch": 27.0,
+ "learning_rate": 0.0001767644740358011,
+ "loss": 0.2187,
+ "step": 41
+ },
+ {
+ "epoch": 28.0,
+ "learning_rate": 0.00017530714660036112,
+ "loss": 0.642,
+ "step": 42
+ },
+ {
+ "epoch": 29.0,
+ "learning_rate": 0.00017381189974873407,
+ "loss": 0.6302,
+ "step": 43
+ },
+ {
+ "epoch": 29.33,
+ "learning_rate": 0.00017227948638273916,
+ "loss": 0.2054,
+ "step": 44
+ },
+ {
+ "epoch": 30.0,
+ "learning_rate": 0.00017071067811865476,
+ "loss": 0.4056,
+ "step": 45
+ },
+ {
+ "epoch": 30.67,
+ "learning_rate": 0.00016910626489868649,
+ "loss": 0.3933,
+ "step": 46
+ },
+ {
+ "epoch": 31.0,
+ "learning_rate": 0.00016746705459320745,
+ "loss": 0.1891,
+ "step": 47
+ },
+ {
+ "epoch": 32.0,
+ "learning_rate": 0.00016579387259397127,
+ "loss": 0.5689,
+ "step": 48
+ },
+ {
+ "epoch": 33.0,
+ "learning_rate": 0.0001640875613985024,
+ "loss": 0.5574,
+ "step": 49
+ },
+ {
+ "epoch": 33.33,
+ "learning_rate": 0.00016234898018587337,
+ "loss": 0.1827,
+ "step": 50
+ },
+ {
+ "epoch": 33.33,
+ "eval_loss": 0.8636454939842224,
+ "eval_runtime": 0.8737,
+ "eval_samples_per_second": 6.867,
+ "eval_steps_per_second": 3.434,
+ "step": 50
+ },
+ {
+ "epoch": 34.0,
+ "learning_rate": 0.000160579004384082,
+ "loss": 0.3592,
+ "step": 51
+ },
+ {
+ "epoch": 34.67,
+ "learning_rate": 0.00015877852522924732,
+ "loss": 0.3602,
+ "step": 52
+ },
+ {
+ "epoch": 35.0,
+ "learning_rate": 0.0001569484493168452,
+ "loss": 0.1678,
+ "step": 53
+ },
+ {
+ "epoch": 36.0,
+ "learning_rate": 0.00015508969814521025,
+ "loss": 0.5046,
+ "step": 54
+ },
+ {
+ "epoch": 37.0,
+ "learning_rate": 0.00015320320765153367,
+ "loss": 0.5016,
+ "step": 55
+ },
+ {
+ "epoch": 37.33,
+ "learning_rate": 0.00015128992774059063,
+ "loss": 0.1718,
+ "step": 56
+ },
+ {
+ "epoch": 38.0,
+ "learning_rate": 0.0001493508218064347,
+ "loss": 0.3245,
+ "step": 57
+ },
+ {
+ "epoch": 38.67,
+ "learning_rate": 0.00014738686624729986,
+ "loss": 0.3233,
+ "step": 58
+ },
+ {
+ "epoch": 39.0,
+ "learning_rate": 0.00014539904997395468,
+ "loss": 0.1573,
+ "step": 59
+ },
+ {
+ "epoch": 40.0,
+ "learning_rate": 0.00014338837391175582,
+ "loss": 0.4726,
+ "step": 60
+ },
+ {
+ "epoch": 40.0,
+ "eval_loss": 0.8675475120544434,
+ "eval_runtime": 0.8737,
+ "eval_samples_per_second": 6.867,
+ "eval_steps_per_second": 3.434,
+ "step": 60
+ },
+ {
+ "epoch": 41.0,
+ "learning_rate": 0.00014135585049665207,
+ "loss": 0.4665,
+ "step": 61
+ },
+ {
+ "epoch": 41.33,
+ "learning_rate": 0.00013930250316539238,
+ "loss": 0.1493,
+ "step": 62
+ },
+ {
+ "epoch": 42.0,
+ "learning_rate": 0.00013722936584019453,
+ "loss": 0.3025,
+ "step": 63
+ },
+ {
+ "epoch": 42.67,
+ "learning_rate": 0.0001351374824081343,
+ "loss": 0.2971,
+ "step": 64
+ },
+ {
+ "epoch": 43.0,
+ "learning_rate": 0.00013302790619551674,
+ "loss": 0.1348,
+ "step": 65
+ },
+ {
+ "epoch": 44.0,
+ "learning_rate": 0.00013090169943749476,
+ "loss": 0.4268,
+ "step": 66
+ },
+ {
+ "epoch": 45.0,
+ "learning_rate": 0.00012875993274320173,
+ "loss": 0.4176,
+ "step": 67
+ },
+ {
+ "epoch": 45.33,
+ "learning_rate": 0.00012660368455666752,
+ "loss": 0.1309,
+ "step": 68
+ },
+ {
+ "epoch": 46.0,
+ "learning_rate": 0.0001244340406137894,
+ "loss": 0.2693,
+ "step": 69
+ },
+ {
+ "epoch": 46.67,
+ "learning_rate": 0.00012225209339563145,
+ "loss": 0.2564,
+ "step": 70
+ },
+ {
+ "epoch": 46.67,
+ "eval_loss": 0.9176617860794067,
+ "eval_runtime": 0.8733,
+ "eval_samples_per_second": 6.871,
+ "eval_steps_per_second": 3.435,
+ "step": 70
+ },
+ {
+ "epoch": 47.0,
+ "learning_rate": 0.00012005894157832729,
+ "loss": 0.1313,
+ "step": 71
+ },
+ {
+ "epoch": 48.0,
+ "learning_rate": 0.00011785568947986367,
+ "loss": 0.3761,
+ "step": 72
+ },
+ {
+ "epoch": 49.0,
+ "learning_rate": 0.0001156434465040231,
+ "loss": 0.3719,
+ "step": 73
+ },
+ {
+ "epoch": 49.33,
+ "learning_rate": 0.00011342332658176555,
+ "loss": 0.1238,
+ "step": 74
+ },
+ {
+ "epoch": 50.0,
+ "learning_rate": 0.00011119644761033078,
+ "loss": 0.2303,
+ "step": 75
+ },
+ {
+ "epoch": 50.67,
+ "learning_rate": 0.00010896393089034336,
+ "loss": 0.2324,
+ "step": 76
+ },
+ {
+ "epoch": 51.0,
+ "learning_rate": 0.00010672690056120399,
+ "loss": 0.1158,
+ "step": 77
+ },
+ {
+ "epoch": 52.0,
+ "learning_rate": 0.00010448648303505151,
+ "loss": 0.3286,
+ "step": 78
+ },
+ {
+ "epoch": 53.0,
+ "learning_rate": 0.00010224380642958052,
+ "loss": 0.3256,
+ "step": 79
+ },
+ {
+ "epoch": 53.33,
+ "learning_rate": 0.0001,
+ "loss": 0.1031,
+ "step": 80
+ },
+ {
+ "epoch": 53.33,
+ "eval_loss": 0.9727662205696106,
+ "eval_runtime": 0.8715,
+ "eval_samples_per_second": 6.884,
+ "eval_steps_per_second": 3.442,
+ "step": 80
+ },
+ {
+ "epoch": 54.0,
+ "learning_rate": 9.775619357041952e-05,
+ "loss": 0.2059,
+ "step": 81
+ },
+ {
+ "epoch": 54.67,
+ "learning_rate": 9.551351696494854e-05,
+ "loss": 0.2032,
+ "step": 82
+ },
+ {
+ "epoch": 55.0,
+ "learning_rate": 9.327309943879604e-05,
+ "loss": 0.0924,
+ "step": 83
+ },
+ {
+ "epoch": 56.0,
+ "learning_rate": 9.103606910965666e-05,
+ "loss": 0.2809,
+ "step": 84
+ },
+ {
+ "epoch": 57.0,
+ "learning_rate": 8.880355238966923e-05,
+ "loss": 0.2757,
+ "step": 85
+ },
+ {
+ "epoch": 57.33,
+ "learning_rate": 8.657667341823448e-05,
+ "loss": 0.088,
+ "step": 86
+ },
+ {
+ "epoch": 58.0,
+ "learning_rate": 8.435655349597689e-05,
+ "loss": 0.1725,
+ "step": 87
+ },
+ {
+ "epoch": 58.67,
+ "learning_rate": 8.214431052013634e-05,
+ "loss": 0.1654,
+ "step": 88
+ },
+ {
+ "epoch": 59.0,
+ "learning_rate": 7.994105842167273e-05,
+ "loss": 0.0806,
+ "step": 89
+ },
+ {
+ "epoch": 60.0,
+ "learning_rate": 7.774790660436858e-05,
+ "loss": 0.2316,
+ "step": 90
+ },
+ {
+ "epoch": 60.0,
+ "eval_loss": 1.0534545183181763,
+ "eval_runtime": 0.8706,
+ "eval_samples_per_second": 6.892,
+ "eval_steps_per_second": 3.446,
+ "step": 90
+ },
+ {
+ "epoch": 61.0,
+ "learning_rate": 7.556595938621058e-05,
+ "loss": 0.2254,
+ "step": 91
+ },
+ {
+ "epoch": 61.33,
+ "learning_rate": 7.339631544333249e-05,
+ "loss": 0.0724,
+ "step": 92
+ },
+ {
+ "epoch": 62.0,
+ "learning_rate": 7.124006725679828e-05,
+ "loss": 0.1469,
+ "step": 93
+ },
+ {
+ "epoch": 62.67,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 0.1392,
+ "step": 94
+ },
+ {
+ "epoch": 63.0,
+ "learning_rate": 6.697209380448333e-05,
+ "loss": 0.0677,
+ "step": 95
+ },
+ {
+ "epoch": 64.0,
+ "learning_rate": 6.486251759186572e-05,
+ "loss": 0.1941,
+ "step": 96
+ },
+ {
+ "epoch": 65.0,
+ "learning_rate": 6.277063415980549e-05,
+ "loss": 0.1933,
+ "step": 97
+ },
+ {
+ "epoch": 65.33,
+ "learning_rate": 6.069749683460765e-05,
+ "loss": 0.0599,
+ "step": 98
+ },
+ {
+ "epoch": 66.0,
+ "learning_rate": 5.864414950334796e-05,
+ "loss": 0.1258,
+ "step": 99
+ },
+ {
+ "epoch": 66.67,
+ "learning_rate": 5.6611626088244194e-05,
+ "loss": 0.1146,
+ "step": 100
+ },
+ {
+ "epoch": 66.67,
+ "eval_loss": 1.147660493850708,
+ "eval_runtime": 0.8723,
+ "eval_samples_per_second": 6.878,
+ "eval_steps_per_second": 3.439,
+ "step": 100
+ },
+ {
+ "epoch": 67.0,
+ "learning_rate": 5.4600950026045326e-05,
+ "loss": 0.0585,
+ "step": 101
+ },
+ {
+ "epoch": 68.0,
+ "learning_rate": 5.261313375270014e-05,
+ "loss": 0.1722,
+ "step": 102
+ },
+ {
+ "epoch": 69.0,
+ "learning_rate": 5.0649178193565314e-05,
+ "loss": 0.1656,
+ "step": 103
+ },
+ {
+ "epoch": 69.33,
+ "learning_rate": 4.87100722594094e-05,
+ "loss": 0.0534,
+ "step": 104
+ },
+ {
+ "epoch": 70.0,
+ "learning_rate": 4.6796792348466356e-05,
+ "loss": 0.1112,
+ "step": 105
+ },
+ {
+ "epoch": 70.67,
+ "learning_rate": 4.491030185478976e-05,
+ "loss": 0.107,
+ "step": 106
+ },
+ {
+ "epoch": 71.0,
+ "learning_rate": 4.305155068315481e-05,
+ "loss": 0.0502,
+ "step": 107
+ },
+ {
+ "epoch": 72.0,
+ "learning_rate": 4.12214747707527e-05,
+ "loss": 0.1502,
+ "step": 108
+ },
+ {
+ "epoch": 73.0,
+ "learning_rate": 3.942099561591802e-05,
+ "loss": 0.1472,
+ "step": 109
+ },
+ {
+ "epoch": 73.33,
+ "learning_rate": 3.7651019814126654e-05,
+ "loss": 0.0463,
+ "step": 110
+ },
+ {
+ "epoch": 73.33,
+ "eval_loss": 1.2258118391036987,
+ "eval_runtime": 0.871,
+ "eval_samples_per_second": 6.888,
+ "eval_steps_per_second": 3.444,
+ "step": 110
+ },
+ {
+ "epoch": 74.0,
+ "learning_rate": 3.591243860149759e-05,
+ "loss": 0.0995,
+ "step": 111
+ },
+ {
+ "epoch": 74.67,
+ "learning_rate": 3.4206127406028745e-05,
+ "loss": 0.093,
+ "step": 112
+ },
+ {
+ "epoch": 75.0,
+ "learning_rate": 3.253294540679257e-05,
+ "loss": 0.0489,
+ "step": 113
+ },
+ {
+ "epoch": 76.0,
+ "learning_rate": 3.089373510131354e-05,
+ "loss": 0.1373,
+ "step": 114
+ },
+ {
+ "epoch": 77.0,
+ "learning_rate": 2.9289321881345254e-05,
+ "loss": 0.1337,
+ "step": 115
+ },
+ {
+ "epoch": 77.33,
+ "learning_rate": 2.7720513617260856e-05,
+ "loss": 0.0413,
+ "step": 116
+ },
+ {
+ "epoch": 78.0,
+ "learning_rate": 2.6188100251265945e-05,
+ "loss": 0.0911,
+ "step": 117
+ },
+ {
+ "epoch": 78.67,
+ "learning_rate": 2.4692853399638917e-05,
+ "loss": 0.0871,
+ "step": 118
+ },
+ {
+ "epoch": 79.0,
+ "learning_rate": 2.323552596419889e-05,
+ "loss": 0.0424,
+ "step": 119
+ },
+ {
+ "epoch": 80.0,
+ "learning_rate": 2.181685175319702e-05,
+ "loss": 0.127,
+ "step": 120
+ },
+ {
+ "epoch": 80.0,
+ "eval_loss": 1.2826638221740723,
+ "eval_runtime": 0.8716,
+ "eval_samples_per_second": 6.884,
+ "eval_steps_per_second": 3.442,
+ "step": 120
+ },
+ {
+ "epoch": 81.0,
+ "learning_rate": 2.043754511182191e-05,
+ "loss": 0.124,
+ "step": 121
+ },
+ {
+ "epoch": 81.33,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 0.0409,
+ "step": 122
+ },
+ {
+ "epoch": 82.0,
+ "learning_rate": 1.7799792455209018e-05,
+ "loss": 0.0819,
+ "step": 123
+ },
+ {
+ "epoch": 82.67,
+ "learning_rate": 1.6542674627869737e-05,
+ "loss": 0.0816,
+ "step": 124
+ },
+ {
+ "epoch": 83.0,
+ "learning_rate": 1.5327580077171587e-05,
+ "loss": 0.0397,
+ "step": 125
+ },
+ {
+ "epoch": 84.0,
+ "learning_rate": 1.415512063981339e-05,
+ "loss": 0.1216,
+ "step": 126
+ },
+ {
+ "epoch": 85.0,
+ "learning_rate": 1.3025886684430467e-05,
+ "loss": 0.1204,
+ "step": 127
+ },
+ {
+ "epoch": 85.33,
+ "learning_rate": 1.19404468143262e-05,
+ "loss": 0.041,
+ "step": 128
+ },
+ {
+ "epoch": 86.0,
+ "learning_rate": 1.0899347581163221e-05,
+ "loss": 0.0772,
+ "step": 129
+ },
+ {
+ "epoch": 86.67,
+ "learning_rate": 9.903113209758096e-06,
+ "loss": 0.0779,
+ "step": 130
+ },
+ {
+ "epoch": 86.67,
+ "eval_loss": 1.3157517910003662,
+ "eval_runtime": 0.8712,
+ "eval_samples_per_second": 6.887,
+ "eval_steps_per_second": 3.444,
+ "step": 130
+ },
+ {
+ "epoch": 87.0,
+ "learning_rate": 8.952245334118414e-06,
+ "loss": 0.0401,
+ "step": 131
+ },
+ {
+ "epoch": 88.0,
+ "learning_rate": 8.047222744854943e-06,
+ "loss": 0.1173,
+ "step": 132
+ },
+ {
+ "epoch": 89.0,
+ "learning_rate": 7.1885011480961164e-06,
+ "loss": 0.1178,
+ "step": 133
+ },
+ {
+ "epoch": 89.33,
+ "learning_rate": 6.37651293602628e-06,
+ "loss": 0.0397,
+ "step": 134
+ },
+ {
+ "epoch": 90.0,
+ "learning_rate": 5.611666969163243e-06,
+ "loss": 0.079,
+ "step": 135
+ },
+ {
+ "epoch": 90.67,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 0.0766,
+ "step": 136
+ },
+ {
+ "epoch": 91.0,
+ "learning_rate": 4.224918331506955e-06,
+ "loss": 0.0397,
+ "step": 137
+ },
+ {
+ "epoch": 92.0,
+ "learning_rate": 3.6037139304146762e-06,
+ "loss": 0.1141,
+ "step": 138
+ },
+ {
+ "epoch": 93.0,
+ "learning_rate": 3.0310479623313127e-06,
+ "loss": 0.1184,
+ "step": 139
+ },
+ {
+ "epoch": 93.33,
+ "learning_rate": 2.5072087818176382e-06,
+ "loss": 0.0386,
+ "step": 140
+ },
+ {
+ "epoch": 93.33,
+ "eval_loss": 1.331322431564331,
+ "eval_runtime": 0.873,
+ "eval_samples_per_second": 6.873,
+ "eval_steps_per_second": 3.436,
+ "step": 140
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 150,
+ "num_train_epochs": 150,
+ "save_steps": 20,
+ "total_flos": 9.203523552018432e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
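
Because `trainer_state.json` records the full `log_history`, the run can be summarised programmatically, for example to recover the best checkpoint and the train/eval loss series. A small sketch, assuming the file has been downloaded to the working directory:

```python
# Sketch: read trainer_state.json and summarise the run
# (best eval checkpoint plus the train/eval loss series).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("final train loss:", train_loss[-1])
print("eval loss curve:", eval_loss)
```

In this state the lowest eval loss (0.8675) occurs at step 60, after which eval loss rises while train loss keeps falling, which is consistent with `best_model_checkpoint` pointing at checkpoint-60.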
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb32c45a82deea548f0ed3efa27ff0394baec1e7710f8c61760f66cfe630d33c
+ size 4475