Wvolf committed on
Commit 9668669
1 Parent(s): c5f55d4

Upload 12 files

.gitattributes ADDED
@@ -0,0 +1,3 @@
+ checkpoint-22604/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-22604/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
checkpoint-22604/checkpoint-28265_preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
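For orientation, a minimal sketch of how a ViTImageProcessor built from these values behaves; the checkpoint path and the input image below are placeholders, not files in this commit.

```python
from PIL import Image
from transformers import ViTImageProcessor

# Path is illustrative; point it at a directory (or hub repo) containing a
# standard preprocessor_config.json with the values shown above.
processor = ViTImageProcessor.from_pretrained("path/to/checkpoint")

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image

# With this config the processor resizes to 224x224, rescales pixels by
# 1/255 (0.00392...), and normalizes each channel with mean = std = 0.5.
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```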
checkpoint-22604/checkpoint-28265_trainer_state.json ADDED
@@ -0,0 +1,400 @@
+ {
+ "best_metric": 0.048813898116350174,
+ "best_model_checkpoint": "deepfake_vs_real_image_detection/checkpoint-28265",
+ "epoch": 5.0,
+ "eval_steps": 500,
+ "global_step": 28265,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.09,
+ "learning_rate": 9.840510366826157e-06,
+ "loss": 0.3071,
+ "step": 500
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 9.663299663299665e-06,
+ "loss": 0.1277,
+ "step": 1000
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 9.48608895977317e-06,
+ "loss": 0.1061,
+ "step": 1500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 9.308878256246679e-06,
+ "loss": 0.096,
+ "step": 2000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 9.131667552720185e-06,
+ "loss": 0.0833,
+ "step": 2500
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 8.954456849193693e-06,
+ "loss": 0.0803,
+ "step": 3000
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 8.777246145667199e-06,
+ "loss": 0.0797,
+ "step": 3500
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 8.600035442140707e-06,
+ "loss": 0.066,
+ "step": 4000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 8.422824738614213e-06,
+ "loss": 0.0672,
+ "step": 4500
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 8.24561403508772e-06,
+ "loss": 0.0656,
+ "step": 5000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 8.068403331561227e-06,
+ "loss": 0.0617,
+ "step": 5500
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.977418338409831,
+ "eval_loss": 0.06863939762115479,
+ "eval_runtime": 103.2974,
+ "eval_samples_per_second": 92.171,
+ "eval_steps_per_second": 11.53,
+ "step": 5653
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 7.891192628034733e-06,
+ "loss": 0.0561,
+ "step": 6000
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 7.713981924508241e-06,
+ "loss": 0.0551,
+ "step": 6500
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 7.536771220981748e-06,
+ "loss": 0.0496,
+ "step": 7000
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 7.359560517455255e-06,
+ "loss": 0.05,
+ "step": 7500
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 7.1823498139287615e-06,
+ "loss": 0.0464,
+ "step": 8000
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 7.005139110402269e-06,
+ "loss": 0.0536,
+ "step": 8500
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 6.8279284068757755e-06,
+ "loss": 0.0482,
+ "step": 9000
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 6.6507177033492834e-06,
+ "loss": 0.0473,
+ "step": 9500
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 6.47350699982279e-06,
+ "loss": 0.0492,
+ "step": 10000
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 6.296296296296297e-06,
+ "loss": 0.0452,
+ "step": 10500
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 6.119085592769804e-06,
+ "loss": 0.0468,
+ "step": 11000
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.9834051045058292,
+ "eval_loss": 0.05037350580096245,
+ "eval_runtime": 102.1997,
+ "eval_samples_per_second": 93.161,
+ "eval_steps_per_second": 11.654,
+ "step": 11306
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 5.941874889243311e-06,
+ "loss": 0.0413,
+ "step": 11500
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 5.764664185716817e-06,
+ "loss": 0.0399,
+ "step": 12000
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 5.587453482190325e-06,
+ "loss": 0.0386,
+ "step": 12500
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 5.410242778663831e-06,
+ "loss": 0.0331,
+ "step": 13000
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 5.233032075137339e-06,
+ "loss": 0.0336,
+ "step": 13500
+ },
+ {
+ "epoch": 2.48,
+ "learning_rate": 5.055821371610846e-06,
+ "loss": 0.034,
+ "step": 14000
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 4.878610668084353e-06,
+ "loss": 0.0332,
+ "step": 14500
+ },
+ {
+ "epoch": 2.65,
+ "learning_rate": 4.701399964557859e-06,
+ "loss": 0.0371,
+ "step": 15000
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 4.524189261031366e-06,
+ "loss": 0.0348,
+ "step": 15500
+ },
+ {
+ "epoch": 2.83,
+ "learning_rate": 4.346978557504873e-06,
+ "loss": 0.0406,
+ "step": 16000
+ },
+ {
+ "epoch": 2.92,
+ "learning_rate": 4.169767853978381e-06,
+ "loss": 0.0371,
+ "step": 16500
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9843503833630921,
+ "eval_loss": 0.05004642903804779,
+ "eval_runtime": 97.9514,
+ "eval_samples_per_second": 97.201,
+ "eval_steps_per_second": 12.159,
+ "step": 16959
+ },
+ {
+ "epoch": 3.01,
+ "learning_rate": 3.992557150451888e-06,
+ "loss": 0.0314,
+ "step": 17000
+ },
+ {
+ "epoch": 3.1,
+ "learning_rate": 3.815346446925395e-06,
+ "loss": 0.0296,
+ "step": 17500
+ },
+ {
+ "epoch": 3.18,
+ "learning_rate": 3.638135743398902e-06,
+ "loss": 0.0252,
+ "step": 18000
+ },
+ {
+ "epoch": 3.27,
+ "learning_rate": 3.460925039872409e-06,
+ "loss": 0.0272,
+ "step": 18500
+ },
+ {
+ "epoch": 3.36,
+ "learning_rate": 3.2837143363459155e-06,
+ "loss": 0.0243,
+ "step": 19000
+ },
+ {
+ "epoch": 3.45,
+ "learning_rate": 3.1065036328194225e-06,
+ "loss": 0.0255,
+ "step": 19500
+ },
+ {
+ "epoch": 3.54,
+ "learning_rate": 2.9292929292929295e-06,
+ "loss": 0.0273,
+ "step": 20000
+ },
+ {
+ "epoch": 3.63,
+ "learning_rate": 2.7520822257664366e-06,
+ "loss": 0.0261,
+ "step": 20500
+ },
+ {
+ "epoch": 3.71,
+ "learning_rate": 2.5748715222399436e-06,
+ "loss": 0.026,
+ "step": 21000
+ },
+ {
+ "epoch": 3.8,
+ "learning_rate": 2.3976608187134502e-06,
+ "loss": 0.0258,
+ "step": 21500
+ },
+ {
+ "epoch": 3.89,
+ "learning_rate": 2.2204501151869577e-06,
+ "loss": 0.0284,
+ "step": 22000
+ },
+ {
+ "epoch": 3.98,
+ "learning_rate": 2.0432394116604643e-06,
+ "loss": 0.022,
+ "step": 22500
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.9851906312362146,
+ "eval_loss": 0.05069756135344505,
+ "eval_runtime": 98.3294,
+ "eval_samples_per_second": 96.828,
+ "eval_steps_per_second": 12.112,
+ "step": 22612
+ },
+ {
+ "epoch": 4.07,
+ "learning_rate": 1.8660287081339716e-06,
+ "loss": 0.0216,
+ "step": 23000
+ },
+ {
+ "epoch": 4.16,
+ "learning_rate": 1.6888180046074784e-06,
+ "loss": 0.0205,
+ "step": 23500
+ },
+ {
+ "epoch": 4.25,
+ "learning_rate": 1.5116073010809854e-06,
+ "loss": 0.0167,
+ "step": 24000
+ },
+ {
+ "epoch": 4.33,
+ "learning_rate": 1.3343965975544923e-06,
+ "loss": 0.0213,
+ "step": 24500
+ },
+ {
+ "epoch": 4.42,
+ "learning_rate": 1.1571858940279993e-06,
+ "loss": 0.0232,
+ "step": 25000
+ },
+ {
+ "epoch": 4.51,
+ "learning_rate": 9.799751905015063e-07,
+ "loss": 0.0174,
+ "step": 25500
+ },
+ {
+ "epoch": 4.6,
+ "learning_rate": 8.027644869750134e-07,
+ "loss": 0.0219,
+ "step": 26000
+ },
+ {
+ "epoch": 4.69,
+ "learning_rate": 6.255537834485204e-07,
+ "loss": 0.0186,
+ "step": 26500
+ },
+ {
+ "epoch": 4.78,
+ "learning_rate": 4.483430799220273e-07,
+ "loss": 0.0165,
+ "step": 27000
+ },
+ {
+ "epoch": 4.86,
+ "learning_rate": 2.711323763955343e-07,
+ "loss": 0.0199,
+ "step": 27500
+ },
+ {
+ "epoch": 4.95,
+ "learning_rate": 9.39216728690413e-08,
+ "loss": 0.0181,
+ "step": 28000
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9855057241886357,
+ "eval_loss": 0.048813898116350174,
+ "eval_runtime": 101.3781,
+ "eval_samples_per_second": 93.916,
+ "eval_steps_per_second": 11.748,
+ "step": 28265
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 28265,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "total_flos": 7.008414286738507e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
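A small sketch (assuming this file has been pulled locally under the name added in this commit) that extracts the per-epoch evaluation entries from log_history:

```python
import json

# Filename matches the one added in this commit; adjust the path as needed.
with open("checkpoint-22604/checkpoint-28265_trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history records that carry eval_* keys.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'epoch {entry["epoch"]:.0f}: '
              f'accuracy={entry["eval_accuracy"]:.4f}, loss={entry["eval_loss"]:.4f}')

# From the values above, accuracy climbs from ~0.9774 at epoch 1 to ~0.9855
# at epoch 5, with the best eval_loss (~0.0488) reached at step 28265.
```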
checkpoint-22604/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "Real",
+ "1": "Fake"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "Fake": 1,
+ "Real": 0
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.34.0"
+ }
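A hedged sketch of loading this checkpoint for inference with the id2label mapping above; the local path and the dummy input tensor are assumptions for illustration only.

```python
import torch
from transformers import ViTForImageClassification

# Path is illustrative; the directory should contain this config.json plus
# the pytorch_model.bin weights added below.
model = ViTForImageClassification.from_pretrained("checkpoint-22604")
model.eval()

with torch.no_grad():
    dummy = torch.randn(1, 3, 224, 224)        # stand-in for preprocessed pixel_values
    logits = model(pixel_values=dummy).logits  # shape (1, 2)

# id2label from the config turns the argmax into a human-readable label.
pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # "Real" or "Fake"
```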
checkpoint-22604/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6e607a4af12ed0c24ffa7f37196241b56136a4f91c3ff8a7856ce31e86b43af
+ size 686568453
checkpoint-22604/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a653ae69f0c39545941da9b7d6ea309aa81c7d69946cdf06f4ea10b6a24eb0d
+ size 343268717
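The three lines above are a Git LFS pointer, not the weights themselves; after git lfs pull the actual binary replaces it. A minimal sketch, with an assumed local path, for checking that the downloaded blob matches the recorded oid and size:

```python
import hashlib
import os

path = "checkpoint-22604/pytorch_model.bin"  # assumed local path after git lfs pull
expected_oid = "7a653ae69f0c39545941da9b7d6ea309aa81c7d69946cdf06f4ea10b6a24eb0d"
expected_size = 343268717

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("LFS object verified")
```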
checkpoint-22604/rng_state.pth ADDED
Binary file (14.6 kB).
checkpoint-22604/scheduler.pt ADDED
Binary file (627 Bytes).
checkpoint-22604/training_args.bin ADDED
Binary file (4.03 kB).
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "Real",
+ "1": "Fake"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "Fake": 1,
+ "Real": 0
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.34.0"
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a653ae69f0c39545941da9b7d6ea309aa81c7d69946cdf06f4ea10b6a24eb0d
+ size 343268717
training_args.bin ADDED
Binary file (4.03 kB).