Elron committed on
Commit
5509d58
1 Parent(s): 61fb289

Pushing deberta-v3-large-offensive to hub

Browse files
README.md ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - generated_from_trainer
5
+ metrics:
6
+ - accuracy
7
+ model-index:
8
+ - name: deberta-v3-large-offensive-lr7e-6-gas2-ls0.0
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # deberta-v3-large-offensive-lr7e-6-gas2-ls0.0
16
+
17
+ This model is a fine-tuned version of [microsoft/deberta-v3-large](https://huggingface.co/microsoft/deberta-v3-large) on the tweet_eval offensive dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 1.3326
20
+ - Accuracy: 0.7832
21
+
22
+ ## Model description
23
+
24
+ More information needed
25
+
26
+ ## Intended uses & limitations
27
+
28
+ More information needed
29
+
30
+ ## Training and evaluation data
31
+
32
+ More information needed
33
+
34
+ ## Training procedure
35
+
36
+ ### Training hyperparameters
37
+
38
+ The following hyperparameters were used during training:
39
+ - learning_rate: 7e-06
40
+ - train_batch_size: 16
41
+ - eval_batch_size: 16
42
+ - seed: 42
43
+ - gradient_accumulation_steps: 2
44
+ - total_train_batch_size: 32
45
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
46
+ - lr_scheduler_type: linear
47
+ - lr_scheduler_warmup_steps: 50
48
+ - num_epochs: 10.0
49
+
50
+ ### Training results
51
+
52
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
53
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
54
+ | 0.6417 | 0.27 | 100 | 0.6283 | 0.6533 |
55
+ | 0.5105 | 0.54 | 200 | 0.4588 | 0.7915 |
56
+ | 0.4554 | 0.81 | 300 | 0.4500 | 0.7968 |
57
+ | 0.4212 | 1.08 | 400 | 0.4773 | 0.7938 |
58
+ | 0.4054 | 1.34 | 500 | 0.4311 | 0.7983 |
59
+ | 0.3922 | 1.61 | 600 | 0.4588 | 0.7998 |
60
+ | 0.3776 | 1.88 | 700 | 0.4367 | 0.8066 |
61
+ | 0.3535 | 2.15 | 800 | 0.4675 | 0.8074 |
62
+ | 0.33 | 2.42 | 900 | 0.4874 | 0.8021 |
63
+ | 0.3113 | 2.69 | 1000 | 0.4949 | 0.8044 |
64
+ | 0.3203 | 2.96 | 1100 | 0.4550 | 0.8059 |
65
+ | 0.248 | 3.23 | 1200 | 0.4858 | 0.8036 |
66
+ | 0.2478 | 3.49 | 1300 | 0.5299 | 0.8029 |
67
+ | 0.2371 | 3.76 | 1400 | 0.5013 | 0.7991 |
68
+ | 0.2388 | 4.03 | 1500 | 0.5520 | 0.8021 |
69
+ | 0.1744 | 4.3 | 1600 | 0.6687 | 0.7915 |
70
+ | 0.1788 | 4.57 | 1700 | 0.7560 | 0.7689 |
71
+ | 0.1652 | 4.84 | 1800 | 0.6985 | 0.7832 |
72
+ | 0.1596 | 5.11 | 1900 | 0.7191 | 0.7915 |
73
+ | 0.1214 | 5.38 | 2000 | 0.9097 | 0.7893 |
74
+ | 0.1432 | 5.64 | 2100 | 0.9184 | 0.7787 |
75
+ | 0.1145 | 5.91 | 2200 | 0.9620 | 0.7878 |
76
+ | 0.1069 | 6.18 | 2300 | 0.9489 | 0.7893 |
77
+ | 0.1012 | 6.45 | 2400 | 1.0107 | 0.7817 |
78
+ | 0.0942 | 6.72 | 2500 | 1.0021 | 0.7885 |
79
+ | 0.087 | 6.99 | 2600 | 1.1090 | 0.7915 |
80
+ | 0.0598 | 7.26 | 2700 | 1.1735 | 0.7795 |
81
+ | 0.0742 | 7.53 | 2800 | 1.1433 | 0.7817 |
82
+ | 0.073 | 7.79 | 2900 | 1.1343 | 0.7953 |
83
+ | 0.0553 | 8.06 | 3000 | 1.2258 | 0.7840 |
84
+ | 0.0474 | 8.33 | 3100 | 1.2461 | 0.7817 |
85
+ | 0.0515 | 8.6 | 3200 | 1.2996 | 0.7825 |
86
+ | 0.0551 | 8.87 | 3300 | 1.2819 | 0.7855 |
87
+ | 0.0541 | 9.14 | 3400 | 1.2808 | 0.7855 |
88
+ | 0.0465 | 9.41 | 3500 | 1.3398 | 0.7817 |
89
+ | 0.0407 | 9.68 | 3600 | 1.3231 | 0.7825 |
90
+ | 0.0343 | 9.94 | 3700 | 1.3330 | 0.7825 |
91
+
92
+
93
+ ### Framework versions
94
+
95
+ - Transformers 4.20.0.dev0
96
+ - Pytorch 1.9.0
97
+ - Datasets 2.2.2
98
+ - Tokenizers 0.11.6
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "[MASK]": 128000
3
+ }
all_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.7832326292991638,
4
+ "eval_loss": 1.3326470851898193,
5
+ "eval_runtime": 8.5044,
6
+ "eval_samples": 1324,
7
+ "eval_samples_per_second": 155.684,
8
+ "eval_steps_per_second": 9.76,
9
+ "train_loss": 0.20273424893297176,
10
+ "train_runtime": 2723.3449,
11
+ "train_samples": 11916,
12
+ "train_samples_per_second": 43.755,
13
+ "train_steps_per_second": 1.366
14
+ }
config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "microsoft/deberta-v3-large",
3
+ "architectures": [
4
+ "DebertaV2ForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "hidden_act": "gelu",
8
+ "hidden_dropout_prob": 0.1,
9
+ "hidden_size": 1024,
10
+ "id2label": {
11
+ "0": 0,
12
+ "1": 1
13
+ },
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 4096,
16
+ "label2id": {
17
+ "0": 0,
18
+ "1": 1
19
+ },
20
+ "layer_norm_eps": 1e-07,
21
+ "max_position_embeddings": 512,
22
+ "max_relative_positions": -1,
23
+ "model_type": "deberta-v2",
24
+ "norm_rel_ebd": "layer_norm",
25
+ "num_attention_heads": 16,
26
+ "num_hidden_layers": 24,
27
+ "pad_token_id": 0,
28
+ "pooler_dropout": 0,
29
+ "pooler_hidden_act": "gelu",
30
+ "pooler_hidden_size": 1024,
31
+ "pos_att_type": [
32
+ "p2c",
33
+ "c2p"
34
+ ],
35
+ "position_biased_input": false,
36
+ "position_buckets": 256,
37
+ "relative_attention": true,
38
+ "share_att_key": true,
39
+ "torch_dtype": "float32",
40
+ "transformers_version": "4.20.0.dev0",
41
+ "type_vocab_size": 0,
42
+ "vocab_size": 128100
43
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "eval_accuracy": 0.8074018359184265,
3
+ "eval_loss": 0.4674627184867859,
4
+ "eval_runtime": 8.969,
5
+ "eval_samples": 1324,
6
+ "eval_samples_per_second": 147.62,
7
+ "eval_steps_per_second": 9.254
8
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f7ba2e4b0f924fa51337948a3738d07bae7c09a72d54b9823fe22979c01793c
3
+ size 1740393387
run_test.sh ADDED
@@ -0,0 +1 @@
 
 
1
+ jbsub -queue x86_1h -cores 4+1 -mem 30g -require a100 -o outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/test.log /dccstor/tslm/envs/anaconda3/envs/tslm-gen/bin/python train_clf.py --model_name_or_path outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/best_checkpoint --train_file data/tweet_eval/offensive/train.csv --validation_file data/tweet_eval/offensive/validation.csv --test_file data/tweet_eval/offensive/test.csv --do_eval --do_predict --report_to none --per_device_eval_batch_size 16 --max_seq_length 256 --output_dir outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/best_checkpoint
run_train.sh ADDED
@@ -0,0 +1 @@
 
 
1
+ jbsub -queue x86_6h -cores 4+1 -mem 30g -require a100 -o outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/train.log /dccstor/tslm/envs/anaconda3/envs/tslm-gen/bin/python train_clf.py --model_name_or_path microsoft/deberta-v3-large --train_file data/tweet_eval/offensive/train.csv --validation_file data/tweet_eval/offensive/validation.csv --do_train --do_eval --per_device_train_batch_size 16 --per_device_eval_batch_size 16 --max_seq_length 256 --learning_rate 7e-6 --output_dir outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0 --evaluation_strategy steps --save_strategy no --warmup_steps 50 --num_train_epochs 10 --overwrite_output_dir --logging_steps 100 --gradient_accumulation_steps 2 --label_smoothing_factor 0.0 --report_to clearml --metric_for_best_model accuracy --logging_dir outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/tb \; rm -rf outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/tb \; rm -rf outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/checkpoint-* \; . outputs/train/tweet_eval2/offensive/deberta-v3-large-offensive-lr7e-6-gas2-ls0.0/run_test.sh
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "eos_token": "[SEP]",
5
+ "mask_token": "[MASK]",
6
+ "pad_token": "[PAD]",
7
+ "sep_token": "[SEP]",
8
+ "unk_token": "[UNK]"
9
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
+ size 2464616
test_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "eval_accuracy": 0.8639534711837769,
3
+ "eval_loss": 0.37321123480796814,
4
+ "eval_runtime": 5.5359,
5
+ "eval_samples_per_second": 155.35,
6
+ "eval_steps_per_second": 9.755,
7
+ "test_samples": 860
8
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": false,
5
+ "eos_token": "[SEP]",
6
+ "mask_token": "[MASK]",
7
+ "name_or_path": "microsoft/deberta-v3-large",
8
+ "pad_token": "[PAD]",
9
+ "sep_token": "[SEP]",
10
+ "sp_model_kwargs": {},
11
+ "special_tokens_map_file": null,
12
+ "split_by_punct": false,
13
+ "tokenizer_class": "DebertaV2Tokenizer",
14
+ "unk_token": "[UNK]",
15
+ "vocab_type": "spm"
16
+ }
trainer_state.json ADDED
@@ -0,0 +1,580 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 9.998657718120805,
5
+ "global_step": 3720,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.27,
12
+ "learning_rate": 6.904632152588556e-06,
13
+ "loss": 0.6417,
14
+ "step": 100
15
+ },
16
+ {
17
+ "epoch": 0.27,
18
+ "eval_accuracy": 0.653323233127594,
19
+ "eval_loss": 0.6283483505249023,
20
+ "eval_runtime": 8.5429,
21
+ "eval_samples_per_second": 154.983,
22
+ "eval_steps_per_second": 9.716,
23
+ "step": 100
24
+ },
25
+ {
26
+ "epoch": 0.54,
27
+ "learning_rate": 6.713896457765667e-06,
28
+ "loss": 0.5105,
29
+ "step": 200
30
+ },
31
+ {
32
+ "epoch": 0.54,
33
+ "eval_accuracy": 0.791540801525116,
34
+ "eval_loss": 0.45882758498191833,
35
+ "eval_runtime": 8.3778,
36
+ "eval_samples_per_second": 158.036,
37
+ "eval_steps_per_second": 9.907,
38
+ "step": 200
39
+ },
40
+ {
41
+ "epoch": 0.81,
42
+ "learning_rate": 6.523160762942779e-06,
43
+ "loss": 0.4554,
44
+ "step": 300
45
+ },
46
+ {
47
+ "epoch": 0.81,
48
+ "eval_accuracy": 0.7968277931213379,
49
+ "eval_loss": 0.44999730587005615,
50
+ "eval_runtime": 8.4819,
51
+ "eval_samples_per_second": 156.097,
52
+ "eval_steps_per_second": 9.786,
53
+ "step": 300
54
+ },
55
+ {
56
+ "epoch": 1.08,
57
+ "learning_rate": 6.332425068119891e-06,
58
+ "loss": 0.4212,
59
+ "step": 400
60
+ },
61
+ {
62
+ "epoch": 1.08,
63
+ "eval_accuracy": 0.7938066720962524,
64
+ "eval_loss": 0.47725316882133484,
65
+ "eval_runtime": 8.3161,
66
+ "eval_samples_per_second": 159.209,
67
+ "eval_steps_per_second": 9.981,
68
+ "step": 400
69
+ },
70
+ {
71
+ "epoch": 1.34,
72
+ "learning_rate": 6.141689373297002e-06,
73
+ "loss": 0.4054,
74
+ "step": 500
75
+ },
76
+ {
77
+ "epoch": 1.34,
78
+ "eval_accuracy": 0.7983383536338806,
79
+ "eval_loss": 0.43111804127693176,
80
+ "eval_runtime": 8.5652,
81
+ "eval_samples_per_second": 154.579,
82
+ "eval_steps_per_second": 9.69,
83
+ "step": 500
84
+ },
85
+ {
86
+ "epoch": 1.61,
87
+ "learning_rate": 5.950953678474114e-06,
88
+ "loss": 0.3922,
89
+ "step": 600
90
+ },
91
+ {
92
+ "epoch": 1.61,
93
+ "eval_accuracy": 0.7998489141464233,
94
+ "eval_loss": 0.4587886333465576,
95
+ "eval_runtime": 8.2972,
96
+ "eval_samples_per_second": 159.573,
97
+ "eval_steps_per_second": 10.003,
98
+ "step": 600
99
+ },
100
+ {
101
+ "epoch": 1.88,
102
+ "learning_rate": 5.760217983651226e-06,
103
+ "loss": 0.3776,
104
+ "step": 700
105
+ },
106
+ {
107
+ "epoch": 1.88,
108
+ "eval_accuracy": 0.8066465258598328,
109
+ "eval_loss": 0.43665462732315063,
110
+ "eval_runtime": 8.2982,
111
+ "eval_samples_per_second": 159.553,
112
+ "eval_steps_per_second": 10.002,
113
+ "step": 700
114
+ },
115
+ {
116
+ "epoch": 2.15,
117
+ "learning_rate": 5.569482288828338e-06,
118
+ "loss": 0.3535,
119
+ "step": 800
120
+ },
121
+ {
122
+ "epoch": 2.15,
123
+ "eval_accuracy": 0.8074018359184265,
124
+ "eval_loss": 0.4674627184867859,
125
+ "eval_runtime": 8.7016,
126
+ "eval_samples_per_second": 152.155,
127
+ "eval_steps_per_second": 9.538,
128
+ "step": 800
129
+ },
130
+ {
131
+ "epoch": 2.42,
132
+ "learning_rate": 5.37874659400545e-06,
133
+ "loss": 0.33,
134
+ "step": 900
135
+ },
136
+ {
137
+ "epoch": 2.42,
138
+ "eval_accuracy": 0.8021147847175598,
139
+ "eval_loss": 0.48743265867233276,
140
+ "eval_runtime": 8.4057,
141
+ "eval_samples_per_second": 157.512,
142
+ "eval_steps_per_second": 9.874,
143
+ "step": 900
144
+ },
145
+ {
146
+ "epoch": 2.69,
147
+ "learning_rate": 5.188010899182561e-06,
148
+ "loss": 0.3113,
149
+ "step": 1000
150
+ },
151
+ {
152
+ "epoch": 2.69,
153
+ "eval_accuracy": 0.8043806552886963,
154
+ "eval_loss": 0.49486756324768066,
155
+ "eval_runtime": 8.2742,
156
+ "eval_samples_per_second": 160.015,
157
+ "eval_steps_per_second": 10.031,
158
+ "step": 1000
159
+ },
160
+ {
161
+ "epoch": 2.96,
162
+ "learning_rate": 4.997275204359673e-06,
163
+ "loss": 0.3203,
164
+ "step": 1100
165
+ },
166
+ {
167
+ "epoch": 2.96,
168
+ "eval_accuracy": 0.805891215801239,
169
+ "eval_loss": 0.45497822761535645,
170
+ "eval_runtime": 8.7885,
171
+ "eval_samples_per_second": 150.651,
172
+ "eval_steps_per_second": 9.444,
173
+ "step": 1100
174
+ },
175
+ {
176
+ "epoch": 3.23,
177
+ "learning_rate": 4.806539509536785e-06,
178
+ "loss": 0.248,
179
+ "step": 1200
180
+ },
181
+ {
182
+ "epoch": 3.23,
183
+ "eval_accuracy": 0.8036254048347473,
184
+ "eval_loss": 0.4857841432094574,
185
+ "eval_runtime": 8.5367,
186
+ "eval_samples_per_second": 155.095,
187
+ "eval_steps_per_second": 9.723,
188
+ "step": 1200
189
+ },
190
+ {
191
+ "epoch": 3.49,
192
+ "learning_rate": 4.615803814713896e-06,
193
+ "loss": 0.2478,
194
+ "step": 1300
195
+ },
196
+ {
197
+ "epoch": 3.49,
198
+ "eval_accuracy": 0.8028700947761536,
199
+ "eval_loss": 0.5299096703529358,
200
+ "eval_runtime": 8.4072,
201
+ "eval_samples_per_second": 157.485,
202
+ "eval_steps_per_second": 9.873,
203
+ "step": 1300
204
+ },
205
+ {
206
+ "epoch": 3.76,
207
+ "learning_rate": 4.425068119891008e-06,
208
+ "loss": 0.2371,
209
+ "step": 1400
210
+ },
211
+ {
212
+ "epoch": 3.76,
213
+ "eval_accuracy": 0.7990936636924744,
214
+ "eval_loss": 0.5012686252593994,
215
+ "eval_runtime": 8.2774,
216
+ "eval_samples_per_second": 159.953,
217
+ "eval_steps_per_second": 10.027,
218
+ "step": 1400
219
+ },
220
+ {
221
+ "epoch": 4.03,
222
+ "learning_rate": 4.2343324250681206e-06,
223
+ "loss": 0.2388,
224
+ "step": 1500
225
+ },
226
+ {
227
+ "epoch": 4.03,
228
+ "eval_accuracy": 0.8021147847175598,
229
+ "eval_loss": 0.5519642233848572,
230
+ "eval_runtime": 8.5777,
231
+ "eval_samples_per_second": 154.355,
232
+ "eval_steps_per_second": 9.676,
233
+ "step": 1500
234
+ },
235
+ {
236
+ "epoch": 4.3,
237
+ "learning_rate": 4.043596730245232e-06,
238
+ "loss": 0.1744,
239
+ "step": 1600
240
+ },
241
+ {
242
+ "epoch": 4.3,
243
+ "eval_accuracy": 0.791540801525116,
244
+ "eval_loss": 0.6686806082725525,
245
+ "eval_runtime": 8.5418,
246
+ "eval_samples_per_second": 155.002,
247
+ "eval_steps_per_second": 9.717,
248
+ "step": 1600
249
+ },
250
+ {
251
+ "epoch": 4.57,
252
+ "learning_rate": 3.8528610354223435e-06,
253
+ "loss": 0.1788,
254
+ "step": 1700
255
+ },
256
+ {
257
+ "epoch": 4.57,
258
+ "eval_accuracy": 0.768882155418396,
259
+ "eval_loss": 0.756027340888977,
260
+ "eval_runtime": 8.5142,
261
+ "eval_samples_per_second": 155.506,
262
+ "eval_steps_per_second": 9.748,
263
+ "step": 1700
264
+ },
265
+ {
266
+ "epoch": 4.84,
267
+ "learning_rate": 3.6621253405994546e-06,
268
+ "loss": 0.1652,
269
+ "step": 1800
270
+ },
271
+ {
272
+ "epoch": 4.84,
273
+ "eval_accuracy": 0.7832326292991638,
274
+ "eval_loss": 0.6984805464744568,
275
+ "eval_runtime": 8.5496,
276
+ "eval_samples_per_second": 154.861,
277
+ "eval_steps_per_second": 9.708,
278
+ "step": 1800
279
+ },
280
+ {
281
+ "epoch": 5.11,
282
+ "learning_rate": 3.4713896457765665e-06,
283
+ "loss": 0.1596,
284
+ "step": 1900
285
+ },
286
+ {
287
+ "epoch": 5.11,
288
+ "eval_accuracy": 0.791540801525116,
289
+ "eval_loss": 0.7190905213356018,
290
+ "eval_runtime": 8.5367,
291
+ "eval_samples_per_second": 155.094,
292
+ "eval_steps_per_second": 9.723,
293
+ "step": 1900
294
+ },
295
+ {
296
+ "epoch": 5.38,
297
+ "learning_rate": 3.2806539509536784e-06,
298
+ "loss": 0.1214,
299
+ "step": 2000
300
+ },
301
+ {
302
+ "epoch": 5.38,
303
+ "eval_accuracy": 0.7892749309539795,
304
+ "eval_loss": 0.909650981426239,
305
+ "eval_runtime": 8.5312,
306
+ "eval_samples_per_second": 155.195,
307
+ "eval_steps_per_second": 9.729,
308
+ "step": 2000
309
+ },
310
+ {
311
+ "epoch": 5.64,
312
+ "learning_rate": 3.0899182561307904e-06,
313
+ "loss": 0.1432,
314
+ "step": 2100
315
+ },
316
+ {
317
+ "epoch": 5.64,
318
+ "eval_accuracy": 0.7787008881568909,
319
+ "eval_loss": 0.9183990359306335,
320
+ "eval_runtime": 8.556,
321
+ "eval_samples_per_second": 154.745,
322
+ "eval_steps_per_second": 9.701,
323
+ "step": 2100
324
+ },
325
+ {
326
+ "epoch": 5.91,
327
+ "learning_rate": 2.899182561307902e-06,
328
+ "loss": 0.1145,
329
+ "step": 2200
330
+ },
331
+ {
332
+ "epoch": 5.91,
333
+ "eval_accuracy": 0.7877643704414368,
334
+ "eval_loss": 0.962020993232727,
335
+ "eval_runtime": 8.5378,
336
+ "eval_samples_per_second": 155.075,
337
+ "eval_steps_per_second": 9.721,
338
+ "step": 2200
339
+ },
340
+ {
341
+ "epoch": 6.18,
342
+ "learning_rate": 2.7084468664850138e-06,
343
+ "loss": 0.1069,
344
+ "step": 2300
345
+ },
346
+ {
347
+ "epoch": 6.18,
348
+ "eval_accuracy": 0.7892749309539795,
349
+ "eval_loss": 0.9488775134086609,
350
+ "eval_runtime": 8.3193,
351
+ "eval_samples_per_second": 159.148,
352
+ "eval_steps_per_second": 9.977,
353
+ "step": 2300
354
+ },
355
+ {
356
+ "epoch": 6.45,
357
+ "learning_rate": 2.5177111716621253e-06,
358
+ "loss": 0.1012,
359
+ "step": 2400
360
+ },
361
+ {
362
+ "epoch": 6.45,
363
+ "eval_accuracy": 0.7817220687866211,
364
+ "eval_loss": 1.0107486248016357,
365
+ "eval_runtime": 8.2713,
366
+ "eval_samples_per_second": 160.071,
367
+ "eval_steps_per_second": 10.035,
368
+ "step": 2400
369
+ },
370
+ {
371
+ "epoch": 6.72,
372
+ "learning_rate": 2.326975476839237e-06,
373
+ "loss": 0.0942,
374
+ "step": 2500
375
+ },
376
+ {
377
+ "epoch": 6.72,
378
+ "eval_accuracy": 0.7885196208953857,
379
+ "eval_loss": 1.002103567123413,
380
+ "eval_runtime": 8.5971,
381
+ "eval_samples_per_second": 154.005,
382
+ "eval_steps_per_second": 9.654,
383
+ "step": 2500
384
+ },
385
+ {
386
+ "epoch": 6.99,
387
+ "learning_rate": 2.1362397820163487e-06,
388
+ "loss": 0.087,
389
+ "step": 2600
390
+ },
391
+ {
392
+ "epoch": 6.99,
393
+ "eval_accuracy": 0.791540801525116,
394
+ "eval_loss": 1.1089540719985962,
395
+ "eval_runtime": 8.5285,
396
+ "eval_samples_per_second": 155.244,
397
+ "eval_steps_per_second": 9.732,
398
+ "step": 2600
399
+ },
400
+ {
401
+ "epoch": 7.26,
402
+ "learning_rate": 1.9455040871934606e-06,
403
+ "loss": 0.0598,
404
+ "step": 2700
405
+ },
406
+ {
407
+ "epoch": 7.26,
408
+ "eval_accuracy": 0.7794561982154846,
409
+ "eval_loss": 1.1735244989395142,
410
+ "eval_runtime": 8.6408,
411
+ "eval_samples_per_second": 153.227,
412
+ "eval_steps_per_second": 9.606,
413
+ "step": 2700
414
+ },
415
+ {
416
+ "epoch": 7.53,
417
+ "learning_rate": 1.754768392370572e-06,
418
+ "loss": 0.0742,
419
+ "step": 2800
420
+ },
421
+ {
422
+ "epoch": 7.53,
423
+ "eval_accuracy": 0.7817220687866211,
424
+ "eval_loss": 1.1433196067810059,
425
+ "eval_runtime": 8.3802,
426
+ "eval_samples_per_second": 157.992,
427
+ "eval_steps_per_second": 9.904,
428
+ "step": 2800
429
+ },
430
+ {
431
+ "epoch": 7.79,
432
+ "learning_rate": 1.5640326975476838e-06,
433
+ "loss": 0.073,
434
+ "step": 2900
435
+ },
436
+ {
437
+ "epoch": 7.79,
438
+ "eval_accuracy": 0.7953172326087952,
439
+ "eval_loss": 1.1342711448669434,
440
+ "eval_runtime": 8.549,
441
+ "eval_samples_per_second": 154.873,
442
+ "eval_steps_per_second": 9.709,
443
+ "step": 2900
444
+ },
445
+ {
446
+ "epoch": 8.06,
447
+ "learning_rate": 1.3732970027247957e-06,
448
+ "loss": 0.0553,
449
+ "step": 3000
450
+ },
451
+ {
452
+ "epoch": 8.06,
453
+ "eval_accuracy": 0.7839879393577576,
454
+ "eval_loss": 1.2258013486862183,
455
+ "eval_runtime": 8.4899,
456
+ "eval_samples_per_second": 155.95,
457
+ "eval_steps_per_second": 9.776,
458
+ "step": 3000
459
+ },
460
+ {
461
+ "epoch": 8.33,
462
+ "learning_rate": 1.1825613079019074e-06,
463
+ "loss": 0.0474,
464
+ "step": 3100
465
+ },
466
+ {
467
+ "epoch": 8.33,
468
+ "eval_accuracy": 0.7817220687866211,
469
+ "eval_loss": 1.2460782527923584,
470
+ "eval_runtime": 8.5462,
471
+ "eval_samples_per_second": 154.923,
472
+ "eval_steps_per_second": 9.712,
473
+ "step": 3100
474
+ },
475
+ {
476
+ "epoch": 8.6,
477
+ "learning_rate": 9.91825613079019e-07,
478
+ "loss": 0.0515,
479
+ "step": 3200
480
+ },
481
+ {
482
+ "epoch": 8.6,
483
+ "eval_accuracy": 0.7824773192405701,
484
+ "eval_loss": 1.2996242046356201,
485
+ "eval_runtime": 8.5173,
486
+ "eval_samples_per_second": 155.448,
487
+ "eval_steps_per_second": 9.745,
488
+ "step": 3200
489
+ },
490
+ {
491
+ "epoch": 8.87,
492
+ "learning_rate": 8.010899182561308e-07,
493
+ "loss": 0.0551,
494
+ "step": 3300
495
+ },
496
+ {
497
+ "epoch": 8.87,
498
+ "eval_accuracy": 0.7854984998703003,
499
+ "eval_loss": 1.281937837600708,
500
+ "eval_runtime": 8.2769,
501
+ "eval_samples_per_second": 159.964,
502
+ "eval_steps_per_second": 10.028,
503
+ "step": 3300
504
+ },
505
+ {
506
+ "epoch": 9.14,
507
+ "learning_rate": 6.103542234332425e-07,
508
+ "loss": 0.0541,
509
+ "step": 3400
510
+ },
511
+ {
512
+ "epoch": 9.14,
513
+ "eval_accuracy": 0.7854984998703003,
514
+ "eval_loss": 1.2807722091674805,
515
+ "eval_runtime": 8.5739,
516
+ "eval_samples_per_second": 154.423,
517
+ "eval_steps_per_second": 9.681,
518
+ "step": 3400
519
+ },
520
+ {
521
+ "epoch": 9.41,
522
+ "learning_rate": 4.196185286103542e-07,
523
+ "loss": 0.0465,
524
+ "step": 3500
525
+ },
526
+ {
527
+ "epoch": 9.41,
528
+ "eval_accuracy": 0.7817220687866211,
529
+ "eval_loss": 1.3397763967514038,
530
+ "eval_runtime": 8.503,
531
+ "eval_samples_per_second": 155.71,
532
+ "eval_steps_per_second": 9.761,
533
+ "step": 3500
534
+ },
535
+ {
536
+ "epoch": 9.68,
537
+ "learning_rate": 2.288828337874659e-07,
538
+ "loss": 0.0407,
539
+ "step": 3600
540
+ },
541
+ {
542
+ "epoch": 9.68,
543
+ "eval_accuracy": 0.7824773192405701,
544
+ "eval_loss": 1.3231298923492432,
545
+ "eval_runtime": 8.2453,
546
+ "eval_samples_per_second": 160.577,
547
+ "eval_steps_per_second": 10.066,
548
+ "step": 3600
549
+ },
550
+ {
551
+ "epoch": 9.94,
552
+ "learning_rate": 3.8147138964577657e-08,
553
+ "loss": 0.0343,
554
+ "step": 3700
555
+ },
556
+ {
557
+ "epoch": 9.94,
558
+ "eval_accuracy": 0.7824773192405701,
559
+ "eval_loss": 1.3330293893814087,
560
+ "eval_runtime": 8.5211,
561
+ "eval_samples_per_second": 155.378,
562
+ "eval_steps_per_second": 9.74,
563
+ "step": 3700
564
+ },
565
+ {
566
+ "epoch": 10.0,
567
+ "step": 3720,
568
+ "total_flos": 5.551925384610202e+16,
569
+ "train_loss": 0.20273424893297176,
570
+ "train_runtime": 2723.3449,
571
+ "train_samples_per_second": 43.755,
572
+ "train_steps_per_second": 1.366
573
+ }
574
+ ],
575
+ "max_steps": 3720,
576
+ "num_train_epochs": 10,
577
+ "total_flos": 5.551925384610202e+16,
578
+ "trial_name": null,
579
+ "trial_params": null
580
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cc77104332f69dc549c57f14823a63d375c1c4d1f379593a8844dd532412c90
3
+ size 3375