JW17 committed
Commit 2ee5607
1 Parent(s): 8592220

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: orpo-explorers/kaist-mistral-orpo-OHP-15k-Stratified-1-beta-0.2-1epoch
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: kaist-mistral-orpo-OHP-15k-Stratified-1-beta-0.2-1epoch-capybara-2epoch
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # kaist-mistral-orpo-OHP-15k-Stratified-1-beta-0.2-1epoch-capybara-2epoch
+
+ This model is a fine-tuned version of [orpo-explorers/kaist-mistral-orpo-OHP-15k-Stratified-1-beta-0.2-1epoch](https://huggingface.co/orpo-explorers/kaist-mistral-orpo-OHP-15k-Stratified-1-beta-0.2-1epoch) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 2
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.1.2.post303
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
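The generated card omits a usage section; the following is a minimal loading sketch, not part of the card itself. It assumes the checkpoint is published on the Hub under the model-index name in the orpo-explorers organization (the base model's organization); adjust the repo id if it is hosted elsewhere.

```python
# Minimal usage sketch. Assumption: the repo id below (base-model org + model-index name)
# is where this checkpoint actually lives on the Hub.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "orpo-explorers/kaist-mistral-orpo-OHP-15k-Stratified-1-beta-0.2-1epoch-capybara-2epoch"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="auto", device_map="auto")

prompt = "Explain odds-ratio preference optimization (ORPO) in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```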
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 0.5399103337283587,
+     "train_runtime": 8872.0808,
+     "train_samples": 14154,
+     "train_samples_per_second": 3.191,
+     "train_steps_per_second": 0.05
+ }
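These aggregates are internally consistent: 14,154 samples over 2 epochs in 8,872 s is about 3.19 samples per second, and dividing by the total train batch size of 64 from the model card gives the rounded 0.05 steps per second. A quick check, with the values copied from the file above:

```python
# Sanity-check the aggregates in all_results.json (values copied from the file above;
# the batch size of 64 is total_train_batch_size from the model card).
train_samples, epochs, runtime_s, total_batch = 14154, 2.0, 8872.0808, 64

samples_per_sec = train_samples * epochs / runtime_s  # ~3.191
steps_per_sec = samples_per_sec / total_batch         # ~0.0499, reported rounded to 0.05
print(round(samples_per_sec, 3), round(steps_per_sec, 4))
```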
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "transformers_version": "4.39.3"
+ }
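This file only records the special-token ids and the exporting transformers version. As an illustration (reusing the `model` and `inputs` names from the loading sketch above), from_pretrained picks the file up automatically, so generate() uses these ids without extra configuration:

```python
# Illustration only; assumes `model` and `inputs` from the earlier loading sketch.
# generation_config.json is loaded automatically with the model, so bos_token_id=1 and
# eos_token_id=2 already apply to generate(); fields can still be overridden per call.
print(model.generation_config.bos_token_id, model.generation_config.eos_token_id)  # 1 2
out = model.generate(**inputs, max_new_tokens=32)  # stops when eos_token_id=2 is produced
```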
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16aed7405b7c177b69ea267ff5b6a17dc0226c9c9d5bd01e22ada31e71121514
+ size 4943162336
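The three .safetensors entries are Git LFS pointer files: only the object's sha256 and byte size are versioned here, while the weights themselves live in LFS storage. A small sketch (assuming the shard has already been downloaded locally) for checking a file against its pointer:

```python
# Verify a downloaded shard against its LFS pointer (oid and size copied from above).
import hashlib
import os

path = "model-00001-of-00003.safetensors"  # assumed local path of the downloaded shard
expected_oid = "16aed7405b7c177b69ea267ff5b6a17dc0226c9c9d5bd01e22ada31e71121514"
expected_size = 4943162336

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")
```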
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5fcb9c80c995bd7d3ec35b249b8d04d07832bfcb0976a1ff1c741340d3ad43c
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be5b88a753020982007fe904cf30bc011de9fffec635c15bb8d9d57b6aa613d6
+ size 4540516344
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 14483464192
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00003-of-00003.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
15
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
16
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
17
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
18
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
19
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
20
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
21
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
22
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
23
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
24
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
25
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
26
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
27
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
28
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
29
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
30
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
31
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
32
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
33
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
34
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
35
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
36
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
37
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
38
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
39
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
40
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
41
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
42
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
43
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
44
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
45
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
46
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
47
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
48
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
49
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
50
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
51
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
52
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
53
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
54
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
55
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
56
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
57
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
58
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
59
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
60
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
61
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
62
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
63
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
64
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
65
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
66
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
67
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
68
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
69
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
70
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
71
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
72
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
73
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
74
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
75
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
76
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
77
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
78
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
79
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
80
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
81
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
82
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
83
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
84
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
85
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
86
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
87
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
88
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
89
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
90
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
91
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
92
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
93
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
94
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
95
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
96
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
97
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
98
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
99
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
100
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
101
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
102
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
103
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
104
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
105
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
106
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
107
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
108
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
109
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
110
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
111
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
112
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
113
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
114
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
115
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
116
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
117
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
118
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
119
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
120
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
121
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
122
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
123
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
124
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
125
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
126
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
127
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
128
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
129
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
130
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
131
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
132
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
133
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
134
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
135
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
136
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
137
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
138
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
139
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
140
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
141
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
142
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
143
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
144
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
145
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
146
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
147
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
148
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
149
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
150
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
151
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
152
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
153
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
154
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
155
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
156
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
157
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
158
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
159
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
160
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
161
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
162
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
163
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
164
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
165
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
166
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
167
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
168
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
169
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
170
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
171
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
172
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
173
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
174
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
175
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
176
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
177
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
178
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
179
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
180
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
181
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
182
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
183
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
184
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
185
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
186
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
187
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
188
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
189
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
190
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
191
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
192
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
193
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
194
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
195
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
196
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
197
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
198
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
199
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
200
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
201
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
202
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
203
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
204
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
205
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
206
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
207
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
208
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
209
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
210
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
211
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
212
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
213
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
214
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
215
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
216
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
217
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
218
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
219
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
220
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
221
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
222
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
223
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
224
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
225
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
226
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
227
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
228
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
229
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
230
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
231
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
232
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
233
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
234
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
235
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
236
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
237
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
238
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
239
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
240
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
241
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
242
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
243
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
244
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
245
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
246
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
247
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
248
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
249
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
250
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
251
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
252
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
253
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
254
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
255
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
256
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
257
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
258
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
259
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
260
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
261
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
262
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
263
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
264
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
265
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
266
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
267
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
268
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
269
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
270
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
271
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
272
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
273
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
274
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
275
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
276
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
277
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
278
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
279
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
280
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
281
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
282
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
283
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
284
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
285
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
286
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
287
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
288
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
289
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
290
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
291
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
292
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
293
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
294
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
295
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
296
+ "model.norm.weight": "model-00003-of-00003.safetensors"
297
+ }
298
+ }
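The index maps every tensor name to the shard file that stores it, which is how from_pretrained resolves a sharded checkpoint. A short inspection sketch, assuming the index file has been downloaded alongside the shards:

```python
# Inspect the sharded-checkpoint index (assumes model.safetensors.index.json is local).
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

shard_counts = Counter(index["weight_map"].values())
print(shard_counts)                     # number of tensors stored in each of the 3 shards
print(index["metadata"]["total_size"])  # 14483464192 bytes of tensor data (~14.5 GB)
```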
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 0.5399103337283587,
+     "train_runtime": 8872.0808,
+     "train_samples": 14154,
+     "train_samples_per_second": 3.191,
+     "train_steps_per_second": 0.05
+ }
trainer_state.json ADDED
@@ -0,0 +1,822 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.9954853273137698,
5
+ "eval_steps": 500,
6
+ "global_step": 442,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.05,
13
+ "grad_norm": 3.34375,
14
+ "learning_rate": 4.9936877763590664e-06,
15
+ "log_odds_chosen": 0.2890412509441376,
16
+ "log_odds_ratio": -0.6332851648330688,
17
+ "logits/chosen": -3.1468911170959473,
18
+ "logits/rejected": -3.138887643814087,
19
+ "logps/chosen": -0.8719667196273804,
20
+ "logps/rejected": -1.0382100343704224,
21
+ "loss": 0.6482,
22
+ "nll_loss": 0.5332359075546265,
23
+ "rewards/accuracies": 0.643750011920929,
24
+ "rewards/chosen": -0.17439337074756622,
25
+ "rewards/margins": 0.033248625695705414,
26
+ "rewards/rejected": -0.20764198899269104,
27
+ "step": 10
28
+ },
29
+ {
30
+ "epoch": 0.09,
31
+ "grad_norm": 3.046875,
32
+ "learning_rate": 4.9747829807701e-06,
33
+ "log_odds_chosen": 0.3164082169532776,
34
+ "log_odds_ratio": -0.6052478551864624,
35
+ "logits/chosen": -3.0846054553985596,
36
+ "logits/rejected": -3.0846097469329834,
37
+ "logps/chosen": -0.8607593774795532,
38
+ "logps/rejected": -1.0491193532943726,
39
+ "loss": 0.6131,
40
+ "nll_loss": 0.4854271411895752,
41
+ "rewards/accuracies": 0.637499988079071,
42
+ "rewards/chosen": -0.17215189337730408,
43
+ "rewards/margins": 0.03767203167080879,
44
+ "rewards/rejected": -0.20982392132282257,
45
+ "step": 20
46
+ },
47
+ {
48
+ "epoch": 0.14,
49
+ "grad_norm": 3.671875,
50
+ "learning_rate": 4.943381078271214e-06,
51
+ "log_odds_chosen": 0.3783247768878937,
52
+ "log_odds_ratio": -0.6129769086837769,
53
+ "logits/chosen": -3.084336996078491,
54
+ "logits/rejected": -3.0786914825439453,
55
+ "logps/chosen": -0.8424398303031921,
56
+ "logps/rejected": -1.0533756017684937,
57
+ "loss": 0.602,
58
+ "nll_loss": 0.4758261740207672,
59
+ "rewards/accuracies": 0.675000011920929,
60
+ "rewards/chosen": -0.16848795115947723,
61
+ "rewards/margins": 0.0421871617436409,
62
+ "rewards/rejected": -0.21067512035369873,
63
+ "step": 30
64
+ },
65
+ {
66
+ "epoch": 0.18,
67
+ "grad_norm": 3.0,
68
+ "learning_rate": 4.89964064152747e-06,
69
+ "log_odds_chosen": 0.3220486640930176,
70
+ "log_odds_ratio": -0.6127817630767822,
71
+ "logits/chosen": -3.073997974395752,
72
+ "logits/rejected": -3.069819688796997,
73
+ "logps/chosen": -0.8541064262390137,
74
+ "logps/rejected": -1.0315220355987549,
75
+ "loss": 0.5993,
76
+ "nll_loss": 0.47563648223876953,
77
+ "rewards/accuracies": 0.65625,
78
+ "rewards/chosen": -0.17082129418849945,
79
+ "rewards/margins": 0.03548307344317436,
80
+ "rewards/rejected": -0.2063043862581253,
81
+ "step": 40
82
+ },
83
+ {
84
+ "epoch": 0.23,
85
+ "grad_norm": 2.90625,
86
+ "learning_rate": 4.84378255007397e-06,
87
+ "log_odds_chosen": 0.3029581606388092,
88
+ "log_odds_ratio": -0.6211820840835571,
89
+ "logits/chosen": -3.0848793983459473,
90
+ "logits/rejected": -3.0841445922851562,
91
+ "logps/chosen": -0.8334375619888306,
92
+ "logps/rejected": -1.0042452812194824,
93
+ "loss": 0.5913,
94
+ "nll_loss": 0.4705049991607666,
95
+ "rewards/accuracies": 0.6499999761581421,
96
+ "rewards/chosen": -0.1666875034570694,
97
+ "rewards/margins": 0.03416154906153679,
98
+ "rewards/rejected": -0.20084905624389648,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.27,
103
+ "grad_norm": 3.015625,
104
+ "learning_rate": 4.7760888749230414e-06,
105
+ "log_odds_chosen": 0.4831446707248688,
106
+ "log_odds_ratio": -0.5493755340576172,
107
+ "logits/chosen": -3.095670461654663,
108
+ "logits/rejected": -3.0957283973693848,
109
+ "logps/chosen": -0.8023236989974976,
110
+ "logps/rejected": -1.0722805261611938,
111
+ "loss": 0.5964,
112
+ "nll_loss": 0.4757974147796631,
113
+ "rewards/accuracies": 0.706250011920929,
114
+ "rewards/chosen": -0.16046474874019623,
115
+ "rewards/margins": 0.05399137735366821,
116
+ "rewards/rejected": -0.21445612609386444,
117
+ "step": 60
118
+ },
119
+ {
120
+ "epoch": 0.32,
121
+ "grad_norm": 3.09375,
122
+ "learning_rate": 4.696901454167989e-06,
123
+ "log_odds_chosen": 0.3349444270133972,
124
+ "log_odds_ratio": -0.6006864309310913,
125
+ "logits/chosen": -3.0792784690856934,
126
+ "logits/rejected": -3.073970079421997,
127
+ "logps/chosen": -0.8161932229995728,
128
+ "logps/rejected": -0.9950379133224487,
129
+ "loss": 0.6022,
130
+ "nll_loss": 0.48054131865501404,
131
+ "rewards/accuracies": 0.6812499761581421,
132
+ "rewards/chosen": -0.16323864459991455,
133
+ "rewards/margins": 0.035768941044807434,
134
+ "rewards/rejected": -0.19900760054588318,
135
+ "step": 70
136
+ },
137
+ {
138
+ "epoch": 0.36,
139
+ "grad_norm": 3.171875,
140
+ "learning_rate": 4.6066201667762944e-06,
141
+ "log_odds_chosen": 0.46497654914855957,
142
+ "log_odds_ratio": -0.5399398803710938,
143
+ "logits/chosen": -3.1036829948425293,
144
+ "logits/rejected": -3.099396228790283,
145
+ "logps/chosen": -0.8102518916130066,
146
+ "logps/rejected": -1.0698442459106445,
147
+ "loss": 0.5611,
148
+ "nll_loss": 0.47170430421829224,
149
+ "rewards/accuracies": 0.7437499761581421,
150
+ "rewards/chosen": -0.16205039620399475,
151
+ "rewards/margins": 0.05191843584179878,
152
+ "rewards/rejected": -0.21396884322166443,
153
+ "step": 80
154
+ },
155
+ {
156
+ "epoch": 0.41,
157
+ "grad_norm": 3.234375,
158
+ "learning_rate": 4.505700913289246e-06,
159
+ "log_odds_chosen": 0.385485976934433,
160
+ "log_odds_ratio": -0.5975229144096375,
161
+ "logits/chosen": -3.096364974975586,
162
+ "logits/rejected": -3.085583209991455,
163
+ "logps/chosen": -0.8479864001274109,
164
+ "logps/rejected": -1.076240062713623,
165
+ "loss": 0.6009,
166
+ "nll_loss": 0.47817081212997437,
167
+ "rewards/accuracies": 0.699999988079071,
168
+ "rewards/chosen": -0.1695972979068756,
169
+ "rewards/margins": 0.04565075412392616,
170
+ "rewards/rejected": -0.21524803340435028,
171
+ "step": 90
172
+ },
173
+ {
174
+ "epoch": 0.45,
175
+ "grad_norm": 2.890625,
176
+ "learning_rate": 4.3946533136249926e-06,
177
+ "log_odds_chosen": 0.43778282403945923,
178
+ "log_odds_ratio": -0.5804494619369507,
179
+ "logits/chosen": -3.0998694896698,
180
+ "logits/rejected": -3.0987915992736816,
181
+ "logps/chosen": -0.8241409063339233,
182
+ "logps/rejected": -1.0574108362197876,
183
+ "loss": 0.5988,
184
+ "nll_loss": 0.4965757429599762,
185
+ "rewards/accuracies": 0.7250000238418579,
186
+ "rewards/chosen": -0.16482816636562347,
187
+ "rewards/margins": 0.046653974801301956,
188
+ "rewards/rejected": -0.21148213744163513,
189
+ "step": 100
190
+ },
191
+ {
192
+ "epoch": 0.5,
193
+ "grad_norm": 3.203125,
194
+ "learning_rate": 4.274038133610629e-06,
195
+ "log_odds_chosen": 0.46187520027160645,
196
+ "log_odds_ratio": -0.5494967103004456,
197
+ "logits/chosen": -3.1161484718322754,
198
+ "logits/rejected": -3.114893913269043,
199
+ "logps/chosen": -0.7468121647834778,
200
+ "logps/rejected": -0.9928558468818665,
201
+ "loss": 0.5811,
202
+ "nll_loss": 0.4424869120121002,
203
+ "rewards/accuracies": 0.7562500238418579,
204
+ "rewards/chosen": -0.14936241507530212,
205
+ "rewards/margins": 0.04920873045921326,
206
+ "rewards/rejected": -0.19857117533683777,
207
+ "step": 110
208
+ },
209
+ {
210
+ "epoch": 0.54,
211
+ "grad_norm": 3.375,
212
+ "learning_rate": 4.1444644532387485e-06,
213
+ "log_odds_chosen": 0.4036913812160492,
214
+ "log_odds_ratio": -0.6148982048034668,
215
+ "logits/chosen": -3.0943081378936768,
216
+ "logits/rejected": -3.089369297027588,
217
+ "logps/chosen": -0.8185051083564758,
218
+ "logps/rejected": -1.0283105373382568,
219
+ "loss": 0.5743,
220
+ "nll_loss": 0.4471573233604431,
221
+ "rewards/accuracies": 0.6875,
222
+ "rewards/chosen": -0.16370102763175964,
223
+ "rewards/margins": 0.04196108505129814,
224
+ "rewards/rejected": -0.2056621015071869,
225
+ "step": 120
226
+ },
227
+ {
228
+ "epoch": 0.59,
229
+ "grad_norm": 3.109375,
230
+ "learning_rate": 4.006586590948141e-06,
231
+ "log_odds_chosen": 0.38943806290626526,
232
+ "log_odds_ratio": -0.5949686169624329,
233
+ "logits/chosen": -3.105708360671997,
234
+ "logits/rejected": -3.1006293296813965,
235
+ "logps/chosen": -0.8150092363357544,
236
+ "logps/rejected": -1.0240681171417236,
237
+ "loss": 0.5787,
238
+ "nll_loss": 0.46157145500183105,
239
+ "rewards/accuracies": 0.6875,
240
+ "rewards/chosen": -0.16300183534622192,
241
+ "rewards/margins": 0.041811779141426086,
242
+ "rewards/rejected": -0.20481359958648682,
243
+ "step": 130
244
+ },
245
+ {
246
+ "epoch": 0.63,
247
+ "grad_norm": 3.109375,
248
+ "learning_rate": 3.861100799460336e-06,
249
+ "log_odds_chosen": 0.5897554159164429,
250
+ "log_odds_ratio": -0.542380690574646,
251
+ "logits/chosen": -3.100862503051758,
252
+ "logits/rejected": -3.0946009159088135,
253
+ "logps/chosen": -0.7791546583175659,
254
+ "logps/rejected": -1.0703848600387573,
255
+ "loss": 0.5747,
256
+ "nll_loss": 0.5026167631149292,
257
+ "rewards/accuracies": 0.7250000238418579,
258
+ "rewards/chosen": -0.15583094954490662,
259
+ "rewards/margins": 0.05824603885412216,
260
+ "rewards/rejected": -0.214076966047287,
261
+ "step": 140
262
+ },
263
+ {
264
+ "epoch": 0.68,
265
+ "grad_norm": 3.03125,
266
+ "learning_rate": 3.7087417498572946e-06,
267
+ "log_odds_chosen": 0.4274469316005707,
268
+ "log_odds_ratio": -0.5712376236915588,
269
+ "logits/chosen": -3.102602005004883,
270
+ "logits/rejected": -3.1062116622924805,
271
+ "logps/chosen": -0.8305424451828003,
272
+ "logps/rejected": -1.077083945274353,
273
+ "loss": 0.5692,
274
+ "nll_loss": 0.4742380678653717,
275
+ "rewards/accuracies": 0.7250000238418579,
276
+ "rewards/chosen": -0.16610850393772125,
277
+ "rewards/margins": 0.04930829256772995,
278
+ "rewards/rejected": -0.2154167890548706,
279
+ "step": 150
280
+ },
281
+ {
282
+ "epoch": 0.72,
283
+ "grad_norm": 3.28125,
284
+ "learning_rate": 3.550278821654866e-06,
285
+ "log_odds_chosen": 0.34345707297325134,
286
+ "log_odds_ratio": -0.6129624247550964,
287
+ "logits/chosen": -3.095494508743286,
288
+ "logits/rejected": -3.0863471031188965,
289
+ "logps/chosen": -0.8034600019454956,
290
+ "logps/rejected": -0.9921743273735046,
291
+ "loss": 0.587,
292
+ "nll_loss": 0.4692765176296234,
293
+ "rewards/accuracies": 0.699999988079071,
294
+ "rewards/chosen": -0.1606920212507248,
295
+ "rewards/margins": 0.037742845714092255,
296
+ "rewards/rejected": -0.19843485951423645,
297
+ "step": 160
298
+ },
299
+ {
300
+ "epoch": 0.77,
301
+ "grad_norm": 4.28125,
302
+ "learning_rate": 3.386512217606339e-06,
303
+ "log_odds_chosen": 0.3175373375415802,
304
+ "log_odds_ratio": -0.6278958320617676,
305
+ "logits/chosen": -3.1244797706604004,
306
+ "logits/rejected": -3.1204514503479004,
307
+ "logps/chosen": -0.7932506799697876,
308
+ "logps/rejected": -0.9527732133865356,
309
+ "loss": 0.6102,
310
+ "nll_loss": 0.472064733505249,
311
+ "rewards/accuracies": 0.6499999761581421,
312
+ "rewards/chosen": -0.15865013003349304,
313
+ "rewards/margins": 0.031904518604278564,
314
+ "rewards/rejected": -0.1905546486377716,
315
+ "step": 170
316
+ },
317
+ {
318
+ "epoch": 0.81,
319
+ "grad_norm": 3.765625,
320
+ "learning_rate": 3.218268922855452e-06,
321
+ "log_odds_chosen": 0.42049679160118103,
322
+ "log_odds_ratio": -0.5808514356613159,
323
+ "logits/chosen": -3.1028733253479004,
324
+ "logits/rejected": -3.0980591773986816,
325
+ "logps/chosen": -0.7747179269790649,
326
+ "logps/rejected": -0.9938864707946777,
327
+ "loss": 0.5825,
328
+ "nll_loss": 0.44331270456314087,
329
+ "rewards/accuracies": 0.6937500238418579,
330
+ "rewards/chosen": -0.154943585395813,
331
+ "rewards/margins": 0.04383372142910957,
332
+ "rewards/rejected": -0.19877730309963226,
333
+ "step": 180
334
+ },
335
+ {
336
+ "epoch": 0.86,
337
+ "grad_norm": 3.328125,
338
+ "learning_rate": 3.046398528844248e-06,
339
+ "log_odds_chosen": 0.5076274275779724,
340
+ "log_odds_ratio": -0.5742210745811462,
341
+ "logits/chosen": -3.104665517807007,
342
+ "logits/rejected": -3.095426082611084,
343
+ "logps/chosen": -0.776441216468811,
344
+ "logps/rejected": -1.0134727954864502,
345
+ "loss": 0.5712,
346
+ "nll_loss": 0.4288255274295807,
347
+ "rewards/accuracies": 0.7124999761581421,
348
+ "rewards/chosen": -0.15528824925422668,
349
+ "rewards/margins": 0.04740630090236664,
350
+ "rewards/rejected": -0.20269456505775452,
351
+ "step": 190
352
+ },
353
+ {
354
+ "epoch": 0.9,
355
+ "grad_norm": 2.953125,
356
+ "learning_rate": 2.871768943064129e-06,
357
+ "log_odds_chosen": 0.3015575408935547,
358
+ "log_odds_ratio": -0.6296088695526123,
359
+ "logits/chosen": -3.0799665451049805,
360
+ "logits/rejected": -3.07787823677063,
361
+ "logps/chosen": -0.8669988512992859,
362
+ "logps/rejected": -1.0326287746429443,
363
+ "loss": 0.5744,
364
+ "nll_loss": 0.47870683670043945,
365
+ "rewards/accuracies": 0.643750011920929,
366
+ "rewards/chosen": -0.17339977622032166,
367
+ "rewards/margins": 0.033125996589660645,
368
+ "rewards/rejected": -0.2065257579088211,
369
+ "step": 200
370
+ },
371
+ {
372
+ "epoch": 0.95,
373
+ "grad_norm": 2.859375,
374
+ "learning_rate": 2.695262006314912e-06,
375
+ "log_odds_chosen": 0.3043849766254425,
376
+ "log_odds_ratio": -0.6296250224113464,
377
+ "logits/chosen": -3.0792198181152344,
378
+ "logits/rejected": -3.0685715675354004,
379
+ "logps/chosen": -0.8420785069465637,
380
+ "logps/rejected": -1.0109227895736694,
381
+ "loss": 0.5851,
382
+ "nll_loss": 0.49167829751968384,
383
+ "rewards/accuracies": 0.65625,
384
+ "rewards/chosen": -0.16841569542884827,
385
+ "rewards/margins": 0.03376886993646622,
386
+ "rewards/rejected": -0.20218458771705627,
387
+ "step": 210
388
+ },
389
+ {
390
+ "epoch": 0.99,
391
+ "grad_norm": 3.390625,
392
+ "learning_rate": 2.517769039603744e-06,
393
+ "log_odds_chosen": 0.47437596321105957,
394
+ "log_odds_ratio": -0.5693033933639526,
395
+ "logits/chosen": -3.0923125743865967,
396
+ "logits/rejected": -3.08634614944458,
397
+ "logps/chosen": -0.7851861119270325,
398
+ "logps/rejected": -1.0160605907440186,
399
+ "loss": 0.6031,
400
+ "nll_loss": 0.4828670620918274,
401
+ "rewards/accuracies": 0.71875,
402
+ "rewards/chosen": -0.15703722834587097,
403
+ "rewards/margins": 0.04617489501833916,
404
+ "rewards/rejected": -0.20321211218833923,
405
+ "step": 220
406
+ },
407
+ {
408
+ "epoch": 1.04,
409
+ "grad_norm": 5.125,
410
+ "learning_rate": 2.3401863431710864e-06,
411
+ "log_odds_chosen": 0.8126243352890015,
412
+ "log_odds_ratio": -0.4562348425388336,
413
+ "logits/chosen": -3.0906317234039307,
414
+ "logits/rejected": -3.08894681930542,
415
+ "logps/chosen": -0.6749258041381836,
416
+ "logps/rejected": -1.0676677227020264,
417
+ "loss": 0.5063,
418
+ "nll_loss": 0.4003027081489563,
419
+ "rewards/accuracies": 0.78125,
420
+ "rewards/chosen": -0.13498517870903015,
421
+ "rewards/margins": 0.0785483792424202,
422
+ "rewards/rejected": -0.21353355050086975,
423
+ "step": 230
424
+ },
425
+ {
426
+ "epoch": 1.08,
427
+ "grad_norm": 3.34375,
428
+ "learning_rate": 2.163410670372652e-06,
429
+ "log_odds_chosen": 0.8564330339431763,
430
+ "log_odds_ratio": -0.4268684387207031,
431
+ "logits/chosen": -3.0698952674865723,
432
+ "logits/rejected": -3.070732831954956,
433
+ "logps/chosen": -0.7170256972312927,
434
+ "logps/rejected": -1.1589481830596924,
435
+ "loss": 0.4975,
436
+ "nll_loss": 0.45029813051223755,
437
+ "rewards/accuracies": 0.862500011920929,
438
+ "rewards/chosen": -0.14340513944625854,
439
+ "rewards/margins": 0.08838452398777008,
440
+ "rewards/rejected": -0.23178966343402863,
441
+ "step": 240
442
+ },
443
+ {
444
+ "epoch": 1.13,
445
+ "grad_norm": 3.34375,
446
+ "learning_rate": 1.9883346992732256e-06,
447
+ "log_odds_chosen": 1.0222368240356445,
448
+ "log_odds_ratio": -0.36799290776252747,
449
+ "logits/chosen": -3.055107593536377,
450
+ "logits/rejected": -3.045011281967163,
451
+ "logps/chosen": -0.6467097997665405,
452
+ "logps/rejected": -1.2033334970474243,
453
+ "loss": 0.4936,
454
+ "nll_loss": 0.3834058940410614,
455
+ "rewards/accuracies": 0.9125000238418579,
456
+ "rewards/chosen": -0.1293419599533081,
457
+ "rewards/margins": 0.1113247275352478,
458
+ "rewards/rejected": -0.24066667258739471,
459
+ "step": 250
460
+ },
461
+ {
462
+ "epoch": 1.17,
463
+ "grad_norm": 2.921875,
464
+ "learning_rate": 1.8158425248197931e-06,
465
+ "log_odds_chosen": 0.9676607251167297,
466
+ "log_odds_ratio": -0.3842445909976959,
467
+ "logits/chosen": -3.078850269317627,
468
+ "logits/rejected": -3.0772275924682617,
469
+ "logps/chosen": -0.6771007180213928,
470
+ "logps/rejected": -1.177585482597351,
471
+ "loss": 0.5028,
472
+ "nll_loss": 0.44024211168289185,
473
+ "rewards/accuracies": 0.887499988079071,
474
+ "rewards/chosen": -0.13542014360427856,
475
+ "rewards/margins": 0.10009696334600449,
476
+ "rewards/rejected": -0.23551711440086365,
477
+ "step": 260
478
+ },
479
+ {
480
+ "epoch": 1.22,
481
+ "grad_norm": 3.1875,
482
+ "learning_rate": 1.6468051943575242e-06,
483
+ "log_odds_chosen": 0.9456725120544434,
484
+ "log_odds_ratio": -0.39273351430892944,
485
+ "logits/chosen": -3.085444450378418,
486
+ "logits/rejected": -3.08333420753479,
487
+ "logps/chosen": -0.6596750020980835,
488
+ "logps/rejected": -1.1495213508605957,
489
+ "loss": 0.4758,
490
+ "nll_loss": 0.3547888398170471,
491
+ "rewards/accuracies": 0.8999999761581421,
492
+ "rewards/chosen": -0.1319349855184555,
493
+ "rewards/margins": 0.09796925634145737,
494
+ "rewards/rejected": -0.22990426421165466,
495
+ "step": 270
496
+ },
497
+ {
498
+ "epoch": 1.26,
499
+ "grad_norm": 3.328125,
500
+ "learning_rate": 1.482076309033254e-06,
501
+ "log_odds_chosen": 0.8887540102005005,
502
+ "log_odds_ratio": -0.43248239159584045,
503
+ "logits/chosen": -3.0775530338287354,
504
+ "logits/rejected": -3.0756869316101074,
505
+ "logps/chosen": -0.6567140817642212,
506
+ "logps/rejected": -1.1050218343734741,
507
+ "loss": 0.4872,
508
+ "nll_loss": 0.3975124657154083,
509
+ "rewards/accuracies": 0.8500000238418579,
510
+ "rewards/chosen": -0.1313427984714508,
511
+ "rewards/margins": 0.08966155350208282,
512
+ "rewards/rejected": -0.22100436687469482,
513
+ "step": 280
514
+ },
515
+ {
516
+ "epoch": 1.31,
517
+ "grad_norm": 2.96875,
518
+ "learning_rate": 1.3224877132984131e-06,
519
+ "log_odds_chosen": 1.054574728012085,
520
+ "log_odds_ratio": -0.3588215708732605,
521
+ "logits/chosen": -3.103698253631592,
522
+ "logits/rejected": -3.107422351837158,
523
+ "logps/chosen": -0.6697841882705688,
524
+ "logps/rejected": -1.2304431200027466,
525
+ "loss": 0.4864,
526
+ "nll_loss": 0.4196816384792328,
527
+ "rewards/accuracies": 0.918749988079071,
528
+ "rewards/chosen": -0.13395683467388153,
529
+ "rewards/margins": 0.11213177442550659,
530
+ "rewards/rejected": -0.24608862400054932,
531
+ "step": 290
532
+ },
533
+ {
534
+ "epoch": 1.35,
535
+ "grad_norm": 3.109375,
536
+ "learning_rate": 1.1688452942784592e-06,
537
+ "log_odds_chosen": 1.018751621246338,
538
+ "log_odds_ratio": -0.3791121542453766,
539
+ "logits/chosen": -3.0898470878601074,
540
+ "logits/rejected": -3.0825257301330566,
541
+ "logps/chosen": -0.6466778516769409,
542
+ "logps/rejected": -1.1818552017211914,
543
+ "loss": 0.4914,
544
+ "nll_loss": 0.3884134292602539,
545
+ "rewards/accuracies": 0.893750011920929,
546
+ "rewards/chosen": -0.12933556735515594,
547
+ "rewards/margins": 0.10703550279140472,
548
+ "rewards/rejected": -0.23637107014656067,
549
+ "step": 300
550
+ },
551
+ {
552
+ "epoch": 1.4,
553
+ "grad_norm": 2.984375,
554
+ "learning_rate": 1.021924912221062e-06,
555
+ "log_odds_chosen": 0.8282191157341003,
556
+ "log_odds_ratio": -0.42912721633911133,
557
+ "logits/chosen": -3.1064085960388184,
558
+ "logits/rejected": -3.105886697769165,
559
+ "logps/chosen": -0.669070839881897,
560
+ "logps/rejected": -1.1031153202056885,
561
+ "loss": 0.4969,
562
+ "nll_loss": 0.4509221017360687,
563
+ "rewards/accuracies": 0.8687499761581421,
564
+ "rewards/chosen": -0.13381414115428925,
565
+ "rewards/margins": 0.08680891990661621,
566
+ "rewards/rejected": -0.22062306106090546,
567
+ "step": 310
568
+ },
569
+ {
570
+ "epoch": 1.44,
571
+ "grad_norm": 3.515625,
572
+ "learning_rate": 8.824684825733865e-07,
573
+ "log_odds_chosen": 1.0580097436904907,
574
+ "log_odds_ratio": -0.38433724641799927,
575
+ "logits/chosen": -3.081042528152466,
576
+ "logits/rejected": -3.075192451477051,
577
+ "logps/chosen": -0.6307107210159302,
578
+ "logps/rejected": -1.1325790882110596,
579
+ "loss": 0.493,
580
+ "nll_loss": 0.4108586311340332,
581
+ "rewards/accuracies": 0.887499988079071,
582
+ "rewards/chosen": -0.12614212930202484,
583
+ "rewards/margins": 0.10037368535995483,
584
+ "rewards/rejected": -0.22651581466197968,
585
+ "step": 320
586
+ },
587
+ {
588
+ "epoch": 1.49,
589
+ "grad_norm": 3.046875,
590
+ "learning_rate": 7.51180229473116e-07,
591
+ "log_odds_chosen": 0.9154760241508484,
592
+ "log_odds_ratio": -0.415397584438324,
593
+ "logits/chosen": -3.0937957763671875,
594
+ "logits/rejected": -3.0928263664245605,
595
+ "logps/chosen": -0.6775897741317749,
596
+ "logps/rejected": -1.1316546201705933,
597
+ "loss": 0.5044,
598
+ "nll_loss": 0.4353519380092621,
599
+ "rewards/accuracies": 0.8500000238418579,
600
+ "rewards/chosen": -0.13551795482635498,
601
+ "rewards/margins": 0.09081296622753143,
602
+ "rewards/rejected": -0.22633090615272522,
603
+ "step": 330
604
+ },
605
+ {
606
+ "epoch": 1.53,
607
+ "grad_norm": 3.15625,
608
+ "learning_rate": 6.28723129572247e-07,
609
+ "log_odds_chosen": 1.139094352722168,
610
+ "log_odds_ratio": -0.351654052734375,
611
+ "logits/chosen": -3.077972888946533,
612
+ "logits/rejected": -3.0774223804473877,
613
+ "logps/chosen": -0.6138228178024292,
614
+ "logps/rejected": -1.158979058265686,
615
+ "loss": 0.4728,
616
+ "nll_loss": 0.3857228457927704,
617
+ "rewards/accuracies": 0.90625,
618
+ "rewards/chosen": -0.12276456505060196,
619
+ "rewards/margins": 0.10903123766183853,
620
+ "rewards/rejected": -0.2317957878112793,
621
+ "step": 340
622
+ },
623
+ {
624
+ "epoch": 1.58,
625
+ "grad_norm": 3.234375,
626
+ "learning_rate": 5.157155641515766e-07,
627
+ "log_odds_chosen": 1.0156279802322388,
628
+ "log_odds_ratio": -0.3680209219455719,
629
+ "logits/chosen": -3.093818426132202,
630
+ "logits/rejected": -3.095491409301758,
631
+ "logps/chosen": -0.6614006161689758,
632
+ "logps/rejected": -1.1964319944381714,
633
+ "loss": 0.5024,
634
+ "nll_loss": 0.4554772973060608,
635
+ "rewards/accuracies": 0.9375,
636
+ "rewards/chosen": -0.13228009641170502,
637
+ "rewards/margins": 0.10700628906488419,
638
+ "rewards/rejected": -0.2392864227294922,
639
+ "step": 350
640
+ },
641
+ {
642
+ "epoch": 1.63,
643
+ "grad_norm": 3.109375,
644
+ "learning_rate": 4.127281964319446e-07,
645
+ "log_odds_chosen": 1.1072419881820679,
646
+ "log_odds_ratio": -0.365578830242157,
647
+ "logits/chosen": -3.0632922649383545,
648
+ "logits/rejected": -3.053469657897949,
649
+ "logps/chosen": -0.6406615376472473,
650
+ "logps/rejected": -1.1723295450210571,
651
+ "loss": 0.4717,
652
+ "nll_loss": 0.39158958196640015,
653
+ "rewards/accuracies": 0.8999999761581421,
654
+ "rewards/chosen": -0.12813231348991394,
655
+ "rewards/margins": 0.10633359849452972,
656
+ "rewards/rejected": -0.23446591198444366,
657
+ "step": 360
658
+ },
659
+ {
660
+ "epoch": 1.67,
661
+ "grad_norm": 3.171875,
662
+ "learning_rate": 3.202810898511424e-07,
663
+ "log_odds_chosen": 1.0118036270141602,
664
+ "log_odds_ratio": -0.3721558451652527,
665
+ "logits/chosen": -3.0760838985443115,
666
+ "logits/rejected": -3.0786654949188232,
667
+ "logps/chosen": -0.6295793652534485,
668
+ "logps/rejected": -1.1554322242736816,
669
+ "loss": 0.4954,
670
+ "nll_loss": 0.3821503221988678,
671
+ "rewards/accuracies": 0.893750011920929,
672
+ "rewards/chosen": -0.12591585516929626,
673
+ "rewards/margins": 0.1051705852150917,
674
+ "rewards/rejected": -0.23108646273612976,
675
+ "step": 370
676
+ },
677
+ {
678
+ "epoch": 1.72,
679
+ "grad_norm": 3.015625,
680
+ "learning_rate": 2.388410818585263e-07,
681
+ "log_odds_chosen": 1.0309429168701172,
682
+ "log_odds_ratio": -0.3757808804512024,
683
+ "logits/chosen": -3.0944666862487793,
684
+ "logits/rejected": -3.102954149246216,
685
+ "logps/chosen": -0.6560705304145813,
686
+ "logps/rejected": -1.1918388605117798,
687
+ "loss": 0.4848,
688
+ "nll_loss": 0.4180088937282562,
689
+ "rewards/accuracies": 0.893750011920929,
690
+ "rewards/chosen": -0.13121411204338074,
691
+ "rewards/margins": 0.10715366899967194,
692
+ "rewards/rejected": -0.23836776614189148,
693
+ "step": 380
694
+ },
695
+ {
696
+ "epoch": 1.76,
697
+ "grad_norm": 3.0625,
698
+ "learning_rate": 1.6881942648911077e-07,
699
+ "log_odds_chosen": 1.045115351676941,
700
+ "log_odds_ratio": -0.3773229420185089,
701
+ "logits/chosen": -3.099276304244995,
702
+ "logits/rejected": -3.0971086025238037,
703
+ "logps/chosen": -0.6689329743385315,
704
+ "logps/rejected": -1.209533452987671,
705
+ "loss": 0.4865,
706
+ "nll_loss": 0.4016016125679016,
707
+ "rewards/accuracies": 0.887499988079071,
708
+ "rewards/chosen": -0.13378658890724182,
709
+ "rewards/margins": 0.10812009871006012,
710
+ "rewards/rejected": -0.24190667271614075,
711
+ "step": 390
712
+ },
713
+ {
714
+ "epoch": 1.81,
715
+ "grad_norm": 3.109375,
716
+ "learning_rate": 1.1056971762161584e-07,
717
+ "log_odds_chosen": 0.8860037922859192,
718
+ "log_odds_ratio": -0.4059659540653229,
719
+ "logits/chosen": -3.0970611572265625,
720
+ "logits/rejected": -3.0827975273132324,
721
+ "logps/chosen": -0.6797454357147217,
722
+ "logps/rejected": -1.1361665725708008,
723
+ "loss": 0.4883,
724
+ "nll_loss": 0.4087887406349182,
725
+ "rewards/accuracies": 0.90625,
726
+ "rewards/chosen": -0.13594909012317657,
727
+ "rewards/margins": 0.09128421545028687,
728
+ "rewards/rejected": -0.22723329067230225,
729
+ "step": 400
730
+ },
731
+ {
732
+ "epoch": 1.85,
733
+ "grad_norm": 3.015625,
734
+ "learning_rate": 6.438610340755464e-08,
735
+ "log_odds_chosen": 0.9491767883300781,
736
+ "log_odds_ratio": -0.3955945372581482,
737
+ "logits/chosen": -3.095716953277588,
738
+ "logits/rejected": -3.0901029109954834,
739
+ "logps/chosen": -0.6622263193130493,
740
+ "logps/rejected": -1.1627901792526245,
741
+ "loss": 0.4715,
742
+ "nll_loss": 0.4081706404685974,
743
+ "rewards/accuracies": 0.8999999761581421,
744
+ "rewards/chosen": -0.13244526088237762,
745
+ "rewards/margins": 0.10011278092861176,
746
+ "rewards/rejected": -0.23255804181098938,
747
+ "step": 410
748
+ },
749
+ {
750
+ "epoch": 1.9,
751
+ "grad_norm": 3.328125,
752
+ "learning_rate": 3.050180088809973e-08,
753
+ "log_odds_chosen": 0.89338219165802,
754
+ "log_odds_ratio": -0.41199594736099243,
755
+ "logits/chosen": -3.078094482421875,
756
+ "logits/rejected": -3.074589252471924,
757
+ "logps/chosen": -0.6442387700080872,
758
+ "logps/rejected": -1.103659987449646,
759
+ "loss": 0.4976,
760
+ "nll_loss": 0.40062204003334045,
761
+ "rewards/accuracies": 0.862500011920929,
762
+ "rewards/chosen": -0.12884774804115295,
763
+ "rewards/margins": 0.09188426285982132,
764
+ "rewards/rejected": -0.22073200345039368,
765
+ "step": 420
766
+ },
767
+ {
768
+ "epoch": 1.94,
769
+ "grad_norm": 3.15625,
770
+ "learning_rate": 9.087918299586772e-09,
771
+ "log_odds_chosen": 0.9813147783279419,
772
+ "log_odds_ratio": -0.4028463363647461,
773
+ "logits/chosen": -3.079704523086548,
774
+ "logits/rejected": -3.0714404582977295,
775
+ "logps/chosen": -0.6575345993041992,
776
+ "logps/rejected": -1.1326944828033447,
777
+ "loss": 0.4832,
778
+ "nll_loss": 0.39662784337997437,
779
+ "rewards/accuracies": 0.856249988079071,
780
+ "rewards/chosen": -0.13150693476200104,
781
+ "rewards/margins": 0.0950319841504097,
782
+ "rewards/rejected": -0.22653889656066895,
783
+ "step": 430
784
+ },
785
+ {
786
+ "epoch": 1.99,
787
+ "grad_norm": 4.03125,
788
+ "learning_rate": 2.525910147516131e-10,
789
+ "log_odds_chosen": 0.966279149055481,
790
+ "log_odds_ratio": -0.4019397795200348,
791
+ "logits/chosen": -3.0766844749450684,
792
+ "logits/rejected": -3.0788207054138184,
793
+ "logps/chosen": -0.6589742302894592,
794
+ "logps/rejected": -1.1595604419708252,
795
+ "loss": 0.48,
796
+ "nll_loss": 0.38374659419059753,
797
+ "rewards/accuracies": 0.8500000238418579,
798
+ "rewards/chosen": -0.13179484009742737,
799
+ "rewards/margins": 0.10011725127696991,
800
+ "rewards/rejected": -0.23191209137439728,
801
+ "step": 440
802
+ },
803
+ {
804
+ "epoch": 2.0,
805
+ "step": 442,
806
+ "total_flos": 0.0,
807
+ "train_loss": 0.5399103337283587,
808
+ "train_runtime": 8872.0808,
809
+ "train_samples_per_second": 3.191,
810
+ "train_steps_per_second": 0.05
811
+ }
812
+ ],
813
+ "logging_steps": 10,
814
+ "max_steps": 442,
815
+ "num_input_tokens_seen": 0,
816
+ "num_train_epochs": 2,
817
+ "save_steps": 500,
818
+ "total_flos": 0.0,
819
+ "train_batch_size": 8,
820
+ "trial_name": null,
821
+ "trial_params": null
822
+ }
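trainer_state.json carries the full logging history (one entry every logging_steps = 10 optimizer steps, 442 steps in total). A plotting sketch, assuming matplotlib is installed, for recovering the loss curve from it:

```python
# Plot the training loss recorded in trainer_state.json (logged every 10 steps).
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

logs = [entry for entry in state["log_history"] if "loss" in entry]  # skips the final summary entry
steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]

plt.plot(steps, losses, marker="o")
plt.xlabel("optimizer step")
plt.ylabel("training loss")
plt.title("ORPO fine-tuning, 2 epochs (442 steps)")
plt.savefig("train_loss.png")
```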