PhysGame committed on
Commit
1f712b8
·
verified ·
1 Parent(s): 76cc1c4

Upload 14 files

Browse files
added_tokens.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "<image>": 151646,
3
+ "<video>": 151647,
4
+ "<|endoftext|>": 151643,
5
+ "<|im_end|>": 151645,
6
+ "<|im_start|>": 151644
7
+ }
config.json ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/raid/raushan/si-7b",
3
+ "architectures": [
4
+ "LlavaInterleaveForConditionalGeneration"
5
+ ],
6
+ "btadapter": false,
7
+ "btadapter_depth": 4,
8
+ "clip_post_pretrain": null,
9
+ "clip_weight": "/code/ppllava_v0/siglip-so400m-patch14-384",
10
+ "frame_shape": [
11
+ 27,
12
+ 27
13
+ ],
14
+ "hidden_size": 3584,
15
+ "ignore_index": -100,
16
+ "image_grid_pinpoints": [
17
+ [
18
+ 384,
19
+ 384
20
+ ],
21
+ [
22
+ 384,
23
+ 768
24
+ ],
25
+ [
26
+ 384,
27
+ 1152
28
+ ],
29
+ [
30
+ 384,
31
+ 1536
32
+ ],
33
+ [
34
+ 384,
35
+ 1920
36
+ ],
37
+ [
38
+ 384,
39
+ 2304
40
+ ],
41
+ [
42
+ 768,
43
+ 384
44
+ ],
45
+ [
46
+ 768,
47
+ 768
48
+ ],
49
+ [
50
+ 768,
51
+ 1152
52
+ ],
53
+ [
54
+ 768,
55
+ 1536
56
+ ],
57
+ [
58
+ 768,
59
+ 1920
60
+ ],
61
+ [
62
+ 768,
63
+ 2304
64
+ ],
65
+ [
66
+ 1152,
67
+ 384
68
+ ],
69
+ [
70
+ 1152,
71
+ 768
72
+ ],
73
+ [
74
+ 1152,
75
+ 1152
76
+ ],
77
+ [
78
+ 1152,
79
+ 1536
80
+ ],
81
+ [
82
+ 1152,
83
+ 1920
84
+ ],
85
+ [
86
+ 1152,
87
+ 2304
88
+ ],
89
+ [
90
+ 1536,
91
+ 384
92
+ ],
93
+ [
94
+ 1536,
95
+ 768
96
+ ],
97
+ [
98
+ 1536,
99
+ 1152
100
+ ],
101
+ [
102
+ 1536,
103
+ 1536
104
+ ],
105
+ [
106
+ 1536,
107
+ 1920
108
+ ],
109
+ [
110
+ 1536,
111
+ 2304
112
+ ],
113
+ [
114
+ 1920,
115
+ 384
116
+ ],
117
+ [
118
+ 1920,
119
+ 768
120
+ ],
121
+ [
122
+ 1920,
123
+ 1152
124
+ ],
125
+ [
126
+ 1920,
127
+ 1536
128
+ ],
129
+ [
130
+ 1920,
131
+ 1920
132
+ ],
133
+ [
134
+ 1920,
135
+ 2304
136
+ ],
137
+ [
138
+ 2304,
139
+ 384
140
+ ],
141
+ [
142
+ 2304,
143
+ 768
144
+ ],
145
+ [
146
+ 2304,
147
+ 1152
148
+ ],
149
+ [
150
+ 2304,
151
+ 1536
152
+ ],
153
+ [
154
+ 2304,
155
+ 1920
156
+ ],
157
+ [
158
+ 2304,
159
+ 2304
160
+ ]
161
+ ],
162
+ "image_pooling_kernel": [
163
+ 1,
164
+ 3,
165
+ 3
166
+ ],
167
+ "image_pooling_stride": [
168
+ 1,
169
+ 3,
170
+ 3
171
+ ],
172
+ "image_token_index": 151646,
173
+ "long_clip": true,
174
+ "max_T": 64,
175
+ "model_type": "llava_onevision",
176
+ "pad_token_id": 151643,
177
+ "pooling": "clipST_3d",
178
+ "pooling_kernel": [
179
+ 1,
180
+ 3,
181
+ 3
182
+ ],
183
+ "pooling_stride": [
184
+ 1,
185
+ 3,
186
+ 3
187
+ ],
188
+ "pooling_temp": 0.01,
189
+ "projector_hidden_act": "gelu",
190
+ "qwen": true,
191
+ "text_config": {
192
+ "_name_or_path": "Qwen/Qwen2-7B-Instruct",
193
+ "architectures": [
194
+ "Qwen2ForCausalLM"
195
+ ],
196
+ "bos_token_id": 151643,
197
+ "eos_token_id": 151645,
198
+ "hidden_size": 3584,
199
+ "intermediate_size": 18944,
200
+ "model_type": "qwen2",
201
+ "num_attention_heads": 28,
202
+ "num_hidden_layers": 28,
203
+ "num_key_value_heads": 4,
204
+ "rope_theta": 1000000.0,
205
+ "torch_dtype": "bfloat16",
206
+ "vocab_size": 152128
207
+ },
208
+ "tie_word_embeddings": false,
209
+ "torch_dtype": "bfloat16",
210
+ "transformers_version": "4.45.2",
211
+ "use_image_newline_parameter": true,
212
+ "video_token_index": 151647,
213
+ "vision_aspect_ratio": "anyres_max_9",
214
+ "vision_config": {
215
+ "hidden_size": 1152,
216
+ "image_size": 384,
217
+ "intermediate_size": 4304,
218
+ "model_type": "siglip_vision_model",
219
+ "num_attention_heads": 16,
220
+ "num_hidden_layers": 26,
221
+ "patch_size": 14,
222
+ "vision_use_head": false
223
+ },
224
+ "vision_feature_layer": -1,
225
+ "vision_feature_select_strategy": "full"
226
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 151643,
4
+ "eos_token_id": 151645,
5
+ "pad_token_id": 151643,
6
+ "transformers_version": "4.45.2"
7
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28cfa9668f16fb4c453dfd5ed1bb6e382b02e1abca853041e505ea006dc81826
3
+ size 4909741188
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f417c115204369e06b12f8d38ebe95fd2ef6cce1fa5dd40ac25f96ac08973156
3
+ size 4991497768
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:338084512715be8f33e3c2b00d182dc0c5721f18222f8a88f4a3b9853afb62a2
3
+ size 4932752872
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0f29190e06cadb4a0c1a3c3701af442c2374a09286004779e98d6d25caccdb6
3
+ size 2158500536
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|im_end|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151646": {
29
+ "content": "<image>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151647": {
37
+ "content": "<video>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "additional_special_tokens": [
46
+ "<|im_start|>",
47
+ "<|im_end|>"
48
+ ],
49
+ "bos_token": null,
50
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
51
+ "clean_up_tokenization_spaces": false,
52
+ "eos_token": "<|im_end|>",
53
+ "errors": "replace",
54
+ "max_length": null,
55
+ "model_max_length": 2048,
56
+ "pad_to_multiple_of": null,
57
+ "pad_token": "<|endoftext|>",
58
+ "pad_token_type_id": 0,
59
+ "padding_side": "right",
60
+ "processor_class": "LlavaOnevisionProcessor",
61
+ "split_special_tokens": false,
62
+ "tokenizer_class": "Qwen2Tokenizer",
63
+ "unk_token": null
64
+ }
trainer_state.json ADDED
@@ -0,0 +1,2283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.23277467411545624,
5
+ "eval_steps": 500,
6
+ "global_step": 125,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.00186219739292365,
13
+ "grad_norm": 40.418399810791016,
14
+ "learning_rate": 9.259259259259259e-08,
15
+ "logps/chosen": -143.09214782714844,
16
+ "logps/rejected": -159.23831176757812,
17
+ "loss": 0.6931,
18
+ "losses/dpo": 0.6931471824645996,
19
+ "losses/sft": 1.9754583835601807,
20
+ "losses/total": 0.6931471824645996,
21
+ "ref_logps/chosen": -143.09214782714844,
22
+ "ref_logps/rejected": -159.23831176757812,
23
+ "rewards/accuracies": 0.0,
24
+ "rewards/chosen": 0.0,
25
+ "rewards/margins": 0.0,
26
+ "rewards/rejected": 0.0,
27
+ "step": 1
28
+ },
29
+ {
30
+ "epoch": 0.0037243947858473,
31
+ "grad_norm": 40.48415756225586,
32
+ "learning_rate": 1.8518518518518518e-07,
33
+ "logps/chosen": -139.04046630859375,
34
+ "logps/rejected": -167.1217041015625,
35
+ "loss": 0.6931,
36
+ "losses/dpo": 0.6931471824645996,
37
+ "losses/sft": 2.354458808898926,
38
+ "losses/total": 0.6931471824645996,
39
+ "ref_logps/chosen": -139.04046630859375,
40
+ "ref_logps/rejected": -167.1217041015625,
41
+ "rewards/accuracies": 0.0,
42
+ "rewards/chosen": 0.0,
43
+ "rewards/margins": 0.0,
44
+ "rewards/rejected": 0.0,
45
+ "step": 2
46
+ },
47
+ {
48
+ "epoch": 0.00558659217877095,
49
+ "grad_norm": 43.48381042480469,
50
+ "learning_rate": 2.7777777777777776e-07,
51
+ "logps/chosen": -145.57992553710938,
52
+ "logps/rejected": -169.97146606445312,
53
+ "loss": 0.6981,
54
+ "losses/dpo": 0.6891708374023438,
55
+ "losses/sft": 2.1739914417266846,
56
+ "losses/total": 0.6891708374023438,
57
+ "ref_logps/chosen": -145.53663635253906,
58
+ "ref_logps/rejected": -170.02041625976562,
59
+ "rewards/accuracies": 0.484375,
60
+ "rewards/chosen": -0.00432817917317152,
61
+ "rewards/margins": -0.009223854169249535,
62
+ "rewards/rejected": 0.004895674996078014,
63
+ "step": 3
64
+ },
65
+ {
66
+ "epoch": 0.0074487895716946,
67
+ "grad_norm": 40.21121597290039,
68
+ "learning_rate": 3.7037037037037036e-07,
69
+ "logps/chosen": -145.57131958007812,
70
+ "logps/rejected": -162.80123901367188,
71
+ "loss": 0.6915,
72
+ "losses/dpo": 0.6871472597122192,
73
+ "losses/sft": 2.114041805267334,
74
+ "losses/total": 0.6871472597122192,
75
+ "ref_logps/chosen": -145.52674865722656,
76
+ "ref_logps/rejected": -162.71473693847656,
77
+ "rewards/accuracies": 0.578125,
78
+ "rewards/chosen": -0.004457235801964998,
79
+ "rewards/margins": 0.004192234016954899,
80
+ "rewards/rejected": -0.00864946935325861,
81
+ "step": 4
82
+ },
83
+ {
84
+ "epoch": 0.00931098696461825,
85
+ "grad_norm": 43.5928955078125,
86
+ "learning_rate": 4.6296296296296297e-07,
87
+ "logps/chosen": -149.61972045898438,
88
+ "logps/rejected": -171.2332763671875,
89
+ "loss": 0.6897,
90
+ "losses/dpo": 0.6956955194473267,
91
+ "losses/sft": 1.9878240823745728,
92
+ "losses/total": 0.6956955194473267,
93
+ "ref_logps/chosen": -149.57879638671875,
94
+ "ref_logps/rejected": -171.1157684326172,
95
+ "rewards/accuracies": 0.5625,
96
+ "rewards/chosen": -0.004093348979949951,
97
+ "rewards/margins": 0.007657159119844437,
98
+ "rewards/rejected": -0.011750508099794388,
99
+ "step": 5
100
+ },
101
+ {
102
+ "epoch": 0.0111731843575419,
103
+ "grad_norm": 43.37643051147461,
104
+ "learning_rate": 5.555555555555555e-07,
105
+ "logps/chosen": -144.1446075439453,
106
+ "logps/rejected": -165.30984497070312,
107
+ "loss": 0.6839,
108
+ "losses/dpo": 0.7011494636535645,
109
+ "losses/sft": 2.2325987815856934,
110
+ "losses/total": 0.7011494636535645,
111
+ "ref_logps/chosen": -144.15261840820312,
112
+ "ref_logps/rejected": -165.12521362304688,
113
+ "rewards/accuracies": 0.609375,
114
+ "rewards/chosen": 0.0007997989887371659,
115
+ "rewards/margins": 0.019263559952378273,
116
+ "rewards/rejected": -0.01846376061439514,
117
+ "step": 6
118
+ },
119
+ {
120
+ "epoch": 0.01303538175046555,
121
+ "grad_norm": 41.759315490722656,
122
+ "learning_rate": 6.481481481481481e-07,
123
+ "logps/chosen": -140.94808959960938,
124
+ "logps/rejected": -161.61846923828125,
125
+ "loss": 0.6677,
126
+ "losses/dpo": 0.6434986591339111,
127
+ "losses/sft": 2.4416794776916504,
128
+ "losses/total": 0.6434986591339111,
129
+ "ref_logps/chosen": -140.9979248046875,
130
+ "ref_logps/rejected": -161.13681030273438,
131
+ "rewards/accuracies": 0.734375,
132
+ "rewards/chosen": 0.004983758553862572,
133
+ "rewards/margins": 0.05314992740750313,
134
+ "rewards/rejected": -0.048166170716285706,
135
+ "step": 7
136
+ },
137
+ {
138
+ "epoch": 0.0148975791433892,
139
+ "grad_norm": 40.88422393798828,
140
+ "learning_rate": 7.407407407407407e-07,
141
+ "logps/chosen": -150.63833618164062,
142
+ "logps/rejected": -174.64923095703125,
143
+ "loss": 0.6588,
144
+ "losses/dpo": 0.639857292175293,
145
+ "losses/sft": 2.3609848022460938,
146
+ "losses/total": 0.639857292175293,
147
+ "ref_logps/chosen": -150.88568115234375,
148
+ "ref_logps/rejected": -174.18106079101562,
149
+ "rewards/accuracies": 0.796875,
150
+ "rewards/chosen": 0.02473468706011772,
151
+ "rewards/margins": 0.0715511366724968,
152
+ "rewards/rejected": -0.046816445887088776,
153
+ "step": 8
154
+ },
155
+ {
156
+ "epoch": 0.01675977653631285,
157
+ "grad_norm": 36.576690673828125,
158
+ "learning_rate": 8.333333333333333e-07,
159
+ "logps/chosen": -146.96463012695312,
160
+ "logps/rejected": -174.3262939453125,
161
+ "loss": 0.6259,
162
+ "losses/dpo": 0.6055787801742554,
163
+ "losses/sft": 2.1100568771362305,
164
+ "losses/total": 0.6055787801742554,
165
+ "ref_logps/chosen": -147.0458221435547,
166
+ "ref_logps/rejected": -172.9729461669922,
167
+ "rewards/accuracies": 0.9375,
168
+ "rewards/chosen": 0.008118623867630959,
169
+ "rewards/margins": 0.14345335960388184,
170
+ "rewards/rejected": -0.13533474504947662,
171
+ "step": 9
172
+ },
173
+ {
174
+ "epoch": 0.0186219739292365,
175
+ "grad_norm": 41.190025329589844,
176
+ "learning_rate": 9.259259259259259e-07,
177
+ "logps/chosen": -154.90782165527344,
178
+ "logps/rejected": -160.24624633789062,
179
+ "loss": 0.5904,
180
+ "losses/dpo": 0.5785373449325562,
181
+ "losses/sft": 2.5960636138916016,
182
+ "losses/total": 0.5785373449325562,
183
+ "ref_logps/chosen": -155.20986938476562,
184
+ "ref_logps/rejected": -158.2913055419922,
185
+ "rewards/accuracies": 0.921875,
186
+ "rewards/chosen": 0.030205056071281433,
187
+ "rewards/margins": 0.2256987988948822,
188
+ "rewards/rejected": -0.19549374282360077,
189
+ "step": 10
190
+ },
191
+ {
192
+ "epoch": 0.020484171322160148,
193
+ "grad_norm": 32.080810546875,
194
+ "learning_rate": 1.0185185185185185e-06,
195
+ "logps/chosen": -138.2403106689453,
196
+ "logps/rejected": -162.02291870117188,
197
+ "loss": 0.5437,
198
+ "losses/dpo": 0.5474151968955994,
199
+ "losses/sft": 1.9134595394134521,
200
+ "losses/total": 0.5474151968955994,
201
+ "ref_logps/chosen": -138.54551696777344,
202
+ "ref_logps/rejected": -158.88693237304688,
203
+ "rewards/accuracies": 0.9375,
204
+ "rewards/chosen": 0.030520910397171974,
205
+ "rewards/margins": 0.34412044286727905,
206
+ "rewards/rejected": -0.31359952688217163,
207
+ "step": 11
208
+ },
209
+ {
210
+ "epoch": 0.0223463687150838,
211
+ "grad_norm": 29.643903732299805,
212
+ "learning_rate": 1.111111111111111e-06,
213
+ "logps/chosen": -154.15118408203125,
214
+ "logps/rejected": -180.0041961669922,
215
+ "loss": 0.4393,
216
+ "losses/dpo": 0.4857974946498871,
217
+ "losses/sft": 2.032329797744751,
218
+ "losses/total": 0.4857974946498871,
219
+ "ref_logps/chosen": -154.87046813964844,
220
+ "ref_logps/rejected": -174.29629516601562,
221
+ "rewards/accuracies": 0.96875,
222
+ "rewards/chosen": 0.0719279944896698,
223
+ "rewards/margins": 0.6427179574966431,
224
+ "rewards/rejected": -0.5707899332046509,
225
+ "step": 12
226
+ },
227
+ {
228
+ "epoch": 0.024208566108007448,
229
+ "grad_norm": 27.43463134765625,
230
+ "learning_rate": 1.2037037037037037e-06,
231
+ "logps/chosen": -146.56866455078125,
232
+ "logps/rejected": -173.71633911132812,
233
+ "loss": 0.4487,
234
+ "losses/dpo": 0.4681214988231659,
235
+ "losses/sft": 2.5795841217041016,
236
+ "losses/total": 0.4681214988231659,
237
+ "ref_logps/chosen": -146.011962890625,
238
+ "ref_logps/rejected": -166.83184814453125,
239
+ "rewards/accuracies": 0.9375,
240
+ "rewards/chosen": -0.055669985711574554,
241
+ "rewards/margins": 0.6327812075614929,
242
+ "rewards/rejected": -0.6884511709213257,
243
+ "step": 13
244
+ },
245
+ {
246
+ "epoch": 0.0260707635009311,
247
+ "grad_norm": 24.864574432373047,
248
+ "learning_rate": 1.2962962962962962e-06,
249
+ "logps/chosen": -151.02015686035156,
250
+ "logps/rejected": -181.14610290527344,
251
+ "loss": 0.3711,
252
+ "losses/dpo": 0.4647141695022583,
253
+ "losses/sft": 2.417238473892212,
254
+ "losses/total": 0.4647141695022583,
255
+ "ref_logps/chosen": -151.75128173828125,
256
+ "ref_logps/rejected": -172.87350463867188,
257
+ "rewards/accuracies": 0.96875,
258
+ "rewards/chosen": 0.07311299443244934,
259
+ "rewards/margins": 0.9003715515136719,
260
+ "rewards/rejected": -0.8272585272789001,
261
+ "step": 14
262
+ },
263
+ {
264
+ "epoch": 0.027932960893854747,
265
+ "grad_norm": 22.773723602294922,
266
+ "learning_rate": 1.3888888888888892e-06,
267
+ "logps/chosen": -146.68460083007812,
268
+ "logps/rejected": -180.87896728515625,
269
+ "loss": 0.3584,
270
+ "losses/dpo": 0.33828622102737427,
271
+ "losses/sft": 2.191209077835083,
272
+ "losses/total": 0.33828622102737427,
273
+ "ref_logps/chosen": -145.39768981933594,
274
+ "ref_logps/rejected": -170.00677490234375,
275
+ "rewards/accuracies": 0.984375,
276
+ "rewards/chosen": -0.12869027256965637,
277
+ "rewards/margins": 0.9585269689559937,
278
+ "rewards/rejected": -1.0872172117233276,
279
+ "step": 15
280
+ },
281
+ {
282
+ "epoch": 0.0297951582867784,
283
+ "grad_norm": 17.319433212280273,
284
+ "learning_rate": 1.4814814814814815e-06,
285
+ "logps/chosen": -157.9118194580078,
286
+ "logps/rejected": -180.584228515625,
287
+ "loss": 0.253,
288
+ "losses/dpo": 0.30005261301994324,
289
+ "losses/sft": 2.218407154083252,
290
+ "losses/total": 0.30005261301994324,
291
+ "ref_logps/chosen": -155.03277587890625,
292
+ "ref_logps/rejected": -161.18026733398438,
293
+ "rewards/accuracies": 0.984375,
294
+ "rewards/chosen": -0.2879039943218231,
295
+ "rewards/margins": 1.652491807937622,
296
+ "rewards/rejected": -1.940395712852478,
297
+ "step": 16
298
+ },
299
+ {
300
+ "epoch": 0.03165735567970205,
301
+ "grad_norm": 14.82257080078125,
302
+ "learning_rate": 1.5740740740740742e-06,
303
+ "logps/chosen": -151.82461547851562,
304
+ "logps/rejected": -182.52401733398438,
305
+ "loss": 0.2197,
306
+ "losses/dpo": 0.2500099837779999,
307
+ "losses/sft": 2.3733954429626465,
308
+ "losses/total": 0.2500099837779999,
309
+ "ref_logps/chosen": -147.43585205078125,
310
+ "ref_logps/rejected": -158.81658935546875,
311
+ "rewards/accuracies": 0.953125,
312
+ "rewards/chosen": -0.438876748085022,
313
+ "rewards/margins": 1.9318653345108032,
314
+ "rewards/rejected": -2.3707423210144043,
315
+ "step": 17
316
+ },
317
+ {
318
+ "epoch": 0.0335195530726257,
319
+ "grad_norm": 15.098052978515625,
320
+ "learning_rate": 1.6666666666666667e-06,
321
+ "logps/chosen": -146.62161254882812,
322
+ "logps/rejected": -192.061767578125,
323
+ "loss": 0.2086,
324
+ "losses/dpo": 0.087049201130867,
325
+ "losses/sft": 2.235438585281372,
326
+ "losses/total": 0.087049201130867,
327
+ "ref_logps/chosen": -142.35076904296875,
328
+ "ref_logps/rejected": -164.73597717285156,
329
+ "rewards/accuracies": 0.921875,
330
+ "rewards/chosen": -0.42708471417427063,
331
+ "rewards/margins": 2.305494785308838,
332
+ "rewards/rejected": -2.732579469680786,
333
+ "step": 18
334
+ },
335
+ {
336
+ "epoch": 0.035381750465549346,
337
+ "grad_norm": 14.139907836914062,
338
+ "learning_rate": 1.7592592592592594e-06,
339
+ "logps/chosen": -151.61148071289062,
340
+ "logps/rejected": -204.13427734375,
341
+ "loss": 0.1831,
342
+ "losses/dpo": 0.2179492712020874,
343
+ "losses/sft": 2.547309637069702,
344
+ "losses/total": 0.2179492712020874,
345
+ "ref_logps/chosen": -145.25982666015625,
346
+ "ref_logps/rejected": -172.7918701171875,
347
+ "rewards/accuracies": 0.984375,
348
+ "rewards/chosen": -0.6351636648178101,
349
+ "rewards/margins": 2.4990787506103516,
350
+ "rewards/rejected": -3.134242534637451,
351
+ "step": 19
352
+ },
353
+ {
354
+ "epoch": 0.037243947858473,
355
+ "grad_norm": 15.214645385742188,
356
+ "learning_rate": 1.8518518518518519e-06,
357
+ "logps/chosen": -149.88876342773438,
358
+ "logps/rejected": -218.4535675048828,
359
+ "loss": 0.181,
360
+ "losses/dpo": 0.18753334879875183,
361
+ "losses/sft": 2.394906759262085,
362
+ "losses/total": 0.18753334879875183,
363
+ "ref_logps/chosen": -142.5288543701172,
364
+ "ref_logps/rejected": -185.18878173828125,
365
+ "rewards/accuracies": 0.9375,
366
+ "rewards/chosen": -0.7359906435012817,
367
+ "rewards/margins": 2.590489387512207,
368
+ "rewards/rejected": -3.3264803886413574,
369
+ "step": 20
370
+ },
371
+ {
372
+ "epoch": 0.03910614525139665,
373
+ "grad_norm": 15.793328285217285,
374
+ "learning_rate": 1.944444444444445e-06,
375
+ "logps/chosen": -155.686279296875,
376
+ "logps/rejected": -189.35899353027344,
377
+ "loss": 0.1946,
378
+ "losses/dpo": 0.2072586715221405,
379
+ "losses/sft": 2.1763200759887695,
380
+ "losses/total": 0.2072586715221405,
381
+ "ref_logps/chosen": -148.760986328125,
382
+ "ref_logps/rejected": -153.56321716308594,
383
+ "rewards/accuracies": 0.96875,
384
+ "rewards/chosen": -0.6925297379493713,
385
+ "rewards/margins": 2.88704776763916,
386
+ "rewards/rejected": -3.5795774459838867,
387
+ "step": 21
388
+ },
389
+ {
390
+ "epoch": 0.040968342644320296,
391
+ "grad_norm": 15.932718276977539,
392
+ "learning_rate": 2.037037037037037e-06,
393
+ "logps/chosen": -147.47117614746094,
394
+ "logps/rejected": -206.65597534179688,
395
+ "loss": 0.1655,
396
+ "losses/dpo": 0.10909190773963928,
397
+ "losses/sft": 2.1695094108581543,
398
+ "losses/total": 0.10909190773963928,
399
+ "ref_logps/chosen": -137.73451232910156,
400
+ "ref_logps/rejected": -160.2368927001953,
401
+ "rewards/accuracies": 0.9375,
402
+ "rewards/chosen": -0.9736669063568115,
403
+ "rewards/margins": 3.6682400703430176,
404
+ "rewards/rejected": -4.641907691955566,
405
+ "step": 22
406
+ },
407
+ {
408
+ "epoch": 0.04283054003724395,
409
+ "grad_norm": 11.131953239440918,
410
+ "learning_rate": 2.1296296296296298e-06,
411
+ "logps/chosen": -172.5790252685547,
412
+ "logps/rejected": -234.46731567382812,
413
+ "loss": 0.0974,
414
+ "losses/dpo": 0.25966331362724304,
415
+ "losses/sft": 2.784914255142212,
416
+ "losses/total": 0.25966331362724304,
417
+ "ref_logps/chosen": -154.8829345703125,
418
+ "ref_logps/rejected": -172.43850708007812,
419
+ "rewards/accuracies": 0.96875,
420
+ "rewards/chosen": -1.7696071863174438,
421
+ "rewards/margins": 4.433272361755371,
422
+ "rewards/rejected": -6.202879905700684,
423
+ "step": 23
424
+ },
425
+ {
426
+ "epoch": 0.0446927374301676,
427
+ "grad_norm": 9.715975761413574,
428
+ "learning_rate": 2.222222222222222e-06,
429
+ "logps/chosen": -161.45799255371094,
430
+ "logps/rejected": -231.06072998046875,
431
+ "loss": 0.0859,
432
+ "losses/dpo": 0.04074842855334282,
433
+ "losses/sft": 2.738722324371338,
434
+ "losses/total": 0.04074842855334282,
435
+ "ref_logps/chosen": -142.22097778320312,
436
+ "ref_logps/rejected": -161.80909729003906,
437
+ "rewards/accuracies": 0.984375,
438
+ "rewards/chosen": -1.9237014055252075,
439
+ "rewards/margins": 5.001461029052734,
440
+ "rewards/rejected": -6.9251627922058105,
441
+ "step": 24
442
+ },
443
+ {
444
+ "epoch": 0.04655493482309125,
445
+ "grad_norm": 14.314465522766113,
446
+ "learning_rate": 2.314814814814815e-06,
447
+ "logps/chosen": -170.95504760742188,
448
+ "logps/rejected": -257.62115478515625,
449
+ "loss": 0.124,
450
+ "losses/dpo": 0.004803886171430349,
451
+ "losses/sft": 2.606781244277954,
452
+ "losses/total": 0.004803886171430349,
453
+ "ref_logps/chosen": -146.23434448242188,
454
+ "ref_logps/rejected": -173.50958251953125,
455
+ "rewards/accuracies": 0.953125,
456
+ "rewards/chosen": -2.472069263458252,
457
+ "rewards/margins": 5.939087867736816,
458
+ "rewards/rejected": -8.411157608032227,
459
+ "step": 25
460
+ },
461
+ {
462
+ "epoch": 0.048417132216014895,
463
+ "grad_norm": 16.382986068725586,
464
+ "learning_rate": 2.4074074074074075e-06,
465
+ "logps/chosen": -171.77096557617188,
466
+ "logps/rejected": -249.99264526367188,
467
+ "loss": 0.1927,
468
+ "losses/dpo": 0.023840777575969696,
469
+ "losses/sft": 2.574446201324463,
470
+ "losses/total": 0.023840777575969696,
471
+ "ref_logps/chosen": -146.96835327148438,
472
+ "ref_logps/rejected": -168.13232421875,
473
+ "rewards/accuracies": 0.96875,
474
+ "rewards/chosen": -2.4802613258361816,
475
+ "rewards/margins": 5.705770969390869,
476
+ "rewards/rejected": -8.18603229522705,
477
+ "step": 26
478
+ },
479
+ {
480
+ "epoch": 0.05027932960893855,
481
+ "grad_norm": 9.626073837280273,
482
+ "learning_rate": 2.5e-06,
483
+ "logps/chosen": -169.91448974609375,
484
+ "logps/rejected": -251.7610626220703,
485
+ "loss": 0.058,
486
+ "losses/dpo": 0.0014569438062608242,
487
+ "losses/sft": 2.5121726989746094,
488
+ "losses/total": 0.0014569438062608242,
489
+ "ref_logps/chosen": -141.93600463867188,
490
+ "ref_logps/rejected": -160.8459014892578,
491
+ "rewards/accuracies": 0.984375,
492
+ "rewards/chosen": -2.7978479862213135,
493
+ "rewards/margins": 6.293667793273926,
494
+ "rewards/rejected": -9.091516494750977,
495
+ "step": 27
496
+ },
497
+ {
498
+ "epoch": 0.0521415270018622,
499
+ "grad_norm": 9.746648788452148,
500
+ "learning_rate": 2.5925925925925925e-06,
501
+ "logps/chosen": -173.24583435058594,
502
+ "logps/rejected": -267.49298095703125,
503
+ "loss": 0.0456,
504
+ "losses/dpo": 0.010660952888429165,
505
+ "losses/sft": 2.7988812923431396,
506
+ "losses/total": 0.010660952888429165,
507
+ "ref_logps/chosen": -147.74620056152344,
508
+ "ref_logps/rejected": -170.100830078125,
509
+ "rewards/accuracies": 0.984375,
510
+ "rewards/chosen": -2.5499653816223145,
511
+ "rewards/margins": 7.189251899719238,
512
+ "rewards/rejected": -9.739216804504395,
513
+ "step": 28
514
+ },
515
+ {
516
+ "epoch": 0.054003724394785846,
517
+ "grad_norm": 19.729585647583008,
518
+ "learning_rate": 2.6851851851851856e-06,
519
+ "logps/chosen": -173.5810546875,
520
+ "logps/rejected": -260.90985107421875,
521
+ "loss": 0.1598,
522
+ "losses/dpo": 0.11473169922828674,
523
+ "losses/sft": 2.465557098388672,
524
+ "losses/total": 0.11473169922828674,
525
+ "ref_logps/chosen": -144.18157958984375,
526
+ "ref_logps/rejected": -164.2210693359375,
527
+ "rewards/accuracies": 0.890625,
528
+ "rewards/chosen": -2.939948081970215,
529
+ "rewards/margins": 6.72892951965332,
530
+ "rewards/rejected": -9.668878555297852,
531
+ "step": 29
532
+ },
533
+ {
534
+ "epoch": 0.055865921787709494,
535
+ "grad_norm": 15.30510425567627,
536
+ "learning_rate": 2.7777777777777783e-06,
537
+ "logps/chosen": -170.92425537109375,
538
+ "logps/rejected": -277.4226379394531,
539
+ "loss": 0.1033,
540
+ "losses/dpo": 0.13019727170467377,
541
+ "losses/sft": 2.4969606399536133,
542
+ "losses/total": 0.13019727170467377,
543
+ "ref_logps/chosen": -142.30502319335938,
544
+ "ref_logps/rejected": -169.90554809570312,
545
+ "rewards/accuracies": 0.9375,
546
+ "rewards/chosen": -2.8619208335876465,
547
+ "rewards/margins": 7.889788627624512,
548
+ "rewards/rejected": -10.751708984375,
549
+ "step": 30
550
+ },
551
+ {
552
+ "epoch": 0.05772811918063315,
553
+ "grad_norm": 6.729362487792969,
554
+ "learning_rate": 2.8703703703703706e-06,
555
+ "logps/chosen": -177.1944122314453,
556
+ "logps/rejected": -277.68206787109375,
557
+ "loss": 0.0363,
558
+ "losses/dpo": 0.03610475733876228,
559
+ "losses/sft": 2.655313491821289,
560
+ "losses/total": 0.03610475733876228,
561
+ "ref_logps/chosen": -140.73678588867188,
562
+ "ref_logps/rejected": -164.96824645996094,
563
+ "rewards/accuracies": 1.0,
564
+ "rewards/chosen": -3.6457643508911133,
565
+ "rewards/margins": 7.625619888305664,
566
+ "rewards/rejected": -11.271383285522461,
567
+ "step": 31
568
+ },
569
+ {
570
+ "epoch": 0.0595903165735568,
571
+ "grad_norm": 15.916095733642578,
572
+ "learning_rate": 2.962962962962963e-06,
573
+ "logps/chosen": -168.07888793945312,
574
+ "logps/rejected": -266.97467041015625,
575
+ "loss": 0.1384,
576
+ "losses/dpo": 0.003958229906857014,
577
+ "losses/sft": 3.21946120262146,
578
+ "losses/total": 0.003958229906857014,
579
+ "ref_logps/chosen": -135.16700744628906,
580
+ "ref_logps/rejected": -166.81027221679688,
581
+ "rewards/accuracies": 0.9375,
582
+ "rewards/chosen": -3.2911877632141113,
583
+ "rewards/margins": 6.725253582000732,
584
+ "rewards/rejected": -10.016440391540527,
585
+ "step": 32
586
+ },
587
+ {
588
+ "epoch": 0.061452513966480445,
589
+ "grad_norm": 13.613255500793457,
590
+ "learning_rate": 3.055555555555556e-06,
591
+ "logps/chosen": -163.11419677734375,
592
+ "logps/rejected": -267.9139099121094,
593
+ "loss": 0.1,
594
+ "losses/dpo": 0.0983101949095726,
595
+ "losses/sft": 2.5492136478424072,
596
+ "losses/total": 0.0983101949095726,
597
+ "ref_logps/chosen": -140.14205932617188,
598
+ "ref_logps/rejected": -160.53872680664062,
599
+ "rewards/accuracies": 0.9375,
600
+ "rewards/chosen": -2.297213554382324,
601
+ "rewards/margins": 8.44030475616455,
602
+ "rewards/rejected": -10.737519264221191,
603
+ "step": 33
604
+ },
605
+ {
606
+ "epoch": 0.0633147113594041,
607
+ "grad_norm": 15.18605899810791,
608
+ "learning_rate": 3.1481481481481483e-06,
609
+ "logps/chosen": -167.43917846679688,
610
+ "logps/rejected": -285.1209411621094,
611
+ "loss": 0.1385,
612
+ "losses/dpo": 0.010567452758550644,
613
+ "losses/sft": 2.848440170288086,
614
+ "losses/total": 0.010567452758550644,
615
+ "ref_logps/chosen": -137.24496459960938,
616
+ "ref_logps/rejected": -172.29844665527344,
617
+ "rewards/accuracies": 0.96875,
618
+ "rewards/chosen": -3.019423723220825,
619
+ "rewards/margins": 8.262825965881348,
620
+ "rewards/rejected": -11.282249450683594,
621
+ "step": 34
622
+ },
623
+ {
624
+ "epoch": 0.06517690875232775,
625
+ "grad_norm": 14.179464340209961,
626
+ "learning_rate": 3.240740740740741e-06,
627
+ "logps/chosen": -163.87579345703125,
628
+ "logps/rejected": -273.77777099609375,
629
+ "loss": 0.1047,
630
+ "losses/dpo": 0.010419070720672607,
631
+ "losses/sft": 2.3946168422698975,
632
+ "losses/total": 0.010419070720672607,
633
+ "ref_logps/chosen": -144.74697875976562,
634
+ "ref_logps/rejected": -167.2154541015625,
635
+ "rewards/accuracies": 0.9375,
636
+ "rewards/chosen": -1.9128817319869995,
637
+ "rewards/margins": 8.743349075317383,
638
+ "rewards/rejected": -10.656229972839355,
639
+ "step": 35
640
+ },
641
+ {
642
+ "epoch": 0.0670391061452514,
643
+ "grad_norm": 7.81901741027832,
644
+ "learning_rate": 3.3333333333333333e-06,
645
+ "logps/chosen": -170.99649047851562,
646
+ "logps/rejected": -260.6686096191406,
647
+ "loss": 0.0408,
648
+ "losses/dpo": 0.03330932930111885,
649
+ "losses/sft": 2.9487311840057373,
650
+ "losses/total": 0.03330932930111885,
651
+ "ref_logps/chosen": -150.3196563720703,
652
+ "ref_logps/rejected": -156.96363830566406,
653
+ "rewards/accuracies": 0.984375,
654
+ "rewards/chosen": -2.0676848888397217,
655
+ "rewards/margins": 8.302813529968262,
656
+ "rewards/rejected": -10.370498657226562,
657
+ "step": 36
658
+ },
659
+ {
660
+ "epoch": 0.06890130353817504,
661
+ "grad_norm": 14.016197204589844,
662
+ "learning_rate": 3.4259259259259265e-06,
663
+ "logps/chosen": -161.38865661621094,
664
+ "logps/rejected": -281.09271240234375,
665
+ "loss": 0.1047,
666
+ "losses/dpo": 0.5629028677940369,
667
+ "losses/sft": 2.4373741149902344,
668
+ "losses/total": 0.5629028677940369,
669
+ "ref_logps/chosen": -143.14517211914062,
670
+ "ref_logps/rejected": -172.78611755371094,
671
+ "rewards/accuracies": 0.953125,
672
+ "rewards/chosen": -1.8243476152420044,
673
+ "rewards/margins": 9.006311416625977,
674
+ "rewards/rejected": -10.830659866333008,
675
+ "step": 37
676
+ },
677
+ {
678
+ "epoch": 0.07076350093109869,
679
+ "grad_norm": 15.18514347076416,
680
+ "learning_rate": 3.5185185185185187e-06,
681
+ "logps/chosen": -160.60586547851562,
682
+ "logps/rejected": -263.134765625,
683
+ "loss": 0.1468,
684
+ "losses/dpo": 0.018482662737369537,
685
+ "losses/sft": 2.2324092388153076,
686
+ "losses/total": 0.018482662737369537,
687
+ "ref_logps/chosen": -141.36175537109375,
688
+ "ref_logps/rejected": -164.61480712890625,
689
+ "rewards/accuracies": 0.953125,
690
+ "rewards/chosen": -1.9244120121002197,
691
+ "rewards/margins": 7.92758321762085,
692
+ "rewards/rejected": -9.851995468139648,
693
+ "step": 38
694
+ },
695
+ {
696
+ "epoch": 0.07262569832402235,
697
+ "grad_norm": 8.755450248718262,
698
+ "learning_rate": 3.6111111111111115e-06,
699
+ "logps/chosen": -170.1685791015625,
700
+ "logps/rejected": -288.41119384765625,
701
+ "loss": 0.0421,
702
+ "losses/dpo": 9.010350913740695e-05,
703
+ "losses/sft": 2.3953020572662354,
704
+ "losses/total": 9.010350913740695e-05,
705
+ "ref_logps/chosen": -152.65139770507812,
706
+ "ref_logps/rejected": -172.203857421875,
707
+ "rewards/accuracies": 0.984375,
708
+ "rewards/chosen": -1.751717448234558,
709
+ "rewards/margins": 9.86901569366455,
710
+ "rewards/rejected": -11.620733261108398,
711
+ "step": 39
712
+ },
713
+ {
714
+ "epoch": 0.074487895716946,
715
+ "grad_norm": 2.3320696353912354,
716
+ "learning_rate": 3.7037037037037037e-06,
717
+ "logps/chosen": -164.16981506347656,
718
+ "logps/rejected": -263.49163818359375,
719
+ "loss": 0.0103,
720
+ "losses/dpo": 0.013380440883338451,
721
+ "losses/sft": 2.408010482788086,
722
+ "losses/total": 0.013380440883338451,
723
+ "ref_logps/chosen": -153.0699462890625,
724
+ "ref_logps/rejected": -166.4036407470703,
725
+ "rewards/accuracies": 1.0,
726
+ "rewards/chosen": -1.1099885702133179,
727
+ "rewards/margins": 8.598810195922852,
728
+ "rewards/rejected": -9.708799362182617,
729
+ "step": 40
730
+ },
731
+ {
732
+ "epoch": 0.07635009310986965,
733
+ "grad_norm": 13.349935531616211,
734
+ "learning_rate": 3.796296296296297e-06,
735
+ "logps/chosen": -162.9290771484375,
736
+ "logps/rejected": -275.8412780761719,
737
+ "loss": 0.0746,
738
+ "losses/dpo": 0.0015751949977129698,
739
+ "losses/sft": 2.3437771797180176,
740
+ "losses/total": 0.0015751949977129698,
741
+ "ref_logps/chosen": -151.49713134765625,
742
+ "ref_logps/rejected": -168.54776000976562,
743
+ "rewards/accuracies": 0.953125,
744
+ "rewards/chosen": -1.1431937217712402,
745
+ "rewards/margins": 9.586158752441406,
746
+ "rewards/rejected": -10.729352951049805,
747
+ "step": 41
748
+ },
749
+ {
750
+ "epoch": 0.0782122905027933,
751
+ "grad_norm": 7.708398818969727,
752
+ "learning_rate": 3.88888888888889e-06,
753
+ "logps/chosen": -170.53414916992188,
754
+ "logps/rejected": -267.343017578125,
755
+ "loss": 0.0415,
756
+ "losses/dpo": 0.16293640434741974,
757
+ "losses/sft": 2.4445619583129883,
758
+ "losses/total": 0.16293640434741974,
759
+ "ref_logps/chosen": -158.01504516601562,
760
+ "ref_logps/rejected": -162.6872100830078,
761
+ "rewards/accuracies": 0.984375,
762
+ "rewards/chosen": -1.2519094944000244,
763
+ "rewards/margins": 9.213672637939453,
764
+ "rewards/rejected": -10.465582847595215,
765
+ "step": 42
766
+ },
767
+ {
768
+ "epoch": 0.08007448789571694,
769
+ "grad_norm": 8.482429504394531,
770
+ "learning_rate": 3.9814814814814814e-06,
771
+ "logps/chosen": -152.68186950683594,
772
+ "logps/rejected": -256.3250427246094,
773
+ "loss": 0.0489,
774
+ "losses/dpo": 0.005869926419109106,
775
+ "losses/sft": 2.2688705921173096,
776
+ "losses/total": 0.005869926419109106,
777
+ "ref_logps/chosen": -143.47671508789062,
778
+ "ref_logps/rejected": -158.04229736328125,
779
+ "rewards/accuracies": 0.96875,
780
+ "rewards/chosen": -0.9205170273780823,
781
+ "rewards/margins": 8.907758712768555,
782
+ "rewards/rejected": -9.828275680541992,
783
+ "step": 43
784
+ },
785
+ {
786
+ "epoch": 0.08193668528864059,
787
+ "grad_norm": 4.836800575256348,
788
+ "learning_rate": 4.074074074074074e-06,
789
+ "logps/chosen": -161.54238891601562,
790
+ "logps/rejected": -249.66888427734375,
791
+ "loss": 0.0293,
792
+ "losses/dpo": 0.007885000668466091,
793
+ "losses/sft": 2.5034093856811523,
794
+ "losses/total": 0.007885000668466091,
795
+ "ref_logps/chosen": -150.9593963623047,
796
+ "ref_logps/rejected": -160.7205810546875,
797
+ "rewards/accuracies": 0.984375,
798
+ "rewards/chosen": -1.05829918384552,
799
+ "rewards/margins": 7.8365325927734375,
800
+ "rewards/rejected": -8.894831657409668,
801
+ "step": 44
802
+ },
803
+ {
804
+ "epoch": 0.08379888268156424,
805
+ "grad_norm": 7.306879997253418,
806
+ "learning_rate": 4.166666666666667e-06,
807
+ "logps/chosen": -168.57354736328125,
808
+ "logps/rejected": -273.81622314453125,
809
+ "loss": 0.0409,
810
+ "losses/dpo": 0.044236376881599426,
811
+ "losses/sft": 2.285489559173584,
812
+ "losses/total": 0.044236376881599426,
813
+ "ref_logps/chosen": -153.68539428710938,
814
+ "ref_logps/rejected": -173.37750244140625,
815
+ "rewards/accuracies": 1.0,
816
+ "rewards/chosen": -1.4888172149658203,
817
+ "rewards/margins": 8.555052757263184,
818
+ "rewards/rejected": -10.043869972229004,
819
+ "step": 45
820
+ },
821
+ {
822
+ "epoch": 0.0856610800744879,
823
+ "grad_norm": 8.716976165771484,
824
+ "learning_rate": 4.2592592592592596e-06,
825
+ "logps/chosen": -157.63235473632812,
826
+ "logps/rejected": -256.28546142578125,
827
+ "loss": 0.0498,
828
+ "losses/dpo": 0.047989651560783386,
829
+ "losses/sft": 2.594020366668701,
830
+ "losses/total": 0.047989651560783386,
831
+ "ref_logps/chosen": -149.37562561035156,
832
+ "ref_logps/rejected": -163.25059509277344,
833
+ "rewards/accuracies": 0.96875,
834
+ "rewards/chosen": -0.8256728053092957,
835
+ "rewards/margins": 8.477815628051758,
836
+ "rewards/rejected": -9.303489685058594,
837
+ "step": 46
838
+ },
839
+ {
840
+ "epoch": 0.08752327746741155,
841
+ "grad_norm": 14.776224136352539,
842
+ "learning_rate": 4.351851851851852e-06,
843
+ "logps/chosen": -159.35926818847656,
844
+ "logps/rejected": -254.82000732421875,
845
+ "loss": 0.0947,
846
+ "losses/dpo": 0.0015063448809087276,
847
+ "losses/sft": 2.186452627182007,
848
+ "losses/total": 0.0015063448809087276,
849
+ "ref_logps/chosen": -153.80572509765625,
850
+ "ref_logps/rejected": -160.63690185546875,
851
+ "rewards/accuracies": 0.953125,
852
+ "rewards/chosen": -0.555354654788971,
853
+ "rewards/margins": 8.862955093383789,
854
+ "rewards/rejected": -9.41831111907959,
855
+ "step": 47
856
+ },
857
+ {
858
+ "epoch": 0.0893854748603352,
859
+ "grad_norm": 6.18978214263916,
860
+ "learning_rate": 4.444444444444444e-06,
861
+ "logps/chosen": -146.09950256347656,
862
+ "logps/rejected": -269.2020568847656,
863
+ "loss": 0.0442,
864
+ "losses/dpo": 0.010637683793902397,
865
+ "losses/sft": 2.3249499797821045,
866
+ "losses/total": 0.010637683793902397,
867
+ "ref_logps/chosen": -140.32867431640625,
868
+ "ref_logps/rejected": -161.42984008789062,
869
+ "rewards/accuracies": 0.984375,
870
+ "rewards/chosen": -0.5770829319953918,
871
+ "rewards/margins": 10.200138092041016,
872
+ "rewards/rejected": -10.7772216796875,
873
+ "step": 48
874
+ },
875
+ {
876
+ "epoch": 0.09124767225325885,
877
+ "grad_norm": 8.178156852722168,
878
+ "learning_rate": 4.537037037037038e-06,
879
+ "logps/chosen": -150.715087890625,
880
+ "logps/rejected": -288.4056396484375,
881
+ "loss": 0.0446,
882
+ "losses/dpo": 0.2217281013727188,
883
+ "losses/sft": 2.4317214488983154,
884
+ "losses/total": 0.2217281013727188,
885
+ "ref_logps/chosen": -143.90017700195312,
886
+ "ref_logps/rejected": -177.4407958984375,
887
+ "rewards/accuracies": 0.984375,
888
+ "rewards/chosen": -0.6814901828765869,
889
+ "rewards/margins": 10.414993286132812,
890
+ "rewards/rejected": -11.09648323059082,
891
+ "step": 49
892
+ },
893
+ {
894
+ "epoch": 0.0931098696461825,
895
+ "grad_norm": 11.958553314208984,
896
+ "learning_rate": 4.62962962962963e-06,
897
+ "logps/chosen": -156.93243408203125,
898
+ "logps/rejected": -276.21051025390625,
899
+ "loss": 0.0786,
900
+ "losses/dpo": 0.020680470392107964,
901
+ "losses/sft": 2.4808478355407715,
902
+ "losses/total": 0.020680470392107964,
903
+ "ref_logps/chosen": -146.65036010742188,
904
+ "ref_logps/rejected": -166.49574279785156,
905
+ "rewards/accuracies": 0.96875,
906
+ "rewards/chosen": -1.0282070636749268,
907
+ "rewards/margins": 9.943269729614258,
908
+ "rewards/rejected": -10.971476554870605,
909
+ "step": 50
910
+ },
911
+ {
912
+ "epoch": 0.09497206703910614,
913
+ "grad_norm": 2.3756279945373535,
914
+ "learning_rate": 4.722222222222222e-06,
915
+ "logps/chosen": -146.2957763671875,
916
+ "logps/rejected": -290.5218505859375,
917
+ "loss": 0.0115,
918
+ "losses/dpo": 0.021228570491075516,
919
+ "losses/sft": 2.0093941688537598,
920
+ "losses/total": 0.021228570491075516,
921
+ "ref_logps/chosen": -144.70184326171875,
922
+ "ref_logps/rejected": -169.62815856933594,
923
+ "rewards/accuracies": 1.0,
924
+ "rewards/chosen": -0.15939302742481232,
925
+ "rewards/margins": 11.929975509643555,
926
+ "rewards/rejected": -12.08936882019043,
927
+ "step": 51
928
+ },
929
+ {
930
+ "epoch": 0.09683426443202979,
931
+ "grad_norm": 4.650004863739014,
932
+ "learning_rate": 4.814814814814815e-06,
933
+ "logps/chosen": -151.42132568359375,
934
+ "logps/rejected": -283.79669189453125,
935
+ "loss": 0.0183,
936
+ "losses/dpo": 0.005594337359070778,
937
+ "losses/sft": 2.4590542316436768,
938
+ "losses/total": 0.005594337359070778,
939
+ "ref_logps/chosen": -148.097412109375,
940
+ "ref_logps/rejected": -164.08462524414062,
941
+ "rewards/accuracies": 1.0,
942
+ "rewards/chosen": -0.33239230513572693,
943
+ "rewards/margins": 11.638812065124512,
944
+ "rewards/rejected": -11.97120475769043,
945
+ "step": 52
946
+ },
947
+ {
948
+ "epoch": 0.09869646182495345,
949
+ "grad_norm": 7.309722423553467,
950
+ "learning_rate": 4.907407407407408e-06,
951
+ "logps/chosen": -163.71875,
952
+ "logps/rejected": -280.90655517578125,
953
+ "loss": 0.0367,
954
+ "losses/dpo": 0.05281204357743263,
955
+ "losses/sft": 2.374516725540161,
956
+ "losses/total": 0.05281204357743263,
957
+ "ref_logps/chosen": -151.98492431640625,
958
+ "ref_logps/rejected": -164.55319213867188,
959
+ "rewards/accuracies": 1.0,
960
+ "rewards/chosen": -1.173383116722107,
961
+ "rewards/margins": 10.461955070495605,
962
+ "rewards/rejected": -11.635337829589844,
963
+ "step": 53
964
+ },
965
+ {
966
+ "epoch": 0.1005586592178771,
967
+ "grad_norm": 6.878481388092041,
968
+ "learning_rate": 5e-06,
969
+ "logps/chosen": -158.5335693359375,
970
+ "logps/rejected": -270.8926086425781,
971
+ "loss": 0.0462,
972
+ "losses/dpo": 0.008242600597441196,
973
+ "losses/sft": 2.6928701400756836,
974
+ "losses/total": 0.008242600597441196,
975
+ "ref_logps/chosen": -154.32550048828125,
976
+ "ref_logps/rejected": -162.00128173828125,
977
+ "rewards/accuracies": 0.96875,
978
+ "rewards/chosen": -0.4208071529865265,
979
+ "rewards/margins": 10.468324661254883,
980
+ "rewards/rejected": -10.889131546020508,
981
+ "step": 54
982
+ },
983
+ {
984
+ "epoch": 0.10242085661080075,
985
+ "grad_norm": 5.541274070739746,
986
+ "learning_rate": 4.989648033126294e-06,
987
+ "logps/chosen": -150.91574096679688,
988
+ "logps/rejected": -279.86248779296875,
989
+ "loss": 0.0329,
990
+ "losses/dpo": 0.0699252262711525,
991
+ "losses/sft": 2.2539589405059814,
992
+ "losses/total": 0.0699252262711525,
993
+ "ref_logps/chosen": -147.29766845703125,
994
+ "ref_logps/rejected": -170.07183837890625,
995
+ "rewards/accuracies": 0.984375,
996
+ "rewards/chosen": -0.36180704832077026,
997
+ "rewards/margins": 10.61725902557373,
998
+ "rewards/rejected": -10.97906494140625,
999
+ "step": 55
1000
+ },
1001
+ {
1002
+ "epoch": 0.1042830540037244,
1003
+ "grad_norm": 6.928717613220215,
1004
+ "learning_rate": 4.9792960662525884e-06,
1005
+ "logps/chosen": -157.90420532226562,
1006
+ "logps/rejected": -274.0709228515625,
1007
+ "loss": 0.0401,
1008
+ "losses/dpo": 0.0023821014910936356,
1009
+ "losses/sft": 2.440980911254883,
1010
+ "losses/total": 0.0023821014910936356,
1011
+ "ref_logps/chosen": -145.0316925048828,
1012
+ "ref_logps/rejected": -164.71649169921875,
1013
+ "rewards/accuracies": 0.984375,
1014
+ "rewards/chosen": -1.2872496843338013,
1015
+ "rewards/margins": 9.648194313049316,
1016
+ "rewards/rejected": -10.935443878173828,
1017
+ "step": 56
1018
+ },
1019
+ {
1020
+ "epoch": 0.10614525139664804,
1021
+ "grad_norm": 12.181221961975098,
1022
+ "learning_rate": 4.968944099378882e-06,
1023
+ "logps/chosen": -164.98733520507812,
1024
+ "logps/rejected": -292.568115234375,
1025
+ "loss": 0.0608,
1026
+ "losses/dpo": 0.2203754484653473,
1027
+ "losses/sft": 2.4729645252227783,
1028
+ "losses/total": 0.2203754484653473,
1029
+ "ref_logps/chosen": -145.78073120117188,
1030
+ "ref_logps/rejected": -170.54788208007812,
1031
+ "rewards/accuracies": 0.984375,
1032
+ "rewards/chosen": -1.9206593036651611,
1033
+ "rewards/margins": 10.28136157989502,
1034
+ "rewards/rejected": -12.202020645141602,
1035
+ "step": 57
1036
+ },
1037
+ {
1038
+ "epoch": 0.10800744878957169,
1039
+ "grad_norm": 3.31730580329895,
1040
+ "learning_rate": 4.9585921325051765e-06,
1041
+ "logps/chosen": -144.76385498046875,
1042
+ "logps/rejected": -279.8059997558594,
1043
+ "loss": 0.0189,
1044
+ "losses/dpo": 0.023249687626957893,
1045
+ "losses/sft": 2.241105079650879,
1046
+ "losses/total": 0.023249687626957893,
1047
+ "ref_logps/chosen": -135.7710723876953,
1048
+ "ref_logps/rejected": -163.89041137695312,
1049
+ "rewards/accuracies": 1.0,
1050
+ "rewards/chosen": -0.8992785215377808,
1051
+ "rewards/margins": 10.692280769348145,
1052
+ "rewards/rejected": -11.591558456420898,
1053
+ "step": 58
1054
+ },
1055
+ {
1056
+ "epoch": 0.10986964618249534,
1057
+ "grad_norm": 5.754706382751465,
1058
+ "learning_rate": 4.94824016563147e-06,
1059
+ "logps/chosen": -159.64266967773438,
1060
+ "logps/rejected": -272.6561584472656,
1061
+ "loss": 0.0327,
1062
+ "losses/dpo": 0.0525113008916378,
1063
+ "losses/sft": 2.5197577476501465,
1064
+ "losses/total": 0.0525113008916378,
1065
+ "ref_logps/chosen": -142.01556396484375,
1066
+ "ref_logps/rejected": -161.62533569335938,
1067
+ "rewards/accuracies": 1.0,
1068
+ "rewards/chosen": -1.7627112865447998,
1069
+ "rewards/margins": 9.340368270874023,
1070
+ "rewards/rejected": -11.103079795837402,
1071
+ "step": 59
1072
+ },
1073
+ {
1074
+ "epoch": 0.11173184357541899,
1075
+ "grad_norm": 2.9009838104248047,
1076
+ "learning_rate": 4.9378881987577645e-06,
1077
+ "logps/chosen": -153.46998596191406,
1078
+ "logps/rejected": -300.505615234375,
1079
+ "loss": 0.0109,
1080
+ "losses/dpo": 0.05081873759627342,
1081
+ "losses/sft": 2.3817033767700195,
1082
+ "losses/total": 0.05081873759627342,
1083
+ "ref_logps/chosen": -137.58828735351562,
1084
+ "ref_logps/rejected": -164.65621948242188,
1085
+ "rewards/accuracies": 1.0,
1086
+ "rewards/chosen": -1.5881686210632324,
1087
+ "rewards/margins": 11.996772766113281,
1088
+ "rewards/rejected": -13.584940910339355,
1089
+ "step": 60
1090
+ },
1091
+ {
1092
+ "epoch": 0.11359404096834265,
1093
+ "grad_norm": 10.7985258102417,
1094
+ "learning_rate": 4.927536231884059e-06,
1095
+ "logps/chosen": -161.22463989257812,
1096
+ "logps/rejected": -312.06597900390625,
1097
+ "loss": 0.0594,
1098
+ "losses/dpo": 0.02082718536257744,
1099
+ "losses/sft": 2.446563720703125,
1100
+ "losses/total": 0.02082718536257744,
1101
+ "ref_logps/chosen": -150.21656799316406,
1102
+ "ref_logps/rejected": -171.88088989257812,
1103
+ "rewards/accuracies": 0.984375,
1104
+ "rewards/chosen": -1.1008076667785645,
1105
+ "rewards/margins": 12.917702674865723,
1106
+ "rewards/rejected": -14.018510818481445,
1107
+ "step": 61
1108
+ },
1109
+ {
1110
+ "epoch": 0.1154562383612663,
1111
+ "grad_norm": 16.700149536132812,
1112
+ "learning_rate": 4.9171842650103525e-06,
1113
+ "logps/chosen": -157.88522338867188,
1114
+ "logps/rejected": -296.93817138671875,
1115
+ "loss": 0.0929,
1116
+ "losses/dpo": 0.0005755929159931839,
1117
+ "losses/sft": 2.514739751815796,
1118
+ "losses/total": 0.0005755929159931839,
1119
+ "ref_logps/chosen": -140.2955780029297,
1120
+ "ref_logps/rejected": -165.69281005859375,
1121
+ "rewards/accuracies": 0.984375,
1122
+ "rewards/chosen": -1.7589635848999023,
1123
+ "rewards/margins": 11.365575790405273,
1124
+ "rewards/rejected": -13.124539375305176,
1125
+ "step": 62
1126
+ },
1127
+ {
1128
+ "epoch": 0.11731843575418995,
1129
+ "grad_norm": 10.798019409179688,
1130
+ "learning_rate": 4.906832298136646e-06,
1131
+ "logps/chosen": -160.2606201171875,
1132
+ "logps/rejected": -316.3315734863281,
1133
+ "loss": 0.0485,
1134
+ "losses/dpo": 0.0018289771396666765,
1135
+ "losses/sft": 3.0345890522003174,
1136
+ "losses/total": 0.0018289771396666765,
1137
+ "ref_logps/chosen": -144.05250549316406,
1138
+ "ref_logps/rejected": -170.66879272460938,
1139
+ "rewards/accuracies": 0.984375,
1140
+ "rewards/chosen": -1.6208107471466064,
1141
+ "rewards/margins": 12.945466041564941,
1142
+ "rewards/rejected": -14.566276550292969,
1143
+ "step": 63
1144
+ },
1145
+ {
1146
+ "epoch": 0.1191806331471136,
1147
+ "grad_norm": 8.646073341369629,
1148
+ "learning_rate": 4.896480331262941e-06,
1149
+ "logps/chosen": -163.75540161132812,
1150
+ "logps/rejected": -315.20458984375,
1151
+ "loss": 0.0576,
1152
+ "losses/dpo": 0.014824606478214264,
1153
+ "losses/sft": 3.261763572692871,
1154
+ "losses/total": 0.014824606478214264,
1155
+ "ref_logps/chosen": -149.48907470703125,
1156
+ "ref_logps/rejected": -173.37435913085938,
1157
+ "rewards/accuracies": 0.984375,
1158
+ "rewards/chosen": -1.4266325235366821,
1159
+ "rewards/margins": 12.756391525268555,
1160
+ "rewards/rejected": -14.183023452758789,
1161
+ "step": 64
1162
+ },
1163
+ {
1164
+ "epoch": 0.12104283054003724,
1165
+ "grad_norm": 5.649742603302002,
1166
+ "learning_rate": 4.886128364389234e-06,
1167
+ "logps/chosen": -174.45712280273438,
1168
+ "logps/rejected": -324.653076171875,
1169
+ "loss": 0.018,
1170
+ "losses/dpo": 0.03694448992609978,
1171
+ "losses/sft": 2.400181293487549,
1172
+ "losses/total": 0.03694448992609978,
1173
+ "ref_logps/chosen": -155.43272399902344,
1174
+ "ref_logps/rejected": -175.6329345703125,
1175
+ "rewards/accuracies": 1.0,
1176
+ "rewards/chosen": -1.902438759803772,
1177
+ "rewards/margins": 12.999577522277832,
1178
+ "rewards/rejected": -14.902015686035156,
1179
+ "step": 65
1180
+ },
1181
+ {
1182
+ "epoch": 0.12290502793296089,
1183
+ "grad_norm": 4.2772345542907715,
1184
+ "learning_rate": 4.875776397515528e-06,
1185
+ "logps/chosen": -160.1391143798828,
1186
+ "logps/rejected": -298.70098876953125,
1187
+ "loss": 0.0198,
1188
+ "losses/dpo": 0.009300258941948414,
1189
+ "losses/sft": 2.3741018772125244,
1190
+ "losses/total": 0.009300258941948414,
1191
+ "ref_logps/chosen": -143.62374877929688,
1192
+ "ref_logps/rejected": -162.72705078125,
1193
+ "rewards/accuracies": 1.0,
1194
+ "rewards/chosen": -1.6515378952026367,
1195
+ "rewards/margins": 11.945855140686035,
1196
+ "rewards/rejected": -13.597392082214355,
1197
+ "step": 66
1198
+ },
1199
+ {
1200
+ "epoch": 0.12476722532588454,
1201
+ "grad_norm": 12.112936019897461,
1202
+ "learning_rate": 4.865424430641822e-06,
1203
+ "logps/chosen": -171.04205322265625,
1204
+ "logps/rejected": -303.92144775390625,
1205
+ "loss": 0.0626,
1206
+ "losses/dpo": 0.36290252208709717,
1207
+ "losses/sft": 2.4964911937713623,
1208
+ "losses/total": 0.36290252208709717,
1209
+ "ref_logps/chosen": -152.29478454589844,
1210
+ "ref_logps/rejected": -161.23959350585938,
1211
+ "rewards/accuracies": 0.984375,
1212
+ "rewards/chosen": -1.8747248649597168,
1213
+ "rewards/margins": 12.393461227416992,
1214
+ "rewards/rejected": -14.268186569213867,
1215
+ "step": 67
1216
+ },
1217
+ {
1218
+ "epoch": 0.1266294227188082,
1219
+ "grad_norm": 5.256114959716797,
1220
+ "learning_rate": 4.855072463768117e-06,
1221
+ "logps/chosen": -174.6351776123047,
1222
+ "logps/rejected": -348.933837890625,
1223
+ "loss": 0.0251,
1224
+ "losses/dpo": 0.09207179397344589,
1225
+ "losses/sft": 2.219914436340332,
1226
+ "losses/total": 0.09207179397344589,
1227
+ "ref_logps/chosen": -151.7803955078125,
1228
+ "ref_logps/rejected": -185.33236694335938,
1229
+ "rewards/accuracies": 1.0,
1230
+ "rewards/chosen": -2.285478353500366,
1231
+ "rewards/margins": 14.074670791625977,
1232
+ "rewards/rejected": -16.36014747619629,
1233
+ "step": 68
1234
+ },
1235
+ {
1236
+ "epoch": 0.12849162011173185,
1237
+ "grad_norm": 5.340096950531006,
1238
+ "learning_rate": 4.84472049689441e-06,
1239
+ "logps/chosen": -161.56605529785156,
1240
+ "logps/rejected": -303.75201416015625,
1241
+ "loss": 0.027,
1242
+ "losses/dpo": 0.013381706550717354,
1243
+ "losses/sft": 2.1893093585968018,
1244
+ "losses/total": 0.013381706550717354,
1245
+ "ref_logps/chosen": -150.4385986328125,
1246
+ "ref_logps/rejected": -168.53863525390625,
1247
+ "rewards/accuracies": 1.0,
1248
+ "rewards/chosen": -1.1127451658248901,
1249
+ "rewards/margins": 12.40859317779541,
1250
+ "rewards/rejected": -13.521337509155273,
1251
+ "step": 69
1252
+ },
1253
+ {
1254
+ "epoch": 0.1303538175046555,
1255
+ "grad_norm": 13.288958549499512,
1256
+ "learning_rate": 4.834368530020705e-06,
1257
+ "logps/chosen": -172.6832275390625,
1258
+ "logps/rejected": -334.8218078613281,
1259
+ "loss": 0.1264,
1260
+ "losses/dpo": 0.0077868797816336155,
1261
+ "losses/sft": 2.6989755630493164,
1262
+ "losses/total": 0.0077868797816336155,
1263
+ "ref_logps/chosen": -152.52114868164062,
1264
+ "ref_logps/rejected": -174.5692901611328,
1265
+ "rewards/accuracies": 0.96875,
1266
+ "rewards/chosen": -2.0162088871002197,
1267
+ "rewards/margins": 14.009044647216797,
1268
+ "rewards/rejected": -16.02525520324707,
1269
+ "step": 70
1270
+ },
1271
+ {
1272
+ "epoch": 0.13221601489757914,
1273
+ "grad_norm": 6.7856059074401855,
1274
+ "learning_rate": 4.824016563146998e-06,
1275
+ "logps/chosen": -166.0758056640625,
1276
+ "logps/rejected": -302.7711181640625,
1277
+ "loss": 0.0375,
1278
+ "losses/dpo": 0.008599307388067245,
1279
+ "losses/sft": 2.780581474304199,
1280
+ "losses/total": 0.008599307388067245,
1281
+ "ref_logps/chosen": -144.70916748046875,
1282
+ "ref_logps/rejected": -158.5432586669922,
1283
+ "rewards/accuracies": 0.984375,
1284
+ "rewards/chosen": -2.136662244796753,
1285
+ "rewards/margins": 12.286121368408203,
1286
+ "rewards/rejected": -14.422782897949219,
1287
+ "step": 71
1288
+ },
1289
+ {
1290
+ "epoch": 0.1340782122905028,
1291
+ "grad_norm": 7.3059282302856445,
1292
+ "learning_rate": 4.813664596273293e-06,
1293
+ "logps/chosen": -175.05950927734375,
1294
+ "logps/rejected": -346.2865905761719,
1295
+ "loss": 0.0272,
1296
+ "losses/dpo": 0.06259185075759888,
1297
+ "losses/sft": 2.080183982849121,
1298
+ "losses/total": 0.06259185075759888,
1299
+ "ref_logps/chosen": -153.53565979003906,
1300
+ "ref_logps/rejected": -175.8729248046875,
1301
+ "rewards/accuracies": 0.984375,
1302
+ "rewards/chosen": -2.152385950088501,
1303
+ "rewards/margins": 14.888980865478516,
1304
+ "rewards/rejected": -17.041364669799805,
1305
+ "step": 72
1306
+ },
1307
+ {
1308
+ "epoch": 0.13594040968342644,
1309
+ "grad_norm": 14.326214790344238,
1310
+ "learning_rate": 4.803312629399586e-06,
1311
+ "logps/chosen": -159.51559448242188,
1312
+ "logps/rejected": -303.34576416015625,
1313
+ "loss": 0.1862,
1314
+ "losses/dpo": 0.1816004067659378,
1315
+ "losses/sft": 2.4103786945343018,
1316
+ "losses/total": 0.1816004067659378,
1317
+ "ref_logps/chosen": -137.89926147460938,
1318
+ "ref_logps/rejected": -151.60308837890625,
1319
+ "rewards/accuracies": 0.9375,
1320
+ "rewards/chosen": -2.1616339683532715,
1321
+ "rewards/margins": 13.0126371383667,
1322
+ "rewards/rejected": -15.174270629882812,
1323
+ "step": 73
1324
+ },
1325
+ {
1326
+ "epoch": 0.1378026070763501,
1327
+ "grad_norm": 11.816011428833008,
1328
+ "learning_rate": 4.79296066252588e-06,
1329
+ "logps/chosen": -163.58856201171875,
1330
+ "logps/rejected": -307.9797058105469,
1331
+ "loss": 0.076,
1332
+ "losses/dpo": 0.04001971334218979,
1333
+ "losses/sft": 2.3666553497314453,
1334
+ "losses/total": 0.04001971334218979,
1335
+ "ref_logps/chosen": -140.401123046875,
1336
+ "ref_logps/rejected": -169.53005981445312,
1337
+ "rewards/accuracies": 0.96875,
1338
+ "rewards/chosen": -2.3187437057495117,
1339
+ "rewards/margins": 11.52622127532959,
1340
+ "rewards/rejected": -13.844964981079102,
1341
+ "step": 74
1342
+ },
1343
+ {
1344
+ "epoch": 0.13966480446927373,
1345
+ "grad_norm": 13.491276741027832,
1346
+ "learning_rate": 4.782608695652174e-06,
1347
+ "logps/chosen": -163.01483154296875,
1348
+ "logps/rejected": -305.1317443847656,
1349
+ "loss": 0.1175,
1350
+ "losses/dpo": 0.021199261769652367,
1351
+ "losses/sft": 2.5555906295776367,
1352
+ "losses/total": 0.021199261769652367,
1353
+ "ref_logps/chosen": -135.76666259765625,
1354
+ "ref_logps/rejected": -159.01651000976562,
1355
+ "rewards/accuracies": 0.96875,
1356
+ "rewards/chosen": -2.72481632232666,
1357
+ "rewards/margins": 11.88670825958252,
1358
+ "rewards/rejected": -14.61152458190918,
1359
+ "step": 75
1360
+ },
1361
+ {
1362
+ "epoch": 0.14152700186219738,
1363
+ "grad_norm": 13.310988426208496,
1364
+ "learning_rate": 4.772256728778468e-06,
1365
+ "logps/chosen": -166.88494873046875,
1366
+ "logps/rejected": -300.84918212890625,
1367
+ "loss": 0.0695,
1368
+ "losses/dpo": 0.23336642980575562,
1369
+ "losses/sft": 2.7684144973754883,
1370
+ "losses/total": 0.23336642980575562,
1371
+ "ref_logps/chosen": -145.76715087890625,
1372
+ "ref_logps/rejected": -162.08209228515625,
1373
+ "rewards/accuracies": 0.953125,
1374
+ "rewards/chosen": -2.1117796897888184,
1375
+ "rewards/margins": 11.764933586120605,
1376
+ "rewards/rejected": -13.876713752746582,
1377
+ "step": 76
1378
+ },
1379
+ {
1380
+ "epoch": 0.14338919925512103,
1381
+ "grad_norm": 5.915053844451904,
1382
+ "learning_rate": 4.761904761904762e-06,
1383
+ "logps/chosen": -166.23074340820312,
1384
+ "logps/rejected": -321.7993469238281,
1385
+ "loss": 0.0245,
1386
+ "losses/dpo": 0.00781274028122425,
1387
+ "losses/sft": 2.6997854709625244,
1388
+ "losses/total": 0.00781274028122425,
1389
+ "ref_logps/chosen": -146.89723205566406,
1390
+ "ref_logps/rejected": -168.7777099609375,
1391
+ "rewards/accuracies": 1.0,
1392
+ "rewards/chosen": -1.9333515167236328,
1393
+ "rewards/margins": 13.368810653686523,
1394
+ "rewards/rejected": -15.302162170410156,
1395
+ "step": 77
1396
+ },
1397
+ {
1398
+ "epoch": 0.1452513966480447,
1399
+ "grad_norm": 8.788707733154297,
1400
+ "learning_rate": 4.751552795031056e-06,
1401
+ "logps/chosen": -175.02999877929688,
1402
+ "logps/rejected": -321.0180969238281,
1403
+ "loss": 0.0271,
1404
+ "losses/dpo": 0.14333294332027435,
1405
+ "losses/sft": 2.538876533508301,
1406
+ "losses/total": 0.14333294332027435,
1407
+ "ref_logps/chosen": -157.10501098632812,
1408
+ "ref_logps/rejected": -181.61178588867188,
1409
+ "rewards/accuracies": 0.984375,
1410
+ "rewards/chosen": -1.7924991846084595,
1411
+ "rewards/margins": 12.148136138916016,
1412
+ "rewards/rejected": -13.940634727478027,
1413
+ "step": 78
1414
+ },
1415
+ {
1416
+ "epoch": 0.14711359404096835,
1417
+ "grad_norm": 3.836193323135376,
1418
+ "learning_rate": 4.74120082815735e-06,
1419
+ "logps/chosen": -161.6851806640625,
1420
+ "logps/rejected": -307.22308349609375,
1421
+ "loss": 0.018,
1422
+ "losses/dpo": 0.005441778339445591,
1423
+ "losses/sft": 2.4244680404663086,
1424
+ "losses/total": 0.005441778339445591,
1425
+ "ref_logps/chosen": -141.81394958496094,
1426
+ "ref_logps/rejected": -166.87547302246094,
1427
+ "rewards/accuracies": 1.0,
1428
+ "rewards/chosen": -1.9871225357055664,
1429
+ "rewards/margins": 12.047636985778809,
1430
+ "rewards/rejected": -14.034758567810059,
1431
+ "step": 79
1432
+ },
1433
+ {
1434
+ "epoch": 0.148975791433892,
1435
+ "grad_norm": 1.0960124731063843,
1436
+ "learning_rate": 4.730848861283645e-06,
1437
+ "logps/chosen": -164.8990020751953,
1438
+ "logps/rejected": -306.0439453125,
1439
+ "loss": 0.0046,
1440
+ "losses/dpo": 0.00012883917952422053,
1441
+ "losses/sft": 2.337103843688965,
1442
+ "losses/total": 0.00012883917952422053,
1443
+ "ref_logps/chosen": -150.0802001953125,
1444
+ "ref_logps/rejected": -169.78543090820312,
1445
+ "rewards/accuracies": 1.0,
1446
+ "rewards/chosen": -1.48188054561615,
1447
+ "rewards/margins": 12.143974304199219,
1448
+ "rewards/rejected": -13.6258544921875,
1449
+ "step": 80
1450
+ },
1451
+ {
1452
+ "epoch": 0.15083798882681565,
1453
+ "grad_norm": 10.833060264587402,
1454
+ "learning_rate": 4.7204968944099384e-06,
1455
+ "logps/chosen": -166.50360107421875,
1456
+ "logps/rejected": -291.5777893066406,
1457
+ "loss": 0.0699,
1458
+ "losses/dpo": 0.3678208291530609,
1459
+ "losses/sft": 2.360893726348877,
1460
+ "losses/total": 0.3678208291530609,
1461
+ "ref_logps/chosen": -139.50567626953125,
1462
+ "ref_logps/rejected": -159.87924194335938,
1463
+ "rewards/accuracies": 0.984375,
1464
+ "rewards/chosen": -2.699791193008423,
1465
+ "rewards/margins": 10.470064163208008,
1466
+ "rewards/rejected": -13.169855117797852,
1467
+ "step": 81
1468
+ },
1469
+ {
1470
+ "epoch": 0.1527001862197393,
1471
+ "grad_norm": 10.486247062683105,
1472
+ "learning_rate": 4.710144927536232e-06,
1473
+ "logps/chosen": -158.02520751953125,
1474
+ "logps/rejected": -298.325927734375,
1475
+ "loss": 0.0641,
1476
+ "losses/dpo": 0.04021751880645752,
1477
+ "losses/sft": 2.221280574798584,
1478
+ "losses/total": 0.04021751880645752,
1479
+ "ref_logps/chosen": -147.9010009765625,
1480
+ "ref_logps/rejected": -168.83761596679688,
1481
+ "rewards/accuracies": 0.96875,
1482
+ "rewards/chosen": -1.0124225616455078,
1483
+ "rewards/margins": 11.936410903930664,
1484
+ "rewards/rejected": -12.948833465576172,
1485
+ "step": 82
1486
+ },
1487
+ {
1488
+ "epoch": 0.15456238361266295,
1489
+ "grad_norm": 4.4983320236206055,
1490
+ "learning_rate": 4.6997929606625265e-06,
1491
+ "logps/chosen": -157.27609252929688,
1492
+ "logps/rejected": -284.30670166015625,
1493
+ "loss": 0.0134,
1494
+ "losses/dpo": 0.07455439865589142,
1495
+ "losses/sft": 2.680065631866455,
1496
+ "losses/total": 0.07455439865589142,
1497
+ "ref_logps/chosen": -143.0989990234375,
1498
+ "ref_logps/rejected": -166.63612365722656,
1499
+ "rewards/accuracies": 1.0,
1500
+ "rewards/chosen": -1.4177087545394897,
1501
+ "rewards/margins": 10.349348068237305,
1502
+ "rewards/rejected": -11.767056465148926,
1503
+ "step": 83
1504
+ },
1505
+ {
1506
+ "epoch": 0.1564245810055866,
1507
+ "grad_norm": 5.925443172454834,
1508
+ "learning_rate": 4.68944099378882e-06,
1509
+ "logps/chosen": -166.75436401367188,
1510
+ "logps/rejected": -307.4884338378906,
1511
+ "loss": 0.0294,
1512
+ "losses/dpo": 0.018268397077918053,
1513
+ "losses/sft": 2.3180205821990967,
1514
+ "losses/total": 0.018268397077918053,
1515
+ "ref_logps/chosen": -146.59835815429688,
1516
+ "ref_logps/rejected": -171.87164306640625,
1517
+ "rewards/accuracies": 0.984375,
1518
+ "rewards/chosen": -2.015601634979248,
1519
+ "rewards/margins": 11.546079635620117,
1520
+ "rewards/rejected": -13.561680793762207,
1521
+ "step": 84
1522
+ },
1523
+ {
1524
+ "epoch": 0.15828677839851024,
1525
+ "grad_norm": 10.823968887329102,
1526
+ "learning_rate": 4.679089026915114e-06,
1527
+ "logps/chosen": -164.76190185546875,
1528
+ "logps/rejected": -291.744873046875,
1529
+ "loss": 0.0787,
1530
+ "losses/dpo": 0.0033458764664828777,
1531
+ "losses/sft": 2.6443734169006348,
1532
+ "losses/total": 0.0033458764664828777,
1533
+ "ref_logps/chosen": -147.3948516845703,
1534
+ "ref_logps/rejected": -165.80938720703125,
1535
+ "rewards/accuracies": 0.96875,
1536
+ "rewards/chosen": -1.73670494556427,
1537
+ "rewards/margins": 10.856843948364258,
1538
+ "rewards/rejected": -12.593548774719238,
1539
+ "step": 85
1540
+ },
1541
+ {
1542
+ "epoch": 0.1601489757914339,
1543
+ "grad_norm": 10.07721996307373,
1544
+ "learning_rate": 4.668737060041408e-06,
1545
+ "logps/chosen": -170.19273376464844,
1546
+ "logps/rejected": -289.0329284667969,
1547
+ "loss": 0.0578,
1548
+ "losses/dpo": 0.22292722761631012,
1549
+ "losses/sft": 2.8669309616088867,
1550
+ "losses/total": 0.22292722761631012,
1551
+ "ref_logps/chosen": -155.04888916015625,
1552
+ "ref_logps/rejected": -169.506103515625,
1553
+ "rewards/accuracies": 0.984375,
1554
+ "rewards/chosen": -1.5143851041793823,
1555
+ "rewards/margins": 10.438299179077148,
1556
+ "rewards/rejected": -11.95268440246582,
1557
+ "step": 86
1558
+ },
1559
+ {
1560
+ "epoch": 0.16201117318435754,
1561
+ "grad_norm": 7.202996730804443,
1562
+ "learning_rate": 4.6583850931677025e-06,
1563
+ "logps/chosen": -152.85894775390625,
1564
+ "logps/rejected": -276.5400390625,
1565
+ "loss": 0.0464,
1566
+ "losses/dpo": 0.0008024298003874719,
1567
+ "losses/sft": 2.105069637298584,
1568
+ "losses/total": 0.0008024298003874719,
1569
+ "ref_logps/chosen": -135.02166748046875,
1570
+ "ref_logps/rejected": -162.55917358398438,
1571
+ "rewards/accuracies": 0.96875,
1572
+ "rewards/chosen": -1.783726453781128,
1573
+ "rewards/margins": 9.614358901977539,
1574
+ "rewards/rejected": -11.39808464050293,
1575
+ "step": 87
1576
+ },
1577
+ {
1578
+ "epoch": 0.16387337057728119,
1579
+ "grad_norm": 10.529645919799805,
1580
+ "learning_rate": 4.648033126293996e-06,
1581
+ "logps/chosen": -167.41766357421875,
1582
+ "logps/rejected": -312.9851989746094,
1583
+ "loss": 0.1059,
1584
+ "losses/dpo": 1.8586399164632894e-05,
1585
+ "losses/sft": 2.3417227268218994,
1586
+ "losses/total": 1.8586399164632894e-05,
1587
+ "ref_logps/chosen": -150.24447631835938,
1588
+ "ref_logps/rejected": -171.88534545898438,
1589
+ "rewards/accuracies": 0.984375,
1590
+ "rewards/chosen": -1.7173188924789429,
1591
+ "rewards/margins": 12.392667770385742,
1592
+ "rewards/rejected": -14.1099853515625,
1593
+ "step": 88
1594
+ },
1595
+ {
1596
+ "epoch": 0.16573556797020483,
1597
+ "grad_norm": 5.379014492034912,
1598
+ "learning_rate": 4.637681159420291e-06,
1599
+ "logps/chosen": -149.58767700195312,
1600
+ "logps/rejected": -283.6622009277344,
1601
+ "loss": 0.0265,
1602
+ "losses/dpo": 0.0076700253412127495,
1603
+ "losses/sft": 2.0566470623016357,
1604
+ "losses/total": 0.0076700253412127495,
1605
+ "ref_logps/chosen": -142.82745361328125,
1606
+ "ref_logps/rejected": -166.57040405273438,
1607
+ "rewards/accuracies": 1.0,
1608
+ "rewards/chosen": -0.6760219931602478,
1609
+ "rewards/margins": 11.03315544128418,
1610
+ "rewards/rejected": -11.709177017211914,
1611
+ "step": 89
1612
+ },
1613
+ {
1614
+ "epoch": 0.16759776536312848,
1615
+ "grad_norm": 7.1271820068359375,
1616
+ "learning_rate": 4.627329192546584e-06,
1617
+ "logps/chosen": -166.07994079589844,
1618
+ "logps/rejected": -288.1270446777344,
1619
+ "loss": 0.0383,
1620
+ "losses/dpo": 0.006637689657509327,
1621
+ "losses/sft": 2.695986270904541,
1622
+ "losses/total": 0.006637689657509327,
1623
+ "ref_logps/chosen": -152.90316772460938,
1624
+ "ref_logps/rejected": -167.97433471679688,
1625
+ "rewards/accuracies": 0.984375,
1626
+ "rewards/chosen": -1.3176772594451904,
1627
+ "rewards/margins": 10.697591781616211,
1628
+ "rewards/rejected": -12.01526927947998,
1629
+ "step": 90
1630
+ },
1631
+ {
1632
+ "epoch": 0.16945996275605213,
1633
+ "grad_norm": 3.960089683532715,
1634
+ "learning_rate": 4.616977225672879e-06,
1635
+ "logps/chosen": -155.234619140625,
1636
+ "logps/rejected": -274.2052917480469,
1637
+ "loss": 0.0283,
1638
+ "losses/dpo": 0.029921425506472588,
1639
+ "losses/sft": 2.448662757873535,
1640
+ "losses/total": 0.029921425506472588,
1641
+ "ref_logps/chosen": -146.57998657226562,
1642
+ "ref_logps/rejected": -161.9693603515625,
1643
+ "rewards/accuracies": 1.0,
1644
+ "rewards/chosen": -0.8654617071151733,
1645
+ "rewards/margins": 10.35813045501709,
1646
+ "rewards/rejected": -11.223592758178711,
1647
+ "step": 91
1648
+ },
1649
+ {
1650
+ "epoch": 0.1713221601489758,
1651
+ "grad_norm": 8.884647369384766,
1652
+ "learning_rate": 4.606625258799172e-06,
1653
+ "logps/chosen": -156.59239196777344,
1654
+ "logps/rejected": -269.66265869140625,
1655
+ "loss": 0.0549,
1656
+ "losses/dpo": 0.000325945409713313,
1657
+ "losses/sft": 2.27170991897583,
1658
+ "losses/total": 0.000325945409713313,
1659
+ "ref_logps/chosen": -143.44590759277344,
1660
+ "ref_logps/rejected": -160.02975463867188,
1661
+ "rewards/accuracies": 0.96875,
1662
+ "rewards/chosen": -1.3146480321884155,
1663
+ "rewards/margins": 9.648642539978027,
1664
+ "rewards/rejected": -10.963289260864258,
1665
+ "step": 92
1666
+ },
1667
+ {
1668
+ "epoch": 0.17318435754189945,
1669
+ "grad_norm": 8.483807563781738,
1670
+ "learning_rate": 4.596273291925466e-06,
1671
+ "logps/chosen": -157.52755737304688,
1672
+ "logps/rejected": -302.79736328125,
1673
+ "loss": 0.0525,
1674
+ "losses/dpo": 0.007826759479939938,
1675
+ "losses/sft": 2.404571533203125,
1676
+ "losses/total": 0.007826759479939938,
1677
+ "ref_logps/chosen": -151.92408752441406,
1678
+ "ref_logps/rejected": -174.8727569580078,
1679
+ "rewards/accuracies": 0.96875,
1680
+ "rewards/chosen": -0.5603480339050293,
1681
+ "rewards/margins": 12.232114791870117,
1682
+ "rewards/rejected": -12.792463302612305,
1683
+ "step": 93
1684
+ },
1685
+ {
1686
+ "epoch": 0.1750465549348231,
1687
+ "grad_norm": 2.803177833557129,
1688
+ "learning_rate": 4.58592132505176e-06,
1689
+ "logps/chosen": -154.108154296875,
1690
+ "logps/rejected": -316.1098937988281,
1691
+ "loss": 0.0115,
1692
+ "losses/dpo": 0.014974968507885933,
1693
+ "losses/sft": 2.5976779460906982,
1694
+ "losses/total": 0.014974968507885933,
1695
+ "ref_logps/chosen": -139.6196746826172,
1696
+ "ref_logps/rejected": -177.22061157226562,
1697
+ "rewards/accuracies": 1.0,
1698
+ "rewards/chosen": -1.4488475322723389,
1699
+ "rewards/margins": 12.440080642700195,
1700
+ "rewards/rejected": -13.88892936706543,
1701
+ "step": 94
1702
+ },
1703
+ {
1704
+ "epoch": 0.17690875232774675,
1705
+ "grad_norm": 10.011286735534668,
1706
+ "learning_rate": 4.575569358178054e-06,
1707
+ "logps/chosen": -161.99615478515625,
1708
+ "logps/rejected": -290.70928955078125,
1709
+ "loss": 0.0521,
1710
+ "losses/dpo": 0.004248100332915783,
1711
+ "losses/sft": 2.6869282722473145,
1712
+ "losses/total": 0.004248100332915783,
1713
+ "ref_logps/chosen": -150.13644409179688,
1714
+ "ref_logps/rejected": -170.98199462890625,
1715
+ "rewards/accuracies": 0.96875,
1716
+ "rewards/chosen": -1.1859712600708008,
1717
+ "rewards/margins": 10.786758422851562,
1718
+ "rewards/rejected": -11.972728729248047,
1719
+ "step": 95
1720
+ },
1721
+ {
1722
+ "epoch": 0.1787709497206704,
1723
+ "grad_norm": 13.34592342376709,
1724
+ "learning_rate": 4.565217391304348e-06,
1725
+ "logps/chosen": -157.46038818359375,
1726
+ "logps/rejected": -293.8796691894531,
1727
+ "loss": 0.0613,
1728
+ "losses/dpo": 0.0029001892544329166,
1729
+ "losses/sft": 2.191887378692627,
1730
+ "losses/total": 0.0029001892544329166,
1731
+ "ref_logps/chosen": -147.4150390625,
1732
+ "ref_logps/rejected": -175.098876953125,
1733
+ "rewards/accuracies": 0.96875,
1734
+ "rewards/chosen": -1.004532814025879,
1735
+ "rewards/margins": 10.873546600341797,
1736
+ "rewards/rejected": -11.878079414367676,
1737
+ "step": 96
1738
+ },
1739
+ {
1740
+ "epoch": 0.18063314711359404,
1741
+ "grad_norm": 13.375811576843262,
1742
+ "learning_rate": 4.554865424430642e-06,
1743
+ "logps/chosen": -166.24191284179688,
1744
+ "logps/rejected": -305.58819580078125,
1745
+ "loss": 0.0981,
1746
+ "losses/dpo": 0.0007697511464357376,
1747
+ "losses/sft": 2.3007588386535645,
1748
+ "losses/total": 0.0007697511464357376,
1749
+ "ref_logps/chosen": -144.6219482421875,
1750
+ "ref_logps/rejected": -174.1811981201172,
1751
+ "rewards/accuracies": 0.953125,
1752
+ "rewards/chosen": -2.1619954109191895,
1753
+ "rewards/margins": 10.978706359863281,
1754
+ "rewards/rejected": -13.140701293945312,
1755
+ "step": 97
1756
+ },
1757
+ {
1758
+ "epoch": 0.1824953445065177,
1759
+ "grad_norm": 11.116385459899902,
1760
+ "learning_rate": 4.544513457556936e-06,
1761
+ "logps/chosen": -151.30795288085938,
1762
+ "logps/rejected": -303.7850341796875,
1763
+ "loss": 0.0838,
1764
+ "losses/dpo": 0.006321331951767206,
1765
+ "losses/sft": 2.45919132232666,
1766
+ "losses/total": 0.006321331951767206,
1767
+ "ref_logps/chosen": -141.2546844482422,
1768
+ "ref_logps/rejected": -166.2907257080078,
1769
+ "rewards/accuracies": 0.96875,
1770
+ "rewards/chosen": -1.0053260326385498,
1771
+ "rewards/margins": 12.744101524353027,
1772
+ "rewards/rejected": -13.749427795410156,
1773
+ "step": 98
1774
+ },
1775
+ {
1776
+ "epoch": 0.18435754189944134,
1777
+ "grad_norm": 8.1275053024292,
1778
+ "learning_rate": 4.534161490683231e-06,
1779
+ "logps/chosen": -169.12844848632812,
1780
+ "logps/rejected": -291.58355712890625,
1781
+ "loss": 0.0379,
1782
+ "losses/dpo": 0.04353442043066025,
1783
+ "losses/sft": 2.9029669761657715,
1784
+ "losses/total": 0.04353442043066025,
1785
+ "ref_logps/chosen": -148.775390625,
1786
+ "ref_logps/rejected": -162.24786376953125,
1787
+ "rewards/accuracies": 0.984375,
1788
+ "rewards/chosen": -2.0353074073791504,
1789
+ "rewards/margins": 10.898263931274414,
1790
+ "rewards/rejected": -12.933570861816406,
1791
+ "step": 99
1792
+ },
1793
+ {
1794
+ "epoch": 0.186219739292365,
1795
+ "grad_norm": 3.8263306617736816,
1796
+ "learning_rate": 4.523809523809524e-06,
1797
+ "logps/chosen": -160.37161254882812,
1798
+ "logps/rejected": -291.48577880859375,
1799
+ "loss": 0.0159,
1800
+ "losses/dpo": 0.06080714985728264,
1801
+ "losses/sft": 2.056828498840332,
1802
+ "losses/total": 0.06080714985728264,
1803
+ "ref_logps/chosen": -143.4698486328125,
1804
+ "ref_logps/rejected": -165.9519500732422,
1805
+ "rewards/accuracies": 1.0,
1806
+ "rewards/chosen": -1.6901748180389404,
1807
+ "rewards/margins": 10.863210678100586,
1808
+ "rewards/rejected": -12.553384780883789,
1809
+ "step": 100
1810
+ },
1811
+ {
1812
+ "epoch": 0.18808193668528864,
1813
+ "grad_norm": 9.816635131835938,
1814
+ "learning_rate": 4.513457556935818e-06,
1815
+ "logps/chosen": -177.78240966796875,
1816
+ "logps/rejected": -330.1160888671875,
1817
+ "loss": 0.0489,
1818
+ "losses/dpo": 0.2933081388473511,
1819
+ "losses/sft": 2.7717084884643555,
1820
+ "losses/total": 0.2933081388473511,
1821
+ "ref_logps/chosen": -155.60540771484375,
1822
+ "ref_logps/rejected": -177.44467163085938,
1823
+ "rewards/accuracies": 0.984375,
1824
+ "rewards/chosen": -2.2176995277404785,
1825
+ "rewards/margins": 13.04944133758545,
1826
+ "rewards/rejected": -15.267141342163086,
1827
+ "step": 101
1828
+ },
1829
+ {
1830
+ "epoch": 0.18994413407821228,
1831
+ "grad_norm": 1.3710126876831055,
1832
+ "learning_rate": 4.503105590062112e-06,
1833
+ "logps/chosen": -166.66590881347656,
1834
+ "logps/rejected": -290.761962890625,
1835
+ "loss": 0.0054,
1836
+ "losses/dpo": 0.02758178301155567,
1837
+ "losses/sft": 3.162290096282959,
1838
+ "losses/total": 0.02758178301155567,
1839
+ "ref_logps/chosen": -147.08917236328125,
1840
+ "ref_logps/rejected": -151.07664489746094,
1841
+ "rewards/accuracies": 1.0,
1842
+ "rewards/chosen": -1.9576739072799683,
1843
+ "rewards/margins": 12.010858535766602,
1844
+ "rewards/rejected": -13.96853256225586,
1845
+ "step": 102
1846
+ },
1847
+ {
1848
+ "epoch": 0.19180633147113593,
1849
+ "grad_norm": 7.93828010559082,
1850
+ "learning_rate": 4.492753623188406e-06,
1851
+ "logps/chosen": -146.8543701171875,
1852
+ "logps/rejected": -312.2845764160156,
1853
+ "loss": 0.0425,
1854
+ "losses/dpo": 0.2460930347442627,
1855
+ "losses/sft": 2.2462611198425293,
1856
+ "losses/total": 0.2460930347442627,
1857
+ "ref_logps/chosen": -135.14158630371094,
1858
+ "ref_logps/rejected": -168.27618408203125,
1859
+ "rewards/accuracies": 0.984375,
1860
+ "rewards/chosen": -1.1712779998779297,
1861
+ "rewards/margins": 13.229562759399414,
1862
+ "rewards/rejected": -14.400840759277344,
1863
+ "step": 103
1864
+ },
1865
+ {
1866
+ "epoch": 0.19366852886405958,
1867
+ "grad_norm": 13.03959846496582,
1868
+ "learning_rate": 4.4824016563146996e-06,
1869
+ "logps/chosen": -163.9506378173828,
1870
+ "logps/rejected": -338.9526062011719,
1871
+ "loss": 0.105,
1872
+ "losses/dpo": 1.4241080862120725e-05,
1873
+ "losses/sft": 2.1639623641967773,
1874
+ "losses/total": 1.4241080862120725e-05,
1875
+ "ref_logps/chosen": -144.27719116210938,
1876
+ "ref_logps/rejected": -172.1199951171875,
1877
+ "rewards/accuracies": 0.984375,
1878
+ "rewards/chosen": -1.9673457145690918,
1879
+ "rewards/margins": 14.71591567993164,
1880
+ "rewards/rejected": -16.683259963989258,
1881
+ "step": 104
1882
+ },
1883
+ {
1884
+ "epoch": 0.19553072625698323,
1885
+ "grad_norm": 12.940238952636719,
1886
+ "learning_rate": 4.472049689440994e-06,
1887
+ "logps/chosen": -167.60537719726562,
1888
+ "logps/rejected": -305.2105407714844,
1889
+ "loss": 0.1266,
1890
+ "losses/dpo": 0.016943011432886124,
1891
+ "losses/sft": 2.568720817565918,
1892
+ "losses/total": 0.016943011432886124,
1893
+ "ref_logps/chosen": -144.21282958984375,
1894
+ "ref_logps/rejected": -158.3541259765625,
1895
+ "rewards/accuracies": 0.953125,
1896
+ "rewards/chosen": -2.339254140853882,
1897
+ "rewards/margins": 12.34638786315918,
1898
+ "rewards/rejected": -14.685641288757324,
1899
+ "step": 105
1900
+ },
1901
+ {
1902
+ "epoch": 0.1973929236499069,
1903
+ "grad_norm": 9.413414001464844,
1904
+ "learning_rate": 4.4616977225672884e-06,
1905
+ "logps/chosen": -171.18016052246094,
1906
+ "logps/rejected": -302.7274475097656,
1907
+ "loss": 0.0475,
1908
+ "losses/dpo": 0.0004924152162857354,
1909
+ "losses/sft": 2.5999226570129395,
1910
+ "losses/total": 0.0004924152162857354,
1911
+ "ref_logps/chosen": -144.5438690185547,
1912
+ "ref_logps/rejected": -162.2921905517578,
1913
+ "rewards/accuracies": 0.984375,
1914
+ "rewards/chosen": -2.6636300086975098,
1915
+ "rewards/margins": 11.37989616394043,
1916
+ "rewards/rejected": -14.043526649475098,
1917
+ "step": 106
1918
+ },
1919
+ {
1920
+ "epoch": 0.19925512104283055,
1921
+ "grad_norm": 1.439731240272522,
1922
+ "learning_rate": 4.451345755693582e-06,
1923
+ "logps/chosen": -169.63963317871094,
1924
+ "logps/rejected": -335.1871337890625,
1925
+ "loss": 0.0046,
1926
+ "losses/dpo": 0.01760055683553219,
1927
+ "losses/sft": 2.191661834716797,
1928
+ "losses/total": 0.01760055683553219,
1929
+ "ref_logps/chosen": -148.11293029785156,
1930
+ "ref_logps/rejected": -171.40560913085938,
1931
+ "rewards/accuracies": 1.0,
1932
+ "rewards/chosen": -2.1526713371276855,
1933
+ "rewards/margins": 14.225481986999512,
1934
+ "rewards/rejected": -16.37815284729004,
1935
+ "step": 107
1936
+ },
1937
+ {
1938
+ "epoch": 0.2011173184357542,
1939
+ "grad_norm": 12.107815742492676,
1940
+ "learning_rate": 4.4409937888198765e-06,
1941
+ "logps/chosen": -174.36929321289062,
1942
+ "logps/rejected": -322.1456604003906,
1943
+ "loss": 0.1067,
1944
+ "losses/dpo": 0.0005443975096568465,
1945
+ "losses/sft": 2.864640712738037,
1946
+ "losses/total": 0.0005443975096568465,
1947
+ "ref_logps/chosen": -149.93984985351562,
1948
+ "ref_logps/rejected": -166.4761962890625,
1949
+ "rewards/accuracies": 0.953125,
1950
+ "rewards/chosen": -2.442944288253784,
1951
+ "rewards/margins": 13.124004364013672,
1952
+ "rewards/rejected": -15.566947937011719,
1953
+ "step": 108
1954
+ },
1955
+ {
1956
+ "epoch": 0.20297951582867785,
1957
+ "grad_norm": 7.122522354125977,
1958
+ "learning_rate": 4.43064182194617e-06,
1959
+ "logps/chosen": -155.75596618652344,
1960
+ "logps/rejected": -314.00762939453125,
1961
+ "loss": 0.0363,
1962
+ "losses/dpo": 0.1575506329536438,
1963
+ "losses/sft": 2.296907424926758,
1964
+ "losses/total": 0.1575506329536438,
1965
+ "ref_logps/chosen": -143.9917755126953,
1966
+ "ref_logps/rejected": -175.87164306640625,
1967
+ "rewards/accuracies": 0.984375,
1968
+ "rewards/chosen": -1.1764178276062012,
1969
+ "rewards/margins": 12.637179374694824,
1970
+ "rewards/rejected": -13.813596725463867,
1971
+ "step": 109
1972
+ },
1973
+ {
1974
+ "epoch": 0.2048417132216015,
1975
+ "grad_norm": 1.012307047843933,
1976
+ "learning_rate": 4.4202898550724645e-06,
1977
+ "logps/chosen": -164.65115356445312,
1978
+ "logps/rejected": -318.34326171875,
1979
+ "loss": 0.0034,
1980
+ "losses/dpo": 0.011834606528282166,
1981
+ "losses/sft": 2.363070249557495,
1982
+ "losses/total": 0.011834606528282166,
1983
+ "ref_logps/chosen": -149.04367065429688,
1984
+ "ref_logps/rejected": -166.6707000732422,
1985
+ "rewards/accuracies": 1.0,
1986
+ "rewards/chosen": -1.5607484579086304,
1987
+ "rewards/margins": 13.606508255004883,
1988
+ "rewards/rejected": -15.167256355285645,
1989
+ "step": 110
1990
+ },
1991
+ {
1992
+ "epoch": 0.20670391061452514,
1993
+ "grad_norm": 5.557791233062744,
1994
+ "learning_rate": 4.409937888198758e-06,
1995
+ "logps/chosen": -150.85084533691406,
1996
+ "logps/rejected": -309.1524658203125,
1997
+ "loss": 0.0232,
1998
+ "losses/dpo": 0.014252948574721813,
1999
+ "losses/sft": 2.4658865928649902,
2000
+ "losses/total": 0.014252948574721813,
2001
+ "ref_logps/chosen": -137.03976440429688,
2002
+ "ref_logps/rejected": -161.1869659423828,
2003
+ "rewards/accuracies": 0.984375,
2004
+ "rewards/chosen": -1.381108045578003,
2005
+ "rewards/margins": 13.415441513061523,
2006
+ "rewards/rejected": -14.796548843383789,
2007
+ "step": 111
2008
+ },
2009
+ {
2010
+ "epoch": 0.2085661080074488,
2011
+ "grad_norm": 9.207598686218262,
2012
+ "learning_rate": 4.399585921325052e-06,
2013
+ "logps/chosen": -167.4930419921875,
2014
+ "logps/rejected": -303.5692138671875,
2015
+ "loss": 0.0453,
2016
+ "losses/dpo": 0.0007799149607308209,
2017
+ "losses/sft": 2.518667459487915,
2018
+ "losses/total": 0.0007799149607308209,
2019
+ "ref_logps/chosen": -149.21981811523438,
2020
+ "ref_logps/rejected": -170.2943115234375,
2021
+ "rewards/accuracies": 0.984375,
2022
+ "rewards/chosen": -1.8273215293884277,
2023
+ "rewards/margins": 11.500171661376953,
2024
+ "rewards/rejected": -13.327491760253906,
2025
+ "step": 112
2026
+ },
2027
+ {
2028
+ "epoch": 0.21042830540037244,
2029
+ "grad_norm": 7.050368309020996,
2030
+ "learning_rate": 4.389233954451346e-06,
2031
+ "logps/chosen": -159.51071166992188,
2032
+ "logps/rejected": -307.4903869628906,
2033
+ "loss": 0.0301,
2034
+ "losses/dpo": 0.0039618429727852345,
2035
+ "losses/sft": 2.5831570625305176,
2036
+ "losses/total": 0.0039618429727852345,
2037
+ "ref_logps/chosen": -137.70912170410156,
2038
+ "ref_logps/rejected": -167.49789428710938,
2039
+ "rewards/accuracies": 0.984375,
2040
+ "rewards/chosen": -2.180156946182251,
2041
+ "rewards/margins": 11.819091796875,
2042
+ "rewards/rejected": -13.999248504638672,
2043
+ "step": 113
2044
+ },
2045
+ {
2046
+ "epoch": 0.2122905027932961,
2047
+ "grad_norm": 11.241766929626465,
2048
+ "learning_rate": 4.37888198757764e-06,
2049
+ "logps/chosen": -164.54208374023438,
2050
+ "logps/rejected": -306.73822021484375,
2051
+ "loss": 0.0491,
2052
+ "losses/dpo": 0.17672446370124817,
2053
+ "losses/sft": 2.2986695766448975,
2054
+ "losses/total": 0.17672446370124817,
2055
+ "ref_logps/chosen": -145.33531188964844,
2056
+ "ref_logps/rejected": -164.6031951904297,
2057
+ "rewards/accuracies": 0.984375,
2058
+ "rewards/chosen": -1.92067551612854,
2059
+ "rewards/margins": 12.292827606201172,
2060
+ "rewards/rejected": -14.213502883911133,
2061
+ "step": 114
2062
+ },
2063
+ {
2064
+ "epoch": 0.21415270018621974,
2065
+ "grad_norm": 4.248304843902588,
2066
+ "learning_rate": 4.368530020703934e-06,
2067
+ "logps/chosen": -160.98765563964844,
2068
+ "logps/rejected": -324.66845703125,
2069
+ "loss": 0.0157,
2070
+ "losses/dpo": 0.028906403109431267,
2071
+ "losses/sft": 2.51163387298584,
2072
+ "losses/total": 0.028906403109431267,
2073
+ "ref_logps/chosen": -142.23577880859375,
2074
+ "ref_logps/rejected": -178.46255493164062,
2075
+ "rewards/accuracies": 1.0,
2076
+ "rewards/chosen": -1.875186800956726,
2077
+ "rewards/margins": 12.745403289794922,
2078
+ "rewards/rejected": -14.620590209960938,
2079
+ "step": 115
2080
+ },
2081
+ {
2082
+ "epoch": 0.21601489757914338,
2083
+ "grad_norm": 11.712361335754395,
2084
+ "learning_rate": 4.358178053830228e-06,
2085
+ "logps/chosen": -176.92311096191406,
2086
+ "logps/rejected": -309.1515808105469,
2087
+ "loss": 0.0532,
2088
+ "losses/dpo": 0.0009635839378461242,
2089
+ "losses/sft": 2.519484758377075,
2090
+ "losses/total": 0.0009635839378461242,
2091
+ "ref_logps/chosen": -155.59686279296875,
2092
+ "ref_logps/rejected": -167.6163787841797,
2093
+ "rewards/accuracies": 0.984375,
2094
+ "rewards/chosen": -2.132624626159668,
2095
+ "rewards/margins": 12.020895004272461,
2096
+ "rewards/rejected": -14.153520584106445,
2097
+ "step": 116
2098
+ },
2099
+ {
2100
+ "epoch": 0.21787709497206703,
2101
+ "grad_norm": 12.869205474853516,
2102
+ "learning_rate": 4.347826086956522e-06,
2103
+ "logps/chosen": -164.499755859375,
2104
+ "logps/rejected": -322.2271728515625,
2105
+ "loss": 0.0481,
2106
+ "losses/dpo": 0.026883596554398537,
2107
+ "losses/sft": 2.4711999893188477,
2108
+ "losses/total": 0.026883596554398537,
2109
+ "ref_logps/chosen": -141.9335174560547,
2110
+ "ref_logps/rejected": -174.70437622070312,
2111
+ "rewards/accuracies": 0.984375,
2112
+ "rewards/chosen": -2.256622791290283,
2113
+ "rewards/margins": 12.495659828186035,
2114
+ "rewards/rejected": -14.75228214263916,
2115
+ "step": 117
2116
+ },
2117
+ {
2118
+ "epoch": 0.21973929236499068,
2119
+ "grad_norm": 7.2970499992370605,
2120
+ "learning_rate": 4.337474120082817e-06,
2121
+ "logps/chosen": -154.8804931640625,
2122
+ "logps/rejected": -307.47796630859375,
2123
+ "loss": 0.0217,
2124
+ "losses/dpo": 0.005061456002295017,
2125
+ "losses/sft": 2.7621004581451416,
2126
+ "losses/total": 0.005061456002295017,
2127
+ "ref_logps/chosen": -144.57373046875,
2128
+ "ref_logps/rejected": -164.53269958496094,
2129
+ "rewards/accuracies": 0.984375,
2130
+ "rewards/chosen": -1.0306763648986816,
2131
+ "rewards/margins": 13.263851165771484,
2132
+ "rewards/rejected": -14.294527053833008,
2133
+ "step": 118
2134
+ },
2135
+ {
2136
+ "epoch": 0.22160148975791433,
2137
+ "grad_norm": 12.324630737304688,
2138
+ "learning_rate": 4.32712215320911e-06,
2139
+ "logps/chosen": -169.20716857910156,
2140
+ "logps/rejected": -327.8011474609375,
2141
+ "loss": 0.0692,
2142
+ "losses/dpo": 0.05636470764875412,
2143
+ "losses/sft": 2.663618326187134,
2144
+ "losses/total": 0.05636470764875412,
2145
+ "ref_logps/chosen": -141.14559936523438,
2146
+ "ref_logps/rejected": -169.51641845703125,
2147
+ "rewards/accuracies": 0.96875,
2148
+ "rewards/chosen": -2.806156635284424,
2149
+ "rewards/margins": 13.022315979003906,
2150
+ "rewards/rejected": -15.828473091125488,
2151
+ "step": 119
2152
+ },
2153
+ {
2154
+ "epoch": 0.22346368715083798,
2155
+ "grad_norm": 7.082119464874268,
2156
+ "learning_rate": 4.316770186335404e-06,
2157
+ "logps/chosen": -162.06655883789062,
2158
+ "logps/rejected": -312.4548645019531,
2159
+ "loss": 0.0211,
2160
+ "losses/dpo": 0.14828312397003174,
2161
+ "losses/sft": 2.6526529788970947,
2162
+ "losses/total": 0.14828312397003174,
2163
+ "ref_logps/chosen": -147.12899780273438,
2164
+ "ref_logps/rejected": -163.53634643554688,
2165
+ "rewards/accuracies": 0.984375,
2166
+ "rewards/chosen": -1.4937548637390137,
2167
+ "rewards/margins": 13.39809799194336,
2168
+ "rewards/rejected": -14.891853332519531,
2169
+ "step": 120
2170
+ },
2171
+ {
2172
+ "epoch": 0.22532588454376165,
2173
+ "grad_norm": 7.707842826843262,
2174
+ "learning_rate": 4.306418219461698e-06,
2175
+ "logps/chosen": -169.38192749023438,
2176
+ "logps/rejected": -323.21514892578125,
2177
+ "loss": 0.0198,
2178
+ "losses/dpo": 0.1473950445652008,
2179
+ "losses/sft": 2.6331777572631836,
2180
+ "losses/total": 0.1473950445652008,
2181
+ "ref_logps/chosen": -149.7891845703125,
2182
+ "ref_logps/rejected": -174.02752685546875,
2183
+ "rewards/accuracies": 0.984375,
2184
+ "rewards/chosen": -1.9592747688293457,
2185
+ "rewards/margins": 12.959487915039062,
2186
+ "rewards/rejected": -14.918761253356934,
2187
+ "step": 121
2188
+ },
2189
+ {
2190
+ "epoch": 0.2271880819366853,
2191
+ "grad_norm": 6.156428337097168,
2192
+ "learning_rate": 4.296066252587992e-06,
2193
+ "logps/chosen": -177.64190673828125,
2194
+ "logps/rejected": -320.0240478515625,
2195
+ "loss": 0.0313,
2196
+ "losses/dpo": 0.0023224614560604095,
2197
+ "losses/sft": 2.282505512237549,
2198
+ "losses/total": 0.0023224614560604095,
2199
+ "ref_logps/chosen": -150.99745178222656,
2200
+ "ref_logps/rejected": -166.0125732421875,
2201
+ "rewards/accuracies": 0.984375,
2202
+ "rewards/chosen": -2.664444923400879,
2203
+ "rewards/margins": 12.736701011657715,
2204
+ "rewards/rejected": -15.40114688873291,
2205
+ "step": 122
2206
+ },
2207
+ {
2208
+ "epoch": 0.22905027932960895,
2209
+ "grad_norm": 12.972006797790527,
2210
+ "learning_rate": 4.2857142857142855e-06,
2211
+ "logps/chosen": -171.61099243164062,
2212
+ "logps/rejected": -314.13824462890625,
2213
+ "loss": 0.204,
2214
+ "losses/dpo": 0.11684274673461914,
2215
+ "losses/sft": 2.5414156913757324,
2216
+ "losses/total": 0.11684274673461914,
2217
+ "ref_logps/chosen": -152.2470703125,
2218
+ "ref_logps/rejected": -169.70111083984375,
2219
+ "rewards/accuracies": 0.953125,
2220
+ "rewards/chosen": -1.936392903327942,
2221
+ "rewards/margins": 12.507322311401367,
2222
+ "rewards/rejected": -14.443716049194336,
2223
+ "step": 123
2224
+ },
2225
+ {
2226
+ "epoch": 0.2309124767225326,
2227
+ "grad_norm": 8.865942001342773,
2228
+ "learning_rate": 4.27536231884058e-06,
2229
+ "logps/chosen": -178.2666015625,
2230
+ "logps/rejected": -294.51971435546875,
2231
+ "loss": 0.0887,
2232
+ "losses/dpo": 0.04358779639005661,
2233
+ "losses/sft": 2.905651569366455,
2234
+ "losses/total": 0.04358779639005661,
2235
+ "ref_logps/chosen": -146.95541381835938,
2236
+ "ref_logps/rejected": -152.81776428222656,
2237
+ "rewards/accuracies": 0.984375,
2238
+ "rewards/chosen": -3.1311190128326416,
2239
+ "rewards/margins": 11.039077758789062,
2240
+ "rewards/rejected": -14.170197486877441,
2241
+ "step": 124
2242
+ },
2243
+ {
2244
+ "epoch": 0.23277467411545624,
2245
+ "grad_norm": 14.640778541564941,
2246
+ "learning_rate": 4.265010351966874e-06,
2247
+ "logps/chosen": -176.9116668701172,
2248
+ "logps/rejected": -332.41241455078125,
2249
+ "loss": 0.1211,
2250
+ "losses/dpo": 0.053478606045246124,
2251
+ "losses/sft": 2.7518341541290283,
2252
+ "losses/total": 0.053478606045246124,
2253
+ "ref_logps/chosen": -150.49269104003906,
2254
+ "ref_logps/rejected": -171.48587036132812,
2255
+ "rewards/accuracies": 0.984375,
2256
+ "rewards/chosen": -2.641897678375244,
2257
+ "rewards/margins": 13.450759887695312,
2258
+ "rewards/rejected": -16.0926570892334,
2259
+ "step": 125
2260
+ }
2261
+ ],
2262
+ "logging_steps": 1.0,
2263
+ "max_steps": 537,
2264
+ "num_input_tokens_seen": 0,
2265
+ "num_train_epochs": 1,
2266
+ "save_steps": 25,
2267
+ "stateful_callbacks": {
2268
+ "TrainerControl": {
2269
+ "args": {
2270
+ "should_epoch_stop": false,
2271
+ "should_evaluate": false,
2272
+ "should_log": false,
2273
+ "should_save": true,
2274
+ "should_training_stop": false
2275
+ },
2276
+ "attributes": {}
2277
+ }
2278
+ },
2279
+ "total_flos": 0.0,
2280
+ "train_batch_size": 2,
2281
+ "trial_name": null,
2282
+ "trial_params": null
2283
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:494f542b170a80f31e9196cf525539951715f4482c3206039742a9432cebf311
3
+ size 6904
vocab.json ADDED
The diff for this file is too large to render. See raw diff