SirajRLX committed on
Commit 8f557b6 · verified · 1 parent: 0b86081

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +4 -0
  2. grpo_qwen_14B_v2/best_adapter/README.md +207 -0
  3. grpo_qwen_14B_v2/best_adapter/adapter_config.json +46 -0
  4. grpo_qwen_14B_v2/best_adapter/adapter_model.safetensors +3 -0
  5. grpo_qwen_14B_v2/best_adapter/added_tokens.json +24 -0
  6. grpo_qwen_14B_v2/best_adapter/chat_template.jinja +54 -0
  7. grpo_qwen_14B_v2/best_adapter/merges.txt +0 -0
  8. grpo_qwen_14B_v2/best_adapter/special_tokens_map.json +31 -0
  9. grpo_qwen_14B_v2/best_adapter/tokenizer.json +3 -0
  10. grpo_qwen_14B_v2/best_adapter/tokenizer_config.json +207 -0
  11. grpo_qwen_14B_v2/best_adapter/training_args.bin +3 -0
  12. grpo_qwen_14B_v2/best_adapter/vocab.json +0 -0
  13. grpo_qwen_14B_v2/checkpoints/checkpoint-400/README.md +207 -0
  14. grpo_qwen_14B_v2/checkpoints/checkpoint-400/adapter_config.json +46 -0
  15. grpo_qwen_14B_v2/checkpoints/checkpoint-400/adapter_model.safetensors +3 -0
  16. grpo_qwen_14B_v2/checkpoints/checkpoint-400/added_tokens.json +24 -0
  17. grpo_qwen_14B_v2/checkpoints/checkpoint-400/chat_template.jinja +54 -0
  18. grpo_qwen_14B_v2/checkpoints/checkpoint-400/merges.txt +0 -0
  19. grpo_qwen_14B_v2/checkpoints/checkpoint-400/optimizer.pt +3 -0
  20. grpo_qwen_14B_v2/checkpoints/checkpoint-400/rng_state.pth +3 -0
  21. grpo_qwen_14B_v2/checkpoints/checkpoint-400/scheduler.pt +3 -0
  22. grpo_qwen_14B_v2/checkpoints/checkpoint-400/special_tokens_map.json +31 -0
  23. grpo_qwen_14B_v2/checkpoints/checkpoint-400/tokenizer.json +3 -0
  24. grpo_qwen_14B_v2/checkpoints/checkpoint-400/tokenizer_config.json +207 -0
  25. grpo_qwen_14B_v2/checkpoints/checkpoint-400/trainer_state.json +914 -0
  26. grpo_qwen_14B_v2/checkpoints/checkpoint-400/training_args.bin +3 -0
  27. grpo_qwen_14B_v2/checkpoints/checkpoint-400/vocab.json +0 -0
  28. grpo_qwen_14B_v2/checkpoints/checkpoint-500/README.md +207 -0
  29. grpo_qwen_14B_v2/checkpoints/checkpoint-500/adapter_config.json +46 -0
  30. grpo_qwen_14B_v2/checkpoints/checkpoint-500/adapter_model.safetensors +3 -0
  31. grpo_qwen_14B_v2/checkpoints/checkpoint-500/added_tokens.json +24 -0
  32. grpo_qwen_14B_v2/checkpoints/checkpoint-500/chat_template.jinja +54 -0
  33. grpo_qwen_14B_v2/checkpoints/checkpoint-500/merges.txt +0 -0
  34. grpo_qwen_14B_v2/checkpoints/checkpoint-500/optimizer.pt +3 -0
  35. grpo_qwen_14B_v2/checkpoints/checkpoint-500/rng_state.pth +3 -0
  36. grpo_qwen_14B_v2/checkpoints/checkpoint-500/scheduler.pt +3 -0
  37. grpo_qwen_14B_v2/checkpoints/checkpoint-500/special_tokens_map.json +31 -0
  38. grpo_qwen_14B_v2/checkpoints/checkpoint-500/tokenizer.json +3 -0
  39. grpo_qwen_14B_v2/checkpoints/checkpoint-500/tokenizer_config.json +207 -0
  40. grpo_qwen_14B_v2/checkpoints/checkpoint-500/trainer_state.json +1134 -0
  41. grpo_qwen_14B_v2/checkpoints/checkpoint-500/training_args.bin +3 -0
  42. grpo_qwen_14B_v2/checkpoints/checkpoint-500/vocab.json +0 -0
  43. grpo_qwen_14B_v2/config_resolved.yaml +115 -0
  44. grpo_qwen_14B_v2/logs/grpo_metrics.jsonl +101 -0
  45. grpo_qwen_14B_v2/logs/train.jsonl +102 -0
  46. grpo_qwen_14B_v2/wandb/debug-internal.log +12 -0
  47. grpo_qwen_14B_v2/wandb/debug.log +29 -0
  48. grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/files/config.yaml +725 -0
  49. grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/files/output.log +134 -0
  50. grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/files/requirements.txt +102 -0
.gitattributes CHANGED
@@ -64,3 +64,7 @@ dpo_qwen_14B/checkpoint-100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  dpo_qwen_14B/wandb/run-20251226_152332-r9hfat2g/run-r9hfat2g.wandb filter=lfs diff=lfs merge=lfs -text
  dpo_qwen_14B/wandb/run-20251226_152936-r1nptay8/run-r1nptay8.wandb filter=lfs diff=lfs merge=lfs -text
  dpo_qwen_14B/wandb/run-20251226_155650-wbzoafvt/run-wbzoafvt.wandb filter=lfs diff=lfs merge=lfs -text
+ grpo_qwen_14B_v2/best_adapter/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ grpo_qwen_14B_v2/checkpoints/checkpoint-400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ grpo_qwen_14B_v2/checkpoints/checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/run-jz7bptqa.wandb filter=lfs diff=lfs merge=lfs -text
grpo_qwen_14B_v2/best_adapter/README.md ADDED
@@ -0,0 +1,207 @@
+ ---
+ base_model: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
grpo_qwen_14B_v2/best_adapter/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "gate_proj",
+ "down_proj",
+ "k_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
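The config above describes a rank-16 LoRA (alpha 32, dropout 0.05) over the attention and MLP projections of the Qwen2.5-Coder-14B base checkpoint. As a minimal sketch of how such an adapter is typically loaded with PEFT, assuming the `base_model_name_or_path` from the config is available locally and using the `grpo_qwen_14B_v2/best_adapter` folder from this commit (dtype and device placement are illustrative choices, not part of the upload):

```python
# Sketch: load the GRPO LoRA adapter on top of its base model with PEFT.
# Assumes the base checkpoint and this repo's best_adapter folder exist locally.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2"   # from adapter_config.json
adapter_path = "grpo_qwen_14B_v2/best_adapter"                  # folder in this commit

tokenizer = AutoTokenizer.from_pretrained(adapter_path)
base = AutoModelForCausalLM.from_pretrained(
    base_path, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, adapter_path)           # applies the r=16 LoRA weights

# Optionally fold the adapter into the base weights for standalone deployment.
merged = model.merge_and_unload()
```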
grpo_qwen_14B_v2/best_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfddfaff53a19bede615799a30a42fe443ed6813a68f14c0a605ed1e0b75aa7a
+ size 275341720
grpo_qwen_14B_v2/best_adapter/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
grpo_qwen_14B_v2/best_adapter/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- 'You are a helpful assistant.' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + message.content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
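This is the stock Qwen2.5 ChatML template: an optional tools block is injected into the system turn, assistant tool calls are serialized inside <tool_call> tags, and tool outputs are wrapped in <tool_response> within a user turn. A minimal sketch of how the template is exercised through the tokenizer shipped in this folder; the example function and messages below are illustrative only, not part of the commit:

```python
# Sketch: render a prompt with the chat template from the best_adapter folder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("grpo_qwen_14B_v2/best_adapter")

tools = [{
    "name": "run_tests",  # hypothetical function, for illustration only
    "description": "Run the project's unit tests.",
    "parameters": {"type": "object", "properties": {"path": {"type": "string"}}},
}]
messages = [
    {"role": "system", "content": "You are a coding assistant."},
    {"role": "user", "content": "Fix the failing test in utils.py."},
]

# add_generation_prompt appends the trailing '<|im_start|>assistant\n' from the template.
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(prompt)
```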
grpo_qwen_14B_v2/best_adapter/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
grpo_qwen_14B_v2/best_adapter/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
grpo_qwen_14B_v2/best_adapter/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
grpo_qwen_14B_v2/best_adapter/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|endoftext|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
grpo_qwen_14B_v2/best_adapter/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4a4e48ed61b7c96f3bd2836ac828013a311834ab8a9542ea461fe1ff953396b
+ size 5496
grpo_qwen_14B_v2/best_adapter/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
grpo_qwen_14B_v2/checkpoints/checkpoint-400/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
grpo_qwen_14B_v2/checkpoints/checkpoint-400/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "gate_proj",
+ "down_proj",
+ "k_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
grpo_qwen_14B_v2/checkpoints/checkpoint-400/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a30d78957cdbfa18908fab85c7673f6fc53d959c42542fa7a0679c258a0bc12
+ size 275341720
grpo_qwen_14B_v2/checkpoints/checkpoint-400/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
grpo_qwen_14B_v2/checkpoints/checkpoint-400/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- 'You are a helpful assistant.' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + message.content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
grpo_qwen_14B_v2/checkpoints/checkpoint-400/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
grpo_qwen_14B_v2/checkpoints/checkpoint-400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:061d403dcb81ead17fa966d89635c2c9386b81af84fa79f38fe72b302078ce56
+ size 551070514
grpo_qwen_14B_v2/checkpoints/checkpoint-400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df773721583080a5937ae757c6cabb41d190e187d9dcba072940a8fb11a22714
+ size 14244
grpo_qwen_14B_v2/checkpoints/checkpoint-400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f06565d55295681785448bb88140fb9d768cda54cb2f3e5bfc6f0d216a81cfc8
+ size 1064
grpo_qwen_14B_v2/checkpoints/checkpoint-400/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
grpo_qwen_14B_v2/checkpoints/checkpoint-400/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
grpo_qwen_14B_v2/checkpoints/checkpoint-400/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|endoftext|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
grpo_qwen_14B_v2/checkpoints/checkpoint-400/trainer_state.json ADDED
@@ -0,0 +1,914 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.6006006006006006,
6
+ "eval_steps": 50,
7
+ "global_step": 400,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.02002002002002002,
14
+ "grad_norm": 0.05460292845964432,
15
+ "grpo_mean_advantage": -1.3560057254835556e-07,
16
+ "grpo_mean_group_score": 0.5922331809997559,
17
+ "grpo_mean_kl_div": 0.0,
18
+ "grpo_std_advantage": 3.0318567496578908e-06,
19
+ "learning_rate": 8.000000000000001e-07,
20
+ "loss": 0.007,
21
+ "step": 5
22
+ },
23
+ {
24
+ "epoch": 0.04004004004004004,
25
+ "grad_norm": 0.0679207444190979,
26
+ "grpo_mean_advantage": 3.6619603633880615e-06,
27
+ "grpo_mean_group_score": 0.5561589002609253,
28
+ "grpo_mean_kl_div": 0.0,
29
+ "grpo_std_advantage": 1.6246918676188216e-05,
30
+ "learning_rate": 1.8000000000000001e-06,
31
+ "loss": 0.0107,
32
+ "step": 10
33
+ },
34
+ {
35
+ "epoch": 0.06006006006006006,
36
+ "grad_norm": 0.05788416787981987,
37
+ "grpo_mean_advantage": -1.0654330395709621e-07,
38
+ "grpo_mean_group_score": 0.5759152173995972,
39
+ "grpo_mean_kl_div": 0.0,
40
+ "grpo_std_advantage": 5.399440965447866e-07,
41
+ "learning_rate": 2.8000000000000003e-06,
42
+ "loss": 0.007,
43
+ "step": 15
44
+ },
45
+ {
46
+ "epoch": 0.08008008008008008,
47
+ "grad_norm": 0.0746568813920021,
48
+ "grpo_mean_advantage": -5.871057737749652e-07,
49
+ "grpo_mean_group_score": 0.5127314329147339,
50
+ "grpo_mean_kl_div": 0.0,
51
+ "grpo_std_advantage": 2.6951597646984737e-06,
52
+ "learning_rate": 3.8000000000000005e-06,
53
+ "loss": 0.0246,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.1001001001001001,
58
+ "grad_norm": 0.11442846059799194,
59
+ "grpo_mean_advantage": 6.370246410369873e-07,
60
+ "grpo_mean_group_score": 0.539706826210022,
61
+ "grpo_mean_kl_div": 0.0,
62
+ "grpo_std_advantage": 2.8908377771585947e-06,
63
+ "learning_rate": 4.800000000000001e-06,
64
+ "loss": 0.0337,
65
+ "step": 25
66
+ },
67
+ {
68
+ "epoch": 0.12012012012012012,
69
+ "grad_norm": 0.05778791010379791,
70
+ "grpo_mean_advantage": 6.705522359595761e-09,
71
+ "grpo_mean_group_score": 0.5812538862228394,
72
+ "grpo_mean_kl_div": 0.0,
73
+ "grpo_std_advantage": 6.189450800775376e-07,
74
+ "learning_rate": 4.999125183044924e-06,
75
+ "loss": 0.0171,
76
+ "step": 30
77
+ },
78
+ {
79
+ "epoch": 0.14014014014014015,
80
+ "grad_norm": 0.05819695070385933,
81
+ "grpo_mean_advantage": 3.859400692363124e-07,
82
+ "grpo_mean_group_score": 0.5909844636917114,
83
+ "grpo_mean_kl_div": 0.0,
84
+ "grpo_std_advantage": 1.6833292875162442e-06,
85
+ "learning_rate": 4.995572288443412e-06,
86
+ "loss": 0.0145,
87
+ "step": 35
88
+ },
89
+ {
90
+ "epoch": 0.16016016016016016,
91
+ "grad_norm": 0.07968433201313019,
92
+ "grpo_mean_advantage": 2.600252742013254e-07,
93
+ "grpo_mean_group_score": 0.5630953907966614,
94
+ "grpo_mean_kl_div": 0.0,
95
+ "grpo_std_advantage": 1.4095899132371414e-06,
96
+ "learning_rate": 4.98929052218411e-06,
97
+ "loss": 0.0196,
98
+ "step": 40
99
+ },
100
+ {
101
+ "epoch": 0.18018018018018017,
102
+ "grad_norm": 0.0733402892947197,
103
+ "grpo_mean_advantage": -1.2591480924584175e-07,
104
+ "grpo_mean_group_score": 0.5604403614997864,
105
+ "grpo_mean_kl_div": 0.0,
106
+ "grpo_std_advantage": 1.0309080380466185e-06,
107
+ "learning_rate": 4.980286753286196e-06,
108
+ "loss": 0.0186,
109
+ "step": 45
110
+ },
111
+ {
112
+ "epoch": 0.2002002002002002,
113
+ "grad_norm": 0.07136482000350952,
114
+ "grpo_mean_advantage": -2.808868941883702e-07,
115
+ "grpo_mean_group_score": 0.5971035957336426,
116
+ "grpo_mean_kl_div": 0.0,
117
+ "grpo_std_advantage": 1.5696078889959608e-06,
118
+ "learning_rate": 4.9685708272387645e-06,
119
+ "loss": 0.0286,
120
+ "step": 50
121
+ },
122
+ {
123
+ "epoch": 0.22022022022022023,
124
+ "grad_norm": 0.08851475268602371,
125
+ "grpo_mean_advantage": 2.6822089438383045e-08,
126
+ "grpo_mean_group_score": 0.5892971754074097,
127
+ "grpo_mean_kl_div": 0.0,
128
+ "grpo_std_advantage": 3.7878271541558206e-07,
129
+ "learning_rate": 4.9541555552349404e-06,
130
+ "loss": 0.0054,
131
+ "step": 55
132
+ },
133
+ {
134
+ "epoch": 0.24024024024024024,
135
+ "grad_norm": 0.07778509706258774,
136
+ "grpo_mean_advantage": -5.662441182607836e-08,
137
+ "grpo_mean_group_score": 0.564322292804718,
138
+ "grpo_mean_kl_div": 0.0,
139
+ "grpo_std_advantage": 6.128998393251095e-07,
140
+ "learning_rate": 4.9370567001630155e-06,
141
+ "loss": -0.0074,
142
+ "step": 60
143
+ },
144
+ {
145
+ "epoch": 0.2602602602602603,
146
+ "grad_norm": 0.08740051090717316,
147
+ "grpo_mean_advantage": -1.5944242193199898e-07,
148
+ "grpo_mean_group_score": 0.562497615814209,
149
+ "grpo_mean_kl_div": 0.0,
150
+ "grpo_std_advantage": 1.6374274309782777e-06,
151
+ "learning_rate": 4.917292959369968e-06,
152
+ "loss": 0.0145,
153
+ "step": 65
154
+ },
155
+ {
156
+ "epoch": 0.2802802802802803,
157
+ "grad_norm": 0.19070060551166534,
158
+ "grpo_mean_advantage": 1.6838312433264946e-07,
159
+ "grpo_mean_group_score": 0.5904761552810669,
160
+ "grpo_mean_kl_div": 0.0,
161
+ "grpo_std_advantage": 8.536571272088622e-07,
162
+ "learning_rate": 4.8948859442161876e-06,
163
+ "loss": 0.0257,
164
+ "step": 70
165
+ },
166
+ {
167
+ "epoch": 0.3003003003003003,
168
+ "grad_norm": 0.07321271300315857,
169
+ "grpo_mean_advantage": 1.1175870895385742e-07,
170
+ "grpo_mean_group_score": 0.5765624046325684,
171
+ "grpo_mean_kl_div": 0.0,
172
+ "grpo_std_advantage": 6.451961667153228e-07,
173
+ "learning_rate": 4.869860156443768e-06,
174
+ "loss": 0.0024,
175
+ "step": 75
176
+ },
177
+ {
178
+ "epoch": 0.3203203203203203,
179
+ "grad_norm": 0.07126748561859131,
180
+ "grpo_mean_advantage": -1.4603138254187797e-07,
181
+ "grpo_mean_group_score": 0.5858271718025208,
182
+ "grpo_mean_kl_div": 0.0,
183
+ "grpo_std_advantage": 1.1309343790344428e-06,
184
+ "learning_rate": 4.842242961384211e-06,
185
+ "loss": 0.0277,
186
+ "step": 80
187
+ },
188
+ {
189
+ "epoch": 0.34034034034034033,
190
+ "grad_norm": 0.08629189431667328,
191
+ "grpo_mean_advantage": -1.817941665649414e-06,
192
+ "grpo_mean_group_score": 0.5871662497520447,
193
+ "grpo_mean_kl_div": 0.0,
194
+ "grpo_std_advantage": 1.1141768482048064e-05,
195
+ "learning_rate": 4.812064558034847e-06,
196
+ "loss": 0.0246,
197
+ "step": 85
198
+ },
199
+ {
200
+ "epoch": 0.36036036036036034,
201
+ "grad_norm": 0.0998779758810997,
202
+ "grpo_mean_advantage": 1.8179416372277046e-07,
203
+ "grpo_mean_group_score": 0.5330992937088013,
204
+ "grpo_mean_kl_div": 0.0,
205
+ "grpo_std_advantage": 6.210335072864837e-07,
206
+ "learning_rate": 4.779357946036662e-06,
207
+ "loss": 0.0056,
208
+ "step": 90
209
+ },
210
+ {
211
+ "epoch": 0.38038038038038036,
212
+ "grad_norm": 0.10614689439535141,
213
+ "grpo_mean_advantage": -2.972781771859445e-07,
214
+ "grpo_mean_group_score": 0.5265295505523682,
215
+ "grpo_mean_kl_div": 0.0,
216
+ "grpo_std_advantage": 3.1582342217006953e-06,
217
+ "learning_rate": 4.74415888958968e-06,
218
+ "loss": 0.0053,
219
+ "step": 95
220
+ },
221
+ {
222
+ "epoch": 0.4004004004004004,
223
+ "grad_norm": 0.10345634073019028,
224
+ "grpo_mean_advantage": -7.033348197182931e-07,
225
+ "grpo_mean_group_score": 0.5660771131515503,
226
+ "grpo_mean_kl_div": 0.0,
227
+ "grpo_std_advantage": 4.245831405569334e-06,
228
+ "learning_rate": 4.706505878345343e-06,
229
+ "loss": 0.0134,
230
+ "step": 100
231
+ },
232
+ {
233
+ "epoch": 0.42042042042042044,
234
+ "grad_norm": 0.10077933222055435,
235
+ "grpo_mean_advantage": 1.1920928955078125e-07,
236
+ "grpo_mean_group_score": 0.57631915807724,
237
+ "grpo_mean_kl_div": 0.0,
238
+ "grpo_std_advantage": 3.2809634831210133e-07,
239
+ "learning_rate": 4.666440085318626e-06,
240
+ "loss": 0.0004,
241
+ "step": 105
242
+ },
243
+ {
244
+ "epoch": 0.44044044044044045,
245
+ "grad_norm": 0.09548182785511017,
246
+ "grpo_mean_advantage": -4.0978193283081055e-07,
247
+ "grpo_mean_group_score": 0.546563982963562,
248
+ "grpo_mean_kl_div": 0.0,
249
+ "grpo_std_advantage": 6.0397578636184335e-06,
250
+ "learning_rate": 4.624005321865968e-06,
251
+ "loss": 0.0033,
252
+ "step": 110
253
+ },
254
+ {
255
+ "epoch": 0.46046046046046046,
256
+ "grad_norm": 0.09417816251516342,
257
+ "grpo_mean_advantage": -1.467764434437413e-07,
258
+ "grpo_mean_group_score": 0.5519219636917114,
259
+ "grpo_mean_kl_div": 0.0,
260
+ "grpo_std_advantage": 2.2689375782647403e-06,
261
+ "learning_rate": 4.57924798977818e-06,
262
+ "loss": 0.0095,
263
+ "step": 115
264
+ },
265
+ {
266
+ "epoch": 0.4804804804804805,
267
+ "grad_norm": 0.10022275149822235,
268
+ "grpo_mean_advantage": -5.215406329028838e-09,
269
+ "grpo_mean_group_score": 0.5490407943725586,
270
+ "grpo_mean_kl_div": 0.0,
271
+ "grpo_std_advantage": 7.929010621410271e-07,
272
+ "learning_rate": 4.532217030540781e-06,
273
+ "loss": 0.0006,
274
+ "step": 120
275
+ },
276
+ {
277
+ "epoch": 0.5005005005005005,
278
+ "grad_norm": 0.14057794213294983,
279
+ "grpo_mean_advantage": -5.7369469175228005e-08,
280
+ "grpo_mean_group_score": 0.5646580457687378,
281
+ "grpo_mean_kl_div": 0.0,
282
+ "grpo_std_advantage": 1.2823379620385822e-06,
283
+ "learning_rate": 4.482963871817195e-06,
284
+ "loss": -0.0046,
285
+ "step": 125
286
+ },
287
+ {
288
+ "epoch": 0.5205205205205206,
289
+ "grad_norm": 0.12420658767223358,
290
+ "grpo_mean_advantage": 2.9876827056796174e-07,
291
+ "grpo_mean_group_score": 0.6111599802970886,
292
+ "grpo_mean_kl_div": 0.0,
293
+ "grpo_std_advantage": 1.0496698905626545e-06,
294
+ "learning_rate": 4.4315423712133595e-06,
295
+ "loss": -0.003,
296
+ "step": 130
297
+ },
298
+ {
299
+ "epoch": 0.5405405405405406,
300
+ "grad_norm": 0.14342808723449707,
301
+ "grpo_mean_advantage": 1.5869736103013565e-07,
302
+ "grpo_mean_group_score": 0.5619662404060364,
303
+ "grpo_mean_kl_div": 0.0,
304
+ "grpo_std_advantage": 1.2748531617035042e-06,
305
+ "learning_rate": 4.378008757385222e-06,
306
+ "loss": 0.0154,
307
+ "step": 135
308
+ },
309
+ {
310
+ "epoch": 0.5605605605605606,
311
+ "grad_norm": 0.14729444682598114,
312
+ "grpo_mean_advantage": 3.0100346748440643e-07,
313
+ "grpo_mean_group_score": 0.5795454978942871,
314
+ "grpo_mean_kl_div": 0.0,
315
+ "grpo_std_advantage": 2.4499684059264837e-06,
316
+ "learning_rate": 4.322421568553529e-06,
317
+ "loss": -0.0262,
318
+ "step": 140
319
+ },
320
+ {
321
+ "epoch": 0.5805805805805806,
322
+ "grad_norm": 0.15249410271644592,
323
+ "grpo_mean_advantage": -3.233552092751779e-07,
324
+ "grpo_mean_group_score": 0.5804953575134277,
325
+ "grpo_mean_kl_div": 0.0,
326
+ "grpo_std_advantage": 1.248456669600273e-06,
327
+ "learning_rate": 4.2648415884931476e-06,
328
+ "loss": 0.0018,
329
+ "step": 145
330
+ },
331
+ {
332
+ "epoch": 0.6006006006006006,
333
+ "grad_norm": 0.1841023564338684,
334
+ "grpo_mean_advantage": 3.2261013416245987e-07,
335
+ "grpo_mean_group_score": 0.5628539323806763,
336
+ "grpo_mean_kl_div": 0.0,
337
+ "grpo_std_advantage": 1.4773489738217904e-06,
338
+ "learning_rate": 4.205331780066892e-06,
339
+ "loss": -0.017,
340
+ "step": 150
341
+ },
342
+ {
343
+ "epoch": 0.6206206206206206,
344
+ "grad_norm": 0.18597163259983063,
345
+ "grpo_mean_advantage": -2.5331974029541016e-07,
346
+ "grpo_mean_group_score": 0.5727725625038147,
347
+ "grpo_mean_kl_div": 0.0,
348
+ "grpo_std_advantage": 1.5092309695319273e-06,
349
+ "learning_rate": 4.1439572163765615e-06,
350
+ "loss": 0.0044,
351
+ "step": 155
352
+ },
353
+ {
354
+ "epoch": 0.6406406406406406,
355
+ "grad_norm": 0.18310388922691345,
356
+ "grpo_mean_advantage": -6.780028627417778e-08,
357
+ "grpo_mean_group_score": 0.5833909511566162,
358
+ "grpo_mean_kl_div": 0.0,
359
+ "grpo_std_advantage": 8.550978805033083e-07,
360
+ "learning_rate": 4.0807850096064605e-06,
361
+ "loss": -0.005,
362
+ "step": 160
363
+ },
364
+ {
365
+ "epoch": 0.6606606606606606,
366
+ "grad_norm": 0.2192923128604889,
367
+ "grpo_mean_advantage": -5.587935447692871e-08,
368
+ "grpo_mean_group_score": 0.5742615461349487,
369
+ "grpo_mean_kl_div": 0.0,
370
+ "grpo_std_advantage": 3.564579174053506e-07,
371
+ "learning_rate": 4.015884237637206e-06,
372
+ "loss": -0.015,
373
+ "step": 165
374
+ },
375
+ {
376
+ "epoch": 0.6806806806806807,
377
+ "grad_norm": 0.16708803176879883,
378
+ "grpo_mean_advantage": -5.327165126800537e-07,
379
+ "grpo_mean_group_score": 0.5758188962936401,
380
+ "grpo_mean_kl_div": 0.0,
381
+ "grpo_std_advantage": 2.309018327650847e-06,
382
+ "learning_rate": 3.949325868510083e-06,
383
+ "loss": -0.0314,
384
+ "step": 170
385
+ },
386
+ {
387
+ "epoch": 0.7007007007007007,
388
+ "grad_norm": 0.3401262164115906,
389
+ "grpo_mean_advantage": 5.863606702405377e-07,
390
+ "grpo_mean_group_score": 0.5767683982849121,
391
+ "grpo_mean_kl_div": 0.0,
392
+ "grpo_std_advantage": 2.4449204829579685e-06,
393
+ "learning_rate": 3.881182682824534e-06,
394
+ "loss": -0.0441,
395
+ "step": 175
396
+ },
397
+ {
398
+ "epoch": 0.7207207207207207,
399
+ "grad_norm": 0.1931898146867752,
400
+ "grpo_mean_advantage": 3.2186508747145126e-07,
401
+ "grpo_mean_group_score": 0.586772084236145,
402
+ "grpo_mean_kl_div": 0.0,
403
+ "grpo_std_advantage": 2.293551688126172e-06,
404
+ "learning_rate": 3.811529194153635e-06,
405
+ "loss": -0.0162,
406
+ "step": 180
407
+ },
408
+ {
409
+ "epoch": 0.7407407407407407,
410
+ "grad_norm": 0.2537969648838043,
411
+ "grpo_mean_advantage": -4.470348358154297e-08,
412
+ "grpo_mean_group_score": 0.549396276473999,
413
+ "grpo_mean_kl_div": 0.0,
414
+ "grpo_std_advantage": 3.7067667335577426e-07,
415
+ "learning_rate": 3.7404415675646054e-06,
416
+ "loss": -0.0386,
417
+ "step": 185
418
+ },
419
+ {
420
+ "epoch": 0.7607607607607607,
421
+ "grad_norm": 0.20326584577560425,
422
+ "grpo_mean_advantage": -2.1010637851759384e-07,
423
+ "grpo_mean_group_score": 0.5798425078392029,
424
+ "grpo_mean_kl_div": 0.0,
425
+ "grpo_std_advantage": 1.1695076409523608e-06,
426
+ "learning_rate": 3.667997536333424e-06,
427
+ "loss": -0.037,
428
+ "step": 190
429
+ },
430
+ {
431
+ "epoch": 0.7807807807807807,
432
+ "grad_norm": 0.25048357248306274,
433
+ "grpo_mean_advantage": 1.765787658314366e-07,
434
+ "grpo_mean_group_score": 0.5584167838096619,
435
+ "grpo_mean_kl_div": 0.0,
436
+ "grpo_std_advantage": 2.429934738756856e-06,
437
+ "learning_rate": 3.59427631694463e-06,
438
+ "loss": -0.0292,
439
+ "step": 195
440
+ },
441
+ {
442
+ "epoch": 0.8008008008008008,
443
+ "grad_norm": 0.2687569260597229,
444
+ "grpo_mean_advantage": 1.6540289493605087e-07,
445
+ "grpo_mean_group_score": 0.5676193237304688,
446
+ "grpo_mean_kl_div": 0.0,
447
+ "grpo_std_advantage": 2.6342788714828203e-06,
448
+ "learning_rate": 3.5193585224692595e-06,
449
+ "loss": -0.0454,
450
+ "step": 200
451
+ },
452
+ {
453
+ "epoch": 0.8208208208208209,
454
+ "grad_norm": 0.22301620244979858,
455
+ "grpo_mean_advantage": -1.0944902442133753e-06,
456
+ "grpo_mean_group_score": 0.5669739842414856,
457
+ "grpo_mean_kl_div": 0.0,
458
+ "grpo_std_advantage": 5.346942998585291e-06,
459
+ "learning_rate": 3.44332607441564e-06,
460
+ "loss": -0.0423,
461
+ "step": 205
462
+ },
463
+ {
464
+ "epoch": 0.8408408408408409,
465
+ "grad_norm": 0.3040211498737335,
466
+ "grpo_mean_advantage": 2.4065374759629776e-07,
467
+ "grpo_mean_group_score": 0.5922158360481262,
468
+ "grpo_mean_kl_div": 0.0,
469
+ "grpo_std_advantage": 1.6327536513927043e-06,
470
+ "learning_rate": 3.3662621131494204e-06,
471
+ "loss": -0.0857,
472
+ "step": 210
473
+ },
474
+ {
475
+ "epoch": 0.8608608608608609,
476
+ "grad_norm": 0.27231141924858093,
477
+ "grpo_mean_advantage": -5.21540641784668e-08,
478
+ "grpo_mean_group_score": 0.5473950505256653,
479
+ "grpo_mean_kl_div": 0.0,
480
+ "grpo_std_advantage": 5.847922466273303e-07,
481
+ "learning_rate": 3.2882509069808044e-06,
482
+ "loss": -0.0278,
483
+ "step": 215
484
+ },
485
+ {
486
+ "epoch": 0.8808808808808809,
487
+ "grad_norm": 0.3571636378765106,
488
+ "grpo_mean_advantage": 6.541609991472797e-07,
489
+ "grpo_mean_group_score": 0.5880032777786255,
490
+ "grpo_mean_kl_div": 0.0,
491
+ "grpo_std_advantage": 4.072162937518442e-06,
492
+ "learning_rate": 3.2093777600183873e-06,
493
+ "loss": -0.0727,
494
+ "step": 220
495
+ },
496
+ {
497
+ "epoch": 0.9009009009009009,
498
+ "grad_norm": 0.306273490190506,
499
+ "grpo_mean_advantage": -1.2218951894737984e-07,
500
+ "grpo_mean_group_score": 0.5835092663764954,
501
+ "grpo_mean_kl_div": 0.0,
502
+ "grpo_std_advantage": 4.386006935419573e-07,
503
+ "learning_rate": 3.1297289188903705e-06,
504
+ "loss": -0.0464,
505
+ "step": 225
506
+ },
507
+ {
508
+ "epoch": 0.9209209209209209,
509
+ "grad_norm": 0.2700377106666565,
510
+ "grpo_mean_advantage": 1.7605722177904681e-06,
511
+ "grpo_mean_group_score": 0.5394966006278992,
512
+ "grpo_mean_kl_div": 0.0,
513
+ "grpo_std_advantage": 8.007580618141219e-06,
514
+ "learning_rate": 3.049391478435133e-06,
515
+ "loss": -0.0295,
516
+ "step": 230
517
+ },
518
+ {
519
+ "epoch": 0.9409409409409409,
520
+ "grad_norm": 0.39531761407852173,
521
+ "grpo_mean_advantage": -3.3080578987210174e-07,
522
+ "grpo_mean_group_score": 0.5687432289123535,
523
+ "grpo_mean_kl_div": 0.0,
524
+ "grpo_std_advantage": 1.551636614749441e-06,
525
+ "learning_rate": 2.9684532864643123e-06,
526
+ "loss": -0.031,
527
+ "step": 235
528
+ },
529
+ {
530
+ "epoch": 0.960960960960961,
531
+ "grad_norm": 0.5987040996551514,
532
+ "grpo_mean_advantage": 2.712011450967111e-07,
533
+ "grpo_mean_group_score": 0.5550583600997925,
534
+ "grpo_mean_kl_div": 0.0,
535
+ "grpo_std_advantage": 1.4400844747797237e-06,
536
+ "learning_rate": 2.887002847702504e-06,
537
+ "loss": -0.0789,
538
+ "step": 240
539
+ },
540
+ {
541
+ "epoch": 0.980980980980981,
542
+ "grad_norm": 0.5680716037750244,
543
+ "grpo_mean_advantage": -3.2857059295565705e-07,
544
+ "grpo_mean_group_score": 0.558111310005188,
545
+ "grpo_mean_kl_div": 0.0,
546
+ "grpo_std_advantage": 2.105091425619321e-06,
547
+ "learning_rate": 2.8051292270086506e-06,
548
+ "loss": -0.1131,
549
+ "step": 245
550
+ },
551
+ {
552
+ "epoch": 1.0,
553
+ "grad_norm": 0.6204046010971069,
554
+ "grpo_mean_advantage": 4.470348358154297e-08,
555
+ "grpo_mean_group_score": 0.6196198463439941,
556
+ "grpo_mean_kl_div": 0.0,
557
+ "grpo_std_advantage": 5.315724820320611e-07,
558
+ "learning_rate": 2.722921951984927e-06,
559
+ "loss": -0.2232,
560
+ "step": 250
561
+ },
562
+ {
563
+ "epoch": 1.02002002002002,
564
+ "grad_norm": 0.8389026522636414,
565
+ "grpo_mean_advantage": 9.290873776990338e-07,
566
+ "grpo_mean_group_score": 0.582168459892273,
567
+ "grpo_mean_kl_div": 0.0,
568
+ "grpo_std_advantage": 4.219644324621186e-06,
569
+ "learning_rate": 2.640470915079614e-06,
570
+ "loss": -0.1363,
571
+ "step": 255
572
+ },
573
+ {
574
+ "epoch": 1.04004004004004,
575
+ "grad_norm": 0.9067686796188354,
576
+ "grpo_mean_advantage": 2.533197474008375e-08,
577
+ "grpo_mean_group_score": 0.5551307797431946,
578
+ "grpo_mean_kl_div": 0.0,
579
+ "grpo_std_advantage": 1.6600588992332632e-07,
580
+ "learning_rate": 2.557866275291035e-06,
581
+ "loss": -0.1868,
582
+ "step": 260
583
+ },
584
+ {
585
+ "epoch": 1.06006006006006,
586
+ "grad_norm": 0.9277902841567993,
587
+ "grpo_mean_advantage": -5.662441182607836e-08,
588
+ "grpo_mean_group_score": 0.535040020942688,
589
+ "grpo_mean_kl_div": 0.0,
590
+ "grpo_std_advantage": 1.0909400316450046e-06,
591
+ "learning_rate": 2.4751983595800093e-06,
592
+ "loss": -0.1792,
593
+ "step": 265
594
+ },
595
+ {
596
+ "epoch": 1.08008008008008,
597
+ "grad_norm": 1.0715463161468506,
598
+ "grpo_mean_advantage": -9.536743306171047e-08,
599
+ "grpo_mean_group_score": 0.5673571825027466,
600
+ "grpo_mean_kl_div": 0.0,
601
+ "grpo_std_advantage": 5.838213610331877e-07,
602
+ "learning_rate": 2.392557564098649e-06,
603
+ "loss": -0.1691,
604
+ "step": 270
605
+ },
606
+ {
607
+ "epoch": 1.1001001001001,
608
+ "grad_norm": 0.7759184837341309,
609
+ "grpo_mean_advantage": 3.278255533700758e-08,
610
+ "grpo_mean_group_score": 0.5874732732772827,
611
+ "grpo_mean_kl_div": 0.0,
612
+ "grpo_std_advantage": 9.317170679423725e-07,
613
+ "learning_rate": 2.3100342553434924e-06,
614
+ "loss": -0.1655,
615
+ "step": 275
616
+ },
617
+ {
618
+ "epoch": 1.12012012012012,
619
+ "grad_norm": 0.9387398958206177,
620
+ "grpo_mean_advantage": -1.206994113545079e-07,
621
+ "grpo_mean_group_score": 0.5569106340408325,
622
+ "grpo_mean_kl_div": 0.0,
623
+ "grpo_std_advantage": 6.201085511747806e-07,
624
+ "learning_rate": 2.2277186713410688e-06,
625
+ "loss": -0.1821,
626
+ "step": 280
627
+ },
628
+ {
629
+ "epoch": 1.14014014014014,
630
+ "grad_norm": 1.6132302284240723,
631
+ "grpo_mean_advantage": 4.470348358154297e-08,
632
+ "grpo_mean_group_score": 0.5578873157501221,
633
+ "grpo_mean_kl_div": 0.0,
634
+ "grpo_std_advantage": 6.115651558502577e-07,
635
+ "learning_rate": 2.1457008229739395e-06,
636
+ "loss": -0.2102,
637
+ "step": 285
638
+ },
639
+ {
640
+ "epoch": 1.16016016016016,
641
+ "grad_norm": 0.8679026961326599,
642
+ "grpo_mean_advantage": -3.3453108017056365e-07,
643
+ "grpo_mean_group_score": 0.5735999345779419,
644
+ "grpo_mean_kl_div": 0.0,
645
+ "grpo_std_advantage": 3.5326345368957845e-06,
646
+ "learning_rate": 2.0640703955551214e-06,
647
+ "loss": -0.2937,
648
+ "step": 290
649
+ },
650
+ {
651
+ "epoch": 1.1801801801801801,
652
+ "grad_norm": 1.0550166368484497,
653
+ "grpo_mean_advantage": -1.110136480519941e-07,
654
+ "grpo_mean_group_score": 0.5626259446144104,
655
+ "grpo_mean_kl_div": 0.0,
656
+ "grpo_std_advantage": 4.731904823529476e-07,
657
+ "learning_rate": 1.9829166507585084e-06,
658
+ "loss": -0.2598,
659
+ "step": 295
660
+ },
661
+ {
662
+ "epoch": 1.2002002002002001,
663
+ "grad_norm": 1.2819372415542603,
664
+ "grpo_mean_advantage": -5.08874677507265e-07,
665
+ "grpo_mean_group_score": 0.5463050603866577,
666
+ "grpo_mean_kl_div": 0.0,
667
+ "grpo_std_advantage": 1.840126174101897e-06,
668
+ "learning_rate": 1.90232832901255e-06,
669
+ "loss": -0.2546,
670
+ "step": 300
671
+ },
672
+ {
673
+ "epoch": 1.2202202202202201,
674
+ "grad_norm": 1.0188143253326416,
675
+ "grpo_mean_advantage": 1.01327898960335e-07,
676
+ "grpo_mean_group_score": 0.5352144241333008,
677
+ "grpo_mean_kl_div": 0.0,
678
+ "grpo_std_advantage": 7.798533943059738e-07,
679
+ "learning_rate": 1.82239355246389e-06,
680
+ "loss": -0.1809,
681
+ "step": 305
682
+ },
683
+ {
684
+ "epoch": 1.2402402402402402,
685
+ "grad_norm": 2.0709052085876465,
686
+ "grpo_mean_advantage": 1.341104507446289e-07,
687
+ "grpo_mean_group_score": 0.5547868013381958,
688
+ "grpo_mean_kl_div": 0.0,
689
+ "grpo_std_advantage": 7.821902840987605e-07,
690
+ "learning_rate": 1.7431997286170923e-06,
691
+ "loss": -0.3559,
692
+ "step": 310
693
+ },
694
+ {
695
+ "epoch": 1.2602602602602602,
696
+ "grad_norm": 1.8516215085983276,
697
+ "grpo_mean_advantage": 9.015202806494926e-08,
698
+ "grpo_mean_group_score": 0.5859472751617432,
699
+ "grpo_mean_kl_div": 0.0,
700
+ "grpo_std_advantage": 1.0693488547985908e-06,
701
+ "learning_rate": 1.6648334547558227e-06,
702
+ "loss": -0.3874,
703
+ "step": 315
704
+ },
705
+ {
706
+ "epoch": 1.2802802802802802,
707
+ "grad_norm": 1.283104419708252,
708
+ "grpo_mean_advantage": -2.443790378947597e-07,
709
+ "grpo_mean_group_score": 0.5751550793647766,
710
+ "grpo_mean_kl_div": 0.0,
711
+ "grpo_std_advantage": 1.183122208203713e-06,
712
+ "learning_rate": 1.5873804232499862e-06,
713
+ "loss": -0.3467,
714
+ "step": 320
715
+ },
716
+ {
717
+ "epoch": 1.3003003003003002,
718
+ "grad_norm": 1.4108576774597168,
719
+ "grpo_mean_advantage": -6.705522537231445e-08,
720
+ "grpo_mean_group_score": 0.5497723817825317,
721
+ "grpo_mean_kl_div": 0.0,
722
+ "grpo_std_advantage": 6.109748937888071e-07,
723
+ "learning_rate": 1.51092532785238e-06,
724
+ "loss": -0.1703,
725
+ "step": 325
726
+ },
727
+ {
728
+ "epoch": 1.3203203203203202,
729
+ "grad_norm": 1.0421361923217773,
730
+ "grpo_mean_advantage": -1.639127766850379e-08,
731
+ "grpo_mean_group_score": 0.55989670753479,
732
+ "grpo_mean_kl_div": 0.0,
733
+ "grpo_std_advantage": 5.529495297196263e-07,
734
+ "learning_rate": 1.4355517710873184e-06,
735
+ "loss": -0.2918,
736
+ "step": 330
737
+ },
738
+ {
739
+ "epoch": 1.3403403403403402,
740
+ "grad_norm": 1.3465828895568848,
741
+ "grpo_mean_advantage": 4.418194237132411e-07,
742
+ "grpo_mean_group_score": 0.5809233784675598,
743
+ "grpo_mean_kl_div": 0.0,
744
+ "grpo_std_advantage": 2.9275292945385445e-06,
745
+ "learning_rate": 1.361342172832502e-06,
746
+ "loss": -0.3069,
747
+ "step": 335
748
+ },
749
+ {
750
+ "epoch": 1.3603603603603602,
751
+ "grad_norm": 1.1959459781646729,
752
+ "grpo_mean_advantage": 9.685754776000977e-08,
753
+ "grpo_mean_group_score": 0.5568087100982666,
754
+ "grpo_mean_kl_div": 0.0,
755
+ "grpo_std_advantage": 3.754235251562932e-07,
756
+ "learning_rate": 1.2883776801940884e-06,
757
+ "loss": -0.5594,
758
+ "step": 340
759
+ },
760
+ {
761
+ "epoch": 1.3803803803803802,
762
+ "grad_norm": 1.8967422246932983,
763
+ "grpo_mean_advantage": -2.384185791015625e-07,
764
+ "grpo_mean_group_score": 0.5655568838119507,
765
+ "grpo_mean_kl_div": 0.0,
766
+ "grpo_std_advantage": 6.821086913078034e-07,
767
+ "learning_rate": 1.216738078773522e-06,
768
+ "loss": -0.4102,
769
+ "step": 345
770
+ },
771
+ {
772
+ "epoch": 1.4004004004004005,
773
+ "grad_norm": 2.221132755279541,
774
+ "grpo_mean_advantage": -8.717179156292332e-08,
775
+ "grpo_mean_group_score": 0.6089578866958618,
776
+ "grpo_mean_kl_div": 0.0,
777
+ "grpo_std_advantage": 2.500940354366321e-06,
778
+ "learning_rate": 1.146501705423155e-06,
779
+ "loss": -0.338,
780
+ "step": 350
781
+ },
782
+ {
783
+ "epoch": 1.4204204204204205,
784
+ "grad_norm": 2.3640377521514893,
785
+ "grpo_mean_advantage": 2.1606683731079102e-07,
786
+ "grpo_mean_group_score": 0.6129671335220337,
787
+ "grpo_mean_kl_div": 0.0,
788
+ "grpo_std_advantage": 1.4568390724889468e-06,
789
+ "learning_rate": 1.0777453625860474e-06,
790
+ "loss": -0.4985,
791
+ "step": 355
792
+ },
793
+ {
794
+ "epoch": 1.4404404404404405,
795
+ "grad_norm": 1.9084734916687012,
796
+ "grpo_mean_advantage": -3.725290298461914e-09,
797
+ "grpo_mean_group_score": 0.5562310814857483,
798
+ "grpo_mean_kl_div": 0.0,
799
+ "grpo_std_advantage": 2.965894054796081e-06,
800
+ "learning_rate": 1.0105442343136184e-06,
801
+ "loss": -0.4347,
802
+ "step": 360
803
+ },
804
+ {
805
+ "epoch": 1.4604604604604605,
806
+ "grad_norm": 1.6063904762268066,
807
+ "grpo_mean_advantage": 4.313886279305734e-07,
808
+ "grpo_mean_group_score": 0.5884170532226562,
809
+ "grpo_mean_kl_div": 0.0,
810
+ "grpo_std_advantage": 1.9621948013082147e-06,
811
+ "learning_rate": 9.449718040529987e-07,
812
+ "loss": -0.6217,
813
+ "step": 365
814
+ },
815
+ {
816
+ "epoch": 1.4804804804804805,
817
+ "grad_norm": 2.114664077758789,
818
+ "grpo_mean_advantage": 2.0489096641540527e-07,
819
+ "grpo_mean_group_score": 0.5795440673828125,
820
+ "grpo_mean_kl_div": 0.0,
821
+ "grpo_std_advantage": 1.0235522722723545e-06,
822
+ "learning_rate": 8.810997742939531e-07,
823
+ "loss": -0.5364,
824
+ "step": 370
825
+ },
826
+ {
827
+ "epoch": 1.5005005005005005,
828
+ "grad_norm": 1.8450465202331543,
829
+ "grpo_mean_advantage": -1.4185905001795618e-06,
830
+ "grpo_mean_group_score": 0.5607603788375854,
831
+ "grpo_mean_kl_div": 0.0,
832
+ "grpo_std_advantage": 1.0947338523692451e-05,
833
+ "learning_rate": 8.189979881632634e-07,
834
+ "loss": -0.4798,
835
+ "step": 375
836
+ },
837
+ {
838
+ "epoch": 1.5205205205205206,
839
+ "grad_norm": 2.673438787460327,
840
+ "grpo_mean_advantage": -1.758337049295733e-07,
841
+ "grpo_mean_group_score": 0.5381432771682739,
842
+ "grpo_mean_kl_div": 0.0,
843
+ "grpo_std_advantage": 9.663675655247062e-07,
844
+ "learning_rate": 7.587343530522945e-07,
845
+ "loss": -0.4805,
846
+ "step": 380
847
+ },
848
+ {
849
+ "epoch": 1.5405405405405406,
850
+ "grad_norm": 2.2263550758361816,
851
+ "grpo_mean_advantage": -6.973743325033865e-07,
852
+ "grpo_mean_group_score": 0.5528443455696106,
853
+ "grpo_mean_kl_div": 0.0,
854
+ "grpo_std_advantage": 4.341973180999048e-06,
855
+ "learning_rate": 7.003747663612581e-07,
856
+ "loss": -0.433,
857
+ "step": 385
858
+ },
859
+ {
860
+ "epoch": 1.5605605605605606,
861
+ "grad_norm": 2.3657093048095703,
862
+ "grpo_mean_advantage": 1.7881394143159923e-08,
863
+ "grpo_mean_group_score": 0.6091476678848267,
864
+ "grpo_mean_kl_div": 0.0,
865
+ "grpo_std_advantage": 1.3004198251564958e-07,
866
+ "learning_rate": 6.439830434413754e-07,
867
+ "loss": -0.6021,
868
+ "step": 390
869
+ },
870
+ {
871
+ "epoch": 1.5805805805805806,
872
+ "grad_norm": 1.9847129583358765,
873
+ "grpo_mean_advantage": 3.4868716625169327e-07,
874
+ "grpo_mean_group_score": 0.5397372245788574,
875
+ "grpo_mean_kl_div": 0.0,
876
+ "grpo_std_advantage": 2.059372718576924e-06,
877
+ "learning_rate": 5.896208478137222e-07,
878
+ "loss": -0.5595,
879
+ "step": 395
880
+ },
881
+ {
882
+ "epoch": 1.6006006006006006,
883
+ "grad_norm": 2.922114133834839,
884
+ "grpo_mean_advantage": -2.1636485598719446e-06,
885
+ "grpo_mean_group_score": 0.5873125195503235,
886
+ "grpo_mean_kl_div": 0.0,
887
+ "grpo_std_advantage": 9.725940799398813e-06,
888
+ "learning_rate": 5.373476237410808e-07,
889
+ "loss": -0.5592,
890
+ "step": 400
891
+ }
892
+ ],
893
+ "logging_steps": 5,
894
+ "max_steps": 500,
895
+ "num_input_tokens_seen": 0,
896
+ "num_train_epochs": 2,
897
+ "save_steps": 100,
898
+ "stateful_callbacks": {
899
+ "TrainerControl": {
900
+ "args": {
901
+ "should_epoch_stop": false,
902
+ "should_evaluate": false,
903
+ "should_log": false,
904
+ "should_save": true,
905
+ "should_training_stop": false
906
+ },
907
+ "attributes": {}
908
+ }
909
+ },
910
+ "total_flos": 0.0,
911
+ "train_batch_size": 1,
912
+ "trial_name": null,
913
+ "trial_params": null
914
+ }
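The trainer_state.json above logs one entry to log_history every 5 optimizer steps (logging_steps: 5), each carrying the GRPO diagnostics (grpo_mean_group_score, grpo_mean_advantage, grpo_std_advantage, grpo_mean_kl_div) alongside grad_norm, learning_rate and loss. A minimal sketch, assuming the checkpoint directory has been fetched locally (the path below is illustrative), of reading those metrics with only the standard library:

```python
import json
from pathlib import Path

# Illustrative local path to the checkpoint shown in this diff.
state_path = Path("grpo_qwen_14B_v2/checkpoints/checkpoint-400/trainer_state.json")
state = json.loads(state_path.read_text())

# Each log_history entry is one logging event (every 5 steps in this run).
for entry in state["log_history"]:
    print(
        f"step {entry['step']:4d}  "
        f"group_score={entry['grpo_mean_group_score']:.4f}  "
        f"loss={entry['loss']:+.4f}  "
        f"grad_norm={entry['grad_norm']:.3f}"
    )
```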
grpo_qwen_14B_v2/checkpoints/checkpoint-400/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4a4e48ed61b7c96f3bd2836ac828013a311834ab8a9542ea461fe1ff953396b
3
+ size 5496
grpo_qwen_14B_v2/checkpoints/checkpoint-400/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
grpo_qwen_14B_v2/checkpoints/checkpoint-500/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.0
grpo_qwen_14B_v2/checkpoints/checkpoint-500/adapter_config.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
+ "r": 16,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "q_proj",
33
+ "o_proj",
34
+ "v_proj",
35
+ "up_proj",
36
+ "gate_proj",
37
+ "down_proj",
38
+ "k_proj"
39
+ ],
40
+ "target_parameters": null,
41
+ "task_type": "CAUSAL_LM",
42
+ "trainable_token_indices": null,
43
+ "use_dora": false,
44
+ "use_qalora": false,
45
+ "use_rslora": false
46
+ }
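The adapter_config.json above describes a rank-16 LoRA adapter (lora_alpha 32, dropout 0.05) applied to the attention and MLP projections (q/k/v/o, gate/up/down) of the base model at /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2. A minimal sketch, assuming local copies of both the base model and this checkpoint (the base path in the config is local to the training machine), of attaching the adapter with PEFT:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2"   # from adapter_config.json
adapter_path = "grpo_qwen_14B_v2/checkpoints/checkpoint-500"    # this checkpoint directory

tokenizer = AutoTokenizer.from_pretrained(adapter_path)
base_model = AutoModelForCausalLM.from_pretrained(
    base_path, torch_dtype=torch.bfloat16, device_map="auto"
)
# Loads adapter_model.safetensors on top of the frozen base weights.
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()
```

For deployment the LoRA weights can optionally be folded into the base model with model.merge_and_unload().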
grpo_qwen_14B_v2/checkpoints/checkpoint-500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfddfaff53a19bede615799a30a42fe443ed6813a68f14c0a605ed1e0b75aa7a
3
+ size 275341720
grpo_qwen_14B_v2/checkpoints/checkpoint-500/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
grpo_qwen_14B_v2/checkpoints/checkpoint-500/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
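The chat_template.jinja above is the Qwen-style ChatML template with tool-calling support; in practice it is not rendered by hand but applied through the tokenizer. A small sketch (messages are illustrative, checkpoint path as above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("grpo_qwen_14B_v2/checkpoints/checkpoint-500")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a Python function that reverses a string."},
]
# Renders the template above into <|im_start|>...<|im_end|> blocks and appends
# the assistant header because add_generation_prompt=True.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```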
grpo_qwen_14B_v2/checkpoints/checkpoint-500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
grpo_qwen_14B_v2/checkpoints/checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d70395226e3da425d344a5d58bb1d49f495aa05919324a7452c67daccffb659e
3
+ size 551070514
grpo_qwen_14B_v2/checkpoints/checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:140a5b8faae8f6aa3d7c8abd94812eb021a794706417ce578c4228067f08f646
3
+ size 14244
grpo_qwen_14B_v2/checkpoints/checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:622e95a06e3b8ce556346f153f9b954936262c4f61ed83e89373f1300da67c29
3
+ size 1064
grpo_qwen_14B_v2/checkpoints/checkpoint-500/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
grpo_qwen_14B_v2/checkpoints/checkpoint-500/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
3
+ size 11421896
grpo_qwen_14B_v2/checkpoints/checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|endoftext|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
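Per the tokenizer_config.json above, the checkpoint ships the Qwen2 BPE tokenizer with <|endoftext|> used as both EOS and pad token and a 32768-token model_max_length. A short sketch, assuming the same local checkpoint path as above, to confirm those settings after loading:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("grpo_qwen_14B_v2/checkpoints/checkpoint-500")
print(tok.eos_token, tok.pad_token)              # both "<|endoftext|>" per the config
print(tok.model_max_length)                      # 32768
print(tok.convert_tokens_to_ids("<|im_end|>"))   # 151645, see added_tokens.json
```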
grpo_qwen_14B_v2/checkpoints/checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,1134 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 2.0,
6
+ "eval_steps": 50,
7
+ "global_step": 500,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.02002002002002002,
14
+ "grad_norm": 0.05460292845964432,
15
+ "grpo_mean_advantage": -1.3560057254835556e-07,
16
+ "grpo_mean_group_score": 0.5922331809997559,
17
+ "grpo_mean_kl_div": 0.0,
18
+ "grpo_std_advantage": 3.0318567496578908e-06,
19
+ "learning_rate": 8.000000000000001e-07,
20
+ "loss": 0.007,
21
+ "step": 5
22
+ },
23
+ {
24
+ "epoch": 0.04004004004004004,
25
+ "grad_norm": 0.0679207444190979,
26
+ "grpo_mean_advantage": 3.6619603633880615e-06,
27
+ "grpo_mean_group_score": 0.5561589002609253,
28
+ "grpo_mean_kl_div": 0.0,
29
+ "grpo_std_advantage": 1.6246918676188216e-05,
30
+ "learning_rate": 1.8000000000000001e-06,
31
+ "loss": 0.0107,
32
+ "step": 10
33
+ },
34
+ {
35
+ "epoch": 0.06006006006006006,
36
+ "grad_norm": 0.05788416787981987,
37
+ "grpo_mean_advantage": -1.0654330395709621e-07,
38
+ "grpo_mean_group_score": 0.5759152173995972,
39
+ "grpo_mean_kl_div": 0.0,
40
+ "grpo_std_advantage": 5.399440965447866e-07,
41
+ "learning_rate": 2.8000000000000003e-06,
42
+ "loss": 0.007,
43
+ "step": 15
44
+ },
45
+ {
46
+ "epoch": 0.08008008008008008,
47
+ "grad_norm": 0.0746568813920021,
48
+ "grpo_mean_advantage": -5.871057737749652e-07,
49
+ "grpo_mean_group_score": 0.5127314329147339,
50
+ "grpo_mean_kl_div": 0.0,
51
+ "grpo_std_advantage": 2.6951597646984737e-06,
52
+ "learning_rate": 3.8000000000000005e-06,
53
+ "loss": 0.0246,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.1001001001001001,
58
+ "grad_norm": 0.11442846059799194,
59
+ "grpo_mean_advantage": 6.370246410369873e-07,
60
+ "grpo_mean_group_score": 0.539706826210022,
61
+ "grpo_mean_kl_div": 0.0,
62
+ "grpo_std_advantage": 2.8908377771585947e-06,
63
+ "learning_rate": 4.800000000000001e-06,
64
+ "loss": 0.0337,
65
+ "step": 25
66
+ },
67
+ {
68
+ "epoch": 0.12012012012012012,
69
+ "grad_norm": 0.05778791010379791,
70
+ "grpo_mean_advantage": 6.705522359595761e-09,
71
+ "grpo_mean_group_score": 0.5812538862228394,
72
+ "grpo_mean_kl_div": 0.0,
73
+ "grpo_std_advantage": 6.189450800775376e-07,
74
+ "learning_rate": 4.999125183044924e-06,
75
+ "loss": 0.0171,
76
+ "step": 30
77
+ },
78
+ {
79
+ "epoch": 0.14014014014014015,
80
+ "grad_norm": 0.05819695070385933,
81
+ "grpo_mean_advantage": 3.859400692363124e-07,
82
+ "grpo_mean_group_score": 0.5909844636917114,
83
+ "grpo_mean_kl_div": 0.0,
84
+ "grpo_std_advantage": 1.6833292875162442e-06,
85
+ "learning_rate": 4.995572288443412e-06,
86
+ "loss": 0.0145,
87
+ "step": 35
88
+ },
89
+ {
90
+ "epoch": 0.16016016016016016,
91
+ "grad_norm": 0.07968433201313019,
92
+ "grpo_mean_advantage": 2.600252742013254e-07,
93
+ "grpo_mean_group_score": 0.5630953907966614,
94
+ "grpo_mean_kl_div": 0.0,
95
+ "grpo_std_advantage": 1.4095899132371414e-06,
96
+ "learning_rate": 4.98929052218411e-06,
97
+ "loss": 0.0196,
98
+ "step": 40
99
+ },
100
+ {
101
+ "epoch": 0.18018018018018017,
102
+ "grad_norm": 0.0733402892947197,
103
+ "grpo_mean_advantage": -1.2591480924584175e-07,
104
+ "grpo_mean_group_score": 0.5604403614997864,
105
+ "grpo_mean_kl_div": 0.0,
106
+ "grpo_std_advantage": 1.0309080380466185e-06,
107
+ "learning_rate": 4.980286753286196e-06,
108
+ "loss": 0.0186,
109
+ "step": 45
110
+ },
111
+ {
112
+ "epoch": 0.2002002002002002,
113
+ "grad_norm": 0.07136482000350952,
114
+ "grpo_mean_advantage": -2.808868941883702e-07,
115
+ "grpo_mean_group_score": 0.5971035957336426,
116
+ "grpo_mean_kl_div": 0.0,
117
+ "grpo_std_advantage": 1.5696078889959608e-06,
118
+ "learning_rate": 4.9685708272387645e-06,
119
+ "loss": 0.0286,
120
+ "step": 50
121
+ },
122
+ {
123
+ "epoch": 0.22022022022022023,
124
+ "grad_norm": 0.08851475268602371,
125
+ "grpo_mean_advantage": 2.6822089438383045e-08,
126
+ "grpo_mean_group_score": 0.5892971754074097,
127
+ "grpo_mean_kl_div": 0.0,
128
+ "grpo_std_advantage": 3.7878271541558206e-07,
129
+ "learning_rate": 4.9541555552349404e-06,
130
+ "loss": 0.0054,
131
+ "step": 55
132
+ },
133
+ {
134
+ "epoch": 0.24024024024024024,
135
+ "grad_norm": 0.07778509706258774,
136
+ "grpo_mean_advantage": -5.662441182607836e-08,
137
+ "grpo_mean_group_score": 0.564322292804718,
138
+ "grpo_mean_kl_div": 0.0,
139
+ "grpo_std_advantage": 6.128998393251095e-07,
140
+ "learning_rate": 4.9370567001630155e-06,
141
+ "loss": -0.0074,
142
+ "step": 60
143
+ },
144
+ {
145
+ "epoch": 0.2602602602602603,
146
+ "grad_norm": 0.08740051090717316,
147
+ "grpo_mean_advantage": -1.5944242193199898e-07,
148
+ "grpo_mean_group_score": 0.562497615814209,
149
+ "grpo_mean_kl_div": 0.0,
150
+ "grpo_std_advantage": 1.6374274309782777e-06,
151
+ "learning_rate": 4.917292959369968e-06,
152
+ "loss": 0.0145,
153
+ "step": 65
154
+ },
155
+ {
156
+ "epoch": 0.2802802802802803,
157
+ "grad_norm": 0.19070060551166534,
158
+ "grpo_mean_advantage": 1.6838312433264946e-07,
159
+ "grpo_mean_group_score": 0.5904761552810669,
160
+ "grpo_mean_kl_div": 0.0,
161
+ "grpo_std_advantage": 8.536571272088622e-07,
162
+ "learning_rate": 4.8948859442161876e-06,
163
+ "loss": 0.0257,
164
+ "step": 70
165
+ },
166
+ {
167
+ "epoch": 0.3003003003003003,
168
+ "grad_norm": 0.07321271300315857,
169
+ "grpo_mean_advantage": 1.1175870895385742e-07,
170
+ "grpo_mean_group_score": 0.5765624046325684,
171
+ "grpo_mean_kl_div": 0.0,
172
+ "grpo_std_advantage": 6.451961667153228e-07,
173
+ "learning_rate": 4.869860156443768e-06,
174
+ "loss": 0.0024,
175
+ "step": 75
176
+ },
177
+ {
178
+ "epoch": 0.3203203203203203,
179
+ "grad_norm": 0.07126748561859131,
180
+ "grpo_mean_advantage": -1.4603138254187797e-07,
181
+ "grpo_mean_group_score": 0.5858271718025208,
182
+ "grpo_mean_kl_div": 0.0,
183
+ "grpo_std_advantage": 1.1309343790344428e-06,
184
+ "learning_rate": 4.842242961384211e-06,
185
+ "loss": 0.0277,
186
+ "step": 80
187
+ },
188
+ {
189
+ "epoch": 0.34034034034034033,
190
+ "grad_norm": 0.08629189431667328,
191
+ "grpo_mean_advantage": -1.817941665649414e-06,
192
+ "grpo_mean_group_score": 0.5871662497520447,
193
+ "grpo_mean_kl_div": 0.0,
194
+ "grpo_std_advantage": 1.1141768482048064e-05,
195
+ "learning_rate": 4.812064558034847e-06,
196
+ "loss": 0.0246,
197
+ "step": 85
198
+ },
199
+ {
200
+ "epoch": 0.36036036036036034,
201
+ "grad_norm": 0.0998779758810997,
202
+ "grpo_mean_advantage": 1.8179416372277046e-07,
203
+ "grpo_mean_group_score": 0.5330992937088013,
204
+ "grpo_mean_kl_div": 0.0,
205
+ "grpo_std_advantage": 6.210335072864837e-07,
206
+ "learning_rate": 4.779357946036662e-06,
207
+ "loss": 0.0056,
208
+ "step": 90
209
+ },
210
+ {
211
+ "epoch": 0.38038038038038036,
212
+ "grad_norm": 0.10614689439535141,
213
+ "grpo_mean_advantage": -2.972781771859445e-07,
214
+ "grpo_mean_group_score": 0.5265295505523682,
215
+ "grpo_mean_kl_div": 0.0,
216
+ "grpo_std_advantage": 3.1582342217006953e-06,
217
+ "learning_rate": 4.74415888958968e-06,
218
+ "loss": 0.0053,
219
+ "step": 95
220
+ },
221
+ {
222
+ "epoch": 0.4004004004004004,
223
+ "grad_norm": 0.10345634073019028,
224
+ "grpo_mean_advantage": -7.033348197182931e-07,
225
+ "grpo_mean_group_score": 0.5660771131515503,
226
+ "grpo_mean_kl_div": 0.0,
227
+ "grpo_std_advantage": 4.245831405569334e-06,
228
+ "learning_rate": 4.706505878345343e-06,
229
+ "loss": 0.0134,
230
+ "step": 100
231
+ },
232
+ {
233
+ "epoch": 0.42042042042042044,
234
+ "grad_norm": 0.10077933222055435,
235
+ "grpo_mean_advantage": 1.1920928955078125e-07,
236
+ "grpo_mean_group_score": 0.57631915807724,
237
+ "grpo_mean_kl_div": 0.0,
238
+ "grpo_std_advantage": 3.2809634831210133e-07,
239
+ "learning_rate": 4.666440085318626e-06,
240
+ "loss": 0.0004,
241
+ "step": 105
242
+ },
243
+ {
244
+ "epoch": 0.44044044044044045,
245
+ "grad_norm": 0.09548182785511017,
246
+ "grpo_mean_advantage": -4.0978193283081055e-07,
247
+ "grpo_mean_group_score": 0.546563982963562,
248
+ "grpo_mean_kl_div": 0.0,
249
+ "grpo_std_advantage": 6.0397578636184335e-06,
250
+ "learning_rate": 4.624005321865968e-06,
251
+ "loss": 0.0033,
252
+ "step": 110
253
+ },
254
+ {
255
+ "epoch": 0.46046046046046046,
256
+ "grad_norm": 0.09417816251516342,
257
+ "grpo_mean_advantage": -1.467764434437413e-07,
258
+ "grpo_mean_group_score": 0.5519219636917114,
259
+ "grpo_mean_kl_div": 0.0,
260
+ "grpo_std_advantage": 2.2689375782647403e-06,
261
+ "learning_rate": 4.57924798977818e-06,
262
+ "loss": 0.0095,
263
+ "step": 115
264
+ },
265
+ {
266
+ "epoch": 0.4804804804804805,
267
+ "grad_norm": 0.10022275149822235,
268
+ "grpo_mean_advantage": -5.215406329028838e-09,
269
+ "grpo_mean_group_score": 0.5490407943725586,
270
+ "grpo_mean_kl_div": 0.0,
271
+ "grpo_std_advantage": 7.929010621410271e-07,
272
+ "learning_rate": 4.532217030540781e-06,
273
+ "loss": 0.0006,
274
+ "step": 120
275
+ },
276
+ {
277
+ "epoch": 0.5005005005005005,
278
+ "grad_norm": 0.14057794213294983,
279
+ "grpo_mean_advantage": -5.7369469175228005e-08,
280
+ "grpo_mean_group_score": 0.5646580457687378,
281
+ "grpo_mean_kl_div": 0.0,
282
+ "grpo_std_advantage": 1.2823379620385822e-06,
283
+ "learning_rate": 4.482963871817195e-06,
284
+ "loss": -0.0046,
285
+ "step": 125
286
+ },
287
+ {
288
+ "epoch": 0.5205205205205206,
289
+ "grad_norm": 0.12420658767223358,
290
+ "grpo_mean_advantage": 2.9876827056796174e-07,
291
+ "grpo_mean_group_score": 0.6111599802970886,
292
+ "grpo_mean_kl_div": 0.0,
293
+ "grpo_std_advantage": 1.0496698905626545e-06,
294
+ "learning_rate": 4.4315423712133595e-06,
295
+ "loss": -0.003,
296
+ "step": 130
297
+ },
298
+ {
299
+ "epoch": 0.5405405405405406,
300
+ "grad_norm": 0.14342808723449707,
301
+ "grpo_mean_advantage": 1.5869736103013565e-07,
302
+ "grpo_mean_group_score": 0.5619662404060364,
303
+ "grpo_mean_kl_div": 0.0,
304
+ "grpo_std_advantage": 1.2748531617035042e-06,
305
+ "learning_rate": 4.378008757385222e-06,
306
+ "loss": 0.0154,
307
+ "step": 135
308
+ },
309
+ {
310
+ "epoch": 0.5605605605605606,
311
+ "grad_norm": 0.14729444682598114,
312
+ "grpo_mean_advantage": 3.0100346748440643e-07,
313
+ "grpo_mean_group_score": 0.5795454978942871,
314
+ "grpo_mean_kl_div": 0.0,
315
+ "grpo_std_advantage": 2.4499684059264837e-06,
316
+ "learning_rate": 4.322421568553529e-06,
317
+ "loss": -0.0262,
318
+ "step": 140
319
+ },
320
+ {
321
+ "epoch": 0.5805805805805806,
322
+ "grad_norm": 0.15249410271644592,
323
+ "grpo_mean_advantage": -3.233552092751779e-07,
324
+ "grpo_mean_group_score": 0.5804953575134277,
325
+ "grpo_mean_kl_div": 0.0,
326
+ "grpo_std_advantage": 1.248456669600273e-06,
327
+ "learning_rate": 4.2648415884931476e-06,
328
+ "loss": 0.0018,
329
+ "step": 145
330
+ },
331
+ {
332
+ "epoch": 0.6006006006006006,
333
+ "grad_norm": 0.1841023564338684,
334
+ "grpo_mean_advantage": 3.2261013416245987e-07,
335
+ "grpo_mean_group_score": 0.5628539323806763,
336
+ "grpo_mean_kl_div": 0.0,
337
+ "grpo_std_advantage": 1.4773489738217904e-06,
338
+ "learning_rate": 4.205331780066892e-06,
339
+ "loss": -0.017,
340
+ "step": 150
341
+ },
342
+ {
343
+ "epoch": 0.6206206206206206,
344
+ "grad_norm": 0.18597163259983063,
345
+ "grpo_mean_advantage": -2.5331974029541016e-07,
346
+ "grpo_mean_group_score": 0.5727725625038147,
347
+ "grpo_mean_kl_div": 0.0,
348
+ "grpo_std_advantage": 1.5092309695319273e-06,
349
+ "learning_rate": 4.1439572163765615e-06,
350
+ "loss": 0.0044,
351
+ "step": 155
352
+ },
353
+ {
354
+ "epoch": 0.6406406406406406,
355
+ "grad_norm": 0.18310388922691345,
356
+ "grpo_mean_advantage": -6.780028627417778e-08,
357
+ "grpo_mean_group_score": 0.5833909511566162,
358
+ "grpo_mean_kl_div": 0.0,
359
+ "grpo_std_advantage": 8.550978805033083e-07,
360
+ "learning_rate": 4.0807850096064605e-06,
361
+ "loss": -0.005,
362
+ "step": 160
363
+ },
364
+ {
365
+ "epoch": 0.6606606606606606,
366
+ "grad_norm": 0.2192923128604889,
367
+ "grpo_mean_advantage": -5.587935447692871e-08,
368
+ "grpo_mean_group_score": 0.5742615461349487,
369
+ "grpo_mean_kl_div": 0.0,
370
+ "grpo_std_advantage": 3.564579174053506e-07,
371
+ "learning_rate": 4.015884237637206e-06,
372
+ "loss": -0.015,
373
+ "step": 165
374
+ },
375
+ {
376
+ "epoch": 0.6806806806806807,
377
+ "grad_norm": 0.16708803176879883,
378
+ "grpo_mean_advantage": -5.327165126800537e-07,
379
+ "grpo_mean_group_score": 0.5758188962936401,
380
+ "grpo_mean_kl_div": 0.0,
381
+ "grpo_std_advantage": 2.309018327650847e-06,
382
+ "learning_rate": 3.949325868510083e-06,
383
+ "loss": -0.0314,
384
+ "step": 170
385
+ },
386
+ {
387
+ "epoch": 0.7007007007007007,
388
+ "grad_norm": 0.3401262164115906,
389
+ "grpo_mean_advantage": 5.863606702405377e-07,
390
+ "grpo_mean_group_score": 0.5767683982849121,
391
+ "grpo_mean_kl_div": 0.0,
392
+ "grpo_std_advantage": 2.4449204829579685e-06,
393
+ "learning_rate": 3.881182682824534e-06,
394
+ "loss": -0.0441,
395
+ "step": 175
396
+ },
397
+ {
398
+ "epoch": 0.7207207207207207,
399
+ "grad_norm": 0.1931898146867752,
400
+ "grpo_mean_advantage": 3.2186508747145126e-07,
401
+ "grpo_mean_group_score": 0.586772084236145,
402
+ "grpo_mean_kl_div": 0.0,
403
+ "grpo_std_advantage": 2.293551688126172e-06,
404
+ "learning_rate": 3.811529194153635e-06,
405
+ "loss": -0.0162,
406
+ "step": 180
407
+ },
408
+ {
409
+ "epoch": 0.7407407407407407,
410
+ "grad_norm": 0.2537969648838043,
411
+ "grpo_mean_advantage": -4.470348358154297e-08,
412
+ "grpo_mean_group_score": 0.549396276473999,
413
+ "grpo_mean_kl_div": 0.0,
414
+ "grpo_std_advantage": 3.7067667335577426e-07,
415
+ "learning_rate": 3.7404415675646054e-06,
416
+ "loss": -0.0386,
417
+ "step": 185
418
+ },
419
+ {
420
+ "epoch": 0.7607607607607607,
421
+ "grad_norm": 0.20326584577560425,
422
+ "grpo_mean_advantage": -2.1010637851759384e-07,
423
+ "grpo_mean_group_score": 0.5798425078392029,
424
+ "grpo_mean_kl_div": 0.0,
425
+ "grpo_std_advantage": 1.1695076409523608e-06,
426
+ "learning_rate": 3.667997536333424e-06,
427
+ "loss": -0.037,
428
+ "step": 190
429
+ },
430
+ {
431
+ "epoch": 0.7807807807807807,
432
+ "grad_norm": 0.25048357248306274,
433
+ "grpo_mean_advantage": 1.765787658314366e-07,
434
+ "grpo_mean_group_score": 0.5584167838096619,
435
+ "grpo_mean_kl_div": 0.0,
436
+ "grpo_std_advantage": 2.429934738756856e-06,
437
+ "learning_rate": 3.59427631694463e-06,
438
+ "loss": -0.0292,
439
+ "step": 195
440
+ },
441
+ {
442
+ "epoch": 0.8008008008008008,
443
+ "grad_norm": 0.2687569260597229,
444
+ "grpo_mean_advantage": 1.6540289493605087e-07,
445
+ "grpo_mean_group_score": 0.5676193237304688,
446
+ "grpo_mean_kl_div": 0.0,
447
+ "grpo_std_advantage": 2.6342788714828203e-06,
448
+ "learning_rate": 3.5193585224692595e-06,
449
+ "loss": -0.0454,
450
+ "step": 200
451
+ },
452
+ {
453
+ "epoch": 0.8208208208208209,
454
+ "grad_norm": 0.22301620244979858,
455
+ "grpo_mean_advantage": -1.0944902442133753e-06,
456
+ "grpo_mean_group_score": 0.5669739842414856,
457
+ "grpo_mean_kl_div": 0.0,
458
+ "grpo_std_advantage": 5.346942998585291e-06,
459
+ "learning_rate": 3.44332607441564e-06,
460
+ "loss": -0.0423,
461
+ "step": 205
462
+ },
463
+ {
464
+ "epoch": 0.8408408408408409,
465
+ "grad_norm": 0.3040211498737335,
466
+ "grpo_mean_advantage": 2.4065374759629776e-07,
467
+ "grpo_mean_group_score": 0.5922158360481262,
468
+ "grpo_mean_kl_div": 0.0,
469
+ "grpo_std_advantage": 1.6327536513927043e-06,
470
+ "learning_rate": 3.3662621131494204e-06,
471
+ "loss": -0.0857,
472
+ "step": 210
473
+ },
474
+ {
475
+ "epoch": 0.8608608608608609,
476
+ "grad_norm": 0.27231141924858093,
477
+ "grpo_mean_advantage": -5.21540641784668e-08,
478
+ "grpo_mean_group_score": 0.5473950505256653,
479
+ "grpo_mean_kl_div": 0.0,
480
+ "grpo_std_advantage": 5.847922466273303e-07,
481
+ "learning_rate": 3.2882509069808044e-06,
482
+ "loss": -0.0278,
483
+ "step": 215
484
+ },
485
+ {
486
+ "epoch": 0.8808808808808809,
487
+ "grad_norm": 0.3571636378765106,
488
+ "grpo_mean_advantage": 6.541609991472797e-07,
489
+ "grpo_mean_group_score": 0.5880032777786255,
490
+ "grpo_mean_kl_div": 0.0,
491
+ "grpo_std_advantage": 4.072162937518442e-06,
492
+ "learning_rate": 3.2093777600183873e-06,
493
+ "loss": -0.0727,
494
+ "step": 220
495
+ },
496
+ {
497
+ "epoch": 0.9009009009009009,
498
+ "grad_norm": 0.306273490190506,
499
+ "grpo_mean_advantage": -1.2218951894737984e-07,
500
+ "grpo_mean_group_score": 0.5835092663764954,
501
+ "grpo_mean_kl_div": 0.0,
502
+ "grpo_std_advantage": 4.386006935419573e-07,
503
+ "learning_rate": 3.1297289188903705e-06,
504
+ "loss": -0.0464,
505
+ "step": 225
506
+ },
507
+ {
508
+ "epoch": 0.9209209209209209,
509
+ "grad_norm": 0.2700377106666565,
510
+ "grpo_mean_advantage": 1.7605722177904681e-06,
511
+ "grpo_mean_group_score": 0.5394966006278992,
512
+ "grpo_mean_kl_div": 0.0,
513
+ "grpo_std_advantage": 8.007580618141219e-06,
514
+ "learning_rate": 3.049391478435133e-06,
515
+ "loss": -0.0295,
516
+ "step": 230
517
+ },
518
+ {
519
+ "epoch": 0.9409409409409409,
520
+ "grad_norm": 0.39531761407852173,
521
+ "grpo_mean_advantage": -3.3080578987210174e-07,
522
+ "grpo_mean_group_score": 0.5687432289123535,
523
+ "grpo_mean_kl_div": 0.0,
524
+ "grpo_std_advantage": 1.551636614749441e-06,
525
+ "learning_rate": 2.9684532864643123e-06,
526
+ "loss": -0.031,
527
+ "step": 235
528
+ },
529
+ {
530
+ "epoch": 0.960960960960961,
531
+ "grad_norm": 0.5987040996551514,
532
+ "grpo_mean_advantage": 2.712011450967111e-07,
533
+ "grpo_mean_group_score": 0.5550583600997925,
534
+ "grpo_mean_kl_div": 0.0,
535
+ "grpo_std_advantage": 1.4400844747797237e-06,
536
+ "learning_rate": 2.887002847702504e-06,
537
+ "loss": -0.0789,
538
+ "step": 240
539
+ },
540
+ {
541
+ "epoch": 0.980980980980981,
542
+ "grad_norm": 0.5680716037750244,
543
+ "grpo_mean_advantage": -3.2857059295565705e-07,
544
+ "grpo_mean_group_score": 0.558111310005188,
545
+ "grpo_mean_kl_div": 0.0,
546
+ "grpo_std_advantage": 2.105091425619321e-06,
547
+ "learning_rate": 2.8051292270086506e-06,
548
+ "loss": -0.1131,
549
+ "step": 245
550
+ },
551
+ {
552
+ "epoch": 1.0,
553
+ "grad_norm": 0.6204046010971069,
554
+ "grpo_mean_advantage": 4.470348358154297e-08,
555
+ "grpo_mean_group_score": 0.6196198463439941,
556
+ "grpo_mean_kl_div": 0.0,
557
+ "grpo_std_advantage": 5.315724820320611e-07,
558
+ "learning_rate": 2.722921951984927e-06,
559
+ "loss": -0.2232,
560
+ "step": 250
561
+ },
562
+ {
563
+ "epoch": 1.02002002002002,
564
+ "grad_norm": 0.8389026522636414,
565
+ "grpo_mean_advantage": 9.290873776990338e-07,
566
+ "grpo_mean_group_score": 0.582168459892273,
567
+ "grpo_mean_kl_div": 0.0,
568
+ "grpo_std_advantage": 4.219644324621186e-06,
569
+ "learning_rate": 2.640470915079614e-06,
570
+ "loss": -0.1363,
571
+ "step": 255
572
+ },
573
+ {
574
+ "epoch": 1.04004004004004,
575
+ "grad_norm": 0.9067686796188354,
576
+ "grpo_mean_advantage": 2.533197474008375e-08,
577
+ "grpo_mean_group_score": 0.5551307797431946,
578
+ "grpo_mean_kl_div": 0.0,
579
+ "grpo_std_advantage": 1.6600588992332632e-07,
580
+ "learning_rate": 2.557866275291035e-06,
581
+ "loss": -0.1868,
582
+ "step": 260
583
+ },
584
+ {
585
+ "epoch": 1.06006006006006,
586
+ "grad_norm": 0.9277902841567993,
587
+ "grpo_mean_advantage": -5.662441182607836e-08,
588
+ "grpo_mean_group_score": 0.535040020942688,
589
+ "grpo_mean_kl_div": 0.0,
590
+ "grpo_std_advantage": 1.0909400316450046e-06,
591
+ "learning_rate": 2.4751983595800093e-06,
592
+ "loss": -0.1792,
593
+ "step": 265
594
+ },
595
+ {
596
+ "epoch": 1.08008008008008,
597
+ "grad_norm": 1.0715463161468506,
598
+ "grpo_mean_advantage": -9.536743306171047e-08,
599
+ "grpo_mean_group_score": 0.5673571825027466,
600
+ "grpo_mean_kl_div": 0.0,
601
+ "grpo_std_advantage": 5.838213610331877e-07,
602
+ "learning_rate": 2.392557564098649e-06,
603
+ "loss": -0.1691,
604
+ "step": 270
605
+ },
606
+ {
607
+ "epoch": 1.1001001001001,
608
+ "grad_norm": 0.7759184837341309,
609
+ "grpo_mean_advantage": 3.278255533700758e-08,
610
+ "grpo_mean_group_score": 0.5874732732772827,
611
+ "grpo_mean_kl_div": 0.0,
612
+ "grpo_std_advantage": 9.317170679423725e-07,
613
+ "learning_rate": 2.3100342553434924e-06,
614
+ "loss": -0.1655,
615
+ "step": 275
616
+ },
617
+ {
618
+ "epoch": 1.12012012012012,
619
+ "grad_norm": 0.9387398958206177,
620
+ "grpo_mean_advantage": -1.206994113545079e-07,
621
+ "grpo_mean_group_score": 0.5569106340408325,
622
+ "grpo_mean_kl_div": 0.0,
623
+ "grpo_std_advantage": 6.201085511747806e-07,
624
+ "learning_rate": 2.2277186713410688e-06,
625
+ "loss": -0.1821,
626
+ "step": 280
627
+ },
628
+ {
629
+ "epoch": 1.14014014014014,
630
+ "grad_norm": 1.6132302284240723,
631
+ "grpo_mean_advantage": 4.470348358154297e-08,
632
+ "grpo_mean_group_score": 0.5578873157501221,
633
+ "grpo_mean_kl_div": 0.0,
634
+ "grpo_std_advantage": 6.115651558502577e-07,
635
+ "learning_rate": 2.1457008229739395e-06,
636
+ "loss": -0.2102,
637
+ "step": 285
638
+ },
639
+ {
640
+ "epoch": 1.16016016016016,
641
+ "grad_norm": 0.8679026961326599,
642
+ "grpo_mean_advantage": -3.3453108017056365e-07,
643
+ "grpo_mean_group_score": 0.5735999345779419,
644
+ "grpo_mean_kl_div": 0.0,
645
+ "grpo_std_advantage": 3.5326345368957845e-06,
646
+ "learning_rate": 2.0640703955551214e-06,
647
+ "loss": -0.2937,
648
+ "step": 290
649
+ },
650
+ {
651
+ "epoch": 1.1801801801801801,
652
+ "grad_norm": 1.0550166368484497,
653
+ "grpo_mean_advantage": -1.110136480519941e-07,
654
+ "grpo_mean_group_score": 0.5626259446144104,
655
+ "grpo_mean_kl_div": 0.0,
656
+ "grpo_std_advantage": 4.731904823529476e-07,
657
+ "learning_rate": 1.9829166507585084e-06,
658
+ "loss": -0.2598,
659
+ "step": 295
660
+ },
661
+ {
662
+ "epoch": 1.2002002002002001,
663
+ "grad_norm": 1.2819372415542603,
664
+ "grpo_mean_advantage": -5.08874677507265e-07,
665
+ "grpo_mean_group_score": 0.5463050603866577,
666
+ "grpo_mean_kl_div": 0.0,
667
+ "grpo_std_advantage": 1.840126174101897e-06,
668
+ "learning_rate": 1.90232832901255e-06,
669
+ "loss": -0.2546,
670
+ "step": 300
671
+ },
672
+ {
673
+ "epoch": 1.2202202202202201,
674
+ "grad_norm": 1.0188143253326416,
675
+ "grpo_mean_advantage": 1.01327898960335e-07,
676
+ "grpo_mean_group_score": 0.5352144241333008,
677
+ "grpo_mean_kl_div": 0.0,
678
+ "grpo_std_advantage": 7.798533943059738e-07,
679
+ "learning_rate": 1.82239355246389e-06,
680
+ "loss": -0.1809,
681
+ "step": 305
682
+ },
683
+ {
684
+ "epoch": 1.2402402402402402,
685
+ "grad_norm": 2.0709052085876465,
686
+ "grpo_mean_advantage": 1.341104507446289e-07,
687
+ "grpo_mean_group_score": 0.5547868013381958,
688
+ "grpo_mean_kl_div": 0.0,
689
+ "grpo_std_advantage": 7.821902840987605e-07,
690
+ "learning_rate": 1.7431997286170923e-06,
691
+ "loss": -0.3559,
692
+ "step": 310
693
+ },
694
+ {
695
+ "epoch": 1.2602602602602602,
696
+ "grad_norm": 1.8516215085983276,
697
+ "grpo_mean_advantage": 9.015202806494926e-08,
698
+ "grpo_mean_group_score": 0.5859472751617432,
699
+ "grpo_mean_kl_div": 0.0,
700
+ "grpo_std_advantage": 1.0693488547985908e-06,
701
+ "learning_rate": 1.6648334547558227e-06,
702
+ "loss": -0.3874,
703
+ "step": 315
704
+ },
705
+ {
706
+ "epoch": 1.2802802802802802,
707
+ "grad_norm": 1.283104419708252,
708
+ "grpo_mean_advantage": -2.443790378947597e-07,
709
+ "grpo_mean_group_score": 0.5751550793647766,
710
+ "grpo_mean_kl_div": 0.0,
711
+ "grpo_std_advantage": 1.183122208203713e-06,
712
+ "learning_rate": 1.5873804232499862e-06,
713
+ "loss": -0.3467,
714
+ "step": 320
715
+ },
716
+ {
717
+ "epoch": 1.3003003003003002,
718
+ "grad_norm": 1.4108576774597168,
719
+ "grpo_mean_advantage": -6.705522537231445e-08,
720
+ "grpo_mean_group_score": 0.5497723817825317,
721
+ "grpo_mean_kl_div": 0.0,
722
+ "grpo_std_advantage": 6.109748937888071e-07,
723
+ "learning_rate": 1.51092532785238e-06,
724
+ "loss": -0.1703,
725
+ "step": 325
726
+ },
727
+ {
728
+ "epoch": 1.3203203203203202,
729
+ "grad_norm": 1.0421361923217773,
730
+ "grpo_mean_advantage": -1.639127766850379e-08,
731
+ "grpo_mean_group_score": 0.55989670753479,
732
+ "grpo_mean_kl_div": 0.0,
733
+ "grpo_std_advantage": 5.529495297196263e-07,
734
+ "learning_rate": 1.4355517710873184e-06,
735
+ "loss": -0.2918,
736
+ "step": 330
737
+ },
738
+ {
739
+ "epoch": 1.3403403403403402,
740
+ "grad_norm": 1.3465828895568848,
741
+ "grpo_mean_advantage": 4.418194237132411e-07,
742
+ "grpo_mean_group_score": 0.5809233784675598,
743
+ "grpo_mean_kl_div": 0.0,
744
+ "grpo_std_advantage": 2.9275292945385445e-06,
745
+ "learning_rate": 1.361342172832502e-06,
746
+ "loss": -0.3069,
747
+ "step": 335
748
+ },
749
+ {
750
+ "epoch": 1.3603603603603602,
751
+ "grad_norm": 1.1959459781646729,
752
+ "grpo_mean_advantage": 9.685754776000977e-08,
753
+ "grpo_mean_group_score": 0.5568087100982666,
754
+ "grpo_mean_kl_div": 0.0,
755
+ "grpo_std_advantage": 3.754235251562932e-07,
756
+ "learning_rate": 1.2883776801940884e-06,
757
+ "loss": -0.5594,
758
+ "step": 340
759
+ },
760
+ {
761
+ "epoch": 1.3803803803803802,
762
+ "grad_norm": 1.8967422246932983,
763
+ "grpo_mean_advantage": -2.384185791015625e-07,
764
+ "grpo_mean_group_score": 0.5655568838119507,
765
+ "grpo_mean_kl_div": 0.0,
766
+ "grpo_std_advantage": 6.821086913078034e-07,
767
+ "learning_rate": 1.216738078773522e-06,
768
+ "loss": -0.4102,
769
+ "step": 345
770
+ },
771
+ {
772
+ "epoch": 1.4004004004004005,
773
+ "grad_norm": 2.221132755279541,
774
+ "grpo_mean_advantage": -8.717179156292332e-08,
775
+ "grpo_mean_group_score": 0.6089578866958618,
776
+ "grpo_mean_kl_div": 0.0,
777
+ "grpo_std_advantage": 2.500940354366321e-06,
778
+ "learning_rate": 1.146501705423155e-06,
779
+ "loss": -0.338,
780
+ "step": 350
781
+ },
782
+ {
783
+ "epoch": 1.4204204204204205,
784
+ "grad_norm": 2.3640377521514893,
785
+ "grpo_mean_advantage": 2.1606683731079102e-07,
786
+ "grpo_mean_group_score": 0.6129671335220337,
787
+ "grpo_mean_kl_div": 0.0,
788
+ "grpo_std_advantage": 1.4568390724889468e-06,
789
+ "learning_rate": 1.0777453625860474e-06,
790
+ "loss": -0.4985,
791
+ "step": 355
792
+ },
793
+ {
794
+ "epoch": 1.4404404404404405,
795
+ "grad_norm": 1.9084734916687012,
796
+ "grpo_mean_advantage": -3.725290298461914e-09,
797
+ "grpo_mean_group_score": 0.5562310814857483,
798
+ "grpo_mean_kl_div": 0.0,
799
+ "grpo_std_advantage": 2.965894054796081e-06,
800
+ "learning_rate": 1.0105442343136184e-06,
801
+ "loss": -0.4347,
802
+ "step": 360
803
+ },
804
+ {
805
+ "epoch": 1.4604604604604605,
806
+ "grad_norm": 1.6063904762268066,
807
+ "grpo_mean_advantage": 4.313886279305734e-07,
808
+ "grpo_mean_group_score": 0.5884170532226562,
809
+ "grpo_mean_kl_div": 0.0,
810
+ "grpo_std_advantage": 1.9621948013082147e-06,
811
+ "learning_rate": 9.449718040529987e-07,
812
+ "loss": -0.6217,
813
+ "step": 365
814
+ },
815
+ {
816
+ "epoch": 1.4804804804804805,
817
+ "grad_norm": 2.114664077758789,
818
+ "grpo_mean_advantage": 2.0489096641540527e-07,
819
+ "grpo_mean_group_score": 0.5795440673828125,
820
+ "grpo_mean_kl_div": 0.0,
821
+ "grpo_std_advantage": 1.0235522722723545e-06,
822
+ "learning_rate": 8.810997742939531e-07,
823
+ "loss": -0.5364,
824
+ "step": 370
825
+ },
826
+ {
827
+ "epoch": 1.5005005005005005,
828
+ "grad_norm": 1.8450465202331543,
829
+ "grpo_mean_advantage": -1.4185905001795618e-06,
830
+ "grpo_mean_group_score": 0.5607603788375854,
831
+ "grpo_mean_kl_div": 0.0,
832
+ "grpo_std_advantage": 1.0947338523692451e-05,
833
+ "learning_rate": 8.189979881632634e-07,
834
+ "loss": -0.4798,
835
+ "step": 375
836
+ },
837
+ {
838
+ "epoch": 1.5205205205205206,
839
+ "grad_norm": 2.673438787460327,
840
+ "grpo_mean_advantage": -1.758337049295733e-07,
841
+ "grpo_mean_group_score": 0.5381432771682739,
842
+ "grpo_mean_kl_div": 0.0,
843
+ "grpo_std_advantage": 9.663675655247062e-07,
844
+ "learning_rate": 7.587343530522945e-07,
845
+ "loss": -0.4805,
846
+ "step": 380
847
+ },
848
+ {
849
+ "epoch": 1.5405405405405406,
850
+ "grad_norm": 2.2263550758361816,
851
+ "grpo_mean_advantage": -6.973743325033865e-07,
852
+ "grpo_mean_group_score": 0.5528443455696106,
853
+ "grpo_mean_kl_div": 0.0,
854
+ "grpo_std_advantage": 4.341973180999048e-06,
855
+ "learning_rate": 7.003747663612581e-07,
856
+ "loss": -0.433,
857
+ "step": 385
858
+ },
859
+ {
860
+ "epoch": 1.5605605605605606,
861
+ "grad_norm": 2.3657093048095703,
862
+ "grpo_mean_advantage": 1.7881394143159923e-08,
863
+ "grpo_mean_group_score": 0.6091476678848267,
864
+ "grpo_mean_kl_div": 0.0,
865
+ "grpo_std_advantage": 1.3004198251564958e-07,
866
+ "learning_rate": 6.439830434413754e-07,
867
+ "loss": -0.6021,
868
+ "step": 390
869
+ },
870
+ {
871
+ "epoch": 1.5805805805805806,
872
+ "grad_norm": 1.9847129583358765,
873
+ "grpo_mean_advantage": 3.4868716625169327e-07,
874
+ "grpo_mean_group_score": 0.5397372245788574,
875
+ "grpo_mean_kl_div": 0.0,
876
+ "grpo_std_advantage": 2.059372718576924e-06,
877
+ "learning_rate": 5.896208478137222e-07,
878
+ "loss": -0.5595,
879
+ "step": 395
880
+ },
881
+ {
882
+ "epoch": 1.6006006006006006,
883
+ "grad_norm": 2.922114133834839,
884
+ "grpo_mean_advantage": -2.1636485598719446e-06,
885
+ "grpo_mean_group_score": 0.5873125195503235,
886
+ "grpo_mean_kl_div": 0.0,
887
+ "grpo_std_advantage": 9.725940799398813e-06,
888
+ "learning_rate": 5.373476237410808e-07,
889
+ "loss": -0.5592,
890
+ "step": 400
891
+ },
892
+ {
893
+ "epoch": 1.6206206206206206,
894
+ "grad_norm": 1.8524045944213867,
895
+ "grpo_mean_advantage": -5.960464477539063e-08,
896
+ "grpo_mean_group_score": 0.5601426362991333,
897
+ "grpo_mean_kl_div": 0.0,
898
+ "grpo_std_advantage": 3.460792754594877e-07,
899
+ "learning_rate": 4.872205312265074e-07,
900
+ "loss": -0.5623,
901
+ "step": 405
902
+ },
903
+ {
904
+ "epoch": 1.6406406406406406,
905
+ "grad_norm": 1.7269790172576904,
906
+ "grpo_mean_advantage": 2.6226044269606064e-07,
907
+ "grpo_mean_group_score": 0.578656792640686,
908
+ "grpo_mean_kl_div": 0.0,
909
+ "grpo_std_advantage": 7.928817922220333e-07,
910
+ "learning_rate": 4.3929438350970687e-07,
911
+ "loss": -0.5943,
912
+ "step": 410
913
+ },
914
+ {
915
+ "epoch": 1.6606606606606606,
916
+ "grad_norm": 2.26530122756958,
917
+ "grpo_mean_advantage": 2.3558736756967846e-06,
918
+ "grpo_mean_group_score": 0.5885810852050781,
919
+ "grpo_mean_kl_div": 0.0,
920
+ "grpo_std_advantage": 1.4469559573626611e-05,
921
+ "learning_rate": 3.936215871295634e-07,
922
+ "loss": -0.6193,
923
+ "step": 415
924
+ },
925
+ {
926
+ "epoch": 1.6806806806806807,
927
+ "grad_norm": 2.6794464588165283,
928
+ "grpo_mean_advantage": 1.639127766850379e-08,
929
+ "grpo_mean_group_score": 0.5805023312568665,
930
+ "grpo_mean_kl_div": 0.0,
931
+ "grpo_std_advantage": 9.352411325380672e-07,
932
+ "learning_rate": 3.502520846183577e-07,
933
+ "loss": -0.6934,
934
+ "step": 420
935
+ },
936
+ {
937
+ "epoch": 1.7007007007007007,
938
+ "grad_norm": 2.100447654724121,
939
+ "grpo_mean_advantage": 3.2387674764322583e-06,
940
+ "grpo_mean_group_score": 0.5655918121337891,
941
+ "grpo_mean_kl_div": 0.0,
942
+ "grpo_std_advantage": 1.999079904635437e-05,
943
+ "learning_rate": 3.092332998903416e-07,
944
+ "loss": -0.5126,
945
+ "step": 425
946
+ },
947
+ {
948
+ "epoch": 1.7207207207207207,
949
+ "grad_norm": 2.1027915477752686,
950
+ "grpo_mean_advantage": 3.5464762504489045e-07,
951
+ "grpo_mean_group_score": 0.5474504232406616,
952
+ "grpo_mean_kl_div": 0.0,
953
+ "grpo_std_advantage": 1.7663603557593888e-06,
954
+ "learning_rate": 2.706100863843822e-07,
955
+ "loss": -0.5446,
956
+ "step": 430
957
+ },
958
+ {
959
+ "epoch": 1.7407407407407407,
960
+ "grad_norm": 2.289045572280884,
961
+ "grpo_mean_advantage": 3.6135315895080566e-07,
962
+ "grpo_mean_group_score": 0.5874254703521729,
963
+ "grpo_mean_kl_div": 0.0,
964
+ "grpo_std_advantage": 2.356920958845876e-06,
965
+ "learning_rate": 2.3442467801738867e-07,
966
+ "loss": -0.5125,
967
+ "step": 435
968
+ },
969
+ {
970
+ "epoch": 1.7607607607607607,
971
+ "grad_norm": 2.278038501739502,
972
+ "grpo_mean_advantage": 2.7567148563889532e-08,
973
+ "grpo_mean_group_score": 0.5815118551254272,
974
+ "grpo_mean_kl_div": 0.0,
975
+ "grpo_std_advantage": 9.97340521280421e-07,
976
+ "learning_rate": 2.007166430021415e-07,
977
+ "loss": -0.595,
978
+ "step": 440
979
+ },
980
+ {
981
+ "epoch": 1.7807807807807807,
982
+ "grad_norm": 2.340942621231079,
983
+ "grpo_mean_advantage": -8.34465012644614e-08,
984
+ "grpo_mean_group_score": 0.5611211061477661,
985
+ "grpo_mean_kl_div": 0.0,
986
+ "grpo_std_advantage": 5.558832185670326e-07,
987
+ "learning_rate": 1.6952284058003366e-07,
988
+ "loss": -0.8055,
989
+ "step": 445
990
+ },
991
+ {
992
+ "epoch": 1.800800800800801,
993
+ "grad_norm": 2.4256298542022705,
994
+ "grpo_mean_advantage": -1.9818544672034477e-07,
995
+ "grpo_mean_group_score": 0.590424656867981,
996
+ "grpo_mean_kl_div": 0.0,
997
+ "grpo_std_advantage": 6.800727305744658e-07,
998
+ "learning_rate": 1.4087738071603075e-07,
999
+ "loss": -0.8561,
1000
+ "step": 450
1001
+ },
1002
+ {
1003
+ "epoch": 1.820820820820821,
1004
+ "grad_norm": 1.6453255414962769,
1005
+ "grpo_mean_advantage": -1.9371508841459217e-08,
1006
+ "grpo_mean_group_score": 0.5670351982116699,
1007
+ "grpo_mean_kl_div": 0.0,
1008
+ "grpo_std_advantage": 3.142378943721269e-07,
1009
+ "learning_rate": 1.1481158679992554e-07,
1010
+ "loss": -0.429,
1011
+ "step": 455
1012
+ },
1013
+ {
1014
+ "epoch": 1.840840840840841,
1015
+ "grad_norm": 2.3458049297332764,
1016
+ "grpo_mean_advantage": 2.3692845729783585e-07,
1017
+ "grpo_mean_group_score": 0.5640432834625244,
1018
+ "grpo_mean_kl_div": 0.0,
1019
+ "grpo_std_advantage": 1.682946731307311e-06,
1020
+ "learning_rate": 9.135396139467151e-08,
1021
+ "loss": -0.642,
1022
+ "step": 460
1023
+ },
1024
+ {
1025
+ "epoch": 1.860860860860861,
1026
+ "grad_norm": 2.730945110321045,
1027
+ "grpo_mean_advantage": 1.110136480519941e-07,
1028
+ "grpo_mean_group_score": 0.5584251284599304,
1029
+ "grpo_mean_kl_div": 0.0,
1030
+ "grpo_std_advantage": 8.930008448260196e-07,
1031
+ "learning_rate": 7.053015506924749e-08,
1032
+ "loss": -0.583,
1033
+ "step": 465
1034
+ },
1035
+ {
1036
+ "epoch": 1.880880880880881,
1037
+ "grad_norm": 2.1463465690612793,
1038
+ "grpo_mean_advantage": 2.5406478698641877e-07,
1039
+ "grpo_mean_group_score": 0.5392154455184937,
1040
+ "grpo_mean_kl_div": 0.0,
1041
+ "grpo_std_advantage": 9.93092498902115e-07,
1042
+ "learning_rate": 5.236293835013839e-08,
1043
+ "loss": -0.5197,
1044
+ "step": 470
1045
+ },
1046
+ {
1047
+ "epoch": 1.900900900900901,
1048
+ "grad_norm": 2.427900791168213,
1049
+ "grpo_mean_advantage": -8.940697071579962e-09,
1050
+ "grpo_mean_group_score": 0.5686308741569519,
1051
+ "grpo_mean_kl_div": 0.0,
1052
+ "grpo_std_advantage": 5.835169645251881e-07,
1053
+ "learning_rate": 3.687217682209837e-08,
1054
+ "loss": -0.5864,
1055
+ "step": 475
1056
+ },
1057
+ {
1058
+ "epoch": 1.920920920920921,
1059
+ "grad_norm": 2.042795419692993,
1060
+ "grpo_mean_advantage": 4.0605664253234863e-07,
1061
+ "grpo_mean_group_score": 0.5842767357826233,
1062
+ "grpo_mean_kl_div": 0.0,
1063
+ "grpo_std_advantage": 2.3210795916384086e-06,
1064
+ "learning_rate": 2.4074809405425227e-08,
1065
+ "loss": -0.5721,
1066
+ "step": 480
1067
+ },
1068
+ {
1069
+ "epoch": 1.940940940940941,
1070
+ "grad_norm": 2.800136089324951,
1071
+ "grpo_mean_advantage": 1.341104507446289e-07,
1072
+ "grpo_mean_group_score": 0.5495311617851257,
1073
+ "grpo_mean_kl_div": 0.0,
1074
+ "grpo_std_advantage": 1.507950400991831e-06,
1075
+ "learning_rate": 1.3984829833499636e-08,
1076
+ "loss": -0.5944,
1077
+ "step": 485
1078
+ },
1079
+ {
1080
+ "epoch": 1.960960960960961,
1081
+ "grad_norm": 2.8475866317749023,
1082
+ "grpo_mean_advantage": 2.689659481802664e-07,
1083
+ "grpo_mean_group_score": 0.549436628818512,
1084
+ "grpo_mean_kl_div": 0.0,
1085
+ "grpo_std_advantage": 8.491958851664094e-07,
1086
+ "learning_rate": 6.6132713508446075e-09,
1087
+ "loss": -0.7015,
1088
+ "step": 490
1089
+ },
1090
+ {
1091
+ "epoch": 1.980980980980981,
1092
+ "grad_norm": 2.9422402381896973,
1093
+ "grpo_mean_advantage": 8.195638656616211e-08,
1094
+ "grpo_mean_group_score": 0.544632077217102,
1095
+ "grpo_mean_kl_div": 0.0,
1096
+ "grpo_std_advantage": 3.802849732892355e-06,
1097
+ "learning_rate": 1.9681946484320645e-09,
1098
+ "loss": -0.4033,
1099
+ "step": 495
1100
+ },
1101
+ {
1102
+ "epoch": 2.0,
1103
+ "grad_norm": 2.66204833984375,
1104
+ "grpo_mean_advantage": 7.552536089860951e-07,
1105
+ "grpo_mean_group_score": 0.5968535542488098,
1106
+ "grpo_mean_kl_div": 0.0,
1107
+ "grpo_std_advantage": 4.143997102801222e-06,
1108
+ "learning_rate": 5.467904943851077e-11,
1109
+ "loss": -0.6773,
1110
+ "step": 500
1111
+ }
1112
+ ],
1113
+ "logging_steps": 5,
1114
+ "max_steps": 500,
1115
+ "num_input_tokens_seen": 0,
1116
+ "num_train_epochs": 2,
1117
+ "save_steps": 100,
1118
+ "stateful_callbacks": {
1119
+ "TrainerControl": {
1120
+ "args": {
1121
+ "should_epoch_stop": false,
1122
+ "should_evaluate": false,
1123
+ "should_log": false,
1124
+ "should_save": true,
1125
+ "should_training_stop": true
1126
+ },
1127
+ "attributes": {}
1128
+ }
1129
+ },
1130
+ "total_flos": 0.0,
1131
+ "train_batch_size": 1,
1132
+ "trial_name": null,
1133
+ "trial_params": null
1134
+ }
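A note on the GRPO columns in the trainer state above: `grpo_mean_advantage` stays within roughly ±1e-6 of zero at every logged step. That is the expected signature of group-normalized advantages (the resolved config later in this commit sets `group_size: 4` and `normalize_advantages: true`): centering each completion's score on its group mean drives the per-batch mean advantage to ~0 up to floating-point error. Below is a minimal sketch of that computation, assuming scores arrive as a (num_groups, group_size) tensor; it is an illustration of the idea, not this repository's actual implementation.

import torch

def group_normalized_advantages(scores: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # scores: (num_groups, group_size), one reward per sampled completion.
    # Subtracting the group mean and dividing by the group std makes the
    # batch-mean advantage ~0 by construction, matching the ~1e-7 values
    # logged as grpo_mean_advantage above.
    mean = scores.mean(dim=1, keepdim=True)
    std = scores.std(dim=1, keepdim=True)
    return (scores - mean) / (std + eps)

# group_size = 4, as in the grpo section of config_resolved.yaml
scores = torch.tensor([[0.55, 0.60, 0.52, 0.58]])
print(group_normalized_advantages(scores).mean().item())  # ~0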
grpo_qwen_14B_v2/checkpoints/checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4a4e48ed61b7c96f3bd2836ac828013a311834ab8a9542ea461fe1ff953396b
3
+ size 5496
grpo_qwen_14B_v2/checkpoints/checkpoint-500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
grpo_qwen_14B_v2/config_resolved.yaml ADDED
@@ -0,0 +1,115 @@
1
+ run:
2
+ run_dir: runs/grpo_qwen_14b
3
+ model:
4
+ repo_id: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
5
+ tokenizer_name: Qwen/Qwen2.5-Coder-14B
6
+ load_in_8bit: false
7
+ load_in_4bit: false
8
+ torch_dtype: bfloat16
9
+ device_map: auto
10
+ trust_remote_code: true
11
+ wandb:
12
+ enabled: true
13
+ project: rl-training
14
+ entity: null
15
+ name: null
16
+ tags:
17
+ - grpo-lora
18
+ - 14B-QWEN
19
+ notes: null
20
+ data:
21
+ train_jsonl: grpo_dataset.jsonl
22
+ eval_jsonl: null
23
+ eval_split_ratio: 0.0
24
+ shuffle: true
25
+ num_proc: 1
26
+ prompt_field: prompt
27
+ completions_field: completions
28
+ scores_field: scores
29
+ format_type: raw
30
+ max_length: 2048
31
+ min_completions: 2
32
+ system_prompt: "You are a Hyperswitch Rust code analyzer. Identify functions/structs\
33
+ \ that need modification for a given task.\n\n## Output Format\n\n##OUTPUT\nExplain\
34
+ \ the data flow and why each component must change:\n- Flow: [Input \u2192 Processing\
35
+ \ \u2192 Output with arrows]\n- For each component: \"The [ComponentName] ([path])\
36
+ \ must [action] because [reason]\u2014without this, [consequence]\"\n- Explain\
37
+ \ coupling between components\n\n##SELECT\nmodify::crates/path/to/file.rs::impl::ComponentName\n\
38
+ add::crates/another/file.rs::function::AnotherComponent\n<EOS>\n\n## Rules\n\n\
39
+ 1. Use full paths: `remove::crates/folder/file.rs::Type::Name`\n2. Use `::` for\
40
+ \ nested items: `status::StructName::Type::Name`\n3. Always explain \"must change\
41
+ \ because\" and \"without this\"\n3. Types of components: function, struct, enum,\
42
+ \ impl, trait\n4. If there is extra information (e.g., enum variants), include\
43
+ \ that too.\n5. Start with ##OUTPUT, end with ##SELECT, terminate with <EOS>\n\
44
+ \n## Example\n\n##TASK\nAdd webhook subscription support\n\n##OUTPUT\nThe webhook\
45
+ \ system routes events via EventClass enum. Flow: webhook \u2192 EventClass \u2192\
46
+ \ handler \u2192 processing. The EventClass enum (crates/common_enums/src/enums.rs::EventClass)\
47
+ \ must add Subscriptions variant because it defines event routing\u2014without\
48
+ \ this, subscription events cannot be processed. The SubscriptionStatus impl (crates/common_enums/src/transformers.rs::SubscriptionStatus)\
49
+ \ must map to EventType because it converts status to events\u2014without this,\
50
+ \ status changes don't trigger webhooks. These are coupled: EventClass routes\
51
+ \ to handlers that use SubscriptionStatus mappings.\n\n##SELECT\ncrates/common_enums/src/enums.rs::EventClass\n\
52
+ crates/common_enums/src/transformers.rs::SubscriptionStatus\n<EOS>\n"
53
+ custom_template: '##INSTRUCTION
54
+
55
+ {instruction}<|im_end|>
56
+
57
+ {input}<|im_end|>
58
+
59
+ {output}<|im_end|>'
60
+ grpo:
61
+ group_size: 4
62
+ kl_coef: 0.05
63
+ normalize_advantages: true
64
+ reward_scaling: 1.0
65
+ reward_bias: 0.0
66
+ reward_clip: 5.0
67
+ advantage_temperature: 1.0
68
+ use_reference_model: false
69
+ seed: 42
70
+ peft:
71
+ enabled: true
72
+ r: 16
73
+ lora_alpha: 32
74
+ lora_dropout: 0.05
75
+ target_modules:
76
+ - q_proj
77
+ - k_proj
78
+ - v_proj
79
+ - o_proj
80
+ - gate_proj
81
+ - up_proj
82
+ - down_proj
83
+ bias: none
84
+ task_type: CAUSAL_LM
85
+ train:
86
+ output_dir: runs/grpo_14b_run1
87
+ num_train_epochs: 2
88
+ per_device_train_batch_size: 1
89
+ gradient_accumulation_steps: 8
90
+ per_device_eval_batch_size: 1
91
+ learning_rate: 5.0e-06
92
+ weight_decay: 0.01
93
+ warmup_ratio: 0.05
94
+ lr_scheduler_type: cosine
95
+ fp16: false
96
+ bf16: true
97
+ max_grad_norm: 1.0
98
+ gradient_checkpointing: true
99
+ logging_steps: 5
100
+ save_steps: 100
101
+ save_total_limit: 2
102
+ evaluation_strategy: 'no'
103
+ dataloader_num_workers: 4
104
+ dataloader_pin_memory: true
105
+ remove_unused_columns: false
106
+ report_to: []
107
+ seed: 42
108
+ ddp_find_unused_parameters: false
109
+ merge:
110
+ enabled: true
111
+ merged_dtype: float16
112
+ max_shard_size: 2GB
113
+ output_dir: ./Models/Qwen-Coder-14B-HS-CPT-SFT-v2-GRPO
114
+ upload:
115
+ enabled: false
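The peft: block in the config above pins down the LoRA adapter that produced the adapter_model.safetensors files in the checkpoints (r=16, alpha=32, dropout 0.05, attention and MLP projections as targets). For reference, a hypothetical sketch of the equivalent peft.LoraConfig; the values are copied from the config, but the surrounding model-loading code is assumed and is not part of this commit.

from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                 # peft.r
    lora_alpha=32,        # peft.lora_alpha
    lora_dropout=0.05,    # peft.lora_dropout
    target_modules=[      # peft.target_modules
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    bias="none",
    task_type="CAUSAL_LM",
)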
grpo_qwen_14B_v2/logs/grpo_metrics.jsonl ADDED
@@ -0,0 +1,101 @@
+ {"ts": "2025-12-27T19:40:44", "step": 5, "epoch": 0.02, "grpo_mean_advantage": -1.3560057254835556e-07, "grpo_std_advantage": 3.0318567496578908e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5922331809997559}
2
+ {"ts": "2025-12-27T19:46:55", "step": 5, "epoch": 0.02, "grpo_mean_advantage": -1.3560057254835556e-07, "grpo_std_advantage": 3.0318567496578908e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5922331809997559}
3
+ {"ts": "2025-12-27T19:48:18", "step": 10, "epoch": 0.04, "grpo_mean_advantage": 3.6619603633880615e-06, "grpo_std_advantage": 1.6246918676188216e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5561589002609253}
4
+ {"ts": "2025-12-27T19:49:40", "step": 15, "epoch": 0.0601, "grpo_mean_advantage": -1.0654330395709621e-07, "grpo_std_advantage": 5.399440965447866e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5759152173995972}
5
+ {"ts": "2025-12-27T19:51:01", "step": 20, "epoch": 0.0801, "grpo_mean_advantage": -5.871057737749652e-07, "grpo_std_advantage": 2.6951597646984737e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5127314329147339}
6
+ {"ts": "2025-12-27T19:52:23", "step": 25, "epoch": 0.1001, "grpo_mean_advantage": 6.370246410369873e-07, "grpo_std_advantage": 2.8908377771585947e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.539706826210022}
7
+ {"ts": "2025-12-27T19:53:48", "step": 30, "epoch": 0.1201, "grpo_mean_advantage": 6.705522359595761e-09, "grpo_std_advantage": 6.189450800775376e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5812538862228394}
8
+ {"ts": "2025-12-27T19:55:13", "step": 35, "epoch": 0.1401, "grpo_mean_advantage": 3.859400692363124e-07, "grpo_std_advantage": 1.6833292875162442e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5909844636917114}
9
+ {"ts": "2025-12-27T19:56:38", "step": 40, "epoch": 0.1602, "grpo_mean_advantage": 2.600252742013254e-07, "grpo_std_advantage": 1.4095899132371414e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5630953907966614}
10
+ {"ts": "2025-12-27T19:58:00", "step": 45, "epoch": 0.1802, "grpo_mean_advantage": -1.2591480924584175e-07, "grpo_std_advantage": 1.0309080380466185e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5604403614997864}
11
+ {"ts": "2025-12-27T19:59:27", "step": 50, "epoch": 0.2002, "grpo_mean_advantage": -2.808868941883702e-07, "grpo_std_advantage": 1.5696078889959608e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5971035957336426}
12
+ {"ts": "2025-12-27T20:00:53", "step": 55, "epoch": 0.2202, "grpo_mean_advantage": 2.6822089438383045e-08, "grpo_std_advantage": 3.7878271541558206e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5892971754074097}
13
+ {"ts": "2025-12-27T20:02:17", "step": 60, "epoch": 0.2402, "grpo_mean_advantage": -5.662441182607836e-08, "grpo_std_advantage": 6.128998393251095e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.564322292804718}
14
+ {"ts": "2025-12-27T20:03:45", "step": 65, "epoch": 0.2603, "grpo_mean_advantage": -1.5944242193199898e-07, "grpo_std_advantage": 1.6374274309782777e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.562497615814209}
15
+ {"ts": "2025-12-27T20:05:10", "step": 70, "epoch": 0.2803, "grpo_mean_advantage": 1.6838312433264946e-07, "grpo_std_advantage": 8.536571272088622e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5904761552810669}
16
+ {"ts": "2025-12-27T20:06:36", "step": 75, "epoch": 0.3003, "grpo_mean_advantage": 1.1175870895385742e-07, "grpo_std_advantage": 6.451961667153228e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5765624046325684}
17
+ {"ts": "2025-12-27T20:08:03", "step": 80, "epoch": 0.3203, "grpo_mean_advantage": -1.4603138254187797e-07, "grpo_std_advantage": 1.1309343790344428e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5858271718025208}
18
+ {"ts": "2025-12-27T20:09:31", "step": 85, "epoch": 0.3403, "grpo_mean_advantage": -1.817941665649414e-06, "grpo_std_advantage": 1.1141768482048064e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5871662497520447}
19
+ {"ts": "2025-12-27T20:10:55", "step": 90, "epoch": 0.3604, "grpo_mean_advantage": 1.8179416372277046e-07, "grpo_std_advantage": 6.210335072864837e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5330992937088013}
20
+ {"ts": "2025-12-27T20:12:19", "step": 95, "epoch": 0.3804, "grpo_mean_advantage": -2.972781771859445e-07, "grpo_std_advantage": 3.1582342217006953e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5265295505523682}
21
+ {"ts": "2025-12-27T20:13:46", "step": 100, "epoch": 0.4004, "grpo_mean_advantage": -7.033348197182931e-07, "grpo_std_advantage": 4.245831405569334e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5660771131515503}
22
+ {"ts": "2025-12-27T20:15:15", "step": 105, "epoch": 0.4204, "grpo_mean_advantage": 1.1920928955078125e-07, "grpo_std_advantage": 3.2809634831210133e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.57631915807724}
23
+ {"ts": "2025-12-27T20:16:39", "step": 110, "epoch": 0.4404, "grpo_mean_advantage": -4.0978193283081055e-07, "grpo_std_advantage": 6.0397578636184335e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.546563982963562}
24
+ {"ts": "2025-12-27T20:18:01", "step": 115, "epoch": 0.4605, "grpo_mean_advantage": -1.467764434437413e-07, "grpo_std_advantage": 2.2689375782647403e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5519219636917114}
25
+ {"ts": "2025-12-27T20:19:25", "step": 120, "epoch": 0.4805, "grpo_mean_advantage": -5.215406329028838e-09, "grpo_std_advantage": 7.929010621410271e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5490407943725586}
26
+ {"ts": "2025-12-27T20:20:48", "step": 125, "epoch": 0.5005, "grpo_mean_advantage": -5.7369469175228005e-08, "grpo_std_advantage": 1.2823379620385822e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5646580457687378}
27
+ {"ts": "2025-12-27T20:22:09", "step": 130, "epoch": 0.5205, "grpo_mean_advantage": 2.9876827056796174e-07, "grpo_std_advantage": 1.0496698905626545e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6111599802970886}
28
+ {"ts": "2025-12-27T20:23:32", "step": 135, "epoch": 0.5405, "grpo_mean_advantage": 1.5869736103013565e-07, "grpo_std_advantage": 1.2748531617035042e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5619662404060364}
29
+ {"ts": "2025-12-27T20:24:57", "step": 140, "epoch": 0.5606, "grpo_mean_advantage": 3.0100346748440643e-07, "grpo_std_advantage": 2.4499684059264837e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5795454978942871}
30
+ {"ts": "2025-12-27T20:26:19", "step": 145, "epoch": 0.5806, "grpo_mean_advantage": -3.233552092751779e-07, "grpo_std_advantage": 1.248456669600273e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5804953575134277}
31
+ {"ts": "2025-12-27T20:27:43", "step": 150, "epoch": 0.6006, "grpo_mean_advantage": 3.2261013416245987e-07, "grpo_std_advantage": 1.4773489738217904e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5628539323806763}
32
+ {"ts": "2025-12-27T20:29:07", "step": 155, "epoch": 0.6206, "grpo_mean_advantage": -2.5331974029541016e-07, "grpo_std_advantage": 1.5092309695319273e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5727725625038147}
33
+ {"ts": "2025-12-27T20:30:30", "step": 160, "epoch": 0.6406, "grpo_mean_advantage": -6.780028627417778e-08, "grpo_std_advantage": 8.550978805033083e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5833909511566162}
34
+ {"ts": "2025-12-27T20:31:51", "step": 165, "epoch": 0.6607, "grpo_mean_advantage": -5.587935447692871e-08, "grpo_std_advantage": 3.564579174053506e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5742615461349487}
35
+ {"ts": "2025-12-27T20:33:17", "step": 170, "epoch": 0.6807, "grpo_mean_advantage": -5.327165126800537e-07, "grpo_std_advantage": 2.309018327650847e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5758188962936401}
36
+ {"ts": "2025-12-27T20:34:40", "step": 175, "epoch": 0.7007, "grpo_mean_advantage": 5.863606702405377e-07, "grpo_std_advantage": 2.4449204829579685e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5767683982849121}
37
+ {"ts": "2025-12-27T20:36:09", "step": 180, "epoch": 0.7207, "grpo_mean_advantage": 3.2186508747145126e-07, "grpo_std_advantage": 2.293551688126172e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.586772084236145}
38
+ {"ts": "2025-12-27T20:37:33", "step": 185, "epoch": 0.7407, "grpo_mean_advantage": -4.470348358154297e-08, "grpo_std_advantage": 3.7067667335577426e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.549396276473999}
39
+ {"ts": "2025-12-27T20:38:56", "step": 190, "epoch": 0.7608, "grpo_mean_advantage": -2.1010637851759384e-07, "grpo_std_advantage": 1.1695076409523608e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5798425078392029}
40
+ {"ts": "2025-12-27T20:40:21", "step": 195, "epoch": 0.7808, "grpo_mean_advantage": 1.765787658314366e-07, "grpo_std_advantage": 2.429934738756856e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5584167838096619}
41
+ {"ts": "2025-12-27T20:41:46", "step": 200, "epoch": 0.8008, "grpo_mean_advantage": 1.6540289493605087e-07, "grpo_std_advantage": 2.6342788714828203e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5676193237304688}
42
+ {"ts": "2025-12-27T20:43:13", "step": 205, "epoch": 0.8208, "grpo_mean_advantage": -1.0944902442133753e-06, "grpo_std_advantage": 5.346942998585291e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5669739842414856}
43
+ {"ts": "2025-12-27T20:44:38", "step": 210, "epoch": 0.8408, "grpo_mean_advantage": 2.4065374759629776e-07, "grpo_std_advantage": 1.6327536513927043e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5922158360481262}
44
+ {"ts": "2025-12-27T20:46:02", "step": 215, "epoch": 0.8609, "grpo_mean_advantage": -5.21540641784668e-08, "grpo_std_advantage": 5.847922466273303e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5473950505256653}
45
+ {"ts": "2025-12-27T20:47:28", "step": 220, "epoch": 0.8809, "grpo_mean_advantage": 6.541609991472797e-07, "grpo_std_advantage": 4.072162937518442e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5880032777786255}
46
+ {"ts": "2025-12-27T20:48:53", "step": 225, "epoch": 0.9009, "grpo_mean_advantage": -1.2218951894737984e-07, "grpo_std_advantage": 4.386006935419573e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5835092663764954}
47
+ {"ts": "2025-12-27T20:50:20", "step": 230, "epoch": 0.9209, "grpo_mean_advantage": 1.7605722177904681e-06, "grpo_std_advantage": 8.007580618141219e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5394966006278992}
48
+ {"ts": "2025-12-27T20:51:41", "step": 235, "epoch": 0.9409, "grpo_mean_advantage": -3.3080578987210174e-07, "grpo_std_advantage": 1.551636614749441e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5687432289123535}
49
+ {"ts": "2025-12-27T20:53:06", "step": 240, "epoch": 0.961, "grpo_mean_advantage": 2.712011450967111e-07, "grpo_std_advantage": 1.4400844747797237e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5550583600997925}
50
+ {"ts": "2025-12-27T20:54:27", "step": 245, "epoch": 0.981, "grpo_mean_advantage": -3.2857059295565705e-07, "grpo_std_advantage": 2.105091425619321e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.558111310005188}
51
+ {"ts": "2025-12-27T20:55:45", "step": 250, "epoch": 1.0, "grpo_mean_advantage": 4.470348358154297e-08, "grpo_std_advantage": 5.315724820320611e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6196198463439941}
52
+ {"ts": "2025-12-27T20:57:06", "step": 255, "epoch": 1.02, "grpo_mean_advantage": 9.290873776990338e-07, "grpo_std_advantage": 4.219644324621186e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.582168459892273}
53
+ {"ts": "2025-12-27T20:58:26", "step": 260, "epoch": 1.04, "grpo_mean_advantage": 2.533197474008375e-08, "grpo_std_advantage": 1.6600588992332632e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5551307797431946}
54
+ {"ts": "2025-12-27T20:59:48", "step": 265, "epoch": 1.0601, "grpo_mean_advantage": -5.662441182607836e-08, "grpo_std_advantage": 1.0909400316450046e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.535040020942688}
55
+ {"ts": "2025-12-27T21:01:15", "step": 270, "epoch": 1.0801, "grpo_mean_advantage": -9.536743306171047e-08, "grpo_std_advantage": 5.838213610331877e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5673571825027466}
56
+ {"ts": "2025-12-27T21:02:37", "step": 275, "epoch": 1.1001, "grpo_mean_advantage": 3.278255533700758e-08, "grpo_std_advantage": 9.317170679423725e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5874732732772827}
57
+ {"ts": "2025-12-27T21:03:59", "step": 280, "epoch": 1.1201, "grpo_mean_advantage": -1.206994113545079e-07, "grpo_std_advantage": 6.201085511747806e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5569106340408325}
58
+ {"ts": "2025-12-27T21:05:19", "step": 285, "epoch": 1.1401, "grpo_mean_advantage": 4.470348358154297e-08, "grpo_std_advantage": 6.115651558502577e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5578873157501221}
59
+ {"ts": "2025-12-27T21:06:43", "step": 290, "epoch": 1.1602, "grpo_mean_advantage": -3.3453108017056365e-07, "grpo_std_advantage": 3.5326345368957845e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5735999345779419}
60
+ {"ts": "2025-12-27T21:08:10", "step": 295, "epoch": 1.1802, "grpo_mean_advantage": -1.110136480519941e-07, "grpo_std_advantage": 4.731904823529476e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5626259446144104}
61
+ {"ts": "2025-12-27T21:09:34", "step": 300, "epoch": 1.2002, "grpo_mean_advantage": -5.08874677507265e-07, "grpo_std_advantage": 1.840126174101897e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5463050603866577}
62
+ {"ts": "2025-12-27T21:10:59", "step": 305, "epoch": 1.2202, "grpo_mean_advantage": 1.01327898960335e-07, "grpo_std_advantage": 7.798533943059738e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5352144241333008}
63
+ {"ts": "2025-12-27T21:12:25", "step": 310, "epoch": 1.2402, "grpo_mean_advantage": 1.341104507446289e-07, "grpo_std_advantage": 7.821902840987605e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5547868013381958}
64
+ {"ts": "2025-12-27T21:13:52", "step": 315, "epoch": 1.2603, "grpo_mean_advantage": 9.015202806494926e-08, "grpo_std_advantage": 1.0693488547985908e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5859472751617432}
65
+ {"ts": "2025-12-27T21:15:21", "step": 320, "epoch": 1.2803, "grpo_mean_advantage": -2.443790378947597e-07, "grpo_std_advantage": 1.183122208203713e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5751550793647766}
66
+ {"ts": "2025-12-27T21:16:47", "step": 325, "epoch": 1.3003, "grpo_mean_advantage": -6.705522537231445e-08, "grpo_std_advantage": 6.109748937888071e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5497723817825317}
67
+ {"ts": "2025-12-27T21:18:13", "step": 330, "epoch": 1.3203, "grpo_mean_advantage": -1.639127766850379e-08, "grpo_std_advantage": 5.529495297196263e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.55989670753479}
68
+ {"ts": "2025-12-27T21:19:37", "step": 335, "epoch": 1.3403, "grpo_mean_advantage": 4.418194237132411e-07, "grpo_std_advantage": 2.9275292945385445e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5809233784675598}
69
+ {"ts": "2025-12-27T21:20:59", "step": 340, "epoch": 1.3604, "grpo_mean_advantage": 9.685754776000977e-08, "grpo_std_advantage": 3.754235251562932e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5568087100982666}
70
+ {"ts": "2025-12-27T21:22:27", "step": 345, "epoch": 1.3804, "grpo_mean_advantage": -2.384185791015625e-07, "grpo_std_advantage": 6.821086913078034e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5655568838119507}
71
+ {"ts": "2025-12-27T21:23:49", "step": 350, "epoch": 1.4004, "grpo_mean_advantage": -8.717179156292332e-08, "grpo_std_advantage": 2.500940354366321e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6089578866958618}
72
+ {"ts": "2025-12-27T21:25:13", "step": 355, "epoch": 1.4204, "grpo_mean_advantage": 2.1606683731079102e-07, "grpo_std_advantage": 1.4568390724889468e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6129671335220337}
73
+ {"ts": "2025-12-27T21:26:36", "step": 360, "epoch": 1.4404, "grpo_mean_advantage": -3.725290298461914e-09, "grpo_std_advantage": 2.965894054796081e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5562310814857483}
74
+ {"ts": "2025-12-27T21:27:58", "step": 365, "epoch": 1.4605, "grpo_mean_advantage": 4.313886279305734e-07, "grpo_std_advantage": 1.9621948013082147e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5884170532226562}
75
+ {"ts": "2025-12-27T21:29:21", "step": 370, "epoch": 1.4805, "grpo_mean_advantage": 2.0489096641540527e-07, "grpo_std_advantage": 1.0235522722723545e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5795440673828125}
76
+ {"ts": "2025-12-27T21:30:45", "step": 375, "epoch": 1.5005, "grpo_mean_advantage": -1.4185905001795618e-06, "grpo_std_advantage": 1.0947338523692451e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5607603788375854}
77
+ {"ts": "2025-12-27T21:32:13", "step": 380, "epoch": 1.5205, "grpo_mean_advantage": -1.758337049295733e-07, "grpo_std_advantage": 9.663675655247062e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5381432771682739}
78
+ {"ts": "2025-12-27T21:33:36", "step": 385, "epoch": 1.5405, "grpo_mean_advantage": -6.973743325033865e-07, "grpo_std_advantage": 4.341973180999048e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5528443455696106}
79
+ {"ts": "2025-12-27T21:34:59", "step": 390, "epoch": 1.5606, "grpo_mean_advantage": 1.7881394143159923e-08, "grpo_std_advantage": 1.3004198251564958e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6091476678848267}
80
+ {"ts": "2025-12-27T21:36:21", "step": 395, "epoch": 1.5806, "grpo_mean_advantage": 3.4868716625169327e-07, "grpo_std_advantage": 2.059372718576924e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5397372245788574}
81
+ {"ts": "2025-12-27T21:37:48", "step": 400, "epoch": 1.6006, "grpo_mean_advantage": -2.1636485598719446e-06, "grpo_std_advantage": 9.725940799398813e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5873125195503235}
82
+ {"ts": "2025-12-27T21:39:11", "step": 405, "epoch": 1.6206, "grpo_mean_advantage": -5.960464477539063e-08, "grpo_std_advantage": 3.460792754594877e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5601426362991333}
83
+ {"ts": "2025-12-27T21:40:38", "step": 410, "epoch": 1.6406, "grpo_mean_advantage": 2.6226044269606064e-07, "grpo_std_advantage": 7.928817922220333e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.578656792640686}
84
+ {"ts": "2025-12-27T21:42:02", "step": 415, "epoch": 1.6607, "grpo_mean_advantage": 2.3558736756967846e-06, "grpo_std_advantage": 1.4469559573626611e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5885810852050781}
85
+ {"ts": "2025-12-27T21:43:27", "step": 420, "epoch": 1.6807, "grpo_mean_advantage": 1.639127766850379e-08, "grpo_std_advantage": 9.352411325380672e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5805023312568665}
86
+ {"ts": "2025-12-27T21:44:52", "step": 425, "epoch": 1.7007, "grpo_mean_advantage": 3.2387674764322583e-06, "grpo_std_advantage": 1.999079904635437e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5655918121337891}
87
+ {"ts": "2025-12-27T21:46:17", "step": 430, "epoch": 1.7207, "grpo_mean_advantage": 3.5464762504489045e-07, "grpo_std_advantage": 1.7663603557593888e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5474504232406616}
88
+ {"ts": "2025-12-27T21:47:43", "step": 435, "epoch": 1.7407, "grpo_mean_advantage": 3.6135315895080566e-07, "grpo_std_advantage": 2.356920958845876e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5874254703521729}
89
+ {"ts": "2025-12-27T21:49:08", "step": 440, "epoch": 1.7608, "grpo_mean_advantage": 2.7567148563889532e-08, "grpo_std_advantage": 9.97340521280421e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5815118551254272}
90
+ {"ts": "2025-12-27T21:50:35", "step": 445, "epoch": 1.7808, "grpo_mean_advantage": -8.34465012644614e-08, "grpo_std_advantage": 5.558832185670326e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5611211061477661}
91
+ {"ts": "2025-12-27T21:52:06", "step": 450, "epoch": 1.8008, "grpo_mean_advantage": -1.9818544672034477e-07, "grpo_std_advantage": 6.800727305744658e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.590424656867981}
92
+ {"ts": "2025-12-27T21:53:32", "step": 455, "epoch": 1.8208, "grpo_mean_advantage": -1.9371508841459217e-08, "grpo_std_advantage": 3.142378943721269e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5670351982116699}
93
+ {"ts": "2025-12-27T21:55:00", "step": 460, "epoch": 1.8408, "grpo_mean_advantage": 2.3692845729783585e-07, "grpo_std_advantage": 1.682946731307311e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5640432834625244}
94
+ {"ts": "2025-12-27T21:56:27", "step": 465, "epoch": 1.8609, "grpo_mean_advantage": 1.110136480519941e-07, "grpo_std_advantage": 8.930008448260196e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5584251284599304}
95
+ {"ts": "2025-12-27T21:57:50", "step": 470, "epoch": 1.8809, "grpo_mean_advantage": 2.5406478698641877e-07, "grpo_std_advantage": 9.93092498902115e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5392154455184937}
96
+ {"ts": "2025-12-27T21:59:15", "step": 475, "epoch": 1.9009, "grpo_mean_advantage": -8.940697071579962e-09, "grpo_std_advantage": 5.835169645251881e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5686308741569519}
97
+ {"ts": "2025-12-27T22:00:36", "step": 480, "epoch": 1.9209, "grpo_mean_advantage": 4.0605664253234863e-07, "grpo_std_advantage": 2.3210795916384086e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5842767357826233}
98
+ {"ts": "2025-12-27T22:01:58", "step": 485, "epoch": 1.9409, "grpo_mean_advantage": 1.341104507446289e-07, "grpo_std_advantage": 1.507950400991831e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5495311617851257}
99
+ {"ts": "2025-12-27T22:03:20", "step": 490, "epoch": 1.961, "grpo_mean_advantage": 2.689659481802664e-07, "grpo_std_advantage": 8.491958851664094e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.549436628818512}
100
+ {"ts": "2025-12-27T22:04:42", "step": 495, "epoch": 1.981, "grpo_mean_advantage": 8.195638656616211e-08, "grpo_std_advantage": 3.802849732892355e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.544632077217102}
101
+ {"ts": "2025-12-27T22:06:01", "step": 500, "epoch": 2.0, "grpo_mean_advantage": 7.552536089860951e-07, "grpo_std_advantage": 4.143997102801222e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5968535542488098}
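Both files under logs/ are newline-delimited JSON with one record per logging step (logging_steps: 5 in the training config), so they load directly into a dataframe. A small usage sketch; pandas is just one convenient reader here, not something this repo depends on.

import pandas as pd

# One JSON object per line -> lines=True
df = pd.read_json("grpo_qwen_14B_v2/logs/grpo_metrics.jsonl", lines=True)

# e.g. follow the mean group score over training
print(df[["step", "epoch", "grpo_mean_group_score"]].tail())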
grpo_qwen_14B_v2/logs/train.jsonl ADDED
@@ -0,0 +1,102 @@
1
+ {"ts": "2025-12-27T19:40:44", "event": "train_log", "step": 5, "epoch": 0.02002002002002002, "progress_pct": 1.0, "epoch_pct": 1.0, "eta": "02:23:23", "max_grad_norm": 1.0, "loss": 0.007, "grad_norm": 0.06052486598491669, "learning_rate": 8.000000000000001e-07, "grpo_mean_advantage": -1.3560057254835556e-07, "grpo_std_advantage": 3.0318567496578908e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5922331809997559}
2
+ {"ts": "2025-12-27T19:46:55", "event": "train_log", "step": 5, "epoch": 0.02002002002002002, "progress_pct": 1.0, "epoch_pct": 1.0, "eta": "02:16:53", "max_grad_norm": 1.0, "loss": 0.007, "grad_norm": 0.05460292845964432, "learning_rate": 8.000000000000001e-07, "grpo_mean_advantage": -1.3560057254835556e-07, "grpo_std_advantage": 3.0318567496578908e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5922331809997559}
3
+ {"ts": "2025-12-27T19:48:18", "event": "train_log", "step": 10, "epoch": 0.04004004004004004, "progress_pct": 2.0, "epoch_pct": 2.0, "eta": "02:15:24", "max_grad_norm": 1.0, "loss": 0.0107, "grad_norm": 0.0679207444190979, "learning_rate": 1.8000000000000001e-06, "grpo_mean_advantage": 3.6619603633880615e-06, "grpo_std_advantage": 1.6246918676188216e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5561589002609253}
4
+ {"ts": "2025-12-27T19:49:40", "event": "train_log", "step": 15, "epoch": 0.06006006006006006, "progress_pct": 3.0, "epoch_pct": 3.0, "eta": "02:13:25", "max_grad_norm": 1.0, "loss": 0.007, "grad_norm": 0.05788416787981987, "learning_rate": 2.8000000000000003e-06, "grpo_mean_advantage": -1.0654330395709621e-07, "grpo_std_advantage": 5.399440965447866e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5759152173995972}
5
+ {"ts": "2025-12-27T19:51:01", "event": "train_log", "step": 20, "epoch": 0.08008008008008008, "progress_pct": 4.0, "epoch_pct": 4.0, "eta": "02:11:33", "max_grad_norm": 1.0, "loss": 0.0246, "grad_norm": 0.0746568813920021, "learning_rate": 3.8000000000000005e-06, "grpo_mean_advantage": -5.871057737749652e-07, "grpo_std_advantage": 2.6951597646984737e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5127314329147339}
6
+ {"ts": "2025-12-27T19:52:23", "event": "train_log", "step": 25, "epoch": 0.1001001001001001, "progress_pct": 5.0, "epoch_pct": 5.01, "eta": "02:10:02", "max_grad_norm": 1.0, "loss": 0.0337, "grad_norm": 0.11442846059799194, "learning_rate": 4.800000000000001e-06, "grpo_mean_advantage": 6.370246410369873e-07, "grpo_std_advantage": 2.8908377771585947e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.539706826210022}
7
+ {"ts": "2025-12-27T19:53:48", "event": "train_log", "step": 30, "epoch": 0.12012012012012012, "progress_pct": 6.0, "epoch_pct": 6.01, "eta": "02:09:24", "max_grad_norm": 1.0, "loss": 0.0171, "grad_norm": 0.05778791010379791, "learning_rate": 4.999125183044924e-06, "grpo_mean_advantage": 6.705522359595761e-09, "grpo_std_advantage": 6.189450800775376e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5812538862228394}
8
+ {"ts": "2025-12-27T19:55:13", "event": "train_log", "step": 35, "epoch": 0.14014014014014015, "progress_pct": 7.0, "epoch_pct": 7.01, "eta": "02:08:36", "max_grad_norm": 1.0, "loss": 0.0145, "grad_norm": 0.05819695070385933, "learning_rate": 4.995572288443412e-06, "grpo_mean_advantage": 3.859400692363124e-07, "grpo_std_advantage": 1.6833292875162442e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5909844636917114}
9
+ {"ts": "2025-12-27T19:56:38", "event": "train_log", "step": 40, "epoch": 0.16016016016016016, "progress_pct": 8.0, "epoch_pct": 8.01, "eta": "02:07:30", "max_grad_norm": 1.0, "loss": 0.0196, "grad_norm": 0.07968433201313019, "learning_rate": 4.98929052218411e-06, "grpo_mean_advantage": 2.600252742013254e-07, "grpo_std_advantage": 1.4095899132371414e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5630953907966614}
10
+ {"ts": "2025-12-27T19:58:00", "event": "train_log", "step": 45, "epoch": 0.18018018018018017, "progress_pct": 9.0, "epoch_pct": 9.01, "eta": "02:06:04", "max_grad_norm": 1.0, "loss": 0.0186, "grad_norm": 0.0733402892947197, "learning_rate": 4.980286753286196e-06, "grpo_mean_advantage": -1.2591480924584175e-07, "grpo_std_advantage": 1.0309080380466185e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5604403614997864}
11
+ {"ts": "2025-12-27T19:59:27", "event": "train_log", "step": 50, "epoch": 0.2002002002002002, "progress_pct": 10.0, "epoch_pct": 10.01, "eta": "02:05:12", "max_grad_norm": 1.0, "loss": 0.0286, "grad_norm": 0.07136482000350952, "learning_rate": 4.9685708272387645e-06, "grpo_mean_advantage": -2.808868941883702e-07, "grpo_std_advantage": 1.5696078889959608e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5971035957336426}
12
+ {"ts": "2025-12-27T20:00:53", "event": "train_log", "step": 55, "epoch": 0.22022022022022023, "progress_pct": 11.0, "epoch_pct": 11.01, "eta": "02:04:07", "max_grad_norm": 1.0, "loss": 0.0054, "grad_norm": 0.08851475268602371, "learning_rate": 4.9541555552349404e-06, "grpo_mean_advantage": 2.6822089438383045e-08, "grpo_std_advantage": 3.7878271541558206e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5892971754074097}
13
+ {"ts": "2025-12-27T20:02:17", "event": "train_log", "step": 60, "epoch": 0.24024024024024024, "progress_pct": 12.0, "epoch_pct": 12.01, "eta": "02:02:51", "max_grad_norm": 1.0, "loss": -0.0074, "grad_norm": 0.07778509706258774, "learning_rate": 4.9370567001630155e-06, "grpo_mean_advantage": -5.662441182607836e-08, "grpo_std_advantage": 6.128998393251095e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.564322292804718}
14
+ {"ts": "2025-12-27T20:03:45", "event": "train_log", "step": 65, "epoch": 0.2602602602602603, "progress_pct": 13.0, "epoch_pct": 13.01, "eta": "02:01:55", "max_grad_norm": 1.0, "loss": 0.0145, "grad_norm": 0.08740051090717316, "learning_rate": 4.917292959369968e-06, "grpo_mean_advantage": -1.5944242193199898e-07, "grpo_std_advantage": 1.6374274309782777e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.562497615814209}
15
+ {"ts": "2025-12-27T20:05:10", "event": "train_log", "step": 70, "epoch": 0.2802802802802803, "progress_pct": 14.0, "epoch_pct": 14.01, "eta": "02:00:32", "max_grad_norm": 1.0, "loss": 0.0257, "grad_norm": 0.19070060551166534, "learning_rate": 4.8948859442161876e-06, "grpo_mean_advantage": 1.6838312433264946e-07, "grpo_std_advantage": 8.536571272088622e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5904761552810669}
16
+ {"ts": "2025-12-27T20:06:36", "event": "train_log", "step": 75, "epoch": 0.3003003003003003, "progress_pct": 15.0, "epoch_pct": 15.02, "eta": "01:59:23", "max_grad_norm": 1.0, "loss": 0.0024, "grad_norm": 0.07321271300315857, "learning_rate": 4.869860156443768e-06, "grpo_mean_advantage": 1.1175870895385742e-07, "grpo_std_advantage": 6.451961667153228e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5765624046325684}
17
+ {"ts": "2025-12-27T20:08:03", "event": "train_log", "step": 80, "epoch": 0.3203203203203203, "progress_pct": 16.0, "epoch_pct": 16.02, "eta": "01:58:12", "max_grad_norm": 1.0, "loss": 0.0277, "grad_norm": 0.07126748561859131, "learning_rate": 4.842242961384211e-06, "grpo_mean_advantage": -1.4603138254187797e-07, "grpo_std_advantage": 1.1309343790344428e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5858271718025208}
18
+ {"ts": "2025-12-27T20:09:31", "event": "train_log", "step": 85, "epoch": 0.34034034034034033, "progress_pct": 17.0, "epoch_pct": 17.02, "eta": "01:57:04", "max_grad_norm": 1.0, "loss": 0.0246, "grad_norm": 0.08629189431667328, "learning_rate": 4.812064558034847e-06, "grpo_mean_advantage": -1.817941665649414e-06, "grpo_std_advantage": 1.1141768482048064e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5871662497520447}
19
+ {"ts": "2025-12-27T20:10:55", "event": "train_log", "step": 90, "epoch": 0.36036036036036034, "progress_pct": 18.0, "epoch_pct": 18.02, "eta": "01:55:38", "max_grad_norm": 1.0, "loss": 0.0056, "grad_norm": 0.0998779758810997, "learning_rate": 4.779357946036662e-06, "grpo_mean_advantage": 1.8179416372277046e-07, "grpo_std_advantage": 6.210335072864837e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5330992937088013}
20
+ {"ts": "2025-12-27T20:12:19", "event": "train_log", "step": 95, "epoch": 0.38038038038038036, "progress_pct": 19.0, "epoch_pct": 19.02, "eta": "01:54:09", "max_grad_norm": 1.0, "loss": 0.0053, "grad_norm": 0.10614689439535141, "learning_rate": 4.74415888958968e-06, "grpo_mean_advantage": -2.972781771859445e-07, "grpo_std_advantage": 3.1582342217006953e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5265295505523682}
21
+ {"ts": "2025-12-27T20:13:46", "event": "train_log", "step": 100, "epoch": 0.4004004004004004, "progress_pct": 20.0, "epoch_pct": 20.02, "eta": "01:52:55", "max_grad_norm": 1.0, "loss": 0.0134, "grad_norm": 0.10345634073019028, "learning_rate": 4.706505878345343e-06, "grpo_mean_advantage": -7.033348197182931e-07, "grpo_std_advantage": 4.245831405569334e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5660771131515503}
22
+ {"ts": "2025-12-27T20:15:15", "event": "train_log", "step": 105, "epoch": 0.42042042042042044, "progress_pct": 21.0, "epoch_pct": 21.02, "eta": "01:51:45", "max_grad_norm": 1.0, "loss": 0.0004, "grad_norm": 0.10077933222055435, "learning_rate": 4.666440085318626e-06, "grpo_mean_advantage": 1.1920928955078125e-07, "grpo_std_advantage": 3.2809634831210133e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.57631915807724}
23
+ {"ts": "2025-12-27T20:16:39", "event": "train_log", "step": 110, "epoch": 0.44044044044044045, "progress_pct": 22.0, "epoch_pct": 22.02, "eta": "01:50:19", "max_grad_norm": 1.0, "loss": 0.0033, "grad_norm": 0.09548182785511017, "learning_rate": 4.624005321865968e-06, "grpo_mean_advantage": -4.0978193283081055e-07, "grpo_std_advantage": 6.0397578636184335e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.546563982963562}
24
+ {"ts": "2025-12-27T20:18:01", "event": "train_log", "step": 115, "epoch": 0.46046046046046046, "progress_pct": 23.0, "epoch_pct": 23.02, "eta": "01:48:43", "max_grad_norm": 1.0, "loss": 0.0095, "grad_norm": 0.09417816251516342, "learning_rate": 4.57924798977818e-06, "grpo_mean_advantage": -1.467764434437413e-07, "grpo_std_advantage": 2.2689375782647403e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5519219636917114}
25
+ {"ts": "2025-12-27T20:19:25", "event": "train_log", "step": 120, "epoch": 0.4804804804804805, "progress_pct": 24.0, "epoch_pct": 24.02, "eta": "01:47:18", "max_grad_norm": 1.0, "loss": 0.0006, "grad_norm": 0.10022275149822235, "learning_rate": 4.532217030540781e-06, "grpo_mean_advantage": -5.215406329028838e-09, "grpo_std_advantage": 7.929010621410271e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5490407943725586}
26
+ {"ts": "2025-12-27T20:20:48", "event": "train_log", "step": 125, "epoch": 0.5005005005005005, "progress_pct": 25.0, "epoch_pct": 25.03, "eta": "01:45:46", "max_grad_norm": 1.0, "loss": -0.0046, "grad_norm": 0.14057794213294983, "learning_rate": 4.482963871817195e-06, "grpo_mean_advantage": -5.7369469175228005e-08, "grpo_std_advantage": 1.2823379620385822e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5646580457687378}
27
+ {"ts": "2025-12-27T20:22:09", "event": "train_log", "step": 130, "epoch": 0.5205205205205206, "progress_pct": 26.0, "epoch_pct": 26.03, "eta": "01:44:12", "max_grad_norm": 1.0, "loss": -0.003, "grad_norm": 0.12420658767223358, "learning_rate": 4.4315423712133595e-06, "grpo_mean_advantage": 2.9876827056796174e-07, "grpo_std_advantage": 1.0496698905626545e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6111599802970886}
28
+ {"ts": "2025-12-27T20:23:32", "event": "train_log", "step": 135, "epoch": 0.5405405405405406, "progress_pct": 27.0, "epoch_pct": 27.03, "eta": "01:42:43", "max_grad_norm": 1.0, "loss": 0.0154, "grad_norm": 0.14342808723449707, "learning_rate": 4.378008757385222e-06, "grpo_mean_advantage": 1.5869736103013565e-07, "grpo_std_advantage": 1.2748531617035042e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5619662404060364}
29
+ {"ts": "2025-12-27T20:24:57", "event": "train_log", "step": 140, "epoch": 0.5605605605605606, "progress_pct": 28.0, "epoch_pct": 28.03, "eta": "01:41:21", "max_grad_norm": 1.0, "loss": -0.0262, "grad_norm": 0.14729444682598114, "learning_rate": 4.322421568553529e-06, "grpo_mean_advantage": 3.0100346748440643e-07, "grpo_std_advantage": 2.4499684059264837e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5795454978942871}
30
+ {"ts": "2025-12-27T20:26:19", "event": "train_log", "step": 145, "epoch": 0.5805805805805806, "progress_pct": 29.0, "epoch_pct": 29.03, "eta": "01:39:51", "max_grad_norm": 1.0, "loss": 0.0018, "grad_norm": 0.15249410271644592, "learning_rate": 4.2648415884931476e-06, "grpo_mean_advantage": -3.233552092751779e-07, "grpo_std_advantage": 1.248456669600273e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5804953575134277}
31
+ {"ts": "2025-12-27T20:27:43", "event": "train_log", "step": 150, "epoch": 0.6006006006006006, "progress_pct": 30.0, "epoch_pct": 30.03, "eta": "01:38:25", "max_grad_norm": 1.0, "loss": -0.017, "grad_norm": 0.1841023564338684, "learning_rate": 4.205331780066892e-06, "grpo_mean_advantage": 3.2261013416245987e-07, "grpo_std_advantage": 1.4773489738217904e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5628539323806763}
32
+ {"ts": "2025-12-27T20:29:07", "event": "train_log", "step": 155, "epoch": 0.6206206206206206, "progress_pct": 31.0, "epoch_pct": 31.03, "eta": "01:37:00", "max_grad_norm": 1.0, "loss": 0.0044, "grad_norm": 0.18597163259983063, "learning_rate": 4.1439572163765615e-06, "grpo_mean_advantage": -2.5331974029541016e-07, "grpo_std_advantage": 1.5092309695319273e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5727725625038147}
33
+ {"ts": "2025-12-27T20:30:30", "event": "train_log", "step": 160, "epoch": 0.6406406406406406, "progress_pct": 32.0, "epoch_pct": 32.03, "eta": "01:35:33", "max_grad_norm": 1.0, "loss": -0.005, "grad_norm": 0.18310388922691345, "learning_rate": 4.0807850096064605e-06, "grpo_mean_advantage": -6.780028627417778e-08, "grpo_std_advantage": 8.550978805033083e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5833909511566162}
34
+ {"ts": "2025-12-27T20:31:51", "event": "train_log", "step": 165, "epoch": 0.6606606606606606, "progress_pct": 33.0, "epoch_pct": 33.03, "eta": "01:34:01", "max_grad_norm": 1.0, "loss": -0.015, "grad_norm": 0.2192923128604889, "learning_rate": 4.015884237637206e-06, "grpo_mean_advantage": -5.587935447692871e-08, "grpo_std_advantage": 3.564579174053506e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5742615461349487}
35
+ {"ts": "2025-12-27T20:33:17", "event": "train_log", "step": 170, "epoch": 0.6806806806806807, "progress_pct": 34.0, "epoch_pct": 34.03, "eta": "01:32:41", "max_grad_norm": 1.0, "loss": -0.0314, "grad_norm": 0.16708803176879883, "learning_rate": 3.949325868510083e-06, "grpo_mean_advantage": -5.327165126800537e-07, "grpo_std_advantage": 2.309018327650847e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5758188962936401}
36
+ {"ts": "2025-12-27T20:34:40", "event": "train_log", "step": 175, "epoch": 0.7007007007007007, "progress_pct": 35.0, "epoch_pct": 35.04, "eta": "01:31:14", "max_grad_norm": 1.0, "loss": -0.0441, "grad_norm": 0.3401262164115906, "learning_rate": 3.881182682824534e-06, "grpo_mean_advantage": 5.863606702405377e-07, "grpo_std_advantage": 2.4449204829579685e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5767683982849121}
37
+ {"ts": "2025-12-27T20:36:09", "event": "train_log", "step": 180, "epoch": 0.7207207207207207, "progress_pct": 36.0, "epoch_pct": 36.04, "eta": "01:29:58", "max_grad_norm": 1.0, "loss": -0.0162, "grad_norm": 0.1931898146867752, "learning_rate": 3.811529194153635e-06, "grpo_mean_advantage": 3.2186508747145126e-07, "grpo_std_advantage": 2.293551688126172e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.586772084236145}
38
+ {"ts": "2025-12-27T20:37:33", "event": "train_log", "step": 185, "epoch": 0.7407407407407407, "progress_pct": 37.0, "epoch_pct": 37.04, "eta": "01:28:33", "max_grad_norm": 1.0, "loss": -0.0386, "grad_norm": 0.2537969648838043, "learning_rate": 3.7404415675646054e-06, "grpo_mean_advantage": -4.470348358154297e-08, "grpo_std_advantage": 3.7067667335577426e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.549396276473999}
39
+ {"ts": "2025-12-27T20:38:56", "event": "train_log", "step": 190, "epoch": 0.7607607607607607, "progress_pct": 38.0, "epoch_pct": 38.04, "eta": "01:27:06", "max_grad_norm": 1.0, "loss": -0.037, "grad_norm": 0.20326584577560425, "learning_rate": 3.667997536333424e-06, "grpo_mean_advantage": -2.1010637851759384e-07, "grpo_std_advantage": 1.1695076409523608e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5798425078392029}
40
+ {"ts": "2025-12-27T20:40:21", "event": "train_log", "step": 195, "epoch": 0.7807807807807807, "progress_pct": 39.0, "epoch_pct": 39.04, "eta": "01:25:43", "max_grad_norm": 1.0, "loss": -0.0292, "grad_norm": 0.25048357248306274, "learning_rate": 3.59427631694463e-06, "grpo_mean_advantage": 1.765787658314366e-07, "grpo_std_advantage": 2.429934738756856e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5584167838096619}
41
+ {"ts": "2025-12-27T20:41:46", "event": "train_log", "step": 200, "epoch": 0.8008008008008008, "progress_pct": 40.0, "epoch_pct": 40.04, "eta": "01:24:20", "max_grad_norm": 1.0, "loss": -0.0454, "grad_norm": 0.2687569260597229, "learning_rate": 3.5193585224692595e-06, "grpo_mean_advantage": 1.6540289493605087e-07, "grpo_std_advantage": 2.6342788714828203e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5676193237304688}
42
+ {"ts": "2025-12-27T20:43:13", "event": "train_log", "step": 205, "epoch": 0.8208208208208209, "progress_pct": 41.0, "epoch_pct": 41.04, "eta": "01:22:59", "max_grad_norm": 1.0, "loss": -0.0423, "grad_norm": 0.22301620244979858, "learning_rate": 3.44332607441564e-06, "grpo_mean_advantage": -1.0944902442133753e-06, "grpo_std_advantage": 5.346942998585291e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5669739842414856}
43
+ {"ts": "2025-12-27T20:44:38", "event": "train_log", "step": 210, "epoch": 0.8408408408408409, "progress_pct": 42.0, "epoch_pct": 42.04, "eta": "01:21:37", "max_grad_norm": 1.0, "loss": -0.0857, "grad_norm": 0.3040211498737335, "learning_rate": 3.3662621131494204e-06, "grpo_mean_advantage": 2.4065374759629776e-07, "grpo_std_advantage": 1.6327536513927043e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5922158360481262}
44
+ {"ts": "2025-12-27T20:46:02", "event": "train_log", "step": 215, "epoch": 0.8608608608608609, "progress_pct": 43.0, "epoch_pct": 43.04, "eta": "01:20:11", "max_grad_norm": 1.0, "loss": -0.0278, "grad_norm": 0.27231141924858093, "learning_rate": 3.2882509069808044e-06, "grpo_mean_advantage": -5.21540641784668e-08, "grpo_std_advantage": 5.847922466273303e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5473950505256653}
45
+ {"ts": "2025-12-27T20:47:28", "event": "train_log", "step": 220, "epoch": 0.8808808808808809, "progress_pct": 44.0, "epoch_pct": 44.04, "eta": "01:18:49", "max_grad_norm": 1.0, "loss": -0.0727, "grad_norm": 0.3571636378765106, "learning_rate": 3.2093777600183873e-06, "grpo_mean_advantage": 6.541609991472797e-07, "grpo_std_advantage": 4.072162937518442e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5880032777786255}
46
+ {"ts": "2025-12-27T20:48:53", "event": "train_log", "step": 225, "epoch": 0.9009009009009009, "progress_pct": 45.0, "epoch_pct": 45.05, "eta": "01:17:25", "max_grad_norm": 1.0, "loss": -0.0464, "grad_norm": 0.306273490190506, "learning_rate": 3.1297289188903705e-06, "grpo_mean_advantage": -1.2218951894737984e-07, "grpo_std_advantage": 4.386006935419573e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5835092663764954}
47
+ {"ts": "2025-12-27T20:50:20", "event": "train_log", "step": 230, "epoch": 0.9209209209209209, "progress_pct": 46.0, "epoch_pct": 46.05, "eta": "01:16:03", "max_grad_norm": 1.0, "loss": -0.0295, "grad_norm": 0.2700377106666565, "learning_rate": 3.049391478435133e-06, "grpo_mean_advantage": 1.7605722177904681e-06, "grpo_std_advantage": 8.007580618141219e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5394966006278992}
48
+ {"ts": "2025-12-27T20:51:41", "event": "train_log", "step": 235, "epoch": 0.9409409409409409, "progress_pct": 47.0, "epoch_pct": 47.05, "eta": "01:14:35", "max_grad_norm": 1.0, "loss": -0.031, "grad_norm": 0.39531761407852173, "learning_rate": 2.9684532864643123e-06, "grpo_mean_advantage": -3.3080578987210174e-07, "grpo_std_advantage": 1.551636614749441e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5687432289123535}
49
+ {"ts": "2025-12-27T20:53:06", "event": "train_log", "step": 240, "epoch": 0.960960960960961, "progress_pct": 48.0, "epoch_pct": 48.05, "eta": "01:13:11", "max_grad_norm": 1.0, "loss": -0.0789, "grad_norm": 0.5987040996551514, "learning_rate": 2.887002847702504e-06, "grpo_mean_advantage": 2.712011450967111e-07, "grpo_std_advantage": 1.4400844747797237e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5550583600997925}
50
+ {"ts": "2025-12-27T20:54:27", "event": "train_log", "step": 245, "epoch": 0.980980980980981, "progress_pct": 49.0, "epoch_pct": 49.05, "eta": "01:11:42", "max_grad_norm": 1.0, "loss": -0.1131, "grad_norm": 0.5680716037750244, "learning_rate": 2.8051292270086506e-06, "grpo_mean_advantage": -3.2857059295565705e-07, "grpo_std_advantage": 2.105091425619321e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.558111310005188}
51
+ {"ts": "2025-12-27T20:55:45", "event": "train_log", "step": 250, "epoch": 1.0, "progress_pct": 50.0, "epoch_pct": 50.0, "eta": "01:10:13", "max_grad_norm": 1.0, "loss": -0.2232, "grad_norm": 0.6204046010971069, "learning_rate": 2.722921951984927e-06, "grpo_mean_advantage": 4.470348358154297e-08, "grpo_std_advantage": 5.315724820320611e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6196198463439941}
52
+ {"ts": "2025-12-27T20:57:06", "event": "train_log", "step": 255, "epoch": 1.02002002002002, "progress_pct": 51.0, "epoch_pct": 51.0, "eta": "01:08:45", "max_grad_norm": 1.0, "loss": -0.1363, "grad_norm": 0.8389026522636414, "learning_rate": 2.640470915079614e-06, "grpo_mean_advantage": 9.290873776990338e-07, "grpo_std_advantage": 4.219644324621186e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.582168459892273}
53
+ {"ts": "2025-12-27T20:58:26", "event": "train_log", "step": 260, "epoch": 1.04004004004004, "progress_pct": 52.0, "epoch_pct": 52.0, "eta": "01:07:17", "max_grad_norm": 1.0, "loss": -0.1868, "grad_norm": 0.9067686796188354, "learning_rate": 2.557866275291035e-06, "grpo_mean_advantage": 2.533197474008375e-08, "grpo_std_advantage": 1.6600588992332632e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5551307797431946}
54
+ {"ts": "2025-12-27T20:59:48", "event": "train_log", "step": 265, "epoch": 1.06006006006006, "progress_pct": 53.0, "epoch_pct": 53.0, "eta": "01:05:51", "max_grad_norm": 1.0, "loss": -0.1792, "grad_norm": 0.9277902841567993, "learning_rate": 2.4751983595800093e-06, "grpo_mean_advantage": -5.662441182607836e-08, "grpo_std_advantage": 1.0909400316450046e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.535040020942688}
55
+ {"ts": "2025-12-27T21:01:15", "event": "train_log", "step": 270, "epoch": 1.08008008008008, "progress_pct": 54.0, "epoch_pct": 54.0, "eta": "01:04:29", "max_grad_norm": 1.0, "loss": -0.1691, "grad_norm": 1.0715463161468506, "learning_rate": 2.392557564098649e-06, "grpo_mean_advantage": -9.536743306171047e-08, "grpo_std_advantage": 5.838213610331877e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5673571825027466}
56
+ {"ts": "2025-12-27T21:02:37", "event": "train_log", "step": 275, "epoch": 1.1001001001001, "progress_pct": 55.0, "epoch_pct": 55.01, "eta": "01:03:03", "max_grad_norm": 1.0, "loss": -0.1655, "grad_norm": 0.7759184837341309, "learning_rate": 2.3100342553434924e-06, "grpo_mean_advantage": 3.278255533700758e-08, "grpo_std_advantage": 9.317170679423725e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5874732732772827}
57
+ {"ts": "2025-12-27T21:03:59", "event": "train_log", "step": 280, "epoch": 1.12012012012012, "progress_pct": 56.0, "epoch_pct": 56.01, "eta": "01:01:38", "max_grad_norm": 1.0, "loss": -0.1821, "grad_norm": 0.9387398958206177, "learning_rate": 2.2277186713410688e-06, "grpo_mean_advantage": -1.206994113545079e-07, "grpo_std_advantage": 6.201085511747806e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5569106340408325}
58
+ {"ts": "2025-12-27T21:05:19", "event": "train_log", "step": 285, "epoch": 1.14014014014014, "progress_pct": 57.0, "epoch_pct": 57.01, "eta": "01:00:10", "max_grad_norm": 1.0, "loss": -0.2102, "grad_norm": 1.6132302284240723, "learning_rate": 2.1457008229739395e-06, "grpo_mean_advantage": 4.470348358154297e-08, "grpo_std_advantage": 6.115651558502577e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5578873157501221}
59
+ {"ts": "2025-12-27T21:06:43", "event": "train_log", "step": 290, "epoch": 1.16016016016016, "progress_pct": 58.0, "epoch_pct": 58.01, "eta": "00:58:47", "max_grad_norm": 1.0, "loss": -0.2937, "grad_norm": 0.8679026961326599, "learning_rate": 2.0640703955551214e-06, "grpo_mean_advantage": -3.3453108017056365e-07, "grpo_std_advantage": 3.5326345368957845e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5735999345779419}
60
+ {"ts": "2025-12-27T21:08:10", "event": "train_log", "step": 295, "epoch": 1.1801801801801801, "progress_pct": 59.0, "epoch_pct": 59.01, "eta": "00:57:24", "max_grad_norm": 1.0, "loss": -0.2598, "grad_norm": 1.0550166368484497, "learning_rate": 1.9829166507585084e-06, "grpo_mean_advantage": -1.110136480519941e-07, "grpo_std_advantage": 4.731904823529476e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5626259446144104}
61
+ {"ts": "2025-12-27T21:09:34", "event": "train_log", "step": 300, "epoch": 1.2002002002002001, "progress_pct": 60.0, "epoch_pct": 60.01, "eta": "00:56:00", "max_grad_norm": 1.0, "loss": -0.2546, "grad_norm": 1.2819372415542603, "learning_rate": 1.90232832901255e-06, "grpo_mean_advantage": -5.08874677507265e-07, "grpo_std_advantage": 1.840126174101897e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5463050603866577}
62
+ {"ts": "2025-12-27T21:10:59", "event": "train_log", "step": 305, "epoch": 1.2202202202202201, "progress_pct": 61.0, "epoch_pct": 61.01, "eta": "00:54:37", "max_grad_norm": 1.0, "loss": -0.1809, "grad_norm": 1.0188143253326416, "learning_rate": 1.82239355246389e-06, "grpo_mean_advantage": 1.01327898960335e-07, "grpo_std_advantage": 7.798533943059738e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5352144241333008}
63
+ {"ts": "2025-12-27T21:12:25", "event": "train_log", "step": 310, "epoch": 1.2402402402402402, "progress_pct": 62.0, "epoch_pct": 62.01, "eta": "00:53:14", "max_grad_norm": 1.0, "loss": -0.3559, "grad_norm": 2.0709052085876465, "learning_rate": 1.7431997286170923e-06, "grpo_mean_advantage": 1.341104507446289e-07, "grpo_std_advantage": 7.821902840987605e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5547868013381958}
64
+ {"ts": "2025-12-27T21:13:52", "event": "train_log", "step": 315, "epoch": 1.2602602602602602, "progress_pct": 63.0, "epoch_pct": 63.01, "eta": "00:51:52", "max_grad_norm": 1.0, "loss": -0.3874, "grad_norm": 1.8516215085983276, "learning_rate": 1.6648334547558227e-06, "grpo_mean_advantage": 9.015202806494926e-08, "grpo_std_advantage": 1.0693488547985908e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5859472751617432}
65
+ {"ts": "2025-12-27T21:15:21", "event": "train_log", "step": 320, "epoch": 1.2802802802802802, "progress_pct": 64.0, "epoch_pct": 64.01, "eta": "00:50:31", "max_grad_norm": 1.0, "loss": -0.3467, "grad_norm": 1.283104419708252, "learning_rate": 1.5873804232499862e-06, "grpo_mean_advantage": -2.443790378947597e-07, "grpo_std_advantage": 1.183122208203713e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5751550793647766}
66
+ {"ts": "2025-12-27T21:16:47", "event": "train_log", "step": 325, "epoch": 1.3003003003003002, "progress_pct": 65.0, "epoch_pct": 65.02, "eta": "00:49:07", "max_grad_norm": 1.0, "loss": -0.1703, "grad_norm": 1.4108576774597168, "learning_rate": 1.51092532785238e-06, "grpo_mean_advantage": -6.705522537231445e-08, "grpo_std_advantage": 6.109748937888071e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5497723817825317}
67
+ {"ts": "2025-12-27T21:18:13", "event": "train_log", "step": 330, "epoch": 1.3203203203203202, "progress_pct": 66.0, "epoch_pct": 66.02, "eta": "00:47:44", "max_grad_norm": 1.0, "loss": -0.2918, "grad_norm": 1.0421361923217773, "learning_rate": 1.4355517710873184e-06, "grpo_mean_advantage": -1.639127766850379e-08, "grpo_std_advantage": 5.529495297196263e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.55989670753479}
68
+ {"ts": "2025-12-27T21:19:37", "event": "train_log", "step": 335, "epoch": 1.3403403403403402, "progress_pct": 67.0, "epoch_pct": 67.02, "eta": "00:46:20", "max_grad_norm": 1.0, "loss": -0.3069, "grad_norm": 1.3465828895568848, "learning_rate": 1.361342172832502e-06, "grpo_mean_advantage": 4.418194237132411e-07, "grpo_std_advantage": 2.9275292945385445e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5809233784675598}
69
+ {"ts": "2025-12-27T21:20:59", "event": "train_log", "step": 340, "epoch": 1.3603603603603602, "progress_pct": 68.0, "epoch_pct": 68.02, "eta": "00:44:55", "max_grad_norm": 1.0, "loss": -0.5594, "grad_norm": 1.1959459781646729, "learning_rate": 1.2883776801940884e-06, "grpo_mean_advantage": 9.685754776000977e-08, "grpo_std_advantage": 3.754235251562932e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5568087100982666}
70
+ {"ts": "2025-12-27T21:22:27", "event": "train_log", "step": 345, "epoch": 1.3803803803803802, "progress_pct": 69.0, "epoch_pct": 69.02, "eta": "00:43:32", "max_grad_norm": 1.0, "loss": -0.4102, "grad_norm": 1.8967422246932983, "learning_rate": 1.216738078773522e-06, "grpo_mean_advantage": -2.384185791015625e-07, "grpo_std_advantage": 6.821086913078034e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5655568838119507}
71
+ {"ts": "2025-12-27T21:23:49", "event": "train_log", "step": 350, "epoch": 1.4004004004004005, "progress_pct": 70.0, "epoch_pct": 70.02, "eta": "00:42:06", "max_grad_norm": 1.0, "loss": -0.338, "grad_norm": 2.221132755279541, "learning_rate": 1.146501705423155e-06, "grpo_mean_advantage": -8.717179156292332e-08, "grpo_std_advantage": 2.500940354366321e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6089578866958618}
72
+ {"ts": "2025-12-27T21:25:13", "event": "train_log", "step": 355, "epoch": 1.4204204204204205, "progress_pct": 71.0, "epoch_pct": 71.02, "eta": "00:40:42", "max_grad_norm": 1.0, "loss": -0.4985, "grad_norm": 2.3640377521514893, "learning_rate": 1.0777453625860474e-06, "grpo_mean_advantage": 2.1606683731079102e-07, "grpo_std_advantage": 1.4568390724889468e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6129671335220337}
73
+ {"ts": "2025-12-27T21:26:36", "event": "train_log", "step": 360, "epoch": 1.4404404404404405, "progress_pct": 72.0, "epoch_pct": 72.02, "eta": "00:39:18", "max_grad_norm": 1.0, "loss": -0.4347, "grad_norm": 1.9084734916687012, "learning_rate": 1.0105442343136184e-06, "grpo_mean_advantage": -3.725290298461914e-09, "grpo_std_advantage": 2.965894054796081e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5562310814857483}
74
+ {"ts": "2025-12-27T21:27:58", "event": "train_log", "step": 365, "epoch": 1.4604604604604605, "progress_pct": 73.0, "epoch_pct": 73.02, "eta": "00:37:52", "max_grad_norm": 1.0, "loss": -0.6217, "grad_norm": 1.6063904762268066, "learning_rate": 9.449718040529987e-07, "grpo_mean_advantage": 4.313886279305734e-07, "grpo_std_advantage": 1.9621948013082147e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5884170532226562}
75
+ {"ts": "2025-12-27T21:29:21", "event": "train_log", "step": 370, "epoch": 1.4804804804804805, "progress_pct": 74.0, "epoch_pct": 74.02, "eta": "00:36:28", "max_grad_norm": 1.0, "loss": -0.5364, "grad_norm": 2.114664077758789, "learning_rate": 8.810997742939531e-07, "grpo_mean_advantage": 2.0489096641540527e-07, "grpo_std_advantage": 1.0235522722723545e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5795440673828125}
76
+ {"ts": "2025-12-27T21:30:45", "event": "train_log", "step": 375, "epoch": 1.5005005005005005, "progress_pct": 75.0, "epoch_pct": 75.03, "eta": "00:35:04", "max_grad_norm": 1.0, "loss": -0.4798, "grad_norm": 1.8450465202331543, "learning_rate": 8.189979881632634e-07, "grpo_mean_advantage": -1.4185905001795618e-06, "grpo_std_advantage": 1.0947338523692451e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5607603788375854}
77
+ {"ts": "2025-12-27T21:32:13", "event": "train_log", "step": 380, "epoch": 1.5205205205205206, "progress_pct": 76.0, "epoch_pct": 76.03, "eta": "00:33:41", "max_grad_norm": 1.0, "loss": -0.4805, "grad_norm": 2.673438787460327, "learning_rate": 7.587343530522945e-07, "grpo_mean_advantage": -1.758337049295733e-07, "grpo_std_advantage": 9.663675655247062e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5381432771682739}
78
+ {"ts": "2025-12-27T21:33:36", "event": "train_log", "step": 385, "epoch": 1.5405405405405406, "progress_pct": 77.0, "epoch_pct": 77.03, "eta": "00:32:16", "max_grad_norm": 1.0, "loss": -0.433, "grad_norm": 2.2263550758361816, "learning_rate": 7.003747663612581e-07, "grpo_mean_advantage": -6.973743325033865e-07, "grpo_std_advantage": 4.341973180999048e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5528443455696106}
79
+ {"ts": "2025-12-27T21:34:59", "event": "train_log", "step": 390, "epoch": 1.5605605605605606, "progress_pct": 78.0, "epoch_pct": 78.03, "eta": "00:30:52", "max_grad_norm": 1.0, "loss": -0.6021, "grad_norm": 2.3657093048095703, "learning_rate": 6.439830434413754e-07, "grpo_mean_advantage": 1.7881394143159923e-08, "grpo_std_advantage": 1.3004198251564958e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.6091476678848267}
80
+ {"ts": "2025-12-27T21:36:21", "event": "train_log", "step": 395, "epoch": 1.5805805805805806, "progress_pct": 79.0, "epoch_pct": 79.03, "eta": "00:29:27", "max_grad_norm": 1.0, "loss": -0.5595, "grad_norm": 1.9847129583358765, "learning_rate": 5.896208478137222e-07, "grpo_mean_advantage": 3.4868716625169327e-07, "grpo_std_advantage": 2.059372718576924e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5397372245788574}
81
+ {"ts": "2025-12-27T21:37:48", "event": "train_log", "step": 400, "epoch": 1.6006006006006006, "progress_pct": 80.0, "epoch_pct": 80.03, "eta": "00:28:03", "max_grad_norm": 1.0, "loss": -0.5592, "grad_norm": 2.922114133834839, "learning_rate": 5.373476237410808e-07, "grpo_mean_advantage": -2.1636485598719446e-06, "grpo_std_advantage": 9.725940799398813e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5873125195503235}
82
+ {"ts": "2025-12-27T21:39:11", "event": "train_log", "step": 405, "epoch": 1.6206206206206206, "progress_pct": 81.0, "epoch_pct": 81.03, "eta": "00:26:39", "max_grad_norm": 1.0, "loss": -0.5623, "grad_norm": 1.8524045944213867, "learning_rate": 4.872205312265074e-07, "grpo_mean_advantage": -5.960464477539063e-08, "grpo_std_advantage": 3.460792754594877e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5601426362991333}
83
+ {"ts": "2025-12-27T21:40:38", "event": "train_log", "step": 410, "epoch": 1.6406406406406406, "progress_pct": 82.0, "epoch_pct": 82.03, "eta": "00:25:15", "max_grad_norm": 1.0, "loss": -0.5943, "grad_norm": 1.7269790172576904, "learning_rate": 4.3929438350970687e-07, "grpo_mean_advantage": 2.6226044269606064e-07, "grpo_std_advantage": 7.928817922220333e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.578656792640686}
84
+ {"ts": "2025-12-27T21:42:02", "event": "train_log", "step": 415, "epoch": 1.6606606606606606, "progress_pct": 83.0, "epoch_pct": 83.03, "eta": "00:23:51", "max_grad_norm": 1.0, "loss": -0.6193, "grad_norm": 2.26530122756958, "learning_rate": 3.936215871295634e-07, "grpo_mean_advantage": 2.3558736756967846e-06, "grpo_std_advantage": 1.4469559573626611e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5885810852050781}
85
+ {"ts": "2025-12-27T21:43:27", "event": "train_log", "step": 420, "epoch": 1.6806806806806807, "progress_pct": 84.0, "epoch_pct": 84.03, "eta": "00:22:27", "max_grad_norm": 1.0, "loss": -0.6934, "grad_norm": 2.6794464588165283, "learning_rate": 3.502520846183577e-07, "grpo_mean_advantage": 1.639127766850379e-08, "grpo_std_advantage": 9.352411325380672e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5805023312568665}
86
+ {"ts": "2025-12-27T21:44:52", "event": "train_log", "step": 425, "epoch": 1.7007007007007007, "progress_pct": 85.0, "epoch_pct": 85.04, "eta": "00:21:03", "max_grad_norm": 1.0, "loss": -0.5126, "grad_norm": 2.100447654724121, "learning_rate": 3.092332998903416e-07, "grpo_mean_advantage": 3.2387674764322583e-06, "grpo_std_advantage": 1.999079904635437e-05, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5655918121337891}
87
+ {"ts": "2025-12-27T21:46:17", "event": "train_log", "step": 430, "epoch": 1.7207207207207207, "progress_pct": 86.0, "epoch_pct": 86.04, "eta": "00:19:39", "max_grad_norm": 1.0, "loss": -0.5446, "grad_norm": 2.1027915477752686, "learning_rate": 2.706100863843822e-07, "grpo_mean_advantage": 3.5464762504489045e-07, "grpo_std_advantage": 1.7663603557593888e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5474504232406616}
88
+ {"ts": "2025-12-27T21:47:43", "event": "train_log", "step": 435, "epoch": 1.7407407407407407, "progress_pct": 87.0, "epoch_pct": 87.04, "eta": "00:18:15", "max_grad_norm": 1.0, "loss": -0.5125, "grad_norm": 2.289045572280884, "learning_rate": 2.3442467801738867e-07, "grpo_mean_advantage": 3.6135315895080566e-07, "grpo_std_advantage": 2.356920958845876e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5874254703521729}
89
+ {"ts": "2025-12-27T21:49:08", "event": "train_log", "step": 440, "epoch": 1.7607607607607607, "progress_pct": 88.0, "epoch_pct": 88.04, "eta": "00:16:51", "max_grad_norm": 1.0, "loss": -0.595, "grad_norm": 2.278038501739502, "learning_rate": 2.007166430021415e-07, "grpo_mean_advantage": 2.7567148563889532e-08, "grpo_std_advantage": 9.97340521280421e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5815118551254272}
90
+ {"ts": "2025-12-27T21:50:35", "event": "train_log", "step": 445, "epoch": 1.7807807807807807, "progress_pct": 89.0, "epoch_pct": 89.04, "eta": "00:15:27", "max_grad_norm": 1.0, "loss": -0.8055, "grad_norm": 2.340942621231079, "learning_rate": 1.6952284058003366e-07, "grpo_mean_advantage": -8.34465012644614e-08, "grpo_std_advantage": 5.558832185670326e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5611211061477661}
91
+ {"ts": "2025-12-27T21:52:06", "event": "train_log", "step": 450, "epoch": 1.800800800800801, "progress_pct": 90.0, "epoch_pct": 90.04, "eta": "00:14:03", "max_grad_norm": 1.0, "loss": -0.8561, "grad_norm": 2.4256298542022705, "learning_rate": 1.4087738071603075e-07, "grpo_mean_advantage": -1.9818544672034477e-07, "grpo_std_advantage": 6.800727305744658e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.590424656867981}
92
+ {"ts": "2025-12-27T21:53:32", "event": "train_log", "step": 455, "epoch": 1.820820820820821, "progress_pct": 91.0, "epoch_pct": 91.04, "eta": "00:12:39", "max_grad_norm": 1.0, "loss": -0.429, "grad_norm": 1.6453255414962769, "learning_rate": 1.1481158679992554e-07, "grpo_mean_advantage": -1.9371508841459217e-08, "grpo_std_advantage": 3.142378943721269e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5670351982116699}
93
+ {"ts": "2025-12-27T21:55:00", "event": "train_log", "step": 460, "epoch": 1.840840840840841, "progress_pct": 92.0, "epoch_pct": 92.04, "eta": "00:11:15", "max_grad_norm": 1.0, "loss": -0.642, "grad_norm": 2.3458049297332764, "learning_rate": 9.135396139467151e-08, "grpo_mean_advantage": 2.3692845729783585e-07, "grpo_std_advantage": 1.682946731307311e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5640432834625244}
94
+ {"ts": "2025-12-27T21:56:27", "event": "train_log", "step": 465, "epoch": 1.860860860860861, "progress_pct": 93.0, "epoch_pct": 93.04, "eta": "00:09:51", "max_grad_norm": 1.0, "loss": -0.583, "grad_norm": 2.730945110321045, "learning_rate": 7.053015506924749e-08, "grpo_mean_advantage": 1.110136480519941e-07, "grpo_std_advantage": 8.930008448260196e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5584251284599304}
95
+ {"ts": "2025-12-27T21:57:50", "event": "train_log", "step": 470, "epoch": 1.880880880880881, "progress_pct": 94.0, "epoch_pct": 94.04, "eta": "00:08:26", "max_grad_norm": 1.0, "loss": -0.5197, "grad_norm": 2.1463465690612793, "learning_rate": 5.236293835013839e-08, "grpo_mean_advantage": 2.5406478698641877e-07, "grpo_std_advantage": 9.93092498902115e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5392154455184937}
96
+ {"ts": "2025-12-27T21:59:15", "event": "train_log", "step": 475, "epoch": 1.900900900900901, "progress_pct": 95.0, "epoch_pct": 95.05, "eta": "00:07:02", "max_grad_norm": 1.0, "loss": -0.5864, "grad_norm": 2.427900791168213, "learning_rate": 3.687217682209837e-08, "grpo_mean_advantage": -8.940697071579962e-09, "grpo_std_advantage": 5.835169645251881e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5686308741569519}
97
+ {"ts": "2025-12-27T22:00:36", "event": "train_log", "step": 480, "epoch": 1.920920920920921, "progress_pct": 96.0, "epoch_pct": 96.05, "eta": "00:05:37", "max_grad_norm": 1.0, "loss": -0.5721, "grad_norm": 2.042795419692993, "learning_rate": 2.4074809405425227e-08, "grpo_mean_advantage": 4.0605664253234863e-07, "grpo_std_advantage": 2.3210795916384086e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5842767357826233}
98
+ {"ts": "2025-12-27T22:01:58", "event": "train_log", "step": 485, "epoch": 1.940940940940941, "progress_pct": 97.0, "epoch_pct": 97.05, "eta": "00:04:13", "max_grad_norm": 1.0, "loss": -0.5944, "grad_norm": 2.800136089324951, "learning_rate": 1.3984829833499636e-08, "grpo_mean_advantage": 1.341104507446289e-07, "grpo_std_advantage": 1.507950400991831e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5495311617851257}
99
+ {"ts": "2025-12-27T22:03:20", "event": "train_log", "step": 490, "epoch": 1.960960960960961, "progress_pct": 98.0, "epoch_pct": 98.05, "eta": "00:02:48", "max_grad_norm": 1.0, "loss": -0.7015, "grad_norm": 2.8475866317749023, "learning_rate": 6.6132713508446075e-09, "grpo_mean_advantage": 2.689659481802664e-07, "grpo_std_advantage": 8.491958851664094e-07, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.549436628818512}
100
+ {"ts": "2025-12-27T22:04:42", "event": "train_log", "step": 495, "epoch": 1.980980980980981, "progress_pct": 99.0, "epoch_pct": 99.05, "eta": "00:01:24", "max_grad_norm": 1.0, "loss": -0.4033, "grad_norm": 2.9422402381896973, "learning_rate": 1.9681946484320645e-09, "grpo_mean_advantage": 8.195638656616211e-08, "grpo_std_advantage": 3.802849732892355e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.544632077217102}
101
+ {"ts": "2025-12-27T22:06:01", "event": "train_log", "step": 500, "epoch": 2.0, "progress_pct": 100.0, "epoch_pct": 100.0, "eta": "00:00:00", "max_grad_norm": 1.0, "loss": -0.6773, "grad_norm": 2.66204833984375, "learning_rate": 5.467904943851077e-11, "grpo_mean_advantage": 7.552536089860951e-07, "grpo_std_advantage": 4.143997102801222e-06, "grpo_mean_kl_div": 0.0, "grpo_mean_group_score": 0.5968535542488098}
102
+ {"ts": "2025-12-27T22:06:03", "event": "train_log", "step": 500, "epoch": 2.0, "progress_pct": 100.0, "epoch_pct": 100.0, "eta": "00:00:00", "max_grad_norm": 1.0, "train_runtime": 8430.3461, "train_samples_per_second": 0.474, "train_steps_per_second": 0.059, "total_flos": 0.0, "train_loss": -0.23323759501613678}
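The train_log records above are plain JSONL, one object per logged step, so they can be inspected without wandb. Below is a minimal sketch that assumes the records live in grpo_qwen_14B_v2/logs/train.jsonl (the path and the printed summary are illustrative, not part of the repository); it collects step, loss, learning_rate and grpo_mean_group_score from the per-step entries and skips the final runtime-summary record, which carries train_runtime instead of loss.

import json
from pathlib import Path

# Assumed location of the JSONL log shown above; adjust if the file lives elsewhere.
LOG_PATH = Path("grpo_qwen_14B_v2/logs/train.jsonl")

steps, losses, lrs, group_scores = [], [], [], []
for line in LOG_PATH.read_text().splitlines():
    line = line.strip()
    if not line:
        continue
    rec = json.loads(line)
    # Per-step entries carry "loss"; the final summary record (train_runtime, ...) does not.
    if rec.get("event") == "train_log" and "loss" in rec:
        steps.append(rec["step"])
        losses.append(rec["loss"])
        lrs.append(rec["learning_rate"])
        group_scores.append(rec["grpo_mean_group_score"])

if not steps:
    print("no per-step train_log records found")
else:
    print(f"parsed {len(steps)} logged steps (step {steps[0]} -> {steps[-1]})")
    print(f"final loss: {losses[-1]:.4f}, final lr: {lrs[-1]:.2e}")
    print(f"mean group score: {sum(group_scores) / len(group_scores):.4f}")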
grpo_qwen_14B_v2/wandb/debug-internal.log ADDED
@@ -0,0 +1,12 @@
1
+ {"time":"2025-12-27T19:44:23.778699792Z","level":"INFO","msg":"stream: starting","core version":"0.23.1"}
2
+ {"time":"2025-12-27T19:44:23.931692267Z","level":"INFO","msg":"stream: created new stream","id":"jz7bptqa"}
3
+ {"time":"2025-12-27T19:44:23.931816408Z","level":"INFO","msg":"handler: started","stream_id":"jz7bptqa"}
4
+ {"time":"2025-12-27T19:44:23.931950499Z","level":"INFO","msg":"stream: started","id":"jz7bptqa"}
5
+ {"time":"2025-12-27T19:44:23.931981018Z","level":"INFO","msg":"writer: started","stream_id":"jz7bptqa"}
6
+ {"time":"2025-12-27T19:44:23.931980519Z","level":"INFO","msg":"sender: started","stream_id":"jz7bptqa"}
7
+ {"time":"2025-12-27T22:06:04.101056437Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
8
+ {"time":"2025-12-27T22:06:04.298272513Z","level":"INFO","msg":"handler: operation stats","stats":{}}
9
+ {"time":"2025-12-27T22:06:04.303656494Z","level":"INFO","msg":"stream: closing","id":"jz7bptqa"}
10
+ {"time":"2025-12-27T22:06:04.303677603Z","level":"INFO","msg":"handler: closed","stream_id":"jz7bptqa"}
11
+ {"time":"2025-12-27T22:06:04.303750712Z","level":"INFO","msg":"sender: closed","stream_id":"jz7bptqa"}
12
+ {"time":"2025-12-27T22:06:04.303767265Z","level":"INFO","msg":"stream: closed","id":"jz7bptqa"}
grpo_qwen_14B_v2/wandb/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2025-12-27 19:44:23,485 INFO MainThread:592889 [wandb_setup.py:_flush():80] Current SDK version is 0.23.1
2
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_setup.py:_flush():80] Configure stats pid to 592889
3
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_setup.py:_flush():80] Loading settings from /root/.config/wandb/settings
4
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_setup.py:_flush():80] Loading settings from /workspace/trainer-kit/GRPO-14b/wandb/settings
5
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_setup.py:_flush():80] Loading settings from environment variables
6
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_init.py:setup_run_log_directory():714] Logging user logs to runs/grpo_14b_run1/wandb/run-20251227_194423-jz7bptqa/logs/debug.log
7
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_init.py:setup_run_log_directory():715] Logging internal logs to runs/grpo_14b_run1/wandb/run-20251227_194423-jz7bptqa/logs/debug-internal.log
8
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_init.py:init():841] calling init triggers
9
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_init.py:init():846] wandb.init called with sweep_config: {}
10
+ config: {'model': {'repo_id': '/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2', 'tokenizer_name': 'Qwen/Qwen2.5-Coder-14B', 'load_in_8bit': False, 'load_in_4bit': False, 'torch_dtype': 'bfloat16', 'device_map': 'auto', 'trust_remote_code': True}, 'data': {'train_jsonl': 'grpo_dataset.jsonl', 'eval_jsonl': None, 'eval_split_ratio': 0.0, 'shuffle': True, 'num_proc': 1, 'prompt_field': 'prompt', 'completions_field': 'completions', 'scores_field': 'scores', 'format_type': 'raw', 'max_length': 2048, 'min_completions': 2, 'system_prompt': 'You are a Hyperswitch Rust code analyzer. Identify functions/structs that need modification for a given task.\n\n## Output Format\n\n##OUTPUT\nExplain the data flow and why each component must change:\n- Flow: [Input → Processing → Output with arrows]\n- For each component: "The [ComponentName] ([path]) must [action] because [reason]—without this, [consequence]"\n- Explain coupling between components\n\n##SELECT\nmodify::crates/path/to/file.rs::impl::ComponentName\nadd::crates/another/file.rs::function::AnotherComponent\n<EOS>\n\n## Rules\n\n1. Use full paths: `remove::crates/folder/file.rs::Type::Name`\n2. Use `::` for nested items: `status::StructName::Type::Name`\n3. Always explain "must change because" and "without this"\n3. Types of components: function, struct, enum, impl, trait\n4. If there is extra information (e.g., enum variants), include that too.\n5. Start with ##OUTPUT, end with ##SELECT, terminate with <EOS>\n\n## Example\n\n##TASK\nAdd webhook subscription support\n\n##OUTPUT\nThe webhook system routes events via EventClass enum. Flow: webhook → EventClass → handler → processing. The EventClass enum (crates/common_enums/src/enums.rs::EventClass) must add Subscriptions variant because it defines event routing—without this, subscription events cannot be processed. The SubscriptionStatus impl (crates/common_enums/src/transformers.rs::SubscriptionStatus) must map to EventType because it converts status to events—without this, status changes don\'t trigger webhooks. These are coupled: EventClass routes to handlers that use SubscriptionStatus mappings.\n\n##SELECT\ncrates/common_enums/src/enums.rs::EventClass\ncrates/common_enums/src/transformers.rs::SubscriptionStatus\n<EOS>\n', 'custom_template': '##INSTRUCTION\n{instruction}<|im_end|>\n{input}<|im_end|>\n{output}<|im_end|>'}, 'peft': {'enabled': True, 'r': 16, 'lora_alpha': 32, 'lora_dropout': 0.05, 'target_modules': ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'], 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'grpo': {'group_size': 4, 'kl_coef': 0.05, 'normalize_advantages': True, 'reward_scaling': 1.0, 'reward_bias': 0.0, 'reward_clip': 5.0, 'advantage_temperature': 1.0, 'use_reference_model': False, 'seed': 42}, 'train': {'output_dir': 'runs/grpo_14b_run1', 'num_train_epochs': 2, 'per_device_train_batch_size': 1, 'gradient_accumulation_steps': 8, 'per_device_eval_batch_size': 1, 'learning_rate': 5e-06, 'weight_decay': 0.01, 'warmup_ratio': 0.05, 'lr_scheduler_type': 'cosine', 'fp16': False, 'bf16': True, 'max_grad_norm': 1.0, 'gradient_checkpointing': True, 'logging_steps': 5, 'save_steps': 100, 'save_total_limit': 2, 'evaluation_strategy': 'no', 'dataloader_num_workers': 4, 'dataloader_pin_memory': True, 'remove_unused_columns': False, 'report_to': [], 'seed': 42, 'ddp_find_unused_parameters': False}, 'run_dir': 'runs/grpo_14b_run1', '_wandb': {}}
11
+ 2025-12-27 19:44:23,486 INFO MainThread:592889 [wandb_init.py:init():889] starting backend
12
+ 2025-12-27 19:44:23,772 INFO MainThread:592889 [wandb_init.py:init():892] sending inform_init request
13
+ 2025-12-27 19:44:23,776 INFO MainThread:592889 [wandb_init.py:init():900] backend started and connected
14
+ 2025-12-27 19:44:23,778 INFO MainThread:592889 [wandb_init.py:init():970] updated telemetry
15
+ 2025-12-27 19:44:23,779 INFO MainThread:592889 [wandb_init.py:init():994] communicating run to backend with 90.0 second timeout
16
+ 2025-12-27 19:44:24,241 INFO MainThread:592889 [wandb_init.py:init():1041] starting run threads in backend
17
+ 2025-12-27 19:44:24,354 INFO MainThread:592889 [wandb_run.py:_console_start():2521] atexit reg
18
+ 2025-12-27 19:44:24,354 INFO MainThread:592889 [wandb_run.py:_redirect():2369] redirect: wrap_raw
19
+ 2025-12-27 19:44:24,354 INFO MainThread:592889 [wandb_run.py:_redirect():2438] Wrapping output streams.
20
+ 2025-12-27 19:44:24,354 INFO MainThread:592889 [wandb_run.py:_redirect():2461] Redirects installed.
21
+ 2025-12-27 19:44:24,358 INFO MainThread:592889 [wandb_init.py:init():1081] run started, returning control to user process
22
+ 2025-12-27 19:45:32,787 INFO MainThread:592889 [wandb_run.py:_config_callback():1396] config_cb None None {'peft_config': {'default': {'task_type': 'CAUSAL_LM', 'peft_type': 'LORA', 'auto_mapping': None, 'peft_version': '0.18.0', 'base_model_name_or_path': '/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2', 'revision': None, 'inference_mode': False, 'r': 16, 'target_modules': ['q_proj', 'o_proj', 'v_proj', 'up_proj', 'gate_proj', 'down_proj', 'k_proj'], 'exclude_modules': None, 'lora_alpha': 32, 'lora_dropout': 0.05, 'fan_in_fan_out': False, 'bias': 'none', 'use_rslora': False, 'modules_to_save': None, 'init_lora_weights': True, 'layers_to_transform': None, 'layers_pattern': None, 'rank_pattern': {}, 'alpha_pattern': {}, 'megatron_config': None, 'megatron_core': 'megatron.core', 'trainable_token_indices': None, 'loftq_config': {}, 'eva_config': None, 'corda_config': None, 'use_dora': False, 'alora_invocation_tokens': None, 'use_qalora': False, 'qalora_group_size': 16, 'layer_replication': None, 'runtime_config': {'ephemeral_gpu_offload': False}, 'lora_bias': False, 'target_parameters': None, 'arrow_config': None, 'ensure_weight_tying': False}}, 'vocab_size': 152064, 'max_position_embeddings': 32768, 'hidden_size': 5120, 'intermediate_size': 13824, 'num_hidden_layers': 48, 'num_attention_heads': 40, 'use_sliding_window': False, 'sliding_window': None, 'max_window_layers': 48, 'num_key_value_heads': 8, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-06, 'use_cache': False, 'rope_theta': 10000.0, 'rope_scaling': None, 'attention_dropout': 0.0, 'layer_types': ['full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention'], 'return_dict': True, 'output_hidden_states': False, 'torchscript': False, 'dtype': 'bfloat16', 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'architectures': ['Qwen2ForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'task_specific_params': None, 'problem_type': None, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 151643, 'pad_token_id': None, 'eos_token_id': 151643, 'sep_token_id': None, 'decoder_start_token_id': None, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 
'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'num_beam_groups': 1, 'diversity_penalty': 0.0, '_name_or_path': '/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2', 'transformers_version': '4.57.3', 'model_type': 'qwen2', 'rope_parameters': {'rope_theta': 1000000.0, 'rope_type': 'default'}, 'tf_legacy_loss': False, 'use_bfloat16': False, 'output_attentions': False, 'output_dir': 'runs/grpo_14b_run1/checkpoints', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': False, 'do_predict': False, 'eval_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 1, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 8, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 5e-06, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 2.0, 'max_steps': -1, 'lr_scheduler_type': 'cosine', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.05, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': 'runs/grpo_14b_run1/checkpoints/runs/Dec27_19-45-32_a100-2gpu-shell-session-757d587799-mfdvv', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 5, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 100, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'bf16': True, 'fp16': False, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 50, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': None, 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': 'eval_loss', 'greater_is_better': False, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'parallelism_config': None, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['wandb'], 'project': 'huggingface', 'trackio_space_id': 'trackio', 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'hub_revision': None, 
'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': 'no', 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'liger_kernel_config': None, 'eval_use_gather_object': False, 'average_tokens_across_devices': True}
23
+ 2025-12-27 19:45:32,798 INFO MainThread:592889 [wandb_config.py:__setitem__():154] [no run ID] config set model/num_parameters = 14838846464 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7ada527aff70>>
24
+ 2025-12-27 19:45:32,798 INFO MainThread:592889 [wandb_run.py:_config_callback():1396] config_cb model/num_parameters 14838846464 None
25
+ 2025-12-27 22:06:03,800 INFO MainThread:592889 [wandb_run.py:_finish():2287] finishing run sirajuddin-shaik-007/rl-training/jz7bptqa
26
+ 2025-12-27 22:06:03,800 INFO MainThread:592889 [wandb_run.py:_atexit_cleanup():2486] got exitcode: 0
27
+ 2025-12-27 22:06:03,801 INFO MainThread:592889 [wandb_run.py:_restore():2468] restore
28
+ 2025-12-27 22:06:03,801 INFO MainThread:592889 [wandb_run.py:_restore():2474] restore done
29
+ 2025-12-27 22:06:04,302 INFO MainThread:592889 [wandb_run.py:_footer_sync_info():3862] logging synced files
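The config dict captured in debug.log above records the GRPO shaping knobs: group_size 4, kl_coef 0.05, normalize_advantages true, reward_scaling 1.0, reward_bias 0.0, reward_clip 5.0. As a minimal sketch only (this is not the training script's implementation), per-group advantage normalization under those settings typically looks like the snippet below; it also illustrates why the grpo_mean_advantage values in the step logs sit within numerical noise of zero, since standardizing rewards inside each group drives the group mean to zero by construction.

from statistics import mean, pstdev

def group_advantages(scores, reward_scaling=1.0, reward_bias=0.0,
                     reward_clip=5.0, normalize=True, eps=1e-8):
    # Illustrative only: scale, shift and clip the raw completion scores,
    # then standardize within the group (the normalize_advantages=true case).
    rewards = [max(-reward_clip, min(reward_clip, reward_scaling * s + reward_bias))
               for s in scores]
    if not normalize:
        return rewards
    mu, sigma = mean(rewards), pstdev(rewards)
    return [(r - mu) / (sigma + eps) for r in rewards]

# Hypothetical scores for one prompt's group of 4 completions (group_size above).
scores = [0.55, 0.62, 0.48, 0.71]
adv = group_advantages(scores)
print([round(a, 3) for a in adv])
print("group mean advantage:", round(mean(adv), 12))  # ~0 by construction

Whether the actual trainer divides by the group standard deviation, uses a different epsilon, or clips before scaling is a detail of run_grpo_fixed.py that these logs do not pin down.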
grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/files/config.yaml ADDED
@@ -0,0 +1,725 @@
1
+ _name_or_path:
2
+ value: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
3
+ _wandb:
4
+ value:
5
+ cli_version: 0.23.1
6
+ e:
7
+ flq59s27gswy5t5lhj0bnmni5u7y1dm1:
8
+ args:
9
+ - --config
10
+ - config_grpo_14b.yaml
11
+ codePath: run_grpo_fixed.py
12
+ codePathLocal: run_grpo_fixed.py
13
+ cpu_count: 12
14
+ cpu_count_logical: 24
15
+ cudaVersion: "13.0"
16
+ disk:
17
+ /:
18
+ total: "791251738624"
19
+ used: "537236914176"
20
21
+ executable: /root/llm_finetuning_env/bin/python
22
+ gpu: NVIDIA A100-SXM4-80GB
23
+ gpu_count: 2
24
+ gpu_nvidia:
25
+ - architecture: Ampere
26
+ cudaCores: 6912
27
+ memoryTotal: "85899345920"
28
+ name: NVIDIA A100-SXM4-80GB
29
+ uuid: GPU-989794b0-ec3b-13bf-db9f-3fbe341497ba
30
+ - architecture: Ampere
31
+ cudaCores: 6912
32
+ memoryTotal: "85899345920"
33
+ name: NVIDIA A100-SXM4-80GB
34
+ uuid: GPU-3790aa64-60ef-9eac-b0b1-b278ee8c0d40
35
+ host: a100-2gpu-shell-session-757d587799-mfdvv
36
+ memory:
37
+ total: "359047892992"
38
+ os: Linux-6.12.46+-x86_64-with-glibc2.35
39
+ program: /workspace/trainer-kit/GRPO-14b/run_grpo_fixed.py
40
+ python: CPython 3.10.12
41
+ root: runs/grpo_14b_run1
42
+ startedAt: "2025-12-27T19:44:23.484378Z"
43
+ writerId: flq59s27gswy5t5lhj0bnmni5u7y1dm1
44
+ m:
45
+ - "1": train/global_step
46
+ "6":
47
+ - 3
48
+ "7": []
49
+ - "2": '*'
50
+ "5": 1
51
+ "6":
52
+ - 1
53
+ "7": []
54
+ python_version: 3.10.12
55
+ t:
56
+ "1":
57
+ - 1
58
+ - 11
59
+ - 41
60
+ - 49
61
+ - 51
62
+ - 71
63
+ - 98
64
+ "2":
65
+ - 1
66
+ - 11
67
+ - 41
68
+ - 49
69
+ - 51
70
+ - 71
71
+ - 98
72
+ "3":
73
+ - 2
74
+ - 7
75
+ - 15
76
+ - 16
77
+ - 19
78
+ - 62
79
+ - 66
80
+ "4": 3.10.12
81
+ "5": 0.23.1
82
+ "6": 4.57.3
83
+ "9":
84
+ "1": transformers_trainer
85
+ "12": 0.23.1
86
+ "13": linux-x86_64
87
+ accelerator_config:
88
+ value:
89
+ dispatch_batches: null
90
+ even_batches: true
91
+ gradient_accumulation_kwargs: null
92
+ non_blocking: false
93
+ split_batches: false
94
+ use_seedable_sampler: true
95
+ adafactor:
96
+ value: false
97
+ adam_beta1:
98
+ value: 0.9
99
+ adam_beta2:
100
+ value: 0.999
101
+ adam_epsilon:
102
+ value: 1e-08
103
+ add_cross_attention:
104
+ value: false
105
+ architectures:
106
+ value:
107
+ - Qwen2ForCausalLM
108
+ attention_dropout:
109
+ value: 0
110
+ auto_find_batch_size:
111
+ value: false
112
+ average_tokens_across_devices:
113
+ value: true
114
+ bad_words_ids:
115
+ value: null
116
+ batch_eval_metrics:
117
+ value: false
118
+ begin_suppress_tokens:
119
+ value: null
120
+ bf16:
121
+ value: true
122
+ bf16_full_eval:
123
+ value: false
124
+ bos_token_id:
125
+ value: 151643
126
+ chunk_size_feed_forward:
127
+ value: 0
128
+ cross_attention_hidden_size:
129
+ value: null
130
+ data:
131
+ value:
132
+ completions_field: completions
133
+ custom_template: |-
134
+ ##INSTRUCTION
135
+ {instruction}<|im_end|>
136
+ {input}<|im_end|>
137
+ {output}<|im_end|>
138
+ eval_jsonl: null
139
+ eval_split_ratio: 0
140
+ format_type: raw
141
+ max_length: 2048
142
+ min_completions: 2
143
+ num_proc: 1
144
+ prompt_field: prompt
145
+ scores_field: scores
146
+ shuffle: true
147
+ system_prompt: |
148
+ You are a Hyperswitch Rust code analyzer. Identify functions/structs that need modification for a given task.
149
+
150
+ ## Output Format
151
+
152
+ ##OUTPUT
153
+ Explain the data flow and why each component must change:
154
+ - Flow: [Input → Processing → Output with arrows]
155
+ - For each component: "The [ComponentName] ([path]) must [action] because [reason]—without this, [consequence]"
156
+ - Explain coupling between components
157
+
158
+ ##SELECT
159
+ modify::crates/path/to/file.rs::impl::ComponentName
160
+ add::crates/another/file.rs::function::AnotherComponent
161
+ <EOS>
162
+
163
+ ## Rules
164
+
165
+ 1. Use full paths: `remove::crates/folder/file.rs::Type::Name`
166
+ 2. Use `::` for nested items: `status::StructName::Type::Name`
167
+ 3. Always explain "must change because" and "without this"
168
+ 3. Types of components: function, struct, enum, impl, trait
169
+ 4. If there is extra information (e.g., enum variants), include that too.
170
+ 5. Start with ##OUTPUT, end with ##SELECT, terminate with <EOS>
171
+
172
+ ## Example
173
+
174
+ ##TASK
175
+ Add webhook subscription support
176
+
177
+ ##OUTPUT
178
+ The webhook system routes events via EventClass enum. Flow: webhook → EventClass → handler → processing. The EventClass enum (crates/common_enums/src/enums.rs::EventClass) must add Subscriptions variant because it defines event routing—without this, subscription events cannot be processed. The SubscriptionStatus impl (crates/common_enums/src/transformers.rs::SubscriptionStatus) must map to EventType because it converts status to events—without this, status changes don't trigger webhooks. These are coupled: EventClass routes to handlers that use SubscriptionStatus mappings.
179
+
180
+ ##SELECT
181
+ crates/common_enums/src/enums.rs::EventClass
182
+ crates/common_enums/src/transformers.rs::SubscriptionStatus
183
+ <EOS>
184
+ train_jsonl: grpo_dataset.jsonl
185
+ data_seed:
186
+ value: null
187
+ dataloader_drop_last:
188
+ value: false
189
+ dataloader_num_workers:
190
+ value: 0
191
+ dataloader_persistent_workers:
192
+ value: false
193
+ dataloader_pin_memory:
194
+ value: true
195
+ dataloader_prefetch_factor:
196
+ value: null
197
+ ddp_backend:
198
+ value: null
199
+ ddp_broadcast_buffers:
200
+ value: null
201
+ ddp_bucket_cap_mb:
202
+ value: null
203
+ ddp_find_unused_parameters:
204
+ value: null
205
+ ddp_timeout:
206
+ value: 1800
207
+ debug:
208
+ value: []
209
+ decoder_start_token_id:
210
+ value: null
211
+ deepspeed:
212
+ value: null
213
+ disable_tqdm:
214
+ value: false
215
+ diversity_penalty:
216
+ value: 0
217
+ do_eval:
218
+ value: false
219
+ do_predict:
220
+ value: false
221
+ do_sample:
222
+ value: false
223
+ do_train:
224
+ value: false
225
+ dtype:
226
+ value: bfloat16
227
+ early_stopping:
228
+ value: false
229
+ encoder_no_repeat_ngram_size:
230
+ value: 0
231
+ eos_token_id:
232
+ value: 151643
233
+ eval_accumulation_steps:
234
+ value: null
235
+ eval_delay:
236
+ value: 0
237
+ eval_do_concat_batches:
238
+ value: true
239
+ eval_on_start:
240
+ value: false
241
+ eval_steps:
242
+ value: 50
243
+ eval_strategy:
244
+ value: "no"
245
+ eval_use_gather_object:
246
+ value: false
247
+ exponential_decay_length_penalty:
248
+ value: null
249
+ finetuning_task:
250
+ value: null
251
+ forced_bos_token_id:
252
+ value: null
253
+ forced_eos_token_id:
254
+ value: null
255
+ fp16:
256
+ value: false
257
+ fp16_backend:
258
+ value: auto
259
+ fp16_full_eval:
260
+ value: false
261
+ fp16_opt_level:
262
+ value: O1
263
+ fsdp:
264
+ value: []
265
+ fsdp_config:
266
+ value:
267
+ min_num_params: 0
268
+ xla: false
269
+ xla_fsdp_grad_ckpt: false
270
+ xla_fsdp_v2: false
271
+ fsdp_min_num_params:
272
+ value: 0
273
+ fsdp_transformer_layer_cls_to_wrap:
274
+ value: null
275
+ full_determinism:
276
+ value: false
277
+ gradient_accumulation_steps:
278
+ value: 8
279
+ gradient_checkpointing:
280
+ value: false
281
+ gradient_checkpointing_kwargs:
282
+ value: null
283
+ greater_is_better:
284
+ value: false
285
+ group_by_length:
286
+ value: false
287
+ grpo:
288
+ value:
289
+ advantage_temperature: 1
290
+ group_size: 4
291
+ kl_coef: 0.05
292
+ normalize_advantages: true
293
+ reward_bias: 0
294
+ reward_clip: 5
295
+ reward_scaling: 1
296
+ seed: 42
297
+ use_reference_model: false
298
+ half_precision_backend:
299
+ value: auto
300
+ hidden_act:
301
+ value: silu
302
+ hidden_size:
303
+ value: 5120
304
+ hub_always_push:
305
+ value: false
306
+ hub_model_id:
307
+ value: null
308
+ hub_private_repo:
309
+ value: null
310
+ hub_revision:
311
+ value: null
312
+ hub_strategy:
313
+ value: every_save
314
+ hub_token:
315
+ value: <HUB_TOKEN>
316
+ id2label:
317
+ value:
318
+ "0": LABEL_0
319
+ "1": LABEL_1
320
+ ignore_data_skip:
321
+ value: false
322
+ include_for_metrics:
323
+ value: []
324
+ include_inputs_for_metrics:
325
+ value: false
326
+ include_num_input_tokens_seen:
327
+ value: "no"
328
+ include_tokens_per_second:
329
+ value: false
330
+ initializer_range:
331
+ value: 0.02
332
+ intermediate_size:
333
+ value: 13824
334
+ is_decoder:
335
+ value: false
336
+ is_encoder_decoder:
337
+ value: false
338
+ jit_mode_eval:
339
+ value: false
340
+ label_names:
341
+ value: null
342
+ label_smoothing_factor:
343
+ value: 0
344
+ label2id:
345
+ value:
346
+ LABEL_0: 0
347
+ LABEL_1: 1
348
+ layer_types:
349
+ value:
350
+ - full_attention
351
+ - full_attention
352
+ - full_attention
353
+ - full_attention
354
+ - full_attention
355
+ - full_attention
356
+ - full_attention
357
+ - full_attention
358
+ - full_attention
359
+ - full_attention
360
+ - full_attention
361
+ - full_attention
362
+ - full_attention
363
+ - full_attention
364
+ - full_attention
365
+ - full_attention
366
+ - full_attention
367
+ - full_attention
368
+ - full_attention
369
+ - full_attention
370
+ - full_attention
371
+ - full_attention
372
+ - full_attention
373
+ - full_attention
374
+ - full_attention
375
+ - full_attention
376
+ - full_attention
377
+ - full_attention
378
+ - full_attention
379
+ - full_attention
380
+ - full_attention
381
+ - full_attention
382
+ - full_attention
383
+ - full_attention
384
+ - full_attention
385
+ - full_attention
386
+ - full_attention
387
+ - full_attention
388
+ - full_attention
389
+ - full_attention
390
+ - full_attention
391
+ - full_attention
392
+ - full_attention
393
+ - full_attention
394
+ - full_attention
395
+ - full_attention
396
+ - full_attention
397
+ - full_attention
398
+ learning_rate:
399
+ value: 5e-06
400
+ length_column_name:
401
+ value: length
402
+ length_penalty:
403
+ value: 1
404
+ liger_kernel_config:
405
+ value: null
406
+ load_best_model_at_end:
407
+ value: false
408
+ local_rank:
409
+ value: 0
410
+ log_level:
411
+ value: passive
412
+ log_level_replica:
413
+ value: warning
414
+ log_on_each_node:
415
+ value: true
416
+ logging_dir:
417
+ value: runs/grpo_14b_run1/checkpoints/runs/Dec27_19-45-32_a100-2gpu-shell-session-757d587799-mfdvv
418
+ logging_first_step:
419
+ value: false
420
+ logging_nan_inf_filter:
421
+ value: true
422
+ logging_steps:
423
+ value: 5
424
+ logging_strategy:
425
+ value: steps
426
+ lr_scheduler_type:
427
+ value: cosine
428
+ max_grad_norm:
429
+ value: 1
430
+ max_length:
431
+ value: 20
432
+ max_position_embeddings:
433
+ value: 32768
434
+ max_steps:
435
+ value: -1
436
+ max_window_layers:
437
+ value: 48
438
+ metric_for_best_model:
439
+ value: eval_loss
440
+ min_length:
441
+ value: 0
442
+ model:
443
+ value:
444
+ device_map: auto
445
+ load_in_4bit: false
446
+ load_in_8bit: false
447
+ repo_id: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
448
+ tokenizer_name: Qwen/Qwen2.5-Coder-14B
449
+ torch_dtype: bfloat16
450
+ trust_remote_code: true
451
+ model/num_parameters:
452
+ value: 14838846464
453
+ model_type:
454
+ value: qwen2
455
+ mp_parameters:
456
+ value: ""
457
+ neftune_noise_alpha:
458
+ value: null
459
+ no_cuda:
460
+ value: false
461
+ no_repeat_ngram_size:
462
+ value: 0
463
+ num_attention_heads:
464
+ value: 40
465
+ num_beam_groups:
466
+ value: 1
467
+ num_beams:
468
+ value: 1
469
+ num_hidden_layers:
470
+ value: 48
471
+ num_key_value_heads:
472
+ value: 8
473
+ num_return_sequences:
474
+ value: 1
475
+ num_train_epochs:
476
+ value: 2
477
+ optim:
478
+ value: adamw_torch
479
+ optim_args:
480
+ value: null
481
+ optim_target_modules:
482
+ value: null
483
+ output_attentions:
484
+ value: false
485
+ output_dir:
486
+ value: runs/grpo_14b_run1/checkpoints
487
+ output_hidden_states:
488
+ value: false
489
+ output_scores:
490
+ value: false
491
+ overwrite_output_dir:
492
+ value: false
493
+ pad_token_id:
494
+ value: null
495
+ parallelism_config:
496
+ value: null
497
+ past_index:
498
+ value: -1
499
+ peft:
500
+ value:
501
+ bias: none
502
+ enabled: true
503
+ lora_alpha: 32
504
+ lora_dropout: 0.05
505
+ r: 16
506
+ target_modules:
507
+ - q_proj
508
+ - k_proj
509
+ - v_proj
510
+ - o_proj
511
+ - gate_proj
512
+ - up_proj
513
+ - down_proj
514
+ task_type: CAUSAL_LM
515
+ peft_config:
516
+ value:
517
+ default:
518
+ alora_invocation_tokens: null
519
+ arrow_config: null
520
+ auto_mapping: null
521
+ base_model_name_or_path: /workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2
522
+ bias: none
523
+ corda_config: null
524
+ ensure_weight_tying: false
525
+ eva_config: null
526
+ exclude_modules: null
527
+ fan_in_fan_out: false
528
+ inference_mode: false
529
+ init_lora_weights: true
530
+ layer_replication: null
531
+ layers_pattern: null
532
+ layers_to_transform: null
533
+ lora_alpha: 32
534
+ lora_bias: false
535
+ lora_dropout: 0.05
536
+ megatron_config: null
537
+ megatron_core: megatron.core
538
+ modules_to_save: null
539
+ peft_type: LORA
540
+ peft_version: 0.18.0
541
+ qalora_group_size: 16
542
+ r: 16
543
+ revision: null
544
+ runtime_config:
545
+ ephemeral_gpu_offload: false
546
+ target_modules:
547
+ - q_proj
548
+ - o_proj
549
+ - v_proj
550
+ - up_proj
551
+ - gate_proj
552
+ - down_proj
553
+ - k_proj
554
+ target_parameters: null
555
+ task_type: CAUSAL_LM
556
+ trainable_token_indices: null
557
+ use_dora: false
558
+ use_qalora: false
559
+ use_rslora: false
560
+ per_device_eval_batch_size:
561
+ value: 1
562
+ per_device_train_batch_size:
563
+ value: 1
564
+ per_gpu_eval_batch_size:
565
+ value: null
566
+ per_gpu_train_batch_size:
567
+ value: null
568
+ prediction_loss_only:
569
+ value: false
570
+ prefix:
571
+ value: null
572
+ problem_type:
573
+ value: null
574
+ project:
575
+ value: huggingface
576
+ push_to_hub:
577
+ value: false
578
+ push_to_hub_model_id:
579
+ value: null
580
+ push_to_hub_organization:
581
+ value: null
582
+ push_to_hub_token:
583
+ value: <PUSH_TO_HUB_TOKEN>
584
+ ray_scope:
585
+ value: last
586
+ remove_invalid_values:
587
+ value: false
588
+ remove_unused_columns:
589
+ value: false
590
+ repetition_penalty:
591
+ value: 1
592
+ report_to:
593
+ value:
594
+ - wandb
595
+ restore_callback_states_from_checkpoint:
596
+ value: false
597
+ resume_from_checkpoint:
598
+ value: null
599
+ return_dict:
600
+ value: true
601
+ return_dict_in_generate:
602
+ value: false
603
+ rms_norm_eps:
604
+ value: 1e-06
605
+ rope_parameters:
606
+ value:
607
+ rope_theta: 1e+06
608
+ rope_type: default
609
+ rope_scaling:
610
+ value: null
611
+ rope_theta:
612
+ value: 10000
613
+ run_dir:
614
+ value: runs/grpo_14b_run1
615
+ run_name:
616
+ value: null
617
+ save_on_each_node:
618
+ value: false
619
+ save_only_model:
620
+ value: false
621
+ save_safetensors:
622
+ value: true
623
+ save_steps:
624
+ value: 100
625
+ save_strategy:
626
+ value: steps
627
+ save_total_limit:
628
+ value: 2
629
+ seed:
630
+ value: 42
631
+ sep_token_id:
632
+ value: null
633
+ skip_memory_metrics:
634
+ value: true
635
+ sliding_window:
636
+ value: null
637
+ suppress_tokens:
638
+ value: null
639
+ task_specific_params:
640
+ value: null
641
+ temperature:
642
+ value: 1
643
+ tf_legacy_loss:
644
+ value: false
645
+ tf32:
646
+ value: null
647
+ tie_encoder_decoder:
648
+ value: false
649
+ tie_word_embeddings:
650
+ value: false
651
+ tokenizer_class:
652
+ value: null
653
+ top_k:
654
+ value: 50
655
+ top_p:
656
+ value: 1
657
+ torch_compile:
658
+ value: false
659
+ torch_compile_backend:
660
+ value: null
661
+ torch_compile_mode:
662
+ value: null
663
+ torch_empty_cache_steps:
664
+ value: null
665
+ torchdynamo:
666
+ value: null
667
+ torchscript:
668
+ value: false
669
+ tpu_metrics_debug:
670
+ value: false
671
+ tpu_num_cores:
672
+ value: null
673
+ trackio_space_id:
674
+ value: trackio
675
+ train:
676
+ value:
677
+ bf16: true
678
+ dataloader_num_workers: 4
679
+ dataloader_pin_memory: true
680
+ ddp_find_unused_parameters: false
681
+ evaluation_strategy: "no"
682
+ fp16: false
683
+ gradient_accumulation_steps: 8
684
+ gradient_checkpointing: true
685
+ learning_rate: 5e-06
686
+ logging_steps: 5
687
+ lr_scheduler_type: cosine
688
+ max_grad_norm: 1
689
+ num_train_epochs: 2
690
+ output_dir: runs/grpo_14b_run1
691
+ per_device_eval_batch_size: 1
692
+ per_device_train_batch_size: 1
693
+ remove_unused_columns: false
694
+ report_to: []
695
+ save_steps: 100
696
+ save_total_limit: 2
697
+ seed: 42
698
+ warmup_ratio: 0.05
699
+ weight_decay: 0.01
700
+ transformers_version:
701
+ value: 4.57.3
702
+ typical_p:
703
+ value: 1
704
+ use_bfloat16:
705
+ value: false
706
+ use_cache:
707
+ value: false
708
+ use_cpu:
709
+ value: false
710
+ use_legacy_prediction_loop:
711
+ value: false
712
+ use_liger_kernel:
713
+ value: false
714
+ use_mps_device:
715
+ value: false
716
+ use_sliding_window:
717
+ value: false
718
+ vocab_size:
719
+ value: 152064
720
+ warmup_ratio:
721
+ value: 0.05
722
+ warmup_steps:
723
+ value: 0
724
+ weight_decay:
725
+ value: 0.01
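The `grpo` block in the config above (group_size 4, clamped to 2 at runtime per output.log; kl_coef 0.05; normalize_advantages true; reward_clip 5; reward_scaling 1) controls how per-completion scores are turned into advantages. A minimal sketch of that transformation, assuming the standard group-relative formulation; the helper name and details are illustrative, since the actual `run_grpo_fixed.py` is not part of this upload:

```python
import torch

def group_advantages(rewards: torch.Tensor,
                     group_size: int = 2,
                     reward_scaling: float = 1.0,
                     reward_bias: float = 0.0,
                     reward_clip: float = 5.0,
                     normalize: bool = True,
                     temperature: float = 1.0,
                     eps: float = 1e-8) -> torch.Tensor:
    """Map raw per-completion rewards to group-relative advantages.

    `rewards` has shape (num_prompts * group_size,); completions for the
    same prompt are assumed to be contiguous.
    """
    r = (rewards * reward_scaling + reward_bias).clamp(-reward_clip, reward_clip)
    groups = r.view(-1, group_size)                  # (num_prompts, group_size)
    adv = groups - groups.mean(dim=1, keepdim=True)  # subtract the per-group baseline
    if normalize:
        adv = adv / (groups.std(dim=1, keepdim=True) + eps)
    return (adv / temperature).reshape(-1)
```

With group_size 2 each group's advantages sum to zero by construction, which is consistent with the near-zero `grpo_mean_advantage` values in the training log below.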
grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/files/output.log ADDED
@@ -0,0 +1,134 @@
1
+ Wandb initialized: project='rl-training', name='auto-generated'
2
+ `torch_dtype` is deprecated! Use `dtype` instead!
3
+ Loading checkpoint shards: 100%|████████████████████████████████████████████| 16/16 [00:17<00:00, 1.10s/it]
4
+ Formatting and tokenizing train GRPO data (num_proc=1): 100%|████| 2000/2000 [00:37<00:00, 53.65 examples/s]
5
+ Filter: 100%|███████████████████████████████████████████████████| 1998/1998 [00:08<00:00, 244.33 examples/s]
6
+ Training samples: 1998
7
+ Warning: group_size (4) > min_completions (2)
8
+ Setting group_size to 2
9
+
10
+ === GRPO Training Configuration ===
11
+ Group size: 2
12
+ KL coefficient: 0.05
13
+ Normalize advantages: True
14
+ Reward scaling: 1.0
15
+ Advantage temperature: 1.0
16
+ Use reference model: False
17
+ ===================================
18
+
19
+ Starting GRPO training...
20
+ 0%| | 0/500 [00:00<?, ?it/s]/workspace/trainer-kit/GRPO-14b/run_grpo_fixed.py:700: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
21
+ with torch.cuda.amp.autocast(enabled=self.args.fp16 or self.args.bf16):
22
+ Could not estimate the number of tokens of the input, floating-point operations will not be computed
23
+ 20%|█████████████▍ | 100/500 [28:13<1:54:10, 17.13s/it]/workspace/trainer-kit/GRPO-14b/run_grpo_fixed.py:700: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
24
+ {'loss': 0.007, 'grad_norm': 0.05460292845964432, 'learning_rate': 8.000000000000001e-07, 'grpo_mean_advantage': -1.3560057254835556e-07, 'grpo_std_advantage': 3.0318567496578908e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5922331809997559, 'epoch': 0.02}
25
+ {'loss': 0.0107, 'grad_norm': 0.0679207444190979, 'learning_rate': 1.8000000000000001e-06, 'grpo_mean_advantage': 3.6619603633880615e-06, 'grpo_std_advantage': 1.6246918676188216e-05, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5561589002609253, 'epoch': 0.04}
26
+ {'loss': 0.007, 'grad_norm': 0.05788416787981987, 'learning_rate': 2.8000000000000003e-06, 'grpo_mean_advantage': -1.0654330395709621e-07, 'grpo_std_advantage': 5.399440965447866e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5759152173995972, 'epoch': 0.06}
27
+ {'loss': 0.0246, 'grad_norm': 0.0746568813920021, 'learning_rate': 3.8000000000000005e-06, 'grpo_mean_advantage': -5.871057737749652e-07, 'grpo_std_advantage': 2.6951597646984737e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5127314329147339, 'epoch': 0.08}
28
+ {'loss': 0.0337, 'grad_norm': 0.11442846059799194, 'learning_rate': 4.800000000000001e-06, 'grpo_mean_advantage': 6.370246410369873e-07, 'grpo_std_advantage': 2.8908377771585947e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.539706826210022, 'epoch': 0.1}
29
+ {'loss': 0.0171, 'grad_norm': 0.05778791010379791, 'learning_rate': 4.999125183044924e-06, 'grpo_mean_advantage': 6.705522359595761e-09, 'grpo_std_advantage': 6.189450800775376e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5812538862228394, 'epoch': 0.12}
30
+ {'loss': 0.0145, 'grad_norm': 0.05819695070385933, 'learning_rate': 4.995572288443412e-06, 'grpo_mean_advantage': 3.859400692363124e-07, 'grpo_std_advantage': 1.6833292875162442e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5909844636917114, 'epoch': 0.14}
31
+ {'loss': 0.0196, 'grad_norm': 0.07968433201313019, 'learning_rate': 4.98929052218411e-06, 'grpo_mean_advantage': 2.600252742013254e-07, 'grpo_std_advantage': 1.4095899132371414e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5630953907966614, 'epoch': 0.16}
32
+ {'loss': 0.0186, 'grad_norm': 0.0733402892947197, 'learning_rate': 4.980286753286196e-06, 'grpo_mean_advantage': -1.2591480924584175e-07, 'grpo_std_advantage': 1.0309080380466185e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5604403614997864, 'epoch': 0.18}
33
+ {'loss': 0.0286, 'grad_norm': 0.07136482000350952, 'learning_rate': 4.9685708272387645e-06, 'grpo_mean_advantage': -2.808868941883702e-07, 'grpo_std_advantage': 1.5696078889959608e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5971035957336426, 'epoch': 0.2}
34
+ {'loss': 0.0054, 'grad_norm': 0.08851475268602371, 'learning_rate': 4.9541555552349404e-06, 'grpo_mean_advantage': 2.6822089438383045e-08, 'grpo_std_advantage': 3.7878271541558206e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5892971754074097, 'epoch': 0.22}
35
+ {'loss': -0.0074, 'grad_norm': 0.07778509706258774, 'learning_rate': 4.9370567001630155e-06, 'grpo_mean_advantage': -5.662441182607836e-08, 'grpo_std_advantage': 6.128998393251095e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.564322292804718, 'epoch': 0.24}
36
+ {'loss': 0.0145, 'grad_norm': 0.08740051090717316, 'learning_rate': 4.917292959369968e-06, 'grpo_mean_advantage': -1.5944242193199898e-07, 'grpo_std_advantage': 1.6374274309782777e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.562497615814209, 'epoch': 0.26}
37
+ {'loss': 0.0257, 'grad_norm': 0.19070060551166534, 'learning_rate': 4.8948859442161876e-06, 'grpo_mean_advantage': 1.6838312433264946e-07, 'grpo_std_advantage': 8.536571272088622e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5904761552810669, 'epoch': 0.28}
38
+ {'loss': 0.0024, 'grad_norm': 0.07321271300315857, 'learning_rate': 4.869860156443768e-06, 'grpo_mean_advantage': 1.1175870895385742e-07, 'grpo_std_advantage': 6.451961667153228e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5765624046325684, 'epoch': 0.3}
39
+ {'loss': 0.0277, 'grad_norm': 0.07126748561859131, 'learning_rate': 4.842242961384211e-06, 'grpo_mean_advantage': -1.4603138254187797e-07, 'grpo_std_advantage': 1.1309343790344428e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5858271718025208, 'epoch': 0.32}
40
+ {'loss': 0.0246, 'grad_norm': 0.08629189431667328, 'learning_rate': 4.812064558034847e-06, 'grpo_mean_advantage': -1.817941665649414e-06, 'grpo_std_advantage': 1.1141768482048064e-05, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5871662497520447, 'epoch': 0.34}
41
+ {'loss': 0.0056, 'grad_norm': 0.0998779758810997, 'learning_rate': 4.779357946036662e-06, 'grpo_mean_advantage': 1.8179416372277046e-07, 'grpo_std_advantage': 6.210335072864837e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5330992937088013, 'epoch': 0.36}
42
+ {'loss': 0.0053, 'grad_norm': 0.10614689439535141, 'learning_rate': 4.74415888958968e-06, 'grpo_mean_advantage': -2.972781771859445e-07, 'grpo_std_advantage': 3.1582342217006953e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5265295505523682, 'epoch': 0.38}
43
+ {'loss': 0.0134, 'grad_norm': 0.10345634073019028, 'learning_rate': 4.706505878345343e-06, 'grpo_mean_advantage': -7.033348197182931e-07, 'grpo_std_advantage': 4.245831405569334e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5660771131515503, 'epoch': 0.4}
44
+ with torch.cuda.amp.autocast(enabled=self.args.fp16 or self.args.bf16):
45
+ 40%|██████████████████████████▊ | 200/500 [56:13<1:25:51, 17.17s/it]/workspace/trainer-kit/GRPO-14b/run_grpo_fixed.py:700: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
46
+ {'loss': 0.0004, 'grad_norm': 0.10077933222055435, 'learning_rate': 4.666440085318626e-06, 'grpo_mean_advantage': 1.1920928955078125e-07, 'grpo_std_advantage': 3.2809634831210133e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.57631915807724, 'epoch': 0.42}
47
+ {'loss': 0.0033, 'grad_norm': 0.09548182785511017, 'learning_rate': 4.624005321865968e-06, 'grpo_mean_advantage': -4.0978193283081055e-07, 'grpo_std_advantage': 6.0397578636184335e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.546563982963562, 'epoch': 0.44}
48
+ {'loss': 0.0095, 'grad_norm': 0.09417816251516342, 'learning_rate': 4.57924798977818e-06, 'grpo_mean_advantage': -1.467764434437413e-07, 'grpo_std_advantage': 2.2689375782647403e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5519219636917114, 'epoch': 0.46}
49
+ {'loss': 0.0006, 'grad_norm': 0.10022275149822235, 'learning_rate': 4.532217030540781e-06, 'grpo_mean_advantage': -5.215406329028838e-09, 'grpo_std_advantage': 7.929010621410271e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5490407943725586, 'epoch': 0.48}
50
+ {'loss': -0.0046, 'grad_norm': 0.14057794213294983, 'learning_rate': 4.482963871817195e-06, 'grpo_mean_advantage': -5.7369469175228005e-08, 'grpo_std_advantage': 1.2823379620385822e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5646580457687378, 'epoch': 0.5}
51
+ {'loss': -0.003, 'grad_norm': 0.12420658767223358, 'learning_rate': 4.4315423712133595e-06, 'grpo_mean_advantage': 2.9876827056796174e-07, 'grpo_std_advantage': 1.0496698905626545e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.6111599802970886, 'epoch': 0.52}
52
+ {'loss': 0.0154, 'grad_norm': 0.14342808723449707, 'learning_rate': 4.378008757385222e-06, 'grpo_mean_advantage': 1.5869736103013565e-07, 'grpo_std_advantage': 1.2748531617035042e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5619662404060364, 'epoch': 0.54}
53
+ {'loss': -0.0262, 'grad_norm': 0.14729444682598114, 'learning_rate': 4.322421568553529e-06, 'grpo_mean_advantage': 3.0100346748440643e-07, 'grpo_std_advantage': 2.4499684059264837e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5795454978942871, 'epoch': 0.56}
54
+ {'loss': 0.0018, 'grad_norm': 0.15249410271644592, 'learning_rate': 4.2648415884931476e-06, 'grpo_mean_advantage': -3.233552092751779e-07, 'grpo_std_advantage': 1.248456669600273e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5804953575134277, 'epoch': 0.58}
55
+ {'loss': -0.017, 'grad_norm': 0.1841023564338684, 'learning_rate': 4.205331780066892e-06, 'grpo_mean_advantage': 3.2261013416245987e-07, 'grpo_std_advantage': 1.4773489738217904e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5628539323806763, 'epoch': 0.6}
56
+ {'loss': 0.0044, 'grad_norm': 0.18597163259983063, 'learning_rate': 4.1439572163765615e-06, 'grpo_mean_advantage': -2.5331974029541016e-07, 'grpo_std_advantage': 1.5092309695319273e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5727725625038147, 'epoch': 0.62}
57
+ {'loss': -0.005, 'grad_norm': 0.18310388922691345, 'learning_rate': 4.0807850096064605e-06, 'grpo_mean_advantage': -6.780028627417778e-08, 'grpo_std_advantage': 8.550978805033083e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5833909511566162, 'epoch': 0.64}
58
+ {'loss': -0.015, 'grad_norm': 0.2192923128604889, 'learning_rate': 4.015884237637206e-06, 'grpo_mean_advantage': -5.587935447692871e-08, 'grpo_std_advantage': 3.564579174053506e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5742615461349487, 'epoch': 0.66}
59
+ {'loss': -0.0314, 'grad_norm': 0.16708803176879883, 'learning_rate': 3.949325868510083e-06, 'grpo_mean_advantage': -5.327165126800537e-07, 'grpo_std_advantage': 2.309018327650847e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5758188962936401, 'epoch': 0.68}
60
+ {'loss': -0.0441, 'grad_norm': 0.3401262164115906, 'learning_rate': 3.881182682824534e-06, 'grpo_mean_advantage': 5.863606702405377e-07, 'grpo_std_advantage': 2.4449204829579685e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5767683982849121, 'epoch': 0.7}
61
+ {'loss': -0.0162, 'grad_norm': 0.1931898146867752, 'learning_rate': 3.811529194153635e-06, 'grpo_mean_advantage': 3.2186508747145126e-07, 'grpo_std_advantage': 2.293551688126172e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.586772084236145, 'epoch': 0.72}
62
+ {'loss': -0.0386, 'grad_norm': 0.2537969648838043, 'learning_rate': 3.7404415675646054e-06, 'grpo_mean_advantage': -4.470348358154297e-08, 'grpo_std_advantage': 3.7067667335577426e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.549396276473999, 'epoch': 0.74}
63
+ {'loss': -0.037, 'grad_norm': 0.20326584577560425, 'learning_rate': 3.667997536333424e-06, 'grpo_mean_advantage': -2.1010637851759384e-07, 'grpo_std_advantage': 1.1695076409523608e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5798425078392029, 'epoch': 0.76}
64
+ {'loss': -0.0292, 'grad_norm': 0.25048357248306274, 'learning_rate': 3.59427631694463e-06, 'grpo_mean_advantage': 1.765787658314366e-07, 'grpo_std_advantage': 2.429934738756856e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5584167838096619, 'epoch': 0.78}
65
+ {'loss': -0.0454, 'grad_norm': 0.2687569260597229, 'learning_rate': 3.5193585224692595e-06, 'grpo_mean_advantage': 1.6540289493605087e-07, 'grpo_std_advantage': 2.6342788714828203e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5676193237304688, 'epoch': 0.8}
66
+ with torch.cuda.amp.autocast(enabled=self.args.fp16 or self.args.bf16):
67
+ 60%|████████████████████████████████████████▏ | 300/500 [1:24:01<55:53, 16.77s/it]/workspace/trainer-kit/GRPO-14b/run_grpo_fixed.py:700: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
68
+ {'loss': -0.0423, 'grad_norm': 0.22301620244979858, 'learning_rate': 3.44332607441564e-06, 'grpo_mean_advantage': -1.0944902442133753e-06, 'grpo_std_advantage': 5.346942998585291e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5669739842414856, 'epoch': 0.82}
69
+ {'loss': -0.0857, 'grad_norm': 0.3040211498737335, 'learning_rate': 3.3662621131494204e-06, 'grpo_mean_advantage': 2.4065374759629776e-07, 'grpo_std_advantage': 1.6327536513927043e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5922158360481262, 'epoch': 0.84}
70
+ {'loss': -0.0278, 'grad_norm': 0.27231141924858093, 'learning_rate': 3.2882509069808044e-06, 'grpo_mean_advantage': -5.21540641784668e-08, 'grpo_std_advantage': 5.847922466273303e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5473950505256653, 'epoch': 0.86}
71
+ {'loss': -0.0727, 'grad_norm': 0.3571636378765106, 'learning_rate': 3.2093777600183873e-06, 'grpo_mean_advantage': 6.541609991472797e-07, 'grpo_std_advantage': 4.072162937518442e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5880032777786255, 'epoch': 0.88}
72
+ {'loss': -0.0464, 'grad_norm': 0.306273490190506, 'learning_rate': 3.1297289188903705e-06, 'grpo_mean_advantage': -1.2218951894737984e-07, 'grpo_std_advantage': 4.386006935419573e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5835092663764954, 'epoch': 0.9}
73
+ {'loss': -0.0295, 'grad_norm': 0.2700377106666565, 'learning_rate': 3.049391478435133e-06, 'grpo_mean_advantage': 1.7605722177904681e-06, 'grpo_std_advantage': 8.007580618141219e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5394966006278992, 'epoch': 0.92}
74
+ {'loss': -0.031, 'grad_norm': 0.39531761407852173, 'learning_rate': 2.9684532864643123e-06, 'grpo_mean_advantage': -3.3080578987210174e-07, 'grpo_std_advantage': 1.551636614749441e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5687432289123535, 'epoch': 0.94}
75
+ {'loss': -0.0789, 'grad_norm': 0.5987040996551514, 'learning_rate': 2.887002847702504e-06, 'grpo_mean_advantage': 2.712011450967111e-07, 'grpo_std_advantage': 1.4400844747797237e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5550583600997925, 'epoch': 0.96}
76
+ {'loss': -0.1131, 'grad_norm': 0.5680716037750244, 'learning_rate': 2.8051292270086506e-06, 'grpo_mean_advantage': -3.2857059295565705e-07, 'grpo_std_advantage': 2.105091425619321e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.558111310005188, 'epoch': 0.98}
77
+ {'loss': -0.2232, 'grad_norm': 0.6204046010971069, 'learning_rate': 2.722921951984927e-06, 'grpo_mean_advantage': 4.470348358154297e-08, 'grpo_std_advantage': 5.315724820320611e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.6196198463439941, 'epoch': 1.0}
78
+ {'loss': -0.1363, 'grad_norm': 0.8389026522636414, 'learning_rate': 2.640470915079614e-06, 'grpo_mean_advantage': 9.290873776990338e-07, 'grpo_std_advantage': 4.219644324621186e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.582168459892273, 'epoch': 1.02}
79
+ {'loss': -0.1868, 'grad_norm': 0.9067686796188354, 'learning_rate': 2.557866275291035e-06, 'grpo_mean_advantage': 2.533197474008375e-08, 'grpo_std_advantage': 1.6600588992332632e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5551307797431946, 'epoch': 1.04}
80
+ {'loss': -0.1792, 'grad_norm': 0.9277902841567993, 'learning_rate': 2.4751983595800093e-06, 'grpo_mean_advantage': -5.662441182607836e-08, 'grpo_std_advantage': 1.0909400316450046e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.535040020942688, 'epoch': 1.06}
81
+ {'loss': -0.1691, 'grad_norm': 1.0715463161468506, 'learning_rate': 2.392557564098649e-06, 'grpo_mean_advantage': -9.536743306171047e-08, 'grpo_std_advantage': 5.838213610331877e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5673571825027466, 'epoch': 1.08}
82
+ {'loss': -0.1655, 'grad_norm': 0.7759184837341309, 'learning_rate': 2.3100342553434924e-06, 'grpo_mean_advantage': 3.278255533700758e-08, 'grpo_std_advantage': 9.317170679423725e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5874732732772827, 'epoch': 1.1}
83
+ {'loss': -0.1821, 'grad_norm': 0.9387398958206177, 'learning_rate': 2.2277186713410688e-06, 'grpo_mean_advantage': -1.206994113545079e-07, 'grpo_std_advantage': 6.201085511747806e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5569106340408325, 'epoch': 1.12}
84
+ {'loss': -0.2102, 'grad_norm': 1.6132302284240723, 'learning_rate': 2.1457008229739395e-06, 'grpo_mean_advantage': 4.470348358154297e-08, 'grpo_std_advantage': 6.115651558502577e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5578873157501221, 'epoch': 1.14}
85
+ {'loss': -0.2937, 'grad_norm': 0.8679026961326599, 'learning_rate': 2.0640703955551214e-06, 'grpo_mean_advantage': -3.3453108017056365e-07, 'grpo_std_advantage': 3.5326345368957845e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5735999345779419, 'epoch': 1.16}
86
+ {'loss': -0.2598, 'grad_norm': 1.0550166368484497, 'learning_rate': 1.9829166507585084e-06, 'grpo_mean_advantage': -1.110136480519941e-07, 'grpo_std_advantage': 4.731904823529476e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5626259446144104, 'epoch': 1.18}
87
+ {'loss': -0.2546, 'grad_norm': 1.2819372415542603, 'learning_rate': 1.90232832901255e-06, 'grpo_mean_advantage': -5.08874677507265e-07, 'grpo_std_advantage': 1.840126174101897e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5463050603866577, 'epoch': 1.2}
88
+ with torch.cuda.amp.autocast(enabled=self.args.fp16 or self.args.bf16):
89
+ 80%|█████████████████████████████████████████████████████▌ | 400/500 [1:52:15<28:44, 17.25s/it]/workspace/trainer-kit/GRPO-14b/run_grpo_fixed.py:700: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
90
+ {'loss': -0.1809, 'grad_norm': 1.0188143253326416, 'learning_rate': 1.82239355246389e-06, 'grpo_mean_advantage': 1.01327898960335e-07, 'grpo_std_advantage': 7.798533943059738e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5352144241333008, 'epoch': 1.22}
91
+ {'loss': -0.3559, 'grad_norm': 2.0709052085876465, 'learning_rate': 1.7431997286170923e-06, 'grpo_mean_advantage': 1.341104507446289e-07, 'grpo_std_advantage': 7.821902840987605e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5547868013381958, 'epoch': 1.24}
92
+ {'loss': -0.3874, 'grad_norm': 1.8516215085983276, 'learning_rate': 1.6648334547558227e-06, 'grpo_mean_advantage': 9.015202806494926e-08, 'grpo_std_advantage': 1.0693488547985908e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5859472751617432, 'epoch': 1.26}
93
+ {'loss': -0.3467, 'grad_norm': 1.283104419708252, 'learning_rate': 1.5873804232499862e-06, 'grpo_mean_advantage': -2.443790378947597e-07, 'grpo_std_advantage': 1.183122208203713e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5751550793647766, 'epoch': 1.28}
94
+ {'loss': -0.1703, 'grad_norm': 1.4108576774597168, 'learning_rate': 1.51092532785238e-06, 'grpo_mean_advantage': -6.705522537231445e-08, 'grpo_std_advantage': 6.109748937888071e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5497723817825317, 'epoch': 1.3}
95
+ {'loss': -0.2918, 'grad_norm': 1.0421361923217773, 'learning_rate': 1.4355517710873184e-06, 'grpo_mean_advantage': -1.639127766850379e-08, 'grpo_std_advantage': 5.529495297196263e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.55989670753479, 'epoch': 1.32}
96
+ {'loss': -0.3069, 'grad_norm': 1.3465828895568848, 'learning_rate': 1.361342172832502e-06, 'grpo_mean_advantage': 4.418194237132411e-07, 'grpo_std_advantage': 2.9275292945385445e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5809233784675598, 'epoch': 1.34}
97
+ {'loss': -0.5594, 'grad_norm': 1.1959459781646729, 'learning_rate': 1.2883776801940884e-06, 'grpo_mean_advantage': 9.685754776000977e-08, 'grpo_std_advantage': 3.754235251562932e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5568087100982666, 'epoch': 1.36}
98
+ {'loss': -0.4102, 'grad_norm': 1.8967422246932983, 'learning_rate': 1.216738078773522e-06, 'grpo_mean_advantage': -2.384185791015625e-07, 'grpo_std_advantage': 6.821086913078034e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5655568838119507, 'epoch': 1.38}
99
+ {'loss': -0.338, 'grad_norm': 2.221132755279541, 'learning_rate': 1.146501705423155e-06, 'grpo_mean_advantage': -8.717179156292332e-08, 'grpo_std_advantage': 2.500940354366321e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.6089578866958618, 'epoch': 1.4}
100
+ {'loss': -0.4985, 'grad_norm': 2.3640377521514893, 'learning_rate': 1.0777453625860474e-06, 'grpo_mean_advantage': 2.1606683731079102e-07, 'grpo_std_advantage': 1.4568390724889468e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.6129671335220337, 'epoch': 1.42}
101
+ {'loss': -0.4347, 'grad_norm': 1.9084734916687012, 'learning_rate': 1.0105442343136184e-06, 'grpo_mean_advantage': -3.725290298461914e-09, 'grpo_std_advantage': 2.965894054796081e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5562310814857483, 'epoch': 1.44}
102
+ {'loss': -0.6217, 'grad_norm': 1.6063904762268066, 'learning_rate': 9.449718040529987e-07, 'grpo_mean_advantage': 4.313886279305734e-07, 'grpo_std_advantage': 1.9621948013082147e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5884170532226562, 'epoch': 1.46}
103
+ {'loss': -0.5364, 'grad_norm': 2.114664077758789, 'learning_rate': 8.810997742939531e-07, 'grpo_mean_advantage': 2.0489096641540527e-07, 'grpo_std_advantage': 1.0235522722723545e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5795440673828125, 'epoch': 1.48}
104
+ {'loss': -0.4798, 'grad_norm': 1.8450465202331543, 'learning_rate': 8.189979881632634e-07, 'grpo_mean_advantage': -1.4185905001795618e-06, 'grpo_std_advantage': 1.0947338523692451e-05, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5607603788375854, 'epoch': 1.5}
105
+ {'loss': -0.4805, 'grad_norm': 2.673438787460327, 'learning_rate': 7.587343530522945e-07, 'grpo_mean_advantage': -1.758337049295733e-07, 'grpo_std_advantage': 9.663675655247062e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5381432771682739, 'epoch': 1.52}
106
+ {'loss': -0.433, 'grad_norm': 2.2263550758361816, 'learning_rate': 7.003747663612581e-07, 'grpo_mean_advantage': -6.973743325033865e-07, 'grpo_std_advantage': 4.341973180999048e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5528443455696106, 'epoch': 1.54}
107
+ {'loss': -0.6021, 'grad_norm': 2.3657093048095703, 'learning_rate': 6.439830434413754e-07, 'grpo_mean_advantage': 1.7881394143159923e-08, 'grpo_std_advantage': 1.3004198251564958e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.6091476678848267, 'epoch': 1.56}
108
+ {'loss': -0.5595, 'grad_norm': 1.9847129583358765, 'learning_rate': 5.896208478137222e-07, 'grpo_mean_advantage': 3.4868716625169327e-07, 'grpo_std_advantage': 2.059372718576924e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5397372245788574, 'epoch': 1.58}
109
+ {'loss': -0.5592, 'grad_norm': 2.922114133834839, 'learning_rate': 5.373476237410808e-07, 'grpo_mean_advantage': -2.1636485598719446e-06, 'grpo_std_advantage': 9.725940799398813e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5873125195503235, 'epoch': 1.6}
110
+ with torch.cuda.amp.autocast(enabled=self.args.fp16 or self.args.bf16):
111
+ 100%|███████████████████████████████████████████████████████████████████| 500/500 [2:20:30<00:00, 16.86s/it]
112
+ {'loss': -0.5623, 'grad_norm': 1.8524045944213867, 'learning_rate': 4.872205312265074e-07, 'grpo_mean_advantage': -5.960464477539063e-08, 'grpo_std_advantage': 3.460792754594877e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5601426362991333, 'epoch': 1.62}
113
+ {'loss': -0.5943, 'grad_norm': 1.7269790172576904, 'learning_rate': 4.3929438350970687e-07, 'grpo_mean_advantage': 2.6226044269606064e-07, 'grpo_std_advantage': 7.928817922220333e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.578656792640686, 'epoch': 1.64}
114
+ {'loss': -0.6193, 'grad_norm': 2.26530122756958, 'learning_rate': 3.936215871295634e-07, 'grpo_mean_advantage': 2.3558736756967846e-06, 'grpo_std_advantage': 1.4469559573626611e-05, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5885810852050781, 'epoch': 1.66}
115
+ {'loss': -0.6934, 'grad_norm': 2.6794464588165283, 'learning_rate': 3.502520846183577e-07, 'grpo_mean_advantage': 1.639127766850379e-08, 'grpo_std_advantage': 9.352411325380672e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5805023312568665, 'epoch': 1.68}
116
+ {'loss': -0.5126, 'grad_norm': 2.100447654724121, 'learning_rate': 3.092332998903416e-07, 'grpo_mean_advantage': 3.2387674764322583e-06, 'grpo_std_advantage': 1.999079904635437e-05, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5655918121337891, 'epoch': 1.7}
117
+ {'loss': -0.5446, 'grad_norm': 2.1027915477752686, 'learning_rate': 2.706100863843822e-07, 'grpo_mean_advantage': 3.5464762504489045e-07, 'grpo_std_advantage': 1.7663603557593888e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5474504232406616, 'epoch': 1.72}
118
+ {'loss': -0.5125, 'grad_norm': 2.289045572280884, 'learning_rate': 2.3442467801738867e-07, 'grpo_mean_advantage': 3.6135315895080566e-07, 'grpo_std_advantage': 2.356920958845876e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5874254703521729, 'epoch': 1.74}
119
+ {'loss': -0.595, 'grad_norm': 2.278038501739502, 'learning_rate': 2.007166430021415e-07, 'grpo_mean_advantage': 2.7567148563889532e-08, 'grpo_std_advantage': 9.97340521280421e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5815118551254272, 'epoch': 1.76}
120
+ {'loss': -0.8055, 'grad_norm': 2.340942621231079, 'learning_rate': 1.6952284058003366e-07, 'grpo_mean_advantage': -8.34465012644614e-08, 'grpo_std_advantage': 5.558832185670326e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5611211061477661, 'epoch': 1.78}
121
+ {'loss': -0.8561, 'grad_norm': 2.4256298542022705, 'learning_rate': 1.4087738071603075e-07, 'grpo_mean_advantage': -1.9818544672034477e-07, 'grpo_std_advantage': 6.800727305744658e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.590424656867981, 'epoch': 1.8}
122
+ {'loss': -0.429, 'grad_norm': 1.6453255414962769, 'learning_rate': 1.1481158679992554e-07, 'grpo_mean_advantage': -1.9371508841459217e-08, 'grpo_std_advantage': 3.142378943721269e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5670351982116699, 'epoch': 1.82}
123
+ {'loss': -0.642, 'grad_norm': 2.3458049297332764, 'learning_rate': 9.135396139467151e-08, 'grpo_mean_advantage': 2.3692845729783585e-07, 'grpo_std_advantage': 1.682946731307311e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5640432834625244, 'epoch': 1.84}
124
+ {'loss': -0.583, 'grad_norm': 2.730945110321045, 'learning_rate': 7.053015506924749e-08, 'grpo_mean_advantage': 1.110136480519941e-07, 'grpo_std_advantage': 8.930008448260196e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5584251284599304, 'epoch': 1.86}
125
+ {'loss': -0.5197, 'grad_norm': 2.1463465690612793, 'learning_rate': 5.236293835013839e-08, 'grpo_mean_advantage': 2.5406478698641877e-07, 'grpo_std_advantage': 9.93092498902115e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5392154455184937, 'epoch': 1.88}
126
+ {'loss': -0.5864, 'grad_norm': 2.427900791168213, 'learning_rate': 3.687217682209837e-08, 'grpo_mean_advantage': -8.940697071579962e-09, 'grpo_std_advantage': 5.835169645251881e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5686308741569519, 'epoch': 1.9}
127
+ {'loss': -0.5721, 'grad_norm': 2.042795419692993, 'learning_rate': 2.4074809405425227e-08, 'grpo_mean_advantage': 4.0605664253234863e-07, 'grpo_std_advantage': 2.3210795916384086e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5842767357826233, 'epoch': 1.92}
128
+ {'loss': -0.5944, 'grad_norm': 2.800136089324951, 'learning_rate': 1.3984829833499636e-08, 'grpo_mean_advantage': 1.341104507446289e-07, 'grpo_std_advantage': 1.507950400991831e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5495311617851257, 'epoch': 1.94}
129
+ {'loss': -0.7015, 'grad_norm': 2.8475866317749023, 'learning_rate': 6.6132713508446075e-09, 'grpo_mean_advantage': 2.689659481802664e-07, 'grpo_std_advantage': 8.491958851664094e-07, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.549436628818512, 'epoch': 1.96}
130
+ {'loss': -0.4033, 'grad_norm': 2.9422402381896973, 'learning_rate': 1.9681946484320645e-09, 'grpo_mean_advantage': 8.195638656616211e-08, 'grpo_std_advantage': 3.802849732892355e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.544632077217102, 'epoch': 1.98}
131
+ {'loss': -0.6773, 'grad_norm': 2.66204833984375, 'learning_rate': 5.467904943851077e-11, 'grpo_mean_advantage': 7.552536089860951e-07, 'grpo_std_advantage': 4.143997102801222e-06, 'grpo_mean_kl_div': 0.0, 'grpo_mean_group_score': 0.5968535542488098, 'epoch': 2.0}
132
+ {'train_runtime': 8430.3461, 'train_samples_per_second': 0.474, 'train_steps_per_second': 0.059, 'train_loss': -0.23323759501613678, 'epoch': 2.0}
133
+ Saved best adapter -> runs/grpo_14b_run1/best_adapter
134
+ Merge disabled. Run with --merge-only later if needed.
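Each `{'loss': ...}` line above is a Python dict repr emitted at the logging steps (logging_steps: 5), so the training curves can be recovered directly from this log. A small sketch, assuming a local copy of the raw `output.log` without the diff's leading `+ ` markers; the path is illustrative:

```python
import ast

def parse_grpo_log(log_path: str = "output.log"):
    """Collect the per-step metric dicts printed by the trainer."""
    records = []
    with open(log_path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line.startswith("{'loss'"):
                records.append(ast.literal_eval(line))
    return records

metrics = parse_grpo_log()
for m in metrics:
    print(f"epoch {m['epoch']:.2f}  loss {m['loss']:+.4f}  "
          f"group_score {m['grpo_mean_group_score']:.4f}")
```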
grpo_qwen_14B_v2/wandb/run-20251227_194423-jz7bptqa/files/requirements.txt ADDED
@@ -0,0 +1,102 @@
1
+ exceptiongroup==1.3.1
2
+ wheel==0.45.1
3
+ python-dateutil==2.9.0.post0
4
+ huggingface-hub==0.36.0
5
+ idna==3.11
6
+ click==8.3.1
7
+ numpy==2.2.6
8
+ httpx==0.28.1
9
+ tokenizers==0.22.1
10
+ sympy==1.13.1
11
+ yarl==1.22.0
12
+ async-timeout==5.0.1
13
+ datasets==4.4.2
14
+ einops==0.8.1
15
+ platformdirs==4.5.1
16
+ nvidia-cuda-cupti-cu12==12.1.105
17
+ nvidia-nvtx-cu12==12.1.105
18
+ smmap==5.0.2
19
+ accelerate==1.12.0
20
+ requests==2.32.5
21
+ aiohttp==3.13.2
22
+ transformers==4.57.3
23
+ bitsandbytes==0.49.0
24
+ evaluate==0.4.6
25
+ nvidia-cublas-cu12==12.1.3.1
26
+ mpmath==1.3.0
27
+ torchaudio==2.5.1+cu121
28
+ nvidia-cuda-runtime-cu12==12.1.105
29
+ typing-inspection==0.4.2
30
+ GitPython==3.1.45
31
+ xxhash==3.6.0
32
+ nvidia-cusolver-cu12==11.4.5.107
33
+ pydantic_core==2.41.5
34
+ six==1.17.0
35
+ torchvision==0.20.1+cu121
36
+ typing_extensions==4.15.0
37
+ triton==3.1.0
38
+ charset-normalizer==3.4.4
39
+ wandb==0.23.1
40
+ regex==2025.11.3
41
+ pip==25.3
42
+ nvidia-cusparse-cu12==12.1.0.106
43
+ pytz==2025.2
44
+ Jinja2==3.1.6
45
+ psutil==7.2.0
46
+ pillow==12.0.0
47
+ packaging==25.0
48
+ safetensors==0.7.0
49
+ sentry-sdk==2.48.0
50
+ gitdb==4.0.12
51
+ httpcore==1.0.9
52
+ setuptools==80.9.0
53
+ nvidia-cufft-cu12==11.0.2.54
54
+ flash_attn==2.8.3
55
+ anyio==4.12.0
56
+ pydantic==2.12.5
57
+ fsspec==2025.10.0
58
+ filelock==3.20.0
59
+ PyYAML==6.0.3
60
+ hf-xet==1.2.0
61
+ nvidia-cudnn-cu12==9.1.0.70
62
+ tqdm==4.67.1
63
+ MarkupSafe==2.1.5
64
+ attrs==25.4.0
65
+ nvidia-cuda-nvrtc-cu12==12.1.105
66
+ peft==0.18.0
67
+ aiohappyeyeballs==2.6.1
68
+ networkx==3.4.2
69
+ nvidia-nvjitlink-cu12==12.9.86
70
+ certifi==2025.11.12
71
+ pyarrow==22.0.0
72
+ dill==0.4.0
73
+ protobuf==6.33.2
74
+ aiosignal==1.4.0
75
+ frozenlist==1.8.0
76
+ urllib3==2.6.2
77
+ propcache==0.4.1
78
+ tzdata==2025.3
79
+ pandas==2.3.3
80
+ annotated-types==0.7.0
81
+ nvidia-nccl-cu12==2.21.5
82
+ multidict==6.7.0
83
+ nvidia-curand-cu12==10.3.2.106
84
+ torch==2.5.1+cu121
85
+ h11==0.16.0
86
+ multiprocess==0.70.18
87
+ wheel==0.45.1
88
+ tomli==2.0.1
89
+ autocommand==2.2.2
90
+ jaraco.context==5.3.0
91
+ zipp==3.19.2
92
+ packaging==24.2
93
+ inflect==7.3.1
94
+ typing_extensions==4.12.2
95
+ platformdirs==4.2.2
96
+ jaraco.functools==4.0.1
97
+ jaraco.collections==5.1.0
98
+ jaraco.text==3.12.1
99
+ backports.tarfile==1.2.0
100
+ more-itertools==10.3.0
101
+ importlib_metadata==8.0.0
102
+ typeguard==4.3.0
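Given the pinned `peft==0.18.0` and `transformers==4.57.3` above, the LoRA adapter that the run saved to `runs/grpo_14b_run1/best_adapter` (see output.log) can be loaded back onto the base checkpoint for evaluation. A minimal sketch, assuming the base-model path from config.yaml is still present locally:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "/workspace/Models/Qwen2.5-Coder-14B-CPT-SFT_v2"  # repo_id from the run config
ADAPTER = "runs/grpo_14b_run1/best_adapter"              # written at the end of training

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-14B", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    BASE, dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
)
model = PeftModel.from_pretrained(model, ADAPTER)
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
model.eval()
```

The `merge_and_unload()` call is roughly what the deferred merge step ("Merge disabled. Run with --merge-only later if needed.") would produce, assuming that flag simply merges the saved adapter into the base weights.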