jannalu committed on
Commit
ddbed1e
·
verified ·
1 Parent(s): c64e022

Add files using upload-large-folder tool

Browse files
Files changed (50) hide show
  1. README.md +284 -0
  2. data/0k/dataset_dict.json +1 -0
  3. data/0k/test/state.json +13 -0
  4. data/0k/train/dataset_info.json +58 -0
  5. data/0k/train/state.json +13 -0
  6. data/0k/validation/dataset_info.json +58 -0
  7. data/128k/dataset_dict.json +1 -0
  8. data/128k/prompt/dataset_info.json +58 -0
  9. data/128k/prompt/state.json +13 -0
  10. data/128k/test/dataset_info.json +58 -0
  11. data/128k/test/state.json +13 -0
  12. data/128k/train/dataset_info.json +58 -0
  13. data/128k/train/state.json +13 -0
  14. data/128k/validation/dataset_info.json +58 -0
  15. data/128k/validation/state.json +13 -0
  16. data/1m/dataset_dict.json +1 -0
  17. data/1m/prompt/dataset_info.json +58 -0
  18. data/1m/prompt/state.json +13 -0
  19. data/1m/test/dataset_info.json +58 -0
  20. data/1m/test/state.json +22 -0
  21. data/1m/train/dataset_info.json +58 -0
  22. data/1m/train/state.json +19 -0
  23. data/1m/validation/dataset_info.json +58 -0
  24. data/1m/validation/state.json +13 -0
  25. data/256k/dataset_dict.json +1 -0
  26. data/256k/prompt/dataset_info.json +58 -0
  27. data/256k/prompt/state.json +13 -0
  28. data/256k/test/dataset_info.json +58 -0
  29. data/256k/test/state.json +16 -0
  30. data/256k/train/dataset_info.json +58 -0
  31. data/256k/train/state.json +13 -0
  32. data/256k/validation/dataset_info.json +58 -0
  33. data/256k/validation/state.json +13 -0
  34. data/512k/dataset_dict.json +1 -0
  35. data/512k/prompt/dataset_info.json +58 -0
  36. data/512k/prompt/state.json +13 -0
  37. data/512k/test/dataset_info.json +58 -0
  38. data/512k/test/state.json +19 -0
  39. data/512k/train/dataset_info.json +58 -0
  40. data/512k/train/state.json +16 -0
  41. data/512k/validation/dataset_info.json +58 -0
  42. data/512k/validation/state.json +13 -0
  43. data/64k/dataset_dict.json +1 -0
  44. data/64k/prompt/dataset_info.json +58 -0
  45. data/64k/prompt/state.json +13 -0
  46. data/64k/test/state.json +13 -0
  47. data/64k/train/dataset_info.json +58 -0
  48. data/64k/train/state.json +13 -0
  49. data/64k/validation/state.json +13 -0
  50. utils.py +126 -0
README.md ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ task_categories:
4
+ - text-generation
5
+ language:
6
+ - en
7
+ tags:
8
+ - code
9
+ - python
10
+ - long-context
11
+ - coding
12
+ size_categories:
13
+ - 1K<n<10K
14
+ configs:
15
+ - config_name: 0k
16
+ data_files:
17
+ - split: test
18
+ path: 0k/test-*
19
+ - split: train
20
+ path: 0k/train-*
21
+ - split: validation
22
+ path: 0k/validation-*
23
+ - split: prompt
24
+ path: 0k/prompt-*
25
+ - config_name: 1k
26
+ data_files:
27
+ - split: test
28
+ path: 1k/test-*
29
+ - split: train
30
+ path: 1k/train-*
31
+ - split: validation
32
+ path: 1k/validation-*
33
+ - split: prompt
34
+ path: 1k/prompt-*
35
+ - config_name: 2k
36
+ data_files:
37
+ - split: test
38
+ path: 2k/test-*
39
+ - split: train
40
+ path: 2k/train-*
41
+ - split: validation
42
+ path: 2k/validation-*
43
+ - split: prompt
44
+ path: 2k/prompt-*
45
+ - config_name: 4k
46
+ data_files:
47
+ - split: test
48
+ path: 4k/test-*
49
+ - split: train
50
+ path: 4k/train-*
51
+ - split: validation
52
+ path: 4k/validation-*
53
+ - split: prompt
54
+ path: 4k/prompt-*
55
+ - config_name: 16k
56
+ data_files:
57
+ - split: test
58
+ path: 16k/test-*
59
+ - split: train
60
+ path: 16k/train-*
61
+ - split: validation
62
+ path: 16k/validation-*
63
+ - split: prompt
64
+ path: 16k/prompt-*
65
+ - config_name: 32k
66
+ data_files:
67
+ - split: test
68
+ path: 32k/test-*
69
+ - split: train
70
+ path: 32k/train-*
71
+ - split: validation
72
+ path: 32k/validation-*
73
+ - split: prompt
74
+ path: 32k/prompt-*
75
+ - config_name: 64k
76
+ data_files:
77
+ - split: test
78
+ path: 64k/test-*
79
+ - split: train
80
+ path: 64k/train-*
81
+ - split: validation
82
+ path: 64k/validation-*
83
+ - split: prompt
84
+ path: 64k/prompt-*
85
+ - config_name: 128k
86
+ data_files:
87
+ - split: test
88
+ path: 128k/test-*
89
+ - split: train
90
+ path: 128k/train-*
91
+ - split: validation
92
+ path: 128k/validation-*
93
+ - split: prompt
94
+ path: 128k/prompt-*
95
+ - config_name: 256k
96
+ data_files:
97
+ - split: test
98
+ path: 256k/test-*
99
+ - split: train
100
+ path: 256k/train-*
101
+ - split: validation
102
+ path: 256k/validation-*
103
+ - split: prompt
104
+ path: 256k/prompt-*
105
+ - config_name: 512k
106
+ data_files:
107
+ - split: test
108
+ path: 512k/test-*
109
+ - split: train
110
+ path: 512k/train-*
111
+ - split: validation
112
+ path: 512k/validation-*
113
+ - split: prompt
114
+ path: 512k/prompt-*
115
+ - config_name: 1m
116
+ data_files:
117
+ - split: test
118
+ path: 1m/test-*
119
+ - split: train
120
+ path: 1m/train-*
121
+ - split: validation
122
+ path: 1m/validation-*
123
+ - split: prompt
124
+ path: 1m/prompt-*
125
+ dataset_info:
126
+ features:
127
+ - name: task_id
128
+ dtype: int64
129
+ - name: text
130
+ dtype: string
131
+ - name: code
132
+ dtype: string
133
+ - name: test_list
134
+ sequence: string
135
+ - name: test_setup_code
136
+ dtype: string
137
+ - name: challenge_test_list
138
+ sequence: string
139
+ - name: context_text
140
+ dtype: string
141
+ - name: context_id
142
+ dtype: string
143
+ - name: context_length_tokens
144
+ dtype: int64
145
+ - name: context_type
146
+ dtype: string
147
+ - name: code_length_chars
148
+ dtype: int64
149
+ - name: dataset_version
150
+ dtype: string
151
+ splits:
152
+ - name: test
153
+ num_examples: 500
154
+ - name: train
155
+ num_examples: 374
156
+ - name: validation
157
+ num_examples: 90
158
+ - name: prompt
159
+ num_examples: 10
160
+ ---
161
+
162
+ # MBPP Long-Context Dataset
163
+
164
+ ## Overview
165
+
166
+ MBPP Long-Context is a benchmark dataset that combines coding problems from the [MBPP (Mostly Basic Python Problems)](https://github.com/google-research/google-research/tree/master/mbpp) dataset with long-context distractors from [BABILong](https://github.com/booydar/babilong). This dataset evaluates code generation performance under long-context conditions, testing whether models can maintain coding ability while processing lengthy contextual information.
167
+
168
+ ## Dataset Description
169
+
170
+ This dataset contains **974 MBPP problems** (across test/train/validation/prompt splits), each prepended with narrative context from BABILong at various lengths. The dataset preserves the original MBPP split structure to maintain compatibility with existing evaluation protocols.
171
+
172
+ ### Key Features
173
+
174
+ - **Multiple Context Lengths**: 11 configurations from 0k to 1M tokens
175
+ - **Stratified Assignment**: Contexts are evenly distributed across problem difficulty levels
176
+ - **Split Preservation**: Maintains original MBPP splits (test: 500, train: 374, validation: 90, prompt: 10)
177
+ - **Reproducible**: Fixed seed ensures consistent context assignments
178
+ - **Metadata Rich**: Includes token counts, context IDs, and problem difficulty proxies
179
+
180
+ ## Dataset Configurations
181
+
182
+ Each configuration represents a different context length:
183
+
184
+ | Config | Approximate Tokens | BABILong Source |
185
+ |--------|-------------------|-----------------|
186
+ | `0k` | ~85 tokens | 0k baseline |
187
+ | `1k` | ~1,024 tokens | 1k |
188
+ | `2k` | ~2,048 tokens | 2k |
189
+ | `4k` | ~4,096 tokens | 4k |
190
+ | `16k` | ~16,384 tokens | 16k |
191
+ | `32k` | ~32,768 tokens | 32k |
192
+ | `64k` | ~65,536 tokens | 64k |
193
+ | `128k` | ~131,072 tokens | 128k |
194
+ | `256k` | ~262,144 tokens | 256k |
195
+ | `512k` | ~524,288 tokens | 512k |
196
+ | `1m` | ~1,048,576 tokens | 1M |
197
+
198
+ Token counts are approximate and measured using the Meta-Llama-3-8B tokenizer.
199
+
200
+ ## Dataset Structure
201
+
202
+ ### Data Fields
203
+
204
+ Each sample contains:
205
+
206
+ #### Original MBPP Fields
207
+ - `task_id` (int): Unique task identifier
208
+ - `text` (str): Problem description
209
+ - `code` (str): Reference solution
210
+ - `test_list` (List[str]): Test cases (assertions)
211
+ - `test_setup_code` (str): Optional setup code
212
+ - `challenge_test_list` (List[str]): Additional test cases
213
+
214
+ #### Long-Context Fields
215
+ - `context_text` (str): Prepended distractor text from BABILong
216
+ - `context_id` (str): BABILong source identifier (e.g., "babilong_128k_qa1_sample_42")
217
+ - `context_length_tokens` (int): Token count using Llama tokenizer
218
+ - `context_type` (str): Type of context (currently "narrative")
219
+
220
+ #### Metadata
221
+ - `code_length_chars` (int): Reference solution length for difficulty tracking
222
+ - `dataset_version` (str): Version tag ("1.0.0")
223
+
224
+ ### Data Splits
225
+
226
+ All configurations maintain the original MBPP split structure:
227
+
228
+ - **test**: 500 samples (primary evaluation set)
229
+ - **train**: 374 samples
230
+ - **validation**: 90 samples
231
+ - **prompt**: 10 samples (few-shot examples)
232
+
233
+ ## Dataset Creation
234
+
235
+ ### Context Assignment Methodology
236
+
237
+ To avoid confounding variables, this dataset uses **stratified random assignment**:
238
+
239
+ 1. **Sort MBPP tasks** by code length (difficulty proxy)
240
+ 2. **Extract contexts** from BABILong qa1-qa10 splits (~1000 contexts per length)
241
+ 3. **Extend contexts** to match task count (974 samples)
242
+ 4. **Shuffle contexts** with fixed seed (unique per split)
243
+ 5. **Assign contexts** to sorted tasks
244
+
245
+ This ensures each context appears evenly across difficulty levels, preventing context-specific bias in evaluation.
246
+
247
+ ## Source Datasets
248
+
249
+ ### MBPP (Mostly Basic Python Problems)
250
+ - **Source**: google-research-datasets/mbpp
251
+ - **License**: Apache 2.0
252
+ - **Size**: 974 problems
253
+ - **Paper**: [Program Synthesis with Large Language Models](https://arxiv.org/abs/2108.07732)
254
+
255
+ ### BABILong
256
+ - **Source**: RMT-team/babilong
257
+ - **License**: MIT
258
+ - **Context Extraction**: `input` field from qa1-qa10 splits
259
+ - **Paper**: [BABILong: Testing the Limits of LLMs with Long Context Reasoning-in-a-Haystack](https://arxiv.org/abs/2406.10149)
260
+
261
+
262
+ ## Citation
263
+
264
+ If you use this dataset, please cite both MBPP and BABILong:
265
+
266
+ ```bibtex
267
+ @article{austin2021program,
268
+ title={Program Synthesis with Large Language Models},
269
+ author={Austin, Jacob and Odena, Augustus and Nye, Maxwell and Bosma, Maarten and Michalewski, Henryk and Dohan, David and Jiang, Ellen and Cai, Carrie and Terry, Michael and Le, Quoc and others},
270
+ journal={arXiv preprint arXiv:2108.07732},
271
+ year={2021}
272
+ }
273
+
274
+ @article{kuratov2024babilong,
275
+ title={BABILong: Testing the Limits of LLMs with Long Context Reasoning-in-a-Haystack},
276
+ author={Kuratov, Yuri and Bulatov, Aydar and Anokhin, Petr and Rodkin, Ivan and Sorokin, Dmitriy and Burtsev, Mikhail},
277
+ journal={arXiv preprint arXiv:2406.10149},
278
+ year={2024}
279
+ }
280
+ ```
281
+
282
+ ## Dataset Card Authors
283
+
284
+ - Janna Lu ([@jannalu](https://github.com/jannalu))
data/0k/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test", "train", "validation", "prompt"]}
data/0k/test/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "903073458c85d6f8",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/0k/train/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/0k/train/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "c10c3aa5880c7ded",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/0k/validation/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/128k/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test", "train", "validation", "prompt"]}
data/128k/prompt/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/128k/prompt/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "c5da1fa20006a79c",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/128k/test/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "string",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/128k/test/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "d37dbcafe30bcee1",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/128k/train/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/128k/train/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "c856cc9d17066e7a",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/128k/validation/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/128k/validation/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "36c2963110f3acad",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/1m/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test", "train", "validation", "prompt"]}
data/1m/prompt/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/1m/prompt/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "fcda0c392e9c4a80",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/1m/test/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "string",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/1m/test/state.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00004.arrow"
5
+ },
6
+ {
7
+ "filename": "data-00001-of-00004.arrow"
8
+ },
9
+ {
10
+ "filename": "data-00002-of-00004.arrow"
11
+ },
12
+ {
13
+ "filename": "data-00003-of-00004.arrow"
14
+ }
15
+ ],
16
+ "_fingerprint": "09326c5fc6853e06",
17
+ "_format_columns": null,
18
+ "_format_kwargs": {},
19
+ "_format_type": null,
20
+ "_output_all_columns": false,
21
+ "_split": null
22
+ }
data/1m/train/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/1m/train/state.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00003.arrow"
5
+ },
6
+ {
7
+ "filename": "data-00001-of-00003.arrow"
8
+ },
9
+ {
10
+ "filename": "data-00002-of-00003.arrow"
11
+ }
12
+ ],
13
+ "_fingerprint": "40ada4799f4622ad",
14
+ "_format_columns": null,
15
+ "_format_kwargs": {},
16
+ "_format_type": null,
17
+ "_output_all_columns": false,
18
+ "_split": null
19
+ }
data/1m/validation/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/1m/validation/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "b170b08e29820606",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/256k/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test", "train", "validation", "prompt"]}
data/256k/prompt/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/256k/prompt/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "9839b52e96fd161d",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/256k/test/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "string",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/256k/test/state.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00002.arrow"
5
+ },
6
+ {
7
+ "filename": "data-00001-of-00002.arrow"
8
+ }
9
+ ],
10
+ "_fingerprint": "b79ca2be906eeb77",
11
+ "_format_columns": null,
12
+ "_format_kwargs": {},
13
+ "_format_type": null,
14
+ "_output_all_columns": false,
15
+ "_split": null
16
+ }
data/256k/train/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/256k/train/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "529a8cd305158224",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/256k/validation/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/256k/validation/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "6158e4ad508a807f",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/512k/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test", "train", "validation", "prompt"]}
data/512k/prompt/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/512k/prompt/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "d0f3b5a1351383c2",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/512k/test/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "string",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/512k/test/state.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00003.arrow"
5
+ },
6
+ {
7
+ "filename": "data-00001-of-00003.arrow"
8
+ },
9
+ {
10
+ "filename": "data-00002-of-00003.arrow"
11
+ }
12
+ ],
13
+ "_fingerprint": "d125d238ab12bcfa",
14
+ "_format_columns": null,
15
+ "_format_kwargs": {},
16
+ "_format_type": null,
17
+ "_output_all_columns": false,
18
+ "_split": null
19
+ }
data/512k/train/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/512k/train/state.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00002.arrow"
5
+ },
6
+ {
7
+ "filename": "data-00001-of-00002.arrow"
8
+ }
9
+ ],
10
+ "_fingerprint": "9aa6b6d58cf18c35",
11
+ "_format_columns": null,
12
+ "_format_kwargs": {},
13
+ "_format_type": null,
14
+ "_output_all_columns": false,
15
+ "_split": null
16
+ }
data/512k/validation/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/512k/validation/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "702e8bfdc3207c42",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/64k/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test", "train", "validation", "prompt"]}
data/64k/prompt/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/64k/prompt/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "59eaccaba9080c49",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/64k/test/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "4e2e6ab8457d20a6",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/64k/train/dataset_info.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "citation": "",
3
+ "description": "",
4
+ "features": {
5
+ "task_id": {
6
+ "dtype": "int64",
7
+ "_type": "Value"
8
+ },
9
+ "text": {
10
+ "dtype": "string",
11
+ "_type": "Value"
12
+ },
13
+ "code": {
14
+ "dtype": "string",
15
+ "_type": "Value"
16
+ },
17
+ "test_list": {
18
+ "feature": {
19
+ "dtype": "string",
20
+ "_type": "Value"
21
+ },
22
+ "_type": "Sequence"
23
+ },
24
+ "test_setup_code": {
25
+ "dtype": "string",
26
+ "_type": "Value"
27
+ },
28
+ "challenge_test_list": {
29
+ "feature": {
30
+ "dtype": "null",
31
+ "_type": "Value"
32
+ },
33
+ "_type": "Sequence"
34
+ },
35
+ "context_text": {
36
+ "dtype": "string",
37
+ "_type": "Value"
38
+ },
39
+ "context_id": {
40
+ "dtype": "string",
41
+ "_type": "Value"
42
+ },
43
+ "context_length_tokens": {
44
+ "dtype": "int64",
45
+ "_type": "Value"
46
+ },
47
+ "context_type": {
48
+ "dtype": "string",
49
+ "_type": "Value"
50
+ },
51
+ "code_length_chars": {
52
+ "dtype": "int64",
53
+ "_type": "Value"
54
+ }
55
+ },
56
+ "homepage": "",
57
+ "license": ""
58
+ }
data/64k/train/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "7884e63b3025ca63",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
data/64k/validation/state.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "data-00000-of-00001.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "a9609982eae4707f",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_output_all_columns": false,
12
+ "_split": null
13
+ }
utils.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import re
3
+ from typing import Union
4
+
5
+ import datasets
6
+ import evaluate as hf_evaluate
7
+
8
+
9
+ eval_logger = logging.getLogger(__name__)
10
+
11
+
12
# Load the Hugging Face ``code_eval`` metric once at import time.  The metric
# refuses to execute untrusted code unless the HF_ALLOW_CODE_EVAL=1
# environment variable is set, so failing here — before any model
# generation is spent — is deliberate fail-fast behavior.
try:
    pass_at_k = hf_evaluate.load("code_eval")

    # Smoke test: run one trivial (intentionally failing) candidate so a
    # disabled or broken code-execution sandbox is detected up front.
    # The computed score itself is discarded.
    test_cases = ["assert add(2, 3)==5"]
    candidates = [["def add(a,b): return a*b"]]
    results = pass_at_k.compute(references=test_cases, predictions=candidates, k=[1])
except Exception:
    # Bare ``raise`` re-raises the active exception with its original
    # traceback; the previous ``raise e`` only added a redundant frame.
    raise
21
+
22
+
23
def load_dataset(**kwargs):
    """Load the MBPP long-context dataset for the requested context length.

    Args:
        **kwargs: Task configuration; only ``context_length`` is consulted
            (defaults to ``"128k"``) and is used as the dataset config name.

    Returns:
        The ``DatasetDict`` returned by ``datasets.load_dataset``.
    """
    context_length = kwargs.get("context_length", "128k")
    eval_logger.info(
        f"Loading mbpp_longcontext dataset: context_length={context_length}"
    )
    return datasets.load_dataset("jannalu/mbpp-longcontext", name=context_length)
32
+
33
+
34
def pass_at_1(
    references: Union[str, list[str]], predictions: Union[str, list[list[str]]]
) -> float:
    """Compute the pass@1 metric for code generation.

    Args:
        references: One test-assertion string, or a list of them.
        predictions: Candidate programs; a flat list of strings is wrapped
            so each prediction becomes a single-candidate list.

    Returns:
        The ``pass@1`` value reported by the ``code_eval`` metric.
    """
    refs = [references] if isinstance(references, str) else references
    preds = predictions
    if isinstance(preds[0], str):
        preds = [[candidate] for candidate in preds]
    scores = pass_at_k.compute(
        references=refs,
        predictions=preds,
        k=[1],
    )
    # ``compute`` returns (scores_dict, per-sample results); keep the score.
    return scores[0]["pass@1"]
47
+
48
+
49
def extract_code_blocks(text: str) -> str:
    """Extract the first fenced code block from generated text.

    The generation prefix already supplies the opening ```` ```python ````
    fence, so a bare ``` is prepended before matching.  If that yields no
    fenced block, a second pass strips explicit ```python fences from the
    raw text and retries.  Returns ``""`` when no block is found.
    """
    pattern = r"```(?:\w+)?\n?(.*?)\n?```"
    # (+ ```) as we add the opening "```python" to the gen_prefix
    blocks = re.findall(pattern, r"```" + text, re.DOTALL)
    if not blocks:
        # Fallback: drop the language tag from any fences and retry.
        blocks = re.findall(pattern, re.sub(r"```python", "```", text), re.DOTALL)
    return blocks[0] if blocks else ""
63
+
64
+
65
def build_predictions(resps: list[list[str]], docs: list[dict]) -> list[list[str]]:
    """Convert raw model responses into candidate code lists.

    Each response string is reduced to its first fenced code block.
    ``docs`` is accepted for interface compatibility but not consulted.
    """
    predictions = []
    for response_group in resps:
        predictions.append([extract_code_blocks(candidate) for candidate in response_group])
    return predictions
68
+
69
+
70
def doc_to_metadata(doc: dict) -> dict:
    """Collect per-document metadata for tracking and analysis.

    Exposes ``context_length_tokens`` as ``seq_length`` so results can be
    grouped and analyzed by sequence length.

    Args:
        doc: A dataset record; missing fields fall back to defaults
            (0, "", and "narrative" respectively).

    Returns:
        A dict with ``seq_length``, ``context_id``, and ``context_type``.
    """
    seq_length = doc.get("context_length_tokens", 0)
    context_id = doc.get("context_id", "")
    context_type = doc.get("context_type", "narrative")
    return {
        "seq_length": seq_length,
        "context_id": context_id,
        "context_type": context_type,
    }
82
+
83
+
84
def list_fewshot_samples() -> list[dict]:
    """
    Return few-shot examples for MBPP long-context.

    Note: These examples do NOT include the long context since they're used
    for few-shot. Each record carries ``is_fewshot: True`` and a fully
    rendered ``prompt_with_context`` string ending in the "[BEGIN]" marker.
    Task ids 2-4 presumably mirror the standard MBPP few-shot split —
    TODO(review): confirm against the upstream MBPP prompt convention.
    """
    return [
        {
            "task_id": 2,
            "text": "Write a function to find the similar elements from the given two tuple lists.",
            "code": "def similar_elements(test_tup1, test_tup2):\r\n  res = tuple(set(test_tup1) & set(test_tup2))\r\n  return (res) ",
            "test_list": [
                "assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)",
                "assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4)",
                "assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14)",
            ],
            "is_fewshot": True,
            "prompt_with_context": "Here is your task: Write a function to find the similar elements from the given two tuple lists. Your code should pass these tests:\n\nassert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\nassert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4)\nassert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14)\n[BEGIN]\n",
        },
        {
            "task_id": 3,
            "text": "Write a python function to identify non-prime numbers.",
            "code": "import math\r\ndef is_not_prime(n):\r\n    result = False\r\n    for i in range(2,int(math.sqrt(n)) + 1):\r\n        if n % i == 0:\r\n            result = True\r\n    return result",
            "test_list": [
                "assert is_not_prime(2) == False",
                "assert is_not_prime(10) == True",
                "assert is_not_prime(35) == True",
            ],
            "is_fewshot": True,
            "prompt_with_context": "Here is your task: Write a python function to identify non-prime numbers. Your code should pass these tests:\n\nassert is_not_prime(2) == False\nassert is_not_prime(10) == True\nassert is_not_prime(35) == True\n[BEGIN]\n",
        },
        {
            "task_id": 4,
            "text": "Write a function to find the largest integers from a given list of numbers using heap queue algorithm.",
            "code": "import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n  largest_nums = hq.nlargest(n, nums)\r\n  return largest_nums",
            "test_list": [
                "assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] ",
                "assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] ",
                "assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35]",
            ],
            "is_fewshot": True,
            "prompt_with_context": "Here is your task: Write a function to find the largest integers from a given list of numbers using heap queue algorithm. Your code should pass these tests:\n\nassert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \nassert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \nassert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35]\n[BEGIN]\n",
        },
    ]