Datasets:

Languages:
code
ArXiv:
Tags:
code
License:
Muennighoff committed on
Commit
4e38633
·
2 Parent(s): c23113c f553a8c

Merge branch 'main' of https://huggingface.co/datasets/Muennighoff/humaneval-x-bugs

Browse files
Files changed (1)
  1. humaneval-x-bugs.py +9 -5
humaneval-x-bugs.py CHANGED
@@ -57,14 +57,14 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
57
  name="python",
58
  description="Python HumanEvalBugs",
59
  features=[
60
- "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
61
  ]
62
  ),
63
  HumanEvalXBugsConfig(
64
  name="cpp",
65
  description="C++ HumanEvalBugs",
66
  features=[
67
- "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
68
  ]
69
  ),
70
 
@@ -72,14 +72,14 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
72
  name="go",
73
  description="Go HumanEvalBugs",
74
  features=[
75
- "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
76
  ]
77
  ),
78
  HumanEvalXBugsConfig(
79
  name="java",
80
  description="Java HumanEvalBugs",
81
  features=[
82
- "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
83
  ]
84
  ),
85
 
@@ -87,7 +87,7 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
87
  name="js",
88
  description="JavaScript HumanEvalBugs",
89
  features=[
90
- "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
91
  ]
92
  ),
93
  ]
@@ -100,6 +100,7 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
100
  {
101
  "task_id": datasets.Value("string"),
102
  "prompt": datasets.Value("string"),
 
103
  "declaration": datasets.Value("string"),
104
  "canonical_solution": datasets.Value("string"),
105
  "buggy_solution": datasets.Value("string"),
@@ -107,6 +108,7 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
107
  "failure_symptoms": datasets.Value("string"),
108
  "entry_point": datasets.Value("string"),
109
  "test": datasets.Value("string"),
 
110
  "example_test": datasets.Value("string"),
111
  }
112
  ),
@@ -138,6 +140,7 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
138
  yield key, {
139
  "task_id": row["task_id"],
140
  "prompt": row["prompt"],
 
141
  "declaration": row["declaration"],
142
  "buggy_solution": row["buggy_solution"],
143
  "canonical_solution": row["canonical_solution"],
@@ -145,6 +148,7 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
145
  "failure_symptoms": row["failure_symptoms"],
146
  "entry_point": row["entry_point"],
147
  "test": row["test"],
 
148
  "example_test": row["example_test"],
149
  }
150
  key += 1
 
57
  name="python",
58
  description="Python HumanEvalBugs",
59
  features=[
60
+ "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
61
  ]
62
  ),
63
  HumanEvalXBugsConfig(
64
  name="cpp",
65
  description="C++ HumanEvalBugs",
66
  features=[
67
+ "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
68
  ]
69
  ),
70
 
 
72
  name="go",
73
  description="Go HumanEvalBugs",
74
  features=[
75
+ "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
76
  ]
77
  ),
78
  HumanEvalXBugsConfig(
79
  name="java",
80
  description="Java HumanEvalBugs",
81
  features=[
82
+ "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
83
  ]
84
  ),
85
 
 
87
  name="js",
88
  description="JavaScript HumanEvalBugs",
89
  features=[
90
+ "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
91
  ]
92
  ),
93
  ]
 
100
  {
101
  "task_id": datasets.Value("string"),
102
  "prompt": datasets.Value("string"),
103
+ "import": datasets.Value("string"),
104
  "declaration": datasets.Value("string"),
105
  "canonical_solution": datasets.Value("string"),
106
  "buggy_solution": datasets.Value("string"),
 
108
  "failure_symptoms": datasets.Value("string"),
109
  "entry_point": datasets.Value("string"),
110
  "test": datasets.Value("string"),
111
+ "test_setup": datasets.Value("string"),
112
  "example_test": datasets.Value("string"),
113
  }
114
  ),
 
140
  yield key, {
141
  "task_id": row["task_id"],
142
  "prompt": row["prompt"],
143
+ "import": row.get("import", ""), # Only for Go
144
  "declaration": row["declaration"],
145
  "buggy_solution": row["buggy_solution"],
146
  "canonical_solution": row["canonical_solution"],
 
148
  "failure_symptoms": row["failure_symptoms"],
149
  "entry_point": row["entry_point"],
150
  "test": row["test"],
151
+ "test_setup": row.get("test_setup", ""), # Only for Go
152
  "example_test": row["example_test"],
153
  }
154
  key += 1