# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HumanEval-X dataset."""


import json

import datasets



_DESCRIPTION = """
HumanEval-X is a benchmark for evaluating the multilingual ability of code generation models. \
It consists of 820 high-quality, human-crafted data samples (each with test cases) in Python, C++, Java, JavaScript, and Go, and can be used for various tasks.
"""

_HOMEPAGE = "https://github.com/THUDM/CodeGeeX"
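# A minimal usage sketch, not executed by this script. It assumes the script is hosted in a
# dataset repository reachable under the id "THUDM/humaneval-x"; substitute the actual
# repository path if it differs. Each language config exposes a single "test" split.
#
#     from datasets import load_dataset
#
#     humaneval_x_python = load_dataset("THUDM/humaneval-x", "python", split="test")
#     print(humaneval_x_python[0]["task_id"], humaneval_x_python[0]["prompt"][:80])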

def get_url(name):
    # Relative path, inside the dataset repository, of the JSONL file for the given language config.
    urls = {"test": f"data/{name}/data/humaneval.jsonl"}
    return urls

def split_generator(dl_manager, name):
    downloaded_files = dl_manager.download(get_url(name))
    return [
        datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
    ]


class HumanEvalXConfig(datasets.BuilderConfig):
    """BuilderConfig """

    def __init__(self, name, description, features, **kwargs):
        super(HumanEvalXConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
        self.name = name
        self.description = description
        self.features = features


class HumanEvalX(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        HumanEvalXConfig(
            name="python",
            description="Python HumanEval",
            features=["task_id", "prompt", "declaration", "canonical_solution", "test", "example_test"]
        ),
        HumanEvalXConfig(
            name="cpp",
            description="C++ HumanEval",
            features=["task_id", "prompt", "declaration", "canonical_solution", "test", "example_test"]
        ),
        HumanEvalXConfig(
            name="go",
            description="Go HumanEval",
            features=["task_id", "prompt", "declaration", "canonical_solution", "test", "example_test"]
        ),
        HumanEvalXConfig(
            name="java",
            description="Java HumanEval",
            features=["task_id", "prompt", "declaration", "canonical_solution", "test", "example_test"]
        ),
        HumanEvalXConfig(
            name="js",
            description="JavaScript HumanEval",
            features=["task_id", "prompt", "declaration", "canonical_solution", "test", "example_test"]
        ),
    ]
    DEFAULT_CONFIG_NAME = "python"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"task_id": datasets.Value("string"),
                                        "prompt": datasets.Value("string"),
                                        "declaration": datasets.Value("string"),
                                        "canonical_solution": datasets.Value("string"),
                                        "test": datasets.Value("string"),
                                        "example_test": datasets.Value("string"),
                                        }),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        # All language configs share the same repository layout, so one helper covers every case.
        return split_generator(dl_manager, self.config.name)
    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                row = json.loads(line)
                yield key, {
                    "task_id": row["task_id"],
                    "prompt": row["prompt"],
                    "declaration": row["declaration"],
                    "canonical_solution": row["canonical_solution"],
                    "test": row["test"],
                    "example_test": row["example_test"],
                }
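
# Each line of humaneval.jsonl is assumed to be a JSON object carrying at least the six fields
# consumed above. An illustrative (not verbatim) record looks like:
#
#     {"task_id": "Python/0", "prompt": "def ...", "declaration": "def ...",
#      "canonical_solution": "    return ...", "test": "def check(...): ...",
#      "example_test": "def check(...): ..."}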