Modalities: Text
Languages: English
ArXiv: arXiv:2302.01973
Tags: code
Libraries: Datasets
License: cc-by-4.0
bc-transcoder / bc-transcoder.py
import json

import datasets

_DESCRIPTION = """The Transcoder dataset in BabelCode format. Currently supports translation from C++ and Python."""

_URL = "https://raw.githubusercontent.com/google-research/babelcode/main/data/hf_datasets/transcoder.jsonl"

_LANGUAGES = {
    "C++",
    "CSharp",
    "Dart",
    "Go",
    "Haskell",
    "Java",
    "Javascript",
    "Julia",
    "Kotlin",
    "Lua",
    "PHP",
    "Python",
    "R",
    "Rust",
    "Scala",
    "TypeScript",
}

_CITATION = """\
@article{orlanski2023measuring,
title={Measuring The Impact Of Programming Language Distribution},
author={Orlanski, Gabriel and Xiao, Kefan and Garcia, Xavier and Hui, Jeffrey and Howland, Joshua and Malmaud, Jonathan and Austin, Jacob and Singh, Rishah and Catasta, Michele},
journal={arXiv preprint arXiv:2302.01973},
year={2023}
}
@article{roziere2020unsupervised,
title={Unsupervised translation of programming languages},
author={Roziere, Baptiste and Lachaux, Marie-Anne and Chanussot, Lowik and Lample, Guillaume},
journal={Advances in Neural Information Processing Systems},
volume={33},
year={2020}
}"""
_HOMEPAGE = "https://github.com/google-research/babelcode"
_LICENSE = "CC-BY-4.0"
_VERSION = "1.0.0"
_KEYS_REMOVE = {
    "header",
}


class BCTranscoder(datasets.GeneratorBasedBuilder):
    """BC-Transcoder"""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION,
        ),
    ] + [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION + f" Examples are only in {lang}.",
        )
        for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {
                "qid": datasets.Value("string"),
                "title": datasets.Value("string"),
                "language": datasets.Value("string"),
                "text": datasets.Value("string"),
                "signature_with_docstring": datasets.Value("string"),
                "signature": datasets.Value("string"),
                "arguments": datasets.Sequence(datasets.Value("string")),
                "entry_fn_name": datasets.Value("string"),
                "entry_cls_name": datasets.Value("string"),
                "test_code": datasets.Value("string"),
                "source_py": datasets.Value("string"),
                "source_cpp": datasets.Value("string"),
            }
        )
        description = _DESCRIPTION
        if self.config.name != 'all':
            description = _DESCRIPTION + f" Examples are only in {self.config.name}."
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields the examples from the dataset."""
        with open(filepath, encoding='utf-8') as file:
            id_ = 0
            for l in file:
                if not l.strip():
                    continue
                d = json.loads(l)
                # Per-language configs keep only the examples for that language.
                if self.config.name != 'all' and d['language'] != self.config.name:
                    continue
                # Rename the solution columns to match the declared features.
                d['source_py'] = d.pop('solution_python')
                d['source_cpp'] = d.pop('solution_cpp')
                for k in _KEYS_REMOVE:
                    d.pop(k)
                yield id_, d
                id_ += 1
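
For reference, a minimal usage sketch with the `datasets` library. The repository id below is an assumption based on the page header (gabeorlanski / bc-transcoder), newer `datasets` releases may require `trust_remote_code=True` to execute a Hub-hosted loading script, and the config name is either "all" or one of the languages in `_LANGUAGES`. The only split produced by the script is "test".

from datasets import load_dataset

# Assumed repository id, taken from the page header above; adjust if the
# dataset is hosted elsewhere. trust_remote_code may be required on recent
# `datasets` versions to run the loading script.
ds = load_dataset("gabeorlanski/bc-transcoder", "all", trust_remote_code=True)
print(ds["test"][0]["qid"], ds["test"][0]["language"])

# A per-language config keeps only the examples whose `language` field matches.
ds_py = load_dataset("gabeorlanski/bc-transcoder", "Python", trust_remote_code=True)
print(len(ds_py["test"]))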