"""Wikipedia snippets in parquet format"""

import pyarrow.parquet as pq

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@ONLINE {wikidump,
    author = {Wikimedia Foundation},
    title = {Wikimedia Downloads},
    url = {https://dumps.wikimedia.org}
}
"""

_DESCRIPTION = """\
The dataset was built from the Wikipedia dump (https://dumps.wikimedia.org/).
Each example contains the content of one full Wikipedia article, cleaned to strip
markup and unwanted sections (references, etc.).
"""

_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)


class WikipediaSnippetsStreamed(datasets.GeneratorBasedBuilder):
    """English Wiki40B articles streamed from a single parquet file"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "wikidata_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "version_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org",
            license=_LICENSE,
            citation=_CITATION,
        )
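
    # For reference, one yielded (key, example) pair from this builder has the
    # following shape (values invented purely for illustration):
    #   ("0_0", {"wikidata_id": "Q42", "text": "Douglas Adams was ...", "version_id": "1161106"})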

    def _split_generators(self, dl_manager):
        url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wiki40b/en/1.1.0/wiki40b-train.parquet"
        downloaded_file = dl_manager.download(url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        with open(filepath, "rb") as f:
            pf = pq.ParquetFile(f)
            # Read one row group at a time to keep memory bounded, then yield one
            # example per row so each record matches the scalar string features
            # declared in _info (yielding a whole row group would produce lists).
            for i in range(pf.num_row_groups):
                batch = pf.read_row_group(i).to_pydict()
                for j, (wikidata_id, text, version_id) in enumerate(
                    zip(batch["wikidata_id"], batch["text"], batch["version_id"])
                ):
                    yield f"{i}_{j}", {"wikidata_id": wikidata_id, "text": text, "version_id": version_id}
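

# A minimal usage sketch, not part of the builder itself. It assumes this file
# is saved locally as "wikipedia_snippets_streamed.py" (the path is illustrative).
# Streaming mode lets iteration start without downloading the full parquet file.
if __name__ == "__main__":
    streamed_dataset = datasets.load_dataset(
        "wikipedia_snippets_streamed.py",  # hypothetical local path to this script
        split="train",
        streaming=True,
    )
    first_example = next(iter(streamed_dataset))
    print(first_example["wikidata_id"], first_example["text"][:80])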