Another fix
wikipedia_snippets_streamed.py (+11 -13)
@@ -32,8 +32,9 @@ class WikipediaSnippetsStreamed(datasets.ArrowBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-                    "…
+                    "wikidata_id": datasets.Value("string"),
                     "text": datasets.Value("string"),
+                    "version_id": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
@@ -43,18 +44,15 @@ class WikipediaSnippetsStreamed(datasets.ArrowBasedBuilder):
 
     def _split_generators(self, dl_manager):
         url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wiki40b/en/1.1.0/wiki40b-train.parquet"
-        …
+        downloaded_file = dl_manager.download(url)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"…
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
         ]
 
-    def _generate_tables(self, …
-        "…
-        …
-        …
-        …
-        …
-        for i in range(pf.num_row_groups):
-            id_ = f"{filepath_id}_{i}"
-            yield id_, pf.read_row_group(i)
+    def _generate_tables(self, filepath):
+        logger.info("generating examples from = %s", filepath)
+        with open(filepath, "rb") as f:
+            pf = pq.ParquetFile(f)
+            for i in range(pf.num_row_groups):
+                id_ = f"{filepath}_{i}"
+                yield id_, pf.read_row_group(i)
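Putting the two hunks together, here is a minimal sketch of the updated script. Only the class body shown in the diff is confirmed; the imports, the logger setup, the _DESCRIPTION string, and the _info wrapper are assumptions filled in from the usual datasets builder conventions (pq is presumably pyarrow.parquet, given the ParquetFile and read_row_group calls):

import datasets
import pyarrow.parquet as pq  # assumed: the diff uses pq.ParquetFile / read_row_group

logger = datasets.utils.logging.get_logger(__name__)  # assumed logger setup

_DESCRIPTION = "Wikipedia snippets streamed from a wiki40b Parquet file."  # assumed placeholder


class WikipediaSnippetsStreamed(datasets.ArrowBasedBuilder):
    def _info(self):
        # The diff only shows these keyword arguments; wrapping them in
        # _info/DatasetInfo is the standard builder convention, assumed here.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "wikidata_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "version_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wiki40b/en/1.1.0/wiki40b-train.parquet"
        downloaded_file = dl_manager.download(url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_tables(self, filepath):
        logger.info("generating examples from = %s", filepath)
        with open(filepath, "rb") as f:
            pf = pq.ParquetFile(f)
            # Yield one pyarrow Table per row group, so the whole Parquet
            # file never has to be loaded into memory at once.
            for i in range(pf.num_row_groups):
                id_ = f"{filepath}_{i}"
                yield id_, pf.read_row_group(i)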
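For reference, a hedged usage sketch: assuming the script is saved locally as wikipedia_snippets_streamed.py, datasets' streaming mode should let it be consumed without materializing the full Parquet file (the local path and the take(2) count are illustrative, not from the commit):

from datasets import load_dataset

# streaming=True returns an IterableDataset; examples are read lazily,
# one Parquet row group at a time, via _generate_tables above.
wiki = load_dataset("wikipedia_snippets_streamed.py", streaming=True, split="train")

# Peek at the first two examples without preparing the whole dataset.
for example in wiki.take(2):
    print(example["wikidata_id"], example["text"][:80])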