Problem with dataset = load_dataset('occiglot/occiglot-fineweb-v0.5', data_dir='it', verification_mode="no_checks")

#12 opened by giux78

Loading the it folder of the dataset raises an exception:

dataset = load_dataset('occiglot/occiglot-fineweb-v0.5', data_dir='it', verification_mode="no_checks")

with this error:

TypeError Traceback (most recent call last)
File ~/.local/lib/python3.10/site-packages/datasets/builder.py:1973, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
1972 _time = time.time()
-> 1973 for _, table in generator:
1974 if max_shard_size is not None and writer._num_bytes > max_shard_size:

File ~/.local/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py:96, in Parquet.generate_tables(self, files)
93 # Uncomment for debugging (will print the Arrow table size and elements)
94 # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
95 # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
---> 96 yield f"{file_idx}
{batch_idx}", self._cast_table(pa_table)
97 except ValueError as e:

File ~/.local/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py:74, in Parquet._cast_table(self, pa_table)
71 if self.info.features is not None:
72 # more expensive cast to support nested features with keys in a different order
73 # allows str <-> int/float or str to Audio for example
---> 74 pa_table = table_cast(pa_table, self.info.features.arrow_schema)
75 return pa_table

File ~/.local/lib/python3.10/site-packages/datasets/table.py:2240, in table_cast(table, schema)
2239 if table.schema != schema:
-> 2240 return cast_table_to_schema(table, schema)
2241 elif table.schema.metadata != schema.metadata:

File ~/.local/lib/python3.10/site-packages/datasets/table.py:2199, in cast_table_to_schema(table, schema)
2194 raise CastError(
2195 f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
2196 table_column_names=table.column_names,
2197 requested_column_names=list(features),
2198 )
-> 2199 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
2200 return pa.Table.from_arrays(arrays, schema=schema)

File ~/.local/lib/python3.10/site-packages/datasets/table.py:2199, in <listcomp>(.0)
2194 raise CastError(
2195 f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
2196 table_column_names=table.column_names,
2197 requested_column_names=list(features),
2198 )
-> 2199 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
2200 return pa.Table.from_arrays(arrays, schema=schema)

File ~/.local/lib/python3.10/site-packages/datasets/table.py:1793, in _wrap_for_chunked_arrays.<locals>.wrapper(array, *args, **kwargs)
1792 if isinstance(array, pa.ChunkedArray):
-> 1793 return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
1794 else:

File ~/.local/lib/python3.10/site-packages/datasets/table.py:1793, in <listcomp>(.0)
1792 if isinstance(array, pa.ChunkedArray):
-> 1793 return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
1794 else:

File ~/.local/lib/python3.10/site-packages/datasets/table.py:2066, in cast_array_to_feature(array, feature, allow_number_to_str)
2065 return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
-> 2066 raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")

TypeError: Couldn't cast array of type
struct<data_set: string, file_path: string, source: string, timestamp: string, token_count: int64, url: string>
to
{'data_set': Value(dtype='string', id=None), 'file_path': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'token_count': Value(dtype='int64', id=None), 'url': Value(dtype='string', id=None)}

The above exception was the direct cause of the following exception:

DatasetGenerationError Traceback (most recent call last)
Cell In[12], line 2
1 dataset_name = "occiglot/occiglot-fineweb-v0.5"
----> 2 dataset = load_dataset(dataset_name, data_dir='it', verification_mode="no_checks")
4 #dataset = dataset.shuffle(seed=42).select(range(1000))
6 '''
7 def format_chat_template(row):
8 row["chosen"] = tokenizer.apply_chat_template(row["chosen"], tokenize=False)
(...)
16 dataset = dataset.train_test_split(test_size=0.01)
17 '''

File ~/.local/lib/python3.10/site-packages/datasets/load.py:2582, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
2579 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
2581 # Download and prepare data
-> 2582 builder_instance.download_and_prepare(
2583 download_config=download_config,
2584 download_mode=download_mode,
2585 verification_mode=verification_mode,
2586 try_from_hf_gcs=try_from_hf_gcs,
2587 num_proc=num_proc,
2588 storage_options=storage_options,
2589 )
2591 # Build dataset for splits
2592 keep_in_memory = (
2593 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
2594 )

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:1005, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
1003 if num_proc is not None:
1004 prepare_split_kwargs["num_proc"] = num_proc
-> 1005 self._download_and_prepare(
1006 dl_manager=dl_manager,
1007 verification_mode=verification_mode,
1008 **prepare_split_kwargs,
1009 **download_and_prepare_kwargs,
1010 )
1011 # Sync info
1012 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:1100, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
1096 split_dict.add(split_generator.split_info)
1098 try:
1099 # Prepare split will record examples associated to the split
-> 1100 self._prepare_split(split_generator, **prepare_split_kwargs)
1101 except OSError as e:
1102 raise OSError(
1103 "Cannot find data file. "
1104 + (self.manual_download_instructions or "")
1105 + "\nOriginal error:\n"
1106 + str(e)
1107 ) from None

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:1860, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size)
1858 job_id = 0
1859 with pbar:
-> 1860 for job_id, done, content in self._prepare_split_single(
1861 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
1862 ):
1863 if done:
1864 result = content

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:2016, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
2014 if isinstance(e, DatasetGenerationError):
2015 raise
-> 2016 raise DatasetGenerationError("An error occurred while generating the dataset") from e
2018 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)

DatasetGenerationError: An error occurred while generating the dataset
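
The root cause is the final TypeError above: the parquet shards under it/ carry a struct column with the fields data_set, file_path, source, timestamp, token_count, and url, while the dataset's declared features expect title instead of source and timestamp, so the shard's schema cannot be cast to the repo-level schema. A quick way to confirm this on a downloaded shard (the file name below is a placeholder, not the real shard name):

```python
# A minimal sketch, assuming one 'it' parquet shard is already downloaded;
# "shard.parquet" is a placeholder for the actual file name.
import pyarrow.parquet as pq

schema = pq.read_schema("shard.parquet")
print(schema)  # shows which struct fields this shard actually contains
```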

Occiglot org

Using the data directly with HF datasets might not work. Can you try downloading the parquet files first and then opening them?
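
For reference, a minimal sketch of that workaround, assuming the Italian shards sit under an it/ prefix in the repo (check the repo's file listing; the glob pattern below is an assumption):

```python
# Sketch of the suggested workaround: download the parquet shards first, then
# load them with the generic parquet builder so the schema is inferred from
# the files themselves rather than from the repo-level features.
# The "it/*" pattern is an assumption about the repo layout.
from huggingface_hub import snapshot_download
from datasets import load_dataset

local_dir = snapshot_download(
    repo_id="occiglot/occiglot-fineweb-v0.5",
    repo_type="dataset",
    allow_patterns=["it/*"],
)

dataset = load_dataset("parquet", data_files=f"{local_dir}/it/*.parquet")
```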
