eddie14 committed on
Commit
f920220
1 Parent(s): 4d12544
Files changed (1)
  1. common_crawl.py +3 -8
common_crawl.py CHANGED
@@ -7,7 +7,7 @@ from typing import Optional
 from warcio.archiveiterator import ArchiveIterator
 import datasets
 from datasets import (
-    BuilderConfig, Version, GeneratorBasedBuilder, DownloadManager, SplitGenerator, Split, Features, Value, DatasetInfo
+    BuilderConfig, Version, DownloadManager, SplitGenerator, Split, Features, Value
 )
 
 
@@ -48,7 +48,7 @@ class CommonCrawlConfig(BuilderConfig):
         self.url = url
 
 
-class CommonCrawl(GeneratorBasedBuilder):
+class CommonCrawl(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIG_CLASS = CommonCrawlConfig
     BUILDER_CONFIGS = [
         CommonCrawlConfig(name=f"crawl_data_{i+1}", description="", features="text", data_url=url, citation="", url="", version=VERSION)
@@ -69,7 +69,7 @@ class CommonCrawl(GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager: DownloadManager):
-        downloaded_files = dl_manager.download_and_extract({"train": self.config.url})
+        downloaded_files = dl_manager.download_and_extract({"train": self.config.data_url})
         return [SplitGenerator(Split.TRAIN, gen_kwargs={"data_file": downloaded_files["train"]})]
 
     def _generate_examples(self, filepath):
@@ -78,8 +78,3 @@ class CommonCrawl(GeneratorBasedBuilder):
             if record.rec_type == 'response' and record.http_headers.get_header('Content-Type') == 'text/html':
                 text = self._process(_decode_text(record.content_stream().read()))
                 yield {"text": str(text)}
-
-
-from datasets import load_dataset
-
-load_dataset("psyche/common_crawl")