eddie14 committed
Commit a5bd989
1 Parent(s): ef9d116
Files changed (1)
  1. common_crawl.py +9 -9
common_crawl.py CHANGED
@@ -22,7 +22,7 @@ URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-21/warc.paths.gz"
 
 
 logger = logging.getLogger(__name__)
-
+VERSION = Version("1.0.0")
 
 def get_common_crawl_url(url:str):
     with urllib.request.urlopen(url) as req:
@@ -48,14 +48,9 @@ class CommonCrawlConfig(BuilderConfig):
 
 
 class CommonCrawl(GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        CommonCrawlConfig(name=f"crawl_data_{i+1}", description="", features ="text", data_url=url, citation="", url="", version=Version("1.0.0"))
-        for i, url in enumerate(get_common_crawl_url(URL))
-    ]
-
     def _info(self):
         return DatasetInfo(
-            description="aaa",
+            description="",
             features=Features(
                 {
                     "text": Value("string"),
@@ -63,9 +58,14 @@ class CommonCrawl(GeneratorBasedBuilder):
             ),
             supervised_keys=None,
             homepage="https://github.com/hkjeon13",
-            citation="aaa",
+            citation="",
         )
 
+    BUILDER_CONFIGS = [
+        CommonCrawlConfig(name=f"crawl_data_{i+1}", description="", features ="text", data_url=url, citation="", url="", version=VERSION)
+        for i, url in enumerate(get_common_crawl_url(URL))
+    ]
+
     def _split_generators(self, dl_manager: DownloadManager):
         downloaded_files = dl_manager.download_and_extract({"train": self.config.url})
         return [SplitGenerator(Split.TRAIN, gen_kwargs={"data_file": downloaded_files["train"]})]
@@ -75,6 +75,6 @@ class CommonCrawl(GeneratorBasedBuilder):
         for record in ArchiveIterator(requests.get(self.url, stream=True).raw, arc2warc=True):
             if record.rec_type == 'response' and record.http_headers.get_header('Content-Type') == 'text/html':
                 text = self._process(_decode_text(record.content_stream().read()))
-                yield {"text": text}
+                yield {"text": str(text)}
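
For reference, a minimal usage sketch of the loader after this change. It assumes the script is saved locally as common_crawl.py and that config names follow the f"crawl_data_{i+1}" pattern built in BUILDER_CONFIGS; the local path and the slice printed below are illustrative assumptions, not part of this commit.

# Usage sketch (assumptions: script saved locally as ./common_crawl.py;
# "crawl_data_1" corresponds to the first URL returned by get_common_crawl_url(URL)).
from datasets import load_dataset

# Each example is {"text": "..."} as declared in _info() and yielded by _generate_examples().
dataset = load_dataset("./common_crawl.py", "crawl_data_1", split="train")
print(dataset[0]["text"][:200])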