common_crawl/common_crawl.py
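"""Hugging Face `datasets` loading script for raw Common Crawl WARC data.

Each builder config corresponds to one WARC file listed in the crawl's
warc.paths.gz index; examples are the UTF-8 decoded bodies of HTML responses.
"""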
import gzip
import logging
import urllib.request
from typing import Optional

from warcio.archiveiterator import ArchiveIterator

import datasets
from datasets import (
    BuilderConfig, Version, DownloadManager, SplitGenerator, Split, Features, Value
)
COMMON = "https://data.commoncrawl.org/"
# Note: LANGUAGES is declared for reference only; it is not used elsewhere in this script.
LANGUAGES = ['af', 'ar', 'bg', 'bn', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'fi', 'fr', 'gu',
             'he', 'hi', 'hr', 'hu', 'id', 'it', 'ja', 'kn', 'ko', 'lt', 'lv', 'mk', 'ml', 'mr', 'ne', 'nl', 'no',
             'pa', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'so', 'sq', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk',
             'ur', 'vi', 'zh-cn', 'zh-tw']
URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-21/warc.paths.gz"
logger = logging.getLogger(__name__)
VERSION = Version("1.0.0")
def get_common_crawl_url(url: str):
    """Return the full URL of every WARC file listed in the gzipped path index."""
    with urllib.request.urlopen(url) as req:
        with gzip.GzipFile(fileobj=req) as uncomp:
            return [COMMON + data for data in uncomp.read().decode().splitlines()]
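# For reference, each line of warc.paths.gz is a path relative to COMMON, e.g.
# "crawl-data/CC-MAIN-2022-21/segments/<segment-id>/warc/<file>.warc.gz"
# (the bracketed parts here are illustrative placeholders, not literal entries).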
def _decode_text(_bytes):
    """Decode raw response bytes as UTF-8, returning None for undecodable payloads."""
    try:
        return _bytes.decode('utf-8')
    except UnicodeDecodeError:
        return None
class CommonCrawlConfig(BuilderConfig):
    """BuilderConfig pointing at a single WARC file from the crawl."""

    def __init__(self, features, data_url, citation, url, version, label_classes: Optional[tuple] = None, **kwargs):
        super().__init__(version=version, **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
class CommonCrawl(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = CommonCrawlConfig

    # One config per WARC file in the crawl's path index. This list is built at
    # import time, so simply loading the script requires network access.
    BUILDER_CONFIGS = [
        CommonCrawlConfig(name=f"crawl_data_{i+1}", description="", features="text", data_url=url,
                          citation="", url="", version=VERSION)
        for i, url in enumerate(get_common_crawl_url(URL))
    ]
def _info(self):
return datasets.DatasetInfo(
description="",
features=Features(
{
"text": Value("string"),
}
),
supervised_keys=None,
homepage="https://github.com/hkjeon13",
citation="",
)
    def _split_generators(self, dl_manager: DownloadManager):
        # download_and_extract both fetches and gunzips the WARC file for this config.
        downloaded_files = dl_manager.download_and_extract({"train": self.config.data_url})
        # gen_kwargs keys must match the parameter names of _generate_examples.
        return [SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]
    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        # Iterate over the locally downloaded WARC file rather than re-streaming
        # it over HTTP; GeneratorBasedBuilder requires (key, example) pairs.
        with open(filepath, "rb") as stream:
            for idx, record in enumerate(ArchiveIterator(stream, arc2warc=True)):
                # Keep only HTML response records; the Content-Type header may
                # carry a charset suffix, so match loosely instead of by equality.
                content_type = record.http_headers.get_header('Content-Type') or ''
                if record.rec_type == 'response' and 'text/html' in content_type:
                    text = _decode_text(record.content_stream().read())
                    if text is None:
                        continue
                    yield idx, {"text": text}
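# Minimal usage sketch (an assumption, not part of the original script: it
# presumes this file is saved locally as "common_crawl.py", and "crawl_data_1"
# is the first of the auto-generated config names above):
#
#   from datasets import load_dataset
#   ds = load_dataset("common_crawl.py", "crawl_data_1", split="train")
#   print(next(iter(ds))["text"][:200])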