import gzip
import logging
import urllib.request
from typing import Optional

from warcio.archiveiterator import ArchiveIterator

from datasets import (BuilderConfig, DatasetInfo, DownloadManager, Features,
                      GeneratorBasedBuilder, Split, SplitGenerator, Value, Version)

COMMON = "https://data.commoncrawl.org/"
LANGUAGES = ['af', 'ar', 'bg', 'bn', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa',
             'fi', 'fr', 'gu', 'he', 'hi', 'hr', 'hu', 'id', 'it', 'ja', 'kn', 'ko', 'lt', 'lv',
             'mk', 'ml', 'mr', 'ne', 'nl', 'no', 'pa', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'so',
             'sq', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'ur', 'vi', 'zh-cn', 'zh-tw']
URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-21/warc.paths.gz"

logger = logging.getLogger(__name__)


def get_common_crawl_url(url: str):
    """Fetch the gzipped path listing and return the full WARC URLs."""
    with urllib.request.urlopen(url) as req:
        with gzip.GzipFile(fileobj=req) as uncomp:
            return [COMMON + path for path in uncomp.read().decode().splitlines()]


def _decode_text(_bytes):
    """Decode a WARC payload as UTF-8, returning None for undecodable content."""
    try:
        return _bytes.decode("utf-8")
    except UnicodeDecodeError:
        return None


class CommonCrawlConfig(BuilderConfig):
    def __init__(self, features, data_url, citation, url, version,
                 label_classes: Optional[tuple] = None, **kwargs):
        super().__init__(version=version, **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class CommonCrawl(GeneratorBasedBuilder):
    # Note: this performs a network request at import time to enumerate the WARC files.
    _URLS = get_common_crawl_url(URL)
    BUILDER_CONFIGS = [
        CommonCrawlConfig(
            name=f"crawl_data_{i + 1}",
            description="",
            features="text",
            data_url=url,
            citation="",
            url=url,
            version=Version("1.0.0"),
        )
        for i, url in enumerate(_URLS)
    ]
    # The original default, "first_domain", matched none of the generated config names.
    DEFAULT_CONFIG_NAME = "crawl_data_1"

    def _info(self) -> DatasetInfo:
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager: DownloadManager):
        downloaded_files = dl_manager.download_and_extract({"train": self.config.url})
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        # Iterate over the locally downloaded WARC file instead of re-streaming it
        # from the web: builders have no self.url attribute, and the download
        # manager has already fetched the file to `filepath`.
        with open(filepath, "rb") as stream:
            for key, record in enumerate(ArchiveIterator(stream, arc2warc=True)):
                if record.rec_type != "response":
                    continue
                # Content-Type headers usually carry a charset suffix
                # ("text/html; charset=UTF-8"), so match on the prefix.
                content_type = record.http_headers.get_header("Content-Type") or ""
                if not content_type.startswith("text/html"):
                    continue
                text = self._process(_decode_text(record.content_stream().read()))
                if text is not None and self.detect_languages(text):
                    yield key, {"text": text}

    def _process(self, text):
        # Placeholder: the original called an undefined self._process. A real
        # implementation would strip HTML markup from the decoded payload.
        return text

    def detect_languages(self, text):
        # Placeholder: the original called an undefined self.detect_languages.
        # A real implementation would run language identification and keep only
        # documents whose language code appears in LANGUAGES.
        return True


if __name__ == "__main__":
    print(CommonCrawl())
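
# Usage sketch: with this script saved locally, it can be loaded through the
# standard `datasets` entry point. The filename common_crawl.py and the
# streaming flag below are assumptions, not part of the original; each config
# corresponds to a single WARC file, which is on the order of a gigabyte, so
# streaming avoids downloading it up front:
#
#   from datasets import load_dataset
#   ds = load_dataset("./common_crawl.py", "crawl_data_1", split="train", streaming=True)
#   print(next(iter(ds))["text"][:200])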