import gzip
import logging

import requests
import urllib.request
from typing import Optional
from warcio.archiveiterator import ArchiveIterator

from datasets import (
    BuilderConfig, Version, GeneratorBasedBuilder, DownloadManager, SplitGenerator, Split
)


COMMON = "https://data.commoncrawl.org/"

LANGUAGES = ['af', 'ar', 'bg', 'bn', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'fi', 'fr', 'gu',
             'he', 'hi', 'hr', 'hu', 'id', 'it', 'ja', 'kn', 'ko', 'lt', 'lv', 'mk', 'ml', 'mr', 'ne', 'nl', 'no', 'pa',
             'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'so', 'sq', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'ur', 'vi',
             'zh-cn', 'zh-tw']

URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-21/warc.paths.gz"


logger = logging.getLogger(__name__)


def get_common_crawl_url(url: str):
    # Fetch the gzipped list of WARC paths and expand each entry into a full download URL.
    with urllib.request.urlopen(url) as req:
        with gzip.GzipFile(fileobj=req) as uncomp:
            return [COMMON + data for data in uncomp.read().decode().splitlines()]
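# Each line of warc.paths.gz is a path relative to COMMON, e.g.
#   crawl-data/CC-MAIN-2022-21/segments/<segment-id>/warc/<file>.warc.gz
# (placeholders shown for illustration), so the helper above returns one fully
# qualified download URL per WARC file.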


def _decode_text(_bytes):
    # Decode raw response bytes as UTF-8; return None for pages that are not valid UTF-8.
    try:
        return _bytes.decode('utf-8')
    except UnicodeDecodeError:
        return None


class CommonCrawlConfig(BuilderConfig):
    def __init__(self, features, data_url, citation, url, version, label_classes: Optional[tuple] = None, **kwargs):
        super(CommonCrawlConfig, self).__init__(version=version, **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class CommonCrawl(GeneratorBasedBuilder):
    _URLS = get_common_crawl_url(URL)

    BUILDER_CONFIGS = [
        CommonCrawlConfig(name=f"crawl_data_{i+1}", description="", features ="text", data_url=url, citation="", url=url, version=Version("1.0.0"))
        for i, url in enumerate(_URLS)
    ]

    DEFAULT_CONFIG_NAME = "first_domain"

    def _split_generators(self, dl_manager: DownloadManager):
        # Download (and decompress) the WARC file for this config; everything goes into a single train split.
        downloaded_files = dl_manager.download_and_extract({"train": self.config.url})
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]
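
    # The original script defines no _info(); this minimal sketch assumes the dataset
    # exposes a single string column named "text" (the field yielded by _generate_examples below).
    def _info(self):
        import datasets  # local import so the sketch stays self-contained
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage=COMMON,
            citation=self.config.citation,
        )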

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        # Iterate over the WARC file downloaded in _split_generators instead of re-fetching it.
        with open(filepath, "rb") as stream:
            for idx, record in enumerate(ArchiveIterator(stream, arc2warc=True)):
                if record.rec_type != 'response':
                    continue
                content_type = record.http_headers.get_header('Content-Type') or ''
                if not content_type.startswith('text/html'):
                    continue
                text = self._process(_decode_text(record.content_stream().read()))
                if text and self.detect_languages(text):
                    yield idx, {"text": text}