import gzip
import logging

import requests
import urllib.request
from typing import Optional
from warcio.archiveiterator import ArchiveIterator
import datasets
from datasets import (
    BuilderConfig, Version, DownloadManager, SplitGenerator, Split, Features, Value
)


# Base URL that every relative WARC path from the crawl listing is joined onto.
COMMON = "https://data.commoncrawl.org/"

# Language codes (ISO 639-1 style, plus 'zh-cn'/'zh-tw' variants).
# Fix: two entries were corrupted by a bad line-wrap into 'he,\nhi' and
# 'pl,\npt'; restored as the four separate codes 'he', 'hi', 'pl', 'pt'.
LANGUAGES = ['af', 'ar', 'bg', 'bn', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'fi', 'fr', 'gu',
             'he', 'hi', 'hr', 'hu', 'id', 'it', 'ja', 'kn', 'ko', 'lt', 'lv', 'mk', 'ml', 'mr', 'ne', 'nl', 'no', 'pa',
             'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'so', 'sq', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'ur', 'vi',
             'zh-cn', 'zh-tw']

# Gzipped listing of all WARC file paths for the CC-MAIN-2022-21 crawl.
URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-21/warc.paths.gz"


# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)
# Dataset version shared by every BuilderConfig generated below.
VERSION = Version("1.0.0")


def get_common_crawl_url(url: str):
    """Fetch a gzipped path listing and return the full Common Crawl URLs.

    Downloads the gzip file at ``url`` (one relative WARC path per line),
    decompresses it in-memory, and prefixes every line with ``COMMON`` to
    produce absolute download URLs.
    """
    with urllib.request.urlopen(url) as response:
        with gzip.GzipFile(fileobj=response) as archive:
            paths = archive.read().decode().splitlines()
    return [COMMON + path for path in paths]


def _decode_text(_bytes):
    try:
        return _bytes.decode('utf-8')
    except:
        return None


class CommonCrawlConfig(BuilderConfig):
    """BuilderConfig carrying the extra per-crawl-file metadata.

    Stores the WARC file download URL (``data_url``) plus feature, citation,
    homepage-URL and optional label-class information alongside the standard
    ``BuilderConfig`` fields.
    """

    def __init__(self, features, data_url, citation, url, version, label_classes: Optional[tuple] = None, **kwargs):
        """Forward ``version`` and ``**kwargs`` to BuilderConfig, keep the rest."""
        super().__init__(version=version, **kwargs)
        self.url = url
        self.citation = citation
        self.data_url = data_url
        self.label_classes = label_classes
        self.features = features


class CommonCrawl(datasets.GeneratorBasedBuilder):
    """Dataset builder that streams HTML response records from Common Crawl WARCs."""

    BUILDER_CONFIG_CLASS = CommonCrawlConfig
    # NOTE: this performs a network request at import time to enumerate the
    # crawl's WARC files; each file becomes one numbered config ("1", "2", ...).
    BUILDER_CONFIGS = [
        CommonCrawlConfig(name=f"{i+1}", description="", features="text", data_url=url, citation="", url="", version=VERSION)
        for i, url in enumerate(get_common_crawl_url(URL))
    ]

    def _info(self):
        """Return dataset metadata: each example is an int ``id`` plus ``text`` string."""
        return datasets.DatasetInfo(
            description="",
            features=Features(
                {
                    "id": Value("int32"),
                    "text": Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/hkjeon13",
            citation="",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download the configured WARC file and expose it as the single train split."""
        downloaded_files = dl_manager.download_and_extract({"train": self.config.data_url})
        return [SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]

    def _generate_examples(self, filepath):
        """Yield ``(key, {"id", "text"})`` for each decodable HTML response record.

        Fix: ``number`` was initialized but never incremented, so every example
        was emitted with the duplicate key/id 0; it now counts up per example.
        """
        logger.info("generating examples from = %s", filepath)
        number = 0
        # NOTE(review): assumes `filepath` has the form "<proto>://...::<url>"
        # so the real URL sits after "::" — confirm against what
        # download_and_extract returns for this data_url.
        for record in ArchiveIterator(requests.get(filepath.split("::")[1], stream=True).raw, arc2warc=True):
            # NOTE(review): strict equality skips headers such as
            # "text/html; charset=UTF-8" — confirm whether that filtering
            # is intentional before loosening it.
            if record.rec_type == 'response' and record.http_headers.get_header('Content-Type') == 'text/html':
                text = _decode_text(record.content_stream().read())
                if text is not None:
                    yield number, {"id": number, "text": text}
                    number += 1