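"""Hugging Face `datasets` loading script that streams WARC files from the
CC-MAIN-2022-49 Common Crawl snapshot and yields plain text extracted from
HTML response records."""
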
import gzip
import logging
import re
import requests
import urllib.request
from typing import Optional
from warcio.archiveiterator import ArchiveIterator
from selectolax.parser import HTMLParser

import datasets
from datasets import (
    BuilderConfig, Version, DownloadManager, SplitGenerator, Split, Features, Value
)


COMMON = "https://data.commoncrawl.org/"

# ISO language codes covered by Common Crawl (currently unused by the builder below).
LANGUAGES = ['af', 'ar', 'bg', 'bn', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'fi', 'fr', 'gu',
             'he', 'hi', 'hr', 'hu', 'id', 'it', 'ja', 'kn', 'ko', 'lt', 'lv', 'mk', 'ml', 'mr', 'ne', 'nl', 'no',
             'pa', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'so', 'sq', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk',
             'ur', 'vi', 'zh-cn', 'zh-tw']


# Gzipped listing of all WARC file paths in the CC-MAIN-2022-49 snapshot.
URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-49/warc.paths.gz"


logger = logging.getLogger(__name__)
VERSION = Version("1.0.0")


def get_common_crawl_url(url: str):
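    """Download the gzipped WARC path listing and return the full file URLs."""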
    with urllib.request.urlopen(url) as req:
        with gzip.GzipFile(fileobj=req) as uncomp:
            return [COMMON + data for data in uncomp.read().decode().splitlines()]


def _decode_text(_bytes):
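    """Decode raw response bytes as UTF-8, returning None on failure."""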
    try:
        return _bytes.decode('utf-8')
    except UnicodeDecodeError:
        return None


def _preprocessing(text):
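    """Parse HTML, drop <script>/<style> tags, and return the body text with
    runs of blank lines and tabs collapsed."""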
    tree = HTMLParser(text)

    if tree.body is None:
        return None

    for tag in tree.css('script'):
        tag.decompose()
    for tag in tree.css('style'):
        tag.decompose()

    text = tree.body.text(separator='\n')
    text = re.sub("\n{2,}", "\n\n", text)
    text = re.sub("\t{2,}", "\t\t", text)
    return text


class CommonCrawlConfig(BuilderConfig):
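    """BuilderConfig holding the data URL of a single Common Crawl WARC file."""
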
    def __init__(self, features, data_url, citation, url, version, label_classes: Optional[tuple] = None, **kwargs):
        super().__init__(version=version, **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class CommonCrawl(datasets.GeneratorBasedBuilder):
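    """Builder that extracts plain text from Common Crawl HTML responses."""
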
    BUILDER_CONFIG_CLASS = CommonCrawlConfig
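    # One config per WARC file in the snapshot. Building this list downloads
    # the path listing, i.e. it performs a network request at import time.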
    BUILDER_CONFIGS = [
        CommonCrawlConfig(name=f"{i+1}", description="", features="text", data_url=url, citation="", url="", version=VERSION)
        for i, url in enumerate(get_common_crawl_url(URL))
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="",
            features=Features(
                {
                    "id": Value("int32"),
                    "text": Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/hkjeon13",
            citation="",
        )

    def _split_generators(self, dl_manager: DownloadManager):
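        # In streaming mode download_and_extract returns a chained path such as
        # "gzip://<file>::<url>"; _generate_examples strips that prefix below.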
        downloaded_files = dl_manager.download_and_extract({"train": self.config.data_url})
        return [SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        # Keep only the underlying URL or local path from any chained protocol prefix.
        filepath = filepath.split("::")[-1]
        _open = requests.get(filepath, stream=True).raw if filepath.startswith("http") else open(filepath, "rb")
        number = 0
        for record in ArchiveIterator(_open, arc2warc=True):
            if record.rec_type != 'response':
                continue
            # Content-Type headers often carry parameters ("text/html; charset=UTF-8"),
            # so match on the prefix rather than comparing the full header value.
            content_type = record.http_headers.get_header('Content-Type')
            if content_type is None or not content_type.startswith('text/html'):
                continue
            text = _decode_text(record.content_stream().read())
            text = _preprocessing(text) if text is not None else None
            if text is not None:
                yield number, {"id": number, "text": text}
                number += 1
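

# Example usage (hypothetical local script path), loading the first WARC shard:
#   from datasets import load_dataset
#   ds = load_dataset("common_crawl.py", name="1", split="train", streaming=True)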