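"""Hugging Face `datasets` loading script that streams text from the
CC-MAIN-2022-49 Common Crawl snapshot: one builder config per WARC file, and
one example per HTML response, holding the plain text extracted from it.
"""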
import gzip
import logging
import re
import requests
import urllib.request
from typing import Optional
from warcio.archiveiterator import ArchiveIterator
from selectolax.parser import HTMLParser
import datasets
from datasets import (
    BuilderConfig, Version, DownloadManager, SplitGenerator, Split, Features, Value
)
COMMON = "https://data.commoncrawl.org/"
# ISO language codes for the crawl (not referenced elsewhere in this script).
LANGUAGES = ['af', 'ar', 'bg', 'bn', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'fi', 'fr', 'gu',
             'he', 'hi', 'hr', 'hu', 'id', 'it', 'ja', 'kn', 'ko', 'lt', 'lv', 'mk', 'ml', 'mr', 'ne', 'nl', 'no',
             'pa', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'so', 'sq', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk',
             'ur', 'vi', 'zh-cn', 'zh-tw']
URL = "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-49/warc.paths.gz"
logger = logging.getLogger(__name__)
VERSION = Version("1.0.0")


def get_common_crawl_url(url: str):
    # Fetch the gzipped WARC path listing and expand each relative path into a
    # full URL on the Common Crawl data host.
    with urllib.request.urlopen(url) as req:
        with gzip.GzipFile(fileobj=req) as uncomp:
            return [COMMON + data for data in uncomp.read().decode().splitlines()]
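# The list returned above holds full WARC URLs, along the lines of
# "https://data.commoncrawl.org/crawl-data/CC-MAIN-2022-49/segments/.../warc/....warc.gz"
# (segment and file names elided).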


def _decode_text(_bytes):
    # Best-effort UTF-8 decode; return None instead of raising on bad payloads.
    try:
        return _bytes.decode('utf-8')
    except UnicodeDecodeError:
        return None


def _preprocessing(text):
    # Strip <script>/<style> subtrees, then flatten the body text and squeeze
    # runs of blank lines and tabs.
    tree = HTMLParser(text)
    if tree.body is None:
        return None
    for tag in tree.css('script'):
        tag.decompose()
    for tag in tree.css('style'):
        tag.decompose()
    text = tree.body.text(separator='\n')
    text = re.sub(r"\n{2,}", "\n\n", text)
    text = re.sub(r"\t{2,}", "\t\t", text)
    return text
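# A rough sanity check for _preprocessing (hypothetical input): a call such as
#     _preprocessing("<html><body><p>hello</p><script>x()</script></body></html>")
# should return "hello", with the script contents stripped.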


class CommonCrawlConfig(BuilderConfig):
    """BuilderConfig carrying the WARC URL for a single crawl file."""

    def __init__(self, features, data_url, citation, url, version, label_classes: Optional[tuple] = None, **kwargs):
        super(CommonCrawlConfig, self).__init__(version=version, **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class CommonCrawl(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = CommonCrawlConfig
    # One config per WARC file in the snapshot ("1", "2", ...); note that this
    # fetches the remote path listing at import time.
    BUILDER_CONFIGS = [
        CommonCrawlConfig(name=f"{i + 1}", description="", features="text", data_url=url, citation="", url="",
                          version=VERSION)
        for i, url in enumerate(get_common_crawl_url(URL))
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="",
            features=Features(
                {
                    "id": Value("int32"),
                    "text": Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/hkjeon13",
            citation="",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # Each config points at a single WARC URL; download (and, for .gz,
        # extract) it and hand the resulting path to _generate_examples.
        downloaded_files = dl_manager.download_and_extract({"train": self.config.data_url})
        return [SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        # In streaming mode the path may arrive as an fsspec chained URL
        # ("gzip://...::https://..."); keep only the final component.
        filepath = filepath.split("::")[-1]
        _open = requests.get(filepath, stream=True).raw if filepath.startswith("http") else open(filepath, "rb")
        number = 0
        for record in ArchiveIterator(_open, arc2warc=True):
            # Keep only HTML HTTP responses; Content-Type often carries a
            # charset suffix (e.g. "text/html; charset=UTF-8").
            if record.rec_type != 'response' or record.http_headers is None:
                continue
            content_type = record.http_headers.get_header('Content-Type') or ''
            if not content_type.startswith('text/html'):
                continue
            html = _decode_text(record.content_stream().read())
            if html is None:
                continue
            text = _preprocessing(html)
            if text is not None:
                yield number, {"id": number, "text": text}
                number += 1
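

# A minimal usage sketch (the repo id below is hypothetical -- point
# load_dataset at wherever this script is hosted; recent `datasets` releases
# also require trust_remote_code=True for script-based loaders):
#
#     from datasets import load_dataset
#     ds = load_dataset("psyche/common_crawl", name="1", split="train", streaming=True)
#     for example in ds.take(3):
#         print(example["id"], example["text"][:100])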