import hashlib
import re

from datasets import Dataset, load_dataset
from tqdm import tqdm


# whitespace is stripped before hashing so that formatting-only differences
# do not defeat deduplication
PATTERN = re.compile(r"\s+")

def parse_data(ds):
    """Parse notebooks into (markdown, code) pairs with their metadata."""
    markdowns = []
    code_snippets = []
    paths = []
    repo_names = []
    licenses = []
    for i in tqdm(range(len(ds))):
        types = ds[i]["types"]
        path = ds[i]["path"]
        repo = ds[i]["repo_name"]
        license = ds[i]["license"]
        if types[0] == "code":
            # drop the first code cell so the notebook starts with markdown
            cells = ds[i]["cells"][1:]
        else:
            # drop the first two cells (markdown followed by code): the first
            # markdown cell of a notebook is often a long description of the
            # whole notebook rather than of the code cell that follows it
            cells = ds[i]["cells"][2:]
        # the remaining cells alternate markdown/code: even indices hold
        # markdown, odd indices hold the code it describes
        if len(cells) % 2 == 0:
            inner_markdowns = [cells[j] for j in range(len(cells)) if j % 2 == 0]
            inner_code_snippets = [cells[j + 1] for j in range(len(cells) - 1) if j % 2 == 0]
        else:
            # drop the last markdown cell, which has no code cell after it
            inner_markdowns = [cells[j] for j in range(len(cells) - 1) if j % 2 == 0]
            inner_code_snippets = [cells[j + 1] for j in range(len(cells) - 2) if j % 2 == 0]

        markdowns.extend(inner_markdowns)
        code_snippets.extend(inner_code_snippets)

        paths.extend([path] * len(inner_markdowns))
        repo_names.extend([repo] * len(inner_markdowns))
        licenses.extend([license] * len(inner_markdowns))
    return markdowns, code_snippets, paths, repo_names, licenses
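
# Illustration only (hypothetical record, not part of the pipeline): with
#   types = ["code", "markdown", "code", "markdown"]
#   cells = ["import x", "## Load data", "df = load()", "## Done"]
# the leading code cell is dropped, leaving an odd number of cells; the
# pairing step then emits [("## Load data", "df = load()")] and discards the
# trailing "## Done" markdown cell that has no code after it.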


def get_hash(example):
    """Hash the concatenated markdown and code, ignoring all whitespace."""
    text = example["markdown"] + example["code"]
    return {"hash": hashlib.md5(PATTERN.sub("", text).encode("utf-8")).hexdigest()}
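
# Illustration only (hypothetical inputs): these two examples hash identically
# because whitespace is removed before hashing, so the second would later be
# dropped as a duplicate of the first:
#   get_hash({"markdown": "# Load data", "code": "df = load()"})
#   get_hash({"markdown": "#Load data", "code": "df=load()"})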

def preprocess(example):
    """Chain all preprocessing steps into one function so a single map pass is cached."""
    results = dict()
    results.update(get_hash(example))
    return results

def check_uniques(example, uniques):
    """Return True on the first occurrence of a hash and remove it from the set."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False

def filter(example, uniques):
    """Keep only the first occurrence of each hash (note: shadows the builtin filter)."""
    return check_uniques(example, uniques)
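
# Illustration only (hypothetical hashes): with uniques = {"a", "b"}, rows
# hashing to ["a", "a", "b"] are kept, dropped, and kept respectively. The
# first occurrence removes its hash from the set, so any later duplicate
# fails the membership test and is filtered out.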

if __name__ == "__main__":
    ds = load_dataset("codeparrot/github-jupyter-parsed", split="train")
    print("Parsing data...")
    markdowns, code_snippets, paths, repo_names, licenses = parse_data(ds)
    data = {"markdown": markdowns, "code": code_snippets, "path": paths, "repo_name": repo_names, "license": licenses}
    parsed_data = Dataset.from_dict(data)

    print("Deduplication...")
    parsed_data = parsed_data.map(preprocess)
    # collect the set of unique hashes; the filter below keeps the first row
    # seen for each hash and drops later duplicates (this relies on a
    # single-process, in-order filter pass, since check_uniques mutates the
    # shared set)
    uniques = set(parsed_data.unique("hash"))
    frac = len(uniques) / len(parsed_data)
    print(f"Fraction of duplicates: {1 - frac:.2%}")
    ds_filter = parsed_data.filter(filter, fn_kwargs={"uniques": uniques})

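    # the resulting dataset has one row per (markdown, code) pair, with the
    # path, repo_name, license, and hash columns carried along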
    ds_filter.push_to_hub("codeparrot/github-jupyter-text-code-pairs")