loubnabnl (HF staff) committed
Commit 2be258b
1 Parent(s): f0b5618

add deduplication to preprocessing

Files changed (1)
  1. preprocessing.py (+44 -1)
preprocessing.py CHANGED
@@ -1,6 +1,14 @@
 from tqdm import tqdm
 from datasets import load_dataset, Dataset
 
+import hashlib
+import re
+import time
+from datasets import load_dataset
+
+
+PATTERN = re.compile(r"\s+")
+
 def parse_data(ds):
     """Parse data into markdown-code pairs"""
     markdowns = []
@@ -40,10 +48,45 @@ def parse_data(ds):
         licenses.extend([license] * len(inner_markdowns))
     return markdowns, code_snippets, paths, repo_names, licenses
 
+
+def get_hash(example):
+    """Get hash of content field."""
+    text = example["markdown"] + example["code"]
+    return {"hash": hashlib.md5(re.sub(PATTERN, "", text).encode("utf-8")).hexdigest()}
+
+def preprocess(example):
+    """Chain all preprocessing steps into one function to not fill cache."""
+    results = dict()
+    results.update(get_hash(example))
+    return results
+
+def check_uniques(example, uniques):
+    """Check if current hash is still in set of unique hashes and remove if true."""
+    if example["hash"] in uniques:
+        uniques.remove(example["hash"])
+        return True
+    else:
+        return False
+
+def filter(example, uniques):
+    if not check_uniques(example, uniques):
+        return False
+    else:
+        return True
+
 if __name__ == "__main__":
     ds = load_dataset("codeparrot/github-jupyter-parsed", split="train")
     print("Parsing data...")
     markdowns, code_snippets, paths, repo_names, licenses = parse_data(ds)
     data = {"markdown": markdowns, "code": code_snippets, "path": paths, "repo_name": repo_names, "license": licenses}
     parsed_data = Dataset.from_dict(data)
-    parsed_data.push_to_hub("codeparrot/github-markdown-to-code")
+
+    print("Deduplication...")
+    parsed_data = parsed_data.map(preprocess)
+    # Deduplicate hashes
+    uniques = set(parsed_data.unique("hash"))
+    frac = len(uniques) / len(parsed_data)
+    print(f"Fraction of duplicates: {1-frac:.2%}")
+    ds_filter = parsed_data.filter(filter, fn_kwargs={"uniques": uniques})
+
+    ds_filter.push_to_hub("codeparrot/github-jupyter-text-code-pairs")
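
For readers skimming the patch: the added deduplication hashes the concatenated markdown and code of each pair with MD5 after stripping all whitespace, so pairs that differ only in formatting collapse to the same hash, and one representative per hash is kept. Below is a minimal self-contained sketch of the same idea on toy data; the toy rows and the `keep_first` name are illustrative, not part of the commit.

```python
import hashlib
import re

from datasets import Dataset

PATTERN = re.compile(r"\s+")


def get_hash(example):
    # Whitespace is stripped before hashing, so rows that differ only
    # in formatting produce the same MD5 digest.
    text = example["markdown"] + example["code"]
    return {"hash": hashlib.md5(re.sub(PATTERN, "", text).encode("utf-8")).hexdigest()}


# Toy data: the second row is a whitespace-only variant of the first.
ds = Dataset.from_dict({
    "markdown": ["# Load data", "#  Load   data", "# Plot"],
    "code": ["df = read()", "df=read()", "plot(df)"],
})

ds = ds.map(get_hash)
uniques = set(ds.unique("hash"))
print(f"Fraction of duplicates: {1 - len(uniques) / len(ds):.2%}")  # 33.33%


def keep_first(example, uniques):
    # The first row seen for a hash removes it from the set and is kept;
    # later rows with the same hash find it gone and are dropped.
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    return False


deduped = ds.filter(keep_first, fn_kwargs={"uniques": uniques})
assert len(deduped) == 2
```

The predicate is named `keep_first` here only to avoid shadowing Python's built-in `filter`; note also that the set-mutation trick assumes single-process filtering, since with `num_proc > 1` each worker would receive its own copy of `uniques`.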