carlosdanielhernandezmena committed
Commit b8830cf
1 Parent(s): caf5677

Upload cv17_es_other_automatically_verified.py

cv17_es_other_automatically_verified.py ADDED
@@ -0,0 +1,139 @@
+ from collections import defaultdict
+ import os
+ import csv
+
+ import datasets
+
+ # The metadata TSV contains very long fields, so raise the csv field size limit.
+ csv.field_size_limit(100000000)
+
+ _NAME = "cv17_es_other_automatically_verified"
+ _VERSION = "1.0.0"
+ _AUDIO_EXTENSIONS = ".mp3"
+
+ _DESCRIPTION = """
+ Split called -other- of the Spanish Common Voice v17.0 that was automatically verified
+ using various ASR systems.
+ """
+
+ _CITATION = """
+ @misc{carlosmena2024cv17autoveri,
+       title={Spanish Common Voice v17.0 Split Other Automatically Verified},
+       author={Mena, Carlos},
+       publisher={Barcelona Supercomputing Center},
+       year={2024},
+       url={https://huggingface.co/datasets/projecte-aina/cv17_es_other_automatically_verified},
+ }
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/projecte-aina/cv17_es_other_automatically_verified"
+
+ _LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/"
+
+ _BASE_DATA_DIR = "corpus/"
+
+ _METADATA_OTHER = os.path.join(_BASE_DATA_DIR, "files", "other.tsv")
+
+ _TARS_REPO = os.path.join(_BASE_DATA_DIR, "files", "tars_repo.paths")
+
+
+ class CV17EsOtherAutomaticallyVerifiedConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Spanish Common Voice v17.0 Split Other Automatically Verified."""
+
+     def __init__(self, name, **kwargs):
+         # The dataset exposes a single configuration, so the name is fixed.
+         name = _NAME
+         super().__init__(name=name, **kwargs)
+
+
+ class CV17EsOtherAutomaticallyVerified(datasets.GeneratorBasedBuilder):
+     """Spanish Common Voice v17.0 Split Other Automatically Verified."""
+
+     VERSION = datasets.Version(_VERSION)
+     BUILDER_CONFIGS = [
+         CV17EsOtherAutomaticallyVerifiedConfig(
+             name=_NAME,
+             version=datasets.Version(_VERSION),
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "audio": datasets.Audio(sampling_rate=16000),
+                 "client_id": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "sentence_id": datasets.Value("string"),
+                 "sentence": datasets.Value("string"),
+                 "sentence_domain": datasets.Value("string"),
+                 "up_votes": datasets.Value("int32"),
+                 "down_votes": datasets.Value("int32"),
+                 "age": datasets.Value("string"),
+                 "gender": datasets.Value("string"),
+                 "accents": datasets.Value("string"),
+                 "variant": datasets.Value("string"),
+                 "locale": datasets.Value("string"),
+                 "segment": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download the metadata TSV and the list of tar archives that hold the audio.
+         metadata_other = dl_manager.download_and_extract(_METADATA_OTHER)
+         tars_repo = dl_manager.download_and_extract(_TARS_REPO)
+
+         hash_tar_files = defaultdict(dict)
+         with open(tars_repo, "r") as f:
+             hash_tar_files["other"] = [path.replace("\n", "") for path in f]
+
+         hash_meta_paths = {"other": metadata_other}
+         audio_paths = dl_manager.download(hash_tar_files)
+
+         splits = ["other"]
+         # In streaming mode the archives are iterated directly and never extracted locally.
+         local_extracted_audio_paths = (
+             dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
+             {
+                 split: [None] * len(audio_paths[split]) for split in splits
+             }
+         )
+
+         return [
+             datasets.SplitGenerator(
+                 name="other",
+                 gen_kwargs={
+                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other"]],
+                     "local_extracted_archives_paths": local_extracted_audio_paths["other"],
+                     "metadata_paths": hash_meta_paths["other"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
+         features = ["client_id", "sentence_id", "sentence", "sentence_domain", "up_votes",
+                     "down_votes", "age", "gender", "accents", "variant", "locale", "segment"]
+
+         # Index the metadata by audio file name (the "path" column of the TSV).
+         with open(metadata_paths) as f:
+             metadata = {x["path"]: x for x in csv.DictReader(f, delimiter="\t")}
+
+         for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
+             for audio_filename, audio_file in audio_archive:
+                 audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
+                 audio_id = audio_id + _AUDIO_EXTENSIONS
+                 path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
+
+                 try:
+                     yield audio_id, {
+                         "path": audio_id,
+                         **{feature: metadata[audio_id][feature] for feature in features},
+                         "audio": {"path": path, "bytes": audio_file.read()},
+                     }
+                 except KeyError:
+                     # Skip audio files that have no entry in the metadata TSV.
+                     continue
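
For reference, a minimal sketch of how this split could be loaded once the script is hosted on the Hub. The repository id is taken from _HOMEPAGE above; the streaming flag and the trust_remote_code argument are assumptions about the caller's datasets version, not something defined by this commit.

from datasets import load_dataset

# Hypothetical usage sketch: repo id taken from _HOMEPAGE in the script above.
# Recent datasets releases require trust_remote_code=True to execute a
# Hub-hosted loading script such as this one.
cv17_other = load_dataset(
    "projecte-aina/cv17_es_other_automatically_verified",
    split="other",
    streaming=True,           # the script also supports a non-streaming download
    trust_remote_code=True,
)

sample = next(iter(cv17_other))
print(sample["sentence"], sample["audio"]["path"])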