carlosdanielhernandezmena committed
Commit
9f2c4f3
1 Parent(s): 876442a

Delete loading script

Files changed (1)
  1. dummy_corpus_asr_es.py +0 -155
dummy_corpus_asr_es.py DELETED
@@ -1,155 +0,0 @@
-from collections import defaultdict
-import os
-import json
-import csv
-
-import datasets
-
-_NAME="dummy_corpus_asr_es"
-_VERSION="1.0.0"
-
-_DESCRIPTION = """
-An extremely small corpus of 40 audio files taken from Common Voice (es) with the objective of testing how to share datasets in Hugging Face.
-"""
-
-_CITATION = """
-@misc{dummy-corpus-asr-es,
-    title={Dummy Corpus for ASR in Spanish.},
-    author={Hernandez Mena, Carlos Daniel},
-    year={2022},
-    url={https://huggingface.co/datasets/carlosdanielhernandezmena/dummy_corpus_asr_es},
-}
-"""
-
-_HOMEPAGE = "https://huggingface.co/datasets/carlosdanielhernandezmena/dummy_corpus_asr_es"
-
-_LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/"
-
-_BASE_DATA_DIR = "data/"
-_METADATA_TRAIN = _BASE_DATA_DIR + "train.tsv"
-_METADATA_TEST = _BASE_DATA_DIR + "test.tsv"
-_METADATA_DEV = _BASE_DATA_DIR + "dev.tsv"
-
-class DummyCorpusAsrEsConfig(datasets.BuilderConfig):
-    """BuilderConfig for Dummy Corpus ASR ES."""
-
-    def __init__(self, name, **kwargs):
-        name=_NAME
-        super().__init__(name=name, **kwargs)
-
-class DummyCorpusAsrEs(datasets.GeneratorBasedBuilder):
-    """The Dummy Corpus ASR ES dataset."""
-
-    VERSION = datasets.Version(_VERSION)
-    BUILDER_CONFIGS = [
-        DummyCorpusAsrEsConfig(
-            name=_NAME,
-            version=datasets.Version(_VERSION),
-        )
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "audio_id": datasets.Value("string"),
-                "audio": datasets.Audio(sampling_rate=16000),
-                "split": datasets.Value("string"),
-                "gender": datasets.Value("string"),
-                "normalized_text": datasets.Value("string"),
-                "relative_path": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-
-        metadata_train=dl_manager.download_and_extract(_METADATA_TRAIN)
-        metadata_test=dl_manager.download_and_extract(_METADATA_TEST)
-        metadata_dev=dl_manager.download_and_extract(_METADATA_DEV)
-
-        meta_paths={"train":metadata_train,"test":metadata_test,"dev":metadata_dev}
-
-        with open(metadata_train) as f:
-            hash_meta_train = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
-
-        with open(metadata_test) as f:
-            hash_meta_test = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
-
-        with open(metadata_dev) as f:
-            hash_meta_dev = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
-
-        hash_audios=defaultdict(dict)
-        hash_audios["train"]=[]
-        for audio_in in hash_meta_train:
-            hash_audios["train"].append(hash_meta_train[audio_in]["relative_path"])
-
-        hash_audios["test"]=[]
-        for audio_in in hash_meta_test:
-            hash_audios["test"].append(hash_meta_test[audio_in]["relative_path"])
-
-        hash_audios["dev"]=[]
-        for audio_in in hash_meta_dev:
-            hash_audios["dev"].append(hash_meta_dev[audio_in]["relative_path"])
-
-        relative_paths=hash_audios
-
-        audio_paths = dl_manager.download(hash_audios)
-
-        local_extracted_audio_paths = dl_manager.download_and_extract(audio_paths)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
-                    "local_extracted_archives_paths": local_extracted_audio_paths["train"],
-                    "metadata_paths": meta_paths["train"],
-                    "relative_paths": relative_paths["train"],
-                }
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["dev"]],
-                    "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
-                    "metadata_paths": meta_paths["dev"],
-                    "relative_paths": relative_paths["dev"],
-                }
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]],
-                    "local_extracted_archives_paths": local_extracted_audio_paths["test"],
-                    "metadata_paths": meta_paths["test"],
-                    "relative_paths": relative_paths["test"],
-                }
-            ),
-        ]
-
-    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths, relative_paths):
-
-        features = ["normalized_text","gender","split","relative_path"]
-
-        meta_path = metadata_paths
-
-        with open(meta_path) as f:
-            metadata = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
-
-        for audio_archive, local_path, rel_path in zip(audio_archives, local_extracted_archives_paths, relative_paths):
-            #audio_id = rel_path.split(os.sep)[-1].split(".flac")[0]
-            audio_id = os.path.splitext(os.path.basename(rel_path))[0]
-            path = local_path
-
-            yield audio_id, {
-                "audio_id": audio_id,
-                **{feature: metadata[audio_id][feature] for feature in features},
-                "audio": {"path": path},
-            }
-
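With the script removed, the dataset would normally be consumed through the standard datasets API rather than this custom builder. A minimal sketch, assuming the repo id from _HOMEPAGE above and that the repository's data files remain resolvable without a loading script (hypothetical usage, not taken from this commit):

from datasets import load_dataset

# Repo id taken from _HOMEPAGE in the deleted script; whether the train/dev/test
# splits resolve identically now depends on how the data files are laid out
# after this commit.
ds = load_dataset("carlosdanielhernandezmena/dummy_corpus_asr_es")
print(ds)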