carlosdanielhernandezmena committed on
Commit
d114e20
1 Parent(s): a2167d3

Delete loading script

Browse files
Files changed (1) hide show
  1. ciempiess_balance.py +0 -123
ciempiess_balance.py DELETED
@@ -1,123 +0,0 @@
1
- from collections import defaultdict
2
- import os
3
- import json
4
- import csv
5
-
6
- import datasets
7
-
8
# Canonical dataset name and version for the builder below.
_NAME="ciempiess_balance"
_VERSION="1.0.0"
# Extension of the audio files stored inside the tar shards.
_AUDIO_EXTENSIONS=".flac"

_DESCRIPTION = """
CIEMPIESS BALANCE is a Radio Corpus designed to create acoustic models for automatic speech recognition. It is "balance" because it is designed to balance the CIEMPIESS LIGHT, which means that if both corpora are combined, one will get a gender balanced corpus.
"""

_CITATION = """
@misc{carlosmenaciempiessbalance2018,
title={CIEMPIESS BALANCE CORPUS: Audio and Transcripts of Mexican Spanish Broadcast Conversations.},
ldc_catalog_no={LDC2018S11},
DOI={https://doi.org/10.35111/rfmw-n126},
author={Hernandez Mena, Carlos Daniel},
journal={Linguistic Data Consortium, Philadelphia},
year={2018},
url={https://catalog.ldc.upenn.edu/LDC2018S11},
}
"""

_HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC2018S11"

_LICENSE = "CC-BY-SA-4.0, See https://creativecommons.org/licenses/by-sa/4.0/"

# Repository-relative layout: the metadata TSV and the list of train-split
# tar shard paths both live under corpus/files/.
_BASE_DATA_DIR = "corpus/"
_METADATA_TRAIN = os.path.join(_BASE_DATA_DIR,"files", "metadata_train.tsv")

_TARS_TRAIN = os.path.join(_BASE_DATA_DIR,"files", "tars_train.paths")
36
-
37
class CiempiessBalanceConfig(datasets.BuilderConfig):
    """BuilderConfig for the CIEMPIESS BALANCE corpus.

    The corpus ships exactly one configuration, so any ``name`` the caller
    passes is deliberately overridden with the canonical ``_NAME``.  The
    parameter is now defaulted, which keeps every existing call site working
    (they all pass ``name=_NAME`` anyway) while also allowing
    ``CiempiessBalanceConfig()`` with no arguments.
    """

    def __init__(self, name=_NAME, **kwargs):
        # Force the canonical name regardless of what was passed; the
        # original code accepted a required ``name`` and then discarded it.
        super().__init__(name=_NAME, **kwargs)
43
-
44
class CiempiessBalance(datasets.GeneratorBasedBuilder):
    """CIEMPIESS BALANCE Corpus builder.

    Downloads a list of tar shards plus a TSV metadata file and yields one
    example per FLAC member found in the shards, joined with its transcript
    metadata by ``audio_id``.
    """

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        CiempiessBalanceConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
        )
    ]

    def _info(self):
        """Declare the example schema: 16 kHz audio plus the TSV metadata columns."""
        features = datasets.Features(
            {
                "audio_id": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "speaker_id": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "normalized_text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the metadata TSV and the tar shards for the train split.

        Returns a single TRAIN ``SplitGenerator`` whose kwargs feed
        ``_generate_examples``.
        """
        metadata_train = dl_manager.download_and_extract(_METADATA_TRAIN)
        tars_train = dl_manager.download_and_extract(_TARS_TRAIN)

        hash_tar_files = defaultdict(dict)
        # One repository-relative tar path per line.  strip() handles both
        # "\n" and "\r\n" line endings (the original replace('\n','') left a
        # trailing "\r" on CRLF files) and blank lines are skipped.
        with open(tars_train, "r", encoding="utf-8") as f:
            hash_tar_files["train"] = [line.strip() for line in f if line.strip()]

        hash_meta_paths = {"train": metadata_train}
        audio_paths = dl_manager.download(hash_tar_files)

        splits = ["train"]
        # In streaming mode the archives are never extracted locally, so the
        # per-archive local path is a None placeholder per shard.
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
            {
                split: [None] * len(audio_paths[split]) for split in splits
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["train"],
                    "metadata_paths": hash_meta_paths["train"],
                },
            ),
        ]

    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        """Yield ``(audio_id, example)`` pairs for the split.

        Args:
            audio_archives: iterators of ``(member_name, file_object)`` pairs,
                one iterator per tar shard.
            local_extracted_archives_paths: local extraction directory per
                shard, or ``None`` per shard when streaming.
            metadata_paths: path to the split's metadata TSV (a single path,
                despite the plural name kept for interface compatibility).
        """
        features = ["speaker_id", "gender", "duration", "normalized_text"]

        with open(metadata_paths, encoding="utf-8") as f:
            metadata = {row["audio_id"]: row for row in csv.DictReader(f, delimiter="\t")}

        for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
            for audio_filename, audio_file in audio_archive:
                # Tar member names always use "/" separators regardless of
                # host OS, so split on "/" (not os.sep, which broke on
                # Windows).  splitext is used instead of split(".flac") so a
                # ".flac" substring inside the stem cannot truncate the id.
                audio_id = os.path.splitext(audio_filename.split("/")[-1])[0]
                path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename

                yield audio_id, {
                    "audio_id": audio_id,
                    **{feature: metadata[audio_id][feature] for feature in features},
                    "audio": {"path": path, "bytes": audio_file.read()},
                }