ssolito committed (verified)
Commit c519bed · 1 parent: ab014d9

Upload parlament_parla_v3_asr_a.py

Files changed (1): parlament_parla_v3_asr_a.py (+302, -0)
parlament_parla_v3_asr_a.py ADDED (new file, 302 lines):
import csv
import os

import datasets

_NAME = "parlament_parla_v3_asr_a"
_VERSION = "1.0.0"

_DESCRIPTION = """
This is the third version of the ParlamentParla speech corpus for Catalan: a collection of speech recordings with transcriptions intended for Automatic Speech Recognition (ASR) applications.
"""

_CITATION = """
@misc{bscib32024,
    title={ParlamentParla v3 - Speech Corpus of Catalan Parliamentary Sessions},
    author={Baybars, Kulebi},
    publisher={Barcelona Supercomputing Center},
    year={2024},
    url={https://huggingface.co/datasets/projecte-aina/parlament_parla_v3_asr_a},
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/projecte-aina/parlament_parla_v3_asr_a"
_LICENSE = "CC-BY-4.0, see https://creativecommons.org/licenses/by/4.0/deed.es"

_BASE_DATA_DIR = "corpus/"

# The corpus is partitioned along three axes: recording quality ("clean"/"other"),
# the usual train/test/dev split, and segment length ("short"/"long"), giving
# twelve splits in total. Build the metadata (CSV) and tarball-list (.paths)
# locations for every split instead of spelling out 24 near-identical constants.
_QUALITIES = ("clean", "other")
_SPLIT_NAMES = ("train", "test", "dev")
_LENGTHS = ("short", "long")

_METADATA_PATHS = {}
_TAR_LIST_PATHS = {}
for _length in _LENGTHS:
    for _quality in _QUALITIES:
        for _split in _SPLIT_NAMES:
            _key = f"{_quality}_{_split}_{_length}"
            _METADATA_PATHS[_key] = os.path.join(
                _BASE_DATA_DIR, "files", f"{_quality}_{_split}_parlament_{_length}.csv"
            )
            _TAR_LIST_PATHS[_key] = os.path.join(
                _BASE_DATA_DIR, "files", f"tars_{_quality}_{_split}_{_length}.paths"
            )

# Split names in their original order: all "short" splits first, then all "long".
_CONFIG_KEYS = list(_METADATA_PATHS)

class ParlamentASRConfig(datasets.BuilderConfig):
    """BuilderConfig for Parlament ASR."""

    def __init__(self, name=_NAME, **kwargs):
        # The corpus ships a single configuration, so the name defaults to _NAME.
        super().__init__(name=name, **kwargs)


class ParlamentASR(datasets.GeneratorBasedBuilder):
    """Parlament ASR."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        ParlamentASRConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "identifier": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "segment_path": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the per-split metadata CSVs and the .paths files that list
        # the audio tarballs belonging to each split.
        metadata_paths = {
            key: dl_manager.download_and_extract(path)
            for key, path in _METADATA_PATHS.items()
        }
        tar_list_paths = {
            key: dl_manager.download_and_extract(path)
            for key, path in _TAR_LIST_PATHS.items()
        }

        # Each .paths file contains one tarball path per line.
        hash_tar_files = {}
        for key, paths_file in tar_list_paths.items():
            with open(paths_file, "r") as f:
                hash_tar_files[key] = [line.rstrip("\n") for line in f]

        audio_paths = dl_manager.download(hash_tar_files)

        # In streaming mode the tarballs are iterated in place and never
        # extracted to disk, so the local paths are None placeholders.
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths)
            if not dl_manager.is_streaming
            else {key: [None] * len(audio_paths[key]) for key in _CONFIG_KEYS}
        )

        return [
            datasets.SplitGenerator(
                name=key,
                gen_kwargs={
                    "audio_archives": [
                        dl_manager.iter_archive(archive)
                        for archive in audio_paths[key]
                    ],
                    "local_extracted_archives_paths": local_extracted_audio_paths[key],
                    "metadata_paths": metadata_paths[key],
                },
            )
            for key in _CONFIG_KEYS
        ]

    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        features = ["segment_path", "text"]

        # Index the metadata CSV by segment identifier for constant-time lookups.
        with open(metadata_paths) as f:
            metadata = {row["identifier"]: row for row in csv.DictReader(f, delimiter=",")}

        for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
            for audio_filename, audio_file in audio_archive:
                audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
                # Point at the extracted file on disk when available; in
                # streaming mode fall back to the in-archive filename.
                path = (
                    os.path.join(local_extracted_archive_path, audio_filename)
                    if local_extracted_archive_path
                    else audio_filename
                )

                yield audio_id, {
                    "identifier": audio_id,
                    **{feature: metadata[audio_id][feature] for feature in features},
                    "audio": {"path": path, "bytes": audio_file.read()},
                }
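
For reference, a minimal usage sketch (not part of the uploaded file): it assumes only that the script is loaded from the repository given in _HOMEPAGE, and the split names come from _CONFIG_KEYS above. Depending on the installed datasets version, script-based datasets may additionally require trust_remote_code=True:

from datasets import load_dataset

# Stream a single split so the audio tarballs are not all downloaded up front.
# Valid split names ("clean_train_short", ..., "other_dev_long") match _CONFIG_KEYS.
ds = load_dataset(
    "projecte-aina/parlament_parla_v3_asr_a",
    split="clean_train_short",
    streaming=True,
    trust_remote_code=True,  # needed for loading scripts in recent datasets releases
)

sample = next(iter(ds))
print(sample["identifier"], sample["text"])

In streaming mode the generator reads audio bytes directly out of the tar archives, which is why local_extracted_archives_paths is filled with None placeholders rather than extracted directories.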