Convert dataset to Parquet

#10
README.md CHANGED
@@ -34,45 +34,15 @@ task_ids:
  - parsing
  paperswithcode_id: alt
  pretty_name: Asian Language Treebank
+ config_names:
+ - alt-en
+ - alt-jp
+ - alt-km
+ - alt-my
+ - alt-my-transliteration
+ - alt-my-west-transliteration
+ - alt-parallel
  dataset_info:
- - config_name: alt-parallel
-   features:
-   - name: SNT.URLID
-     dtype: string
-   - name: SNT.URLID.SNTID
-     dtype: string
-   - name: url
-     dtype: string
-   - name: translation
-     dtype:
-       translation:
-         languages:
-         - bg
-         - en
-         - en_tok
-         - fil
-         - hi
-         - id
-         - ja
-         - khm
-         - lo
-         - ms
-         - my
-         - th
-         - vi
-         - zh
-   splits:
-   - name: train
-     num_bytes: 68445916
-     num_examples: 18088
-   - name: validation
-     num_bytes: 3710979
-     num_examples: 1000
-   - name: test
-     num_bytes: 3814431
-     num_examples: 1019
-   download_size: 21285784
-   dataset_size: 75971326
  - config_name: alt-en
    features:
    - name: SNT.URLID
@@ -95,7 +65,7 @@ dataset_info:
    - name: test
      num_bytes: 567272
      num_examples: 1017
-   download_size: 3871379
+   download_size: 3781814
    dataset_size: 11187560
  - config_name: alt-jp
    features:
@@ -125,9 +95,9 @@ dataset_info:
    - name: test
      num_bytes: 1175592
      num_examples: 931
-   download_size: 13191239
+   download_size: 10355366
    dataset_size: 24245424
- - config_name: alt-my
+ - config_name: alt-km
    features:
    - name: SNT.URLID
      dtype: string
@@ -135,21 +105,23 @@ dataset_info:
      dtype: string
    - name: url
      dtype: string
-   - name: value
+   - name: km_pos_tag
+     dtype: string
+   - name: km_tokenized
      dtype: string
    splits:
    - name: train
-     num_bytes: 20433275
+     num_bytes: 12015371
      num_examples: 18088
    - name: validation
-     num_bytes: 1111410
+     num_bytes: 655212
      num_examples: 1000
    - name: test
-     num_bytes: 1135209
+     num_bytes: 673733
      num_examples: 1018
-   download_size: 3028302
-   dataset_size: 22679894
- - config_name: alt-km
+   download_size: 4344096
+   dataset_size: 13344316
+ - config_name: alt-my
    features:
    - name: SNT.URLID
      dtype: string
@@ -157,22 +129,20 @@ dataset_info:
      dtype: string
    - name: url
      dtype: string
-   - name: km_pos_tag
-     dtype: string
-   - name: km_tokenized
+   - name: value
      dtype: string
    splits:
    - name: train
-     num_bytes: 12015411
+     num_bytes: 20433243
      num_examples: 18088
    - name: validation
-     num_bytes: 655232
+     num_bytes: 1111394
      num_examples: 1000
    - name: test
-     num_bytes: 673753
+     num_bytes: 1135193
      num_examples: 1018
-   download_size: 2410832
-   dataset_size: 13344396
+   download_size: 6569025
+   dataset_size: 22679830
  - config_name: alt-my-transliteration
    features:
    - name: en
@@ -181,10 +151,10 @@ dataset_info:
      sequence: string
    splits:
    - name: train
-     num_bytes: 4249424
+     num_bytes: 4249316
      num_examples: 84022
-   download_size: 1232127
-   dataset_size: 4249424
+   download_size: 2163951
+   dataset_size: 4249316
  - config_name: alt-my-west-transliteration
    features:
    - name: en
@@ -193,18 +163,98 @@ dataset_info:
      sequence: string
    splits:
    - name: train
-     num_bytes: 7412043
+     num_bytes: 7411911
      num_examples: 107121
-   download_size: 2830071
-   dataset_size: 7412043
- config_names:
- - alt-en
- - alt-jp
- - alt-km
- - alt-my
- - alt-my-transliteration
- - alt-my-west-transliteration
- - alt-parallel
+   download_size: 2857511
+   dataset_size: 7411911
+ - config_name: alt-parallel
+   features:
+   - name: SNT.URLID
+     dtype: string
+   - name: SNT.URLID.SNTID
+     dtype: string
+   - name: url
+     dtype: string
+   - name: translation
+     dtype:
+       translation:
+         languages:
+         - bg
+         - en
+         - en_tok
+         - fil
+         - hi
+         - id
+         - ja
+         - khm
+         - lo
+         - ms
+         - my
+         - th
+         - vi
+         - zh
+   splits:
+   - name: train
+     num_bytes: 68445916
+     num_examples: 18088
+   - name: validation
+     num_bytes: 3710979
+     num_examples: 1000
+   - name: test
+     num_bytes: 3814431
+     num_examples: 1019
+   download_size: 34707907
+   dataset_size: 75971326
+ configs:
+ - config_name: alt-en
+   data_files:
+   - split: train
+     path: alt-en/train-*
+   - split: validation
+     path: alt-en/validation-*
+   - split: test
+     path: alt-en/test-*
+ - config_name: alt-jp
+   data_files:
+   - split: train
+     path: alt-jp/train-*
+   - split: validation
+     path: alt-jp/validation-*
+   - split: test
+     path: alt-jp/test-*
+ - config_name: alt-km
+   data_files:
+   - split: train
+     path: alt-km/train-*
+   - split: validation
+     path: alt-km/validation-*
+   - split: test
+     path: alt-km/test-*
+ - config_name: alt-my
+   data_files:
+   - split: train
+     path: alt-my/train-*
+   - split: validation
+     path: alt-my/validation-*
+   - split: test
+     path: alt-my/test-*
+ - config_name: alt-my-transliteration
+   data_files:
+   - split: train
+     path: alt-my-transliteration/train-*
+ - config_name: alt-my-west-transliteration
+   data_files:
+   - split: train
+     path: alt-my-west-transliteration/train-*
+ - config_name: alt-parallel
+   data_files:
+   - split: train
+     path: alt-parallel/train-*
+   - split: validation
+     path: alt-parallel/validation-*
+   - split: test
+     path: alt-parallel/test-*
+   default: true
  ---
  
  # Dataset Card for Asian Language Treebank (ALT)
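The new `configs` block above maps each config to its Parquet shards via `data_files` globs, so the dataset viewer and the `datasets` library can read the repository directly, with no loading script. A minimal loading sketch, assuming this PR targets a Hub dataset repository with id `alt` (adjust the repo id to the actual repository):

```python
from datasets import load_dataset

# "alt" is an assumed repo id for illustration; use the repository this PR targets.
ds = load_dataset("alt", "alt-en")  # resolved through the `configs` data_files globs
print(ds)                           # train / validation / test splits, served from Parquet

# `default: true` on alt-parallel makes it the config used when none is named.
parallel = load_dataset("alt")
print(parallel["train"][0]["translation"]["en"])
```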
alt-en/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4781b2072e22e14ecadf857d739ee3dde4bc92da0de7419421793649ed4db7bd
+ size 198031
alt-en/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f45bc270dcb512c8c9eddcc95fb8fd519034562f49161173c5c7266d059277ba
+ size 3397283
alt-en/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f44a8db070e4d5efaf94b58312c15cf4291fa1e8ea671f7a6a018ed598fd33e5
+ size 186500
alt-jp/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b26324b69caf21cd76df31d9690e373343e2c67778f94d8902a3f36bfcb59659
+ size 503984
alt-jp/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8261311ad16e2bd9d8522ee394b2669eb42b1242dec1181377282ee5083114cd
+ size 9343482
alt-jp/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c4b73a8a6f77c56c51eb947a7b70ed1bab255b383de26501fd0571fdd7c6add
+ size 507900
alt-km/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0aa67d57d58918e20e18114842e3c65bae0ebc6fc346b86aead05ee1a29c1c48
+ size 226454
alt-km/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:247ab63a2e22ecc4b0d6d0d1253a6e6ac578e7be7e57c5341ef34030ed3cc6b9
+ size 3901919
alt-km/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:574645f562ed68f962920fe60feb336a5d656197fe84d972c466acb6d37fcced
+ size 215723
alt-my-transliteration/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5784151722e03b202b10d76c6b39993ac125b0412fde34b821b97ff02a9b6a76
+ size 2163951
alt-my-west-transliteration/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32211139a984c890372ea264cf7a3ed08ff9d288355a444416c0e6df631b2f71
+ size 2857511
alt-my/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f709b4d9c3cf966f967cc2285a518f7df93c178f6b20a21c7f7e304f6ccb22f
+ size 340777
alt-my/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23509f0b2dc1827b64802456ebae65b2fdc701367984b3b72dbfeddb1838b175
+ size 5903589
alt-my/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:317766e8c7bb7f8dbbdba0dd0e21cb97625a60091a216922d4eba22296d1f69b
+ size 324659
alt-parallel/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:632ac5d29ef554abd68487254f452164e109b901eb45791d20df6df1073fc8e3
+ size 1786537
alt-parallel/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2d95c41b90c0fe94226f9b424e63f476b6027015df4beba3e5d7e19092a3a44
+ size 31211167
alt-parallel/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:646290f6bdb697763e8d00e98fb0487711d4b3ef948e199582197684739da6f8
+ size 1710203
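The files above are Git LFS pointers; the actual Parquet shards live in LFS storage, identified by the `oid` and `size` of each blob. A sketch of fetching and inspecting one shard directly, again under the assumption that the repo id is `alt`:

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Repo id "alt" is an assumption; point this at the repository the PR targets.
path = hf_hub_download(
    repo_id="alt",
    filename="alt-en/train-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.schema)    # SNT.URLID, SNT.URLID.SNTID, url, status, value
print(table.num_rows)  # should match the alt-en train split metadata
```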
alt.py DELETED
@@ -1,406 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
- """Asian Language Treebank (ALT) Project"""
- 
- 
- import os
- 
- import datasets
- 
- 
- _CITATION = """\
- @inproceedings{riza2016introduction,
-   title={Introduction of the asian language treebank},
-   author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},
-   booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},
-   pages={1--6},
-   year={2016},
-   organization={IEEE}
- }
- """
- 
- _HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/"
- 
- _DESCRIPTION = """\
- The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).
- """
- 
- _URLs = {
-     "alt": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206.zip",
-     "alt-en": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/English-ALT-20210218.zip",
-     "alt-jp": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/Japanese-ALT-20210218.zip",
-     "alt-my": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-alt-190530.zip",
-     "alt-my-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-en-transliteration.zip",
-     "alt-my-west-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/western-myanmar-transliteration.zip",
-     "alt-km": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/km-nova-181101.zip",
- }
- 
- _SPLIT = {
-     "train": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt",
-     "dev": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt",
-     "test": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt",
- }
- 
- _WIKI_URL = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206/URL.txt"
- 
- 
- class AltParallelConfig(datasets.BuilderConfig):
-     """BuilderConfig for ALT Parallel."""
- 
-     def __init__(self, languages, **kwargs):
-         """BuilderConfig for ALT Parallel.
- 
-         Args:
-           languages: languages that will be used for translation.
-           **kwargs: keyword arguments forwarded to super.
-         """
- 
-         name = "alt-parallel"
- 
-         description = "ALT Parallel Corpus"
-         super().__init__(
-             name=name,
-             description=description,
-             version=datasets.Version("1.0.0", ""),
-             **kwargs,
-         )
- 
-         available_langs = {"bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"}
-         for language in languages:
-             assert language in available_langs
- 
-         self.languages = languages
- 
- 
- class Alt(datasets.GeneratorBasedBuilder):
-     """Asian Language Treebank (ALT) Project"""
- 
-     BUILDER_CONFIGS = [
-         AltParallelConfig(
-             languages=["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
-         ),
-         datasets.BuilderConfig(name="alt-en", version=datasets.Version("2.0.0"), description="English ALT 2021 version"),
-         datasets.BuilderConfig(name="alt-jp", version=datasets.Version("2.0.0"), description="Japanese ALT 2021 version"),
-         datasets.BuilderConfig(name="alt-my", version=datasets.Version("1.0.0"), description="Myanmar ALT"),
-         datasets.BuilderConfig(name="alt-km", version=datasets.Version("1.0.0"), description="Khmer ALT"),
-         datasets.BuilderConfig(
-             name="alt-my-transliteration",
-             version=datasets.Version("1.0.0"),
-             description="Myanmar-English Transliteration Dataset",
-         ),
-         datasets.BuilderConfig(
-             name="alt-my-west-transliteration",
-             version=datasets.Version("1.0.0"),
-             description="Latin-Myanmar Transliteration Dataset",
-         ),
-     ]
- 
-     DEFAULT_CONFIG_NAME = "alt-parallel"
- 
-     def _info(self):
-         if self.config.name.startswith("alt-parallel"):
-             features = datasets.Features(
-                 {
-                     "SNT.URLID": datasets.Value("string"),
-                     "SNT.URLID.SNTID": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                     "translation": datasets.features.Translation(languages=self.config.languages),
-                 }
-             )
-         elif self.config.name == "alt-en":
-             features = datasets.Features(
-                 {
-                     "SNT.URLID": datasets.Value("string"),
-                     "SNT.URLID.SNTID": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                     "status": datasets.Value("string"),
-                     "value": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "alt-jp":
-             features = datasets.Features(
-                 {
-                     "SNT.URLID": datasets.Value("string"),
-                     "SNT.URLID.SNTID": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                     "status": datasets.Value("string"),
-                     "value": datasets.Value("string"),
-                     "word_alignment": datasets.Value("string"),
-                     "jp_tokenized": datasets.Value("string"),
-                     "en_tokenized": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "alt-my":
-             features = datasets.Features(
-                 {
-                     "SNT.URLID": datasets.Value("string"),
-                     "SNT.URLID.SNTID": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                     "value": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "alt-my-transliteration":
-             features = datasets.Features(
-                 {
-                     "en": datasets.Value("string"),
-                     "my": datasets.Sequence(datasets.Value("string")),
-                 }
-             )
-         elif self.config.name == "alt-my-west-transliteration":
-             features = datasets.Features(
-                 {
-                     "en": datasets.Value("string"),
-                     "my": datasets.Sequence(datasets.Value("string")),
-                 }
-             )
-         elif self.config.name == "alt-km":
-             features = datasets.Features(
-                 {
-                     "SNT.URLID": datasets.Value("string"),
-                     "SNT.URLID.SNTID": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                     "km_pos_tag": datasets.Value("string"),
-                     "km_tokenized": datasets.Value("string"),
-                 }
-             )
-         else:
-             raise
- 
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
- 
-     def _split_generators(self, dl_manager):
-         if self.config.name.startswith("alt-parallel"):
-             data_path = dl_manager.download_and_extract(_URLs["alt"])
-         else:
-             data_path = dl_manager.download_and_extract(_URLs[self.config.name])
- 
-         if self.config.name in {"alt-my-transliteration", "alt-my-west-transliteration"}:
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={"basepath": data_path, "split": None},
-                 )
-             ]
-         else:
-             data_split = dl_manager.download(_SPLIT)
- 
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={"basepath": data_path, "split": data_split["train"]},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={"basepath": data_path, "split": data_split["dev"]},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={"basepath": data_path, "split": data_split["test"]},
-                 ),
-             ]
- 
-     def _generate_examples(self, basepath, split=None):
-         allow_urls = {}
-         if split is not None:
-             with open(split, encoding="utf-8") as fin:
-                 for line in fin:
-                     sp = line.strip().split("\t")
-                     urlid = sp[0].replace("URL.", "")
-                     allow_urls[urlid] = {"SNT.URLID": urlid, "url": sp[1]}
- 
-         if self.config.name.startswith("alt-parallel"):
-             data = {}
-             for lang in self.config.languages:
-                 file_path = os.path.join(basepath, "ALT-Parallel-Corpus-20191206", f"data_{lang}.txt")
-                 with open(file_path, encoding="utf-8") as fin:
-                     for line in fin:
-                         line = line.strip()
-                         sp = line.split("\t")
- 
-                         _, urlid, sntid = sp[0].split(".")
-                         # Some lines have a trailing blank space: "SNT.102053.5598 " in data_fil.txt
-                         sntid = sntid.strip()
-                         if urlid not in allow_urls:
-                             continue
- 
-                         if sntid not in data:
-                             data[sntid] = {
-                                 "SNT.URLID": urlid,
-                                 "SNT.URLID.SNTID": sntid,
-                                 "url": allow_urls[urlid]["url"],
-                                 "translation": {},
-                             }
- 
-                         # Note that Japanese and Myanmar texts have empty sentence fields in this release.
-                         if len(sp) >= 2:
-                             data[sntid]["translation"][lang] = sp[1]
- 
-             for _id, item in enumerate(data.values()):
-                 yield _id, item
- 
-         elif self.config.name == "alt-en":
-             data = {}
-             for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
-                 file_path = os.path.join(basepath, "English-ALT-20210218", fname)
-                 with open(file_path, encoding="utf-8") as fin:
-                     for line in fin:
-                         line = line.strip()
-                         sp = line.split("\t")
- 
-                         _, urlid, sntid = sp[0].split(".")
-                         if urlid not in allow_urls:
-                             continue
- 
-                         d = {
-                             "SNT.URLID": urlid,
-                             "SNT.URLID.SNTID": sntid,
-                             "url": allow_urls[urlid]["url"],
-                             "status": "draft" if fname == "English-ALT-Draft.txt" else "reviewed",
-                             "value": sp[1],
-                         }
- 
-                         data[sntid] = d
- 
-             for _id, item in enumerate(data.values()):
-                 yield _id, item
- 
-         elif self.config.name == "alt-jp":
-             data = {}
-             for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
-                 file_path = os.path.join(basepath, "Japanese-ALT-20210218", fname)
-                 with open(file_path, encoding="utf-8") as fin:
-                     for line in fin:
-                         line = line.strip()
-                         sp = line.split("\t")
-                         _, urlid, sntid = sp[0].split(".")
-                         if urlid not in allow_urls:
-                             continue
- 
-                         d = {
-                             "SNT.URLID": urlid,
-                             "SNT.URLID.SNTID": sntid,
-                             "url": allow_urls[urlid]["url"],
-                             "value": sp[1],
-                             "status": "draft" if fname == "Japanese-ALT-Draft.txt" else "reviewed",
-                             "word_alignment": None,
-                             "en_tokenized": None,
-                             "jp_tokenized": None,
-                         }
- 
-                         data[sntid] = d
- 
-             keys = {
-                 "word_alignment": "word-alignment/data_ja.en-ja",
-                 "en_tokenized": "word-alignment/data_ja.en-tok",
-                 "jp_tokenized": "word-alignment/data_ja.ja-tok",
-             }
-             for k in keys:
-                 file_path = os.path.join(basepath, "Japanese-ALT-20210218", keys[k])
-                 with open(file_path, encoding="utf-8") as fin:
-                     for line in fin:
-                         line = line.strip()
-                         sp = line.split("\t")
- 
-                         # Note that Japanese and Myanmar texts have empty sentence fields in this release.
-                         if len(sp) < 2:
-                             continue
- 
-                         _, urlid, sntid = sp[0].split(".")
-                         if urlid not in allow_urls:
-                             continue
- 
-                         if sntid in data:
-                             data[sntid][k] = sp[1]
- 
-             for _id, item in enumerate(data.values()):
-                 yield _id, item
- 
-         elif self.config.name == "alt-my":
-             _id = 0
-             for fname in ["data"]:
-                 file_path = os.path.join(basepath, "my-alt-190530", fname)
-                 with open(file_path, encoding="utf-8") as fin:
-                     for line in fin:
-                         line = line.strip()
-                         sp = line.split("\t")
-                         _, urlid, sntid = sp[0].split(".")
-                         if urlid not in allow_urls:
-                             continue
- 
-                         yield _id, {
-                             "SNT.URLID": urlid,
-                             "SNT.URLID.SNTID": sntid,
-                             "url": allow_urls[urlid]["url"],
-                             "value": sp[1],
-                         }
-                         _id += 1
- 
-         elif self.config.name == "alt-km":
-             data = {}
-             for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
-                 file_path = os.path.join(basepath, "km-nova-181101", fname)
-                 with open(file_path, encoding="utf-8") as fin:
-                     for line in fin:
-                         line = line.strip()
-                         sp = line.split("\t")
-                         _, urlid, sntid = sp[0].split(".")
-                         if urlid not in allow_urls:
-                             continue
- 
-                         k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
-                         if sntid not in data:
-                             data[sntid] = {
-                                 "SNT.URLID": urlid,
-                                 "SNT.URLID.SNTID": sntid,
-                                 "url": allow_urls[urlid]["url"],
-                                 "km_pos_tag": None,
-                                 "km_tokenized": None,
-                             }
-                         data[sntid][k] = sp[1]
- 
-             for _id, item in enumerate(data.values()):
-                 yield _id, item
- 
-         elif self.config.name == "alt-my-transliteration":
-             file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
-             # Need to set errors='ignore' because of the unknown error
-             # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
-             # It might due to some issues related to Myanmar alphabets
-             with open(file_path, encoding="utf-8", errors="ignore") as fin:
-                 for _id, line in enumerate(fin):
-                     line = line.strip()
- 
-                     # I don't know why there are \x00 between |||. They don't show in the editor.
-                     line = line.replace("\x00", "")
-                     sp = line.split("|||")
- 
-                     # When I read data, it seems to have empty sentence betweem the actual sentence. Don't know why?
-                     if len(sp) < 2:
-                         continue
- 
-                     yield _id, {
-                         "en": sp[0].strip(),
-                         "my": [sp[1].strip()],
-                     }
- 
-         elif self.config.name == "alt-my-west-transliteration":
-             file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
-             # Need to set errors='ignore' because of the unknown error
-             # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
-             # It might due to some issues related to Myanmar alphabets
-             with open(file_path, encoding="utf-8", errors="ignore") as fin:
-                 for _id, line in enumerate(fin):
-                     line = line.strip()
-                     line = line.replace("\x00", "")
-                     sp = line.split("|||")
- 
-                     yield _id, {
-                         "en": sp[0].strip(),
-                         "my": [k.strip() for k in sp[1].split("|")],
-                     }
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"alt-parallel": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"SNT.URLID": {"dtype": "string", "id": null, "_type": "Value"}, "SNT.URLID.SNTID": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "translation": {"languages": ["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-parallel", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 68452188, "num_examples": 18088, "dataset_name": "alt"}, "validation": {"name": "validation", "num_bytes": 3710979, "num_examples": 1000, "dataset_name": "alt"}, "test": {"name": "test", "num_bytes": 3815565, "num_examples": 1019, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206.zip": {"num_bytes": 21105607, "checksum": "05f7b31b517d4c4e074bb7fb57277758c0e3e15d1ad9cfc5727e9bce79b07bbd"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt": {"num_bytes": 161862, "checksum": "d57d680eebc9823b65c74c5de95320f17c3a5ead94bfa66a6849f3ed0cdd411a"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt": {"num_bytes": 9082, "checksum": "e3d35c2f54e204216011a2509925b359c5712c768c2b17bc74e19b8d4ec7e50d"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt": {"num_bytes": 9233, "checksum": "6d67d6bf5c4e7574116355d71ef927c66aca2f7ab7267b14591ea250f24ec722"}}, "download_size": 21285784, "post_processing_size": null, "dataset_size": 75978732, "size_in_bytes": 97264516}, "alt-en": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"SNT.URLID": {"dtype": "string", "id": null, "_type": "Value"}, "SNT.URLID.SNTID": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "status": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-en", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10075569, "num_examples": 17889, "dataset_name": "alt"}, "validation": {"name": "validation", "num_bytes": 544719, "num_examples": 988, "dataset_name": "alt"}, "test": {"name": "test", "num_bytes": 567272, "num_examples": 1017, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/English-ALT-20210218.zip": {"num_bytes": 3691202, "checksum": "16fc4d1702637a8057018c48d43a941267c5ab575c239415ac32a00d4cc14b38"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt": {"num_bytes": 161862, "checksum": "d57d680eebc9823b65c74c5de95320f17c3a5ead94bfa66a6849f3ed0cdd411a"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt": {"num_bytes": 9082, "checksum": "e3d35c2f54e204216011a2509925b359c5712c768c2b17bc74e19b8d4ec7e50d"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt": {"num_bytes": 9233, "checksum": "6d67d6bf5c4e7574116355d71ef927c66aca2f7ab7267b14591ea250f24ec722"}}, "download_size": 3871379, "post_processing_size": null, "dataset_size": 11187560, "size_in_bytes": 15058939}, "alt-jp": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"SNT.URLID": {"dtype": "string", "id": null, "_type": "Value"}, "SNT.URLID.SNTID": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "status": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}, "word_alignment": {"dtype": "string", "id": null, "_type": "Value"}, "jp_tokenized": {"dtype": "string", "id": null, "_type": "Value"}, "en_tokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-jp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 21891803, "num_examples": 17202, "dataset_name": "alt"}, "validation": {"name": "validation", "num_bytes": 1181555, "num_examples": 953, "dataset_name": "alt"}, "test": {"name": "test", "num_bytes": 1175592, "num_examples": 931, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/Japanese-ALT-20210218.zip": {"num_bytes": 13011062, "checksum": "6b09e15d7611c9fcc4dfa81b4d519637062dc350ce7d3c72adb67af885dd5bec"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt": {"num_bytes": 161862, "checksum": "d57d680eebc9823b65c74c5de95320f17c3a5ead94bfa66a6849f3ed0cdd411a"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt": {"num_bytes": 9082, "checksum": "e3d35c2f54e204216011a2509925b359c5712c768c2b17bc74e19b8d4ec7e50d"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt": {"num_bytes": 9233, "checksum": "6d67d6bf5c4e7574116355d71ef927c66aca2f7ab7267b14591ea250f24ec722"}}, "download_size": 13191239, "post_processing_size": null, "dataset_size": 24248950, "size_in_bytes": 37440189}, "alt-my": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"SNT.URLID": {"dtype": "string", "id": null, "_type": "Value"}, "SNT.URLID.SNTID": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-my", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20433275, "num_examples": 18088, "dataset_name": "alt"}, "validation": {"name": "validation", "num_bytes": 1111410, "num_examples": 1000, "dataset_name": "alt"}, "test": {"name": "test", "num_bytes": 1135209, "num_examples": 1018, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-alt-190530.zip": {"num_bytes": 2848125, "checksum": "d77ef18364bcb2b149503a5ed77734b07b103bd277f8ed92716555f3deedaf95"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt": {"num_bytes": 161862, "checksum": "d57d680eebc9823b65c74c5de95320f17c3a5ead94bfa66a6849f3ed0cdd411a"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt": {"num_bytes": 9082, "checksum": "e3d35c2f54e204216011a2509925b359c5712c768c2b17bc74e19b8d4ec7e50d"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt": {"num_bytes": 9233, "checksum": "6d67d6bf5c4e7574116355d71ef927c66aca2f7ab7267b14591ea250f24ec722"}}, "download_size": 3028302, "post_processing_size": null, "dataset_size": 22679894, "size_in_bytes": 25708196}, "alt-km": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"SNT.URLID": {"dtype": "string", "id": null, "_type": "Value"}, "SNT.URLID.SNTID": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "km_pos_tag": {"dtype": "string", "id": null, "_type": "Value"}, "km_tokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-km", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12015411, "num_examples": 18088, "dataset_name": "alt"}, "validation": {"name": "validation", "num_bytes": 655232, "num_examples": 1000, "dataset_name": "alt"}, "test": {"name": "test", "num_bytes": 673753, "num_examples": 1018, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/km-nova-181101.zip": {"num_bytes": 2230655, "checksum": "0c6457d4a3327f3dc0b381704cbad71af120e963bfa1cdb06765fa0ed0c9098a"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt": {"num_bytes": 161862, "checksum": "d57d680eebc9823b65c74c5de95320f17c3a5ead94bfa66a6849f3ed0cdd411a"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt": {"num_bytes": 9082, "checksum": "e3d35c2f54e204216011a2509925b359c5712c768c2b17bc74e19b8d4ec7e50d"}, "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt": {"num_bytes": 9233, "checksum": "6d67d6bf5c4e7574116355d71ef927c66aca2f7ab7267b14591ea250f24ec722"}}, "download_size": 2410832, "post_processing_size": null, "dataset_size": 13344396, "size_in_bytes": 15755228}, "alt-my-transliteration": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"en": {"dtype": "string", "id": null, "_type": "Value"}, "my": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-my-transliteration", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4249424, "num_examples": 84022, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-en-transliteration.zip": {"num_bytes": 1232127, "checksum": "5b348c0f9e92d4699fddb4c64fd7d929eb6f6de6f7ce4d879bf91e8d4a82f063"}}, "download_size": 1232127, "post_processing_size": null, "dataset_size": 4249424, "size_in_bytes": 5481551}, "alt-my-west-transliteration": {"description": "The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).\n", "citation": "@inproceedings{riza2016introduction,\n    title={Introduction of the asian language treebank},\n    author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},\n    booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},\n    pages={1--6},\n    year={2016},\n    organization={IEEE}\n}\n", "homepage": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/", "license": "", "features": {"en": {"dtype": "string", "id": null, "_type": "Value"}, "my": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "alt", "config_name": "alt-my-west-transliteration", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7412043, "num_examples": 107121, "dataset_name": "alt"}}, "download_checksums": {"https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/western-myanmar-transliteration.zip": {"num_bytes": 2830071, "checksum": "c3f1419022d823791b6d85b259a18ab11d8f8800367d7ec4319e49fc016ec396"}}, "download_size": 2830071, "post_processing_size": null, "dataset_size": 7412043, "size_in_bytes": 10242114}}
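With `alt.py` and `dataset_infos.json` both removed, no repository code runs at load time, and the split metadata formerly stored in `dataset_infos.json` now lives in the README YAML above. A sketch of checking that metadata programmatically, with the same assumed repo id `alt` as before:

```python
from datasets import load_dataset_builder

# Reads the YAML metadata only; no loading script is downloaded or executed.
builder = load_dataset_builder("alt", "alt-km")
for name, split in builder.info.splits.items():
    print(name, split.num_examples, split.num_bytes)
```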