sumanthd committed on
Commit 164b418
1 Parent(s): 8aac53d

IndicQA test sets

Files changed (3)
  1. .gitignore +1 -0
  2. IndicQA.py +119 -0
  3. README.md +155 -0
.gitignore ADDED
@@ -0,0 +1 @@
+.DS_Store
IndicQA.py ADDED
@@ -0,0 +1,119 @@
+"""TODO(indicqa): Add a description here."""
+
+
+import json
+
+import datasets
+from datasets.tasks import QuestionAnsweringExtractive
+
+
+_CITATION = """\
+
+"""
+
+_DESCRIPTION = """\
+
+"""
+
+_URL = "https://huggingface.co/datasets/ai4bharat/IndicQA/resolve/main/data/"
+_LANG = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
+
+
+class IndicqaConfig(datasets.BuilderConfig):
+
+    """BuilderConfig for Indicqa"""
+
+    def __init__(self, lang, **kwargs):
+        """
+
+        Args:
+            lang: string, language for the input text
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(IndicqaConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+        self.lang = lang
+
+
+class Indicqa(datasets.GeneratorBasedBuilder):
+    """TODO(indicqa): Short description of my dataset."""
+
+    # TODO(indicqa): Set up version.
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [IndicqaConfig(name=f"indicqa.{lang}", description=_DESCRIPTION, lang=lang) for lang in _LANG]
+
+    def _info(self):
+        # TODO(indicqa): Specifies the datasets.DatasetInfo object
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "context": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "answers": datasets.features.Sequence(
+                        {
+                            "text": datasets.Value("string"),
+                            "answer_start": datasets.Value("int32"),
+                        }
+                    ),
+                    # These are the features of your dataset like images, labels ...
+                }
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="",
+            citation=_CITATION,
+            task_templates=[
+                QuestionAnsweringExtractive(
+                    question_column="question", context_column="context", answers_column="answers"
+                )
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # TODO(indicqa): Downloads the data and defines the splits
+        # dl_manager is a datasets.download.DownloadManager that can be used to
+        # download and extract URLs
+        urls_to_download = {lang: _URL + f"indicqa.{lang}.json" for lang in _LANG}
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={"filepath": downloaded_files[self.config.lang]},
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples."""
+        # TODO(indicqa): Yields (key, example) tuples from the dataset
+        with open(filepath, encoding="utf-8") as f:
+            indicqa = json.load(f)
+            id_ = 0
+            for article in indicqa["data"]:
+                for paragraph in article["paragraphs"]:
+                    context = paragraph["context"].strip()
+                    for qa in paragraph["qas"]:
+                        question = qa["question"].strip()
+                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                        answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                        # Features currently used are "context", "question", and "answers".
+                        # Others are extracted here for the ease of future expansions.
+                        yield id_, {
+                            "context": context,
+                            "question": question,
+                            "id": qa["id"],
+                            "answers": {
+                                "answer_start": answer_starts,
+                                "text": answers,
+                            },
+                        }
+                        id_ += 1
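
For reference, the script above registers one `BuilderConfig` per language, named `indicqa.<lang>`, and materializes each language's file as a single `validation` split. A minimal usage sketch (not part of this commit; it assumes the per-language files `indicqa.<lang>.json` referenced by `_URL` are present under `data/` in this repo):

```python
# Smoke test for the loading script above. Hypothetical usage sketch; assumes
# the data files data/indicqa.<lang>.json exist in the ai4bharat/IndicQA repo.
from datasets import load_dataset

# Config names follow BUILDER_CONFIGS in IndicQA.py: "indicqa.<lang>".
# Recent `datasets` releases may also require trust_remote_code=True
# before they will execute a script-based dataset like this one.
ds = load_dataset("ai4bharat/IndicQA", "indicqa.hi")

# _split_generators defines only datasets.Split.VALIDATION, so even though
# the commit message calls these "test sets", the examples land here:
example = ds["validation"][0]
print(example["question"])
print(example["answers"])  # {"text": [...], "answer_start": [...]}
```

Note the split naming: downstream evaluation code must index `ds["validation"]`, since the loader never defines a `test` split.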
README.md ADDED
@@ -0,0 +1,155 @@
+---
+annotations_creators:
+- expert-generated
+language:
+- as
+- bn
+- gu
+- hi
+- kn
+- ml
+- mr
+- or
+- pa
+- ta
+- te
+language_creators:
+- found
+license:
+- cc-by-4.0
+multilinguality:
+- multilingual
+pretty_name: IndicQA
+size_categories:
+- n<1K
+source_datasets:
+- original
+tags: []
+task_categories:
+- question-answering
+task_ids:
+- closed-domain-qa
+---
+
+# Dataset Card for IndicQA
+
+## Table of Contents
+- [Table of Contents](#table-of-contents)
+- [Dataset Description](#dataset-description)
+  - [Dataset Summary](#dataset-summary)
+  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+  - [Languages](#languages)
+- [Dataset Structure](#dataset-structure)
+  - [Data Instances](#data-instances)
+  - [Data Fields](#data-fields)
+  - [Data Splits](#data-splits)
+- [Dataset Creation](#dataset-creation)
+  - [Curation Rationale](#curation-rationale)
+  - [Source Data](#source-data)
+  - [Annotations](#annotations)
+  - [Personal and Sensitive Information](#personal-and-sensitive-information)
+- [Considerations for Using the Data](#considerations-for-using-the-data)
+  - [Social Impact of Dataset](#social-impact-of-dataset)
+  - [Discussion of Biases](#discussion-of-biases)
+  - [Other Known Limitations](#other-known-limitations)
+- [Additional Information](#additional-information)
+  - [Dataset Curators](#dataset-curators)
+  - [Licensing Information](#licensing-information)
+  - [Citation Information](#citation-information)
+  - [Contributions](#contributions)
+
+## Dataset Description
+
+- **Homepage:**
+- **Repository:**
+- **Paper:**
+- **Leaderboard:**
+- **Point of Contact:**
+
+### Dataset Summary
+
+[More Information Needed]
+
+### Supported Tasks and Leaderboards
+
+[More Information Needed]
+
+### Languages
+
+[More Information Needed]
+
+## Dataset Structure
+
+### Data Instances
+
+[More Information Needed]
+
+### Data Fields
+
+[More Information Needed]
+
+### Data Splits
+
+[More Information Needed]
+
+## Dataset Creation
+
+### Curation Rationale
+
+[More Information Needed]
+
+### Source Data
+
+#### Initial Data Collection and Normalization
+
+[More Information Needed]
+
+#### Who are the source language producers?
+
+[More Information Needed]
+
+### Annotations
+
+#### Annotation process
+
+[More Information Needed]
+
+#### Who are the annotators?
+
+[More Information Needed]
+
+### Personal and Sensitive Information
+
+[More Information Needed]
+
+## Considerations for Using the Data
+
+### Social Impact of Dataset
+
+[More Information Needed]
+
+### Discussion of Biases
+
+[More Information Needed]
+
+### Other Known Limitations
+
+[More Information Needed]
+
+## Additional Information
+
+### Dataset Curators
+
+[More Information Needed]
+
+### Licensing Information
+
+[More Information Needed]
+
+### Citation Information
+
+[More Information Needed]
+
+### Contributions
+
+Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.