# Source: CaSSA-catalan-structured-sentiment-analysis / OLD / CaSSA-catalan-structured-sentiment-analysis.py
# Author: jsaizant — commit 0757796 ("Moved files and dataloader to OLD")
# Loading script for the ReviewsFinder dataset.
import json
import datasets
# Module-level logger scoped to this loading script's name.
logger = datasets.logging.get_logger(__name__)
# Citation left blank — no paper reference was provided for this dataset.
_CITATION = """ """
_DESCRIPTION = """ The CaSSA dataset is a corpus of 6,400 reviews and forum messages annotated with polar expressions. Each piece of text is annotated with all the expressions of polarity that it contains. For each polar expression, we annotated the expression itself, the target (the object of the expression), and the source (the subject expressing the sentiment). 25,453 polar expressions have been annotated.
"""
_HOMEPAGE = """ https://huggingface.co/datasets/projecte-aina/CaSSA-catalan-structured-sentiment-analysis/ """
# Base "resolve" URL of the Hub repository; the single JSONL data file lives there.
_URL = "https://huggingface.co/datasets/projecte-aina/CaSSA-catalan-structured-sentiment-analysis/resolve/main/"
_FILE = "data.jsonl"
class CaSSAConfig(datasets.BuilderConfig):
    """Builder configuration for the CaSSA dataset."""

    def __init__(self, **kwargs):
        """Create a CaSSA builder config.

        Args:
            **kwargs: forwarded verbatim to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
class CaSSA(datasets.GeneratorBasedBuilder):
    """CaSSA dataset builder.

    Loads reviews and forum messages annotated with polar expressions
    (Source / Target / Polar_expression spans plus Polarity and Intensity
    labels) from a single JSONL file hosted on the Hugging Face Hub.
    """

    BUILDER_CONFIGS = [
        CaSSAConfig(
            name="CaSSA",
            version=datasets.Version("1.0.0"),
            description="CaSSA dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata: feature schema, description, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sent_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # Each opinion carries span annotations as a pair of string
                    # lists (presumably [span texts, offsets] — confirm against
                    # the raw data) plus categorical Polarity/Intensity labels.
                    "opinions": [
                        {
                            "Source": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                            "Target": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                            "Polar_expression": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                            "Polarity": datasets.Value("string"),
                            "Intensity": datasets.Value("string"),
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single JSONL data file and expose it as the train split."""
        urls_to_download = {
            "data": f"{_URL}{_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["data"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs read from a JSONL file.

        The empty-annotation sentinel ``[[], []]`` in Source, Target, and
        Polar_expression is normalized to ``None`` so absent spans are explicit.

        Args:
            filepath: path to the downloaded JSONL file (one record per line).
        """
        logger.info("generating examples from = %s", filepath)

        def _normalize(value):
            # Map the "no annotation" sentinel to None; pass everything else through.
            return None if value == [[], []] else value

        with open(filepath, encoding="utf-8") as f:
            # Stream the file line by line instead of materializing every record
            # in a list first (the original doubled peak memory for no benefit
            # inside a generator-based builder).
            for id_, line in enumerate(f):
                record = json.loads(line)
                yield id_, {
                    "sent_id": record["sent_id"],
                    "text": record["text"],
                    "opinions": [
                        {
                            "Source": _normalize(opinion["Source"]),
                            "Target": _normalize(opinion["Target"]),
                            "Polar_expression": _normalize(opinion["Polar_expression"]),
                            "Polarity": opinion["Polarity"],
                            "Intensity": opinion["Intensity"],
                        }
                        for opinion in record["opinions"]
                    ],
                }