Languages: Vietnamese
holylovenia committed · Commit 06f0a93 · verified · Parent: c032aec

Upload uit_visd4sa.py with huggingface_hub

Files changed (1)
  1. uit_visd4sa.py +202 -0
uit_visd4sa.py ADDED
@@ -0,0 +1,202 @@
# coding=utf-8

import json
import re
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{thanh-etal-2021-span,
    title = "Span Detection for Aspect-Based Sentiment Analysis in Vietnamese",
    author = "Thanh, Kim Nguyen Thi  and
      Khai, Sieu Huynh  and
      Huynh, Phuc Pham  and
      Luc, Luong Phan  and
      Nguyen, Duc-Vu  and
      Van, Kiet Nguyen",
    booktitle = "Proceedings of the 35th Pacific Asia Conference on Language, Information and Computation",
    year = "2021",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.paclic-1.34",
    pages = "318--328",
}
"""

_DATASETNAME = "uit_visd4sa"

_DESCRIPTION = """\
This dataset is designed for the span detection task in aspect-based sentiment analysis (ABSA).
It is a Vietnamese dataset consisting of 35,396 human-annotated spans on 11,122 feedback
comments, built to evaluate span detection for aspect-based sentiment analysis in mobile e-commerce.
"""

_HOMEPAGE = "https://github.com/kimkim00/UIT-ViSD4SA"

_LICENSE = Licenses.UNKNOWN.value

_LANGUAGES = ["vie"]

_URLS = {
    "train": "https://raw.githubusercontent.com/kimkim00/UIT-ViSD4SA/main/data/train.jsonl",
    "dev": "https://raw.githubusercontent.com/kimkim00/UIT-ViSD4SA/main/data/dev.jsonl",
    "test": "https://raw.githubusercontent.com/kimkim00/UIT-ViSD4SA/main/data/test.jsonl",
}
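
# Each line of the .jsonl files above is a JSON object with a raw "text" string and
# character-level "labels" spans, parsed below as [start, end, "ASPECT#RATING"].
# An illustrative (not verbatim) line:
#
#     {"text": "pin tốt", "labels": [[0, 7, "BATTERY#POSITIVE"]]}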

_SUPPORTED_TASKS = [Tasks.SPAN_BASED_ABSA]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

_LOCAL = False


def construct_label_classes():
    """Build the full IOB label set: "O" plus {I,B}-{aspect}#{rating} for every combination."""
    IOB_tag = ["I", "O", "B"]
    aspects = ["SCREEN", "CAMERA", "FEATURES", "BATTERY", "PERFORMANCE", "STORAGE", "DESIGN", "PRICE", "GENERAL", "SER&ACC"]
    ratings = ["POSITIVE", "NEUTRAL", "NEGATIVE"]
    label_classes = []
    for iob in IOB_tag:
        if iob == "O":
            # "O" carries no aspect or rating.
            label_classes.append("O")
        else:
            for aspect in aspects:
                for rating in ratings:
                    label_classes.append("{iob}-{aspect}#{rating}".format(iob=iob, aspect=aspect, rating=rating))
    return label_classes
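
# For reference, the label space above yields 61 classes: "O" plus the 2 x 10 x 3
# prefixed combinations, e.g. "B-BATTERY#POSITIVE" or "I-SCREEN#NEGATIVE".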


def construct_IOB_sequences(text, labels):
    """Convert character-level span labels into a word-level IOB tag sequence.

    `labels` is a list of [start_char, end_char, "ASPECT#RATING"] triples; a word
    is assigned to a span based on its starting character offset.
    """
    labels.sort()
    # Character offset at which each whitespace-separated word begins.
    word_start = [0] + [match.start() + 1 for match in re.finditer(" ", text)]
    is_not_O = False
    iob_sequence = []
    word_count = 0
    lb_count = 0

    while word_count < len(word_start):
        if lb_count == len(labels):
            # No spans left: every remaining word is outside any span.
            for x in range(word_count, len(word_start)):
                iob_sequence.append("O")
            break
        if not is_not_O:
            if word_start[word_count] >= labels[lb_count][0]:
                # The word starts inside the current span: open it with a B- tag.
                is_not_O = True
                iob_sequence.append("B-" + labels[lb_count][-1])
                word_count += 1
            else:
                iob_sequence.append("O")
                word_count += 1
        else:
            if word_start[word_count] > labels[lb_count][1]:
                # The word starts past the span's end: close it and move to the next span.
                is_not_O = False
                lb_count += 1
            else:
                iob_sequence.append("I-" + labels[lb_count][-1])
                word_count += 1
    return iob_sequence
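
# A worked example of the conversion: for text "pin tốt" ("good battery") with
# labels [[0, 7, "BATTERY#POSITIVE"]], the two words start at offsets 0 and 4,
# both covered by the span, so the result is
# ["B-BATTERY#POSITIVE", "I-BATTERY#POSITIVE"].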


class UITViSD4SADataset(datasets.GeneratorBasedBuilder):
    """This dataset is designed for the span detection task in aspect-based sentiment analysis (ABSA).
    It is a Vietnamese dataset consisting of 35,396 human-annotated spans on 11,122 feedback
    comments, built to evaluate span detection for aspect-based sentiment analysis in mobile e-commerce."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description="uit_visd4sa source schema",
            schema="source",
            subset_id="uit_visd4sa",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_seq_label",
            version=SEACROWD_VERSION,
            description="uit_visd4sa SEACrowd schema",
            schema="seacrowd_seq_label",
            subset_id="uit_visd4sa",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            # Source schema: raw text plus character-level span annotations.
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Sequence({"start": datasets.Value("int32"), "end": datasets.Value("int32"), "aspect": datasets.Value("string"), "rating": datasets.Value("string")}),
                }
            )

        elif self.config.schema == "seacrowd_seq_label":
            # SEACrowd schema: word-level sequence labeling over the IOB label set.
            features = schemas.seq_label_features(construct_label_classes())

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        path_dict = dl_manager.download_and_extract(_URLS)
        train_path, dev_path, test_path = path_dict["train"], path_dict["dev"], path_dict["test"]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dev_path,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        with open(filepath, "r", encoding="utf-8") as f:
            df = [json.loads(line) for line in f]
        if self.config.schema == "source":
            for _id, row in enumerate(df):
                labels = row["labels"]
                entry_labels = []
                for lb in labels:
                    # Each raw label is [start_char, end_char, "ASPECT#RATING"].
                    entry_labels.append({"start": lb[0], "end": lb[1], "aspect": lb[-1].split("#")[0], "rating": lb[-1].split("#")[-1]})
                entry = {
                    "text": row["text"],
                    "label": entry_labels,
                }
                yield _id, entry
        elif self.config.schema == "seacrowd_seq_label":
            for _id, row in enumerate(df):
                entry = {
                    "id": str(_id),
                    "tokens": row["text"].split(" "),
                    "labels": construct_IOB_sequences(row["text"], row["labels"]),
                }
                yield _id, entry
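

# A minimal usage sketch, assuming this script is loaded with the `datasets`
# library and the `seacrowd` utilities are importable; the `name=` value follows
# BUILDER_CONFIGS above:
#
#     import datasets
#
#     dset = datasets.load_dataset("uit_visd4sa.py", name="uit_visd4sa_seacrowd_seq_label")
#     sample = dset["train"][0]
#     print(sample["tokens"], sample["labels"])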