Upload inset_lexicon.py with huggingface_hub
inset_lexicon.py  CHANGED  (+12 -12)
@@ -4,9 +4,9 @@ from typing import Dict, List, Tuple
 import datasets
 import pandas as pd

-from
-from
-from
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks

 _CITATION = """\
 @inproceedings{inproceedings,
@@ -36,28 +36,28 @@ _URLS = {_DATASETNAME: "https://github.com/fajri91/InSet/archive/refs/heads/mast

 _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
 _SOURCE_VERSION = "1.0.0"
-
+_SEACROWD_VERSION = "2024.06.20"


 class InsetLexicon(datasets.GeneratorBasedBuilder):
     """InSet, an Indonesian sentiment lexicon built to identify written opinion and categorize it into positive or negative opinion"""

     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

     BUILDER_CONFIGS = [
-
+        SEACrowdConfig(
             name="inset_lexicon_source",
             version=SOURCE_VERSION,
             description="Inset Lexicon source schema",
             schema="source",
             subset_id="inset_lexicon",
         ),
-
-            name="
-            version=
+        SEACrowdConfig(
+            name="inset_lexicon_seacrowd_text",
+            version=SEACROWD_VERSION,
             description="Inset Lexicon Nusantara schema",
-            schema="
+            schema="seacrowd_text",
             subset_id="inset_lexicon",
         ),
     ]
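The two configs above can then be selected by name once the script is available locally. A minimal loading sketch, assuming a `datasets` release that still supports script-based loaders and that the `seacrowd` package is installed (the script path here is illustrative):

import datasets

# Source schema: raw lexicon columns ("word", "weight").
source_ds = datasets.load_dataset(
    "inset_lexicon.py",              # local path to this loader script (illustrative)
    name="inset_lexicon_source",
    trust_remote_code=True,          # newer datasets releases require this for script loaders
)

# SEACrowd text schema: "id" / "text" / "label" fields, labels "-5" .. "5".
text_ds = datasets.load_dataset(
    "inset_lexicon.py",
    name="inset_lexicon_seacrowd_text",
    trust_remote_code=True,
)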
@@ -67,7 +67,7 @@ class InsetLexicon(datasets.GeneratorBasedBuilder):
     def _info(self) -> datasets.DatasetInfo:
         if self.config.schema == "source":
             features = datasets.Features({"word": datasets.Value("string"), "weight": datasets.Value("string")})
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_text":
             labels = list(range(-5, 6, 1))
             labels = [str(label) for label in labels]
             features = schemas.text_features(labels)
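For orientation, `schemas.text_features(labels)` is assumed here to build the usual id/text/label triple of the SEACrowd text schema; a rough plain-`datasets` equivalent (an assumption about the helper, not its literal source) would be:

import datasets

labels = [str(label) for label in range(-5, 6)]  # "-5" .. "5", matching the diff

# Assumed shape of the seacrowd_text schema: string id, string text,
# and a ClassLabel over the eleven sentiment weights.
features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "label": datasets.ClassLabel(names=labels),
    }
)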
@@ -117,7 +117,7 @@ class InsetLexicon(datasets.GeneratorBasedBuilder):
                 }
                 yield row.id, ex

-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_text":
             for row in df.itertuples():
                 ex = {
                     "id": str(row.id),
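The `seacrowd_text` branch of `_generate_examples` is cut off in this view; a hedged sketch of how one lexicon row could be mapped to that schema, assuming the dataframe keeps the source `word` and `weight` columns plus an `id` column (field names beyond what the hunk shows are assumptions):

import pandas as pd

def to_seacrowd_text_example(row) -> dict:
    # Assumed mapping: lexicon entry -> text, sentiment weight -> string label "-5".."5".
    return {
        "id": str(row.id),
        "text": row.word,
        "label": str(int(row.weight)),
    }

# Tiny illustration with made-up rows (not the real InSet data):
df = pd.DataFrame({"word": ["bagus", "buruk"], "weight": [4, -3]})
df = df.reset_index().rename(columns={"index": "id"})
for row in df.itertuples():
    print(to_seacrowd_text_example(row))
# {'id': '0', 'text': 'bagus', 'label': '4'}
# {'id': '1', 'text': 'buruk', 'label': '-3'}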