evaluation-suite-ci / evaluation-suite-ci.py
mathemakitten's picture
more data?
cc0f4a7
raw
history blame
1.33 kB
import evaluate
from evaluate.evaluation_suite import SubTask
class Suite(evaluate.EvaluationSuite):
    """CI evaluation suite: text-classification on a 2-example IMDB slice.

    The tiny ``test[:2]`` split keeps the run fast enough for continuous
    integration — it exercises the evaluation pipeline wiring rather than
    measuring real model quality.
    """

    def __init__(self, name):
        super().__init__(name)
        # Lower-case each example's text before evaluation; the evaluator
        # applies this mapping to every dataset row.
        self.preprocessor = lambda x: {"text": x["text"].lower()}
        self.suite = [
            SubTask(
                task_type="text-classification",
                data="imdb",
                split="test[:2]",  # 2 examples only: CI smoke test, not a benchmark
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    # Map the pipeline's string labels onto the dataset's
                    # integer label values so accuracy can be computed.
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0,
                    },
                },
            ),
        ]