evaluation-suite-ci / evaluation-suite-ci.py
mathemakitten's picture
test empty
79b3508
raw
history blame
1.24 kB
import evaluate
from evaluate.evaluation_suite import SubTask
class Suite(evaluate.EvaluationSuite):
    """An empty ``EvaluationSuite`` — apparently used to exercise the
    zero-sub-task path in CI (the commit is titled "test empty").

    A populated suite would register ``SubTask`` entries, e.g.::

        SubTask(
            task_type="text-classification",
            data="imdb",
            split="test[:2]",
            data_preprocessor=self.preprocessor,
            args_for_task={
                "metric": "accuracy",
                "input_column": "text",
                "label_column": "label",
                "label_mapping": {"NEGATIVE": 0.0, "POSITIVE": 1.0},
            },
        )
    """

    def __init__(self, name):
        """Initialize the suite with no sub-tasks.

        Args:
            name: Suite identifier, forwarded to ``evaluate.EvaluationSuite``.
        """
        super().__init__(name)
        # Lowercases the "text" field of an example dict; available for any
        # SubTask that opts in via ``data_preprocessor`` (none currently,
        # since the suite below is empty).
        self.preprocessor = lambda x: {"text": x["text"].lower()}
        # Intentionally empty — TODO(review): confirm this is meant to stay
        # empty for the CI check rather than being a stubbed-out suite.
        self.suite = []