File size: 2,446 Bytes
99614d2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
import json
from typing import Union, Dict
import logging
import os
import datasets
import numpy as np
from tqdm import tqdm
from evaluation.embeddings_generator import EmbeddingsGenerator
from evaluation.encoders import Model
from evaluation.eval_datasets import SimpleDataset
from evaluation.evaluator import IREvaluator
logger = logging.getLogger(__name__)
class MDCREvaluator(IREvaluator):
    """Evaluator for the MDCR benchmark built on top of the IR evaluator.

    Flattens the nested MDCR test file into query-candidate relevance pairs,
    runs retrieval over (pre-computed or loaded) embeddings, and dumps the
    per-pair scores to ``scirepeval_mdcr.json`` for external MDCR scoring.
    """

    def __init__(self, name: str, meta_dataset: Union[str, tuple], test_dataset: Union[str, tuple], model: Model,
                 metrics: tuple = None, batch_size: int = 16, fields: list = None, key="paper_id"):
        # Same wiring as IREvaluator, fixing the dataset class to SimpleDataset.
        super().__init__(name, meta_dataset, test_dataset, model, metrics, SimpleDataset,
                         batch_size, fields, key)

    def get_qc_pairs(self, dataset) -> Dict:
        """Flatten the MDCR test records into ``{query: {candidate: label}}``.

        Each record maps field-of-study -> query -> source -> [candidates];
        candidates under the ``"true"`` source get label 1 (relevant), all
        other sources get label 0.
        """
        qrpairs = dict()
        for fos_dict in dataset:
            for fos in fos_dict:
                for query in fos_dict[fos]:
                    qrpairs[query] = dict()
                    for model, cands in fos_dict[fos][query].items():
                        qrpairs[query].update({v: 1 if model == "true" else 0 for v in cands})
        return qrpairs

    def evaluate(self, embeddings, **kwargs) -> Dict:
        """Score all query-candidate pairs and write them to ``scirepeval_mdcr.json``.

        ``embeddings`` is either an in-memory mapping or a path to a jsonl
        file of saved embeddings. Returns an empty dict: MDCR metrics are
        computed externally from the dumped score file.
        """
        logger.info(f"Loading test dataset from {self.test_dataset}")
        split_dataset = datasets.load_dataset("json",
                                              data_files={"test": self.test_dataset})
        logger.info(f"Loaded {len(split_dataset['test'])} test query-candidate pairs")
        # Accept either a path to saved embeddings or the embeddings themselves.
        if isinstance(embeddings, str) and os.path.isfile(embeddings):
            embeddings = EmbeddingsGenerator.load_embeddings_from_jsonl(embeddings)
        qrels_hard = self.get_qc_pairs(split_dataset["test"])
        preds = self.retrieval(embeddings, qrels_hard)
        results = dict()
        for q, cscores in tqdm(preds.items()):
            for c, score in cscores.items():
                results[f"{q}_{c}"] = score
        # Context manager closes/flushes the file reliably; the original
        # json.dump(results, open(...)) leaked the handle.
        with open("scirepeval_mdcr.json", "w") as f:
            json.dump(results, f)
        return dict()
import sys

if __name__ == "__main__":
    # Usage: python <script> <base_checkpoint>
    # Validate argv up front instead of failing later with an IndexError.
    if len(sys.argv) < 2:
        sys.exit("usage: python mdcr_evaluator.py <base_checkpoint>")
    mname = sys.argv[1]
    model = Model(variant="default", base_checkpoint=mname)
    evaluator = MDCREvaluator("mcdr", "../mdcr/mdcr_test_data.jsonl", "../mdcr/mdcr_test.json", model, batch_size=32)
    # Generate (and persist) embeddings, then score all query-candidate pairs;
    # results are written to scirepeval_mdcr.json by evaluate().
    embeddings = evaluator.generate_embeddings(save_path="mdcr_embeddings.json")
    evaluator.evaluate(embeddings)
|