import json
import os

import numpy as np
import pandas as pd
import yaml
from sklearn.metrics import cohen_kappa_score

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results

TYPES = ["str", "number", "number", "number", "number", "number"]


def read_json(file_path: str) -> list[dict]:
    """
    Read a JSON/JSONL file and return its contents as a list of dictionaries.

    Parameters:
        file_path (str): The path to the JSON or JSONL file.

    Returns:
        list[dict]: The contents of the file as a list of dictionaries.
    """
    try:
        # JSONL: one JSON object per line.
        with open(file_path) as f:
            data = [json.loads(x) for x in f]
        return data
    except json.decoder.JSONDecodeError:
        # Fall back to a regular JSON file containing a single array.
        with open(file_path) as f:
            data = json.load(f)
        return data


def pairwise_compare(
    evaluator1_dir: str,
    evaluator2_dir: str,
) -> tuple[float, float]:
    """
    Compare two pairwise evaluators.

    Args:
        evaluator1_dir: The path to the file containing the responses from the first evaluator.
        evaluator2_dir: The path to the file containing the responses from the second evaluator.

    Returns:
        tuple[float, float]: The raw accuracy and Cohen's kappa agreement between the two evaluators.
    """
    evaluator1_responses = read_json(evaluator1_dir)
    evaluator2_responses = read_json(evaluator2_dir)
    assert len(evaluator1_responses) == len(evaluator2_responses)
    evaluator1_winners = np.array(
        [response["winner"] for response in evaluator1_responses]
    )
    evaluator2_winners = np.array(
        [response["winner"] for response in evaluator2_responses]
    )
    # Fraction of examples where both evaluators pick the same winner.
    acc = (evaluator1_winners == evaluator2_winners).mean().item()
    # Chance-corrected agreement on the same labels.
    agreement = cohen_kappa_score(evaluator1_winners, evaluator2_winners)
    return acc, agreement
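
# Illustrative record shape assumed by pairwise_compare (an assumption inferred from
# the fields accessed above): each line of an evaluator file is a JSON object with at
# least a "winner" field, e.g.
#   {"winner": 1, ...}
# Raw accuracy is the fraction of identical "winner" labels; Cohen's kappa measures
# the same agreement corrected for chance.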


def pairwise_meta_eval(
    human_dir: str,
    model_dir: str,
    model_dir_swap: str,
) -> tuple[float, float, float, float]:
    """
    Evaluate a pairwise evaluator against human judgments.

    Args:
        human_dir: The path to the file containing the human responses.
        model_dir: The path to the file containing the model responses.
        model_dir_swap: The path to the file containing the model responses with swapped inputs.

    Returns:
        tuple[float, float, float, float]: The human-model accuracy and agreement
        (averaged over the original and swapped orderings), followed by the model's
        self-accuracy and self-agreement across the two orderings.
    """
    acc, agr = pairwise_compare(human_dir, model_dir)
    swap_acc, swap_agr = pairwise_compare(
        human_dir, model_dir_swap,
    )
    # Average the scores over the original and swapped input orderings.
    acc = (acc + swap_acc) / 2
    agr = (agr + swap_agr) / 2
    # Consistency of the model with itself when the input order is swapped.
    models_acc, models_agr = pairwise_compare(
        model_dir, model_dir_swap,
    )
    return acc, agr, models_acc, models_agr


def load_leaderboard() -> pd.DataFrame:
    """Loads the leaderboard from the file system."""
    with open("./data/models.yaml") as fp:
        models = yaml.safe_load(fp)
    predictions = {
        k: []
        for k in ["Model", "Accuracy", "Agreement", "Self-Accuracy", "Self-Agreement"]
    }
    for model in models:
        fdir = model["fdir"]
        # Human annotations vs. model predictions (original and swapped orderings).
        acc, agr, models_acc, models_agr = pairwise_meta_eval(
            "./data/instrusum.json",
            f"./predictions/{fdir}.jsonl",
            f"./predictions/{fdir}_swap.jsonl",
        )
        predictions["Model"].append(model["name"])
        predictions["Accuracy"].append(acc)
        predictions["Agreement"].append(agr)
        predictions["Self-Accuracy"].append(models_acc)
        predictions["Self-Agreement"].append(models_agr)
    return (
        pd.DataFrame(predictions)
        .sort_values(by="Agreement", ascending=False)
        .round(decimals=3)
    )
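
# Illustrative ./data/models.yaml entry consumed by load_leaderboard (an assumption
# inferred from the keys accessed above; other keys may be present):
#   - name: "Example Model"
#     fdir: "example_model"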


def get_leaderboard_df(
    results_path: str, requests_path: str, cols: list, benchmark_cols: list
) -> tuple[list, pd.DataFrame]:
    """Creates a dataframe from all the individual experiment results."""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)

    # Filter out rows for which any of the benchmarks has not been produced.
    df = df[has_no_nan_values(df, benchmark_cols)]
    return raw_data, df


def get_evaluation_queue_df(
    save_path: str, cols: list
) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # This is a folder: read every request file it contains.
            sub_entries = [
                e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [
        e
        for e in all_evals
        if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"
    ]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
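

# Minimal usage sketch (an illustration, not part of the app's entry point); it assumes
# ./data/models.yaml and the ./predictions/*.jsonl files it references are present.
if __name__ == "__main__":
    print(load_leaderboard().to_string(index=False))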