import pandas as pd

from display.utils import AutoEvalColumn
from leaderboard.read_evals import get_raw_eval_results, EvalResult


def get_leaderboard_df(
    results_path: str, requests_path: str, cols: list, benchmark_cols: list
) -> tuple[list[EvalResult], pd.DataFrame]:
    """Creates a dataframe from all the individual experiment results."""
    # Parse every evaluation result on disk, matched against its request file.
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    # Build the leaderboard table, ranked by average score (best first).
    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)

    # Keep only the requested columns and round scores for display.
    df = df[cols].round(decimals=2)

    return raw_data, df
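
# Example usage (illustrative sketch only; the paths and column lists below
# are hypothetical and depend on how the surrounding leaderboard app defines
# its COLS / BENCHMARK_COLS configuration):
#
#     raw_results, leaderboard_df = get_leaderboard_df(
#         results_path="eval-results",
#         requests_path="eval-queue",
#         cols=COLS,
#         benchmark_cols=BENCHMARK_COLS,
#     )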