xeon27 committed
Commit 8555000 · 1 Parent(s): 26ed691

Add '-' for empty results

Files changed: src/populate.py (+3 -7)
src/populate.py CHANGED

@@ -41,16 +41,12 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     # df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
     df = df[cols].round(decimals=2)
 
-    #
-
-    # filter out if any of the benchmarks have not been produced
-    df = df[has_no_nan_values(df, benchmark_cols)]
-    # TMP: Debug
-    print(df.shape)
+    # # filter out if any of the benchmarks have not been produced
+    # df = df[has_no_nan_values(df, benchmark_cols)]
 
     # make values clickable and link to log files
     for col in benchmark_cols:
-        df[col] = df[[AutoEvalColumn.model.name, col]].apply(lambda x: f"[{x[col]}]({get_inspect_log_url(model_name=x[AutoEvalColumn.model.name].split('>')[1].split('<')[0], benchmark_name=TASK_NAME_INVERSE_MAP[col]['name'])})", axis=1)
+        df[col] = df[[AutoEvalColumn.model.name, col]].apply(lambda x: f"[{x[col]}]({get_inspect_log_url(model_name=x[AutoEvalColumn.model.name].split('>')[1].split('<')[0], benchmark_name=TASK_NAME_INVERSE_MAP[col]['name'])})" if x[col] is not None else "-", axis=1)
 
     # # make task names clickable and link to inspect-evals repository - this creates issues later
     # df = df.rename(columns={col: f"[{col}]({TASK_NAME_INVERSE_MAP[col]['source']})" for col in benchmark_cols})
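For reference, here is a minimal sketch (not the repository's code) of what the changed apply call now does: a score that exists is rendered as a markdown link to its log file, while a missing score falls back to a plain "-". The toy column names, the hard-coded model HTML, and the make_log_url helper are illustrative stand-ins for AutoEvalColumn.model.name, the real model cell contents, and get_inspect_log_url / TASK_NAME_INVERSE_MAP.

import pandas as pd

def make_log_url(model_name: str, benchmark_name: str) -> str:
    # Illustrative stand-in for get_inspect_log_url; just builds a fake URL.
    return f"https://example.org/logs/{model_name}/{benchmark_name}"

# Toy leaderboard frame. The object dtype keeps the missing score as None
# (rather than NaN), which is what the `is not None` check in the diff tests.
df = pd.DataFrame(
    {
        "Model": ["<a href='#'>model-a</a>", "<a href='#'>model-b</a>"],
        "benchmark_x": pd.Series([0.42, None], dtype=object),
    }
)

benchmark_cols = ["benchmark_x"]
for col in benchmark_cols:
    df[col] = df[["Model", col]].apply(
        lambda x: (
            # Present result: markdown link pointing at the log file.
            f"[{x[col]}]({make_log_url(x['Model'].split('>')[1].split('<')[0], col)})"
            if x[col] is not None
            # Empty result: plain dash instead of a link to a missing log.
            else "-"
        ),
        axis=1,
    )

print(df)
# model-a's score becomes a clickable link; model-b's missing score shows as "-".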