# src/populate.py
import json
import os
import pandas as pd
# Import functions defined elsewhere in the project.
from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)

    # Apply make_clickable_model to the "model" column while making sure the
    # original model name is preserved.
    if "model" in df.columns:
        # Keep a copy of the original model name.
        df["original_model_name"] = df["model"].copy()
        # Replace the displayed value with a hyperlink.
        df["model"] = df["model"].apply(make_clickable_model)

    # Keep only rows for which every benchmark value is present.
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df

def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the DataFrames for the evaluation queue (finished, running, pending)."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            # Keep the original model name before turning it into a hyperlink.
            original_model = data.get("model", "")
            data[EvalQueueColumn.model.name] = make_clickable_model(original_model)
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # Directory entry: use the full path when checking whether a child is a file.
            sub_entries = [
                e for e in os.listdir(os.path.join(save_path, entry))
                if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                original_model = data.get("model", "")
                data[EvalQueueColumn.model.name] = make_clickable_model(original_model)
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]

    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
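

# Usage sketch (an assumption, not part of the original module): shows how the two
# helpers above might be called. All paths and column lists below are hypothetical
# placeholders for illustration; in the real app they would come from the project's
# configuration (e.g. its envs and display/utils modules), which is not shown here.
if __name__ == "__main__":
    results_path = "./eval-results"    # hypothetical local results directory
    requests_path = "./eval-queue"     # hypothetical local requests directory

    cols = ["model", "average", "benchmark_a", "benchmark_b"]   # hypothetical display columns
    benchmark_cols = ["benchmark_a", "benchmark_b"]             # hypothetical benchmark columns
    queue_cols = ["model", "revision", "status"]                # hypothetical queue columns

    leaderboard_df = get_leaderboard_df(results_path, requests_path, cols, benchmark_cols)
    finished_df, running_df, pending_df = get_evaluation_queue_df(requests_path, queue_cols)

    print(leaderboard_df.head())
    print(f"finished={len(finished_df)} running={len(running_df)} pending={len(pending_df)}")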