Update src/populate.py

src/populate.py  (+29 -29)  CHANGED
@@ -1,68 +1,68 @@
# src/populate.py
-
import json
- import os
-
+ import os
import pandas as pd

- # The local make_clickable_model that used to live here was removed;
# Import the externally defined functions.
from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
- from src.leaderboard.read_evals import get_raw_eval_results
-
-
- def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
-     raw_data = get_raw_eval_results(results_path, requests_path)
-     all_data_json = [v.to_dict() for v in raw_data]
-
-     df = pd.DataFrame.from_records(all_data_json)
-     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
-     df = df[cols].round(decimals=2)
+ from src.leaderboard.read_evals import get_raw_eval_results

+ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
+     raw_data = get_raw_eval_results(results_path, requests_path)
+     all_data_json = [v.to_dict() for v in raw_data]
+     df = pd.DataFrame.from_records(all_data_json)
+     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+     df = df[cols].round(decimals=2)
+
    # Apply make_clickable_model to the "model" column
+     # Make sure the original model name is preserved
    if "model" in df.columns:
+         # Save the original model name first
+         df["original_model_name"] = df["model"].copy()
+         # Then apply the hyperlink formatting
        df["model"] = df["model"].apply(make_clickable_model)
-
+
    # Filter out rows for which some benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df

-
def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the different DataFrames for the evaluation queue."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []
-
+
    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)
-
-
-
-
-
+             # Save the original model name
+             original_model = data.get("model", "")
+             data[EvalQueueColumn.model.name] = make_clickable_model(original_model)
+             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+             all_evals.append(data)
        elif ".md" not in entry:
            # Directory case: use the full path when checking whether an entry is a file
            sub_entries = [
-                 e for e in os.listdir(os.path.join(save_path, entry))
+                 e for e in os.listdir(os.path.join(save_path, entry))
                if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)
-
-
-
-
-
+                 original_model = data.get("model", "")
+                 data[EvalQueueColumn.model.name] = make_clickable_model(original_model)
+                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+                 all_evals.append(data)
+
    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-
+
+     return df_finished[cols], df_running[cols], df_pending[cols]
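
Why the change keeps a plain copy of the name: once make_clickable_model has been applied, the "model" column holds markup rather than a bare repo id, so plain-text lookups (search, filtering, deduplication) need the untouched value. A minimal sketch of the idea, using a stand-in for make_clickable_model (the link format below is an assumption, not code from this repo):

import pandas as pd

def make_clickable_model_stub(model_name: str) -> str:
    # Stand-in for src.display.formatting.make_clickable_model, assumed here to
    # wrap the repo id in an HTML link for the leaderboard table.
    return f'<a href="https://huggingface.co/{model_name}">{model_name}</a>'

df = pd.DataFrame({"model": ["org/model-a", "org/model-b"]})
df["original_model_name"] = df["model"].copy()               # plain repo id, preserved
df["model"] = df["model"].apply(make_clickable_model_stub)   # rendered as a link in the UI

# Plain-name lookups still work against the preserved column:
print(df[df["original_model_name"].str.contains("model-a")])

Note that in get_leaderboard_df the new column is added after the df[cols] selection, so the returned frame carries original_model_name alongside the requested display columns.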
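
For context, a rough sketch of how the two helpers are typically wired up from the app layer; the paths and column lists below are placeholders, not values taken from this repo:

from src.populate import get_leaderboard_df, get_evaluation_queue_df

EVAL_RESULTS_PATH = "./eval-results"          # placeholder: local copy of the result files
EVAL_REQUESTS_PATH = "./eval-queue"           # placeholder: local copy of the request files
COLS = ["model", "average"]                   # placeholder display columns
BENCHMARK_COLS = ["average"]                  # placeholder benchmark columns
EVAL_COLS = ["model", "revision", "status"]   # placeholder queue columns

leaderboard_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
finished_df, running_df, pending_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

print(leaderboard_df.head())
print(len(finished_df), len(running_df), len(pending_df))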