Commit 6c10fa6
Parent(s): 8b771ed

[TEMP] Offline

Files changed:
- app.py +1 -0
- src/leaderboard/read_evals.py +5 -1
- src/populate.py +1 -1
- src/submission/submit.py +28 -21
app.py
CHANGED
@@ -75,6 +75,7 @@ open_ended_leaderboard_df = open_ended_original_df.copy()
 _, med_safety_original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, MED_SAFETY_COLS, MED_SAFETY_BENCHMARK_COLS, "score", "med_safety")
 med_safety_leaderboard_df = med_safety_original_df.copy()
 
+# breakpoint()
 # breakpoint()
 # # Token based results
 # _, token_based_datasets_original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, DATASET_COLS, DATASET_BENCHMARK_COLS, "TokenBasedWithMacroAverage", "datasets")
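The only functional content this hunk touches is the leaderboard setup around it: each track keeps an untouched *_original_df and hands a .copy() to the UI. A minimal sketch of that copy-before-display pattern, with made-up column names and values (get_leaderboard_df itself is not reproduced here):

import pandas as pd

# Illustrative stand-ins for the frames returned by get_leaderboard_df in app.py.
med_safety_original_df = pd.DataFrame({"model": ["model-a", "model-b"], "score": [0.72, 0.91]})

# The UI filters and sorts a copy, so the original frame stays intact for later
# resets or re-renders.
med_safety_leaderboard_df = med_safety_original_df.copy()
med_safety_leaderboard_df = med_safety_leaderboard_df[med_safety_leaderboard_df["score"] > 0.8]

assert len(med_safety_original_df) == 2  # source frame is unchanged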
src/leaderboard/read_evals.py
CHANGED
@@ -45,7 +45,11 @@ class EvalResult:
     def init_from_json_file(self, json_filepath, evaluation_metric):
         """Inits the result from the specific model result file"""
         with open(json_filepath) as fp:
-            data = json.load(fp)
+            try:
+                data = json.load(fp)
+            except:
+                breakpoint()
+
 
         config = data.get("config")
 
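The change above wraps the JSON parse in a try/except so a malformed result file drops into the debugger instead of aborting the whole leaderboard rebuild. A standalone sketch of the same idea (load_result_file is a hypothetical helper, not a function from the repo; it narrows the bare except to json.JSONDecodeError and re-raises, a slightly stricter variant of the committed code):

import json

def load_result_file(json_filepath: str) -> dict:
    # Defensive load, mirroring EvalResult.init_from_json_file: pause on a
    # malformed result file so it can be inspected interactively.
    with open(json_filepath) as fp:
        try:
            data = json.load(fp)
        except json.JSONDecodeError:
            breakpoint()  # temporary debugging aid, as in the commit
            raise
    return data

As committed, the bare except plus breakpoint() is clearly a temporary measure; left in place it would stop a non-interactive rebuild at the first unreadable file.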
src/populate.py
CHANGED
@@ -68,7 +68,7 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
     for run in all_evals:
         # changes to be made here
         status_list = [run["status"]["closed-ended"], run["status"]["open-ended"], run["status"]["med-safety"], run["status"]["cross-examination"]]
-        status_list = status_list[:
+        status_list = status_list[:3]
         if "RUNNING" in status_list:
             running_list.append(run)
         elif "PENDING" in status_list or "RERUN" in status_list:
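The fix completes the truncated slice: only the first three statuses (closed-ended, open-ended, med-safety) are considered when bucketing a queue entry, so the cross-examination status no longer matters. A small self-contained sketch of that bucketing (the run dict and the final "finished" fallback are illustrative assumptions, not code from the repo):

# Hypothetical run record with the same status keys read in get_evaluation_queue_df.
run = {
    "status": {
        "closed-ended": "FINISHED",
        "open-ended": "FINISHED",
        "med-safety": "FINISHED",
        "cross-examination": "PENDING",
    }
}

status_list = [
    run["status"]["closed-ended"],
    run["status"]["open-ended"],
    run["status"]["med-safety"],
    run["status"]["cross-examination"],
]
status_list = status_list[:3]  # drop cross-examination from the decision

if "RUNNING" in status_list:
    bucket = "running"
elif "PENDING" in status_list or "RERUN" in status_list:
    bucket = "pending"
else:
    bucket = "finished"  # assumption: anything else counts as done

print(bucket)  # -> "finished", even though cross-examination is still PENDING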
src/submission/submit.py
CHANGED
@@ -63,12 +63,17 @@ def add_new_eval(
     global USERS_TO_SUBMISSION_DATES
     if not REQUESTED_MODELS:
         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-    user_name = ""
-    model_path = model
-    if "/" in model:
-        user_name = model.split("/")[0]
-        model_path = model.split("/")[1]
-
+    if model.startswith("/"):
+        user_name = ""
+        model_path = model
+        private = True
+    else:
+        user_name = ""
+        model_path = model
+        if "/" in model:
+            user_name = model.split("/")[0]
+            model_path = model.split("/")[1]
+        private = False
 
     # precision = precision.split(" ")[0]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
@@ -96,22 +101,22 @@ def add_new_eval(
 
     # Is the model info correctly filled?
     try:
-        model_info = API.model_info(repo_id=model, revision=revision)
+        if model.startswith("/"):
+            model_info = API.model_info(repo_id=model, revision=revision)
+            model_size = get_model_size(model_info=model_info)
+            license = model_info.cardData["license"]
+            modelcard_OK, error_msg = check_model_card(model)
+            if not modelcard_OK:
+                return styled_error(error_msg)
+            likes = model_info.likes
+        else:
+            model_size = None
+            license = None
+            likes = 0
     except Exception:
         return styled_error("Could not get your model information. Please fill it up properly.")
 
-    model_size = get_model_size(model_info=model_info)
-
-    # Were the model card and license filled?
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        return styled_error("Please select a license for your model")
-
-    modelcard_OK, error_msg = check_model_card(model)
-    if not modelcard_OK:
-        return styled_error(error_msg)
-
+
     # Verify the inference config now
     # try:
     # label_normalization_map = ast.literal_eval(label_normalization_map)
@@ -143,10 +148,10 @@ def add_new_eval(
         },
         "submitted_time": current_time,
         "model_type": model_type,
-        "likes":
+        "likes": likes,
         "num_params": model_size,
         "license": license,
-        "private":
+        "private": private,
         "slurm_id": None
     }
 
@@ -158,6 +163,8 @@ def add_new_eval(
     print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
+    if model_path.startswith("/"):
+        os.makedirs(f"{OUT_DIR}/{model_path}", exist_ok=True)
     out_path = f"{OUT_DIR}/{model_path}_{revision}_{precision}_{weight_type}_eval_request.json"
 
     with open(out_path, "w") as f:
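Taken together, the submit.py changes teach add_new_eval about local checkpoint paths: anything starting with "/" keeps its full path as model_path and is flagged private, while Hub-style "org/name" ids are split into user_name and model_path as before; the Hub metadata lookup (model_info, size, license, likes) is now guarded by the same check, with one branch falling back to None/0 defaults instead of querying the Hub. A condensed, self-contained sketch of the path handling (parse_submission is a hypothetical helper; in the commit this logic is inline in add_new_eval):

from datetime import datetime, timezone

def parse_submission(model: str) -> dict:
    # Absolute paths are treated as private, offline checkpoints; "org/name"
    # identifiers are split into a Hub namespace and a model name.
    if model.startswith("/"):
        user_name = ""
        model_path = model
        private = True
    else:
        user_name = ""
        model_path = model
        if "/" in model:
            user_name = model.split("/")[0]
            model_path = model.split("/")[1]
        private = False
    return {
        "model": model,
        "user_name": user_name,
        "model_path": model_path,
        "private": private,
        "submitted_time": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
    }

print(parse_submission("/scratch/checkpoints/my-model"))
print(parse_submission("some-org/some-model-7b"))

The extra os.makedirs in the last hunk follows from this: an absolute model_path introduces additional directory levels under OUT_DIR that must exist before the request file at out_path can be written.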