“WadoodAbdul”
committed on
Commit
·
3aa629d
1
Parent(s):
d330702
added option to not display result
Browse files
src/leaderboard/read_evals.py
CHANGED
@@ -33,6 +33,7 @@ class EvalResult:
|
|
33 |
num_params: int = 0
|
34 |
date: str = "" # submission date of request file
|
35 |
still_on_hub: bool = False
|
|
|
36 |
|
37 |
@classmethod
|
38 |
def init_from_json_file(self, json_filepath):
|
@@ -49,6 +50,8 @@ class EvalResult:
|
|
49 |
# print(model_architecture, model_type)
|
50 |
license = config.get("license", "?")
|
51 |
num_params = config.get("num_params", "?")
|
|
|
|
|
52 |
|
53 |
# Get model and org
|
54 |
org_and_model = config.get("model_name", config.get("model_args", None))
|
@@ -112,7 +115,8 @@ class EvalResult:
|
|
112 |
backbone=backbone,
|
113 |
model_type=model_type,
|
114 |
num_params=num_params,
|
115 |
-
license=license
|
|
|
116 |
)
|
117 |
|
118 |
def update_with_request_file(self, requests_path):
|
@@ -154,6 +158,7 @@ class EvalResult:
|
|
154 |
AutoEvalColumn.likes.name: self.likes,
|
155 |
AutoEvalColumn.params.name: self.num_params,
|
156 |
AutoEvalColumn.still_on_hub.name: self.still_on_hub,
|
|
|
157 |
}
|
158 |
|
159 |
for task in Tasks:
|
@@ -178,6 +183,7 @@ class EvalResult:
|
|
178 |
AutoEvalColumn.likes.name: self.likes,
|
179 |
AutoEvalColumn.params.name: self.num_params,
|
180 |
AutoEvalColumn.still_on_hub.name: self.still_on_hub,
|
|
|
181 |
}
|
182 |
|
183 |
for clinical_type in ClinicalTypes:
|
@@ -242,6 +248,8 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
|
|
242 |
for v in eval_results.values():
|
243 |
try:
|
244 |
v.to_dict(subset="dataset") # we test if the dict version is complete
|
|
|
|
|
245 |
results.append(v)
|
246 |
except KeyError: # not all eval values present
|
247 |
continue
|
|
|
33 |
num_params: int = 0
|
34 |
date: str = "" # submission date of request file
|
35 |
still_on_hub: bool = False
|
36 |
+
display_result:bool = True
|
37 |
|
38 |
@classmethod
|
39 |
def init_from_json_file(self, json_filepath):
|
|
|
50 |
# print(model_architecture, model_type)
|
51 |
license = config.get("license", "?")
|
52 |
num_params = config.get("num_params", "?")
|
53 |
+
display_result = config.get("display_result", True)
|
54 |
+
display_result = False if display_result=="False" else True
|
55 |
|
56 |
# Get model and org
|
57 |
org_and_model = config.get("model_name", config.get("model_args", None))
|
|
|
115 |
backbone=backbone,
|
116 |
model_type=model_type,
|
117 |
num_params=num_params,
|
118 |
+
license=license,
|
119 |
+
display_result=display_result
|
120 |
)
|
121 |
|
122 |
def update_with_request_file(self, requests_path):
|
|
|
158 |
AutoEvalColumn.likes.name: self.likes,
|
159 |
AutoEvalColumn.params.name: self.num_params,
|
160 |
AutoEvalColumn.still_on_hub.name: self.still_on_hub,
|
161 |
+
"display_result" : self.display_result,
|
162 |
}
|
163 |
|
164 |
for task in Tasks:
|
|
|
183 |
AutoEvalColumn.likes.name: self.likes,
|
184 |
AutoEvalColumn.params.name: self.num_params,
|
185 |
AutoEvalColumn.still_on_hub.name: self.still_on_hub,
|
186 |
+
"display_result" : self.display_result,
|
187 |
}
|
188 |
|
189 |
for clinical_type in ClinicalTypes:
|
|
|
248 |
for v in eval_results.values():
|
249 |
try:
|
250 |
v.to_dict(subset="dataset") # we test if the dict version is complete
|
251 |
+
if not v.display_result:
|
252 |
+
continue
|
253 |
results.append(v)
|
254 |
except KeyError: # not all eval values present
|
255 |
continue
|