xeon27 committed
Commit 26ed691 · Parent(s): 7dd6db3
Keep empty results
src/leaderboard/read_evals.py
CHANGED
@@ -73,10 +73,14 @@ class EvalResult:
 
             # We average all scores of a given metric (not all metrics are present in all files)
             accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
-            if accs.size == 0 or any([acc is None for acc in accs]):
+            # if accs.size == 0 or any([acc is None for acc in accs]):
+            #     continue
+            if accs.size == 0:
                 continue
-
-            mean_acc = np.mean(accs) * 100.0
+            elif any([acc is None for acc in accs]):
+                mean_acc = None
+            else:
+                mean_acc = np.mean(accs) * 100.0
             results[task.benchmark] = mean_acc
 
         return self(
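
To make the behavioral change concrete, here is a minimal, self-contained sketch of the new branching. The `cases` and `results` harness and the benchmark names are hypothetical stand-ins for the per-task loop in read_evals.py; only the if/elif/else body is taken from the commit.

import numpy as np

# Hypothetical stand-ins for the per-task loop variables in read_evals.py.
results = {}
cases = {
    "empty": np.array([]),             # no matching scores -> benchmark is skipped
    "partial": np.array([0.5, None]),  # a missing metric -> now kept as None
    "complete": np.array([0.5, 0.7]),  # all scores present -> averaged as before
}

for benchmark, accs in cases.items():
    if accs.size == 0:
        continue
    elif any([acc is None for acc in accs]):
        mean_acc = None
    else:
        mean_acc = np.mean(accs) * 100.0
    results[benchmark] = mean_acc

print(results)  # {'partial': None, 'complete': 60.0}

Before this commit, both the "empty" and "partial" cases hit `continue`; now only truly empty results are skipped, so a benchmark with partially missing metric values still appears in `results` (as None) instead of being dropped.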