Clémentine committed
Commit f278160 · Parent(s): 610240a

fix num question count

Files changed (1):
1. app.py +4 -0
app.py CHANGED
@@ -28,6 +28,7 @@ LEADERBOARD_PATH = f"{OWNER}/leaderboard"
 api = HfApi()
 
 YEAR_VERSION = "2023"
+ref_scores_len = {"validation": 165, "test": 301}
 
 os.makedirs("scored", exist_ok=True)
 
@@ -170,6 +171,9 @@ def add_new_eval(
         "score_level3": scores[3]/num_questions[3],
         "date": datetime.datetime.today().strftime('%Y-%m-%d')
     }
+    if len(scores["all"]) != ref_scores_len[val_or_test]:
+        return format_error(f"Your submission has {len(scores['all'])} questions for the {val_or_test} set, but it should have {ref_scores_len[val_or_test]}. Please check your submission.")
+
     eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
     print(eval_results)
     eval_results.push_to_hub(RESULTS_DATASET, config_name = YEAR_VERSION, token=TOKEN)
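
The added lines reject a submission whose answer count does not match the reference size of the chosen split (165 for validation, 301 for test) before the entry is pushed to the results dataset. A minimal standalone sketch of that validation, assuming scores["all"] holds one scored entry per answered question and that the caller wraps the returned message with the app's error formatter (app.py uses a format_error helper for this; the function name check_submission_length below is illustrative, not from the source):

# Expected question counts per split, taken from the diff above.
ref_scores_len = {"validation": 165, "test": 301}

def check_submission_length(scores: dict, val_or_test: str):
    """Return an error message if the submission's question count is wrong, else None."""
    expected = ref_scores_len[val_or_test]
    actual = len(scores["all"])  # assumes one entry per answered question
    if actual != expected:
        return (f"Your submission has {actual} questions for the {val_or_test} set, "
                f"but it should have {expected}. Please check your submission.")
    return None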