SondosMB committed on
Commit 8b5944b · verified · 1 Parent(s): b0b8450

Update app.py

Files changed (1): app.py +2 -1
app.py CHANGED
@@ -361,7 +361,7 @@ def load_leaderboard():
 # except Exception as e:
 # return f"Error during evaluation: {str(e)}", load_leaderboard()
 
-def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
+def evaluate_predictions(prediction_file, model_name,Team_name ,add_to_leaderboard):
     try:
         ground_truth_path = hf_hub_download(
             repo_id="SondosMB/ground-truth-dataset",
@@ -405,6 +405,7 @@ def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
             'overall_accuracy': overall_accuracy,
             'correct_predictions': correct_predictions,
             'total_questions': total_predictions,
+            'Team_name': model_name if model_name else "Unknown Team",
         }
 
         if add_to_leaderboard:
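
The change threads a new Team_name argument through evaluate_predictions and records a 'Team_name' key in the results dictionary; as committed, that key is populated from model_name (falling back to "Unknown Team"), so the new parameter is accepted but not yet used inside this hunk. Below is a minimal sketch of how a caller in the Gradio app might pass the extra argument. The component names and layout are assumptions for illustration and are not taken from app.py; the two-value return (status text plus refreshed leaderboard) is inferred from the commented-out error handler in the hunk above.

```python
import gradio as gr

def evaluate_predictions(prediction_file, model_name, Team_name, add_to_leaderboard):
    # Body elided; see app.py. Assumed to return a status string and the refreshed
    # leaderboard, mirroring the commented-out error path shown in the diff.
    ...

# Hypothetical UI wiring; every component name below is a placeholder.
with gr.Blocks() as demo:
    file_input = gr.File(label="Predictions file")
    model_name_box = gr.Textbox(label="Model name")
    team_name_box = gr.Textbox(label="Team name")  # new input implied by the added parameter
    add_to_lb = gr.Checkbox(label="Add to leaderboard", value=True)
    status = gr.Textbox(label="Status")
    leaderboard = gr.Dataframe(label="Leaderboard")

    # evaluate_predictions now takes four positional arguments, so the Team_name
    # input has to be included in the click handler's inputs as well.
    gr.Button("Evaluate").click(
        fn=evaluate_predictions,
        inputs=[file_input, model_name_box, team_name_box, add_to_lb],
        outputs=[status, leaderboard],
    )
```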