SondosMB committed on
Commit fb73da9 · verified · 1 Parent(s): 6bcbc7b

Update app.py

Files changed (1): app.py +22 -8
app.py CHANGED
@@ -354,7 +354,6 @@
 # gr.Markdown(f"Last updated on **{LAST_UPDATED}**")
 
 # demo.launch()
-
 import gradio as gr
 import pandas as pd
 import os
@@ -364,12 +363,15 @@ from datetime import datetime
 LEADERBOARD_FILE = "leaderboard.csv" # File to store all submissions persistently
 LAST_UPDATED = datetime.now().strftime("%B %d, %Y")
 
-# Initialize the leaderboard file if it doesn't exist
-if not os.path.exists(LEADERBOARD_FILE):
-    pd.DataFrame(columns=[
-        "Model Name", "Overall Accuracy", "Valid Accuracy",
-        "Correct Predictions", "Total Questions", "Timestamp"
-    ]).to_csv(LEADERBOARD_FILE, index=False)
+def initialize_leaderboard_file():
+    """
+    Check if the leaderboard file exists. If not, create it with appropriate headers.
+    """
+    if not os.path.exists(LEADERBOARD_FILE):
+        pd.DataFrame(columns=[
+            "Model Name", "Overall Accuracy", "Valid Accuracy",
+            "Correct Predictions", "Total Questions", "Timestamp"
+        ]).to_csv(LEADERBOARD_FILE, index=False)
 
 def clean_answer(answer):
     """
@@ -403,11 +405,20 @@ def load_leaderboard():
     """
     Load all submissions from the leaderboard file.
     """
+    if not os.path.exists(LEADERBOARD_FILE):
+        return pd.DataFrame({
+            "Model Name": [],
+            "Overall Accuracy": [],
+            "Valid Accuracy": [],
+            "Correct Predictions": [],
+            "Total Questions": [],
+            "Timestamp": [],
+        })
     return pd.read_csv(LEADERBOARD_FILE)
 
 def evaluate_predictions_and_update_leaderboard(prediction_file):
     """
-    Evaluate predictions and append results to the leaderboard.
+    Evaluate predictions, update the leaderboard, and return the updated leaderboard.
     """
     ground_truth_file = "ground_truth.csv"
     if not os.path.exists(ground_truth_file):
@@ -449,6 +460,9 @@ def evaluate_predictions_and_update_leaderboard(prediction_file):
     except Exception as e:
         return f"Error during evaluation: {str(e)}", load_leaderboard()
 
+# Initialize leaderboard file
+initialize_leaderboard_file()
+
 # Gradio Interface
 with gr.Blocks() as demo:
     gr.Markdown("# Prediction Evaluation Tool with Leaderboard")
 