SondosMB committed (verified)
Commit: d6777ef
Parent(s): 97d602f

Update app.py

Files changed (1): app.py (+52 -25)
app.py CHANGED
@@ -406,46 +406,73 @@ with gr.Blocks(css=css_tech_theme) as demo:
     with gr.TabItem("📤 Submission"):
         with gr.Markdown("""
             <div class="submission-section">
-            <h2>Submit Your Predictions</h2>
-            <p>Upload your prediction file and provide your model name to evaluate and submit to the leaderboard.</p> </div>"""):
+            <h2>Submit Your Predictions</h2>
+            <p>Upload your prediction file and provide your model name to evaluate and submit to the leaderboard.</p>
+            </div>
+        """):
             with gr.Row(elem_id="submission-fields"):
                 file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
                 model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
+
             with gr.Row(elem_id="submission-results"):
                 overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
+
             with gr.Row(elem_id="submission-buttons"):
                 eval_button = gr.Button("Evaluate")
                 submit_button = gr.Button("Prove and Submit to Leaderboard", visible=False)
             eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
-        def handle_evaluation(file, model_name):
-            # Check if required inputs are provided
-            if not file:
-                return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
-            if not model_name or model_name.strip() == "":
-                return "Error: Please enter a model name.", 0, gr.update(visible=False)
-            # Perform evaluation
+
+        # Define the functions outside the `with` block
+        def handle_evaluation(file, model_name):
+            # Check if required inputs are provided
+            if not file:
+                return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
+            if not model_name or model_name.strip() == "":
+                return "Error: Please enter a model name.", 0, gr.update(visible=False)
+
+            try:
+                # Load predictions file
+                predictions_df = pd.read_csv(file.name)
+
+                # Validate required columns in the prediction file
+                required_columns = ['question_id', 'predicted_answer']
+                missing_columns = [col for col in required_columns if col not in predictions_df.columns]
+                if missing_columns:
+                    return (f"Error: Missing required columns in prediction file: {', '.join(missing_columns)}.",
+                            0, gr.update(visible=False))
+
+                # Perform evaluation
             status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
             if leaderboard.empty:
                 overall_accuracy = 0
             else:
                 overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
-            # Show the submit button after evaluation
+
+            # Show the submit button after successful evaluation
             return status, overall_accuracy, gr.update(visible=True)
-        def handle_submission(file, model_name):
-            # Handle leaderboard submission
-            status, _ = evaluate_predictions(file, model_name, add_to_leaderboard=True)
-            return f"Submission to leaderboard completed: {status}"
-        eval_button.click(
-            handle_evaluation,
-            inputs=[file_input, model_name_input],
-            outputs=[eval_status, overall_accuracy_display, submit_button],
-        )
-        submit_button.click(
-            handle_submission,
-            inputs=[file_input, model_name_input],
-            outputs=[eval_status],
-        )
-
+
+            except Exception as e:
+                # Handle unexpected errors
+                return f"Error during evaluation: {str(e)}", 0, gr.update(visible=False)
+
+        def handle_submission(file, model_name):
+            # Handle leaderboard submission
+            status, _ = evaluate_predictions(file, model_name, add_to_leaderboard=True)
+            return f"Submission to leaderboard completed: {status}"
+
+        # Connect button clicks to the functions
+        eval_button.click(
+            handle_evaluation,
+            inputs=[file_input, model_name_input],
+            outputs=[eval_status, overall_accuracy_display, submit_button],
+        )
+
+        submit_button.click(
+            handle_submission,
+            inputs=[file_input, model_name_input],
+            outputs=[eval_status],
+        )
+


     with gr.TabItem("🏅 Leaderboard"):
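
The substance of the change is the pre-flight check added to handle_evaluation: the upload is parsed with pandas and rejected before evaluate_predictions ever runs if the required columns are missing, and any unexpected failure is caught and surfaced as a status string instead of crashing the app. A minimal sketch of that validation pattern, lifted out of Gradio (the validate_predictions_csv helper and the in-memory CSV are illustrative, not part of the commit):

import io

import pandas as pd

REQUIRED_COLUMNS = ["question_id", "predicted_answer"]

def validate_predictions_csv(csv_text: str) -> str:
    """Return an error message for a malformed prediction file, or "" if it is usable."""
    try:
        df = pd.read_csv(io.StringIO(csv_text))
    except Exception as e:  # mirrors the commit's broad except around evaluation
        return f"Error during evaluation: {e}"
    missing = [col for col in REQUIRED_COLUMNS if col not in df.columns]
    if missing:
        return f"Error: Missing required columns in prediction file: {', '.join(missing)}."
    return ""

# A well-formed file passes; one without predicted_answer is rejected.
print(validate_predictions_csv("question_id,predicted_answer\n1,A\n"))  # -> ""
print(validate_predictions_csv("question_id,answer\n1,A\n"))            # -> missing-column error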
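
The wiring the commit keeps around the handlers is also worth noting: the submit button starts hidden (visible=False) and is only revealed through the third output of handle_evaluation via gr.update(visible=True), so nothing can reach the leaderboard before a successful evaluation. A stripped-down sketch of that reveal-on-success pattern using the public Gradio Blocks API (the stub handler and its made-up accuracy value are illustrative):

import gradio as gr

def handle_evaluation(file, model_name):
    # Stub: the real handler validates the CSV and calls evaluate_predictions.
    if file is None or not model_name or not model_name.strip():
        return "Error: Missing file or model name.", 0, gr.update(visible=False)
    return "Evaluation complete.", 0.87, gr.update(visible=True)

with gr.Blocks() as demo:
    file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"])
    model_name_input = gr.Textbox(label="Model Name")
    eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
    overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
    eval_button = gr.Button("Evaluate")
    submit_button = gr.Button("Prove and Submit to Leaderboard", visible=False)

    # The handler's third return value toggles the hidden submit button.
    eval_button.click(
        handle_evaluation,
        inputs=[file_input, model_name_input],
        outputs=[eval_status, overall_accuracy_display, submit_button],
    )

if __name__ == "__main__":
    demo.launch()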