SondosMB committed on
Commit
fca838b
·
verified ·
1 Parent(s): 7365a96

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -378,7 +378,7 @@ Check the leaderboard for real-time rankings.
378
  For questions or support, contact us at: [Insert Email Address]
379
  """)
380
 
381
- with gr.TabItem("📤 Submission"):
382
  with gr.Row():
383
  file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
384
  model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
@@ -390,8 +390,16 @@ For questions or support, contact us at: [Insert Email Address]
390
  eval_button = gr.Button("Evaluate")
391
  eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
392
 
 
 
 
 
 
 
 
 
393
  eval_button.click(
394
- evaluate_predictions,
395
  inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
396
  outputs=[eval_status, overall_accuracy_display],
397
  )
 
378
  For questions or support, contact us at: [Insert Email Address]
379
  """)
380
 
381
+ with gr.TabItem("📤 Submission"):
382
  with gr.Row():
383
  file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
384
  model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
 
390
  eval_button = gr.Button("Evaluate")
391
  eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
392
 
393
+ def handle_evaluation(file, model_name, add_to_leaderboard):
394
+ status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
395
+ if leaderboard.empty:
396
+ overall_accuracy = 0
397
+ else:
398
+ overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
399
+ return status, overall_accuracy
400
+
401
  eval_button.click(
402
+ handle_evaluation,
403
  inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
404
  outputs=[eval_status, overall_accuracy_display],
405
  )