SondosMB committed on
Commit
5d7850e
·
verified ·
1 Parent(s): 40a6d25

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -87
app.py CHANGED
@@ -401,98 +401,34 @@ with gr.Blocks(css=css_tech_theme) as demo:
401
  <li>5️⃣ <strong>Leaderboard:</strong> View real-time rankings on the leaderboard.</li>
402
  </ul>
403
  </div>
404
- """)
405
-
406
-
407
- # with gr.TabItem("📤 Submission"):
408
- # gr.Markdown("<div class='tabs'><h2>Submit Your Predictions</h2></div>")
409
- # with gr.Row():
410
- # file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
411
- # model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
412
-
413
- # with gr.Row():
414
- # overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
415
- # add_to_leaderboard_checkbox = gr.Checkbox(label="Add to Leaderboard?", value=True)
416
-
417
- # eval_button = gr.Button("Evaluate")
418
- # eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
419
-
420
- # def handle_evaluation(file, model_name, add_to_leaderboard):
421
- # status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
422
- # if leaderboard.empty:
423
- # overall_accuracy = 0
424
- # else:
425
- # overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
426
- # return status, overall_accuracy
427
-
428
- # eval_button.click(
429
- # handle_evaluation,
430
- # inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
431
- # outputs=[eval_status, overall_accuracy_display],
432
- # )
433
-
434
- # with gr.TabItem("📤 Submission"):
435
- # with gr.Row():
436
- # file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
437
- # model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
438
- # with gr.Row():
439
- # overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
440
- # eval_button = gr.Button("Evaluate")
441
- # eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
442
- # submit_button = gr.Button("Prove and Submit to Leaderboard", visible=False) # Initially hidden
443
- # def handle_evaluation(file, model_name):
444
- # # Check if required inputs are provided
445
- # if not file:
446
- # return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
447
- # if not model_name or model_name.strip() == "":
448
- # return "Error: Please enter a model name.", 0, gr.update(visible=False)
449
- # # Perform evaluation
450
- # status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
451
- # if leaderboard.empty:
452
- # overall_accuracy = 0
453
- # else:
454
- # overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
455
- # # Show the submit button after evaluation
456
- # return status, overall_accuracy, gr.update(visible=True)
457
- # def handle_submission(file, model_name):
458
- # # Handle leaderboard submission
459
- # status, _ = evaluate_predictions(file, model_name, add_to_leaderboard=True)
460
- # return f"Submission to leaderboard completed: {status}"
461
- # eval_button.click(
462
- # handle_evaluation,
463
- # inputs=[file_input, model_name_input],
464
- # outputs=[eval_status, overall_accuracy_display, submit_button],)
465
- # submit_button.click(
466
- # handle_submission,
467
- # inputs=[file_input, model_name_input],
468
- # outputs=[eval_status],)
469
  with gr.TabItem("📤 Submission"):
470
  with gr.Markdown("""
471
  <div class="submission-section">
472
  <h2>Submit Your Predictions</h2>
473
  <p>Upload your prediction file and provide your model name to evaluate and submit to the leaderboard.</p> </div>"""):
474
-
475
- with gr.Row(elem_id="submission-fields"):
476
- file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
477
- model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
478
- with gr.Row(elem_id="submission-results"):
479
- overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
480
- with gr.Row(elem_id="submission-buttons"):
481
- eval_button = gr.Button("Evaluate")
482
- submit_button = gr.Button("Prove and Submit to Leaderboard", visible=False)
483
- eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
484
- def handle_evaluation(file, model_name):
485
- # Check if required inputs are provided
486
- if not file:
487
- return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
488
- if not model_name or model_name.strip() == "":
489
- return "Error: Please enter a model name.", 0, gr.update(visible=False)
490
- # Perform evaluation
491
- status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
492
- if leaderboard.empty:
493
- overall_accuracy = 0
494
- else:
495
- overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
496
  # Show the submit button after evaluation
497
  return status, overall_accuracy, gr.update(visible=True)
498
  def handle_submission(file, model_name):
 
401
  <li>5️⃣ <strong>Leaderboard:</strong> View real-time rankings on the leaderboard.</li>
402
  </ul>
403
  </div>
404
+ """)
405
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
406
  with gr.TabItem("📤 Submission"):
407
  with gr.Markdown("""
408
  <div class="submission-section">
409
  <h2>Submit Your Predictions</h2>
410
  <p>Upload your prediction file and provide your model name to evaluate and submit to the leaderboard.</p> </div>"""):
411
+ with gr.Row(elem_id="submission-fields"):
412
+ file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
413
+ model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
414
+ with gr.Row(elem_id="submission-results"):
415
+ overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
416
+ with gr.Row(elem_id="submission-buttons"):
417
+ eval_button = gr.Button("Evaluate")
418
+ submit_button = gr.Button("Prove and Submit to Leaderboard", visible=False)
419
+ eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
420
+ def handle_evaluation(file, model_name):
421
+ # Check if required inputs are provided
422
+ if not file:
423
+ return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
424
+ if not model_name or model_name.strip() == "":
425
+ return "Error: Please enter a model name.", 0, gr.update(visible=False)
426
+ # Perform evaluation
427
+ status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
428
+ if leaderboard.empty:
429
+ overall_accuracy = 0
430
+ else:
431
+ overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
 
432
  # Show the submit button after evaluation
433
  return status, overall_accuracy, gr.update(visible=True)
434
  def handle_submission(file, model_name):