SondosMB committed (verified)
Commit 5ede32f · Parent(s): f630bf2

Update app.py

Files changed (1): app.py (+47, -15)
app.py CHANGED
@@ -393,32 +393,64 @@ with gr.Blocks(css=css_tech_theme) as demo:
     """)
 
 
+    # with gr.TabItem("📤 Submission"):
+    #     gr.Markdown("<div class='tabs'><h2>Submit Your Predictions</h2></div>")
+    #     with gr.Row():
+    #         file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
+    #         model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
+
+    #     with gr.Row():
+    #         overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
+    #         add_to_leaderboard_checkbox = gr.Checkbox(label="Add to Leaderboard?", value=True)
+
+    #     eval_button = gr.Button("Evaluate")
+    #     eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
+
+    #     def handle_evaluation(file, model_name, add_to_leaderboard):
+    #         status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
+    #         if leaderboard.empty:
+    #             overall_accuracy = 0
+    #         else:
+    #             overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
+    #         return status, overall_accuracy
+
+    #     eval_button.click(
+    #         handle_evaluation,
+    #         inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
+    #         outputs=[eval_status, overall_accuracy_display],
+    #     )
+
     with gr.TabItem("📤 Submission"):
-        gr.Markdown("<div class='tabs'><h2>Submit Your Predictions</h2></div>")
         with gr.Row():
             file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
             model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
-
         with gr.Row():
             overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
-        add_to_leaderboard_checkbox = gr.Checkbox(label="Add to Leaderboard?", value=True)
-
-        eval_button = gr.Button("Evaluate")
-        eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
-
-        def handle_evaluation(file, model_name, add_to_leaderboard):
-            status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
+        eval_button = gr.Button("Evaluate")
+        eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
+        submit_button = gr.Button("Prove and Submit to Leaderboard", visible=False)  # Initially hidden
+        def handle_evaluation(file, model_name):
+            # Perform evaluation
+            status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
             if leaderboard.empty:
                 overall_accuracy = 0
             else:
                 overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
-            return status, overall_accuracy
-
+            # Show the submit button after evaluation
+            return status, overall_accuracy, gr.update(visible=True)
+        def handle_submission(file, model_name):
+            # Handle leaderboard submission
+            status, _ = evaluate_predictions(file, model_name, add_to_leaderboard=True)
+            return f"Submission to leaderboard completed: {status}"
         eval_button.click(
             handle_evaluation,
-            inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
-            outputs=[eval_status, overall_accuracy_display],
-        )
+            inputs=[file_input, model_name_input],
+            outputs=[eval_status, overall_accuracy_display, submit_button],)
+        submit_button.click(
+            handle_submission,
+            inputs=[file_input, model_name_input],
+            outputs=[eval_status],)
+
 
     with gr.TabItem("🏅 Leaderboard"):
         leaderboard_table = gr.Dataframe(
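
The hunk above replaces the old single-step flow (an "Add to Leaderboard?" checkbox, evaluated and submitted in one click) with a two-step flow: "Evaluate" scores the uploaded CSV with add_to_leaderboard=False, then reveals a previously hidden submit button whose handler re-runs the scorer with add_to_leaderboard=True. A minimal, self-contained sketch of this reveal-on-evaluate pattern follows; the evaluate_predictions stub here is hypothetical and stands in for the app's real CSV scorer.

# Sketch of the evaluate-then-submit pattern introduced by this commit.
# evaluate_predictions is a stand-in stub; the real app.py scores the
# uploaded CSV against the benchmark's answer key.
import gradio as gr
import pandas as pd

def evaluate_predictions(file, model_name, add_to_leaderboard):
    # Stub scorer: returns a fixed accuracy. The real implementation parses
    # `file`, computes accuracy, and persists a row iff add_to_leaderboard.
    leaderboard = pd.DataFrame([{"Model": model_name, "Overall Accuracy": 90.0}])
    return f"Evaluated {model_name}", leaderboard

with gr.Blocks() as demo:
    file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"])
    model_name_input = gr.Textbox(label="Model Name")
    overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
    eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
    eval_button = gr.Button("Evaluate")
    # Hidden until a dry-run evaluation succeeds, mirroring the commit.
    submit_button = gr.Button("Submit to Leaderboard", visible=False)

    def handle_evaluation(file, model_name):
        # Dry run: score only, no leaderboard write.
        status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
        accuracy = 0 if leaderboard.empty else leaderboard.iloc[-1]["Overall Accuracy"]
        # Reveal the submit button now that the file has been scored.
        return status, accuracy, gr.update(visible=True)

    def handle_submission(file, model_name):
        # Re-run the scorer, this time persisting the result.
        status, _ = evaluate_predictions(file, model_name, add_to_leaderboard=True)
        return f"Submission to leaderboard completed: {status}"

    eval_button.click(
        handle_evaluation,
        inputs=[file_input, model_name_input],
        outputs=[eval_status, overall_accuracy_display, submit_button],
    )
    submit_button.click(
        handle_submission,
        inputs=[file_input, model_name_input],
        outputs=[eval_status],
    )

if __name__ == "__main__":
    demo.launch()

One consequence of this design is that submission re-scores the file from scratch: the add_to_leaderboard flag, together with the button's visibility, is the only thing gating the leaderboard write.
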
@@ -454,7 +486,7 @@ with gr.Blocks(css=css_tech_theme) as demo:
             </p>
             <div class="social-links">
                 <a href="https://website.com" target="_blank" class="social-link">🌐 Website</a>
-                <a href="https://github.com" target="_blank" class="social-link">🐙 GitHub</a>
+                <a href="https://github.com/VILA-Lab/Mobile-MMLU" target="_blank" class="social-link">🐙 GitHub</a>
             </div>
         </footer>
     """)