SondosMB committed on
Commit
5578c3c
·
verified ·
1 Parent(s): a0dbb90

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -772,6 +772,7 @@ with gr.Blocks(css=css_tech_theme) as demo:
772
  with gr.Row(elem_id="submission-fields"):
773
  file_input = gr.File(label="📂 Upload Prediction CSV", file_types=[".csv"], interactive=True,scale=1, min_width=12000)
774
  model_name_input = gr.Textbox(label="🏷️ Model Name", placeholder="Enter your model name",scale=1, min_width=800)
 
775
 
776
  with gr.Row(elem_id="submission-results"):
777
  overall_accuracy_display = gr.Number(label="📊 Overall Accuracy (%)", interactive=False,scale=1,min_width=1200)
@@ -815,11 +816,13 @@ with gr.Blocks(css=css_tech_theme) as demo:
815
  # # Handle unexpected errors
816
  # return f"Error during evaluation: {str(e)}", 0, gr.update(visible=False)
817
 
818
- def handle_evaluation(file, model_name):
819
  if not file:
820
  return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
821
  if not model_name or model_name.strip() == "":
822
  return "Error: Please enter a model name.", 0, gr.update(visible=False)
 
 
823
 
824
  try:
825
  # Load predictions file
@@ -861,21 +864,21 @@ with gr.Blocks(css=css_tech_theme) as demo:
861
 
862
 
863
 
864
- def handle_submission(file, model_name):
865
  # Handle leaderboard submission
866
- status, _ = evaluate_predictions(file, model_name, add_to_leaderboard=True)
867
  return f"Submission to leaderboard completed: {status}"
868
 
869
  # Connect button clicks to the functions
870
  eval_button.click(
871
  handle_evaluation,
872
- inputs=[file_input, model_name_input],
873
  outputs=[eval_status, overall_accuracy_display, submit_button],
874
  )
875
 
876
  submit_button.click(
877
  handle_submission,
878
- inputs=[file_input, model_name_input],
879
  outputs=[eval_status],
880
  )
881
 
 
772
  with gr.Row(elem_id="submission-fields"):
773
  file_input = gr.File(label="📂 Upload Prediction CSV", file_types=[".csv"], interactive=True,scale=1, min_width=12000)
774
  model_name_input = gr.Textbox(label="🏷️ Model Name", placeholder="Enter your model name",scale=1, min_width=800)
775
+ Team_name_input = gr.Textbox(label="🏷️ Team Name", placeholder="Enter your Team name",scale=1, min_width=800)
776
 
777
  with gr.Row(elem_id="submission-results"):
778
  overall_accuracy_display = gr.Number(label="📊 Overall Accuracy (%)", interactive=False,scale=1,min_width=1200)
 
816
  # # Handle unexpected errors
817
  # return f"Error during evaluation: {str(e)}", 0, gr.update(visible=False)
818
 
819
+ def handle_evaluation(file, model_name, Team_name):
820
  if not file:
821
  return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
822
  if not model_name or model_name.strip() == "":
823
  return "Error: Please enter a model name.", 0, gr.update(visible=False)
824
+ if not Team_name or Team_name.strip() == "":
825
+ return "Error: Please enter a Team name.", 0, gr.update(visible=False)
826
 
827
  try:
828
  # Load predictions file
 
864
 
865
 
866
 
867
+ def handle_submission(file, model_name,Team_name):
868
  # Handle leaderboard submission
869
+ status, _ = evaluate_predictions(file, model_name,Team_name, add_to_leaderboard=True)
870
  return f"Submission to leaderboard completed: {status}"
871
 
872
  # Connect button clicks to the functions
873
  eval_button.click(
874
  handle_evaluation,
875
+ inputs=[file_input, model_name_input,Team_name_input],
876
  outputs=[eval_status, overall_accuracy_display, submit_button],
877
  )
878
 
879
  submit_button.click(
880
  handle_submission,
881
+ inputs=[file_input, model_name_input,Team_name_input],
882
  outputs=[eval_status],
883
  )
884