SondosMB committed (verified)
Commit 1ce757f · 1 Parent: cd3118d

Update app.py

Files changed (1):
  app.py (+9 -37)
app.py CHANGED
@@ -641,40 +641,8 @@ with gr.Blocks(css=css_tech_theme) as demo:
     eval_status = gr.Textbox(label="🛠️ Evaluation Status", interactive=False,scale=1,min_width=1200)
 
     # Define the functions outside the `with` block
-    # def handle_evaluation(file, model_name):
-    #     # Check if required inputs are provided
-    #     if not file:
-    #         return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
-    #     if not model_name or model_name.strip() == "":
-    #         return "Error: Please enter a model name.", 0, gr.update(visible=False)
-
-    #     try:
-    #         # Load predictions file
-    #         predictions_df = pd.read_csv(file.name)
-
-    #         # Validate required columns in the prediction file
-    #         required_columns = ['question_id', 'predicted_answer']
-    #         missing_columns = [col for col in required_columns if col not in predictions_df.columns]
-    #         if missing_columns:
-    #             return (f"Error: Missing required columns in prediction file: {', '.join(missing_columns)}.",
-    #                     0, gr.update(visible=False))
-
-    #         # Perform evaluation
-    #         status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
-    #         if leaderboard.empty:
-    #             overall_accuracy = 0
-    #         else:
-    #             overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
-
-    #         # Show the submit button after successful evaluation
-    #         return status, overall_accuracy, gr.update(visible=True)
-
-    #     except Exception as e:
-    #         # Handle unexpected errors
-    #         return f"Error during evaluation: {str(e)}", 0, gr.update(visible=False)
-
     def handle_evaluation(file, model_name):
-        # Check if required inputs are provided
+        # Check if required inputs are provided
         if not file:
             return "Error: Please upload a prediction file.", 0, gr.update(visible=False)
         if not model_name or model_name.strip() == "":
@@ -692,16 +660,20 @@ with gr.Blocks(css=css_tech_theme) as demo:
                     0, gr.update(visible=False))
 
             # Perform evaluation
-            results = evaluate_predictions(file, model_name, add_to_leaderboard=False)
-            overall_accuracy = results['overall_accuracy'] * 100  # Convert to percentage
-
+            status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard=False)
+            if leaderboard.empty:
+                overall_accuracy = 0
+            else:
+                overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
+
             # Show the submit button after successful evaluation
-            return "Evaluation completed.", overall_accuracy, gr.update(visible=True)
+            return status, overall_accuracy, gr.update(visible=True)
 
         except Exception as e:
             # Handle unexpected errors
             return f"Error during evaluation: {str(e)}", 0, gr.update(visible=False)
 
+
 
     def handle_submission(file, model_name):
         # Handle leaderboard submission
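
Note on the new contract: the rewritten handle_evaluation unpacks evaluate_predictions as a (status, leaderboard) pair and reads the last row's "Overall Accuracy", where the replaced code expected a dict carrying an 'overall_accuracy' fraction. evaluate_predictions itself is defined elsewhere in app.py and is not part of this diff; below is a minimal sketch of the shape the call site now assumes (the scoring line is a hypothetical placeholder, not the app's real metric):

import pandas as pd

def evaluate_predictions(file, model_name, add_to_leaderboard=False):
    # Hypothetical stub matching the call site above: returns a status
    # string plus a leaderboard DataFrame whose last row holds the model
    # just evaluated, with an "Overall Accuracy" column.
    predictions_df = pd.read_csv(file.name)
    # Placeholder score: a real implementation would compare
    # predicted_answer against held-out ground-truth answers.
    accuracy = 100.0 * predictions_df["predicted_answer"].notna().mean()
    leaderboard = pd.DataFrame(
        [{"Model Name": model_name, "Overall Accuracy": accuracy}]
    )
    return f"Evaluation completed for {model_name}.", leaderboard

One behavioral difference worth noting: the old code multiplied the returned fraction by 100, while the new code reports the leaderboard value as-is, so "Overall Accuracy" is presumably already stored as a percentage.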
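
The function's 3-tuple (status text, accuracy, gr.update(...)) implies three Gradio output components, with gr.update(visible=...) toggling the submit button. A sketch of the wiring this suggests, assuming component names (file_input, model_name_input, accuracy_box, submit_button, eval_button) that do not appear in this hunk, with handle_evaluation as defined in the diff:

import gradio as gr

with gr.Blocks() as demo:
    file_input = gr.File(label="Prediction file (CSV)")
    model_name_input = gr.Textbox(label="Model name")
    eval_status = gr.Textbox(label="🛠️ Evaluation Status", interactive=False)
    accuracy_box = gr.Number(label="Overall Accuracy")
    submit_button = gr.Button("Submit to leaderboard", visible=False)
    eval_button = gr.Button("Evaluate")
    # Each element of handle_evaluation's return tuple lands in one output:
    # status -> eval_status, accuracy -> accuracy_box,
    # gr.update(visible=...) -> submit_button.
    eval_button.click(
        fn=handle_evaluation,
        inputs=[file_input, model_name_input],
        outputs=[eval_status, accuracy_box, submit_button],
    )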