SondosMB committed
Commit 3be6b18 · verified · 1 Parent(s): 958befc

Update app.py

Files changed (1): app.py (+17 -9)
app.py CHANGED
@@ -213,18 +213,26 @@ with gr.Blocks(css=css_tech_theme) as demo:
 
     with gr.TabItem("📤 Submission"):
         with gr.Row():
-            file_input = gr.File(label="📂 Upload Prediction CSV", file_types=[".csv"], interactive=True)
-            model_name_input = gr.Textbox(label="🖋️ Model Name", placeholder="Enter your model name")
+            file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
+            model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
 
         with gr.Row():
-            overall_accuracy_display = gr.Number(label="🏅 Overall Accuracy", interactive=False)
-            add_to_leaderboard_checkbox = gr.Checkbox(label="📊 Add to Leaderboard?", value=True)
+            overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
+            add_to_leaderboard_checkbox = gr.Checkbox(label="Add to Leaderboard?", value=True)
 
-        eval_button = gr.Button("Evaluate", elem_id="evaluate-button")
-        eval_status = gr.Textbox(label="📢 Evaluation Status", interactive=False)
+        eval_button = gr.Button("Evaluate")
+        eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
+
+        def handle_evaluation(file, model_name, add_to_leaderboard):
+            status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
+            if leaderboard.empty:
+                overall_accuracy = 0
+            else:
+                overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
+            return status, overall_accuracy
 
         eval_button.click(
-            evaluate_predictions,
+            handle_evaluation,
             inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
             outputs=[eval_status, overall_accuracy_display],
         )
@@ -243,6 +251,6 @@ with gr.Blocks(css=css_tech_theme) as demo:
         outputs=[leaderboard_table],
     )
 
-    gr.Markdown(f"**Last updated:** {LAST_UPDATED}")
+    gr.Markdown(f"Last updated on **{LAST_UPDATED}**")
 
-demo.launch()
+demo.launch()
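
Note on the change: the new handle_evaluation wrapper assumes evaluate_predictions returns a status string plus a pandas DataFrame leaderboard with an "Overall Accuracy" column, whose last row is the newly evaluated model. Below is a minimal sketch of that assumed contract, not this repo's actual implementation; the "prediction" and "label" column names are invented for illustration:

import pandas as pd

def evaluate_predictions(file, model_name, add_to_leaderboard):
    # Hypothetical stand-in: illustrates only the (status, DataFrame)
    # return shape that handle_evaluation relies on.
    # Depending on the Gradio version, `file` may be a filepath string
    # or a tempfile-like object exposing .name.
    path = file if isinstance(file, str) else file.name
    preds = pd.read_csv(path)
    # Assumed column names, for illustration only.
    accuracy = float((preds["prediction"] == preds["label"]).mean())
    leaderboard = pd.DataFrame(
        [{"Model Name": model_name, "Overall Accuracy": accuracy}]
    )
    status = f"Evaluated {model_name}: overall accuracy {accuracy:.2%}"
    return status, leaderboard

With that shape, leaderboard.iloc[-1]["Overall Accuracy"] in handle_evaluation reads the score of the row just produced, and an empty DataFrame signals a failed evaluation, reported as 0.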