SondosMB committed
Commit 54aa8d9 · verified · 1 Parent(s): c2fa8d0

Update app.py

Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -308,7 +308,6 @@ def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
         return f"Error during evaluation: {str(e)}", load_leaderboard()
 
 initialize_leaderboard_file()
-
 with gr.Blocks() as demo:
     gr.Markdown("""
     # Competition Title
@@ -337,7 +336,7 @@ Mobile-MMLU is a benchmark designed to test the capabilities of LLMs optimized f
    Access the dataset and detailed generation instructions on our [GitHub page](https://github.com/your-github-repo).
 
    2. **Generate Predictions**
-   Use your LLM to answer the questions and format your predictions as a CSV file with the following structure as written on our gothub page :
+   Use your LLM to answer the questions and format your predictions as a CSV file with the following structure as written on our GitHub page :
 
    3. **Submit Predictions**
    Upload your predictions via the submission portal.
@@ -375,7 +374,7 @@ Check the leaderboard for real-time rankings.
   For questions or support, contact us at: [Insert Email Address]
   """)
 
-    with gr.TabItem("📤 Submission"):
+    with gr.TabItem("📤 Submission"):
        with gr.Row():
           file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
           model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
@@ -393,7 +392,6 @@ For questions or support, contact us at: [Insert Email Address]
       outputs=[eval_status, overall_accuracy_display],
    )
 
-
    with gr.TabItem("🏅 Leaderboard"):
       leaderboard_table = gr.Dataframe(
          value=load_leaderboard(),
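
The "Generate Predictions" step in the second hunk points to a CSV layout documented on the competition's GitHub page; that layout is not part of this diff. Purely as an illustration, assuming hypothetical columns named question_id and predicted_answer, a submission file could be produced like this:

# Illustration only: "question_id" and "predicted_answer" are assumed
# column names, not the official schema from the competition's GitHub page.
import pandas as pd

predictions = pd.DataFrame(
    {
        "question_id": ["q1", "q2", "q3"],    # one row per benchmark question
        "predicted_answer": ["A", "C", "B"],  # the model's chosen option
    }
)
predictions.to_csv("predictions.csv", index=False)  # file to upload in the Submission tab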
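
For context on the later hunks, here is a minimal, self-contained sketch of the Gradio wiring this commit touches. evaluate_predictions and load_leaderboard are stand-in stubs (assumptions), and the checkbox, button, status box, and accuracy display are guessed components; only the pieces quoted in the diff (the Submission and Leaderboard tabs, the file and model-name inputs, and the click outputs) come from app.py itself.

# Minimal sketch of the Gradio layout this commit edits. The two functions
# below are stand-in stubs (assumptions), not the real logic from app.py.
import gradio as gr
import pandas as pd


def load_leaderboard():
    # Stub: the real app loads a persisted leaderboard file.
    return pd.DataFrame({"Model": [], "Overall Accuracy": []})


def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
    # Stub: the real app scores the uploaded CSV against the benchmark.
    try:
        path = prediction_file if isinstance(prediction_file, str) else prediction_file.name
        df = pd.read_csv(path)
        accuracy = 0.0  # placeholder score
        return f"Evaluated {model_name} on {len(df)} rows.", accuracy
    except Exception as e:
        return f"Error during evaluation: {str(e)}", 0.0


with gr.Blocks() as demo:
    gr.Markdown("# Competition Title")
    with gr.Tabs():
        with gr.TabItem("📤 Submission"):
            with gr.Row():
                file_input = gr.File(label="Upload Prediction CSV",
                                     file_types=[".csv"], interactive=True)
                model_name_input = gr.Textbox(label="Model Name",
                                              placeholder="Enter your model name")
            add_to_leaderboard = gr.Checkbox(label="Add to leaderboard", value=True)  # assumed
            eval_status = gr.Textbox(label="Evaluation Status")                       # assumed
            overall_accuracy_display = gr.Number(label="Overall Accuracy")            # assumed
            gr.Button("Evaluate").click(                                              # assumed label
                fn=evaluate_predictions,
                inputs=[file_input, model_name_input, add_to_leaderboard],
                outputs=[eval_status, overall_accuracy_display],
            )
        with gr.TabItem("🏅 Leaderboard"):
            leaderboard_table = gr.Dataframe(value=load_leaderboard())

if __name__ == "__main__":
    demo.launch()

Running the script launches the two-tab demo locally via demo.launch().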