Update app.py
app.py CHANGED
@@ -516,18 +516,7 @@ with gr.Blocks(css=css_tech_theme) as demo:
             <h3 style="color: #6a1b9a;">How It Works</h3>
             <ol>
                 <li>1️⃣ <strong>Download the Dataset:</strong> Access the dataset and detailed instructions on the <a href="https://github.com/your-github-repo" target="_blank">GitHub page</a>. Follow the steps to ensure your environment is set up correctly.</li>
-                <li>2️⃣ <strong>Generate Predictions:</strong> Use the provided script in the GitHub repository to generate answers. Ensure the output file matches the
-            </ol>
-            <pre style="background-color: #f9f7fd; border: 1px solid #d3bce8; border-radius: 8px; padding: 10px;">
-question_id,predicted_answer
-q1,A
-q2,B
-q3,C
-...
-            </pre>
-            <p>The <code>question_id</code> is the unique identifier for each question, and <code>predicted_answer</code> is your model's prediction (A, B, C, or D).</p>
-
-            <ol start="3">
+                <li>2️⃣ <strong>Generate Predictions:</strong> Use the provided script in the GitHub repository to generate answers. Ensure the output file matches the format in the github </li>
                 <li>3️⃣ <strong>Submit Predictions:</strong> Upload your CSV file to the <strong>Submission Page</strong> on this platform.</li>
                 <li>4️⃣ <strong>Evaluation:</strong> Your submission will be scored based on accuracy. The results will include overall and valid accuracy metrics.</li>
                 <li>5️⃣ <strong>Leaderboard:</strong> Optionally, add your results to the real-time leaderboard to compare your model's performance with others.</li>
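The removed help text still documents the expected upload: a CSV with a `question_id` column and a `predicted_answer` column holding one of A, B, C, or D per question. A minimal sketch of producing a file in that layout (the question list and the `answer_question` helper are placeholders, not part of the repository):

```python
import csv

# Placeholder inputs: the real question IDs come from the downloaded dataset,
# and answer_question stands in for the model call (both are assumptions).
question_ids = ["q1", "q2", "q3"]

def answer_question(question_id: str) -> str:
    """Return the model's choice for one question: 'A', 'B', 'C', or 'D'."""
    return "A"

with open("predictions.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["question_id", "predicted_answer"])  # header shown in the removed <pre> block
    for qid in question_ids:
        writer.writerow([qid, answer_question(qid)])
```

Any script that emits exactly these two columns, one row per question, matches the format described in the removed instructions.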
@@ -550,16 +539,16 @@ with gr.Blocks(css=css_tech_theme) as demo:
         </div>
     """)
     with gr.Row(elem_id="submission-fields"):
-        file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True,scale=1,
-        model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name",scale=1,
+        file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True,scale=1, max_width=100%)
+        model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name",scale=1, max_width=100%)
 
     with gr.Row(elem_id="submission-results"):
-        overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False,scale=1,
+        overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False,scale=1, max_width=100%)
 
     with gr.Row(elem_id="submission-buttons"):
-        eval_button = gr.Button("Evaluate")
-        submit_button = gr.Button("Prove and Submit to Leaderboard", elem_id="evaluation-status", visible=False)
-        eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
+        eval_button = gr.Button("Evaluate",scale=1, max_width=100%)
+        submit_button = gr.Button("Prove and Submit to Leaderboard", elem_id="evaluation-status", visible=False,scale=1, max_width=100%)
+        eval_status = gr.Textbox(label="Evaluation Status", interactive=False,scale=1, max_width=100%)
 
 # Define the functions outside the `with` block
 def handle_evaluation(file, model_name):
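Note that a bare `100%` does not parse as Python, and recent Gradio releases size components inside a `Row` with the `scale` and `min_width` keywords rather than a `max_width` argument. A layout sketch of the same rows using only those parameters, with equal `scale` values so the components share the row width evenly (an illustration of the layout kwargs, not the committed code):

```python
import gradio as gr

# Layout sketch only: sizing via the documented `scale` kwarg;
# `max_width=100%` is not valid Python syntax.
with gr.Blocks() as demo:
    with gr.Row(elem_id="submission-fields"):
        file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"],
                             interactive=True, scale=1)
        model_name_input = gr.Textbox(label="Model Name",
                                      placeholder="Enter your model name", scale=1)

    with gr.Row(elem_id="submission-results"):
        overall_accuracy_display = gr.Number(label="Overall Accuracy",
                                             interactive=False, scale=1)

    with gr.Row(elem_id="submission-buttons"):
        eval_button = gr.Button("Evaluate", scale=1)
        submit_button = gr.Button("Prove and Submit to Leaderboard",
                                  elem_id="evaluation-status", visible=False, scale=1)
        eval_status = gr.Textbox(label="Evaluation Status", interactive=False, scale=1)
```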
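The body of `handle_evaluation` lies outside this hunk. Based on the "How It Works" text, it scores an uploaded CSV for overall accuracy and for accuracy over valid (A–D) answers. A hypothetical sketch under those assumptions; the ground-truth file name `answers.csv`, its `answer` column, and the return wiring to `overall_accuracy_display` and `eval_status` are all assumed, not taken from the repository:

```python
import pandas as pd

VALID_CHOICES = {"A", "B", "C", "D"}

def handle_evaluation(file, model_name):
    """Hypothetical scoring sketch; the real implementation is not shown in this diff."""
    # gr.File may hand back a path string or a tempfile-like object depending on version.
    path = file if isinstance(file, str) else file.name
    preds = pd.read_csv(path)
    truth = pd.read_csv("answers.csv")  # assumed ground-truth file: question_id,answer

    merged = truth.merge(preds, on="question_id", how="left")
    overall_accuracy = (merged["predicted_answer"] == merged["answer"]).mean()

    # "Valid" accuracy: restrict to rows whose prediction is one of A/B/C/D.
    valid = merged[merged["predicted_answer"].isin(VALID_CHOICES)]
    valid_accuracy = (valid["predicted_answer"] == valid["answer"]).mean() if len(valid) else 0.0

    status = f"{model_name}: overall accuracy {overall_accuracy:.2%}, valid accuracy {valid_accuracy:.2%}"
    # Values intended for overall_accuracy_display and eval_status (output wiring assumed).
    return float(overall_accuracy), status
```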