Update app.py
app.py CHANGED
@@ -388,7 +388,7 @@ display:flex;
 padding: 12px 15px;
 font-size: 1em;
 border: 2px solid #d3bce8;
-
+border-color: #5e1287;
 border-radius: 8px;
 box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.05);
 transition: border-color 0.3s ease;
@@ -501,45 +501,45 @@ with gr.Blocks(css=css_tech_theme) as demo:
     with gr.TabItem("π Overview"):
         gr.Markdown("""
         <div class="tabs">
-
-
-
-
-
-
-
-
-
-
-
-
- <h3 style="color: #6a1b9a;">How It Works</h3>
- <ol>
- <li>1️⃣ <strong>Download the Dataset:</strong> Access the dataset and detailed instructions on the <a href="https://github.com/your-github-repo" target="_blank">GitHub page</a>. Follow the steps to ensure your environment is set up correctly.</li>
- <li>2️⃣ <strong>Generate Predictions:</strong> Use the provided script in the GitHub repository to generate answers. Ensure the output file matches the following format:</li>
- </ol>
- <pre style="background-color: #f9f7fd; border: 1px solid #d3bce8; border-radius: 8px; padding: 10px;">
- question_id,predicted_answer
- q1,A
- q2,B
- q3,C
- ...
- </pre>
- <p>The <code>question_id</code> is the unique identifier for each question, and <code>predicted_answer</code> is your model's prediction (A, B, C, or D).</p>
-
- <ol start="3">
- <li>3️⃣ <strong>Submit Predictions:</strong> Upload your CSV file to the <strong>Submission Page</strong> on this platform.</li>
- <li>4️⃣ <strong>Evaluation:</strong> Your submission will be scored based on accuracy. The results will include overall and valid accuracy metrics.</li>
- <li>5️⃣ <strong>Leaderboard:</strong> Optionally, add your results to the real-time leaderboard to compare your model's performance with others.</li>
- </ol>
-
-
-
-
-
-
-
-
+ <h2 style="color: #6a1b9a; text-align: center;">About the Competition</h2>
+ <p>The <strong>Mobile-MMLU Benchmark Competition</strong> is a premier challenge designed to evaluate and advance mobile-optimized Large Language Models (LLMs). This competition is an excellent opportunity to showcase your model's ability to handle real-world scenarios and excel in mobile intelligence.</p>
+ <p>With a dataset spanning <strong>80 distinct fields</strong> and featuring <strong>16,186 questions</strong>, the competition emphasizes practical applications, from education and healthcare to technology and daily life.</p>
+
+ <h3 style="color: #8e44ad;">Why Compete?</h3>
+ <p>Participating in this competition allows you to:</p>
+ <ul>
+ <li>π Showcase your expertise in developing and optimizing LLMs for mobile platforms.</li>
+ <li>π Benchmark your model's performance against others in a highly competitive environment.</li>
+ <li>π Contribute to advancements in mobile AI, shaping the future of user-centric AI systems.</li>
+ </ul>
+
+ <h3 style="color: #6a1b9a;">How It Works</h3>
+ <ol>
+ <li>1️⃣ <strong>Download the Dataset:</strong> Access the dataset and detailed instructions on the <a href="https://github.com/your-github-repo" target="_blank">GitHub page</a>. Follow the steps to ensure your environment is set up correctly.</li>
+ <li>2️⃣ <strong>Generate Predictions:</strong> Use the provided script in the GitHub repository to generate answers. Ensure the output file matches the following format:</li>
+ </ol>
+ <pre style="background-color: #f9f7fd; border: 1px solid #d3bce8; border-radius: 8px; padding: 10px;">
+ question_id,predicted_answer
+ q1,A
+ q2,B
+ q3,C
+ ...
+ </pre>
+ <p>The <code>question_id</code> is the unique identifier for each question, and <code>predicted_answer</code> is your model's prediction (A, B, C, or D).</p>
+
+ <ol start="3">
+ <li>3️⃣ <strong>Submit Predictions:</strong> Upload your CSV file to the <strong>Submission Page</strong> on this platform.</li>
+ <li>4️⃣ <strong>Evaluation:</strong> Your submission will be scored based on accuracy. The results will include overall and valid accuracy metrics.</li>
+ <li>5️⃣ <strong>Leaderboard:</strong> Optionally, add your results to the real-time leaderboard to compare your model's performance with others.</li>
+ </ol>
+
+ <h3 style="color: #8e44ad;">Resources</h3>
+ <ul>
+ <li>π <a href="https://github.com/your-github-repo" target="_blank">GitHub Repository</a>: Contains the dataset, scripts, and detailed instructions.</li>
+ <li>π <a href="https://github.com/your-dataset-link" target="_blank">Dataset Link</a>: Direct access to the competition dataset.</li>
+ <li>β <a href="https://github.com/your-github-repo/issues" target="_blank">Support Page</a>: Use this for queries or issues during participation.</li>
+ </ul>
+ </div>
         """)

     with gr.TabItem("π€ Submission"):
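For reference, the submission format documented in the hunk above is a plain CSV with a `question_id,predicted_answer` header. A minimal sketch of producing such a file follows; it is not part of this commit, and `load_questions` and `model_predict` are hypothetical placeholders for the dataset loader and inference call described in the competition's GitHub repository.

```python
import csv

def load_questions():
    # Hypothetical placeholder: in practice, load question IDs from the
    # competition dataset distributed via the GitHub repository.
    return [{"question_id": "q1"}, {"question_id": "q2"}, {"question_id": "q3"}]

def model_predict(question):
    # Hypothetical placeholder: run your model and return "A", "B", "C", or "D".
    return "A"

with open("predictions.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["question_id", "predicted_answer"])  # header row expected on upload
    for question in load_questions():
        writer.writerow([question["question_id"], model_predict(question)])
```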
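The evaluation step mentions overall and valid accuracy without defining them in this hunk. One plausible reading, offered only as an assumption rather than the scorer actually used in app.py, is that overall accuracy divides correct answers by all questions in the answer key, while valid accuracy divides by the submitted rows that carry a well-formed A-D choice:

```python
def score(predictions, answer_key):
    """predictions / answer_key: dicts mapping question_id -> answer letter.

    Assumed definitions (not taken from the commit):
    - overall accuracy: correct / total questions in the answer key
    - valid accuracy:   correct / submitted answers that are one of A-D
    """
    valid = {qid: p for qid, p in predictions.items() if p in {"A", "B", "C", "D"}}
    correct = sum(1 for qid, p in valid.items() if answer_key.get(qid) == p)
    overall_accuracy = correct / len(answer_key) if answer_key else 0.0
    valid_accuracy = correct / len(valid) if valid else 0.0
    return overall_accuracy, valid_accuracy
```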