|
import gradio as gr |
|
import pandas as pd |
|
import os |
|
import re |
|
from datetime import datetime |
|
from huggingface_hub import hf_hub_download |
|
from huggingface_hub import HfApi, HfFolder |
|
|
|
# Local CSV that stores submission results displayed in the UI.
LEADERBOARD_FILE = "leaderboard.csv"

# Name of the ground-truth answer file fetched from the HF dataset repo.
GROUND_TRUTH_FILE = "ground_truth.csv"

# Human-readable build/update date; not referenced in this chunk —
# presumably rendered by the UI elsewhere; verify against callers.
LAST_UPDATED = datetime.now().strftime("%B %d, %Y")


# Silence HF Hub symlink warnings (e.g. on Windows without symlink support).
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"

# A Hugging Face token is required: dataset download and Space upload below
# both authenticate against the Hub. Fail fast at import time if missing.
HF_TOKEN = os.getenv("HF_TOKEN")

if not HF_TOKEN:

    raise ValueError("HF_TOKEN environment variable is not set or invalid.")
|
|
|
def initialize_leaderboard_file():
    """
    Ensure the leaderboard CSV exists and contains the expected headers.

    Creates a header-only file when LEADERBOARD_FILE is missing or empty;
    an existing non-empty file is left untouched.
    """
    columns = [
        "Model Name", "Overall Accuracy", "Valid Accuracy",
        "Correct Predictions", "Total Questions", "Timestamp",
    ]
    # The original duplicated identical DataFrame/to_csv code in both the
    # "missing" and "empty" branches; one combined condition covers both.
    if not os.path.exists(LEADERBOARD_FILE) or os.stat(LEADERBOARD_FILE).st_size == 0:
        pd.DataFrame(columns=columns).to_csv(LEADERBOARD_FILE, index=False)
|
|
|
def clean_answer(answer):
    """
    Normalize a raw predicted answer to a single uppercase letter A-D.

    Returns None for missing values (NaN/None) or when the input contains
    no character in [A-Da-d]; otherwise the first such character, uppercased.
    """
    if pd.isna(answer):
        return None
    letters_only = re.sub(r'[^A-Da-d]', '', str(answer))
    if not letters_only:
        return None
    return letters_only[0].upper()
|
|
|
|
|
def update_leaderboard(results):
    """
    Append a submission's results to the leaderboard CSV and push the
    updated file to the Hugging Face Space repository.

    Parameters
    ----------
    results : dict
        Expected keys: 'model_name', 'overall_accuracy', 'valid_accuracy',
        'correct_predictions', 'total_questions'. Accuracies are fractions
        in [0, 1]; they are stored as percentages rounded to 2 decimals.

    Errors from the local write or the remote upload are logged and
    swallowed (best-effort), preserving the original behavior.
    """
    new_entry = {
        "Model Name": results['model_name'],
        "Overall Accuracy": round(results['overall_accuracy'] * 100, 2),
        "Valid Accuracy": round(results['valid_accuracy'] * 100, 2),
        "Correct Predictions": results['correct_predictions'],
        "Total Questions": results['total_questions'],
        "Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }

    try:
        new_entry_df = pd.DataFrame([new_entry])

        # BUG FIX: the original wrote the header only when the file did not
        # exist; an existing-but-empty file would receive rows with no header
        # line. Also require a non-zero size before suppressing the header.
        needs_header = (not os.path.exists(LEADERBOARD_FILE)
                        or os.stat(LEADERBOARD_FILE).st_size == 0)

        new_entry_df.to_csv(
            LEADERBOARD_FILE,
            mode='a',
            index=False,
            header=needs_header,
        )
        print(f"Leaderboard updated successfully at {LEADERBOARD_FILE}")

        # Push the refreshed CSV back to the Space so the hosted copy stays
        # in sync with the local file.
        api = HfApi()
        token = HfFolder.get_token()
        api.upload_file(
            path_or_fileobj=LEADERBOARD_FILE,
            path_in_repo="leaderboard.csv",
            repo_id="SondosMB/ss",
            repo_type="space",
            token=token,
        )
        print("Leaderboard changes pushed to Hugging Face repository.")

    except Exception as e:
        print(f"Error updating leaderboard file: {e}")
|
|
|
|
|
|
|
def load_leaderboard():
    """
    Load the leaderboard CSV as a DataFrame.

    When the file is missing or empty, return an empty DataFrame with the
    leaderboard's column schema instead of raising.
    """
    schema = [
        "Model Name",
        "Overall Accuracy",
        "Valid Accuracy",
        "Correct Predictions",
        "Total Questions",
        "Timestamp",
    ]
    file_missing = not os.path.exists(LEADERBOARD_FILE)
    if file_missing or os.stat(LEADERBOARD_FILE).st_size == 0:
        return pd.DataFrame({column: [] for column in schema})
    return pd.read_csv(LEADERBOARD_FILE)
|
|
|
def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
    """
    Score an uploaded prediction CSV against the ground-truth dataset.

    Parameters
    ----------
    prediction_file : gradio file object (or None)
        Uploaded CSV with 'question_id' and 'predicted_answer' columns.
    model_name : str
        Display name for the leaderboard; empty falls back to "Unknown Model".
    add_to_leaderboard : bool
        When True, the computed results are appended to the leaderboard.

    Returns
    -------
    tuple of (status message, leaderboard DataFrame) — suitable for binding
    directly to the Gradio outputs.
    """
    # Fetch the ground-truth answers from the private dataset repo first;
    # every failure path still returns the current leaderboard for display.
    try:
        ground_truth_path = hf_hub_download(
            repo_id="SondosMB/ground-truth-dataset",
            filename="ground_truth.csv",
            repo_type="dataset",
            use_auth_token=True,
        )
        ground_truth_df = pd.read_csv(ground_truth_path)
    except FileNotFoundError:
        return "Ground truth file not found in the dataset repository.", load_leaderboard()
    except Exception as e:
        return f"Error loading ground truth: {e}", load_leaderboard()

    if not prediction_file:
        return "Prediction file not uploaded.", load_leaderboard()

    try:
        predictions_df = pd.read_csv(prediction_file.name)

        # Inner-join keeps only questions present in both files; rows that
        # fail to normalize to a letter are excluded from "valid" counts.
        merged_df = pd.merge(predictions_df, ground_truth_df, on='question_id', how='inner')
        merged_df['pred_answer'] = merged_df['predicted_answer'].apply(clean_answer)
        valid_rows = merged_df.dropna(subset=['pred_answer'])

        n_correct = (valid_rows['pred_answer'] == valid_rows['Answer']).sum()
        n_total = len(merged_df)
        n_valid = len(valid_rows)

        overall_accuracy = n_correct / n_total if n_total > 0 else 0
        valid_accuracy = n_correct / n_valid if n_valid > 0 else 0

        results = {
            'model_name': model_name if model_name else "Unknown Model",
            'overall_accuracy': overall_accuracy,
            'valid_accuracy': valid_accuracy,
            'correct_predictions': n_correct,
            'total_questions': n_total,
        }

        if add_to_leaderboard:
            update_leaderboard(results)
            return "Evaluation completed and added to leaderboard.", load_leaderboard()
        return "Evaluation completed but not added to leaderboard.", load_leaderboard()

    except Exception as e:
        return f"Error during evaluation: {str(e)}", load_leaderboard()
|
|
|
initialize_leaderboard_file() |
|
|
|
|
|
|
|
import gradio as gr |
|
|
|
|
|
|
|
|
|
# CSS theme injected into gr.Blocks(css=...) below. Styles the page body,
# the hero <header>, the pre-/post-tab sections, the tab container, the
# <footer>, and the social-link buttons used in the footer markdown.
css_tech_theme = """
body {
    font-family: 'Roboto', sans-serif;
    background-color: #f4f6fa;
    color: #333333;
    margin: 0;
    padding: 0;
}

/* Header Styling */
header {
    text-align: center;
    padding: 60px 20px;
    background: linear-gradient(135deg, #6a1b9a, #64b5f6);
    color: #ffffff;
    border-radius: 12px;
    margin-bottom: 30px;
    box-shadow: 0 6px 20px rgba(0, 0, 0, 0.2);
}

header h1 {
    font-size: 3.5em;
    font-weight: bold;
    margin-bottom: 10px;
}

header h2 {
    font-size: 2em;
    margin-bottom: 15px;
}

header p {
    font-size: 1.7em;
    line-height: 1.8;
}

.header-buttons {
    display: flex;
    justify-content: center;
    gap: 15px;
    margin-top: 20px;
}

.header-buttons a {
    text-decoration: none;
    font-size: 1.5em;
    padding: 15px 30px;
    border-radius: 30px;
    font-weight: bold;
    background: #ffffff;
    color: #6a1b9a;
    transition: transform 0.3s, background 0.3s;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
}

.header-buttons a:hover {
    background: #64b5f6;
    color: #ffffff;
    transform: scale(1.05);
}

/* Pre-Tabs Section */
.pre-tabs {
    text-align: center;
    padding: 40px 20px;
    background: linear-gradient(135deg, #ffffff, #f9fafb);
    border-top: 5px solid #64b5f6;
    border-bottom: 5px solid #6a1b9a;
}

.pre-tabs h2 {
    font-size: 3.4em;
    color: #333333;
    margin-bottom: 15px;
}

.pre-tabs p {
    font-size: 2em;
    color: #555555;
    line-height: 1.8;
}

/* Tabs Section */
.tabs {
    margin: 0 auto;
    padding: 20px;
    background: #ffffff;
    border-radius: 12px;
    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
    max-width: 1200px;
}

/* Post-Tabs Section */
.post-tabs {
    text-align: center;
    padding: 40px 20px;
    background: linear-gradient(135deg, #64b5f6, #6a1b9a);
    color: #ffffff;
    border-radius: 12px;
    margin-top: 30px;
}

.post-tabs h2 {
    font-size: 3.4em;
    margin-bottom: 15px;
}

.post-tabs p {
    font-size: 2em;
    line-height: 1.8;
    margin-bottom: 20px;
}

.post-tabs a {
    text-decoration: none;
    font-size: 1.1em;
    padding: 15px 30px;
    border-radius: 30px;
    font-weight: bold;
    background: #ffffff;
    color: #6a1b9a;
    transition: transform 0.3s, background 0.3s;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
}

.post-tabs a:hover {
    background: #6a1b9a;
    color: #ffffff;
    transform: scale(1.05);
}

/* Footer */
footer {
    background: linear-gradient(135deg, #6a1b9a, #8e44ad);
    color: #ffffff;
    text-align: center;
    padding: 40px 20px;
    margin-top: 30px;
    border-radius: 12px;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.2);
}

footer h2 {
    font-size: 1.9em;
    margin-bottom: 15px;
}

footer p {
    font-size: 1.5em;
    line-height: 1.6;
    margin-bottom: 20px;
}

/* Link Styling */
.social-links {
    display: flex;
    justify-content: center;
    gap: 15px; /* Space between links */
}

.social-link {
    display: inline-block;
    text-decoration: none;
    color: #ffffff;
    background-color: #6a1b9a; /* Purple button background */
    padding: 10px 20px;
    border-radius: 30px;
    font-size: 16px;
    font-weight: bold;
    transition: all 0.3s ease;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
}

.social-link:hover {
    background-color: #8c52d3; /* Darker shade on hover */
    box-shadow: 0 6px 15px rgba(0, 0, 0, 0.2);
    transform: translateY(-2px);
}

.social-link:active {
    transform: translateY(1px);
    box-shadow: 0 3px 8px rgba(0, 0, 0, 0.1);
}
"""
|
|
|
|
|
|
|
# Build the competition UI: hero header, intro section, three tabs
# (overview / submission / leaderboard), call-to-action, and footer.
with gr.Blocks(css=css_tech_theme) as demo:

    gr.Markdown("""
    <header>
        <h1>π Mobile-MMLU Benchmark Competition</h1>
        <h2>π Push the Boundaries of Mobile AI</h2>
        <p style="color:#ffffff;">
        Test and optimize mobile-compatible Large Language Models (LLMs) with cutting-edge benchmarks
        across 80 fields and over 16,000 questions.
        </p>
    </header>
    """)
    # BUG FIX above: the original <p style="color:#ffffff;> was missing the
    # closing quote on the style attribute, producing broken HTML.

    gr.Markdown("""
    <section class="pre-tabs">
        <h2>Why Participate?</h2>
        <p>
        The Mobile-MMLU Benchmark Competition is a unique opportunity to test your LLMs against
        real-world scenarios. Compete to drive innovation and make your mark in mobile AI.
        </p>
    </section>
    """)

    with gr.Tabs(elem_id="tabs"):

        with gr.TabItem("π Overview"):
            gr.Markdown("""
            <div class="tabs">
            <h2>About the Competition</h2>
            <p>
            The **Mobile-MMLU Benchmark Competition** is an exciting challenge for mobile-optimized
            LLMs. Compete to achieve the highest accuracy and contribute to advancements in mobile AI.
            </p>
            <h3>How It Works</h3>
            <ul>
            <li>1οΈβ£ <strong>Download the Dataset:</strong> Access the dataset and instructions on our
            <a href="https://github.com/your-github-repo" target="_blank">GitHub page</a>.</li>
            <li>2οΈβ£ <strong>Generate Predictions:</strong> Use your LLM to answer the dataset questions.
            Format your predictions as a CSV file.</li>
            <li>3οΈβ£ <strong>Submit Predictions:</strong> Upload your predictions on this platform.</li>
            <li>4οΈβ£ <strong>Evaluation:</strong> Submissions are scored based on accuracy.</li>
            <li>5οΈβ£ <strong>Leaderboard:</strong> View real-time rankings on the leaderboard.</li>
            </ul>
            </div>
            """)

        with gr.TabItem("π€ Submission"):
            gr.Markdown("<div class='tabs'><h2>Submit Your Predictions</h2></div>")
            with gr.Row():
                file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
                model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")

            with gr.Row():
                overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
                add_to_leaderboard_checkbox = gr.Checkbox(label="Add to Leaderboard?", value=True)

            eval_button = gr.Button("Evaluate")
            eval_status = gr.Textbox(label="Evaluation Status", interactive=False)

            def handle_evaluation(file, model_name, add_to_leaderboard):
                # Run the evaluation and surface a status string plus an
                # accuracy figure for the Number widget.
                status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
                if leaderboard.empty:
                    overall_accuracy = 0
                else:
                    # NOTE(review): this shows the LAST leaderboard row, which
                    # is the current submission only when add_to_leaderboard
                    # was True; otherwise it displays the previous entry's
                    # accuracy — confirm this is intended.
                    overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
                return status, overall_accuracy

            eval_button.click(
                handle_evaluation,
                inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
                outputs=[eval_status, overall_accuracy_display],
            )

        # BUG FIX: the original split this label across two physical lines
        # ("π\nLeaderboard"), leaving an unterminated string literal — a
        # SyntaxError. Joined onto a single line.
        with gr.TabItem("π Leaderboard"):
            leaderboard_table = gr.Dataframe(
                value=load_leaderboard(),
                label="Leaderboard",
                interactive=False,
                wrap=True,
            )
            refresh_button = gr.Button("Refresh Leaderboard")
            refresh_button.click(
                lambda: load_leaderboard(),
                inputs=[],
                outputs=[leaderboard_table],
            )

    gr.Markdown("""
    <section class="post-tabs">
        <h2>Ready to Compete?</h2>
        <p>
        Submit your predictions today and make your mark in advancing mobile AI technologies.
        Show the world what your model can achieve!
        </p>
        <a href="#submission">Start Submitting</a>
    </section>
    """)

    gr.Markdown("""
    <footer>
        <h2>Stay Connected</h2>
        <p>
        Follow us on social media or contact us for any queries. Let's shape the future of AI together!
        </p>
        <div class="social-links">
            <a href="https://website.com" target="_blank" class="social-link">π Website</a>
            <a href="https://github.com" target="_blank" class="social-link">π GitHub</a>
        </div>
    </footer>
    """)

# Launch the Gradio app (blocking) when the module is executed.
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|