import gradio as gr
import pandas as pd
import os
import re
from datetime import datetime
from huggingface_hub import HfApi, hf_hub_download
LEADERBOARD_FILE = "leaderboard.csv"
GROUND_TRUTH_FILE = "ground_truth.csv"
LAST_UPDATED = datetime.now().strftime("%B %d, %Y")
# Ensure authentication and suppress warnings
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
raise ValueError("HF_TOKEN environment variable is not set or invalid.")
def initialize_leaderboard_file():
    """
    Ensure the leaderboard file exists and has the correct headers.
    """
    if not os.path.exists(LEADERBOARD_FILE) or os.stat(LEADERBOARD_FILE).st_size == 0:
        pd.DataFrame(columns=[
            "Model Name", "Overall Accuracy", "Valid Accuracy",
            "Correct Predictions", "Total Questions", "Timestamp"
        ]).to_csv(LEADERBOARD_FILE, index=False)
def clean_answer(answer):
    """
    Normalize a predicted answer to a single uppercase letter A-D, or None if no valid letter is present.
    """
    if pd.isna(answer):
        return None
    answer = str(answer)
    clean = re.sub(r'[^A-Da-d]', '', answer)
    return clean[0].upper() if clean else None
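# Normalization examples for reference: clean_answer(" b) ") -> "B", clean_answer("(D)") -> "D",
# and values with no A-D letter at all (e.g. "5" or NaN) -> None. Only the first A-D letter
# is kept, so a free-text answer such as "A or B" normalizes to "A".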
def update_leaderboard(results):
"""
Append new submission results to the leaderboard file and push updates to the Hugging Face repository.
"""
new_entry = {
"Model Name": results['model_name'],
"Overall Accuracy": round(results['overall_accuracy'] * 100, 2),
"Valid Accuracy": round(results['valid_accuracy'] * 100, 2),
"Correct Predictions": results['correct_predictions'],
"Total Questions": results['total_questions'],
"Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
try:
# Update the local leaderboard file
new_entry_df = pd.DataFrame([new_entry])
file_exists = os.path.exists(LEADERBOARD_FILE)
new_entry_df.to_csv(
LEADERBOARD_FILE,
mode='a', # Append mode
index=False,
header=not file_exists # Write header only if the file is new
)
print(f"Leaderboard updated successfully at {LEADERBOARD_FILE}")
        # Push the updated file to the Hugging Face Space repository via the HTTP API,
        # authenticating with the HF_TOKEN validated at startup.
        api = HfApi()
        api.upload_file(
            path_or_fileobj=LEADERBOARD_FILE,
            path_in_repo="leaderboard.csv",
            repo_id="SondosMB/ss",  # Space repository hosting this leaderboard
            repo_type="space",
            token=HF_TOKEN,
        )
print("Leaderboard changes pushed to Hugging Face repository.")
except Exception as e:
print(f"Error updating leaderboard file: {e}")
def load_leaderboard():
if not os.path.exists(LEADERBOARD_FILE) or os.stat(LEADERBOARD_FILE).st_size == 0:
return pd.DataFrame({
"Model Name": [],
"Overall Accuracy": [],
"Valid Accuracy": [],
"Correct Predictions": [],
"Total Questions": [],
"Timestamp": [],
})
return pd.read_csv(LEADERBOARD_FILE)
def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
    """
    Evaluate an uploaded prediction file against the ground truth and optionally add the results to the leaderboard.
    """
    try:
        ground_truth_path = hf_hub_download(
            repo_id="SondosMB/ground-truth-dataset",
            filename="ground_truth.csv",
            repo_type="dataset",
            token=HF_TOKEN,
        )
ground_truth_df = pd.read_csv(ground_truth_path)
except FileNotFoundError:
return "Ground truth file not found in the dataset repository.", load_leaderboard()
except Exception as e:
return f"Error loading ground truth: {e}", load_leaderboard()
if not prediction_file:
return "Prediction file not uploaded.", load_leaderboard()
try:
predictions_df = pd.read_csv(prediction_file.name)
merged_df = pd.merge(predictions_df, ground_truth_df, on='question_id', how='inner')
merged_df['pred_answer'] = merged_df['predicted_answer'].apply(clean_answer)
valid_predictions = merged_df.dropna(subset=['pred_answer'])
correct_predictions = (valid_predictions['pred_answer'] == valid_predictions['Answer']).sum()
total_predictions = len(merged_df)
total_valid_predictions = len(valid_predictions)
overall_accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0
valid_accuracy = correct_predictions / total_valid_predictions if total_valid_predictions > 0 else 0
results = {
'model_name': model_name if model_name else "Unknown Model",
'overall_accuracy': overall_accuracy,
'valid_accuracy': valid_accuracy,
'correct_predictions': correct_predictions,
'total_questions': total_predictions,
}
if add_to_leaderboard:
update_leaderboard(results)
return "Evaluation completed and added to leaderboard.", load_leaderboard()
else:
return "Evaluation completed but not added to leaderboard.", load_leaderboard()
except Exception as e:
return f"Error during evaluation: {str(e)}", load_leaderboard()
initialize_leaderboard_file()
with gr.Blocks() as demo:
gr.Markdown("""
# Competition Title
### Welcome to the Competition Overview
![Competition Logo](![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/644c4c848c51ddbe0ea841db/mDW2bak1sPUbOafx2FELE.jpeg)
)
Here you can submit your predictions, view the leaderboard, and track your performance!
""")
with gr.Tabs():
with gr.TabItem("πŸ“– Overview"):
gr.Markdown("""
## Overview
# Welcome to the Mobile-MMLU Benchmark Competition
Evaluate the performance of mobile-compatible Large Language Models (LLMs) on 16,186 scenario-based and factual questions across 80 fields. Compete to showcase your model’s accuracy for real-world mobile scenarios.
## What is Mobile-MMLU?
Mobile-MMLU is a benchmark designed to test the capabilities of LLMs optimized for mobile use. By participating in this competition, you contribute to advancing mobile intelligence benchmarks and shaping the future of mobile-compatible AI systems.
---
## How It Works
1. **Download the Dataset**
Access the dataset and detailed generation instructions on our [GitHub page](https://github.com/your-github-repo).
2. **Generate Predictions**
Use your LLM to answer the questions and format your predictions as a CSV file with the following structure as written on our GitHub page :
3. **Submit Predictions**
Upload your predictions via the submission portal.
4. **Evaluation**
Your submission will be scored on accuracy
5. **Leaderboard**
Compare your results against other participants on the live leaderboard.
---
## Competition Tasks
Participants are tasked with generating predictions for the dataset and optimizing their models for:
- **Accuracy**: Correctly answering questions across diverse fields.
---
## Get Started
1. **Prepare Your Model**
Refer to our [GitHub page](https://github.com/your-github-repo) for dataset access and response generation instructions.
2. **Submit Predictions**
Format your submission as specified in the rules.
3. **Track Progress**
Check the leaderboard for real-time rankings.
---
## Contact Us
For questions or support, contact us at: [Insert Email Address]
""")
with gr.TabItem("πŸ“€ Submission"):
with gr.Row():
file_input = gr.File(label="Upload Prediction CSV", file_types=[".csv"], interactive=True)
model_name_input = gr.Textbox(label="Model Name", placeholder="Enter your model name")
with gr.Row():
overall_accuracy_display = gr.Number(label="Overall Accuracy", interactive=False)
add_to_leaderboard_checkbox = gr.Checkbox(label="Add to Leaderboard?", value=True)
eval_button = gr.Button("Evaluate")
eval_status = gr.Textbox(label="Evaluation Status", interactive=False)
            def handle_evaluation(file, model_name, add_to_leaderboard):
                status, leaderboard = evaluate_predictions(file, model_name, add_to_leaderboard)
                # The freshly appended row is only this submission when it was added to the
                # leaderboard; otherwise (or on an empty board) fall back to 0.
                if add_to_leaderboard and not leaderboard.empty:
                    overall_accuracy = leaderboard.iloc[-1]["Overall Accuracy"]
                else:
                    overall_accuracy = 0
                return status, overall_accuracy
eval_button.click(
handle_evaluation,
inputs=[file_input, model_name_input, add_to_leaderboard_checkbox],
outputs=[eval_status, overall_accuracy_display],
)
with gr.TabItem("πŸ… Leaderboard"):
leaderboard_table = gr.Dataframe(
value=load_leaderboard(),
label="Leaderboard",
interactive=False,
wrap=True,
)
refresh_button = gr.Button("Refresh Leaderboard")
refresh_button.click(
lambda: load_leaderboard(),
inputs=[],
outputs=[leaderboard_table],
)
gr.Markdown(f"Last updated on **{LAST_UPDATED}**")
demo.launch()
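# Note: running this app outside the Space requires the HF_TOKEN environment variable to be set
# to a token that can read the ground-truth dataset and write to the leaderboard Space,
# e.g. (assuming this file is saved as app.py):
#   HF_TOKEN=hf_xxx python app.py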