"""
Main application for the Dynamic Highscores system.
This file integrates all components into a unified application.
"""
import os
import gradio as gr
import threading
import time
from database_schema import DynamicHighscoresDB
from auth import HuggingFaceAuth
from benchmark_selection import BenchmarkSelector, create_benchmark_selection_ui
from evaluation_queue import EvaluationQueue, create_model_submission_ui
from leaderboard import Leaderboard, create_leaderboard_ui
from sample_benchmarks import add_sample_benchmarks
# Initialize components in main thread
db = DynamicHighscoresDB()
auth_manager = HuggingFaceAuth(db)
benchmark_selector = BenchmarkSelector(db, auth_manager)
evaluation_queue = EvaluationQueue(db, auth_manager)
leaderboard = Leaderboard(db)
# Initialize sample benchmarks if none exist
print("Checking for existing benchmarks...")
benchmarks = db.get_benchmarks()
if not benchmarks or len(benchmarks) == 0:
    print("No benchmarks found. Adding sample benchmarks...")
    try:
        # Log the database path for debugging
        print(f"Database path: {db.db_path}")
        # Import and call the function directly
        num_added = add_sample_benchmarks()
        print(f"Added {num_added} sample benchmarks.")
    except Exception as e:
        print(f"Error adding sample benchmarks: {str(e)}")
        # Try direct DB insertion as fallback
        try:
            print("Attempting direct benchmark insertion...")
            db.add_benchmark(
                name="MMLU (Massive Multitask Language Understanding)",
                dataset_id="cais/mmlu",
                description="Tests knowledge across 57 subjects"
            )
            print("Added fallback benchmark.")
        except Exception as inner_e:
            print(f"Fallback insertion failed: {str(inner_e)}")
else:
    print(f"Found {len(benchmarks)} existing benchmarks.")
# Custom CSS with theme awareness
css = """
/* Theme-adaptive colored info box */
.info-text {
    background-color: rgba(53, 130, 220, 0.1);
    padding: 12px;
    border-radius: 8px;
    border-left: 4px solid #3498db;
    margin: 12px 0;
}

/* High-contrast text for elements - works in light and dark themes */
.info-text, .header, .footer, .tab-content,
button, input, textarea, select, option,
.gradio-container *, .markdown-text {
    color: var(--text-color, inherit) !important;
}

/* Container styling */
.container {
    max-width: 1200px;
    margin: 0 auto;
}

/* Header styling */
.header {
    text-align: center;
    margin-bottom: 20px;
    font-weight: bold;
    font-size: 24px;
}

/* Footer styling */
.footer {
    text-align: center;
    margin-top: 40px;
    padding: 20px;
    border-top: 1px solid var(--border-color-primary, #eee);
}

/* Login section styling */
.login-section {
    padding: 10px;
    margin-bottom: 15px;
    border-radius: 8px;
    background-color: rgba(250, 250, 250, 0.1);
    text-align: center;
}

/* Login button styling */
.login-button {
    background-color: #4CAF50 !important;
    color: white !important;
    font-weight: bold;
}

/* Force high contrast on specific input areas */
input[type="text"], input[type="password"], textarea {
    background-color: var(--background-fill-primary) !important;
    color: var(--body-text-color) !important;
}

/* Force text visibility in multiple contexts */
.gradio-markdown p, .gradio-markdown h1, .gradio-markdown h2,
.gradio-markdown h3, .gradio-markdown h4, .gradio-markdown li {
    color: var(--body-text-color) !important;
}

/* Fix dark mode text visibility */
@media (prefers-color-scheme: dark) {
    input, textarea, select {
        color: #ffffff !important;
    }

    ::placeholder {
        color: rgba(255, 255, 255, 0.5) !important;
    }
}
"""
# JavaScript login implementation
def js_login_script():
    space_host = os.environ.get("SPACE_HOST", "localhost:7860")
    redirect_uri = f"https://{space_host}"
    client_id = os.environ.get("OAUTH_CLIENT_ID", "")

    return f"""
    <script src="https://unpkg.com/@huggingface/[email protected]/dist/index.umd.min.js"></script>
    <script>
    (async function() {{
        const HfHub = window.HfHub;
        try {{
            // Check if we're returning from OAuth redirect
            const oauthResult = await HfHub.oauthHandleRedirectIfPresent();
            if (oauthResult) {{
                console.log("User logged in:", oauthResult);

                // Store the user info in localStorage
                localStorage.setItem("hf_user", JSON.stringify(oauthResult.userInfo));
                localStorage.setItem("hf_token", oauthResult.accessToken);

                // Update the UI to show logged in state
                document.getElementById("login-status").textContent = "Logged in as: " + oauthResult.userInfo.name;
                document.getElementById("login-button").style.display = "none";

                // Add token to headers for future requests
                const originalFetch = window.fetch;
                window.fetch = function(url, options = {{}}) {{
                    if (!options.headers) {{
                        options.headers = {{}};
                    }}
                    // Add the token to the headers
                    options.headers["HF-Token"] = oauthResult.accessToken;
                    return originalFetch(url, options);
                }};

                // Refresh the page to update server-side state
                setTimeout(() => window.location.reload(), 1000);
            }}
        }} catch (error) {{
            console.error("OAuth error:", error);
        }}

        // Check if user is already logged in from localStorage
        const storedUser = localStorage.getItem("hf_user");
        const storedToken = localStorage.getItem("hf_token");
        if (storedUser && storedToken) {{
            try {{
                const userInfo = JSON.parse(storedUser);
                document.getElementById("login-status").textContent = "Logged in as: " + userInfo.name;
                document.getElementById("login-button").style.display = "none";

                // Add token to headers for future requests
                const originalFetch = window.fetch;
                window.fetch = function(url, options = {{}}) {{
                    if (!options.headers) {{
                        options.headers = {{}};
                    }}
                    // Add the token to the headers
                    options.headers["HF-Token"] = storedToken;
                    return originalFetch(url, options);
                }};
            }} catch (e) {{
                console.error("Error parsing stored user:", e);
            }}
        }}

        // Setup login button
        const loginButton = document.getElementById("login-button");
        if (loginButton) {{
            loginButton.addEventListener("click", async function() {{
                try {{
                    const clientId = "{client_id}";
                    if (clientId) {{
                        // Use HuggingFace OAuth
                        const loginUrl = await HfHub.oauthLoginUrl({{
                            clientId: clientId,
                            redirectUrl: "{redirect_uri}",
                            scopes: ["openid", "profile"]
                        }});
                        console.log("Redirecting to:", loginUrl);
                        window.location.href = loginUrl;
                    }} else {{
                        // Fallback to token-based login
                        const token = prompt("Enter your HuggingFace token:");
                        if (token) {{
                            // Set the token as a cookie
                            document.cookie = "hf_token=" + token + "; path=/; SameSite=Strict";
                            // Reload the page to apply the token
                            window.location.reload();
                        }}
                    }}
                }} catch (error) {{
                    console.error("Error starting login process:", error);
                    alert("Error starting login process. Please try again.");
                }}
            }});
        }}
    }})();
    </script>
    """
# Simple manual authentication check
def check_user(request: gr.Request):
    if request:
        # Check for HF-User header from Space OAuth
        username = request.headers.get("HF-User")
        if username:
            # User is logged in via HF Spaces
            print(f"User logged in via HF-User header: {username}")
            user = db.get_user_by_username(username)
            if not user:
                # Create user if they don't exist
                print(f"Creating new user: {username}")
                is_admin = (username == "Quazim0t0")
                db.add_user(username, username, is_admin)
                user = db.get_user_by_username(username)
            return username

        # Check for token in headers (from our custom JS)
        token = request.headers.get("HF-Token")
        if token:
            try:
                # Validate token with HuggingFace
                user_info = auth_manager.hf_api.whoami(token=token)
                if user_info:
                    username = user_info.get("name", "")
                    print(f"User logged in via token: {username}")
                    return username
            except Exception as e:
                print(f"Token validation error: {e}")

    return None
# Start evaluation queue worker
def start_queue_worker():
    # Wait a moment to ensure app is initialized
    time.sleep(2)
    try:
        print("Starting evaluation queue worker...")
        evaluation_queue.start_worker()
    except Exception as e:
        print(f"Error starting queue worker: {e}")
# Create Gradio app
with gr.Blocks(css=css, title="Dynamic Highscores") as app:
    # State to track user
    user_state = gr.State(None)

    # Login section
    with gr.Row(elem_classes=["login-section"]):
        with gr.Column():
            gr.HTML("""
            <div style="display: flex; justify-content: space-between; align-items: center;">
                <div id="login-status">Not logged in</div>
                <button id="login-button" style="padding: 8px 16px; background-color: #4CAF50; color: white; border: none; border-radius: 4px; cursor: pointer;">Login with HuggingFace</button>
            </div>
            """)

            # Add the JS login script
            gr.HTML(js_login_script())

    gr.Markdown("# 🏆 Dynamic Highscores", elem_classes=["header"])

    gr.Markdown("""
Welcome to Dynamic Highscores - a community benchmark platform for evaluating and comparing language models.
- **Add your own benchmarks** from HuggingFace datasets
- **Submit your models** for CPU-only evaluation
- **Compare performance** across different models and benchmarks
- **Filter results** by model type (Merge, Agent, Reasoning, Coding, etc.)
""", elem_classes=["info-text"])
    # Main tabs
    with gr.Tabs() as tabs:
        with gr.TabItem("📊 Leaderboard", id=0):
            leaderboard_ui = create_leaderboard_ui(leaderboard, db)

        with gr.TabItem("🚀 Submit Model", id=1):
            submission_ui = create_model_submission_ui(evaluation_queue, auth_manager, db)

        with gr.TabItem("🔍 Benchmarks", id=2):
            benchmark_ui = create_benchmark_selection_ui(benchmark_selector, auth_manager)

        with gr.TabItem("🌐 Community Framework", id=3):
            # Create a simple placeholder for the Community Framework tab
            gr.Markdown("""
# 🌐 Dynamic Highscores Community Framework
## About Dynamic Highscores
Dynamic Highscores is an open-source community benchmark system for evaluating language models on any dataset. This project was created to fill the gap left by the retirement of HuggingFace's "Open LLM Leaderboards" which were discontinued due to outdated benchmarks.
### Key Features
- **Flexible Benchmarking**: Test models against any HuggingFace dataset, not just predefined benchmarks (see the sketch after this list)
- **Community-Driven**: Anyone can add new benchmarks and submit models for evaluation
- **Modern Evaluation**: Focus on contemporary benchmarks that better reflect current model capabilities
- **CPU-Only Evaluation**: Ensures fair comparisons across different models
- **Daily Submission Limits**: Prevents system abuse (one benchmark per day per user)
- **Model Tagging**: Categorize models as Merge, Agent, Reasoning, Coding, etc.
- **Unified Leaderboard**: View all models with filtering capabilities by tags
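
As a rough illustration of flexible benchmarking, any dataset on the HuggingFace Hub can serve as a benchmark source. The snippet below is only a sketch using the `datasets` library and the MMLU dataset already referenced in this app; it is not the evaluation code run by the queue.

```
from datasets import load_dataset

# Pull a benchmark split directly from the HuggingFace Hub
mmlu = load_dataset("cais/mmlu", "all", split="test")
print(mmlu[0])  # one multiple-choice question with subject, choices, and answer
```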
### Why This Project Matters
When HuggingFace retired their "Open LLM Leaderboards," the community lost a valuable resource for comparing model performance. The benchmarks used had become outdated and didn't reflect the rapid advances in language model capabilities.
Dynamic Highscores addresses this issue by allowing users to select from any benchmark on HuggingFace, including the most recent and relevant datasets. This ensures that models are evaluated on tasks that matter for current applications.
## Model Configuration System (Coming Soon)
We're working on a modular system for model configurations that will allow users to:
- Create and apply predefined configurations for different model types
- Define parameters such as Temperature, Top-K, Min-P, Top-P, and Repetition Penalty
- Share optimal configurations with the community
### Example Configuration (Gemma)
```
Temperature: 1.0
Top_K: 64
Min_P: 0.01
Top_P: 0.95
Repetition Penalty: 1.0
```
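
Applied in code, a configuration like the one above would simply map onto standard text-generation parameters. The sketch below assumes a transformers-style `generate()` call, with `model` and `inputs` as placeholders; the configuration system itself is not implemented yet.

```
# Hypothetical application of the Gemma configuration above
generation_kwargs = {
    "temperature": 1.0,
    "top_k": 64,
    "min_p": 0.01,
    "top_p": 0.95,
    "repetition_penalty": 1.0,
}
outputs = model.generate(**inputs, **generation_kwargs)
```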
## Contributing to the Project
We welcome contributions from the community! If you'd like to improve Dynamic Highscores, here are some ways to get involved:
- **Add New Features**: Enhance the platform with additional functionality
- **Improve Evaluation Methods**: Help make model evaluations more accurate and efficient
- **Fix Bugs**: Address issues in the codebase
- **Enhance Documentation**: Make the project more accessible to new users
- **Add Model Configurations**: Contribute optimal configurations for different model types
To contribute, fork the repository, make your changes, and submit a pull request. We appreciate all contributions, big or small!
""")
    gr.Markdown("""
### About Dynamic Highscores
This platform allows users to select benchmarks from HuggingFace datasets and evaluate models against them.
Each user can submit one benchmark per day (admin users are exempt from this limit).
All evaluations run on CPU only to ensure fair comparisons.
Created by Quazim0t0
""", elem_classes=["footer"])
    # Check login on page load
    app.load(
        fn=check_user,
        inputs=[],
        outputs=[user_state]
    )
# Launch the app
if __name__ == "__main__":
    # Start queue worker in a separate thread
    queue_thread = threading.Thread(target=start_queue_worker)
    queue_thread.daemon = True
    queue_thread.start()

    # Launch the app
    app.launch()