"""
Main application for Dynamic Highscores system.
This file integrates all components into a unified application.
"""
import os
import threading
import time

import gradio as gr

from database_schema import DynamicHighscoresDB
from auth import HuggingFaceAuth, create_login_ui, setup_auth_handlers
from benchmark_selection import BenchmarkSelector, create_benchmark_selection_ui
from evaluation_queue import EvaluationQueue, create_model_submission_ui
from leaderboard import Leaderboard, create_leaderboard_ui
from sample_benchmarks import add_sample_benchmarks
# Initialize components in main thread
db = DynamicHighscoresDB()
auth_manager = HuggingFaceAuth(db)
benchmark_selector = BenchmarkSelector(db, auth_manager)
evaluation_queue = EvaluationQueue(db, auth_manager)
leaderboard = Leaderboard(db)
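
# All UI sections share this single DynamicHighscoresDB instance, passed in
# through the components constructed above.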
# Initialize sample benchmarks if none exist
benchmarks = db.get_benchmarks()
if not benchmarks:
    print("No benchmarks found. Adding sample benchmarks...")
    try:
        num_added = add_sample_benchmarks()
        print(f"Added {num_added} sample benchmarks.")
    except Exception as e:
        print(f"Error adding sample benchmarks: {e}")
# Custom CSS
css = """
.info-text {
    background-color: #f0f7ff;
    padding: 12px;
    border-radius: 8px;
    border-left: 4px solid #3498db;
    margin: 12px 0;
}

.container {
    max-width: 1200px;
    margin: 0 auto;
}

.header {
    text-align: center;
    margin-bottom: 20px;
}

.footer {
    text-align: center;
    margin-top: 40px;
    padding: 20px;
    border-top: 1px solid #eee;
}
"""
# Start evaluation queue worker
def start_queue_worker():
    # Wait a moment to ensure the app is initialized
    time.sleep(2)
    try:
        print("Starting evaluation queue worker...")
        evaluation_queue.start_worker()
    except Exception as e:
        print(f"Error starting queue worker: {e}")
# Create Gradio app
with gr.Blocks(css=css, title="Dynamic Highscores") as app:
    gr.Markdown("# 🏆 Dynamic Highscores", elem_classes=["header"])
    gr.Markdown("""
Welcome to Dynamic Highscores - a community benchmark platform for evaluating and comparing language models.

- **Add your own benchmarks** from HuggingFace datasets
- **Submit your models** for CPU-only evaluation
- **Compare performance** across different models and benchmarks
- **Filter results** by model type (Merge, Agent, Reasoning, Coding, etc.)
""", elem_classes=["info-text"])

    # Authentication UI
    login_button, logout_button, token_input, user_info = create_login_ui()
    setup_auth_handlers(login_button, logout_button, token_input, user_info, auth_manager)
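    # The login/logout controls above are wired to auth_manager by
    # setup_auth_handlers; the submission and benchmark tabs below receive the
    # same auth_manager.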

    # Main tabs
    with gr.Tabs() as tabs:
        with gr.TabItem("🏆 Leaderboard", id=0):
            leaderboard_ui = create_leaderboard_ui(leaderboard, db)

        with gr.TabItem("🚀 Submit Model", id=1):
            submission_ui = create_model_submission_ui(evaluation_queue, auth_manager, db)

        with gr.TabItem("📊 Benchmarks", id=2):
            benchmark_ui = create_benchmark_selection_ui(benchmark_selector, auth_manager)

    gr.Markdown("""
### About Dynamic Highscores

This platform allows users to select benchmarks from HuggingFace datasets and evaluate models against them.
Each user can submit one benchmark per day (admin users are exempt from this limit).
All evaluations run on CPU only to ensure fair comparisons.

Created by Quazim0t0
""", elem_classes=["footer"])

# Launch the app
if __name__ == "__main__":
    # Start queue worker in a separate thread
    queue_thread = threading.Thread(target=start_queue_worker)
    queue_thread.daemon = True
    queue_thread.start()

    app.launch()