"""
Test script for Dynamic Highscores application.

This script tests the key functionality of the Dynamic Highscores application
to ensure everything works as expected before deployment.
"""

import os
import unittest
import tempfile
import sqlite3

# Import components to test
from database_schema import DynamicHighscoresDB
from auth import HuggingFaceAuth
from benchmark_selection import BenchmarkSelector
from evaluation_queue import EvaluationQueue
from leaderboard import Leaderboard

class TestDynamicHighscores(unittest.TestCase):
    """Test cases for Dynamic Highscores application."""
    
    def setUp(self):
        """Set up test environment."""
        # Create temporary database
        self.db_fd, self.db_path = tempfile.mkstemp()
        self.db = DynamicHighscoresDB(self.db_path)
        
        # Auth manager backed by the temporary database
        self.auth_manager = HuggingFaceAuth(self.db)
        
        # Components under test, wired to the same database and auth manager
        self.benchmark_selector = BenchmarkSelector(self.db, self.auth_manager)
        self.evaluation_queue = EvaluationQueue(self.db, self.auth_manager)
        self.leaderboard = Leaderboard(self.db)
    
    def tearDown(self):
        """Clean up test environment."""
        # Close the DB handle first (close() is assumed here, hence the
        # guard; without it, unlink() can fail on platforms that lock open files).
        if hasattr(self.db, "close"):
            self.db.close()
        os.close(self.db_fd)
        os.unlink(self.db_path)
    
    def test_database_schema(self):
        """Test database schema creation."""
        # Check if tables were created
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        
        # Get list of tables
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = cursor.fetchall()
        table_names = [table[0] for table in tables]
        
        # Check if all expected tables exist
        expected_tables = ['users', 'benchmarks', 'models', 'evaluations', 'queue']
        for table in expected_tables:
            self.assertIn(table, table_names)
        
        conn.close()
    
    def test_user_management(self):
        """Test user management functionality."""
        # Add a test user
        user_id = self.db.add_user("test_user", "test_hf_id", False)
        self.assertIsNotNone(user_id)
        
        # Add an admin user
        admin_id = self.db.add_user("admin_user", "admin_hf_id", True)
        self.assertIsNotNone(admin_id)
        
        # Test submission limits
        self.assertTrue(self.db.can_submit_today(user_id))
        self.db.update_submission_date(user_id)
        self.assertFalse(self.db.can_submit_today(user_id))
        
        # Admin should always be able to submit
        self.assertTrue(self.db.can_submit_today(admin_id))
    
    def test_benchmark_management(self):
        """Test benchmark management functionality."""
        # Add a test benchmark
        benchmark_id = self.db.add_benchmark(
            name="Test Benchmark",
            dataset_id="test/dataset",
            description="Test description",
            metrics={"accuracy": 1.0}
        )
        self.assertIsNotNone(benchmark_id)
        
        # Get benchmarks
        benchmarks = self.db.get_benchmarks()
        self.assertEqual(len(benchmarks), 1)
        self.assertEqual(benchmarks[0]["name"], "Test Benchmark")
    
    def test_model_management(self):
        """Test model management functionality."""
        # Add a test user
        user_id = self.db.add_user("test_user", "test_hf_id", False)
        
        # Add a test model
        model_id = self.db.add_model(
            name="Test Model",
            hf_model_id="test/model",
            user_id=user_id,
            tag="Reasoning",
            parameters="7B",
            description="Test model description"
        )
        self.assertIsNotNone(model_id)
        
        # Get models
        models = self.db.get_models()
        self.assertEqual(len(models), 1)
        self.assertEqual(models[0]["name"], "Test Model")
        
        # Get models by tag
        models = self.db.get_models(tag="Reasoning")
        self.assertEqual(len(models), 1)
        self.assertEqual(models[0]["tag"], "Reasoning")
    
    def test_evaluation_management(self):
        """Test evaluation management functionality."""
        # Add a test user
        user_id = self.db.add_user("test_user", "test_hf_id", False)
        
        # Add a test model
        model_id = self.db.add_model(
            name="Test Model",
            hf_model_id="test/model",
            user_id=user_id,
            tag="Reasoning"
        )
        
        # Add a test benchmark
        benchmark_id = self.db.add_benchmark(
            name="Test Benchmark",
            dataset_id="test/dataset"
        )
        
        # Add a test evaluation
        evaluation_id = self.db.add_evaluation(
            model_id=model_id,
            benchmark_id=benchmark_id
        )
        self.assertIsNotNone(evaluation_id)
        
        # Update evaluation status
        self.db.update_evaluation_status(
            evaluation_id=evaluation_id,
            status="running"
        )
        
        # Get next in queue
        next_eval = self.db.get_next_in_queue()
        self.assertIsNotNone(next_eval)
        self.assertEqual(next_eval["evaluation_id"], evaluation_id)
        
        # Complete evaluation
        self.db.update_evaluation_status(
            evaluation_id=evaluation_id,
            status="completed",
            results={"accuracy": 0.85},
            score=85.0
        )
        
        # Get evaluation results
        results = self.db.get_evaluation_results()
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]["score"], 85.0)
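
    def test_queue_ordering(self):
        """Sketch: pending evaluations should be served in FIFO order.

        This test assumes get_next_in_queue() returns the earliest pending
        evaluation; adjust if the queue uses a different priority rule.
        """
        user_id = self.db.add_user("queue_user", "queue_hf_id", False)
        benchmark_id = self.db.add_benchmark(
            name="Queue Benchmark",
            dataset_id="test/queue"
        )
        model_a_id = self.db.add_model(
            name="Model A", hf_model_id="test/model_a",
            user_id=user_id, tag="Reasoning"
        )
        model_b_id = self.db.add_model(
            name="Model B", hf_model_id="test/model_b",
            user_id=user_id, tag="Reasoning"
        )
        
        # Enqueue two evaluations; the first submitted should come out first
        first_eval_id = self.db.add_evaluation(
            model_id=model_a_id, benchmark_id=benchmark_id
        )
        self.db.add_evaluation(model_id=model_b_id, benchmark_id=benchmark_id)
        
        next_eval = self.db.get_next_in_queue()
        self.assertIsNotNone(next_eval)
        self.assertEqual(next_eval["evaluation_id"], first_eval_id)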
    
    def test_leaderboard(self):
        """Test leaderboard functionality."""
        # Add test data
        user_id = self.db.add_user("test_user", "test_hf_id", False)
        
        # Add models with different tags
        model1_id = self.db.add_model(
            name="Model 1",
            hf_model_id="test/model1",
            user_id=user_id,
            tag="Reasoning"
        )
        
        model2_id = self.db.add_model(
            name="Model 2",
            hf_model_id="test/model2",
            user_id=user_id,
            tag="Coding"
        )
        
        # Add a benchmark
        benchmark_id = self.db.add_benchmark(
            name="Test Benchmark",
            dataset_id="test/dataset"
        )
        
        # Add evaluations
        eval1_id = self.db.add_evaluation(
            model_id=model1_id,
            benchmark_id=benchmark_id
        )
        
        eval2_id = self.db.add_evaluation(
            model_id=model2_id,
            benchmark_id=benchmark_id
        )
        
        # Complete evaluations
        self.db.update_evaluation_status(
            evaluation_id=eval1_id,
            status="completed",
            results={"accuracy": 0.9},
            score=90.0
        )
        
        self.db.update_evaluation_status(
            evaluation_id=eval2_id,
            status="completed",
            results={"accuracy": 0.8},
            score=80.0
        )
        
        # Get full leaderboard data (presumably a pandas DataFrame, given
        # the .iloc access below)
        df = self.leaderboard.get_leaderboard_data()
        self.assertEqual(len(df), 2)
        
        # Test filtering by tag
        df_reasoning = self.leaderboard.get_leaderboard_data(tag="Reasoning")
        self.assertEqual(len(df_reasoning), 1)
        self.assertEqual(df_reasoning.iloc[0]["score"], 90.0)
        
        df_coding = self.leaderboard.get_leaderboard_data(tag="Coding")
        self.assertEqual(len(df_coding), 1)
        self.assertEqual(df_coding.iloc[0]["score"], 80.0)

if __name__ == "__main__":
    unittest.main()