import gradio as gr
import pandas as pd
import plotly.express as px
import os  # For building data file paths and checking that the files exist

# --- Configuration ---
DATA_DIR = "." # Assume CSV files are in the same directory as app.py
SUMMARY_FILE = os.path.join(DATA_DIR, "data/summary_data.csv")
DOMAIN_RANKS_FILE = os.path.join(DATA_DIR, "data/domain_ranks.csv")
COST_FILE = os.path.join(DATA_DIR, "data/cost_data.csv")
AVG_LATENCY_FILE = os.path.join(DATA_DIR, "data/avg_latency.csv")
P99_LATENCY_FILE = os.path.join(DATA_DIR, "data/p99_latency.csv")

# --- Helper Function to Load Data ---
def load_data(filepath, separator=','):
    """Loads data, handling potential file not found errors."""
    if not os.path.exists(filepath):
        print(f"Warning: Data file not found at {filepath}")
        return pd.DataFrame() # Return empty DataFrame
    try:
        # Adjust separator if needed (e.g., sep='\t' for tab-separated)
        df = pd.read_csv(filepath, sep=separator)
        # Basic cleanup: remove potential unnamed index columns often added by spreadsheets
        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
        # Attempt to convert numeric columns, coercing errors to NaN
        for col in df.columns:
            if col != 'Model Name' and col != 'model_name': # Avoid converting model names
                 # Check if column might represent numeric data before converting
                 if df[col].astype(str).str.contains(r'^[0-9.,eE-]+$').any():
                     df[col] = pd.to_numeric(df[col], errors='coerce')
        return df
    except Exception as e:
        print(f"Error loading {filepath}: {e}")
        return pd.DataFrame()
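
# Usage sketch (the file name and separator below are hypothetical, shown only to illustrate the
# helper's signature): load_data defaults to comma-separated files, but a tab-separated export
# could be read the same way, e.g.
#   df_example = load_data("data/summary_data.tsv", separator='\t')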

# --- Load All Data ---
print("Loading data...")
df_summary = load_data(SUMMARY_FILE)
df_domain = load_data(DOMAIN_RANKS_FILE)
df_cost = load_data(COST_FILE)
df_avg_latency = load_data(AVG_LATENCY_FILE)
df_p99_latency = load_data(P99_LATENCY_FILE)
print("Data loading complete.")

# --- Convert Costs to USD Cents ---
COST_COLUMN_SUMMARY = 'Costs (USD)' # IMPORTANT: Check this matches your summary_data.csv header EXACTLY
NEW_COST_COLUMN_SUMMARY = 'Avg Cost ($ Cents)' # This is the new name we'll use

# Convert summary cost
if not df_summary.empty and COST_COLUMN_SUMMARY in df_summary.columns:
    df_summary[COST_COLUMN_SUMMARY] = (pd.to_numeric(df_summary[COST_COLUMN_SUMMARY], errors='coerce') * 100).round(3)  # USD -> $ Cents, rounded to 3 decimals
    df_summary.rename(columns={COST_COLUMN_SUMMARY: NEW_COST_COLUMN_SUMMARY}, inplace=True)
    print(f"Converted '{COST_COLUMN_SUMMARY}' to $ Cents and renamed to '{NEW_COST_COLUMN_SUMMARY}' in df_summary.")
else:
    print(f"Warning: Column '{COST_COLUMN_SUMMARY}' not found in df_summary for conversion.")

# Convert cost breakdown data
if not df_cost.empty:
    # IMPORTANT: Check if your model name column in cost_data.csv is 'model_name' or 'Model Name' etc.
    model_col_name = 'model_name' # Adjust if needed
    cost_cols = [col for col in df_cost.columns if col != model_col_name]
    for col in cost_cols:
        # Handle potential non-numeric data gracefully before multiplying
        df_cost[col] = (pd.to_numeric(df_cost[col], errors='coerce') * 100).round(3)  # USD -> $ Cents, rounded to 3 decimals
    print("Converted cost breakdown columns to $ Cents in df_cost.")
# --- *** End of Cost Conversion *** ---
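# Worked example of the conversion above (illustrative values, not real data): a per-response cost
# of 0.01234 USD becomes 0.01234 * 100 = 1.234 cents, unchanged by .round(3); a cost of
# 0.000456 USD becomes 0.0456 cents, rounded to 0.046.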

# Rename columns for clarity if needed (example for summary)
# Make sure the original names match your CSV headers EXACTLY
try:
    df_summary = df_summary.rename(columns={
        'Model Name': 'Model', # If your CSV uses 'Model Name'
        # Add other renames here if your CSV headers differ from the target names below
        # 'Costs (USD)': 'Avg Cost (USD/response)',
        # 'Avg Answer Duration (sec)': 'Avg Latency (s)',
        # 'P99 Answer Duration (sec)': 'P99 Latency (s)'
    })
    # Select and reorder columns for the main table (benchmark columns are shown in their own tab)
    summary_cols_display = ['Model', 'AutoBench', NEW_COST_COLUMN_SUMMARY, 'Avg Answer Duration (sec)', 'P99 Answer Duration (sec)']
    # Filter to only columns that actually exist after loading and renaming
    summary_cols_display = [col for col in summary_cols_display if col in df_summary.columns]
    df_summary_display = df_summary[summary_cols_display].copy() # Use .copy() to avoid SettingWithCopyWarning

    # Select columns for the new benchmark comparison table
    benchmark_cols = ['Model', 'AutoBench', 'Chatbot Ar.', 'AAI Index', 'MMLU Index']
    benchmark_cols = [col for col in benchmark_cols if col in df_summary.columns] # Filter existing
    df_benchmark_display = df_summary[benchmark_cols].copy() # Use .copy()

    # Ensure AutoBench score is numeric for sorting BOTH display tables
    if 'AutoBench' in df_summary_display.columns:
        df_summary_display['AutoBench'] = pd.to_numeric(df_summary_display['AutoBench'], errors='coerce')
        df_summary_display.sort_values(by='AutoBench', ascending=False, inplace=True) # Use inplace=True
    else:
        print("Warning: 'AutoBench' column not found for sorting summary table.")

    if 'AutoBench' in df_benchmark_display.columns:
        df_benchmark_display['AutoBench'] = pd.to_numeric(df_benchmark_display['AutoBench'], errors='coerce')
        df_benchmark_display.sort_values(by='AutoBench', ascending=False, inplace=True) # Use inplace=True
    else:
        print("Warning: 'AutoBench' column not found for sorting benchmark table.")

except KeyError as e:
    print(f"Error preparing display columns: Missing key {e}. Check CSV headers and rename mapping.")
    df_summary_display = df_summary.copy() # Fallback
    df_benchmark_display = pd.DataFrame() # Fallback to empty for benchmark table
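
# For reference (inferred from the column names used in this script, not a guaranteed schema):
# summary_data.csv is expected to contain at least 'Model Name' (or 'Model'), 'AutoBench',
# 'Costs (USD)', 'Avg Answer Duration (sec)', 'P99 Answer Duration (sec)', and optionally
# 'Chatbot Ar.', 'AAI Index', 'MMLU Index' for the benchmark comparison tab.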


# --- Build Gradio App ---
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# AutoBench LLM Leaderboard")
    gr.Markdown(
        "Interactive leaderboard for AutoBench, where LLMs rank LLMs' responses. "
        "Includes performance, cost, and latency metrics."
        "Data updated on April 25, 2025."
        "\n\nMore info for this benchmark run: [AutoBench Run 2 Results](https://huggingface.co/blog/PeterKruger/autobench-2nd-run). "
        "If you want to know more about AutoBench: [AutoBench Release](https://huggingface.co/blog/PeterKruger/autobench)."
        )

    # --- Tab 1: Overall Ranking ---
    with gr.Tab("Overall Ranking"):
        gr.Markdown("## Overall Model Performance")
        # Benchmark correlations are reported in the Benchmark Comparison tab
        gr.Markdown("Models ranked by AutoBench score. Lower cost ($ Cents) and latency (s) are better.")
        # Check if df_summary_display has data before rendering
        if not df_summary_display.empty:
             # Create a copy specifically for this tab's display and rename the column
             df_overall_rank_display = df_summary_display.copy()
             if 'AutoBench' in df_overall_rank_display.columns:
                 df_overall_rank_display.rename(columns={'AutoBench': 'Rank'}, inplace=True)

             gr.DataFrame(
                df_overall_rank_display, # Pass the renamed DF
                # Adjust datatype length based on potentially fewer columns
                datatype=['str'] + ['number'] * (len(df_overall_rank_display.columns) - 1),
                interactive=True, # Allows sorting
                # height=600 # Adjust height as needed
             )
        else:
             gr.Markdown("_(Summary data failed to load or is empty. Please check `summary_data.csv`)_")

    # --- NEW Tab 1.5: Benchmark Comparison ---
    with gr.Tab("Benchmark Comparison"):
        gr.Markdown("## Benchmark Comparison")
        gr.Markdown("Comparison of AutoBench scores with other popular benchmarks. AutoBench features 82.51% correlation with Chatbot Arena, 83.74% with Artificial Analysis Intelligence Index, and 71.51% with MMLU. Models sorted by AutoBench score.")
        if not df_benchmark_display.empty:
            gr.DataFrame(
                df_benchmark_display,
                datatype=['str'] + ['number'] * (len(df_benchmark_display.columns) - 1),
                interactive=True # Allow sorting
            )
        else:
            gr.Markdown("_(Benchmark comparison data could not be prepared. Check `summary_data.csv` for 'Chatbot Ar.', 'AAI Index', 'MMLU Index' columns.)_")

    # --- Tab 2: Performance Plots ---
    with gr.Tab("Performance Plots"):
        gr.Markdown("## Performance Visualizations")
        gr.Markdown("Exploring relationships between AutoBench Rank, Latency, and Cost.")

        # Scatter Plot 1 (using summary data)
        gr.Markdown("### Rank vs. Average Cost") 
        if not df_summary.empty and 'AutoBench' in df_summary.columns and NEW_COST_COLUMN_SUMMARY in df_summary.columns:   
            # Filter out rows where essential plot data might be missing
            plot_df = df_summary.dropna(subset=['AutoBench', NEW_COST_COLUMN_SUMMARY, 'Model']).copy()
            plot_df[NEW_COST_COLUMN_SUMMARY] = pd.to_numeric(plot_df[NEW_COST_COLUMN_SUMMARY], errors='coerce')
            plot_df = plot_df.dropna(subset=[NEW_COST_COLUMN_SUMMARY]) # Drop if cost conversion failed

            if not plot_df.empty:
                fig_cost = px.scatter(
                    plot_df,
                    x=NEW_COST_COLUMN_SUMMARY,
                    y="AutoBench",
                    text="Model", # Show model name near point
                    log_x=True, # Use log scale for cost
                    title="AutoBench Rank vs. Average Cost per Response ($ Cents - Log Scale)",
                    labels={'AutoBench': 'AutoBench Rank', NEW_COST_COLUMN_SUMMARY: 'Avg Cost ($ Cents) - Log Scale'},
                    hover_data=['Model', 'AutoBench', NEW_COST_COLUMN_SUMMARY, 'Avg Answer Duration (sec)'] # Show details on hover
                )
                fig_cost.update_traces(textposition='top center')
                fig_cost.update_layout(
                    xaxis_title="Avg Cost ($ Cents) - Log Scale", # Keep bottom axis title
                    yaxis_title="AutoBench Rank",
                    width=1000,   # Figure width in pixels
                    height=800,   # Figure height in pixels
                    # Secondary x-axis mirrored along the top of the plot:
                    xaxis2=dict(
                        overlaying='x',     # Link to primary x-axis
                        matches='x',        # Explicitly match primary x-axis properties (like type='log')
                        side='top',         # Position on top
                        showticklabels=True,# Show the labels (numbers)
                        showline=True,      # Explicitly show the axis line itself
                        title=None          # No title for the top axis
                    )
                )
                gr.Plot(fig_cost)
            else:
                gr.Markdown("_(Insufficient valid data for Rank vs Cost plot. Check 'AutoBench' and NEW_COST_COLUMN_SUMMARY columns in `summary_data.csv`)_")
        else:
             gr.Markdown("_(Summary data failed to load or essential columns missing for Rank vs Cost plot)_")

        # Plot 2: Rank vs Average Latency
        gr.Markdown("### Rank vs. Average Latency")
        if not df_summary.empty and 'AutoBench' in df_summary.columns and 'Avg Answer Duration (sec)' in df_summary.columns:
            # Filter out rows where essential plot data might be missing
            plot_df_avg_latency = df_summary.dropna(subset=['AutoBench', 'Avg Answer Duration (sec)', 'Model']).copy()
            plot_df_avg_latency['Avg Answer Duration (sec)'] = pd.to_numeric(plot_df_avg_latency['Avg Answer Duration (sec)'], errors='coerce')
            plot_df_avg_latency = plot_df_avg_latency.dropna(subset=['Avg Answer Duration (sec)']) # Drop if conversion failed

            if not plot_df_avg_latency.empty:
                fig_avg_latency = px.scatter(
                    plot_df_avg_latency,
                    x="Avg Answer Duration (sec)",
                    y="AutoBench",
                    text="Model",
                    log_x=True,  # Use log scale for latency - adjust if not desired
                    title="AutoBench Rank vs. Average Latency (Log Scale)",
                    labels={'AutoBench': 'AutoBench Rank', 'Avg Answer Duration (sec)': 'Avg Latency (s) - Log Scale'},
                    hover_data=['Model', 'AutoBench', 'Avg Answer Duration (sec)', NEW_COST_COLUMN_SUMMARY]
                )
                fig_avg_latency.update_traces(textposition='top center')
                fig_avg_latency.update_layout(xaxis_title="Avg Latency (s) - Log Scale", yaxis_title="AutoBench Rank", width=1000, height=800)
                gr.Plot(fig_avg_latency)
            else:
                gr.Markdown("_(Insufficient valid data for Rank vs Avg Latency plot. Check 'AutoBench' and 'Avg Answer Duration (sec)' columns in `summary_data.csv`)_")
        else:
             gr.Markdown("_(Summary data failed to load or essential columns missing for Rank vs Avg Latency plot)_")


        # Plot 3: Rank vs P99 Latency
        gr.Markdown("### Rank vs. P99 Latency")
        if not df_summary.empty and 'AutoBench' in df_summary.columns and 'P99 Answer Duration (sec)' in df_summary.columns:
            # Filter out rows where essential plot data might be missing
            plot_df_p99_latency = df_summary.dropna(subset=['AutoBench', 'P99 Answer Duration (sec)', 'Model']).copy()
            plot_df_p99_latency['P99 Answer Duration (sec)'] = pd.to_numeric(plot_df_p99_latency['P99 Answer Duration (sec)'], errors='coerce')
            plot_df_p99_latency = plot_df_p99_latency.dropna(subset=['P99 Answer Duration (sec)']) # Drop if conversion failed

            if not plot_df_p99_latency.empty:
                fig_p99_latency = px.scatter(
                    plot_df_p99_latency,
                    x="P99 Answer Duration (sec)",
                    y="AutoBench",
                    text="Model",
                    log_x=True,  # Use log scale for latency - adjust if not desired
                    title="AutoBench Rank vs. P99 Latency (Log Scale)",
                    labels={'AutoBench': 'AutoBench Rank', 'P99 Answer Duration (sec)': 'P99 Latency (s) - Log Scale'},
                    hover_data=['Model', 'AutoBench', 'P99 Answer Duration (sec)', 'Avg Answer Duration (sec)', NEW_COST_COLUMN_SUMMARY]
                )
                fig_p99_latency.update_traces(textposition='top center')
                fig_p99_latency.update_layout(xaxis_title="P99 Latency (s) - Log Scale", yaxis_title="AutoBench Rank", width=1000, height=800)
                gr.Plot(fig_p99_latency)
            else:
                gr.Markdown("_(Insufficient valid data for Rank vs P99 Latency plot. Check 'AutoBench' and 'P99 Answer Duration (sec)' columns in `summary_data.csv`)_")
        else:
             gr.Markdown("_(Summary data failed to load or essential columns missing for Rank vs P99 Latency plot)_")

    # --- Tab 3: Cost & Latency Analysis ---
    with gr.Tab("Cost & Latency Analysis"):
        gr.Markdown("## Performance vs. Cost/Latency Trade-offs")

        # Cost Breakdown Table
        gr.Markdown("### Cost Breakdown per Domain ($ Cents/Response)") # <-- MODIFIED
        if not df_cost.empty:
            # Make model name the first column if it exists
            if 'model_name' in df_cost.columns:
                 cols = ['model_name'] + [col for col in df_cost.columns if col != 'model_name']
                 df_cost_display = df_cost[cols]
            else:
                 df_cost_display = df_cost # Use as is if 'model_name' isn't found
            gr.DataFrame(df_cost_display, interactive=True)
        else:
             gr.Markdown("_(Cost breakdown data failed to load or is empty. Please check `cost_data.csv`)_")

        # Latency Breakdown Tables
        gr.Markdown("### Average Latency Breakdown per Domain (Seconds)")
        if not df_avg_latency.empty:
            if 'model_name' in df_avg_latency.columns:
                 cols = ['model_name'] + [col for col in df_avg_latency.columns if col != 'model_name']
                 df_avg_latency_display = df_avg_latency[cols]
            else:
                 df_avg_latency_display = df_avg_latency
            gr.DataFrame(df_avg_latency_display, interactive=True)
        else:
             gr.Markdown("_(Average latency data failed to load or is empty. Please check `avg_latency.csv`)_")

        gr.Markdown("### P99 Latency Breakdown per Domain (Seconds)")
        if not df_p99_latency.empty:
            if 'model_name' in df_p99_latency.columns:
                 cols = ['model_name'] + [col for col in df_p99_latency.columns if col != 'model_name']
                 df_p99_latency_display = df_p99_latency[cols]
            else:
                 df_p99_latency_display = df_p99_latency
            gr.DataFrame(df_p99_latency_display, interactive=True)
        else:
            gr.Markdown("_(P99 latency data failed to load or is empty. Please check `p99_latency.csv`)_")


    # --- Tab 4: Domain Performance ---
    with gr.Tab("Domain Performance"):
        gr.Markdown("## Performance Across Different Domains")
        gr.Markdown("Model ranks within specific knowledge or task areas. Higher is better.")
        if not df_domain.empty:
            if 'Model Name' in df_domain.columns:
                 # Attempt to make Model Name first col
                 cols = ['Model Name'] + [col for col in df_domain.columns if col != 'Model Name']
                 df_domain_display = df_domain[cols]
            else:
                 df_domain_display = df_domain # Use as is
            gr.DataFrame(df_domain_display, interactive=True)
        else:
            gr.Markdown("_(Domain ranks data failed to load or is empty. Please check `domain_ranks.csv`)_")

    # --- Tab 5: About ---
    with gr.Tab("About AutoBench"):
        gr.Markdown("""
        ## About AutoBench

        AutoBench is an LLM benchmark where Large Language Models (LLMs) evaluate and rank the responses generated by other LLMs. The questions themselves are also generated by LLMs across a diverse set of domains and ranked for quality.

        ### Methodology
        1.  **Question Generation:** High-quality questions across various domains (Coding, History, Science, etc.) are generated by selected LLMs.
        2.  **Response Generation:** The models being benchmarked generate answers to these questions.
        3.  **Ranking:** Ranking LLMs rank the responses from different models for each question, on a 1-5 scale.
        4.  **Aggregation:** Scores are averaged across multiple questions and domains to produce the final AutoBench rank. 

        ### Metrics
        * **AutoBench Score (AB):** The average rank received by a model's responses across all questions/domains (higher is better).
        * **Avg Cost (USD Cents/response):** Estimated average cost to generate one response based on model provider pricing (input+output tokens). Lower is better.
        * **Avg Latency (s):** Average time taken by the model to generate a response. Lower is better.
        * **P99 Latency (s):** The 99th percentile of response time, indicating worst-case latency. Lower is better.
        * **Chatbot Arena / Artificial Analysis Intelligence Index / MMLU:** Scores from other well-known benchmarks for comparison (where available).

        ### Data
        This leaderboard reflects a run completed on April 23, 2025. The run includes recently released models such as o4-mini, GPT-4.1-mini, Gemini 2.5 Pro Preview, Claude 3.7 Sonnet:thinking, etc.

        ### Links
        * [AutoBench Run 2 Results](https://huggingface.co/blog/PeterKruger/autobench-2nd-run)
        * [AutoBench Blog Post](https://huggingface.co/blog/PeterKruger/autobench)
        * [Autobench Repositories](https://huggingface.co/AutoBench)

        **Disclaimer:** Benchmark results provide one perspective on model capabilities. Performance can vary based on specific tasks, prompts, and API conditions. Costs are estimates and subject to change by providers. Latency depends on server load and geographic location.
        """)

# --- Launch the App ---
print("Launching Gradio app...")
app.launch()
print("Gradio app launched.")