import ast
import base64
import json
import logging
import os
import sys
from datetime import datetime, timedelta

import dash
import dash_ag_grid as dag
import numpy as np
import pandas as pd
from dash import html, dcc, Input, Output, State, no_update
# Route all log records (INFO and above) to stdout so process supervisors /
# container log collectors can capture them.
# This setup works with the PYTHONUNBUFFERED=1 environment variable.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)
# Helper that builds one option dict in the shape dcc.Checklist expects.
def create_option(value, label):
    """Return a ``{'label': ..., 'value': ...}`` checklist option."""
    option = {'label': label, 'value': value}
    return option
# Define groups of columns that will be toggled together.
# Values are plain lists of column names, EXCEPT "politics_axes_group", which
# maps each axis column to its pixel width (consumed by the dict comprehension
# that builds the axis entries of ALL_COLUMN_DEFS).
COLUMN_GROUPS = {
    "uncensored_ugi_cats": ["Hazardous", "Entertainment", "SocPol"],
    "w10_sub_scores": ["W/10-Direct", "W/10-Adherence"],
    "natint_sub_scores": ["Standard", "Pop Culture", "World Model"],
    "writing_repetition_group": [
        "lexical_stuckness", "originality_score", "internal_semantic_redundancy"
    ],
    "writing_style_group": [
        "Readability_Grade_Level", "Verb_to_Noun_Ratio", "Adjective_Adverb_Percentage", "Dialogue_Percentage"
    ],
    "nsfw_dark_group": ["avg_nsfw_score", "avg_dark_score"],
    "length_adherence_group": ["avg_length_error_pct", "creative_writing_wc_exceeded_pct"],
    "politics_agg_group": ["govt", "dipl", "econ", "scty"],
    # column name -> column width in px (wider labels need wider headers)
    "politics_axes_group": {
        'Federal-Unitary': 110,
        'Democratic-Autocratic': 130,
        'Security-Freedom': 125,
        'Nationalism-Internationalism': 170,
        'Militarist-Pacifist': 125,
        'Assimilationist-Multiculturalist': 160,
        'Collectivize-Privatize': 140,
        'Planned-LaissezFaire': 145,
        'Isolationism-Globalism': 145,
        'Irreligious-Religious': 135,
        'Progressive-Traditional': 145,
        'Acceleration-Bioconservative': 175
    },
    "world_model_group": [
        'wm_recipe_percent_error', 'wm_geoguesser_mae', 'wm_weight_percent_error',
        'wm_music_mae', 'Show Rec Score',
        "Show Rec MAE", "Show Rec Correlation", "Show Rec Std Dev Error"
    ],
}
# Define the columns for each preset, using group keys for grouped columns.
# Each inner dict maps {column name OR COLUMN_GROUPS key -> checklist label}.
PRESET_COLUMNS = {
    "Overview": {
        "UGI 🏆": "UGI 🏆", "W/10 👍": "W/10 👍", "NatInt 💡": "NatInt 💡", "Writing ✍️": "Writing ✍️",
        "Political Lean 📋": "Political Lean 📋"
    },
    "Uncensored": {
        "UGI 🏆": "UGI 🏆",
        "uncensored_ugi_cats": "UGI Categories",
        "W/10 👍": "W/10 👍",
        "w10_sub_scores": "W/10 Categories"
    },
    "Intelligence": {
        "NatInt 💡": "NatInt 💡", "natint_sub_scores": "NatInt Categories",
        "world_model_group": "World Model Tests",
    },
    "Writing": {
        "Writing ✍️": "Writing ✍️",
        "nsfw_dark_group": "NSFW / Dark Lean",
        "writing_style_group": "Stylistic Metrics",
        "writing_repetition_group": "Repetition Metrics",
        "length_adherence_group": "Length Adherence",
        "avg_writing_style_score": "Style Adherence",
    },
    "Politics": {
        "Political Lean 📋": "Political Lean 📋", "12axes Ideology": "Ideology",
        "politics_agg_group": "Aggregate Scores",
        "politics_axes_group": "12 Axes Scores"
    }
}
# Create the checklist options from the preset definitions
# {preset name -> [{'label': ..., 'value': ...}, ...]} for dcc.Checklist.
PRESET_OPTIONS = {
    preset: [create_option(col, label) for col, label in cols.items()]
    for preset, cols in PRESET_COLUMNS.items()
}
# Define other toggleable columns that are not part of presets
# (column name -> label shown in the "Other Options" checklist).
OTHER_TOGGLES = {
    "Prompt Template": "Template",
    "Architecture": "Architecture",
    "Avg Thinking Chars": "Avg Thinking Chars"
}
def load_leaderboard_data(csv_file_path):
    """Load the leaderboard CSV and preprocess it for the ag-grid display.

    Adds derived columns (type sort codes, formatted dates, 'is_new' badge,
    pin/select flags), normalizes percentage columns, and replaces NaN in the
    Show-Rec columns with JSON-safe sentinels. Returns an empty DataFrame
    (after printing a traceback) on any error.
    """
    try:
        # Load the CSV without special boolean handling first
        df = pd.read_csv(csv_file_path, na_values=['NA'])
        # Defensive: remove any leading/trailing whitespace from headers
        df.columns = df.columns.str.strip()
        if 'Is Thinking Model' in df.columns:
            # astype(str) already turns NaN into a string ('nan'), which is
            # != 'TRUE' after upper-casing, so missing values become False
            # without needing a separate fillna step.
            df['Is Thinking Model'] = (
                df['Is Thinking Model'].astype(str).str.strip().str.upper() == 'TRUE'
            )
        else:
            df['Is Thinking Model'] = False

        # Add type sort value
        def get_type_sort_value(row):
            # Sort codes: 0=Base, 1=Finetune, 2=Merge, 3=Proprietary, 4=Unknown
            if pd.isna(row['Total Parameters']):
                return 3  # P (Proprietary)
            if row['Is Foundation'] and not row['Is Merged']:
                return 0  # B (Base)
            if row['Is Merged']:
                return 2  # M (Merge)
            if row['Is Finetuned'] and not row['Is Merged']:
                return 1  # F (Finetune)
            return 4  # Unknown

        df['model_type_sort'] = df.apply(get_type_sort_value, axis=1)
        df['type'] = df['model_type_sort']

        # Convert date columns to datetime
        for col in ['Release Date', 'Test Date']:
            df[col] = pd.to_datetime(df[col], format='%m/%d/%Y', errors='coerce')
        # Store original (datetime) release date for sorting before the
        # display columns are converted to strings.
        df['Release_Date_Sort'] = df['Release Date']
        # Format dates as strings for display
        df['Release Date'] = df['Release Date'].dt.strftime('%Y-%m-%d')
        df['Test Date'] = df['Test Date'].dt.strftime('%Y-%m-%d')

        # Calculate the cutoff date for the 'new' emoji. String comparison is
        # valid because both sides are ISO (YYYY-MM-DD) formatted.
        new_cutoff = (datetime.now() - timedelta(days=2)).strftime('%Y-%m-%d')  # temp set to 2
        df['is_new'] = df.apply(
            lambda row: '🆕' if pd.notna(row["Test Date"]) and row["Test Date"] >= new_cutoff else '',
            axis=1
        )

        # Store model name and link separately
        df['Model_Link'] = df['Model Link'].fillna('')
        df['Model_Display'] = df['author/model_name']
        # Add pinned and selected columns
        df['pinned'] = False
        df['selected'] = False

        # Flatten the list of political columns, expanding group keys into
        # their actual column names.
        politics_keys = list(PRESET_COLUMNS['Politics'].keys())
        all_politics_individual_cols = []
        for key in politics_keys:
            if key in COLUMN_GROUPS:
                all_politics_individual_cols.extend(COLUMN_GROUPS[key])
            else:
                all_politics_individual_cols.append(key)

        # Now, process only the real columns that are percentages
        percentage_columns = [col for col in all_politics_individual_cols if col != '12axes Ideology']
        for col in percentage_columns:
            if col in df.columns:  # Check if the column exists before processing
                df[col] = pd.to_numeric(df[col].astype(str).str.rstrip('%'), errors='coerce')

        # Replace NaN with sentinel values that serialize reliably to JSON and
        # sort last: higher-is-better scores get a very small number (last when
        # descending), lower-is-better errors a very large one (last when
        # ascending). Column assignment is used instead of
        # Series.fillna(inplace=True), which is deprecated and unreliable
        # under pandas copy-on-write; the existence guard keeps a missing
        # column from aborting the whole load.
        for col, sentinel in (
            ('Show Rec Score', -99999),
            ('Show Rec Correlation', -99999),
            ('Show Rec MAE', 99999),
            ('Show Rec Std Dev Error', 99999),
        ):
            if col in df.columns:
                df[col] = df[col].fillna(sentinel)

        # Sort with multiple keys
        df = df.sort_values(
            by=['UGI 🏆', 'NatInt 💡', 'Release_Date_Sort'],
            ascending=[False, False, True]
        )
        return df
    except Exception as e:
        print(f"Error loading CSV file: {e}")
        # Print the full traceback to help debug future issues
        import traceback
        traceback.print_exc()
        return pd.DataFrame()
def load_ideology_descriptions():
    """Parse ``ideologies.js`` and return a ``{name: description}`` mapping.

    Extracts the outermost ``[...]`` array literal from the JS file and parses
    it as a Python literal. Returns an empty dict if the file is missing,
    contains no array, or cannot be parsed.
    """
    try:
        with open('ideologies.js', 'r', encoding='utf-8') as file:
            content = file.read()
            # Extract the array content between the outermost brackets
            start_idx = content.find('[')
            end_idx = content.rfind(']') + 1
            if start_idx == -1 or end_idx == 0:
                return {}

            ideology_data = content[start_idx:end_idx]
            # Convert JavaScript boolean literals to Python ones.
            # NOTE(review): a plain replace also rewrites 'true'/'false'
            # occurring inside description strings — confirm the data never
            # contains those words, or switch to a word-boundary regex.
            ideology_data = ideology_data.replace('true', 'True').replace('false', 'False')
            # literal_eval only accepts Python literals — unlike eval(), a
            # malicious or corrupted ideologies.js cannot execute code. Parse
            # failures raise and are handled by the except below, matching the
            # previous error path.
            ideology_data = ast.literal_eval(ideology_data)

            # Create a dictionary mapping ideology names to their descriptions
            return {item['name']: item['desc'] for item in ideology_data}
    except Exception as e:
        print(f"Error loading ideologies.js: {e}")
        return {}
# Load descriptions once at startup
IDEOLOGY_DESCRIPTIONS = load_ideology_descriptions()
def get_kofi_button_base64():
    """Return the Ko-fi button images, base64-encoded, keyed by theme.

    Reads ``Images/support_me_on_kofi_white.png`` (light) and
    ``Images/support_me_on_kofi_dark.png`` (dark) relative to this module and
    returns ``{'light': <b64 str>, 'dark': <b64 str>}``.
    """
    current_dir = os.path.dirname(os.path.realpath(__file__))

    # Return both light and dark theme images as a dictionary
    images = {}
    for theme in ['light', 'dark']:
        filename = 'support_me_on_kofi_white.png' if theme == 'light' else 'support_me_on_kofi_dark.png'
        # Bug fix: the per-theme filename was computed but never interpolated
        # into the path, so the open() below always used a literal placeholder.
        with open(os.path.join(current_dir, f"Images/{filename}"), "rb") as image_file:
            images[theme] = base64.b64encode(image_file.read()).decode('utf-8')
    return images
# Initialize the Dash app
app = dash.Dash(__name__, external_stylesheets=[
    # Font Awesome 5 (icon classes such as "fas fa-search" used in the layout)
    "https://use.fontawesome.com/releases/v5.15.4/css/all.css"
])
server = app.server  # underlying Flask server (exposed for deployment servers)
# Custom CSS
# NOTE(review): the HTML skeleton of this template was garbled/stripped in the
# file (only the {%...%} interpolation keys and the title text survived).
# Reconstructed from Dash's documented default index template, which is what
# the surviving structure matches.
app.index_string = '''
<!DOCTYPE html>
<html>
    <head>
        {%metas%}
        <title>UGI Leaderboard</title>
        {%favicon%}
        {%css%}
    </head>
    <body>
        {%app_entry%}
        <footer>
            {%config%}
            {%scripts%}
            {%renderer%}
        </footer>
    </body>
</html>
'''
# Load data
# Read and preprocess the leaderboard CSV once at startup; the grid's rowData
# is populated from this DataFrame.
df = load_leaderboard_data("ugi-leaderboard-data.csv")
def create_numeric_column(field, width=125, **kwargs):
    """Build an ag-grid column definition for a numeric field.

    Keyword arguments override the defaults below, with two merged keys:
    ``cellClass`` is appended to the base left-aligned class, and
    ``filterParams`` is merged into the default number-filter params.
    """
    base_classes = "ag-left-aligned-cell"
    # Pop (not get) so the final column.update(kwargs) cannot clobber the
    # merged class string and drop the base alignment class.
    custom_class = kwargs.pop("cellClass", "")
    if isinstance(custom_class, list):
        custom_class = " ".join(custom_class)
    final_cell_class = f"{base_classes} {custom_class}".strip()
    incoming_filter_params = kwargs.pop('filterParams', {})
    column = {
        "field": field,
        "width": width,
        "filter": "agNumberColumnFilter",
        "filterParams": {
            "defaultOption": "inRange",
            "filterOptions": ['equals', 'notEqual', 'greaterThan', 'greaterThanOrEqual', 'lessThan', 'lessThanOrEqual', 'inRange']
        },
        # Render null as empty string instead of "null"
        "valueFormatter": {"function": "params.value == null ? '' : String(params.value)"},
        "headerClass": "ag-left-aligned-header wrap-text",
        "cellClass": final_cell_class,
        "wrapHeaderText": True,
        "autoHeaderHeight": True,
        "suppressSizeToFit": True,
        "sortingOrder": ['desc', 'asc'],
    }
    column['filterParams'].update(incoming_filter_params)
    column.update(kwargs)
    return column
def create_text_column(field, width=120, **kwargs):
    """Build an ag-grid column definition for a text field.

    Keyword arguments override the defaults below, with two merged keys:
    ``cellClass`` is appended to the base left-aligned class, and
    ``filterParams`` is merged into the default text-filter params.
    """
    base_classes = "ag-left-aligned-cell"
    # Pop (not get) so the final column.update(kwargs) cannot clobber the
    # merged class string and drop the base alignment class.
    custom_class = kwargs.pop("cellClass", "")
    if isinstance(custom_class, list):
        custom_class = " ".join(custom_class)
    final_cell_class = f"{base_classes} {custom_class}".strip()
    incoming_filter_params = kwargs.pop('filterParams', {})
    column = {
        "field": field,
        "width": width,
        "filter": "agTextColumnFilter",
        "filterParams": {
            "defaultOption": "contains",
            "filterOptions": ['contains', 'notContains', 'startsWith', 'endsWith']
        },
        "headerClass": "ag-left-aligned-header wrap-text",
        "cellClass": final_cell_class,
        "wrapHeaderText": True,
        "autoHeaderHeight": True,
    }
    column['filterParams'].update(incoming_filter_params)
    column.update(kwargs)
    return column
    
# NOTE(review): these header templates appear to have lost their HTML content
# — they contain no markup and no {high}/{low} (or optimal-value) placeholders,
# so the .format(high=..., low=...) calls that consume them below are currently
# no-ops. Restore the original ag-grid header HTML templates from version
# control; TODO confirm.
template_with_split_header = """
    
    
"""
template_with_optimal_value = """
    
    
"""
# This master list defines the final, non-negotiable order of columns in the grid.
# get_initial_column_defs() iterates this list (skipping names absent from
# ALL_COLUMN_DEFS), so a column missing here never appears in the grid.
MASTER_COLUMN_ORDER = [
    "pinned", "is_new", "R", "Avg Thinking Chars", "#P", "type", "Model_Display",
    # Other Toggles
    "Prompt Template", "Architecture",
    # Uncensored
    "UGI 🏆", "Hazardous", "Entertainment", "SocPol",
    "W/10 👍", "W/10-Direct", "W/10-Adherence",
    # Intelligence
    "NatInt 💡",
    "Standard", "Pop Culture", "World Model",
    'wm_recipe_percent_error', 'wm_geoguesser_mae', 'wm_weight_percent_error',
    'wm_music_mae',
    "Show Rec Score", # Main Score
    "Show Rec MAE", "Show Rec Correlation", "Show Rec Std Dev Error",
    # Writing
    "Writing ✍️",
    "avg_nsfw_score", "avg_dark_score",
    "Readability_Grade_Level", "Verb_to_Noun_Ratio", "Adjective_Adverb_Percentage", "Dialogue_Percentage",
    "lexical_stuckness", "originality_score", "internal_semantic_redundancy",
    "avg_length_error_pct", "creative_writing_wc_exceeded_pct",
    "avg_writing_style_score",
    # Politics
    "Political Lean 📋",
    "12axes Ideology", "govt", "dipl", "econ", "scty",
    'Federal-Unitary', 'Democratic-Autocratic', 'Security-Freedom', 'Nationalism-Internationalism',
    'Militarist-Pacifist', 'Assimilationist-Multiculturalist', 'Collectivize-Privatize',
    'Planned-LaissezFaire', 'Isolationism-Globalism', 'Irreligious-Religious',
    'Progressive-Traditional', 'Acceleration-Bioconservative',
    # Always at the end
    "Release Date", "Test Date"
]
# Master dictionary containing definitions for ALL possible columns.
# Keys match MASTER_COLUMN_ORDER entries; values are ag-grid columnDef dicts,
# mostly built through create_numeric_column / create_text_column.
ALL_COLUMN_DEFS = {
    # --- Always Visible ---
    "pinned": {"headerName": "📌", "field": "pinned", "width": 40, "minWidth": 40, "filter": False, "suppressMenu": True, "cellRenderer": "PinRenderer", "suppressSizeToFit": True, "headerClass": "center-aligned-header"},
    "is_new": {"headerName": "", "field": "is_new", "width": 30, "minWidth": 30, "filter": False, "suppressMenu": True, "suppressSizeToFit": True},
    "R": {"headerName": "R", "field": "Is Thinking Model", "cellRenderer": "ReasoningRenderer", "width": 34, "minWidth": 34, "filter": False, "suppressMenu": True, "sortable": True, "suppressSizeToFit": True, "headerClass": "center-aligned-header"},
    "#P": {"field": "#P", "width": 105, "filter": "agNumberColumnFilter", "filterParams": {"defaultOption": "equals"}, "headerClass": "ag-left-aligned-header wrap-text", "cellClass": "ag-right-aligned-cell", "wrapHeaderText": True, "autoHeaderHeight": True, "suppressSizeToFit": True, "sortingOrder": ['desc', 'asc']},
    "type": {"headerName": "T", "field": "type", "width": 32, "minWidth": 32, "filter": False, "suppressMenu": True, "cellRenderer": "TypeRenderer", "sortable": True, "sortingOrder": ['asc', 'desc'], "suppressSizeToFit": True, "headerClass": "center-aligned-header"},
    "Model_Display": {"field": "Model_Display", "headerName": "Model", "cellRenderer": "ModelLink", "filter": "agTextColumnFilter", "filterParams": {"defaultOption": "contains"}, "width": 395, "suppressMenu": False, "headerClass": "ag-left-aligned-header wrap-text", "wrapHeaderText": True, "autoHeaderHeight": True},
    "Release Date": {"field": "Release Date", "width": 105, "filter": "agDateColumnFilter", "filterParams": {"browserDatePicker": True, "inRangeInclusive": True, "defaultOption": "greaterThan"}, "cellClass": ["ag-left-aligned-cell", "border-left"], "headerClass": "ag-left-aligned-header wrap-text", "wrapHeaderText": True, "autoHeaderHeight": True, "sortable": True},
    "Test Date": {"field": "Test Date", "width": 105, "filter": "agDateColumnFilter", "filterParams": {"browserDatePicker": True, "inRangeInclusive": True, "defaultOption": "greaterThan"}, "cellClass": "ag-left-aligned-cell", "headerClass": "ag-left-aligned-header wrap-text", "wrapHeaderText": True, "autoHeaderHeight": True, "sortable": True},
    # --- Main Scores (Overview Columns) ---
    "UGI 🏆": create_numeric_column("UGI 🏆", headerName="UGI 🏆", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "W/10 👍": create_numeric_column("W/10 👍", headerName="W/10 👍", width=116, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "NatInt 💡": create_numeric_column("NatInt 💡", headerName="NatInt 💡", width=140, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "Writing ✍️": create_numeric_column("Writing ✍️", headerName="Writing ✍️", width=135, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "Political Lean 📋": create_numeric_column("Political Lean 📋", headerName="Political Lean 📋", width=135, valueFormatter={"function": "params.value == null ? '' : params.value.toFixed(1) + '%'"}, filterParams={"defaultOption": "inRange"}),
    # --- UGI Categories ---
    "Hazardous": create_numeric_column("Hazardous", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "Entertainment": create_numeric_column("Entertainment", width=122, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "SocPol": create_numeric_column("SocPol", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    # --- W/10 Types ---
    "W/10-Direct": create_numeric_column("W/10-Direct", width=110, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "W/10-Adherence": create_numeric_column("W/10-Adherence", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    # --- NatInt Categories ---
    "Standard": create_numeric_column("Standard", width=120, cellClass="border-left", filterParams={"defaultOption": "greaterThanOrEqual"}),
    "Pop Culture": create_numeric_column("Pop Culture", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "World Model": create_numeric_column("World Model", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    # Error-style metrics below sort ascending first (lower is better).
    'wm_recipe_percent_error': create_numeric_column('wm_recipe_percent_error', headerName="Cooking (% Error)", width=120, cellClass="border-left", filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    'wm_geoguesser_mae': create_numeric_column('wm_geoguesser_mae', headerName="GeoGuesser (km Error)", width=128, filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    'wm_weight_percent_error': create_numeric_column('wm_weight_percent_error', headerName="Weight (% Error)", width=120, filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    'wm_music_mae': create_numeric_column('wm_music_mae', headerName="Music (Error)", width=120, filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    # The +/-99999 sentinels below are the NaN placeholders written by
    # load_leaderboard_data; the formatters render them as blank cells.
    "Show Rec Score": create_numeric_column(
        "Show Rec Score", 
        headerName="Show Rec Score", 
        width=120, 
        filterParams={"defaultOption": "greaterThanOrEqual"},
        valueFormatter={"function": "params.value === -99999 ? '' : String(params.value)"}
    ),
    "Show Rec MAE": create_numeric_column(
        "Show Rec MAE", 
        headerName="Show Rec MAE", 
        width=120, 
        filterParams={"defaultOption": "lessThanOrEqual"}, 
        sortingOrder=['asc', 'desc'],
        valueFormatter={"function": "params.value === 99999 ? '' : String(params.value)"},
        cellClass="border-left-dashed"
    ),
    "Show Rec Correlation": create_numeric_column(
        "Show Rec Correlation",  
        headerName="Show Rec Correlation", 
        width=125, 
        filterParams={"defaultOption": "greaterThanOrEqual"},
        # Add this formatter to hide the placeholder
        valueFormatter={"function": "params.value === -99999 ? '' : String(params.value)"}
    ),
    "Show Rec Std Dev Error": create_numeric_column(
        "Show Rec Std Dev Error", 
        headerName="Show Rec Std Dev Error", 
        width=120, 
        filterParams={"defaultOption": "lessThanOrEqual"}, 
        sortingOrder=['asc', 'desc'],
        # Add this formatter to hide the placeholder
        valueFormatter={"function": "params.value === 99999 ? '' : String(params.value)"}
    ),
    
    # --- Writing Categories ---
    "avg_nsfw_score": create_numeric_column("avg_nsfw_score", headerComponentParams={"template": template_with_split_header.format(high='NSFW', low='SFW')}, width=105, cellClass="border-left", filterParams={"defaultOption": "greaterThanOrEqual"}),
    "avg_dark_score": create_numeric_column("avg_dark_score", headerComponentParams={"template": template_with_split_header.format(high='Dark', low='Tame')}, width=105, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "Dialogue_Percentage": create_numeric_column("Dialogue_Percentage", headerName="Dialogue %", width=110, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "Verb_to_Noun_Ratio": create_numeric_column("Verb_to_Noun_Ratio", headerName="Verb/Noun Ratio", width=123, filterParams={"defaultOption": "inRange"}),
    "Adjective_Adverb_Percentage": create_numeric_column("Adjective_Adverb_Percentage", headerName="Adj&Adv %", width=115, filterParams={"defaultOption": "inRange"}),
    "Readability_Grade_Level": create_numeric_column("Readability_Grade_Level", headerName="Readability Grade", width=124, cellClass="border-left", filterParams={"defaultOption": "inRange"}, sortingOrder=['desc', 'asc']),
    "avg_writing_style_score": create_numeric_column("avg_writing_style_score", headerName="Style Adherence", width=121, cellClass="border-left", filterParams={"defaultOption": "greaterThanOrEqual"}),
    "avg_length_error_pct": create_numeric_column("avg_length_error_pct", headerName="Length Error %", width=113, cellClass="border-left", filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    "creative_writing_wc_exceeded_pct": create_numeric_column("creative_writing_wc_exceeded_pct", headerName="Exceeded %", width=118, filterParams={"defaultOption": "inRange"}),
    "originality_score": create_numeric_column("originality_score", headerName="Originality", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}),
    "internal_semantic_redundancy": create_numeric_column("internal_semantic_redundancy", headerName="Semantic Redundancy", width=125, filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    "lexical_stuckness": create_numeric_column("lexical_stuckness", headerName="Lexical Stuckness", width=118, cellClass="border-left", filterParams={"defaultOption": "lessThanOrEqual"}, sortingOrder=['asc', 'desc']),
    
    # --- Politics ---
    "12axes Ideology": create_text_column("12axes Ideology", width=170, cellClass="border-left", filterParams={"defaultOption": "contains"}),
    "govt": create_numeric_column("govt", width=105, valueFormatter={"function": "params.value == null ? '' : params.value.toFixed(1) + '%'"}, cellClass="border-left", filterParams={"defaultOption": "inRange"}),
    "dipl": create_numeric_column("dipl", width=105, valueFormatter={"function": "params.value == null ? '' : params.value.toFixed(1) + '%'"}, filterParams={"defaultOption": "inRange"}),
    "econ": create_numeric_column("econ", width=105, valueFormatter={"function": "params.value == null ? '' : params.value.toFixed(1) + '%'"}, filterParams={"defaultOption": "inRange"}),
    "scty": create_numeric_column("scty", width=105, valueFormatter={"function": "params.value == null ? '' : params.value.toFixed(1) + '%'"}, filterParams={"defaultOption": "inRange"}),
    # Generate one column per political axis from COLUMN_GROUPS (name -> width);
    # only the first axis (i == 0) gets the section's left border.
    **{
        col: create_numeric_column(
            col,
            headerComponentParams={"template": template_with_split_header.format(high=col.split('-')[0], low=col.split('-')[1])},
            width=width,  # Use the width from the dictionary
            valueFormatter={"function": "params.value == null ? '' : params.value.toFixed(1) + '%'"},
            cellClass="border-left" if i == 0 else "",
            filterParams={"defaultOption": "inRange"}
        ) for i, (col, width) in enumerate(COLUMN_GROUPS["politics_axes_group"].items())
    },
    # --- Other Toggles ---
    "Prompt Template": create_text_column("Prompt Template", width=160, filterParams={"defaultOption": "contains"}),
    "Architecture": create_text_column("Architecture", width=160, filterParams={"defaultOption": "contains"}),
    "Avg Thinking Chars": create_numeric_column("Avg Thinking Chars", width=120, filterParams={"defaultOption": "greaterThanOrEqual"}, valueFormatter={"function": "params.value === 0 ? '' : params.value"}),
}
# Grid-wide ag-grid options shared by the leaderboard grid.
dashGridOptions = {
    "animateRows": True,
    "pagination": False,
    "enableCellTextSelection": True,
    "ensureDomOrder": True,
    "suppressRowClickSelection": True,
    "suppressCellFocus": True,
    "getRowId": "params => params.data.Model_Display",
    "pinnedTopRowData": [],
    "suppressMaintainUnsortedOrder": True,
    "suppressMultiSort": True,
    # "maintainColumnOrder": True,
    "rowBuffer": 10,
    "maxBlocksInCache": 2,
    "icons": {
        "menu": ' '
    },
    # Bug fix: the previous `... if "prefers-color-scheme: dark" else ...`
    # tested a non-empty string literal, which is always truthy, so the dark
    # theme was always selected. Python cannot evaluate a CSS media query on
    # the server; keep the (previously effective) dark default here and switch
    # themes client-side (CSS / clientside callback) if light mode is wanted.
    "theme": "ag-theme-alpine-dark",
}
def get_initial_column_defs():
    """Generate the column definitions for the initial page load.

    Walks MASTER_COLUMN_ORDER, shows only the always-on columns plus the
    Overview preset, pins the identity columns to the left, applies the
    default descending sort on the UGI column, and adds a left border to the
    first column of each major section.
    """
    visible_cols = {"pinned", "is_new", "R", "#P", "type", "Model_Display", "Release Date", "Test Date"}
    visible_cols.update(PRESET_COLUMNS['Overview'].keys())

    primary_sort_col = "UGI 🏆"
    pinned_cols = ["pinned", "is_new", "R", "Avg Thinking Chars", "#P", "type", "Model_Display"]
    initial_defs = []
    for col_name in MASTER_COLUMN_ORDER:
        if col_name not in ALL_COLUMN_DEFS:
            continue

        # Copy so the per-load tweaks below (hide/pinned/sort) don't mutate
        # the shared master definitions. (A leftover debug override that
        # replaced the "Writing ✍️" definition with a stripped-down test dict
        # has been removed; the master definition is used like every other
        # column.)
        col_def = ALL_COLUMN_DEFS[col_name].copy()
        col_def['hide'] = col_name not in visible_cols
        col_def['pinned'] = 'left' if col_name in pinned_cols else None

        if col_def.get('field') == primary_sort_col:
            col_def['sort'] = 'desc'
            col_def['sortIndex'] = 0

        initial_defs.append(col_def)

    # Visually separate the major score sections with a left border.
    border_cols = {"UGI 🏆", "NatInt 💡", "Writing ✍️", "Political Lean 📋"}
    for col_def in initial_defs:
        if col_def.get('field') in border_cols:
            current_class = col_def.get('cellClass', '')
            if 'border-left' not in current_class:
                col_def['cellClass'] = f"{current_class} border-left".strip()

    return initial_defs
# Define the layout
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    dcc.Store(id='pinned-models-store', data=[]),
    
    # Header
    html.Div([
        html.Div([
            html.A("Contact/Model Requests", href="mailto:ugi.leaderboard@gmail.com", className="model-link"),
            html.Span(" (or create a HF discussion)")
        ], style={'float': 'left'}),
        html.Div([
            html.A(
                html.Img(
                    src=f"data:image/png;base64,{get_kofi_button_base64()['light']}",
                    style={'width': '165px'},
                    className='kofi-light'
                ),
                href="https://ko-fi.com/dontplantoend",
                target="_blank"
            ),
            html.A(
                html.Img(
                    src=f"data:image/png;base64,{get_kofi_button_base64()['dark']}",
                    style={'width': '165px'},
                    className='kofi-dark'
                ),
                href="https://ko-fi.com/dontplantoend",
                target="_blank"
            )
        ], style={'float': 'right'})
    ], style={'overflow': 'hidden', 'marginBottom': '20px', 'padding': '0 20px'}),
    html.H1("📢 UGI Leaderboard", className="page-title", style={'fontSize': '38px'}),
    html.H2("Uncensored General Intelligence", className="page-subtitle"),
    html.Div([
        "To filter columns, click the ", html.I(className="fas fa-search"), " icon. On mobile, hold the column name for the menu to appear."
    ], style={'marginTop': '40px', 'marginBottom': '20px', 'padding': '0 20px'}),
    # --- TOP FILTER SECTION ---
    html.Div([
        # Left side: Model Type
        html.Div([
            # The label is now a direct child, so it will appear on its own line above the checklists.
            html.Label("Display Models:", className="model-type-filter"),
            
            # A new sub-container for the interactive elements, using flexbox for horizontal alignment.
            html.Div(
                [
                    # Checklist for the main types
                    dcc.Checklist(
                        id='model-type-filter-main',
                        options=[
                            {'label': html.Span('Base', style={'color': '#71de5f'}), 'value': 'Is Foundation'},
                            {'label': html.Span('Finetune', style={'color': '#f6b10b'}), 'value': 'Is Finetuned'},
                            {'label': html.Span('Merge', style={'color': '#f08aff'}), 'value': 'Is Merged'},
                            {'label': html.Span('Proprietary', style={'color': '#19cdce'}), 'value': 'proprietary'},
                        ],
                        value=['Is Foundation', 'Is Finetuned', 'Is Merged', 'proprietary'],
                        inline=True,
                        labelStyle={'fontWeight': 'normal', 'marginRight': '15px'}
                    ),
                    
                    # The visual divider with adjusted margins for balanced spacing.
                    # It has less left margin to compensate for the right margin of "Proprietary".
                    html.Span('|', style={
                        'marginLeft': '-5px', 
                        'marginRight': '10px', 
                        'color': 'var(--secondary-text)'
                    }),
                    
                    # Checklist for the reasoning type
                    dcc.Checklist(
                        id='model-type-filter-reasoning',
                        options=[
                            {'label': html.Span('Reasoning'), 'value': 'Is Thinking Model'}
                        ],
                        value=['Is Thinking Model'],
                        inline=True,
                    ),
                ],
                style={'display': 'flex', 'alignItems': 'center'} # Flexbox applies only to this line
            )
        ]),
        # Right side: Other Options
        html.Div([
            html.Label("Other Options:", className="model-type-filter"),
            dcc.Checklist(
                id='other-toggles-checklist',
                options=[{'label': label, 'value': col} for col, label in OTHER_TOGGLES.items()] +
                        [{'label': 'NA Models', 'value': 'show_na'}],
                value=[],
                inline=True,
                labelStyle={'fontWeight': 'normal', 'marginRight': '15px'}
            )
        ], style={'textAlign': 'left'}), # Corrected alignment
    ], style={'display': 'flex', 'flexWrap': 'wrap', 'justifyContent': 'space-between', 'alignItems': 'center', 'padding': '0 20px', 'marginBottom': '20px'}),
    # --- HORIZONTAL CONTAINER FOR PRESETS AND CHECKLISTS ---
    html.Div(
        [
            # Create a vertical block for each preset
            html.Div(
                [
                    dcc.RadioItems(
                        id=f'{preset.lower()}-selector',
                        className='preset-selector',
                        options=[{'label': preset, 'value': preset}],
                        value='Overview' if preset == 'Overview' else None,
                        inputStyle={"marginRight": "8px"}
                    ),
                    dcc.Checklist(
                        id=f'{preset.lower()}-checklist',
                        className='preset-checklist',
                        options=PRESET_OPTIONS[preset],
                        value=[],
                        labelStyle={'display': 'block', 'marginBottom': '8px', 'fontWeight': 'normal'}
                    ) if preset != "Overview" else None
                ],
                className='preset-column',
                id=f'{preset.lower()}-preset-div'
            )
            for preset in PRESET_COLUMNS.keys()
        ],
        className='preset-container',
        style={'padding': '0 20px', 'marginBottom': '20px'}
    ),
    # Grid
    html.Div([
        dag.AgGrid(
            id='leaderboard-grid',
            rowData=df.to_dict('records'),
            columnDefs=get_initial_column_defs(),
            defaultColDef={
                "sortable": True, "resizable": True, "filter": True, "floatingFilter": False,
                "suppressMovable": True,  # This disables column dragging for all columns
                "sortingOrder": ['desc', 'asc'],
                "filterParams": {
                    "defaultOption": "between"
                },
            },
            dashGridOptions=dashGridOptions,
            dangerously_allow_code=True,
            className="ag-theme-alpine",
            style={"height": "600px", "width": "100%"},
            enableEnterpriseModules=False,
            getRowId="params.data.Model_Display"
        )
    ], style={'marginBottom': '30px'}),
    
    # html.Div([
    #     html.H4("Debug Information"),
    #     html.Pre(id='debug-output', style={'border': '1px solid #ccc', 'padding': '10px', 'whiteSpace': 'pre-wrap', 'maxHeight': '400px', 'overflowY': 'auto'})
    # ]),
    
    # Description
    html.Div([
        html.H3("About the Benchmarks", style={'fontSize': '22px', 'marginBottom': '10px'}),
        html.P(
            "To ensure a fair evaluation, all test questions are kept private. This prevents models from being specifically trained on the benchmark itself."
        ),
        # --- Uncensored Section ---
        html.P([html.Strong("UGI 🏆"), ": Uncensored General Intelligence"], style={'marginTop': '20px', 'fontSize': '1.2em'}),
        html.P("Measures a model's knowledge of sensitive topics and its ability to follow instructions when faced with controversial prompts."),
        html.Details([
            html.Summary("UGI Metrics", style={'fontWeight': 'normal', 'fontSize': '1em', 'marginLeft': '20px', 'cursor': 'pointer'}),
            html.Ul([
                html.Li([html.Strong("Categories:")]),
                html.Ul([
                    html.Li([html.Strong("Hazardous:"), " Knowledge of topics that LLMs probably shouldn't assist with."]),
                    html.Li([html.Strong("Entertainment:"), " Knowledge of adult or controversial entertainment and media."]),
                    html.Li([html.Strong("SocPol:"), " Knowledge of sensitive socio-political topics."]),
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
                html.Li([html.Strong("W/10 👍 (Willingness/10):"), " A component of the UGI score that measures how far a model can be pushed before it refuses to answer or deviates from instructions."]),
                html.Ul([
                    html.Li([html.Strong("W/10-Direct:"), " Measures if the model directly refuses to respond to certain prompts."]),
                    html.Li([html.Strong("W/10-Adherence:"), " Measures if a model deviates from instructions, which can be a form of refusal or a lack of instruction following capabilities."]),
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
            ], style={'marginTop': '5px', 'marginLeft': '40px'})
        ], open=True),
        # --- Intelligence Section ---
        html.P([html.Strong("NatInt 💡"), ": Natural Intelligence"], style={'marginTop': '20px', 'fontSize': '1.2em'}),
        html.P("Measures a model's general knowledge and reasoning capabilities across a range of standard and specialized domains."),
        html.Details([
            html.Summary("Intelligence Metrics", style={'fontWeight': 'normal', 'fontSize': '1em', 'marginLeft': '20px', 'cursor': 'pointer'}),
            html.Ul([
                html.Li([html.Strong("Standard:"), " Measures knowledge of standard, factual information like dates, statistics, math, and logic."]),
                html.Li([html.Strong("Pop Culture:"), " Knowledge of specific details from things like video games, movies, music, and internet culture."]),
                html.Li([html.Strong("World Model:"), " Tasks that test a model's understanding of real-world properties and patterns."]),
                html.Ul([
                    html.Li([html.Strong("Cooking (% Error):"), " Predicts needed ingredient amounts for recipes."]),
                    html.Li([html.Strong("GeoGuesser (km Error):"), " Identifies a location based on a description of its surroundings."]),
                    html.Li([html.Strong("Weight (% Error):"), " Estimates the weight of various objects based on their description."]),
                    html.Li([html.Strong("Music (Error):"), " Predicts a song's musical attributes (like bpm and loudness) based on its lyrics."]),
                    html.Li([html.Strong("Show Recommendation Score:"), " A model's ability to predict what rating out of ten a person will rate a TV show based on their previous ratings."]),
                    html.Ul([
                        html.Li([html.Strong("Show Rec MAE:"), " The mean absolute error between the model's predicted ratings and the user's true ratings."]),
                        html.Li([html.Strong("Show Rec Correlation:"), " Measures how well the model's predictions trend with the user's true ratings."]),
                        html.Li([html.Strong("Show Rec Std Dev Error:"), " The absolute difference between the spread of the model's predictions and the spread of the true ratings."]),
                    ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
            ], style={'marginTop': '5px', 'marginLeft': '40px'})
        ], open=True),
        # --- Writing Section ---
        html.P([html.Strong("Writing ✍️")], style={'marginTop': '20px', 'fontSize': '1.2em'}),
        html.P("A score of a model's writing ability, factoring in intelligence, writing style, amount of repetition, and adherence to requested output length. The score attempts to match the average person's preferences. Optimal values are displayed in parentheses in the column headers for the metrics used in the formula (e.g., 'Readability Grade (~5.5)'). These values were estimated using human feedback through model preference."),
        html.P("Models that are not able to consistently produce writing responses due to irreparable repetition issues or broken outputs are not given a writing score."),
        html.Details([
            html.Summary("Writing Metrics", style={'fontWeight': 'normal', 'fontSize': '1em', 'marginLeft': '20px', 'cursor': 'pointer'}),
            html.Ul([
                html.Li([html.Strong("NSFW/Dark Lean:"), " Measures the tonal direction a model takes when doing creative writing, from SFW to explicit (NSFW) and from lighthearted to violent/tragic (Dark). NOTE: A high or low number does not mean it is high or low quality. These two metrics solely measure frequency."]),
                html.Li([html.Strong("Stylistic Metrics:")]),
                html.Ul([
                    html.Li([html.Strong("Readability Grade:"), " The estimated US school grade level needed to understand the text."]),
                    html.Li([html.Strong("Verb/Noun Ratio:"), " The ratio of action words (verbs) to naming words (nouns)."]),
                    html.Li([html.Strong("Adj&Adv %:"), " The percentage of descriptive words (adjectives and adverbs) out of total words."]),
                    html.Li([html.Strong("Dialogue %:"), " The percentage of sentences in the model's response that is dialogue when writing stories."]),
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
                html.Li([html.Strong("Repetition Metrics:")]),
                html.Ul([
                    html.Li([html.Strong("Lexical Stuckness:"), " Measures if the model gets 'stuck' using a limited vocabulary in parts of its writing."]),
                    html.Li([html.Strong("Originality:"), " Measures how unique a model's writing outputs are by comparing the word usage and themes used across different writing prompts."]),
                    html.Li([html.Strong("Semantic Redundancy:"), " Detects when the same concept is expressed multiple times with different wording."]),
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
                html.Li([html.Strong("Length Adherence:")]),
                html.Ul([
                    html.Li([html.Strong("Length Error %:"), " The average percentage difference between a user-requested word count and the generated word count."]),
                    html.Li([html.Strong("Exceeded %:"), " The percentage of times the model responds with more words than requested."]),
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
                html.Li([html.Strong("Style Adherence:"), " How closely the model is able to match the writing style of a given example."]),
            ], style={'marginTop': '5px', 'marginLeft': '40px'})
        ], open=True),
        # --- Politics Section ---
        html.P([html.Strong("Political Lean 📋")], style={'marginTop': '20px', 'fontSize': '1.2em'}),
        html.Details([
            html.Summary("Political Metrics", style={'fontWeight': 'normal', 'fontSize': '1em', 'marginLeft': '20px', 'cursor': 'pointer'}),
            html.Ul([
                html.Li([html.Strong("Political Lean 📋:"), " Measures a model's political alignment based on its responses to the ", html.A("12axes", href="https://politicaltests.github.io/12axes/", target="_blank", style={'color': 'var(--link-color)'}), " test. The Political Lean metric uses a simplified version with the Assimilationist-Multiculturalist, Average(Collectivize-Privatize & Planned-LaissezFaire), and Progressive-Traditional axes. The score ranges from -100% (Left) to 100% (Right)."]),
                html.Li([html.Strong("12axes Ideology:"), " The closest matching political ideology from the 12axes test."]),
                html.Li([html.Strong("Aggregate Scores:")]),
                html.Ul([
                    html.Li("Govt: Higher = State authority, Lower = Individual liberty"),
                    html.Li("Dipl: Higher = Global outlook, Lower = National interests"),
                    html.Li("Econ: Higher = Economic equality, Lower = Market freedom"),
                    html.Li("Scty: Higher = Progressive values, Lower = Traditional values")
                ], style={'listStyleType': 'circle', 'marginLeft': '20px'}),
            ], style={'marginTop': '5px', 'marginLeft': '40px'})
        ], open=True),
        html.Details([
            html.Summary("12axes Ideology Descriptions", style={'fontWeight': 'normal', 'fontSize': '1em', 'marginLeft': '20px', 'cursor': 'pointer', 'marginTop': '10px'}),
            html.Div([
                html.I("Only showing ideologies at least one model has.", className='ideology-note', style={'fontSize': '0.9em'}),
                dcc.Markdown("\n\n".join([
                    f"**{ideology}**: {IDEOLOGY_DESCRIPTIONS.get(ideology, 'No description available.')}"
                    for ideology in sorted(set(df['12axes Ideology'].dropna()))
                    if ideology
                ]), className='markdown-content'),
                html.Div([
                    html.A("Source", href="https://github.com/politicaltests/politicaltests.github.io/blob/main/12axes/ideologies.js", target="_blank", className="source-link")
                ], style={'marginTop': '20px'})
            ], style={'paddingTop': '10px', 'marginLeft': '40px'})
        ]),
    ], style={
        'maxWidth': '1200px',
        'margin': '0 auto',
        'padding': '0 20px',
        'color': 'var(--text-color)',
        'marginBottom': '80px'
    }),
    
], style={'maxWidth': '100%', 'margin': '0 auto'})
# Maps each preset group to the headline column(s) that should be checked when
# the "Overview" preset is active (consumed by sync_presets_and_checklists).
# Keys mirror the non-Overview entries of PRESET_COLUMNS.
OVERVIEW_MAPPING = {
    "Uncensored": ["UGI 🏆", "W/10 👍"],
    "Intelligence": ["NatInt 💡"],
    "Writing": ["Writing ✍️"],
    "Politics": ["Political Lean 📋"]
}
@app.callback(
    [Output(f'{p.lower()}-checklist', 'value') for p in PRESET_COLUMNS.keys() if p != "Overview"] +
    [Output(f'{p.lower()}-selector', 'value') for p in PRESET_COLUMNS.keys()],
    [Input(f'{p.lower()}-selector', 'value') for p in PRESET_COLUMNS.keys()],
    prevent_initial_call=False
)
def sync_presets_and_checklists(*selector_values):
    """Keep the preset radio buttons mutually exclusive and fill in the
    checklists that belong to the chosen preset.

    Returns the checklist values (every preset except Overview) followed by
    the selector values (every preset), both in PRESET_COLUMNS order.
    """
    ctx = dash.callback_context
    if not ctx.triggered_id:
        # Initial page load: nothing has been clicked yet, default to Overview.
        chosen = "Overview"
    else:
        trigger_root = ctx.triggered_id.split('.')[0]
        chosen = trigger_root.replace('-selector', '').capitalize()

    # Start from a blank slate: empty checklists, no selector active.
    checklists = {name: [] for name in PRESET_COLUMNS if name != "Overview"}
    selectors = dict.fromkeys(PRESET_COLUMNS, None)

    if chosen == "Overview":
        # Overview surfaces only the headline column of each group.
        for group, headline_cols in OVERVIEW_MAPPING.items():
            checklists[group] = headline_cols
    elif chosen == "Writing":
        # Writing shows its own columns plus NatInt for context.
        checklists["Writing"] = list(PRESET_COLUMNS["Writing"].keys())
        checklists["Intelligence"] = ["NatInt 💡"]
    elif chosen in checklists:
        # Any other preset simply enables all of its own columns.
        checklists[chosen] = list(PRESET_COLUMNS[chosen].keys())

    selectors[chosen] = chosen

    checklist_out = [checklists[name] for name in PRESET_COLUMNS if name != "Overview"]
    selector_out = [selectors[name] for name in PRESET_COLUMNS]
    return checklist_out + selector_out
@app.callback(
    Output('leaderboard-grid', 'columnDefs', allow_duplicate=True),
    [Input(f'{p.lower()}-checklist', 'value') for p in PRESET_COLUMNS.keys() if p != "Overview"] +
    [Input('other-toggles-checklist', 'value')] +
    [Input(f'{p.lower()}-selector', 'value') for p in PRESET_COLUMNS.keys()],
    prevent_initial_call=True
)
def update_columns_and_sort(uncensored_cols, intelligence_cols, writing_cols, politics_cols, other_toggles, *selector_values):
    """Rebuild the grid's columnDefs from the current checklist and preset state.

    Pipeline (order matters):
      1. Expand group tokens (COLUMN_GROUPS) into concrete column names.
      2. Walk MASTER_COLUMN_ORDER, setting hide/pinned (and sort, when a
         preset radio triggered the callback) on a copy of each base def.
      3. For the Writing preset, move NatInt directly after the Writing column.
      4. Manage 'header-optimal-*' header classes and 'border-left' cell classes.

    NOTE(review): the four positional checklist args assume PRESET_COLUMNS
    (minus Overview) is ordered Uncensored, Intelligence, Writing, Politics —
    confirm against its definition.
    """
    ctx = dash.callback_context
    
    # Only re-apply the default sort when a preset radio (not a checklist)
    # fired; toggling individual columns must not reset the user's sort.
    apply_default_sort = False
    if ctx.triggered_id and ctx.triggered_id.endswith('-selector'):
        apply_default_sort = True
    # The active preset is the one whose selector holds its own name;
    # fall back to Overview when none is selected.
    active_preset = 'Overview'
    for i, preset_name in enumerate(PRESET_COLUMNS.keys()):
        if selector_values[i] == preset_name:
            active_preset = preset_name
            break
            
    all_selections = set(uncensored_cols + intelligence_cols + writing_cols + politics_cols + other_toggles)
        
    # Expand group tokens (e.g. 'w10_sub_scores') into their member columns;
    # plain column names pass through unchanged.
    expanded_selections = set()
    for item in all_selections:
        if item in COLUMN_GROUPS:
            expanded_selections.update(COLUMN_GROUPS[item])
        else:
            expanded_selections.add(item)
            
    # These identity/meta columns are always visible regardless of selection.
    visible_cols = {"pinned", "is_new", "R", "#P", "type", "Model_Display", "Release Date", "Test Date"}
    visible_cols.update(expanded_selections)
    
    # Default sort column per preset (None = leave the grid unsorted).
    sort_map = {
        "Overview": "UGI 🏆",
        "Uncensored": "UGI 🏆",
        "Intelligence": "NatInt 💡",
        "Writing": "Writing ✍️",
        "Politics": None
    }
    primary_sort_col = sort_map.get(active_preset)
    pinned_cols = ["pinned", "is_new", "R", "Avg Thinking Chars", "#P", "type", "Model_Display"]
    # Build fresh defs from the base definitions so state never leaks between
    # callback invocations.
    
    final_defs = []
    for col_name in MASTER_COLUMN_ORDER:
        if col_name not in ALL_COLUMN_DEFS:
            continue
            
        # Shallow copy: we only overwrite top-level keys on the def below.
        col_def = ALL_COLUMN_DEFS[col_name].copy()
        
        # Intentionally keep headerComponentParams from the base definition;
        # stripping it here was tried previously and reverted.
        
        col_def['hide'] = col_name not in visible_cols
        col_def['pinned'] = 'left' if col_name in pinned_cols else None
        
        if apply_default_sort:
            # Sort descending on the preset's primary column; explicitly clear
            # sort state on every other column.
            if col_def.get('field') == primary_sort_col:
                col_def['sort'] = 'desc'
                col_def['sortIndex'] = 0
            else:
                col_def['sort'] = None
                col_def['sortIndex'] = None
        
        final_defs.append(col_def)
    # For the Writing preset, relocate NatInt so it sits immediately after the
    # Writing score column.
    if active_preset == 'Writing':
        natint_col_def = next((col for col in final_defs if col.get('field') == 'NatInt 💡'), None)
        
        if natint_col_def:
            temp_defs = [col for col in final_defs if col.get('field') != 'NatInt 💡']
            try:
                insert_index = next(i for i, col in enumerate(temp_defs) if col.get('field') == 'Writing ✍️') + 1
                temp_defs.insert(insert_index, natint_col_def)
                final_defs = temp_defs
            except StopIteration:
                # Writing column not present; keep the original order.
                pass
    # --- Header classes that render optimal values (via CSS) in the Writing preset ---
    WRITING_OPTIMAL_CLASSES = {
        "avg_length_error_pct": "header-optimal-len-err",
        "NatInt 💡": "header-optimal-natint",
        "originality_score": "header-optimal-orig",
        "internal_semantic_redundancy": "header-optimal-sem-red",
        "lexical_stuckness": "header-optimal-lex-stuck",
        "Adjective_Adverb_Percentage": "header-optimal-adj-adv",
        "Readability_Grade_Level": "header-optimal-read-grade",
        "Dialogue_Percentage": "header-optimal-dialogue"
    }
    for col_def in final_defs:
        # Defensively strip any pre-existing optimal classes before
        # (re-)adding them for the Writing preset only.
        current_classes = col_def.get('headerClass', '').split()
        cleaned_classes = [c for c in current_classes if not c.startswith('header-optimal-')]
        col_def['headerClass'] = ' '.join(cleaned_classes)
        if active_preset == 'Writing':
            field = col_def.get('field')
            if field in WRITING_OPTIMAL_CLASSES:
                class_to_add = WRITING_OPTIMAL_CLASSES[field]
                current_classes = col_def.get('headerClass', '').split()
                if class_to_add not in current_classes:
                    current_classes.append(class_to_add)
                    col_def['headerClass'] = ' '.join(current_classes)
    # --- Visual left border on the preset's main score column(s) ---
    border_cols = set()
    if active_preset == 'Overview':
        border_cols = {"UGI 🏆", "NatInt 💡", "Writing ✍️", "Political Lean 📋"}
    elif active_preset == 'Uncensored':
        border_cols = {"UGI 🏆", "W/10 👍"}
    elif active_preset == 'Intelligence':
         border_cols = {"NatInt 💡"}
    elif active_preset == 'Writing':
        border_cols = {"Writing ✍️", "NatInt 💡"}
    else:
        # No known preset: border only the first visible main score column.
        main_score_columns = ["UGI 🏆", "W/10 👍", "NatInt 💡", "Writing ✍️", "Political Lean 📋"]
        for col_def in final_defs:
            if not col_def.get('hide', True) and col_def.get('field') in main_score_columns:
                border_cols.add(col_def.get('field'))
                break
    for col_def in final_defs:
        if col_def.get('field') in border_cols:
            current_class = col_def.get('cellClass', '')
            if 'border-left' not in current_class:
                col_def['cellClass'] = f"{current_class} border-left".strip()
                
    return final_defs
    
@app.callback(
    Output('leaderboard-grid', 'rowData'),
    [Input(f'{p.lower()}-selector', 'value') for p in PRESET_COLUMNS.keys()] +
    [Input(f'{p.lower()}-checklist', 'value') for p in PRESET_COLUMNS.keys() if p != "Overview"] +
    [
        Input('model-type-filter-main', 'value'),
        Input('model-type-filter-reasoning', 'value'),
        Input('other-toggles-checklist', 'value')
    ]
)
def update_grid_rows(*args):
    """Filter the leaderboard rows by model type and the "NA Models" toggle.

    Returns the filtered DataFrame as a list of record dicts for AG Grid.
    With 'show_na' OFF: hides rows lacking a Writing score while the Writing
    preset is active. With 'show_na' ON: shows ONLY rows that are NA in at
    least one currently-visible score column (or no rows, if none of the
    NA-capable columns are visible).
    """
    # 1. Unpack positional args: selectors, then checklists, then filters,
    #    matching the Input order declared in the decorator above.
    num_presets = len(PRESET_COLUMNS)
    num_checklists = num_presets - 1
    selector_values = args[:num_presets]
    checklist_values = args[num_presets : num_presets + num_checklists]
    main_types = args[num_presets + num_checklists]
    reasoning_type = args[num_presets + num_checklists + 1]
    other_toggles = args[num_presets + num_checklists + 2]
    # NOTE(review): assumes PRESET_COLUMNS (minus Overview) is ordered
    # Uncensored, Intelligence, Writing, Politics — confirm against its definition.
    uncensored_cols, intelligence_cols, writing_cols, politics_cols = checklist_values
    # 2. Basic setup
    selected_types = main_types + reasoning_type
    show_na_filter = 'show_na' in other_toggles
    filtered_df = df.copy()
    # 3. Model type filtering: a row survives only if no *deselected*
    #    category matches it.
    categories = {
        'Is Foundation': (filtered_df['Is Foundation'] & ~filtered_df['Is Merged'] & pd.notna(filtered_df['Total Parameters'])),
        'Is Finetuned': (filtered_df['Is Finetuned'] & ~filtered_df['Is Merged']),
        'Is Merged': filtered_df['Is Merged'],
        'proprietary': pd.isna(filtered_df['Total Parameters']),
        'Is Thinking Model': filtered_df['Is Thinking Model']
    }
    final_mask = pd.Series(True, index=filtered_df.index)
    for category_value, condition_mask in categories.items():
        if category_value not in selected_types:
            final_mask &= ~condition_mask
    filtered_df = filtered_df[final_mask]
    # 4. Determine the active preset (the selector holding its own name).
    active_preset = None
    for i, preset_name in enumerate(PRESET_COLUMNS.keys()):
        if selector_values[i] == preset_name:
            active_preset = preset_name
            break
    # 5. Writing preset hides models with no Writing score (unless showing NA).
    #    Assignment instead of inplace=True: filtered_df is a boolean-mask
    #    slice of df, so in-place mutation risks SettingWithCopyWarning and
    #    is unsafe under pandas Copy-on-Write.
    if active_preset == 'Writing' and not show_na_filter:
        filtered_df = filtered_df.dropna(subset=['Writing ✍️'])
    # 6. Context-aware "NA Models" filter: keep only rows that are NA in at
    #    least one visible NA-capable column.
    if show_na_filter:
        all_selections = set(uncensored_cols + intelligence_cols + writing_cols + politics_cols)
        if active_preset == 'Overview':
            # Overview implicitly shows its headline columns even though the
            # checklists are empty.
            all_selections.update(PRESET_COLUMNS['Overview'].keys())
        is_writing_visible = 'Writing ✍️' in all_selections
        is_politics_visible = 'Political Lean 📋' in all_selections
        # True when the "World Model Tests" group checkbox is ticked.
        is_pred_reasoning_visible = 'world_model_group' in intelligence_cols

        na_conditions = []
        if is_writing_visible:
            na_conditions.append(filtered_df['Writing ✍️'].isna())
        if is_pred_reasoning_visible:
            # -99999 is the sentinel for "no Show Rec score".
            na_conditions.append(filtered_df['Show Rec Score'] == -99999)
        if is_politics_visible:
            na_conditions.append(filtered_df['Political Lean 📋'].isna())
        if na_conditions:
            # OR the per-column NA masks together.
            final_na_mask = na_conditions[0]
            for condition in na_conditions[1:]:
                final_na_mask = final_na_mask | condition
            filtered_df = filtered_df[final_na_mask]
        else:
            # No NA-capable column is visible: show nothing.
            filtered_df = filtered_df.iloc[0:0]
    return filtered_df.to_dict('records')
    
# Clientside (browser-side JS) callback: runs when the URL pathname first
# changes; on viewports narrower than 800px it returns a copy of the column
# defs with all pinning removed, so the grid remains scrollable on mobile.
# On desktop (or before defs exist) it returns no_update.
app.clientside_callback(
    """
    function(_, columnDefs) {
        // This function runs once on page load to set the initial pinning based on screen size.
        const isMobile = window.innerWidth < 800;
        if (!isMobile || !columnDefs) {
            // On desktop, or if defs are not ready, do nothing.
            return dash_clientside.no_update;
        }
        
        // On mobile, create a new set of definitions with all pinning removed.
        const newDefs = columnDefs.map(col => {
            const newCol = Object.assign({}, col);
            newCol.pinned = null; // Un-pin the column
            return newCol;
        });
        return newDefs;
    }
    """,
    Output('leaderboard-grid', 'columnDefs', allow_duplicate=True),
    Input('url', 'pathname'),
    State('leaderboard-grid', 'columnDefs'),
    prevent_initial_call=True
)
    
if __name__ == '__main__':
    # app.run() is the supported entry point; run_server() has been
    # deprecated since Dash 2.x and removed in Dash 3. The 0.0.0.0 host
    # makes the server reachable from outside a container.
    app.run(host='0.0.0.0', port=8050)