""" Leaderboard table components for the leaderboard application. """ import streamlit as st from src.data.processors import get_model_type_style, get_rank_style def render_leaderboard_table(display_df, metric_columns): """ Render the custom HTML leaderboard table Args: display_df (pandas.DataFrame): The DataFrame with the display data metric_columns (list): List of metric column names """ from src.components.header import render_section_header # Display model ranking header without the box render_section_header("Model Rankings") # Start building the HTML table structure html_table = """
""" # Add the metric header html_table += f'' # Continue the table structure html_table += """ """ # Add individual column headers for metrics for col in metric_columns: column_class = "overall-cell" if col == "Metric Average" else "metric-cell" html_table += f'' # Close the header and start the body html_table += """ """ # Add the data rows for i, (idx, row) in enumerate(display_df.iterrows()): # Define background colors to ensure consistency row_bg = "#0a0a0a" if i % 2 == 0 else "#111111" # Start the row html_table += f'' # Add Rank with medal styling and consistent background rank_style = f"background-color: {row_bg};" # Add row background to fixed columns rank_styles = get_rank_style(row["Rank"]) for style_key, style_value in rank_styles.items(): rank_style += f"{style_key}: {style_value};" html_table += f'' # Model name fixed column with consistent background html_table += f'' # Model type cell model_type = row["Model Type"] type_style = f"background-color: {row_bg};" model_type_styles = get_model_type_style(model_type) for style_key, style_value in model_type_styles.items(): if style_value: type_style += f"{style_key}: {style_value};" html_table += f'' # Add metric values with minimal styling for col in metric_columns: cell_class = "table-cell overall-cell" if col == "Metric Average" else "table-cell metric-cell" value_text = row[col] # Simple styling based on positive/negative values try: value = float(str(row[col]).replace(',', '')) if value > 0: cell_class += " positive-value" elif value < 0: cell_class += " negative-value" except: pass html_table += f'' html_table += "" # Close the table html_table += """
Rank Model + Scaffolding Model TypeMargin To Human
{col}
{row["Rank"]}{row["Model Name"]}{model_type}{value_text}
""" # Add metric definition below the table metric_definition = """

Margin to Human

This metric measures what percentage of the top 1 human-to-baseline performance gap an agent can close on challenging Machine Learning Research Competition problems. For example, if the baseline is 100, top human performance is 200, and the agent scores 110, the agent has closed 10% of the gap between baseline and top human performance. Higher percentages indicate models that more effectively approach top human-level research capabilities.

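

# The "Margin to Human" definition above reduces to simple gap-closing
# arithmetic. A minimal sketch of that computation follows; this helper is
# hypothetical (it is not part of this module or of src.data.processors) and
# exists only to make the definition concrete:
def _margin_to_human_pct(baseline, top_human, agent_score):
    """Return the percentage of the baseline-to-top-human gap the agent closed.

    Example from the definition above: baseline=100, top_human=200,
    agent_score=110 -> 10.0, i.e. the agent closed 10% of the gap.
    """
    return 100.0 * (agent_score - baseline) / (top_human - baseline)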
""" # Display the custom HTML table and metric definition st.markdown(html_table + metric_definition, unsafe_allow_html=True) def render_empty_state(): """ Render an empty state when no data is available """ st.markdown("""
No data to display. Please select at least one task and one model type to view the data.
""", unsafe_allow_html=True)