# src/display/utils.py
from dataclasses import dataclass
from typing import Any, List

from src.about import Tasks


@dataclass
class ColumnContent:
    """Metadata describing a single column of the leaderboard table."""

    name: str                          # key of the column in the results dataframe
    type: Any                          # Python type of the column's values
    label: str                         # human-readable header shown in the UI
    description: str                   # tooltip text for the column
    hidden: bool = False               # column is hidden entirely
    displayed_by_default: bool = True  # all columns displayed by default
    never_hidden: bool = False         # column cannot be deselected by the user


# Initialize the list of columns for the leaderboard
COLUMNS: List[ColumnContent] = []

# Essential columns
COLUMNS.append(
    ColumnContent(
        name="model",
        type=str,
        label="Model",
        description="Model name",
        never_hidden=True,
    )
)
COLUMNS.append(
    ColumnContent(
        name="average",
        type=float,
        label="Average Accuracy (%)",
        description="Average accuracy across all subjects",
    )
)

# Include per-subject accuracy columns based on your subjects
for task in Tasks:
    COLUMNS.append(
        ColumnContent(
            name=task.value.benchmark,
            type=float,
            label=f"{task.value.col_name} (%)",
            description=f"Accuracy on {task.value.col_name}",
            displayed_by_default=True,
        )
    )
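
# For reference: the loop above assumes `Tasks` (imported from src.about) is an
# Enum whose members wrap a record exposing `benchmark` and `col_name`. A
# minimal sketch of that assumed shape (the real definitions live in
# src/about.py; the subjects below are hypothetical):
#
#     @dataclass
#     class Task:
#         benchmark: str  # key of the subject in the results files
#         col_name: str   # human-readable subject name
#
#     class Tasks(Enum):
#         math = Task("subject_math", "Math")
#         physics = Task("subject_physics", "Physics")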

# Additional columns
COLUMNS.extend(
    [
        ColumnContent(
            name="model_type",
            type=str,
            label="Model Type",
            description="Type of the model (e.g., Transformer, RNN, etc.)",
            displayed_by_default=True,
        ),
        ColumnContent(
            name="weight_type",
            type=str,
            label="Weight Type",
            description="Type of model weights (e.g., Original, Delta, Adapter)",
            displayed_by_default=True,
        ),
        ColumnContent(
            name="precision",
            type=str,
            label="Precision",
            description="Precision of the model weights (e.g., float16)",
            displayed_by_default=True,
        ),
        ColumnContent(
            name="license",
            type=str,
            label="License",
            description="License of the model",
            displayed_by_default=True,
        ),
        ColumnContent(
            name="likes",
            type=int,
            label="Likes",
            description="Number of likes on the Hugging Face Hub",
            displayed_by_default=True,
        ),
        ColumnContent(
            name="still_on_hub",
            type=bool,
            label="Available on the Hub",
            description="Whether the model is still available on the Hugging Face Hub",
            displayed_by_default=True,
        ),
    ]
)
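
# Sketch of how the `type` field might be consumed downstream (an assumption
# for illustration; nothing in this file uses it): UI libraries such as Gradio
# accept per-column datatype strings for their table components, so the Python
# types above could be mapped like this:
#
#     TYPE_TO_DATATYPE = {str: "str", int: "number", float: "number", bool: "bool"}
#     TYPES = [TYPE_TO_DATATYPE[col.type] for col in COLUMNS]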

# Lists of column names for use elsewhere in the application
COLS = [col.name for col in COLUMNS]

# Per-subject benchmark columns: every column except the metadata ones above
BENCHMARK_COLS = [
    col.name
    for col in COLUMNS
    if col.name not in [
        "model", "average", "model_type", "weight_type",
        "precision", "license", "likes", "still_on_hub",
    ]
]
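

# A minimal usage sketch (an illustration under assumptions, not part of the
# template itself): the column metadata can drive a pandas view of the
# leaderboard, e.g. selecting only the columns shown on first load. Assumes
# pandas is installed; it is only needed for this demo.
if __name__ == "__main__":
    import pandas as pd

    # Empty leaderboard frame with the full schema.
    df = pd.DataFrame(columns=COLS)

    # Columns visible on first load: default-displayed plus never-hidden ones.
    default_cols = [
        col.name for col in COLUMNS if col.displayed_by_default or col.never_hidden
    ]

    print("All columns:      ", COLS)
    print("Benchmark columns:", BENCHMARK_COLS)
    print("Default view:     ", default_cols)
    print(df[default_cols])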