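"""Gradio app for the SyntheRela benchmark leaderboard Space.

Downloads evaluation results from the Hub and renders multi-table,
single-table, and single-column leaderboards."""
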
import os

import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    AutoEvalColumn,
    singletable_AutoEvalColumn,
    singlecolumn_AutoEvalColumn,
    fields,
)
from src.envs import API, EVAL_RESULTS_PATH, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_leaderboard_df


def restart_space():
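    """Restart the Hugging Face Space via the Hub API."""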
    API.restart_space(repo_id=REPO_ID)

### Space initialisation
try:
    print(f"Downloading evaluation results to {EVAL_RESULTS_PATH}")
    snapshot_download(
        repo_id=RESULTS_REPO,
        local_dir=EVAL_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,
        etag_timeout=30,
        token=TOKEN,
    )
except Exception as e:
    print(f"Error downloading results: {e}")
    # Fall back to an empty local results directory so the app can still start.
    os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)


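# Build one dataframe per evaluation granularity from the downloaded results.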
(
    SINGLECOLUMN_LEADERBOARD_DF,
    SINGLETABLE_LEADERBOARD_DF,
    MULTITABLE_LEADERBOARD_DF,
) = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)

def init_multitable_leaderboard(dataframe):
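    """Multi-table leaderboard: columns from AutoEvalColumn, filterable by dataset and model."""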
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[
            ColumnFilter(AutoEvalColumn.dataset.name, type="checkboxgroup", label="Datasets"),
            ColumnFilter(AutoEvalColumn.model.name, type="checkboxgroup", label="Models"),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )

def init_singletable_leaderboard(dataframe):
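    """Single-table leaderboard: columns from singletable_AutoEvalColumn, filterable by dataset and model."""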
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(singletable_AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=[c.name for c in fields(singletable_AutoEvalColumn) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(singletable_AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[singletable_AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(singletable_AutoEvalColumn) if c.hidden],
        filter_columns=[
            ColumnFilter(singletable_AutoEvalColumn.dataset.name, type="checkboxgroup", label="Datasets"),
            ColumnFilter(singletable_AutoEvalColumn.model.name, type="checkboxgroup", label="Models"),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )

def init_singlecolumn_leaderboard(dataframe):
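    """Single-column leaderboard: columns from singlecolumn_AutoEvalColumn, filterable by dataset, table, and model."""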
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(singlecolumn_AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=[c.name for c in fields(singlecolumn_AutoEvalColumn) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(singlecolumn_AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[singlecolumn_AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(singlecolumn_AutoEvalColumn) if c.hidden],
        filter_columns=[
            ColumnFilter(singlecolumn_AutoEvalColumn.dataset.name, type="checkboxgroup", label="Datasets"),
            ColumnFilter(singlecolumn_AutoEvalColumn.table.name, type="checkboxgroup", label="Tables"),
            ColumnFilter(singlecolumn_AutoEvalColumn.model.name, type="checkboxgroup", label="Models"),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )


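# Assemble the UI: one tab per leaderboard granularity, an About tab, and a citation box.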
with gr.Blocks(css=custom_css) as demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ… MultiTable", elem_id="syntherela-benchmark-tab-table", id=0):
            leaderboard = init_multitable_leaderboard(MULTITABLE_LEADERBOARD_DF)
        
        with gr.TabItem("πŸ… SingleTable", elem_id="syntherela-benchmark-tab-table", id=1):
            singletable_leaderboard = init_singletable_leaderboard(SINGLETABLE_LEADERBOARD_DF)

        with gr.TabItem("πŸ… SingleColumn", elem_id="syntherela-benchmark-tab-table", id=2):
            singlecolumn_leaderboard = init_singlecolumn_leaderboard(SINGLECOLUMN_LEADERBOARD_DF)

        with gr.TabItem("πŸ“ About", elem_id="syntherela-benchmark-tab-table", id=3):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

    with gr.Row():
        with gr.Accordion("πŸ“™ Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=8,
                elem_id="citation-button",
                show_copy_button=True,
            )

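# Restart the Space every 30 minutes so newly added results are re-downloaded.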
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()