xeon27 committed
Commit a9a4909 · 1 Parent(s): ba2f546

Remove filters and extra columns

Files changed (2):
  1. app.py +111 -110
  2. src/display/utils.py +11 -11
app.py CHANGED
@@ -70,20 +70,21 @@ def init_leaderboard(dataframe):
         ),
         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
-        ],
+        # filter_columns=[
+        #     ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
+        #     ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+        #     ColumnFilter(
+        #         AutoEvalColumn.params.name,
+        #         type="slider",
+        #         min=0.01,
+        #         max=150,
+        #         label="Select the number of parameters (B)",
+        #     ),
+        #     ColumnFilter(
+        #         AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=False
+        #     ),
+        # ],
+        filter_columns=[],
         bool_checkboxgroup_label="Hide models",
         interactive=False,
     )
@@ -101,102 +102,102 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     with gr.Row():
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             model_type = gr.Dropdown(
+        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=None,
+        #                 interactive=True,
+        #             )
+
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=[i.value.name for i in WeightType],
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Eval")
+        #     submission_result = gr.Markdown()
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )
+
+    # with gr.Row():
+    #     with gr.Accordion("📙 Citation", open=False):
+    #         citation_button = gr.Textbox(
+    #             value=CITATION_BUTTON_TEXT,
+    #             label=CITATION_BUTTON_LABEL,
+    #             lines=20,
+    #             elem_id="citation-button",
+    #             show_copy_button=True,
+    #         )
 
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
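
If these filters ever need to come back, the removed lines above are the reference: gradio_leaderboard's ColumnFilter (already a dependency of this Space) is simply listed in filter_columns. Below is a minimal, hedged sketch, keyed by the display names from src/display/utils.py rather than AutoEvalColumn attributes so it stands alone; it only uses the keyword arguments the removed lines themselves use, and restored_filters is an illustrative name, not part of the commit.

from gradio_leaderboard import ColumnFilter

# The same four filters this commit removes, expressed against display-name strings.
# Restoring them would also require restoring the matching columns in src/display/utils.py.
restored_filters = [
    ColumnFilter("Type", type="checkboxgroup", label="Model types"),
    ColumnFilter("Precision", type="checkboxgroup", label="Precision"),
    ColumnFilter("#Params (B)", type="slider", min=0.01, max=150, label="Select the number of parameters (B)"),
    ColumnFilter("Available on the hub", type="boolean", label="Deleted/incomplete", default=True),
]

Passing restored_filters as filter_columns= in the Leaderboard(...) call above would reinstate the sidebar filters.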
src/display/utils.py CHANGED
@@ -23,22 +23,22 @@ class ColumnContent:
 ## Leaderboard columns
 auto_eval_column_dict = []
 # Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+# auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
 for task in Tasks:
     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+# # Model information
+# auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+# auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+# auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+# auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+# auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+# auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+# auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+# auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+# auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
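
For context on what the trimmed column table evaluates to, here is a minimal, self-contained sketch of the make_dataclass pattern above. It assumes a frozen ColumnContent dataclass with the fields the calls imply (name, type, displayed_by_default, hidden, never_hidden) and elides the per-task loop, since Tasks is not part of this diff.

from dataclasses import dataclass, fields, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

# After this commit only the model column, the average, and the per-task score columns remain.
auto_eval_column_dict = [
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
    ["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)],
]

AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print([c.name for c in fields(AutoEvalColumn)])  # ['model', 'average']
print(AutoEvalColumn.model.name)                 # 'Model'

Because each default is a ColumnContent instance, every entry is reachable as a class attribute (AutoEvalColumn.model, AutoEvalColumn.average), which is how the search_columns and hide_columns expressions in app.py read them.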