Spaces:
Running
Running
lixuejing
committed on
Commit
·
e55692d
1
Parent(s):
e8c65fe
add quota
Browse files
app.py
CHANGED
@@ -69,8 +69,8 @@ def init_space():
|
|
69 |
dynamic_path=DYNAMIC_INFO_FILE_PATH,
|
70 |
#cols=COLS,
|
71 |
#benchmark_cols=BENCHMARK_COLS,
|
72 |
-
cols=QUOTACOLS,
|
73 |
-
benchmark_cols=BENCHMARK_QUOTACOLS
|
74 |
)
|
75 |
update_collections(original_df.copy())
|
76 |
leaderboard_df = original_df.copy()
|
@@ -251,13 +251,15 @@ with demo:
|
|
251 |
elem_id="filter-columns-size",
|
252 |
)
|
253 |
|
|
|
|
|
254 |
leaderboard_table = gr.components.Dataframe(
|
255 |
value=leaderboard_df[
|
256 |
-
[c.name for c in fields(
|
257 |
+ shown_columns.value
|
258 |
+ [AutoEvalColumnQuota.dummy.name]
|
259 |
],
|
260 |
-
headers=[c.name for c in fields(
|
261 |
datatype=TYPES,
|
262 |
elem_id="leaderboard-table",
|
263 |
interactive=False,
|
@@ -320,6 +322,135 @@ with demo:
|
|
320 |
leaderboard_table,
|
321 |
queue=True,
|
322 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
323 |
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
|
324 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
325 |
|
|
|
69 |
dynamic_path=DYNAMIC_INFO_FILE_PATH,
|
70 |
#cols=COLS,
|
71 |
#benchmark_cols=BENCHMARK_COLS,
|
72 |
+
cols=QUOTACOLS+COLS,
|
73 |
+
benchmark_cols=BENCHMARK_QUOTACOLS+BENCHMARK_COLS
|
74 |
)
|
75 |
update_collections(original_df.copy())
|
76 |
leaderboard_df = original_df.copy()
|
|
|
251 |
elem_id="filter-columns-size",
|
252 |
)
|
253 |
|
254 |
+
print("c.name for c in fields(AutoEvalColumn) if c.never_hidden", [c.name for c in fields(AutoEvalColumn) if c.never_hidden])
|
255 |
+
|
256 |
leaderboard_table = gr.components.Dataframe(
|
257 |
value=leaderboard_df[
|
258 |
+
[c.name for c in fields(AutoEvalColumn) if c.never_hidden]
|
259 |
+ shown_columns.value
|
260 |
+ [AutoEvalColumnQuota.dummy.name]
|
261 |
],
|
262 |
+
headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
|
263 |
datatype=TYPES,
|
264 |
elem_id="leaderboard-table",
|
265 |
interactive=False,
|
|
|
322 |
leaderboard_table,
|
323 |
queue=True,
|
324 |
)
|
325 |
+
|
326 |
+
with gr.TabItem("π
VLM QuotaBenchmark", elem_id="vlm-quota-benchmark-tab-table", id=0):
|
327 |
+
#leaderboard = init_leaderboard(LEADERBOARD_DF)
|
328 |
+
with gr.Row():
|
329 |
+
with gr.Column():
|
330 |
+
with gr.Row():
|
331 |
+
search_bar = gr.Textbox(
|
332 |
+
placeholder="🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
|
333 |
+
show_label=False,
|
334 |
+
elem_id="search-bar",
|
335 |
+
)
|
336 |
+
with gr.Row():
|
337 |
+
shown_columns = gr.CheckboxGroup(
|
338 |
+
choices=[
|
339 |
+
c.name
|
340 |
+
for c in fields(AutoEvalColumnQuota)
|
341 |
+
if not c.hidden and not c.never_hidden and not c.dummy
|
342 |
+
],
|
343 |
+
value=[
|
344 |
+
c.name
|
345 |
+
for c in fields(AutoEvalColumnQuota)
|
346 |
+
if c.displayed_by_default and not c.hidden and not c.never_hidden
|
347 |
+
],
|
348 |
+
label="Select columns to show",
|
349 |
+
elem_id="column-select",
|
350 |
+
interactive=True,
|
351 |
+
)
|
352 |
+
with gr.Row():
|
353 |
+
hide_models = gr.CheckboxGroup(
|
354 |
+
label="Hide models",
|
355 |
+
choices = ["Private or deleted", "Contains a merge/moerge", "Flagged", "MoE"],
|
356 |
+
value=[],
|
357 |
+
interactive=True
|
358 |
+
)
|
359 |
+
with gr.Column(min_width=320):
|
360 |
+
#with gr.Box(elem_id="box-filter"):
|
361 |
+
filter_columns_type = gr.CheckboxGroup(
|
362 |
+
label="Model types",
|
363 |
+
choices=[t.to_str() for t in ModelType],
|
364 |
+
value=[t.to_str() for t in ModelType],
|
365 |
+
interactive=True,
|
366 |
+
elem_id="filter-columns-type",
|
367 |
+
)
|
368 |
+
filter_columns_precision = gr.CheckboxGroup(
|
369 |
+
label="Precision",
|
370 |
+
choices=[i.value.name for i in Precision],
|
371 |
+
value=[i.value.name for i in Precision],
|
372 |
+
interactive=True,
|
373 |
+
elem_id="filter-columns-precision",
|
374 |
+
)
|
375 |
+
filter_columns_size = gr.CheckboxGroup(
|
376 |
+
label="Model sizes (in billions of parameters)",
|
377 |
+
choices=list(NUMERIC_INTERVALS.keys()),
|
378 |
+
value=list(NUMERIC_INTERVALS.keys()),
|
379 |
+
interactive=True,
|
380 |
+
elem_id="filter-columns-size",
|
381 |
+
)
|
382 |
+
|
383 |
+
print("c.name for c in fields(AutoEvalColumnQuota) if c.never_hidden", [c.name for c in fields(AutoEvalColumnQuota) if c.never_hidden])
|
384 |
+
|
385 |
+
leaderboard_table = gr.components.Dataframe(
|
386 |
+
value=leaderboard_df[
|
387 |
+
[c.name for c in fields(AutoEvalColumnQuota) if c.never_hidden]
|
388 |
+
+ shown_columns.value
|
389 |
+
+ [AutoEvalColumnQuota.dummy.name]
|
390 |
+
],
|
391 |
+
headers=[c.name for c in fields(AutoEvalColumnQuota) if c.never_hidden] + shown_columns.value,
|
392 |
+
datatype=QUOTATYPES,
|
393 |
+
elem_id="leaderboard-table",
|
394 |
+
interactive=False,
|
395 |
+
visible=True,
|
396 |
+
#column_widths=["2%", "33%"]
|
397 |
+
)
|
398 |
+
|
399 |
+
# Dummy leaderboard for handling the case when the user uses backspace key
|
400 |
+
hidden_leaderboard_table_for_search = gr.components.Dataframe(
|
401 |
+
value=original_df[QUOTACOLS],
|
402 |
+
#value=leaderboard_df[QUOTACOLS],
|
403 |
+
headers=QUOTACOLS,
|
404 |
+
datatype=QUOTATYPES,
|
405 |
+
visible=False,
|
406 |
+
)
|
407 |
+
search_bar.submit(
|
408 |
+
update_table,
|
409 |
+
[
|
410 |
+
hidden_leaderboard_table_for_search,
|
411 |
+
shown_columns,
|
412 |
+
filter_columns_type,
|
413 |
+
filter_columns_precision,
|
414 |
+
filter_columns_size,
|
415 |
+
hide_models,
|
416 |
+
search_bar,
|
417 |
+
],
|
418 |
+
leaderboard_table,
|
419 |
+
)
|
420 |
+
|
421 |
+
# Define a hidden component that will trigger a reload only if a query parameter has been set
|
422 |
+
hidden_search_bar = gr.Textbox(value="", visible=False)
|
423 |
+
hidden_search_bar.change(
|
424 |
+
update_table,
|
425 |
+
[
|
426 |
+
hidden_leaderboard_table_for_search,
|
427 |
+
shown_columns,
|
428 |
+
filter_columns_type,
|
429 |
+
filter_columns_precision,
|
430 |
+
filter_columns_size,
|
431 |
+
hide_models,
|
432 |
+
search_bar,
|
433 |
+
],
|
434 |
+
leaderboard_table,
|
435 |
+
)
|
436 |
+
# Check query parameter once at startup and update search bar + hidden component
|
437 |
+
demo.load(load_query, inputs=[], outputs=[search_bar, hidden_search_bar])
|
438 |
+
|
439 |
+
for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, hide_models]:
|
440 |
+
selector.change(
|
441 |
+
update_table,
|
442 |
+
[
|
443 |
+
hidden_leaderboard_table_for_search,
|
444 |
+
shown_columns,
|
445 |
+
filter_columns_type,
|
446 |
+
filter_columns_precision,
|
447 |
+
filter_columns_size,
|
448 |
+
hide_models,
|
449 |
+
search_bar,
|
450 |
+
],
|
451 |
+
leaderboard_table,
|
452 |
+
queue=True,
|
453 |
+
)
|
454 |
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
|
455 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
456 |
|