# NOTE(review): removed non-Python residue from a web-page scrape (Spaces
# header, "Runtime error" banner, file size, commit hashes, column ruler)
# that made this file unparseable.
import json
import os
from datetime import datetime, timezone
import gradio as gr
import numpy as np
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
from transformers import AutoConfig
from src.auto_leaderboard.get_model_metadata import apply_metadata
from src.assets.text_content import *
from src.auto_leaderboard.load_results import get_eval_results_dicts, make_clickable_model
from src.assets.hardcoded_evals import gpt4_values, gpt35_values, baseline
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils_display import AutoEvalColumn, EvalQueueColumn, fields, styled_error, styled_warning, styled_message
from src.init import get_all_requested_models, load_all_info_from_hub
def get_leaderboard_df():
    """Build the static KBQA leaderboard as a pandas DataFrame.

    Returns a DataFrame with one row per system (fine-tuned/zero-shot SOTA,
    FLAN-T5, the GPT-3 family, ChatGPT, GPT-4) and one column per dataset.
    Scores are kept as strings so '-' can mark entries with no reported value.
    """
    systems = ['SOTA(FT)', 'SOTA(ZS)', 'FLAN-T5', 'GPT-3',
               'GPT-3.5v2', 'GPT-3.5v3', 'ChatGPT', 'GPT-4']
    # Per-dataset score columns, aligned with `systems` above.
    scores = {
        'KQApro':   ['93.85', '94.20', '37.27', '38.28', '38.01', '40.35', '47.93', '57.20'],
        'LC-quad2': ['33.10', '-',     '30.14', '33.04', '33.77', '39.04', '42.76', '54.95'],
        'WQSP':     ['73.10', '62.98', '59.87', '67.68', '72.34', '79.60', '83.70', '90.45'],
        'CWQ':      ['72.20', '-',     '46.69', '51.77', '53.96', '57.54', '64.02', '71.00'],
        'GrailQA':  ['76.31', '-',     '29.02', '27.58', '30.50', '35.43', '46.77', '51.40'],
        'GraphQ':   ['41.30', '-',     '32.27', '38.32', '40.85', '47.95', '53.10', '63.20'],
        'QALD-9':   ['67.82', '-',     '30.17', '38.54', '44.96', '46.19', '45.71', '57.20'],
        'MKQA':     ['46.00', '-',     '20.17', '26.97', '30.14', '39.05', '44.30', '59.20'],
    }
    # 'Datasets' stays first so the column order matches the original layout.
    return pd.DataFrame({'Datasets': systems, **scores})
# Materialize the leaderboard once at startup; keep a pristine copy so the
# search callback can always filter against the full, unfiltered table while
# the displayed table is free to change.
original_df = get_leaderboard_df()
leaderboard_df = original_df.copy()
def search_table(df, query):
    """Return the rows of *df* where any cell contains *query*.

    Matching is case-insensitive and treats the query as a literal substring
    (no regex). An empty query returns *df* unchanged so clearing the search
    bar restores the full leaderboard.

    Args:
        df: pandas DataFrame to filter.
        query: search string typed by the user.

    Returns:
        A filtered view of *df* (possibly empty).
    """
    if query == "":
        return df
    # BUG FIX: the original did `query.lower() in row...str.lower().any()`,
    # which reduces the row to a scalar with `.any()` *before* the `in` test
    # and raises a TypeError. Build a per-row boolean mask instead.
    needle = query.lower()
    mask = df.apply(
        lambda row: row.astype(str).str.lower().str.contains(needle, regex=False).any(),
        axis=1,
    )
    return df[mask]
# Build the Gradio UI: title + intro, a search bar wired to search_table,
# the leaderboard tab, an "About" tab, and a citation accordion.
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Row():
        with gr.Box(elem_id="search-bar-table-box"):
            search_bar = gr.Textbox(
                # NOTE(review): original placeholder began with mojibake
                # ("๐..."); restored as a search emoji — confirm intended glyph.
                placeholder="🔍 Search your model and press ENTER...",
                show_label=False,
                elem_id="search-bar",
            )
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # BUG FIX: this label string was split across two physical source
        # lines without continuation (a SyntaxError) and began with mojibake.
        # Reconstructed as a single-line label; confirm the intended emoji.
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=1):
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df,
                max_rows=None,
                elem_id="leaderboard-table",
            )
            # Hidden copy of the unfiltered table: fed to search_table as its
            # `df` input so deleting the query (backspace) restores all rows.
            hidden_leaderboard_table_for_search = gr.components.Dataframe(
                value=original_df,
                max_rows=None,
                visible=False,
            )
            # On ENTER, filter the hidden full table by the query and render
            # the result into the visible leaderboard table.
            search_bar.submit(
                search_table,
                [hidden_leaderboard_table_for_search, search_bar],
                leaderboard_table,
            )
        with gr.TabItem("About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)
demo.queue(concurrency_count=40).launch()