DD-Ranking / app.py
Soptq's picture
Update app.py
9b27ece verified
raw
history blame
4.7 kB
import gradio as gr
import pandas as pd
from constants import *
def get_data(verified, dataset, ipc, label_type, metric_weights=None):
    """Load, filter, score, rank, and format the leaderboard table.

    Args:
        verified: If truthy, keep only rows verified by the DD-Ranking team;
            otherwise show all submissions.
        dataset: Human-readable dataset name (an element of DATASET_LIST).
        ipc: Human-readable IPC value (an element of IPC_LIST).
        label_type: A label-type name or a list of names (elements of
            LABEL_TYPE_LIST); a bare string is wrapped into a one-item list.
        metric_weights: Per-metric weights aligned with METRICS. Defaults to
            uniform weights (1 / len(METRICS)).

    Returns:
        A pandas DataFrame sorted by weighted score (descending), with
        display-ready columns: Ranking, Method, Verified, Date, Label Type,
        HLR (lower is better), IOR (higher is better), and Score.
    """
    if metric_weights is None:
        # Uniform weighting when the caller has not adjusted the sliders.
        metric_weights = [1.0 / len(METRICS)] * len(METRICS)
    if not isinstance(label_type, list):
        label_type = [label_type]

    data = pd.read_csv("data.csv")

    # Map raw CSV codes to their display representations.
    data["verified"] = data["verified"].astype(bool)
    data["dataset"] = data["dataset"].apply(lambda i: DATASET_LIST[i])
    data["ipc"] = data["ipc"].apply(lambda i: IPC_LIST[i])
    data["label_type"] = data["label_type"].apply(lambda i: LABEL_TYPE_LIST[i])

    # Row filters driven by the UI controls.
    if verified:
        data = data[data["verified"]]
    data = data[data["dataset"] == dataset]
    data = data[data["ipc"] == ipc]
    data = data[data["label_type"].isin(label_type)]

    # Weighted score: one pass over all metrics (metric columns are lowercase).
    data["score"] = sum(
        data[metric.lower()] * weight
        for metric, weight in zip(METRICS, metric_weights)
    )
    data = data.sort_values(by="score", ascending=False)
    data["ranking"] = range(1, len(data) + 1)

    # Formatting: render the method name as a markdown link to its reference.
    data["method"] = "[" + data["method"] + "](" + data["method_reference"] + ")"
    data["verified"] = data["verified"].apply(lambda x: "✅" if x else "")
    data = data.drop(columns=["method_reference", "dataset", "ipc"])
    data = data[['ranking', 'method', 'verified', 'date', 'label_type', 'hlr', 'ior', 'score']]

    # NOTE(review): the original branched on `label_type == "Hard Label"`,
    # which is always False (label_type is a list by this point) — and both
    # branches applied the identical rename anyway, so a single rename suffices.
    data = data.rename(columns={
        "ranking": "Ranking",
        "method": "Method",
        "date": "Date",
        "label_type": "Label Type",
        "hlr": "HLR↓",
        "ior": "IOR↑",
        "score": "Score",
        "verified": "Verified",
    })
    return data
# Leaderboard UI: controls (verified / dataset / IPC / label type / weight
# sliders) drive a single Dataframe component that is rebuilt on every change.
with gr.Blocks() as leaderboard:
    gr.Markdown(LEADERBOARD_INTRODUCTION)
    verified = gr.Checkbox(
        label="Verified by DD-Ranking Team (Uncheck to view all submissions)",
        value=True,
        interactive=True
    )
    dataset = gr.Radio(
        label="Dataset",
        choices=DATASET_LIST,
        value=DATASET_LIST[0],
        interactive=True,
    )
    ipc = gr.Radio(
        label="IPC",
        choices=IPC_LIST,
        value=IPC_LIST[0],
        interactive=True,
        info=IPC_INFO
    )
    label = gr.CheckboxGroup(
        label="Label Type",
        choices=LABEL_TYPE_LIST,
        value=LABEL_TYPE_LIST,
        info=LABEL_TYPE_INFO,
        interactive=True,
    )
    with gr.Accordion("Adjust Score Weights", open=False):
        # latex_delimiters lets the weight-adjustment blurb render inline math.
        gr.Markdown(WEIGHT_ADJUSTMENT_INTRODUCTION, latex_delimiters=[
            {'left': '$$', 'right': '$$', 'display': True},
            {'left': '$', 'right': '$', 'display': False},
            {'left': '\\(', 'right': '\\)', 'display': False},
            {'left': '\\[', 'right': '\\]', 'display': True}
        ])
        metric_sliders = []
        for metric in METRICS:
            metric_sliders.append(gr.Slider(label=f"Weight for {metric}", minimum=0.0, maximum=1.0, value=0.5, interactive=True))
        adjust_btn = gr.Button("Adjust Weights")

    def _render_board(v, d, i, l, *weights):
        """Rebuild the leaderboard Dataframe from the current control values.

        Shared by the initial render, the filter-change handlers, and the
        weight-adjustment button (the original duplicated this component
        construction verbatim in all three places).
        """
        return gr.components.Dataframe(
            value=get_data(v, d, i, l, list(weights)),
            headers=COLUMN_NAMES,
            type="pandas",
            datatype=DATA_TITLE_TYPE,
            interactive=False,
            visible=True,
            max_height=500,
        )

    metric_weights = [s.value for s in metric_sliders]
    board = _render_board(verified.value, dataset.value, ipc.value, label.value, *metric_weights)

    # Every control refreshes the same Dataframe with the same input set.
    refresh_inputs = [verified, dataset, ipc, label] + metric_sliders
    for component in [verified, dataset, ipc, label]:
        component.change(_render_board, inputs=refresh_inputs, outputs=board)
    adjust_btn.click(fn=_render_board, inputs=refresh_inputs, outputs=board)

    citation_button = gr.Textbox(
        value=CITATION_BUTTON_TEXT,
        label=CITATION_BUTTON_LABEL,
        elem_id="citation-button",
        lines=6,
        show_copy_button=True,
    )
leaderboard.launch()