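# Gradio app for the SVRP-Bench leaderboard: builds a score table with pandas
# and serves it in a tabbed web UI.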
import pandas as pd
import gradio as gr
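# Benchmark scores per method; the column names follow the two tasks named in
# the description below (CVRP and TWVRP).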
data = {
    "Method": [
        "OR-Tools", "ACO", "LKH3", "NN2Opt", "Tabu",
    ],
    "CVRP": [
        57.90, 48.82, 51.35, 46.67, 45.58,
    ],
    "TWVRP": [
        59.11, 42.89, 49.06, 36.59, 33.59,
    ],
}
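# Add a rounded per-method average over all task columns and move it next to
# the method name.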
df = pd.DataFrame(data)
df['Average Score'] = df.iloc[:, 1:].mean(axis=1).round(2)
df = df[['Method', 'Average Score'] + [col for col in df.columns if col not in ['Method', 'Average Score']]]
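# Note: this helper is currently unused; gr.Dataframe below is handed df directly.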
def display_data():
    return df
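# Two-tab UI: the leaderboard table and the submission instructions.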
with gr.Blocks() as demo:
    # gr.Markdown("", elem_id="camel-icon")  # Replace with actual camel icon URL
    gr.Markdown("# **SVRP-Bench: Real-World Vehicle Routing Benchmark**")
    gr.Markdown("""
    This table shows the performance of different methods across vehicle routing tasks, including CVRP and TWVRP.
    """)
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
            # with gr.Row():
            #     with gr.Column():
            gr.Dataframe(value=df, label="SVRP-Bench Model Performance", interactive=False)
        with gr.TabItem("📤 How to Submit", elem_id="submission-tab", id=1):
            gr.Markdown("""
            ## Submission Instructions

            To contribute your model's results to the SVRP-Bench leaderboard:

            - **Via Email**: Send your results to **[email protected]**, and we'll add them to the leaderboard for you.

            **We look forward to seeing your contributions!**
            """)

demo.launch()
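# To run locally (assuming Gradio is installed): `python app.py`,
# or `gradio app.py` for auto-reload during development.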