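"""Gradio leaderboard app.

Displays two result tables (robustness and context grounding) for a set of
LLMs, each rendered in its own tab as a pandas DataFrame.
"""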
import gradio as gr
import pandas as pd

# Data for Table 1: Robustness Results
robustness_data = {
    "Model Name": [
        "Gemini 2.0 Flash Exp", "Gemini 1.5 Pro 002", "OpenAI GPT-4o", "OpenAI o1", "OpenAI o3-mini",
        "DeepSeek-R1-Distill-Llama-8B", "DeepSeek-R1-Distill-Qwen-14B", "DeepSeek-R1-Distill-Qwen-32B",
        "DeepSeek-R1-Distill-Llama-70B", "DeepSeek-R1", "Meta-Llama-3.1-8B-Instruct",
        "Meta-Llama-3.1-70B-Instruct", "Meta-Llama-3.3-70B-Instruct", "Qwen2.5-7B-Instruct",
        "Qwen2.5-14B-Instruct", "Qwen2.5-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen2.5-7B-Instruct-1M",
        "Qwen2.5-14B-Instruct-1M", "Nemotron-70B-Instruct-HF", "Phi-3-mini-128k-Instruct",
        "Phi-3-small-128k-Instruct", "Phi-3-medium-128k-Instruct", "Palmyra-Fin-128k-Instruct"
    ],
    "Baseline": [0.95, 0.96, 0.95, 0.97, 0.98, 0.83, 0.95, 0.95, 0.96, 0.94, 0.91, 0.94, 0.95, 0.92, 0.95, 0.95, 0.94, 0.91, 0.95, 0.94, 0.86, 0.88, 0.89, 0.96],
    "Misspelled (Ξ”)": ["0.95 (0.0)", "0.95 (0.0)", "0.94 (↓0.01)", "0.95 (↓0.02)", "0.96 (↓0.02)", "0.85 (↑0.02)", "0.90 (↓0.05)", "0.97 (↑0.02)", "0.97 (↑0.01)", "0.94 (0.0)", "0.90 (↓0.01)", "0.92 (↓0.02)", "0.92 (↓0.03)", "0.91 (↓0.01)", "0.94 (↓0.01)", "0.94 (0.0)", "0.94 (0.0)", "0.91 (0.0)", "0.92 (↓0.03)", "0.94 (0.0)", "0.85 (↓0.01)", "0.84 (↓0.04)", "0.84 (↓0.05)", "0.93 (↓0.03)"],
    "Incomplete (Ξ”)": ["0.95 (0.0)", "0.94 (↓0.02)", "0.94 (↓0.01)", "0.94 (↓0.03)", "0.96 (↓0.02)", "0.82 (↓0.01)", "0.92 (↓0.03)", "0.95 (0.0)", "0.95 (↓0.01)", "0.93 (↓0.01)", "0.86 (↓0.05)", "0.94 (0.0)", "0.93 (↓0.02)", "0.90 (↓0.02)", "0.94 (↓0.01)", "0.93 (↓0.02)", "0.93 (↓0.01)", "0.91 (0.0)", "0.91 (↓0.04)", "0.93 (↓0.01)", "0.78 (↓0.08)", "0.78 (↓0.10)", "0.84 (↓0.05)", "0.92 (↓0.04)"],
    "Out-of-Domain (Ξ”)": ["0.88 (↓0.07)", "0.92 (↓0.04)", "0.92 (↓0.03)", "0.89 (↓0.08)", "0.95 (↓0.03)", "0.87 (↑0.04)", "0.93 (↓0.02)", "0.92 (↓0.03)", "0.94 (↓0.02)", "0.91 (↓0.03)", "0.82 (↓0.09)", "0.87 (↓0.07)", "0.90 (↓0.05)", "0.85 (↓0.07)", "0.94 (↓0.01)", "0.92 (↓0.03)", "0.92 (↓0.02)", "0.86 (↓0.05)", "0.91 (↓0.04)", "0.90 (↓0.04)", "0.79 (↓0.07)", "0.83 (↓0.05)", "0.81 (↓0.08)", "0.90 (↓0.06)"],
    "OCR Context (Ξ”)": ["0.91 (↓0.04)", "0.92 (↓0.04)", "0.95 (0.0)", "0.94 (↓0.03)", "0.90 (↓0.08)", "0.72 (↓0.11)", "0.86 (↓0.09)", "0.89 (↓0.06)", "0.93 (↓0.03)", "0.88 (↓0.06)", "0.80 (↓0.11)", "0.88 (↓0.06)", "0.89 (↓0.06)", "0.80 (↓0.12)", "0.88 (↓0.07)", "0.92 (↓0.03)", "0.91 (↓0.03)", "0.77 (↓0.14)", "0.89 (↓0.06)", "0.91 (↓0.03)", "0.69 (↓0.17)", "0.78 (↓0.10)", "0.72 (↓0.17)", "0.89 (↓0.07)"],
    "Robustness (Ξ”)": ["0.83 (↓0.12)", "0.84 (↓0.12)", "0.85 (↓0.10)", "0.81 (↓0.16)", "0.90 (↓0.08)", "0.64 (↓0.19)", "0.82 (↓0.13)", "0.86 (↓0.09)", "0.89 (↓0.07)", "0.80 (↓0.14)", "0.70 (↓0.21)", "0.80 (↓0.14)", "0.82 (↓0.13)", "0.75 (↓0.17)", "0.86 (↓0.09)", "0.85 (↓0.10)", "0.84 (↓0.10)", "0.74 (↓0.17)", "0.80 (↓0.15)", "0.82 (↓0.12)", "0.58 (↓0.28)", "0.70 (↓0.18)", "0.63 (↓0.26)", "0.83 (↓0.13)"]
}

# Data for Table 2: Context Grounding Results
context_grounding_data = {
    "Model Name": [
        "Gemini 2.0 Flash Exp", "Gemini 1.5 Pro 002", "OpenAI GPT-4o", "OpenAI o1", "OpenAI o3-mini",
        "DeepSeek-R1-Distill-Llama-8B", "DeepSeek-R1-Distill-Qwen-14B", "DeepSeek-R1-Distill-Qwen-32B",
        "DeepSeek-R1-Distill-Llama-70B", "DeepSeek-R1", "Meta-Llama-3.1-8B-Instruct",
        "Meta-Llama-3.1-70B-Instruct", "Meta-Llama-3.3-70B-Instruct", "Qwen2.5-7B-Instruct",
        "Qwen2.5-14B-Instruct", "Qwen2.5-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen2.5-7B-Instruct-1M",
        "Qwen2.5-14B-Instruct-1M", "Nemotron-70B-Instruct-HF", "Phi-3-mini-128k-Instruct",
        "Phi-3-small-128k-Instruct", "Phi-3-medium-128k-Instruct", "Palmyra-Fin-128k-Instruct"
    ],
    "Irrelevant Ctx": [0.81, 0.74, 0.52, 0.56, 0.67, 0.32, 0.49, 0.54, 0.50, 0.51, 0.67, 0.46, 0.50, 0.75, 0.75, 0.89, 0.69, 0.63, 0.78, 0.52, 0.54, 0.37, 0.36, 0.95],
    "No Ctx": [0.66, 0.64, 0.43, 0.55, 0.51, 0.27, 0.21, 0.24, 0.27, 0.22, 0.63, 0.37, 0.40, 0.64, 0.61, 0.68, 0.60, 0.58, 0.53, 0.48, 0.34, 0.26, 0.25, 0.66],
    "Ctx Grounding QA": [0.77, 0.72, 0.50, 0.57, 0.63, 0.30, 0.36, 0.40, 0.41, 0.39, 0.70, 0.48, 0.47, 0.75, 0.70, 0.82, 0.68, 0.65, 0.69, 0.52, 0.47, 0.34, 0.33, 0.83],
    "Ctx Grounding TG": [0.46, 0.52, 0.25, 0.45, 0.27, 0.25, 0.27, 0.35, 0.22, 0.20, 0.27, 0.37, 0.31, 0.31, 0.55, 0.55, 0.39, 0.29, 0.37, 0.39, 0.24, 0.10, 0.14, 0.65],
    "Ctx Grounding": [0.74, 0.69, 0.47, 0.55, 0.59, 0.30, 0.35, 0.39, 0.38, 0.37, 0.65, 0.47, 0.45, 0.70, 0.68, 0.79, 0.64, 0.60, 0.65, 0.50, 0.44, 0.31, 0.30, 0.80],
    "Robustness": [0.83, 0.84, 0.85, 0.81, 0.90, 0.64, 0.82, 0.86, 0.89, 0.80, 0.70, 0.80, 0.82, 0.75, 0.86, 0.85, 0.84, 0.74, 0.80, 0.82, 0.58, 0.70, 0.63, 0.83],
    "Compliance": [0.76, 0.72, 0.52, 0.59, 0.63, 0.34, 0.40, 0.44, 0.43, 0.41, 0.66, 0.51, 0.49, 0.71, 0.71, 0.80, 0.67, 0.62, 0.68, 0.54, 0.46, 0.35, 0.34, 0.81]
}

# Build the Gradio interface with one tab per results table
def create_leaderboard():
    # Convert data to DataFrames for better display
    robustness_df = pd.DataFrame(robustness_data)
    context_grounding_df = pd.DataFrame(context_grounding_data)

    # Create the Gradio interface with one tab per table
    with gr.Blocks(title="Model Performance Leaderboard") as demo:
        gr.Markdown("# Model Performance Leaderboard")
        
        with gr.Tab("Robustness Results"):
            gr.DataFrame(value=robustness_df, label="Robustness Results", wrap=True)
        
        with gr.Tab("Context Grounding Results"):
            gr.DataFrame(value=context_grounding_df, label="Context Grounding Results", wrap=True)

    return demo

# Launch the Gradio app
if __name__ == "__main__":
    demo = create_leaderboard()
    demo.launch()