Joschka Strueber committed
Commit b776365 · 1 Parent(s): 75b9622

[Add] error messages

Files changed (1): app.py (+17 -6)
app.py CHANGED
@@ -3,8 +3,10 @@ import gradio as gr
 import numpy as np
 import matplotlib.pyplot as plt
 import seaborn as sns
+import re
 from io import BytesIO
 from PIL import Image
+from datasets import DatasetNotFoundError
 
 print(gr.__version__)
 
@@ -71,12 +73,21 @@ def validate_inputs(selected_models, selected_dataset):
 
 
 def update_datasets_based_on_models(selected_models, current_dataset):
-    available_datasets = get_leaderboard_datasets(selected_models) if selected_models else []
-    valid_dataset = current_dataset if current_dataset in available_datasets else None
-    return gr.update(
-        choices=available_datasets,
-        value=valid_dataset
-    )
+    try:
+        available_datasets = get_leaderboard_datasets(selected_models) if selected_models else []
+        valid_dataset = current_dataset if current_dataset in available_datasets else None
+        return gr.update(
+            choices=available_datasets,
+            value=valid_dataset
+        )
+    except DatasetNotFoundError as e:
+        # Extract model name from error message
+        match = re.search(r"open-llm-leaderboard/([\w\-]+)", str(e))
+        model_name = match.group(1) if match else "Unknown Model"
+
+        # Display a shorter warning
+        gr.Warning(f"Data for '{model_name}' is gated or unavailable.")
+        return gr.update(choices=[], value=None)
 
 with gr.Blocks(title="LLM Similarity Analyzer") as demo:
     gr.Markdown("## Model Similarity Comparison Tool \n\nAs Language Model (LM) capabilities advance, evaluating and supervising them at scale is getting harder for humans. There is hope that other language models can automate both these tasks, which we refer to as AI Oversight. We study how model similarity affects both aspects of AI oversight by proposing a probabilistic metric for LM similarity based on overlap in model mistakes. Using this metric, we first show that LLM-as-a-judge scores favor models similar to the judge, generalizing recent self-preference results. Then, we study training on LM annotations, and find complementary knowledge between the weak supervisor and strong student model plays a crucial role in gains from weak-to-strong generalization. As model capabilities increase, it becomes harder to find their mistakes, and we might defer more to AI oversight. However, we observe a concerning trend -- model mistakes are becoming more similar with increasing capabilities, pointing to risks from correlated failures. Our work underscores the importance of reporting and correcting for model similarity, especially in the emerging paradigm of AI oversight. ")