Update app.py
app.py
CHANGED
@@ -34,6 +34,29 @@ client = Client(f"http://{os.environ['SERVER']}/predict")
 def get_layerwise_nonlinearity(task_name: str, model_name: str, text: str, normalization_type: str) -> Tuple[Any, str]:
     return client.send_request(task_name, model_name, text, normalization_type)
 
+def update_output(task_name: str, model_name: str, text: str, normalization_type: str) -> Tuple[Any]:
+    img, _ = get_layerwise_nonlinearity(task_name, model_name, text, normalization_type)
+    return img
+
+def set_default(task_name: str) -> str:
+    if task_name in ["Layer wise non-linearity", "Next-token prediction from intermediate representations", "Tokenwise loss without i-th layer"]:
+        return "token-wise"
+    return "global"
+
+def check_normalization(task_name: str, normalization_name) -> Tuple[str]:
+    if task_name == "Contextualization measurement" and normalization_name == "token-wise":
+        return "global"
+    return normalization_name
+
+def update_description(task_name: str) -> str:
+    descriptions = {
+        "Layer wise non-linearity": "Non-linearity per layer: shows how complex each layer's transformation is. Red = more nonlinear.",
+        "Next-token prediction from intermediate representations": "Layerwise token prediction: when does the model start guessing correctly?",
+        "Contextualization measurement": "Context stored in each token: how well can the model reconstruct the previous context?",
+        "Layerwise predictions (logit lens)": "Logit lens: what does each layer believe the next token should be?",
+        "Tokenwise loss without i-th layer": "Layer ablation: how much does performance drop if a layer is removed?"
+    }
+    return descriptions.get(task_name, "ℹ️ No description available.")
 
 with gr.Blocks() as demo:
     gr.Markdown("# 🔬 LLM-Microscope – Understanding Token Representations in Transformers")
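The hunk above builds the new UI callbacks on top of `client.send_request`, whose implementation lives outside this file and is not part of the diff. As a point of reference only, here is a minimal sketch of a client wrapper that would satisfy the calls shown; the endpoint layout, payload keys, and base64-encoded image response are assumptions, not the Space's actual protocol.

```python
# Hypothetical sketch only: the real Client class is not shown in this diff.
# Endpoint shape, payload keys, and response format are assumptions.
import base64
import io
from typing import Any, Tuple

import requests
from PIL import Image


class Client:
    def __init__(self, url: str) -> None:
        # e.g. f"http://{os.environ['SERVER']}/predict", as in the hunk header above
        self.url = url

    def send_request(self, task_name: str, model_name: str, text: str,
                     normalization_type: str) -> Tuple[Any, str]:
        payload = {
            "task": task_name,
            "model": model_name,
            "text": text,
            "normalization": normalization_type,
        }
        resp = requests.post(self.url, json=payload, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        # Assume the server returns a base64-encoded PNG plus a status string.
        img = Image.open(io.BytesIO(base64.b64decode(data["image"])))
        return img, data.get("status", "ok")
```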
@@ -65,22 +88,24 @@ with gr.Blocks() as demo:
                 label="Select Normalization"
             )
 
+            task_description = gr.Markdown("ℹ️ Choose a mode to see what it does.")
+
         with gr.Column():
             text_message = gr.Textbox(label="Enter your input text:", value="I love to live my life")
             submit = gr.Button("Submit")
             box_for_plot = gr.Image(label="Visualization", type="pil")
 
-
-
-### 📊 Legend and Interpretation
-
+    with gr.Accordion("📊 Full Legend and Interpretation", open=False):
+        gr.Markdown("""
 This heatmap shows **how each token is processed** across layers of a language model. Here's how to read it:
 
 - **Rows**: layers of the model (bottom = deeper)
 - **Columns**: input tokens
 - **Colors**: intensity of effect (depends on the selected metric)
 
-
+---
+
+### Metrics explained:
 
 - `Layer wise non-linearity`: how nonlinear the transformation is at each layer (red = more nonlinear).
 - `Next-token prediction from intermediate representations`: shows which layers begin to make good predictions.
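The `Select Normalization` dropdown shown above offers `token-wise` and `global` modes, and the helpers `set_default`/`check_normalization` from the first hunk switch between them per task. The normalization itself is applied server-side and is not part of this diff; the sketch below only illustrates one common reading of the two modes on a layers-by-tokens score matrix, as an assumption.

```python
# Illustration only: an assumed reading of "token-wise" vs "global" normalization
# for a (num_layers, num_tokens) score matrix; the Space's server-side code is not shown here.
import numpy as np

scores = np.random.default_rng(0).random((24, 6))  # layers x tokens

# Token-wise: rescale each column (token) independently, so colors compare
# layers within a single token.
col_min, col_max = scores.min(axis=0), scores.max(axis=0)
token_wise = (scores - col_min) / (col_max - col_min + 1e-9)

# Global: one min/max for the whole matrix, so colors are also comparable
# across tokens.
global_norm = (scores - scores.min()) / (scores.max() - scores.min() + 1e-9)
```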
@@ -89,29 +114,16 @@ This heatmap shows **how each token is processed** across layers of a language m
 - `Tokenwise loss without i-th layer`: shows how much each token depends on a specific layer. Red means performance drops if we skip this layer.
 
 Use this tool to **peek inside the black box** – it reveals which layers matter most, which tokens carry the most memory, and how LLMs evolve their predictions.
-
-
-
-
-
-
-
-
-
-
-
-    def check_normalization(task_name: str, normalization_name) -> Tuple[str]:
-        if task_name == "Contextualization measurement" and normalization_name == "token-wise":
-            return "global"
-        return normalization_name
-
-    task_selector.select(set_default, [task_selector], [normalization_selector])
-    normalization_selector.select(check_normalization, [task_selector, normalization_selector], [normalization_selector])
-    submit.click(
-        fn=update_output,
-        inputs=[task_selector, model_selector, text_message, normalization_selector],
-        outputs=[box_for_plot]
-    )
+    """)
+
+    task_selector.change(fn=update_description, inputs=[task_selector], outputs=[task_description])
+    task_selector.select(set_default, [task_selector], [normalization_selector])
+    normalization_selector.select(check_normalization, [task_selector, normalization_selector], [normalization_selector])
+    submit.click(
+        fn=update_output,
+        inputs=[task_selector, model_selector, text_message, normalization_selector],
+        outputs=[box_for_plot]
+    )
 
 if __name__ == "__main__":
     demo.launch(share=True, server_port=7860, server_name="0.0.0.0")
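For readers less familiar with Gradio, the event wiring added in the last hunk follows a standard Blocks pattern: a `.change` handler refreshes the description, a `.select` handler resets the normalization default, and `.click` drives the main request. Below is a self-contained toy version of that pattern; component names, choices, and the `run` stub are placeholders rather than the Space's real code.

```python
# Self-contained sketch of the Blocks event-wiring pattern used in this commit.
# Names, choices, and the run() stub are placeholders, not the actual app.
import gradio as gr

MODES = ["Mode A", "Mode B"]

def describe(mode: str) -> str:
    return f"You picked **{mode}**."

def default_norm(mode: str) -> str:
    return "token-wise" if mode == "Mode A" else "global"

def run(mode: str, norm: str, text: str) -> str:
    return f"{mode} / {norm}: {text[::-1]}"  # stand-in for the real server call

with gr.Blocks() as demo:
    mode = gr.Dropdown(choices=MODES, value="Mode A", label="Select Mode")
    norm = gr.Dropdown(choices=["token-wise", "global"], value="token-wise", label="Select Normalization")
    info = gr.Markdown("Choose a mode to see what it does.")
    text = gr.Textbox(label="Enter your input text:")
    button = gr.Button("Submit")
    out = gr.Textbox(label="Output")

    mode.change(fn=describe, inputs=[mode], outputs=[info])         # refresh the description
    mode.select(default_norm, [mode], [norm])                       # reset the normalization default
    button.click(fn=run, inputs=[mode, norm, text], outputs=[out])  # produce the result

if __name__ == "__main__":
    demo.launch()
```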
|