Update app.py
app.py CHANGED
```diff
@@ -97,29 +97,8 @@ def update_leaderboard(selected_methods, selected_metrics):
 
 # ------- Visualisation helpers ---------------------------------------------
 
-def get_plot_explanation(benchmark_type, x_metric, y_metric, aspect, dataset, single_metric):
-    if benchmark_type == "similarity":
-        return (
-            f"Scatter plot compares models on **{x_metric}** (x‑axis) and **{y_metric}** (y‑axis). "
-            "Upper‑right points indicate jointly strong performance."
-        )
-    if benchmark_type == "function":
-        return (
-            f"Heat‑map shows model scores for **{aspect.upper()}** terms with **{single_metric}**. "
-            "Darker squares → better predictions."
-        )
-    if benchmark_type == "family":
-        return (
-            f"Box‑plots summarise cross‑fold MCC on **{dataset}**; higher medians are better."
-        )
-    if benchmark_type == "affinity":
-        return (
-            f"Box‑plots display distribution of **{single_metric}** scores for affinity prediction; higher values are better."
-        )
-    return ""
-
 
-def generate_plot_and_explanation(benchmark_type, methods_selected, x_metric, y_metric, aspect, dataset, single_metric):
+def generate_plot(benchmark_type, methods_selected, x_metric, y_metric, aspect, dataset, single_metric):
     plot_path = benchmark_plot(
         benchmark_type,
         methods_selected,
@@ -129,8 +108,7 @@ def generate_plot_and_explanation(benchmark_type, methods_selected, x_metric, y_
         dataset,
         single_metric,
     )
-
-    return explanation, plot_path
+    return plot_path
 
 # ---------------------------------------------------------------------------
 # Custom CSS for frozen first column and clearer table styles
@@ -258,7 +236,7 @@ with block:
     # ------------------------------------------------------------------
     # 2️⃣ Visualisation tab
     # ------------------------------------------------------------------
-    with gr.TabItem("📊
+    with gr.TabItem("📊 Visualization", elem_id="probe-benchmark-tab-visualization", id=2):
         gr.Markdown(
             """## **Interactive Visualizations**
 Choose a benchmark type; context‑specific options will appear. Click **Plot** and an explanation will follow the figure.""",
@@ -298,7 +276,7 @@ with block:
         ],
     )
     plot_button.click(
-        generate_plot_and_explanation,
+        generate_plot,
         inputs=[
             vis_benchmark_type_selector,
             vis_method_selector,
@@ -308,7 +286,7 @@ with block:
             vis_dataset_selector,
             vis_single_metric_selector,
         ],
-        outputs=[
+        outputs=[plot_output],
     )
 
     # ------------------------------------------------------------------
```