mgyigit committed
Commit edb9d91 · verified · 1 Parent(s): b696eae

Update app.py

Files changed (1)
  1. app.py +220 -100
app.py CHANGED
@@ -1,5 +1,3 @@
- __all__ = ['block', 'make_clickable_model', 'make_clickable_user', 'get_submissions']
-
  import gradio as gr
  import pandas as pd
  import re
@@ -22,6 +20,9 @@ from src.saving_utils import *
  from src.vis_utils import *
  from src.bin.PROBE import run_probe


  def add_new_eval(
      human_file,
@@ -35,6 +36,7 @@ def add_new_eval(
      family_prediction_dataset,
      save,
  ):
      if any(task in benchmark_types for task in ['similarity', 'family', 'function']) and human_file is None:
          gr.Warning("Human representations are required for similarity, family, or function benchmarks!")
          return -1
@@ -43,27 +45,36 @@
          gr.Warning("SKEMPI representations are required for affinity benchmark!")
          return -1

-     processing_info = gr.Info("Your submission is being processed...")

      representation_name = model_name_textbox if revision_name_textbox == '' else revision_name_textbox

      try:
-         results = run_probe(benchmark_types, representation_name, human_file, skempi_file, similarity_tasks, function_prediction_aspect, function_prediction_dataset, family_prediction_dataset)
-     except:
-         completion_info = gr.Warning("Your submission has not been processed. Please check your representation files!")
          return -1

-
      if save:
          save_results(representation_name, benchmark_types, results)
-         completion_info = gr.Info("Your submission has been processed and results are saved!")
-
      else:
-         completion_info = gr.Info("Your submission has been processed!")

      return 0


  def refresh_data():
      api.restart_space(repo_id=repo_id)
      benchmark_types = ["similarity", "function", "family", "affinity", "leaderboard"]

@@ -75,63 +86,130 @@ def refresh_data():
      benchmark_types.remove("leaderboard")
      download_from_hub(benchmark_types)

- # Define a function to update metrics based on benchmark type selection
  def update_metrics(selected_benchmarks):
      updated_metrics = set()
      for benchmark in selected_benchmarks:
          updated_metrics.update(benchmark_metric_mapping.get(benchmark, []))
      return list(updated_metrics)
-
- # Define a function to update the leaderboard
  def update_leaderboard(selected_methods, selected_metrics):
      updated_df = get_baseline_df(selected_methods, selected_metrics)
      return updated_df
-
  block = gr.Blocks()

  with block:
      gr.Markdown(LEADERBOARD_INTRODUCTION)
-
      with gr.Tabs(elem_classes="tab-buttons") as tabs:
          with gr.TabItem("🏅 PROBE Leaderboard", elem_id="probe-benchmark-tab-table", id=1):

-             leaderboard = get_baseline_df(None, None)  # get baseline leaderboard without filtering
-
              method_names = leaderboard['Method'].unique().tolist()
              metric_names = leaderboard.columns.tolist()
-             metrics_with_method = metric_names.copy()
-             metric_names.remove('Method')  # Remove method_name from the metric options

              benchmark_metric_mapping = {
-                 "similarity": [metric for metric in metric_names if metric.startswith('sim_')],
-                 "function": [metric for metric in metric_names if metric.startswith('func')],
-                 "family": [metric for metric in metric_names if metric.startswith('fam_')],
-                 "affinity": [metric for metric in metric_names if metric.startswith('aff_')],
              }
-
-             # Leaderboard section with method and metric selectors
              leaderboard_method_selector = gr.CheckboxGroup(
-                 choices=method_names, label="Select Methods for the Leaderboard", value=method_names, interactive=True
              )

-             benchmark_type_selector = gr.CheckboxGroup(
-                 choices=list(benchmark_metric_mapping.keys()),
-                 label="Select Benchmark Types",
-                 value=None,  # Initially select all benchmark types
-                 interactive=True
              )
              leaderboard_metric_selector = gr.CheckboxGroup(
-                 choices=metric_names, label="Select Metrics for the Leaderboard", value=None, interactive=True
              )

-             # Display the filtered leaderboard
              baseline_value = get_baseline_df(method_names, metric_names)
-             baseline_value = baseline_value.applymap(lambda x: round(x, 4) if isinstance(x, (int, float)) else x)  # Round all numeric values to 4 decimal places
              baseline_header = ["Method"] + metric_names
              baseline_datatype = ['markdown'] + ['number'] * len(metric_names)

              with gr.Row(show_progress=True, variant='panel'):
-                 data_component = gr.components.Dataframe(
                      value=baseline_value,
                      headers=baseline_header,
                      type="pandas",
@@ -140,78 +218,121 @@ with block:
                      visible=True,
                  )

-             # Update leaderboard when method/metric selection changes
              leaderboard_method_selector.change(
-                 get_baseline_df,
-                 inputs=[leaderboard_method_selector, leaderboard_metric_selector],
-                 outputs=data_component
              )
-
-             # Update metrics when benchmark type changes
-             benchmark_type_selector.change(
-                 lambda selected_benchmarks: update_metrics(selected_benchmarks),
-                 inputs=[benchmark_type_selector],
-                 outputs=leaderboard_metric_selector
              )

              leaderboard_metric_selector.change(
-                 get_baseline_df,
-                 inputs=[leaderboard_method_selector, leaderboard_metric_selector],
-                 outputs=data_component
              )

-             with gr.Row():
-                 gr.Markdown(
-                     """
-                     ## **Below, you can visualize the results displayed in the Leaderboard.**
-                     ### Once you choose a benchmark type, the related options for metrics, datasets, and other parameters will become visible. Select the methods and metrics of interest from the options to generate visualizations.
-                     """
-                 )
-
-             # Dropdown for benchmark type
-             benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type", value=None)

-             with gr.Row():
-                 # Dynamic selectors
-                 x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
-                 y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
-                 aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
-                 dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
-                 single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)

-             method_selector = gr.CheckboxGroup(choices=method_names, label="Select methods to visualize", interactive=True, value=method_names)
-
-             # Button to draw the plot for the selected benchmark

              plot_button = gr.Button("Plot")

              with gr.Row(show_progress=True, variant='panel'):
                  plot_output = gr.Image(label="Plot")
-
-             # Update selectors when benchmark type changes
-             benchmark_type_selector.change(
                  update_metric_choices,
-                 inputs=[benchmark_type_selector],
-                 outputs=[x_metric_selector, y_metric_selector, aspect_type_selector, dataset_selector, single_metric_selector]
              )
-
              plot_button.click(
-                 benchmark_plot,
-                 inputs=[benchmark_type_selector, method_selector, x_metric_selector, y_metric_selector, aspect_type_selector, dataset_selector, single_metric_selector],
-                 outputs=plot_output
              )
-
-         with gr.TabItem("📝 About", elem_id="probe-benchmark-tab-table", id=2):
              with gr.Row():
                  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
              with gr.Row():
                  gr.Image(
-                     value="./src/data/PROBE_workflow_figure.jpg",  # Replace with your image file path or URL
-                     label="PROBE Workflow Figure",  # Optional label
-                     elem_classes="about-image",  # Optional CSS class for styling
                  )
-
-         with gr.TabItem("🚀 Submit here! ", elem_id="probe-benchmark-tab-table", id=3):
              with gr.Row():
                  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

@@ -220,12 +341,8 @@ with block:

              with gr.Row():
                  with gr.Column():
-                     model_name_textbox = gr.Textbox(
-                         label="Method name",
-                     )
-                     revision_name_textbox = gr.Textbox(
-                         label="Revision Method Name",
-                     )

                      benchmark_types = gr.CheckboxGroup(
                          choices=TASK_INFO,
@@ -237,35 +354,34 @@
                          label="Similarity Tasks",
                          interactive=True,
                      )
-
                      function_prediction_aspect = gr.Radio(
                          choices=function_prediction_aspect_options,
                          label="Function Prediction Aspects",
                          interactive=True,
                      )
-
                      family_prediction_dataset = gr.CheckboxGroup(
                          choices=family_prediction_dataset_options,
                          label="Family Prediction Datasets",
                          interactive=True,
                      )
-
                      function_dataset = gr.Textbox(
                          label="Function Prediction Datasets",
                          visible=False,
-                         value="All_Data_Sets"
                      )

                      save_checkbox = gr.Checkbox(
                          label="Save results for leaderboard and visualization",
-                         value=True
                      )

-             #with gr.Column():
              with gr.Row():
-                 human_file = gr.components.File(label="The representation file (csv) for Human dataset", file_count="single", type='filepath')
-                 skempi_file = gr.components.File(label="The representation file (csv) for SKEMPI dataset", file_count="single", type='filepath')
-
              submit_button = gr.Button("Submit Eval")
              submission_result = gr.Markdown()
              submit_button.click(
@@ -284,6 +400,9 @@ with block:
                  ],
              )

      with gr.Row():
          data_run = gr.Button("Refresh")
          data_run.click(refresh_data, outputs=[data_component])
@@ -296,4 +415,5 @@ with block:
              show_copy_button=True,
          )

- block.launch()
  import gradio as gr
  import pandas as pd
  import re

  from src.vis_utils import *
  from src.bin.PROBE import run_probe

+ # ------------------------------------------------------------------
+ # Helper functions moved / added here so that UI callbacks can see them
+ # ------------------------------------------------------------------

  def add_new_eval(
      human_file,

      family_prediction_dataset,
      save,
  ):
+     """Validate inputs, run evaluation and (optionally) save results."""
      if any(task in benchmark_types for task in ['similarity', 'family', 'function']) and human_file is None:
          gr.Warning("Human representations are required for similarity, family, or function benchmarks!")
          return -1

          gr.Warning("SKEMPI representations are required for affinity benchmark!")
          return -1

+     gr.Info("Your submission is being processed")

      representation_name = model_name_textbox if revision_name_textbox == '' else revision_name_textbox

      try:
+         results = run_probe(
+             benchmark_types,
+             representation_name,
+             human_file,
+             skempi_file,
+             similarity_tasks,
+             function_prediction_aspect,
+             function_prediction_dataset,
+             family_prediction_dataset,
+         )
+     except Exception:
+         gr.Warning("Your submission has not been processed. Please check your representation files!")
          return -1

      if save:
          save_results(representation_name, benchmark_types, results)
+         gr.Info("Your submission has been processed and results are saved!")
      else:
+         gr.Info("Your submission has been processed!")

      return 0

+
  def refresh_data():
+     """Re-start the space and pull fresh leaderboard CSVs from the HF Hub."""
      api.restart_space(repo_id=repo_id)
      benchmark_types = ["similarity", "function", "family", "affinity", "leaderboard"]


      benchmark_types.remove("leaderboard")
      download_from_hub(benchmark_types)

+
+ # ------- Leaderboard helpers -------------------------------------------------
+
  def update_metrics(selected_benchmarks):
+     """Populate metric selector according to chosen benchmark types."""
      updated_metrics = set()
      for benchmark in selected_benchmarks:
          updated_metrics.update(benchmark_metric_mapping.get(benchmark, []))
      return list(updated_metrics)
+
+
  def update_leaderboard(selected_methods, selected_metrics):
      updated_df = get_baseline_df(selected_methods, selected_metrics)
      return updated_df
+
+ # ------- Visualisation helpers ----------------------------------------------
+
+ def get_plot_explanation(benchmark_type, x_metric, y_metric, aspect, dataset, single_metric):
+     """Return a short natural-language explanation for the produced plot."""
+     if benchmark_type == "similarity":
+         return (
+             f"The scatter plot compares models on **{x_metric}** (x-axis) and "
+             f"**{y_metric}** (y-axis). Points further to the upper-right indicate better "
+             "performance on both metrics."
+         )
+     elif benchmark_type == "function":
+         return (
+             f"The heat-map shows performance of each model (columns) across GO terms "
+             f"for the **{aspect.upper()}** aspect using the **{single_metric}** metric. "
+             "Darker squares correspond to stronger performance; hierarchical clustering "
+             "groups similar models and tasks together."
+         )
+     elif benchmark_type == "family":
+         return (
+             f"The horizontal box-plots summarise cross-validation performance on the "
+             f"**{dataset}** dataset. Higher median MCC values indicate better family-"
+             "classification accuracy."
+         )
+     elif benchmark_type == "affinity":
+         return (
+             f"Each box-plot shows the distribution of **{single_metric}** scores for every "
+             "model when predicting binding affinity changes. Higher values are better."
+         )
+     return ""
+
+
+ def generate_plot_and_explanation(
+     benchmark_type,
+     methods_selected,
+     x_metric,
+     y_metric,
+     aspect,
+     dataset,
+     single_metric,
+ ):
+     """Callback wrapper that returns both the image path and a textual explanation."""
+     plot_path = benchmark_plot(
+         benchmark_type,
+         methods_selected,
+         x_metric,
+         y_metric,
+         aspect,
+         dataset,
+         single_metric,
+     )
+     explanation = get_plot_explanation(benchmark_type, x_metric, y_metric, aspect, dataset, single_metric)
+     return plot_path, explanation
+
+ # ------------------------------------------------------------------
+ # UI definition
+ # ------------------------------------------------------------------
  block = gr.Blocks()

  with block:
      gr.Markdown(LEADERBOARD_INTRODUCTION)
+
      with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         # ------------------------------------------------------------------
+         # 1️⃣ Leaderboard tab
+         # ------------------------------------------------------------------
          with gr.TabItem("🏅 PROBE Leaderboard", elem_id="probe-benchmark-tab-table", id=1):
+             leaderboard = get_baseline_df(None, None)  # baseline leaderboard without filtering

              method_names = leaderboard['Method'].unique().tolist()
              metric_names = leaderboard.columns.tolist()
+             metric_names.remove('Method')  # remove non-metric column

              benchmark_metric_mapping = {
+                 "similarity": [m for m in metric_names if m.startswith('sim_')],
+                 "function": [m for m in metric_names if m.startswith('func')],
+                 "family": [m for m in metric_names if m.startswith('fam_')],
+                 "affinity": [m for m in metric_names if m.startswith('aff_')],
              }
+
+             # selectors -----------------------------------------------------
              leaderboard_method_selector = gr.CheckboxGroup(
+                 choices=method_names,
+                 label="Select Methods for the Leaderboard",
+                 value=method_names,
+                 interactive=True,
              )

+             benchmark_type_selector_lb = gr.CheckboxGroup(
+                 choices=list(benchmark_metric_mapping.keys()),
+                 label="Select Benchmark Types",
+                 value=None,
+                 interactive=True,
              )
+
              leaderboard_metric_selector = gr.CheckboxGroup(
+                 choices=metric_names,
+                 label="Select Metrics for the Leaderboard",
+                 value=None,
+                 interactive=True,
              )

+             # leaderboard table --------------------------------------------
              baseline_value = get_baseline_df(method_names, metric_names)
+             baseline_value = baseline_value.applymap(lambda x: round(x, 4) if isinstance(x, (int, float)) else x)
              baseline_header = ["Method"] + metric_names
              baseline_datatype = ['markdown'] + ['number'] * len(metric_names)

              with gr.Row(show_progress=True, variant='panel'):
+                 data_component = gr.Dataframe(
                      value=baseline_value,
                      headers=baseline_header,
                      type="pandas",

                      visible=True,
                  )

+             # callbacks -----------------------------------------------------
              leaderboard_method_selector.change(
+                 get_baseline_df,
+                 inputs=[leaderboard_method_selector, leaderboard_metric_selector],
+                 outputs=data_component,
              )
+
+             benchmark_type_selector_lb.change(
+                 lambda selected: update_metrics(selected),
+                 inputs=[benchmark_type_selector_lb],
+                 outputs=leaderboard_metric_selector,
              )

              leaderboard_metric_selector.change(
+                 get_baseline_df,
+                 inputs=[leaderboard_method_selector, leaderboard_metric_selector],
+                 outputs=data_component,
              )

+         # ------------------------------------------------------------------
+         # 2️⃣ Visualisation tab
+         # ------------------------------------------------------------------
+         with gr.TabItem("📊 Visualization", elem_id="probe-benchmark-tab-visualization", id=2):
+             # Intro / instructions
+             gr.Markdown(
+                 """
+                 ## **Interactive Visualizations**
+                 Select a benchmark type first; context-specific options will appear automatically.
+                 Once your parameters are set, click **Plot** to generate the figure.
+
+                 **How to read the plots**
+                 * **Similarity (scatter)** – Each point is a model. Points nearer the top-right perform well on both chosen similarity metrics.
+                 * **Function prediction (heat-map)** – Darker squares denote better scores. Rows/columns are clustered to reveal shared structure.
+                 * **Family / Affinity (boxplots)** – Boxes summarise distribution across folds/targets. Higher medians indicate stronger performance.
+                 """,
+                 elem_classes="markdown-text",
+             )

+             # ------------------------------------------------------------------
+             # selectors specific to visualisation
+             # ------------------------------------------------------------------
+             vis_benchmark_type_selector = gr.Dropdown(
+                 choices=list(benchmark_specific_metrics.keys()),
+                 label="Select Benchmark Type",
+                 value=None,
+             )

+             with gr.Row():
+                 vis_x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
+                 vis_y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
+                 vis_aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
+                 vis_dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
+                 vis_single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
+
+             vis_method_selector = gr.CheckboxGroup(
+                 choices=method_names,
+                 label="Select methods to visualize",
+                 interactive=True,
+                 value=method_names,
+             )

              plot_button = gr.Button("Plot")

              with gr.Row(show_progress=True, variant='panel'):
                  plot_output = gr.Image(label="Plot")
+
+             # textual explanation below the image
+             plot_explanation = gr.Markdown(visible=False)
+
+             # ------------------------------------------------------------------
+             # callbacks for visualisation tab
+             # ------------------------------------------------------------------
+             vis_benchmark_type_selector.change(
                  update_metric_choices,
+                 inputs=[vis_benchmark_type_selector],
+                 outputs=[
+                     vis_x_metric_selector,
+                     vis_y_metric_selector,
+                     vis_aspect_type_selector,
+                     vis_dataset_selector,
+                     vis_single_metric_selector,
+                 ],
              )
+
              plot_button.click(
+                 generate_plot_and_explanation,
+                 inputs=[
+                     vis_benchmark_type_selector,
+                     vis_method_selector,
+                     vis_x_metric_selector,
+                     vis_y_metric_selector,
+                     vis_aspect_type_selector,
+                     vis_dataset_selector,
+                     vis_single_metric_selector,
+                 ],
+                 outputs=[plot_output, plot_explanation],
              )
+
+         # ------------------------------------------------------------------
+         # 3️⃣ About tab
+         # ------------------------------------------------------------------
+         with gr.TabItem("📝 About", elem_id="probe-benchmark-tab-table", id=3):
              with gr.Row():
                  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
              with gr.Row():
                  gr.Image(
+                     value="./src/data/PROBE_workflow_figure.jpg",
+                     label="PROBE Workflow Figure",
+                     elem_classes="about-image",
                  )
+
+         # ------------------------------------------------------------------
+         # 4️⃣ Submit tab
+         # ------------------------------------------------------------------
+         with gr.TabItem("🚀 Submit here! ", elem_id="probe-benchmark-tab-table", id=4):
              with gr.Row():
                  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")


              with gr.Row():
                  with gr.Column():
+                     model_name_textbox = gr.Textbox(label="Method name")
+                     revision_name_textbox = gr.Textbox(label="Revision Method Name")

                      benchmark_types = gr.CheckboxGroup(
                          choices=TASK_INFO,

                          label="Similarity Tasks",
                          interactive=True,
                      )
+
                      function_prediction_aspect = gr.Radio(
                          choices=function_prediction_aspect_options,
                          label="Function Prediction Aspects",
                          interactive=True,
                      )
+
                      family_prediction_dataset = gr.CheckboxGroup(
                          choices=family_prediction_dataset_options,
                          label="Family Prediction Datasets",
                          interactive=True,
                      )
+
                      function_dataset = gr.Textbox(
                          label="Function Prediction Datasets",
                          visible=False,
+                         value="All_Data_Sets",
                      )

                      save_checkbox = gr.Checkbox(
                          label="Save results for leaderboard and visualization",
+                         value=True,
                      )

              with gr.Row():
+                 human_file = gr.File(label="Representation file (CSV) for Human dataset", file_count="single", type='filepath')
+                 skempi_file = gr.File(label="Representation file (CSV) for SKEMPI dataset", file_count="single", type='filepath')
+
              submit_button = gr.Button("Submit Eval")
              submission_result = gr.Markdown()
              submit_button.click(

                  ],
              )

+     # ----------------------------------------------------------------------
+     # global refresh button & citation accordion
+     # ----------------------------------------------------------------------
      with gr.Row():
          data_run = gr.Button("Refresh")
          data_run.click(refresh_data, outputs=[data_component])

              show_copy_button=True,
          )

+ # -----------------------------------------------------------------------------
+ block.launch()
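
For reference, below is a minimal, self-contained sketch (not part of the commit) of the two Gradio callback patterns the new Visualization tab relies on: a dropdown whose change event toggles the visibility and choices of a dependent selector, and a button whose click callback returns both an image and a Markdown explanation. All names here (demo, toggle_options, make_plot, the metric strings) are illustrative, not taken from the repository.

# Minimal sketch of the Visualization-tab callback pattern (illustrative names).
import os
import tempfile

import gradio as gr
import matplotlib
matplotlib.use("Agg")  # render figures without a display
import matplotlib.pyplot as plt


def toggle_options(benchmark_type):
    # Show the metric dropdown only when a benchmark that needs it is chosen.
    return gr.update(visible=(benchmark_type == "similarity"),
                     choices=["sim_metric_a", "sim_metric_b"])


def make_plot(benchmark_type, metric):
    # Draw a placeholder figure, save it, and return (image_path, explanation).
    fig, ax = plt.subplots()
    ax.set_title(f"{benchmark_type}: {metric}")
    fd, path = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    fig.savefig(path)
    plt.close(fig)
    explanation = f"Plot of **{metric}** for the {benchmark_type} benchmark."
    return path, gr.update(value=explanation, visible=True)


with gr.Blocks() as demo:
    benchmark = gr.Dropdown(["similarity", "function"], label="Benchmark")
    metric = gr.Dropdown(choices=[], label="Metric", visible=False)
    plot_btn = gr.Button("Plot")
    image = gr.Image(label="Plot")
    note = gr.Markdown(visible=False)

    benchmark.change(toggle_options, inputs=benchmark, outputs=metric)
    plot_btn.click(make_plot, inputs=[benchmark, metric], outputs=[image, note])

if __name__ == "__main__":
    demo.launch()

Returning gr.update(..., visible=True) from the click callback is one way to reveal an explanation component that, like plot_explanation above, was created with visible=False.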