awacke1 committed
Commit d4b6a9f · verified · 1 Parent(s): 1cac3e1

Update app.py

Files changed (1)
  1. app.py +129 -425
app.py CHANGED
@@ -1,449 +1,153 @@
1
- import os
2
- import json
3
- import tempfile
4
- import zipfile
5
- from datetime import datetime
6
-
7
  import gradio as gr
8
- import numpy as np
9
- import torch
10
- from PIL import Image
11
-
12
- # Program A imports
13
  from utils import MEGABenchEvalDataLoader
14
- from constants import * # This is assumed to define CITATION_BUTTON_TEXT, CITATION_BUTTON_LABEL, TABLE_INTRODUCTION, LEADERBOARD_INTRODUCTION, DATA_INFO, SUBMIT_INTRODUCTION, BASE_MODEL_GROUPS, etc.
15
-
16
- # Program B imports
17
- import spaces
18
- from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, Qwen2_5_VLForConditionalGeneration
19
- from qwen_vl_utils import process_vision_info
20
- from gliner import GLiNER
21
 
22
- # ----------------------------------------------------------------
23
- # Combined CSS
24
- # ----------------------------------------------------------------
25
  current_dir = os.path.dirname(os.path.abspath(__file__))
26
- with open(os.path.join(current_dir, "static", "css", "style.css"), "r") as f:
27
  base_css = f.read()
28
- with open(os.path.join(current_dir, "static", "css", "table.css"), "r") as f:
29
  table_css = f.read()
30
 
31
- css_program_b = """
32
- /* Program B CSS */
33
- .gradio-container {
34
- max-width: 1200px !important;
35
- margin: 0 auto;
36
- padding: 20px;
37
- background-color: #f8f9fa;
38
- }
39
- .tabs {
40
- border-radius: 8px;
41
- background: white;
42
- padding: 20px;
43
- box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
44
- }
45
- .input-container, .output-container {
46
- background: white;
47
- border-radius: 8px;
48
- padding: 15px;
49
- margin: 10px 0;
50
- box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
51
- }
52
- .submit-btn {
53
- background-color: #2d31fa !important;
54
- border: none !important;
55
- padding: 8px 20px !important;
56
- border-radius: 6px !important;
57
- color: white !important;
58
- transition: all 0.3s ease !important;
59
- }
60
- .submit-btn:hover {
61
- background-color: #1f24c7 !important;
62
- transform: translateY(-1px);
63
- }
64
- #output {
65
- height: 500px;
66
- overflow: auto;
67
- border: 1px solid #e0e0e0;
68
- border-radius: 6px;
69
- padding: 15px;
70
- background: #ffffff;
71
- font-family: 'Arial', sans-serif;
72
- }
73
- .gr-dropdown {
74
- border-radius: 6px !important;
75
- border: 1px solid #e0e0e0 !important;
76
- }
77
- .gr-image-input {
78
- border: 2px dashed #ccc;
79
- border-radius: 8px;
80
- padding: 20px;
81
- transition: all 0.3s ease;
82
- }
83
- .gr-image-input:hover {
84
- border-color: #2d31fa;
85
- }
86
- """
87
- css_global = base_css + "\n" + table_css + "\n" + css_program_b
88
-
89
- # ----------------------------------------------------------------
90
- # Program A Global Initializations
91
- # ----------------------------------------------------------------
92
  default_loader = MEGABenchEvalDataLoader("./static/eval_results/Default")
93
  si_loader = MEGABenchEvalDataLoader("./static/eval_results/SI")
94
 
95
- # ----------------------------------------------------------------
96
- # Program B Global Initializations
97
- # ----------------------------------------------------------------
98
- gliner_model = GLiNER.from_pretrained("knowledgator/modern-gliner-bi-large-v1.0")
99
- DEFAULT_NER_LABELS = "person, organization, location, date, event"
100
-
101
- models = {
102
- "Qwen/Qwen2.5-VL-7B-Instruct": Qwen2_5_VLForConditionalGeneration.from_pretrained(
103
- "Qwen/Qwen2.5-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto"
104
- ).cuda().eval()
105
- }
106
- processors = {
107
- "Qwen/Qwen2.5-VL-7B-Instruct": AutoProcessor.from_pretrained(
108
- "Qwen/Qwen2.5-VL-7B-Instruct", trust_remote_code=True
109
  )
110
- }
111
 
112
- user_prompt = '<|user|>\n'
113
- assistant_prompt = '<|assistant|>\n'
114
- prompt_suffix = "<|end|>\n"
115
 
116
- # A simple metadata container for OCR results and entity information.
117
- class TextWithMetadata(list):
118
- def __init__(self, *args, **kwargs):
119
- super().__init__(*args)
120
- self.original_text = kwargs.get('original_text', '')
121
- self.entities = kwargs.get('entities', [])
122
 
123
- # ----------------------------------------------------------------
124
- # UI DEFINITION (placed at the top)
125
- # ----------------------------------------------------------------
126
- with gr.Blocks(css=css_global) as demo:
127
- with gr.Tabs():
128
- # -------------------------
129
- # Tab 1: Dashboard (Program A)
130
- # -------------------------
131
- with gr.TabItem("Dashboard"):
132
- with gr.Tabs(elem_classes="tab-buttons") as dashboard_tabs:
133
- # --- MEGA-Bench Leaderboard Tab ---
134
- with gr.TabItem("📊 MEGA-Bench"):
135
- # Inject table CSS (will be updated when the table is refreshed)
136
- css_style = gr.HTML(f"<style>{base_css}\n{table_css}</style>", visible=False)
137
-
138
- # Define captions for default vs. single-image tables
139
- default_caption = ("**Table 1: MEGA-Bench full results.** The number in the parentheses is the number of tasks "
140
- "of each keyword. <br> The Core set contains $N_{\\text{core}} = 440$ tasks evaluated by "
141
- "rule-based metrics, and the Open-ended set contains $N_{\\text{open}} = 65$ tasks evaluated by a "
142
- "VLM judge (we use GPT-4o-0806). <br> Different from the results in our paper, we only use the Core "
143
- "results with CoT prompting here for clarity and compatibility with the released data. <br> "
144
- "$\\text{Overall} \\ = \\ \\frac{\\text{Core} \\ \\cdot \\ N_{\\text{core}} \\ + \\ \\text{Open-ended} "
145
- "\\ \\cdot \\ N_{\\text{open}}}{N_{\\text{core}} \\ + \\ N_{\\text{open}}}$ <br> * indicates self-reported "
146
- "results from the model authors.")
147
- single_image_caption = ("**Table 2: MEGA-Bench Single-image setting results.** The number in the parentheses is the number of tasks "
148
- "in each keyword. <br> This subset contains 273 single-image tasks from the Core set and 42 single-image tasks "
149
- "from the Open-ended set. For open-source models, we drop the image input in the 1-shot demonstration example so that "
150
- "the entire query contains a single image only. <br> Compared to the default table, some models with only "
151
- "single-image support are added.")
152
-
153
- caption_component = gr.Markdown(
154
- value=default_caption,
155
- elem_classes="table-caption",
156
- latex_delimiters=[{"left": "$", "right": "$", "display": False}],
157
- )
158
-
159
- with gr.Row():
160
- super_group_selector = gr.Radio(
161
- choices=list(default_loader.SUPER_GROUPS.keys()),
162
- label="Select a dimension to display breakdown results. We use different column colors to distinguish the overall benchmark scores and breakdown results.",
163
- value=list(default_loader.SUPER_GROUPS.keys())[0]
164
- )
165
- model_group_selector = gr.Radio(
166
- choices=list(BASE_MODEL_GROUPS.keys()),
167
- label="Select a model group",
168
- value="All"
169
- )
170
-
171
- initial_headers, initial_data = default_loader.get_leaderboard_data(
172
- list(default_loader.SUPER_GROUPS.keys())[0], "All"
173
- )
174
- data_component = gr.Dataframe(
175
- value=initial_data,
176
- headers=initial_headers,
177
- datatype=["number", "html"] + ["number"] * (len(initial_headers) - 2),
178
- interactive=True,
179
- elem_classes="custom-dataframe",
180
- max_height=2400,
181
- column_widths=["100px", "240px"] + ["160px"] * 3 + ["210px"] * (len(initial_headers) - 5),
182
- )
183
-
184
- with gr.Row():
185
- with gr.Accordion("Citation", open=False):
186
- citation_button = gr.Textbox(
187
- value=CITATION_BUTTON_TEXT,
188
- label=CITATION_BUTTON_LABEL,
189
- elem_id="citation-button",
190
- lines=10,
191
- )
192
- gr.Markdown(TABLE_INTRODUCTION)
193
-
194
- with gr.Row():
195
- table_selector = gr.Radio(
196
- choices=["Default", "Single Image"],
197
- label="Select table to display. Default: all MEGA-Bench tasks; Single Image: single-image tasks only.",
198
- value="Default"
199
- )
200
-
201
- refresh_button = gr.Button("Refresh")
202
-
203
- # Wire up event handlers (functions defined below)
204
- refresh_button.click(
205
- fn=update_table_and_caption,
206
- inputs=[table_selector, super_group_selector, model_group_selector],
207
- outputs=[data_component, caption_component, css_style]
208
- )
209
- super_group_selector.change(
210
- fn=update_table_and_caption,
211
- inputs=[table_selector, super_group_selector, model_group_selector],
212
- outputs=[data_component, caption_component, css_style]
213
- )
214
- model_group_selector.change(
215
- fn=update_table_and_caption,
216
- inputs=[table_selector, super_group_selector, model_group_selector],
217
- outputs=[data_component, caption_component, css_style]
218
- )
219
- table_selector.change(
220
- fn=update_selectors,
221
- inputs=[table_selector],
222
- outputs=[super_group_selector, model_group_selector]
223
- ).then(
224
- fn=update_table_and_caption,
225
- inputs=[table_selector, super_group_selector, model_group_selector],
226
- outputs=[data_component, caption_component, css_style]
227
- )
228
-
229
- # --- Introduction Tab ---
230
- with gr.TabItem("📚 Introduction"):
231
- gr.Markdown(LEADERBOARD_INTRODUCTION)
232
- # --- Data Information Tab ---
233
- with gr.TabItem("📝 Data Information"):
234
- gr.Markdown(DATA_INFO, elem_classes="markdown-text")
235
- # --- Submit Tab ---
236
- with gr.TabItem("🚀 Submit"):
237
- with gr.Row():
238
- gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
239
-
240
- # -------------------------
241
- # Tab 2: Image Processing (Program B)
242
- # -------------------------
243
- with gr.TabItem("Image Processing"):
244
- # A default image is shown for context.
245
- gr.Image("Caracal.jpg", interactive=False)
246
- # It is important to create a state variable to store the OCR/NER result.
247
- ocr_state = gr.State()
248
- with gr.Tab(label="Image Input", elem_classes="tabs"):
249
- with gr.Row():
250
- with gr.Column(elem_classes="input-container"):
251
- input_img = gr.Image(label="Input Picture", elem_classes="gr-image-input")
252
- model_selector = gr.Dropdown(
253
- choices=list(models.keys()),
254
- label="Model",
255
- value="Qwen/Qwen2.5-VL-7B-Instruct",
256
- elem_classes="gr-dropdown"
257
- )
258
- with gr.Row():
259
- ner_checkbox = gr.Checkbox(label="Run Named Entity Recognition", value=False)
260
- ner_labels = gr.Textbox(
261
- label="NER Labels (comma-separated)",
262
- value=DEFAULT_NER_LABELS,
263
- visible=False
264
- )
265
- submit_btn = gr.Button(value="Submit", elem_classes="submit-btn")
266
- with gr.Column(elem_classes="output-container"):
267
- output_text = gr.HighlightedText(label="Output Text", elem_id="output")
268
- # Toggle visibility of the NER labels textbox.
269
- ner_checkbox.change(
270
- lambda x: gr.update(visible=x),
271
- inputs=[ner_checkbox],
272
- outputs=[ner_labels]
273
  )
274
- submit_btn.click(
275
- fn=run_example,
276
- inputs=[input_img, model_selector, ner_checkbox, ner_labels],
277
- outputs=[output_text, ocr_state]
278
  )
279
- with gr.Row():
280
- filename = gr.Textbox(label="Save filename (without extension)", placeholder="Enter filename to save")
281
- download_btn = gr.Button("Download Image & Text", elem_classes="submit-btn")
282
- download_output = gr.File(label="Download")
283
- download_btn.click(
284
- fn=create_zip,
285
- inputs=[input_img, filename, ocr_state],
286
- outputs=[download_output]
287
- )
288
 
289
- # ----------------------------------------------------------------
290
- # FUNCTION DEFINITIONS
291
- # ----------------------------------------------------------------
292
 
293
- def update_table_and_caption(table_type, super_group, model_group):
294
- """
295
- Updates the leaderboard DataFrame, caption and CSS based on the table type and selectors.
296
- """
297
- if table_type == "Default":
298
- headers, data = default_loader.get_leaderboard_data(super_group, model_group)
299
- caption = ("**Table 1: MEGA-Bench full results.** The number in the parentheses is the number of tasks "
300
- "of each keyword. <br> The Core set contains $N_{\\text{core}} = 440$ tasks evaluated by rule-based metrics, and the "
301
- "Open-ended set contains $N_{\\text{open}} = 65$ tasks evaluated by a VLM judge (we use GPT-4o-0806). <br> "
302
- "Different from the results in our paper, we only use the Core results with CoT prompting here for clarity and compatibility "
303
- "with the released data. <br> $\\text{Overall} \\ = \\ \\frac{\\text{Core} \\ \\cdot \\ N_{\\text{core}} \\ + \\ \\text{Open-ended} "
304
- "\\ \\cdot \\ N_{\\text{open}}}{N_{\\text{core}} \\ + \\ N_{\\text{open}}}$ <br> * indicates self-reported results from the model authors.")
305
- else: # Single-image table
306
- headers, data = si_loader.get_leaderboard_data(super_group, model_group)
307
- caption = ("**Table 2: MEGA-Bench Single-image setting results.** The number in the parentheses is the number of tasks "
308
- "in each keyword. <br> This subset contains 273 single-image tasks from the Core set and 42 single-image tasks from the Open-ended set. "
309
- "For open-source models, we drop the image input in the 1-shot demonstration example so that the entire query contains a single image only. <br> "
310
- "Compared to the default table, some models with only single-image support are added.")
311
-
312
- dataframe = gr.Dataframe(
313
- value=data,
314
- headers=headers,
315
- datatype=["number", "html"] + ["number"] * (len(headers) - 2),
316
- interactive=True,
317
- column_widths=["100px", "240px"] + ["160px"] * 3 + ["210px"] * (len(headers) - 5),
318
- )
319
- style_html = f"<style>{base_css}\n{table_css}</style>"
320
- return dataframe, caption, style_html
321
 
322
- def update_selectors(table_type):
323
- """
324
- Updates the options in the radio selectors based on the selected table type.
325
- """
326
- loader = default_loader if table_type == "Default" else si_loader
327
- return [gr.Radio.update(choices=list(loader.SUPER_GROUPS.keys())),
328
- gr.Radio.update(choices=list(loader.MODEL_GROUPS.keys()))]
329
 
330
- def array_to_image_path(image_array):
331
- """
332
- Converts a NumPy image array to a PIL Image, saves it to disk, and returns its path.
333
- """
334
- img = Image.fromarray(np.uint8(image_array))
335
- img.thumbnail((1024, 1024))
336
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
337
- filename = f"image_{timestamp}.png"
338
- img.save(filename)
339
- return os.path.abspath(filename)
340
 
341
- @spaces.GPU
342
- def run_example(image, model_id="Qwen/Qwen2.5-VL-7B-Instruct", run_ner=False, ner_labels=DEFAULT_NER_LABELS):
343
- """
344
- Given an input image, uses the selected VL model to perform OCR (and optionally NER).
345
- Returns the highlighted text and stores the raw OCR output in state.
346
- """
347
- text_input = "Convert the image to text."
348
- image_path = array_to_image_path(image)
349
-
350
- model = models[model_id]
351
- processor = processors[model_id]
352
-
353
- prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
354
- image_pil = Image.fromarray(image).convert("RGB")
355
- messages = [
356
- {
357
- "role": "user",
358
- "content": [
359
- {"type": "image", "image": image_path},
360
- {"type": "text", "text": text_input},
361
- ],
362
- }
363
- ]
364
- # Prepare text and vision inputs
365
- text_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
366
- image_inputs, video_inputs = process_vision_info(messages)
367
- inputs = processor(
368
- text=[text_full],
369
- images=image_inputs,
370
- videos=video_inputs,
371
- padding=True,
372
- return_tensors="pt",
373
- )
374
- inputs = inputs.to("cuda")
375
-
376
- # Generate model output
377
- generated_ids = model.generate(**inputs, max_new_tokens=1024)
378
- generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
379
- output_text = processor.batch_decode(
380
- generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
381
- )
382
- ocr_text = output_text[0]
383
-
384
- if run_ner:
385
- ner_results = gliner_model.predict_entities(ocr_text, ner_labels.split(","), threshold=0.3)
386
- highlighted_text = []
387
- last_end = 0
388
- for entity in sorted(ner_results, key=lambda x: x["start"]):
389
- if last_end < entity["start"]:
390
- highlighted_text.append((ocr_text[last_end:entity["start"]], None))
391
- highlighted_text.append((ocr_text[entity["start"]:entity["end"]], entity["label"]))
392
- last_end = entity["end"]
393
- if last_end < len(ocr_text):
394
- highlighted_text.append((ocr_text[last_end:], None))
395
- result = TextWithMetadata(highlighted_text, original_text=ocr_text, entities=ner_results)
396
- return result, result # one for display, one for state
397
- result = TextWithMetadata([(ocr_text, None)], original_text=ocr_text, entities=[])
398
- return result, result
399
 
400
- def create_zip(image, fname, ocr_result):
401
- """
402
- Creates a zip file containing the saved image, the OCR text, and a JSON of the OCR output.
403
- """
404
- if not fname or image is None:
405
- return None
406
- try:
407
- if isinstance(image, np.ndarray):
408
- image_pil = Image.fromarray(image)
409
- elif isinstance(image, Image.Image):
410
- image_pil = image
411
- else:
412
- return None
413
-
414
- with tempfile.TemporaryDirectory() as temp_dir:
415
- img_path = os.path.join(temp_dir, f"{fname}.png")
416
- image_pil.save(img_path)
417
-
418
- original_text = ocr_result.original_text if ocr_result else ""
419
- txt_path = os.path.join(temp_dir, f"{fname}.txt")
420
- with open(txt_path, 'w', encoding='utf-8') as f:
421
- f.write(original_text)
422
-
423
- json_data = {
424
- "text": original_text,
425
- "entities": ocr_result.entities if ocr_result else [],
426
- "image_file": f"{fname}.png"
427
- }
428
- json_path = os.path.join(temp_dir, f"{fname}.json")
429
- with open(json_path, 'w', encoding='utf-8') as f:
430
- json.dump(json_data, f, indent=2, ensure_ascii=False)
431
-
432
- output_dir = "downloads"
433
- os.makedirs(output_dir, exist_ok=True)
434
- zip_path = os.path.join(output_dir, f"{fname}.zip")
435
- with zipfile.ZipFile(zip_path, 'w') as zipf:
436
- zipf.write(img_path, os.path.basename(img_path))
437
- zipf.write(txt_path, os.path.basename(txt_path))
438
- zipf.write(json_path, os.path.basename(json_path))
439
- return zip_path
440
- except Exception as e:
441
- print(f"Error creating zip: {str(e)}")
442
- return None
443
 
444
- # ----------------------------------------------------------------
445
- # Launch the merged Gradio app
446
- # ----------------------------------------------------------------
447
  if __name__ == "__main__":
448
- demo.queue(api_open=False)
449
- demo.launch(debug=True)
1
  import gradio as gr
2
  from utils import MEGABenchEvalDataLoader
3
+ import os
4
+ from constants import *
5
 
6
+ # Get the directory of the current script
7
  current_dir = os.path.dirname(os.path.abspath(__file__))
8
+
9
+ # Construct paths to CSS files
10
+ base_css_file = os.path.join(current_dir, "static", "css", "style.css")
11
+ table_css_file = os.path.join(current_dir, "static", "css", "table.css")
12
+
13
+ # Read CSS files
14
+ with open(base_css_file, "r") as f:
15
  base_css = f.read()
16
+ with open(table_css_file, "r") as f:
17
  table_css = f.read()
18
 
19
+ # Initialize data loaders
20
  default_loader = MEGABenchEvalDataLoader("./static/eval_results/Default")
21
  si_loader = MEGABenchEvalDataLoader("./static/eval_results/SI")
22
 
23
+ with gr.Blocks() as block:
24
+ # Add a style element that we'll update
25
+ css_style = gr.HTML(
26
+ f"<style>{base_css}\n{table_css}</style>",
27
+ visible=False
28
  )
29
+
30
+ gr.Markdown(
31
+ LEADERBOARD_INTRODUCTION
32
+ )
33
+ with gr.Tabs(elem_classes="tab-buttons") as tabs:
34
+ with gr.TabItem("📊 MEGA-Bench", elem_id="qa-tab-table1", id=1):
35
+ with gr.Row():
36
+ with gr.Accordion("Citation", open=False):
37
+ citation_button = gr.Textbox(
38
+ value=CITATION_BUTTON_TEXT,
39
+ label=CITATION_BUTTON_LABEL,
40
+ elem_id="citation-button",
41
+ lines=10,
42
+ )
43
+ gr.Markdown(
44
+ TABLE_INTRODUCTION
45
+ )
46
 
47
+ with gr.Row():
48
+ table_selector = gr.Radio(
49
+ choices=["Default", "Single Image"],
50
+ label="Select table to display. Default: all MEGA-Bench tasks; Single Image: single-image tasks only.",
51
+ value="Default"
52
+ )
53
 
54
+ # Define different captions for each table
55
+ default_caption = "**Table 1: MEGA-Bench full results.** The number in the parentheses is the number of tasks of each keyword. <br> The Core set contains $N_{\\text{core}} = 440$ tasks evaluated by rule-based metrics, and the Open-ended set contains $N_{\\text{open}} = 65$ tasks evaluated by a VLM judge (we use GPT-4o-0806). <br> Different from the results in our paper, we only use the Core results with CoT prompting here for clarity and compatibility with the released data. <br> $\\text{Overall} \\ = \\ \\frac{\\text{Core} \\ \\cdot \\ N_{\\text{core}} \\ + \\ \\text{Open-ended} \\ \\cdot \\ N_{\\text{open}}}{N_{\\text{core}} \\ + \\ N_{\\text{open}}}$ <br> * indicates self-reported results from the model authors."
56
 
57
+ single_image_caption = "**Table 2: MEGA-Bench Single-image setting results.** The number in the parentheses is the number of tasks in each keyword. <br> This subset contains 273 single-image tasks from the Core set and 42 single-image tasks from the Open-ended set. For open-source models, we drop the image input in the 1-shot demonstration example so that the entire query contains a single image only. <br> Compared to the default table, some models with only single-image support are added."
58
+
59
+ caption_component = gr.Markdown(
60
+ value=default_caption,
61
+ elem_classes="table-caption",
62
+ latex_delimiters=[{"left": "$", "right": "$", "display": False}],
63
+ )
64
+
65
+ with gr.Row():
66
+ super_group_selector = gr.Radio(
67
+ choices=list(default_loader.SUPER_GROUPS.keys()),
68
+ label="Select a dimension to display breakdown results. We use different column colors to distinguish the overall benchmark scores and breakdown results.",
69
+ value=list(default_loader.SUPER_GROUPS.keys())[0]
70
  )
71
+ model_group_selector = gr.Radio(
72
+ choices=list(BASE_MODEL_GROUPS.keys()),
73
+ label="Select a model group",
74
+ value="All"
75
  )
76
 
77
+ initial_headers, initial_data = default_loader.get_leaderboard_data(list(default_loader.SUPER_GROUPS.keys())[0], "All")
78
+ data_component = gr.Dataframe(
79
+ value=initial_data,
80
+ headers=initial_headers,
81
+ datatype=["number", "html"] + ["number"] * (len(initial_headers) - 2),
82
+ interactive=False,
83
+ elem_classes="custom-dataframe",
84
+ max_height=2400,
85
+ column_widths=["100px", "240px"] + ["160px"] * 3 + ["210px"] * (len(initial_headers) - 5),
86
+ )
87
 
88
+ def update_table_and_caption(table_type, super_group, model_group):
89
+ if table_type == "Default":
90
+ headers, data = default_loader.get_leaderboard_data(super_group, model_group)
91
+ caption = default_caption
92
+ else: # Single-image
93
+ headers, data = si_loader.get_leaderboard_data(super_group, model_group)
94
+ caption = single_image_caption
95
+
96
+ return [
97
+ gr.Dataframe(
98
+ value=data,
99
+ headers=headers,
100
+ datatype=["number", "html"] + ["number"] * (len(headers) - 2),
101
+ interactive=False,
102
+ column_widths=["100px", "240px"] + ["160px"] * 3 + ["210px"] * (len(headers) - 5),
103
+ ),
104
+ caption,
105
+ f"<style>{base_css}\n{table_css}</style>"
106
+ ]
107
+
108
+ def update_selectors(table_type):
109
+ loader = default_loader if table_type == "Default" else si_loader
110
+ return [
111
+ gr.Radio(choices=list(loader.SUPER_GROUPS.keys())),
112
+ gr.Radio(choices=list(loader.MODEL_GROUPS.keys()))
113
+ ]
114
+
115
+ refresh_button = gr.Button("Refresh")
116
+
117
+ # Update click and change handlers to include caption updates
118
+ refresh_button.click(
119
+ fn=update_table_and_caption,
120
+ inputs=[table_selector, super_group_selector, model_group_selector],
121
+ outputs=[data_component, caption_component, css_style]
122
+ )
123
+ super_group_selector.change(
124
+ fn=update_table_and_caption,
125
+ inputs=[table_selector, super_group_selector, model_group_selector],
126
+ outputs=[data_component, caption_component, css_style]
127
+ )
128
+ model_group_selector.change(
129
+ fn=update_table_and_caption,
130
+ inputs=[table_selector, super_group_selector, model_group_selector],
131
+ outputs=[data_component, caption_component, css_style]
132
+ )
133
+ table_selector.change(
134
+ fn=update_selectors,
135
+ inputs=[table_selector],
136
+ outputs=[super_group_selector, model_group_selector]
137
+ ).then(
138
+ fn=update_table_and_caption,
139
+ inputs=[table_selector, super_group_selector, model_group_selector],
140
+ outputs=[data_component, caption_component, css_style]
141
+ )
142
 
143
+ with gr.TabItem("📝 Data Information", elem_id="qa-tab-table2", id=2):
144
+ gr.Markdown(DATA_INFO, elem_classes="markdown-text")
145
 
146
+ with gr.TabItem("🚀 Submit", elem_id="submit-tab", id=3):
147
+ with gr.Row():
148
+ gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
149
 
150
 
151
 
152
  if __name__ == "__main__":
153
+ block.launch(share=True)