lixuejing committed
Commit beb73b8 · 1 Parent(s): caa7799
Files changed (2):
  1. app.py +46 -46
  2. src/about.py +10 -10
app.py CHANGED
@@ -230,29 +230,29 @@ with demo:
                 #     value=[],
                 #     interactive=True
                 # )
-                with gr.Column(min_width=320):
-                    #with gr.Box(elem_id="box-filter"):
-                    filter_columns_type = gr.CheckboxGroup(
-                        label="Model types",
-                        choices=[t.to_str() for t in ModelType],
-                        value=[t.to_str() for t in ModelType],
-                        interactive=True,
-                        elem_id="filter-columns-type",
-                    )
-                    filter_columns_precision = gr.CheckboxGroup(
-                        label="Precision",
-                        choices=[i.value.name for i in Precision],
-                        value=[i.value.name for i in Precision],
-                        interactive=True,
-                        elem_id="filter-columns-precision",
-                    )
-                    filter_columns_size = gr.CheckboxGroup(
-                        label="Model sizes (in billions of parameters)",
-                        choices=list(NUMERIC_INTERVALS.keys()),
-                        value=list(NUMERIC_INTERVALS.keys()),
-                        interactive=True,
-                        elem_id="filter-columns-size",
-                    )
+                #with gr.Column(min_width=320):
+                #    #with gr.Box(elem_id="box-filter"):
+                #    filter_columns_type = gr.CheckboxGroup(
+                #        label="Model types",
+                #        choices=[t.to_str() for t in ModelType],
+                #        value=[t.to_str() for t in ModelType],
+                #        interactive=True,
+                #        elem_id="filter-columns-type",
+                #    )
+                #    filter_columns_precision = gr.CheckboxGroup(
+                #        label="Precision",
+                #        choices=[i.value.name for i in Precision],
+                #        value=[i.value.name for i in Precision],
+                #        interactive=True,
+                #        elem_id="filter-columns-precision",
+                #    )
+                #    filter_columns_size = gr.CheckboxGroup(
+                #        label="Model sizes (in billions of parameters)",
+                #        choices=list(NUMERIC_INTERVALS.keys()),
+                #        value=list(NUMERIC_INTERVALS.keys()),
+                #        interactive=True,
+                #        elem_id="filter-columns-size",
+                #    )
 
 
                 leaderboard_table = gr.components.Dataframe(
@@ -359,29 +359,29 @@ with demo:
                 #     value=[],
                 #     interactive=True
                 # )
-                with gr.Column(min_width=320):
-                    #with gr.Box(elem_id="box-filter"):
-                    filter_columns_type = gr.CheckboxGroup(
-                        label="Model types",
-                        choices=[t.to_str() for t in ModelType],
-                        value=[t.to_str() for t in ModelType],
-                        interactive=True,
-                        elem_id="filter-columns-type",
-                    )
-                    filter_columns_precision = gr.CheckboxGroup(
-                        label="Precision",
-                        choices=[i.value.name for i in Precision],
-                        value=[i.value.name for i in Precision],
-                        interactive=True,
-                        elem_id="filter-columns-precision",
-                    )
-                    filter_columns_size = gr.CheckboxGroup(
-                        label="Model sizes (in billions of parameters)",
-                        choices=list(NUMERIC_INTERVALS.keys()),
-                        value=list(NUMERIC_INTERVALS.keys()),
-                        interactive=True,
-                        elem_id="filter-columns-size",
-                    )
+                #with gr.Column(min_width=320):
+                #    #with gr.Box(elem_id="box-filter"):
+                #    filter_columns_type = gr.CheckboxGroup(
+                #        label="Model types",
+                #        choices=[t.to_str() for t in ModelType],
+                #        value=[t.to_str() for t in ModelType],
+                #        interactive=True,
+                #        elem_id="filter-columns-type",
+                #    )
+                #    filter_columns_precision = gr.CheckboxGroup(
+                #        label="Precision",
+                #        choices=[i.value.name for i in Precision],
+                #        value=[i.value.name for i in Precision],
+                #        interactive=True,
+                #        elem_id="filter-columns-precision",
+                #    )
+                #    filter_columns_size = gr.CheckboxGroup(
+                #        label="Model sizes (in billions of parameters)",
+                #        choices=list(NUMERIC_INTERVALS.keys()),
+                #        value=list(NUMERIC_INTERVALS.keys()),
+                #        interactive=True,
+                #        elem_id="filter-columns-size",
+                #    )
 
 
                 leaderboard_table = gr.components.Dataframe(
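
For context on what the commented-out block did: below is a minimal, self-contained sketch of the CheckboxGroup-to-Dataframe filter pattern these controls implemented. The toy data, the filter_table callback, and the two type choices are illustrative assumptions; the real app wires these components through its own update callback and the ModelType, Precision, and NUMERIC_INTERVALS helpers.

# Minimal sketch of the filter pattern this commit disables (hypothetical data;
# the real app routes these controls through its own update callback).
import gradio as gr
import pandas as pd

df = pd.DataFrame({
    "Model": ["model-a", "model-b", "model-c"],
    "Type": ["pretrained", "fine-tuned", "pretrained"],
})

def filter_table(selected_types: list[str]) -> pd.DataFrame:
    # Keep only rows whose Type is among the checked boxes.
    return df[df["Type"].isin(selected_types)]

with gr.Blocks() as demo:
    with gr.Column(min_width=320):
        filter_columns_type = gr.CheckboxGroup(
            label="Model types",
            choices=["pretrained", "fine-tuned"],  # stand-in for [t.to_str() for t in ModelType]
            value=["pretrained", "fine-tuned"],
            interactive=True,
            elem_id="filter-columns-type",
        )
    leaderboard_table = gr.components.Dataframe(value=df)
    # Re-filter the table whenever the checkbox selection changes.
    filter_columns_type.change(
        filter_table, inputs=filter_columns_type, outputs=leaderboard_table
    )

demo.launch()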
src/about.py CHANGED
@@ -13,15 +13,15 @@ class Task:
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
     Where2Place = Task("Where2Place", "overall", "Where2Place")
-    blink_val_ev= Task("blink_val_ev", "overall", "blink_val_ev")
-    cv_bench_test = Task("cv_bench_test", "overall", "cv_bench_test")
-    robo_spatial_home_all = Task("robo_spatial_home_all", "overall", "robo_spatial_home_all")
-    embspatial_bench = Task("embspatial_bench", "overall", "embspatial_bench")
-    all_angles_bench = Task("all_angles_bench", "overall", "all_angles_bench")
-    vsi_bench_tiny = Task("vsi_bench_tiny", "overall", "vsi_bench_tiny")
+    blink_val_ev= Task("blink_val_ev", "overall", "Blink")
+    cv_bench_test = Task("cv_bench_test", "overall", "CVBench")
+    robo_spatial_home_all = Task("robo_spatial_home_all", "overall", "RoboSpatial-Home")
+    embspatial_bench = Task("embspatial_bench", "overall", "EmbspatialBench")
+    all_angles_bench = Task("all_angles_bench", "overall", "All-Angles Bench")
+    vsi_bench_tiny = Task("vsi_bench_tiny", "overall", "VSI-Bench")
     SAT = Task("SAT", "overall", "SAT")
-    egoplan_bench2 = Task("egoplan_bench2", "overall", "egoplan_bench2")
-    erqa = Task("erqa", "overall", "erqa")
+    egoplan_bench2 = Task("egoplan_bench2", "overall", "EgoPlan-Bench2")
+    erqa = Task("erqa", "overall", "ERQA")
 
 class Quotas(Enum):
     Perception = Task("Perception", "overall", "Perception")
@@ -38,7 +38,7 @@ class Quotas(Enum):
     SpatialReasoning_se = Task("SpatialReasoning", "Size estimation", "SR_Se")
     Prediction = Task("Prediction", "overall", "Prediction")
     Prediction_T = Task("Prediction", "Trajectory", "P_T")
-    Prediction_F = Task("Prediction", "Future prediction",P_Fp")
+    Prediction_F = Task("Prediction", "Future prediction","P_Fp")
     Planning = Task("Planning", "overall", "Planning")
     Planning_G = Task("Planning", "Goal Decomposition", "P_GD")
     Planning_N = Task("Planning", "Navigation", "P_N")
@@ -80,7 +80,7 @@ We hope to promote a more open ecosystem for embodied model developers to partic
 
 # How it works
 
-## Embodied verse tool - FlagEvalMM
+## Embodied Verse tool - FlagEvalMM
 FlagEvalMM is an open-source evaluation framework designed to comprehensively evaluate multimodal models. It provides a standardized way to assess models that use multiple modalities (text, image, video) across a variety of tasks and metrics.
 
 - Flexible architecture: supports multiple multimodal models and evaluation tasks, including VQA, image retrieval, text-to-image, and more.
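For reference, the Task container both enums rely on is defined just above the first hunk (visible in the `@@ ... class Task:` header). The reconstruction below follows the field comment in the diff and the standard Hugging Face leaderboard template, so treat the exact field names as an assumption; it shows why this commit's renames only change what the leaderboard displays, not which JSON keys are read.

from dataclasses import dataclass
from enum import Enum

# Reconstructed Task container (assumed field names, per the leaderboard template).
@dataclass
class Task:
    benchmark: str  # task_key in the results json file
    metric: str     # metric_key in the results json file
    col_name: str   # column name shown in the leaderboard

class Tasks(Enum):
    # Raw keys stay stable; only the display name becomes human-readable.
    vsi_bench_tiny = Task("vsi_bench_tiny", "overall", "VSI-Bench")
    erqa = Task("erqa", "overall", "ERQA")

print([t.value.col_name for t in Tasks])  # ['VSI-Bench', 'ERQA']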
 
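The FlagEvalMM description in the hunk above is high-level. As a conceptual illustration only (this is not FlagEvalMM's actual API; every name below is invented for the sketch), a standardized model/task interface of the kind the paragraph describes might look like this:

from typing import Any, Protocol

class MultimodalModel(Protocol):
    # Any model exposing this method can be evaluated, regardless of backend.
    def generate(self, prompt: str, images: list[Any]) -> str: ...

def evaluate(model: MultimodalModel, samples: list[dict]) -> float:
    # Each sample: {"prompt": str, "images": [...], "answer": str}; exact-match accuracy.
    correct = sum(
        model.generate(s["prompt"], s["images"]).strip() == s["answer"]
        for s in samples
    )
    return correct / len(samples)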