neelsj committed on
Commit e4c3cbb · verified · 1 Parent(s): cca467a

Update README.md

Files changed (1):
README.md  +110 -110
README.md CHANGED
@@ -1,115 +1,115 @@
 ---
-license: apache-2.0
+license: cdla-permissive-2.0
 dataset_info:
-- config_name: object_detection_single
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-- config_name: object_detection_pairs
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-- config_name: object_recognition_single
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-  - name: ground_truth
-    dtype: string
-- config_name: object_recognition_pairs
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-  - name: ground_truth
-    dtype: string
-- config_name: spatial_reasoning_lrtb_single
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-  - name: ground_truth
-    dtype: string
-- config_name: spatial_reasoning_lrtb_pairs
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-  - name: ground_truth
-    dtype: string
-- config_name: visual_prompting_single
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-  - name: ground_truth
-    dtype: string
-- config_name: visual_prompting_pairs
-  features:
-  - name: id
-    dtype: int32
-  - name: image
-    dtype: image
-  - name: prompt
-    dtype: string
-  - name: ground_truth
-    dtype: string
+- config_name: object_detection_single
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+- config_name: object_detection_pairs
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+- config_name: object_recognition_single
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+  - name: ground_truth
+    dtype: string
+- config_name: object_recognition_pairs
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+  - name: ground_truth
+    dtype: string
+- config_name: spatial_reasoning_lrtb_single
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+  - name: ground_truth
+    dtype: string
+- config_name: spatial_reasoning_lrtb_pairs
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+  - name: ground_truth
+    dtype: string
+- config_name: visual_prompting_single
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+  - name: ground_truth
+    dtype: string
+- config_name: visual_prompting_pairs
+  features:
+  - name: id
+    dtype: int32
+  - name: image
+    dtype: image
+  - name: prompt
+    dtype: string
+  - name: ground_truth
+    dtype: string
 configs:
-- config_name: object_detection_single
-  data_files:
-  - split: val
-    path: object_detection_single/object_detection_val_long_prompt.parquet
-- config_name: object_detection_pairs
-  data_files:
-  - split: val
-    path: object_detection_pairs/object_detection_val_long_prompt.parquet
-- config_name: object_recognition_single
-  data_files:
-  - split: val
-    path: spatial_reasoning_lrtb_single/recognition_val.parquet
-- config_name: object_recognition_pairs
-  data_files:
-  - split: val
-    path: spatial_reasoning_lrtb_pairs/recognition_val.parquet
-- config_name: spatial_reasoning_lrtb_single
-  data_files:
-  - split: val
-    path: spatial_reasoning_lrtb_single/spatial_reasoning_lrtb_single.parquet
-- config_name: spatial_reasoning_lrtb_pairs
-  data_files:
-  - split: val
-    path: spatial_reasoning_lrtb_pairs/spatial_reasoning_lrtb_pairs.parquet
-- config_name: visual_prompting_single
-  data_files:
-  - split: val
-    path: visual_prompting_single/visual_prompting_val.parquet
-- config_name: visual_prompting_pairs
-  data_files:
-  - split: val
-    path: visual_prompting_pairs/visual_prompting_val.parquet
+- config_name: object_detection_single
+  data_files:
+  - split: val
+    path: object_detection_single/object_detection_val_long_prompt.parquet
+- config_name: object_detection_pairs
+  data_files:
+  - split: val
+    path: object_detection_pairs/object_detection_val_long_prompt.parquet
+- config_name: object_recognition_single
+  data_files:
+  - split: val
+    path: spatial_reasoning_lrtb_single/recognition_val.parquet
+- config_name: object_recognition_pairs
+  data_files:
+  - split: val
+    path: spatial_reasoning_lrtb_pairs/recognition_val.parquet
+- config_name: spatial_reasoning_lrtb_single
+  data_files:
+  - split: val
+    path: spatial_reasoning_lrtb_single/spatial_reasoning_lrtb_single.parquet
+- config_name: spatial_reasoning_lrtb_pairs
+  data_files:
+  - split: val
+    path: spatial_reasoning_lrtb_pairs/spatial_reasoning_lrtb_pairs.parquet
+- config_name: visual_prompting_single
+  data_files:
+  - split: val
+    path: visual_prompting_single/visual_prompting_val.parquet
+- config_name: visual_prompting_pairs
+  data_files:
+  - split: val
+    path: visual_prompting_pairs/visual_prompting_val.parquet
 ---
 
 A key question for understanding multimodal performance is analyzing the ability for a model to have basic
@@ -194,4 +194,4 @@ Answer type: Open-ended
 
 Example for "pairs":
 
-{"images": ["val\\sheep_banana\\left\\landfill\\0000099_0000001_Places365_val_00031238.jpg"], "prompt": "What objects are in the red and yellow box in this image?", "ground_truth": "['sheep', 'banana']"}
+{"images": ["val\\sheep_banana\\left\\landfill\\0000099_0000001_Places365_val_00031238.jpg"], "prompt": "What objects are in the red and yellow box in this image?", "ground_truth": "['sheep', 'banana']"}