Create README.md
---
# Example metadata to be added to a dataset card.
# Full dataset card template at https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md
language:
- en
license: mit # Example: apache-2.0 or any license from https://hf.co/docs/hub/repositories-licenses
tags:
- robotics
- manipulation
- rearrangement
- computer-vision
- reinforcement-learning
- imitation-learning
- rgbd
- rgb
- depth
- low-level-control
- whole-body-control
- home-assistant
- simulation
- maniskill
annotations_creators:
- machine-generated # Generated from RL policies with filtering
language_creators:
- machine-generated
language_details: en-US
pretty_name: ManiSkill-HAB SetTable Dataset
size_categories:
- 1M<n<10M # Dataset has 8K episodes with 1.6M transitions
# source_datasets: # None, original
task_categories:
- robotics
- reinforcement-learning
task_ids:
- grasping
- task-planning

configs:
  - config_name: pick-013_apple
    data_files:
      - split: trajectories
        path: pick/013_apple.h5
      - split: metadata
        path: pick/013_apple.json

  - config_name: pick-024_bowl
    data_files:
      - split: trajectories
        path: pick/024_bowl.h5
      - split: metadata
        path: pick/024_bowl.json

  - config_name: place-013_apple
    data_files:
      - split: trajectories
        path: place/013_apple.h5
      - split: metadata
        path: place/013_apple.json

  - config_name: place-024_bowl
    data_files:
      - split: trajectories
        path: place/024_bowl.h5
      - split: metadata
        path: place/024_bowl.json

  - config_name: open-fridge
    data_files:
      - split: trajectories
        path: open/fridge.h5
      - split: metadata
        path: open/fridge.json

  - config_name: open-kitchen_counter
    data_files:
      - split: trajectories
        path: open/kitchen_counter.h5
      - split: metadata
        path: open/kitchen_counter.json

  - config_name: close-fridge
    data_files:
      - split: trajectories
        path: close/fridge.h5
      - split: metadata
        path: close/fridge.json

  - config_name: close-kitchen_counter
    data_files:
      - split: trajectories
        path: close/kitchen_counter.h5
      - split: metadata
        path: close/kitchen_counter.json

---

# ManiSkill-HAB SetTable Dataset

**[Paper (arXiv TBA)]()**
| **[Website](https://arth-shukla.github.io/mshab)**
| **[Code](https://github.com/arth-shukla/mshab)**
| **[Models](https://huggingface.co/arth-shukla/mshab_checkpoints)**
| **[(Full) Dataset](https://arth-shukla.github.io/mshab/#dataset-section)**
| **[Supplementary](https://sites.google.com/view/maniskill-hab)**

<!-- Provide a quick summary of the dataset. -->

Whole-body, low-level control/manipulation demonstration dataset for ManiSkill-HAB SetTable.

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

Demonstration dataset for ManiSkill-HAB SetTable. Each subtask/object combination (e.g., pick 013_apple) has 1000 successful episodes (200 samples per demonstration) gathered using [RL policies](https://huggingface.co/arth-shukla/mshab_checkpoints) filtered for safe robot behavior with a rule-based event labeling system.

SetTable contains the Pick, Place, Open, and Close subtasks. Relative to the other MS-HAB long-horizon tasks (TidyHouse, PrepareGroceries), SetTable Pick, Place, Open, and Close are easy difficulty (on a scale of easy-medium-hard). The difficulty of SetTable primarily comes from skill chaining rather than from the individual subtasks.

### Related Datasets

Full information about the MS-HAB datasets (size, difficulty, links, etc.), including the other long-horizon tasks, is available [on the ManiSkill-HAB website](https://arth-shukla.github.io/mshab/#dataset-section).

- [ManiSkill-HAB TidyHouse Dataset](https://huggingface.co/datasets/arth-shukla/MS-HAB-TidyHouse)
- [ManiSkill-HAB PrepareGroceries Dataset](https://huggingface.co/datasets/arth-shukla/MS-HAB-PrepareGroceries)

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

### Direct Use

This dataset can be used to train vision-based learning-from-demonstrations and imitation learning methods, which can be evaluated with the [MS-HAB environments](https://github.com/arth-shukla/mshab). It may also be useful as synthetic data for computer vision tasks.
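
For example, a single subtask/object pair can be fetched directly from the Hub. A minimal sketch, assuming this repository's id is `arth-shukla/MS-HAB-SetTable` (adjust `repo_id` if the actual repository id differs):

```python
# Minimal sketch: download one subtask/object pair from this dataset repo.
# Assumes repo_id "arth-shukla/MS-HAB-SetTable"; replace if this repo's id differs.
from huggingface_hub import hf_hub_download

h5_path = hf_hub_download(
    repo_id="arth-shukla/MS-HAB-SetTable",
    repo_type="dataset",
    filename="pick/013_apple.h5",    # trajectories (see `configs` above)
)
json_path = hf_hub_download(
    repo_id="arth-shukla/MS-HAB-SetTable",
    repo_type="dataset",
    filename="pick/013_apple.json",  # episode metadata / event labels
)
print(h5_path, json_path)
```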

### Out-of-Scope Use

While blind state-based policies can be trained on this dataset, it is recommended to train vision-based policies to handle collisions and obstructions.

## Dataset Structure

Each subtask/object combination has files `[SUBTASK]/[OBJECT].json` and `[SUBTASK]/[OBJECT].h5`. The JSON file contains episode metadata, event labels, etc., while the HDF5 file contains the demonstration data.
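
A minimal inspection sketch, assuming the two files for one subtask/object pair have been downloaded locally; the internal HDF5 key layout is not documented in this card, so the snippet only lists top-level keys:

```python
# Minimal sketch: inspect one subtask/object pair (paths assume local copies).
import json

import h5py  # pip install h5py

# Episode metadata, event labels, etc.
with open("pick/013_apple.json") as f:
    meta = json.load(f)
print(type(meta))

# Demonstration data; the exact group/key layout is not specified in this card,
# so only the top-level keys are listed here.
with h5py.File("pick/013_apple.h5", "r") as f:
    print(list(f.keys())[:10])
```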

## Dataset Creation

<!-- TODO (arth): link paper appendix, maybe html, for the event labeling system -->
The data is gathered using [RL policies](https://huggingface.co/arth-shukla/mshab_checkpoints) filtered for safe robot behavior with a rule-based event labeling system.

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

The dataset is purely synthetic.

While MS-HAB supports high-quality ray-traced rendering, this dataset uses ManiSkill's default rendering for data generation, for efficiency. However, users can generate their own data with the [data generation code](https://github.com/arth-shukla/mshab/blob/main/mshab/utils/gen/gen_data.py).

<!-- TODO (arth): citation -->
<!-- ## Citation [TBA]

[Citation TBA] -->