Commit
·
d1b244e
1
Parent(s):
dc0a88a
fixed new dataset
Browse files- README.md +14 -5
- env_num_test.zip +2 -2
- env_num_train.zip +2 -2
- env_num_valid.zip +2 -2
README.md
CHANGED
@@ -16,7 +16,7 @@ git clone https://huggingface.co/datasets/DolphinNie/dungeon-dataset
|
|
16 |
|
17 |
## 1. Data Explanation
|
18 |
|
19 |
-
This is the Map dataset from the open-sourced game [Brogue](https://github.com/tmewett/BrogueCE). It contains
|
20 |
|
21 |
Each map is stored in a `.csv` file as a `(32x32)` array, which is the map size.
|
22 |
|
@@ -87,8 +87,17 @@ import matplotlib.pyplot as plt
|
|
87 |
# Load dataset from hugging face
|
88 |
dataset = load_dataset("DolphinNie/dungeon-dataset")
|
89 |
|
90 |
-
|
91 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
92 |
dataset_train = list()
|
93 |
dataset_test = list()
|
94 |
dataset_valid = list()
|
@@ -98,11 +107,11 @@ def dataset_convert(dataset):
|
|
98 |
datapoint_num = int(dataset[name[i]].num_rows / 32)
|
99 |
dataset_tf = dataset[name[i]].to_pandas()
|
100 |
for n in range(datapoint_num):
|
101 |
-
env_num =
|
102 |
datasets[i].append(env_num)
|
103 |
return dataset_train, dataset_test, dataset_valid
|
104 |
|
105 |
-
dataset_train, dataset_test, dataset_valid =
|
106 |
|
107 |
# Visualize the datapoints if you want
|
108 |
def visualize_map(dungeon_map):
|
|
|
16 |
|
17 |
## 1. Data Explanation
|
18 |
|
19 |
+
This is the Map dataset from the open-sourced game [Brogue](https://github.com/tmewett/BrogueCE). It contains 49,000 training maps, 14,000 test maps, and 7,000 validation maps.
|
20 |
|
21 |
Each map is stored in a `.csv` file as a `(32x32)` array, which is the map size.
|
22 |
|
|
|
87 |
# Load dataset from hugging face
|
88 |
dataset = load_dataset("DolphinNie/dungeon-dataset")
|
89 |
|
90 |
+
|
91 |
+
def get_processed_dataset(load_dataset_from_pickle=False,
|
92 |
+
save_dataset_to_pickle=False,
|
93 |
+
pickle_save_path='dungeon-dataset.pkl'):
|
94 |
+
dataset = pull_hugging_face_dataset(load_dataset_from_pickle,
|
95 |
+
save_dataset_to_pickle,
|
96 |
+
pickle_save_path)
|
97 |
+
dataset_train, dataset_test, dataset_valid = convert_dataset(dataset)
|
98 |
+
return dataset_train, dataset_test, dataset_valid
|
99 |
+
|
100 |
+
def convert_dataset(dataset):
|
101 |
dataset_train = list()
|
102 |
dataset_test = list()
|
103 |
dataset_valid = list()
|
|
|
107 |
datapoint_num = int(dataset[name[i]].num_rows / 32)
|
108 |
dataset_tf = dataset[name[i]].to_pandas()
|
109 |
for n in range(datapoint_num):
|
110 |
+
env_num = dataset_tf[n * 32:(n + 1) * 32]
|
111 |
datasets[i].append(env_num)
|
112 |
return dataset_train, dataset_test, dataset_valid
|
113 |
|
114 |
+
dataset_train, dataset_test, dataset_valid = get_processed_dataset(load_dataset_from_pickle, save_dataset_to_pickle)
|
115 |
|
116 |
# Visualize the datapoints if you want
|
117 |
def visualize_map(dungeon_map):
|
env_num_test.zip
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:96a40d89fee5643190fb4c0279fcb2ddf8163c2f7f48e91c78740790f979f8e8
|
3 |
+
size 5966510
|
env_num_train.zip
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b70df308db028325a2c98de635bc88e3acd1d9956f9a73c9dbcdc7f1f4e2cb96
|
3 |
+
size 21061633
|
env_num_valid.zip
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7301cd6db76f25ea986244771d57d8c63f23ac94149c438c5954593905495ff2
|
3 |
+
size 3011923
|