OliverUrbann committed
Commit bbeb72c · verified · 1 Parent(s): bc5a291

Optimizing RAM utilization during dataset loading (#2)


- add venv folder and memory mapped files to gitignore (14ec4ba9bf703563dc2fe7dba6d8921a15b19937)
- add pycache folder (b4ce5310d64c155c6bb37673730777584f207e6d)
- add preprocessed dataset (55635196d2682016f5fb21c09e774dc46ba6a8a7)
- add RAM utilization note (ea1a1e5bfbd3c96cbce2530bb085d1c5d88b5e2e)

.gitignore ADDED
@@ -0,0 +1,3 @@
+ __pycache__/
+ .venv/
+ *.npy
README.md CHANGED
@@ -43,6 +43,9 @@ If you use this dataset in your research, please cite it as follows:
 
  To get started with the **Fall Prediction Dataset for Humanoid Robots**, follow the steps below:
 
+ ### 0. Clone the Repository
+ Please make sure that Git Large File Storage (git-lfs) is installed before cloning this repository.
+
  ### 1. Set Up a Virtual Environment
 
  It's recommended to create a virtual environment to isolate dependencies. You can do this with the following command:
@@ -70,13 +73,20 @@ Once the virtual environment is active, install the necessary packages by running:
  ```bash
  pip install -r requirements.txt
  ```
+ If you have trouble downloading the requirements, check your internet connection. Alternatively, try increasing the pip timeout or upgrading your pip installation:
+ ```bash
+ # Increase the timeout to 120 seconds
+ pip install --default-timeout=120 -r requirements.txt
+ # or upgrade pip
+ python -m pip install --upgrade pip
+ ```
 
  ### 3. Run the Example Script
 
- To load and use the dataset for training a simple LSTM model, run the `usage_example.py` script:
+ To load and use the plain CSV dataset for training a simple LSTM model, run the `plain_dataset_usage_example.py` script (RAM utilization exceeds 16 GB):
 
  ```bash
- python usage_example.py
+ python plain_dataset_usage_example.py
  ```
 
  This script demonstrates how to:
@@ -86,7 +96,23 @@ This script demonstrates how to:
  - Train a basic LSTM model to predict falls
  - Evaluate the model on the test set
 
- Make sure to check the script and adjust the dataset paths if necessary. For further details, see the comments within the script.
+ To load and use an already prepared dataset with reduced RAM utilization for training a simple LSTM model, run the `lightweight_dataset_usage_example.py` script (RAM utilization stays below 2 GB):
+
+ ```bash
+ python lightweight_dataset_usage_example.py
+ ```
+
+ This script demonstrates how to:
+ - Convert the CSV dataset into a memory-mapped file
+ - Load the memory-mapped version of the dataset
+ - Train a basic LSTM model to predict falls
+ - Evaluate the model on the test set
+
+ The script `convert_and_load_dataset.py`, used by the lightweight example, demonstrates how to:
+ - Select the relevant sensor columns
+ - Split the data into training and test sets
+
+ Make sure to check the scripts and adjust the dataset paths if necessary. For further details, see the comments and docstrings within the scripts.
 
 
  ---
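
For reference, the two loading paths described in the README change boil down to the following sketch. Function names and default file paths are taken from `convert_and_load_dataset.py` added in this commit; the `psutil`-based memory check is only an illustrative assumption and is not part of the repository or its requirements.

```python
# Minimal sketch of the two loading paths described above (assumes this repository's
# convert_and_load_dataset.py is on the import path; psutil is an optional extra assumption).
import os
import psutil

from convert_and_load_dataset import convert_and_load_dataset, load_dataset

# First run: convert dataset.csv.bz2 chunk-by-chunk into memory-mapped .npy files
# X_train, X_test, y_train, y_test = convert_and_load_dataset('dataset.csv.bz2')

# Later runs (or when dataset.tar.bz2 is present): reuse the prepared memmaps
X_train, X_test, y_train, y_test = load_dataset('dataset.tar.bz2')

# The returned arrays are np.memmap-backed, so resident memory stays small
rss_gb = psutil.Process(os.getpid()).memory_info().rss / 1e9
print(f"X_train shape: {X_train.shape}, resident set size: {rss_gb:.2f} GB")
```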
convert_and_load_dataset.py ADDED
@@ -0,0 +1,233 @@
+ # Helper functions to reduce RAM utilization
+ # Please install the dependencies first:
+ # pip install -r requirements.txt
+
+ # Import necessary libraries
+ import os
+ import pickle
+ import tarfile
+ import pandas as pd
+ import numpy as np
+
+ from tqdm import tqdm
+ from tqdm import trange
+ from pathlib import Path
+ from sklearn.model_selection import train_test_split
+
+
+ def delete_temporary_files():
+     if Path('tmp_dataset_X.pkl').exists():
+         os.remove('tmp_dataset_X.pkl')
+     if Path('tmp_dataset_y.pkl').exists():
+         os.remove('tmp_dataset_y.pkl')
+     if Path('tmp_dataset_X.npy').exists():
+         os.remove('tmp_dataset_X.npy')
+     if Path('tmp_dataset_y.npy').exists():
+         os.remove('tmp_dataset_y.npy')
+
+     if Path('tmp_X_train.npy').exists():
+         os.remove('tmp_X_train.npy')
+     if Path('tmp_y_train.npy').exists():
+         os.remove('tmp_y_train.npy')
+     if Path('tmp_X_test.npy').exists():
+         os.remove('tmp_X_test.npy')
+     if Path('tmp_y_test.npy').exists():
+         os.remove('tmp_y_test.npy')
+
+
+
+ def load_dataset(file_path='dataset.tar.bz2'):
+     """
+     Decompress and load the already prepared dataset.
+
+     Parameters:
+     file_path (str): Path to the compressed version of the prepared dataset (default: dataset.tar.bz2)
+
+     Returns:
+     X_train (np.memmap): Memory-mapped NumPy array of the X training data
+     X_test (np.memmap): Memory-mapped NumPy array of the X test data
+     y_train (np.memmap): Memory-mapped NumPy array of the y training data
+     y_test (np.memmap): Memory-mapped NumPy array of the y test data
+     """
+
+     # Return the prepared dataset if it already exists
+     if Path('X_train.npy').exists() and Path('y_train.npy').exists() and Path('X_test.npy').exists() and Path('y_test.npy').exists():
+         X_train = np.load('X_train.npy', mmap_mode='r')
+         y_train = np.load('y_train.npy', mmap_mode='r')
+         X_test = np.load('X_test.npy', mmap_mode='r')
+         y_test = np.load('y_test.npy', mmap_mode='r')
+
+         return X_train, X_test, y_train, y_test
+
+     # Decompress the memory-mapped files
+     if Path(file_path).exists():
+         with tarfile.open(file_path) as dataset:
+             dataset.extractall(path='.')
+
+         # Load the dataset
+         dataset_X = np.load('dataset_X.npy', mmap_mode='r')
+         dataset_y = np.load('dataset_y.npy', mmap_mode='r')
+
+         # Create a train/test split with memory-mapped files
+         X_train, X_test, y_train, y_test = train_test_split_memmapped(dataset_X, dataset_y)
+
+         return X_train, X_test, y_train, y_test
+     else:
+         raise FileNotFoundError(f'{file_path} not found')
+
+ def convert_and_load_dataset(file_path='dataset.csv.bz2'):
+     """
+     Convert a CSV dataset into a NumPy memory-mapped dataset and load it.
+
+     This function transforms a given CSV dataset into a memory-mapped NumPy array.
+     Memory-mapping helps to reduce RAM usage by loading the dataset in smaller chunks.
+     However, it requires additional disk space during the conversion process.
+
+     Parameters:
+     file_path (str): Path to the CSV dataset file (default: dataset.csv.bz2)
+
+     Returns:
+     X_train (np.memmap): Memory-mapped NumPy array of the X training data
+     X_test (np.memmap): Memory-mapped NumPy array of the X test data
+     y_train (np.memmap): Memory-mapped NumPy array of the y training data
+     y_test (np.memmap): Memory-mapped NumPy array of the y test data
+     """
+
+     # Return the prepared dataset if it already exists
+     if Path('X_train.npy').exists() and Path('y_train.npy').exists() and Path('X_test.npy').exists() and Path('y_test.npy').exists():
+         return load_dataset()
+
+     # Load and prepare the dataset
+     delete_temporary_files()
+     with open('tmp_dataset_X.pkl', 'ab') as tmp_dataset_X, open('tmp_dataset_y.pkl', 'ab') as tmp_dataset_y:
+         shape = None
+         num_of_chunks = 0
+
+         # Load the dataset from a local file path
+         # Replace with a Hugging Face dataset call if applicable
+         for real_data_chunk in tqdm(pd.read_csv(file_path, compression='bz2', chunksize=4096), desc='Read and Prepare Dataset'):
+             # Select relevant columns (replace these with actual column names from your dataset)
+             # Here we assume that the dataset contains sensor readings like gyroscope and accelerometer data
+             relevant_columns = ['gyro_x', 'gyro_y', 'gyro_z', 'acc_x', 'acc_y', 'acc_z', 'upright']
+             sensordata_chunk = real_data_chunk[relevant_columns]
+
+             # Split the data into features (X) and labels (y)
+             # 'upright' is used as the label column indicating whether the robot is upright
+             X_chunk = np.array(sensordata_chunk.drop(columns=['upright']))  # Replace 'upright' with the actual label column
+             y_chunk = np.array(sensordata_chunk['upright'])
+
+             if shape is None:
+                 # Preview the dataset
+                 print('\n' + str(real_data_chunk.head()))
+
+             if shape is None:
+                 shape = np.array(X_chunk.shape)
+             else:
+                 shape[0] += X_chunk.shape[0]
+
+             pickle.dump(X_chunk, tmp_dataset_X)
+             pickle.dump(y_chunk, tmp_dataset_y)
+             num_of_chunks += 1
+
+     # Convert the dataset into a memory-mapped array stored in a binary file on disk
+     X_idx = 0
+     y_idx = 0
+     dataset_X = np.memmap('tmp_dataset_X.npy', mode='w+', dtype=np.float32, shape=(shape[0], shape[1], 1))
+     dataset_y = np.memmap('tmp_dataset_y.npy', mode='w+', dtype=np.float32, shape=(shape[0], 1))
+     with open('tmp_dataset_X.pkl', 'rb') as tmp_dataset_X, open('tmp_dataset_y.pkl', 'rb') as tmp_dataset_y:
+         for _ in trange(0, num_of_chunks, 1, desc='Convert Dataset'):
+             X_chunk = pickle.load(tmp_dataset_X)
+             y_chunk = pickle.load(tmp_dataset_y)
+
+             # Reshape data for LSTM input (assuming time-series data)
+             # Adjust the reshaping based on your dataset structure
+             for X_data in X_chunk:
+                 dataset_X[X_idx] = np.expand_dims(X_data, axis=-1)
+                 X_idx += 1
+             for y_data in y_chunk:
+                 dataset_y[y_idx] = np.expand_dims(y_data, axis=-1)
+                 y_idx += 1
+
+     # Delete the temporary pickle files
+     os.remove('tmp_dataset_X.pkl')
+     os.remove('tmp_dataset_y.pkl')
+
+     # Save the memory-mapped arrays
+     with open('dataset_X.npy', 'wb') as dataset_x_file, open('dataset_y.npy', 'wb') as dataset_y_file:
+         np.save(dataset_x_file, dataset_X, allow_pickle=False, fix_imports=True)
+         np.save(dataset_y_file, dataset_y, allow_pickle=False, fix_imports=True)
+
+     # Close and delete the temporary memmap files
+     dataset_X._mmap.close()
+     dataset_y._mmap.close()
+     os.remove('tmp_dataset_X.npy')
+     os.remove('tmp_dataset_y.npy')
+
+     # Reload the memory-mapped arrays
+     dataset_X = np.load('dataset_X.npy', mmap_mode='r')
+     dataset_y = np.load('dataset_y.npy', mmap_mode='r')
+
+     # Create a train/test split with memory-mapped files
+     X_train, X_test, y_train, y_test = train_test_split_memmapped(dataset_X, dataset_y)
+
+     return X_train, X_test, y_train, y_test
+
+
+ def train_test_split_memmapped(dataset_X, dataset_y, test_size=0.2, random_state=42):
+     """
+     Create memory-mapped files for the train and test datasets.
+
+     Parameters:
+     dataset_X (np.memmap): X part of the complete dataset
+     dataset_y (np.memmap): y part of the complete dataset
+     test_size (float): Proportion of the dataset used for the test split (default: 0.2)
+     random_state (int): Random state used for repeatability (default: 42)
+
+     Returns:
+     X_train (np.memmap): Memory-mapped NumPy array of the X training data
+     X_test (np.memmap): Memory-mapped NumPy array of the X test data
+     y_train (np.memmap): Memory-mapped NumPy array of the y training data
+     y_test (np.memmap): Memory-mapped NumPy array of the y test data
+     """
+     delete_temporary_files()
+
+     # Split the data into training and test sets
+     idxs = np.arange(dataset_X.shape[0])
+     train_idx, test_idx = train_test_split(idxs, test_size=test_size, random_state=random_state)
+
+     # Create memory-mapped files for the train and test sets
+     X_train = np.memmap('tmp_X_train.npy', dtype=dataset_X.dtype, mode='w+', shape=(len(train_idx), dataset_X.shape[1], 1))
+     y_train = np.memmap('tmp_y_train.npy', dtype=dataset_y.dtype, mode='w+', shape=(len(train_idx), dataset_y.shape[1]))
+     X_test = np.memmap('tmp_X_test.npy', dtype=dataset_X.dtype, mode='w+', shape=(len(test_idx), dataset_X.shape[1], 1))
+     y_test = np.memmap('tmp_y_test.npy', dtype=dataset_y.dtype, mode='w+', shape=(len(test_idx), dataset_y.shape[1]))
+
+     # Assign values to the train and test memmap arrays
+     X_train[:] = dataset_X[train_idx]
+     y_train[:] = dataset_y[train_idx]
+     X_test[:] = dataset_X[test_idx]
+     y_test[:] = dataset_y[test_idx]
+
+     # Save the memory-mapped arrays
+     with open('X_train.npy', 'wb') as X_train_file, open('y_train.npy', 'wb') as y_train_file, open('X_test.npy', 'wb') as X_test_file, open('y_test.npy', 'wb') as y_test_file:
+         np.save(X_train_file, X_train, allow_pickle=False, fix_imports=True)
+         np.save(y_train_file, y_train, allow_pickle=False, fix_imports=True)
+         np.save(X_test_file, X_test, allow_pickle=False, fix_imports=True)
+         np.save(y_test_file, y_test, allow_pickle=False, fix_imports=True)
+
+     X_train._mmap.close()
+     y_train._mmap.close()
+     X_test._mmap.close()
+     y_test._mmap.close()
+
+     # Delete the temporary memmap files
+     os.remove('tmp_X_train.npy')
+     os.remove('tmp_y_train.npy')
+     os.remove('tmp_X_test.npy')
+     os.remove('tmp_y_test.npy')
+
+     X_train = np.load('X_train.npy', mmap_mode='r')
+     y_train = np.load('y_train.npy', mmap_mode='r')
+     X_test = np.load('X_test.npy', mmap_mode='r')
+     y_test = np.load('y_test.npy', mmap_mode='r')
+
+     return X_train, X_test, y_train, y_test
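
As a side note on why this keeps RAM low: `np.load(..., mmap_mode='r')` only maps the file and materializes the slices that are actually accessed. A small hedged sketch, not part of the commit; the file name matches the `X_train.npy` written by `train_test_split_memmapped` above and the batch size is arbitrary:

```python
# Hedged sketch: reading a memory-mapped .npy file lazily.
import numpy as np

X_train = np.load('X_train.npy', mmap_mode='r')  # only the header is parsed here
print(X_train.shape, X_train.dtype)              # metadata available without bulk I/O

batch = np.asarray(X_train[:64])                 # only this slice is paged in and copied
print(f"{batch.nbytes / 1e6:.1f} MB materialized for one batch of 64 samples")
```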
dataset.tar.bz2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b32bfbbc2d3dececb55185fc344bd8e1c0228c70fdb5970ba0b9c9e04216d9b
+ size 100001092
lightweight_dataset_usage_example.py ADDED
@@ -0,0 +1,37 @@
+ # Usage Example for the Fall Prediction Dataset
+ # Please install the dependencies first:
+ # pip install -r requirements.txt
+
+ # Import necessary libraries
+ import tensorflow as tf
+
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Dense, LSTM, Dropout, Input
+ from convert_and_load_dataset import load_dataset, convert_and_load_dataset
+
+
+ # Example for local converting and loading (first-time usage takes a while)
+ # X_train, X_test, y_train, y_test = convert_and_load_dataset()
+
+ # Example for local loading (first-time usage may take a while)
+ X_train, X_test, y_train, y_test = load_dataset()
+
+ # Define a simple LSTM model
+ model = Sequential()
+ model.add(Input((X_train.shape[1], 1)))
+ model.add(LSTM(64))
+ model.add(Dropout(0.2))
+ model.add(Dense(1, activation='sigmoid'))
+
+ # Compile the model
+ model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
+
+ # Train the model
+ history = model.fit(X_train, y_train, epochs=10, batch_size=64, validation_data=(X_test, y_test))
+
+ # Evaluate the model on the test set
+ loss, accuracy = model.evaluate(X_test, y_test)
+ print(f"Test Accuracy: {accuracy * 100:.2f}%")
+
+ # You can save the model if needed
+ # model.save('fall_prediction_model.h5')
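
If the trained model should be persisted, the commented-out line at the end of the script can be adapted. A short continuation sketch, assuming the variables defined in the script above and a hypothetical output file name:

```python
# Continuation sketch (hypothetical file name, not repository code): save and restore the model.
from tensorflow.keras.models import load_model

model.save('fall_prediction_model.keras')              # native Keras format instead of legacy .h5
restored = load_model('fall_prediction_model.keras')   # reload for later inference
loss, accuracy = restored.evaluate(X_test, y_test)
print(f"Restored model accuracy: {accuracy * 100:.2f}%")
```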
usage_example.py → plain_dataset_usage_example.py RENAMED
@@ -6,7 +6,7 @@
  import pandas as pd
  import tensorflow as tf
  from tensorflow.keras.models import Sequential
- from tensorflow.keras.layers import Dense, LSTM, Dropout
+ from tensorflow.keras.layers import Dense, LSTM, Dropout, Input
  from sklearn.model_selection import train_test_split
 
  # Load the dataset from Huggingface or a local file path
@@ -36,6 +36,7 @@ X_test = X_test.values.reshape(X_test.shape[0], X_test.shape[1], 1)
 
  # Define a simple LSTM model
  model = Sequential()
+ model.add(Input((X_train.shape[1], 1)))
  model.add(LSTM(64, input_shape=(X_train.shape[1], 1)))
  model.add(Dropout(0.2))
  model.add(Dense(1, activation='sigmoid'))
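
The only functional change in the rename is the explicit `Input` layer. Reconstructed in isolation as a hedged sketch (the feature count of 6 is taken from the six gyro/acc columns selected in `convert_and_load_dataset.py`; the script itself uses `X_train.shape[1]`):

```python
# Reconstruction sketch of the model defined above, with an explicit Input layer.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, Input

n_features = 6  # gyro_x/y/z + acc_x/y/z; the script itself uses X_train.shape[1]

model = Sequential()
model.add(Input((n_features, 1)))   # explicit input specification added in this commit
model.add(LSTM(64))                 # with an Input layer, input_shape on the LSTM is redundant
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
```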
requirements.txt CHANGED
@@ -1,3 +1,4 @@
+ tqdm
  pandas
  tensorflow
  scikit-learn