Datasets:
Formats:
csv
Size:
10M - 100M
Tags:
humanoid-robotics
fall-prediction
machine-learning
sensor-data
robotics
temporal-convolutional-networks
License:
# Helper functions to reduce RAM utilization
# Please install the dependencies first:
#   pip install -r requirements.txt
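# The requirements.txt file itself is not shown here; based on the imports
# below it is assumed to contain at least: pandas, numpy, tqdm, scikit-learn.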
# Import necessary libraries
import os
import pickle
import tarfile
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
from pathlib import Path
from sklearn.model_selection import train_test_split
def delete_temporary_files():
    """Remove any temporary files left over from previous runs."""
    temporary_files = [
        'tmp_dataset_X.pkl', 'tmp_dataset_y.pkl',
        'tmp_dataset_X.npy', 'tmp_dataset_y.npy',
        'tmp_X_train.npy', 'tmp_y_train.npy',
        'tmp_X_test.npy', 'tmp_y_test.npy',
    ]
    for file_name in temporary_files:
        if Path(file_name).exists():
            os.remove(file_name)
def load_dataset(file_path='dataset.tar.bz2'):
    """
    Decompress and load an already prepared dataset.

    Parameters:
        file_path (str): Path to the compressed version of the prepared dataset (default 'dataset.tar.bz2')

    Returns:
        X_train (np.memmap): Memory-mapped NumPy array of the X training data
        X_test (np.memmap): Memory-mapped NumPy array of the X test data
        y_train (np.memmap): Memory-mapped NumPy array of the y training data
        y_test (np.memmap): Memory-mapped NumPy array of the y test data
    """
    # Return the prepared dataset if it already exists
    if Path('X_train.npy').exists() and Path('y_train.npy').exists() \
            and Path('X_test.npy').exists() and Path('y_test.npy').exists():
        X_train = np.load('X_train.npy', mmap_mode='r')
        y_train = np.load('y_train.npy', mmap_mode='r')
        X_test = np.load('X_test.npy', mmap_mode='r')
        y_test = np.load('y_test.npy', mmap_mode='r')
        return X_train, X_test, y_train, y_test
    # Decompress the memory-mapped files
    if not Path(file_path).exists():
        raise FileNotFoundError(f'Dataset archive not found: {file_path}')
    with tarfile.open(file_path) as dataset:
        dataset.extractall(path='.')
    # Load the dataset
    dataset_X = np.load('dataset_X.npy', mmap_mode='r')
    dataset_y = np.load('dataset_y.npy', mmap_mode='r')
    # Create a train/test split with memory-mapped files
    X_train, X_test, y_train, y_test = train_test_split_memmapped(dataset_X, dataset_y)
    return X_train, X_test, y_train, y_test
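# Archive layout that load_dataset() expects (an assumption inferred from the
# file names loaded above and the shapes created during conversion):
#   dataset.tar.bz2
#   ├── dataset_X.npy   # features, float32, shape (num_samples, num_features, 1)
#   └── dataset_y.npy   # labels,   float32, shape (num_samples, 1)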
def convert_and_load_dataset(file_path='dataset.csv.bz2'):
    """
    Convert a CSV dataset into a NumPy memory-mapped dataset and load it.

    This function transforms a given CSV dataset into a memory-mapped NumPy array.
    Memory-mapping helps to reduce RAM usage by loading the dataset in smaller chunks.
    However, it requires additional disk space during the conversion process.

    Parameters:
        file_path (str): Path to the CSV dataset file (default 'dataset.csv.bz2')

    Returns:
        X_train (np.memmap): Memory-mapped NumPy array of the X training data
        X_test (np.memmap): Memory-mapped NumPy array of the X test data
        y_train (np.memmap): Memory-mapped NumPy array of the y training data
        y_test (np.memmap): Memory-mapped NumPy array of the y test data
    """
    # Return the prepared dataset if it already exists
    if Path('X_train.npy').exists() and Path('y_train.npy').exists() \
            and Path('X_test.npy').exists() and Path('y_test.npy').exists():
        return load_dataset()
    # Load and prepare the dataset
    delete_temporary_files()
    with open('tmp_dataset_X.pkl', 'ab') as tmp_dataset_X, open('tmp_dataset_y.pkl', 'ab') as tmp_dataset_y:
        shape = None
        num_of_chunks = 0
        # Load the dataset from a local file path
        # (replace with a Hugging Face dataset call if applicable)
        for real_data_chunk in tqdm(pd.read_csv(file_path, compression='bz2', chunksize=4096), desc='Read and Prepare Dataset'):
            # Select the relevant columns (replace these with the actual column names of your dataset)
            # Here the dataset is assumed to contain gyroscope and accelerometer readings
            relevant_columns = ['gyro_x', 'gyro_y', 'gyro_z', 'acc_x', 'acc_y', 'acc_z', 'upright']
            sensordata_chunk = real_data_chunk[relevant_columns]
            # Split the data into features (X) and labels (y);
            # 'upright' is the label column indicating whether the robot is upright or has fallen
            X_chunk = np.array(sensordata_chunk.drop(columns=['upright']))
            y_chunk = np.array(sensordata_chunk['upright'])
            if shape is None:
                # Preview the first chunk of the dataset
                print('\n' + str(real_data_chunk.head()))
                shape = np.array(X_chunk.shape)
            else:
                shape[0] += X_chunk.shape[0]
            pickle.dump(X_chunk, tmp_dataset_X)
            pickle.dump(y_chunk, tmp_dataset_y)
            num_of_chunks += 1
    # Convert the dataset into memory-mapped arrays stored in binary files on disk
    X_idx = 0
    y_idx = 0
    dataset_X = np.memmap('tmp_dataset_X.npy', mode='w+', dtype=np.float32, shape=(shape[0], shape[1], 1))
    dataset_y = np.memmap('tmp_dataset_y.npy', mode='w+', dtype=np.float32, shape=(shape[0], 1))
    with open('tmp_dataset_X.pkl', 'rb') as tmp_dataset_X, open('tmp_dataset_y.pkl', 'rb') as tmp_dataset_y:
        for _ in trange(num_of_chunks, desc='Convert Dataset'):
            X_chunk = pickle.load(tmp_dataset_X)
            y_chunk = pickle.load(tmp_dataset_y)
            # Add a trailing channel dimension for time-series model input
            # (adjust the reshaping to your dataset structure and model architecture)
            for X_data in X_chunk:
                dataset_X[X_idx] = np.expand_dims(X_data, axis=-1)
                X_idx += 1
            for y_data in y_chunk:
                dataset_y[y_idx] = np.expand_dims(y_data, axis=-1)
                y_idx += 1
    # Delete the temporary pickle files
    os.remove('tmp_dataset_X.pkl')
    os.remove('tmp_dataset_y.pkl')
    # Save the memory-mapped arrays
    with open('dataset_X.npy', 'wb') as dataset_X_file, open('dataset_y.npy', 'wb') as dataset_y_file:
        np.save(dataset_X_file, dataset_X, allow_pickle=False, fix_imports=True)
        np.save(dataset_y_file, dataset_y, allow_pickle=False, fix_imports=True)
    # Close and delete the temporary memory-mapped files
    dataset_X._mmap.close()
    dataset_y._mmap.close()
    os.remove('tmp_dataset_X.npy')
    os.remove('tmp_dataset_y.npy')
    # Reload the memory-mapped arrays in read-only mode
    dataset_X = np.load('dataset_X.npy', mmap_mode='r')
    dataset_y = np.load('dataset_y.npy', mmap_mode='r')
    # Create a train/test split with memory-mapped files
    X_train, X_test, y_train, y_test = train_test_split_memmapped(dataset_X, dataset_y)
    return X_train, X_test, y_train, y_test
def train_test_split_memmapped(dataset_X, dataset_y, test_size=0.2, random_state=42):
    """
    Create memory-mapped files for the train and test datasets.

    Parameters:
        dataset_X (np.memmap): X part of the complete dataset
        dataset_y (np.memmap): y part of the complete dataset
        test_size (float): Proportion of the dataset used for the test split (default 0.2)
        random_state (int): Random state used for reproducibility (default 42)

    Returns:
        X_train (np.memmap): Memory-mapped NumPy array of the X training data
        X_test (np.memmap): Memory-mapped NumPy array of the X test data
        y_train (np.memmap): Memory-mapped NumPy array of the y training data
        y_test (np.memmap): Memory-mapped NumPy array of the y test data
    """
    delete_temporary_files()
    # Split the sample indices into training and test sets
    idxs = np.arange(dataset_X.shape[0])
    train_idx, test_idx = train_test_split(idxs, test_size=test_size, random_state=random_state)
    # Create memory-mapped files for the train and test sets
    X_train = np.memmap('tmp_X_train.npy', dtype=dataset_X.dtype, mode='w+', shape=(len(train_idx), dataset_X.shape[1], 1))
    y_train = np.memmap('tmp_y_train.npy', dtype=dataset_y.dtype, mode='w+', shape=(len(train_idx), dataset_y.shape[1]))
    X_test = np.memmap('tmp_X_test.npy', dtype=dataset_X.dtype, mode='w+', shape=(len(test_idx), dataset_X.shape[1], 1))
    y_test = np.memmap('tmp_y_test.npy', dtype=dataset_y.dtype, mode='w+', shape=(len(test_idx), dataset_y.shape[1]))
    # Assign values to the train and test memmap arrays
    X_train[:] = dataset_X[train_idx]
    y_train[:] = dataset_y[train_idx]
    X_test[:] = dataset_X[test_idx]
    y_test[:] = dataset_y[test_idx]
    # Save the memory-mapped arrays
    with open('X_train.npy', 'wb') as X_train_file, open('y_train.npy', 'wb') as y_train_file, \
            open('X_test.npy', 'wb') as X_test_file, open('y_test.npy', 'wb') as y_test_file:
        np.save(X_train_file, X_train, allow_pickle=False, fix_imports=True)
        np.save(y_train_file, y_train, allow_pickle=False, fix_imports=True)
        np.save(X_test_file, X_test, allow_pickle=False, fix_imports=True)
        np.save(y_test_file, y_test, allow_pickle=False, fix_imports=True)
    # Close and delete the temporary memory-mapped files
    X_train._mmap.close()
    y_train._mmap.close()
    X_test._mmap.close()
    y_test._mmap.close()
    os.remove('tmp_X_train.npy')
    os.remove('tmp_y_train.npy')
    os.remove('tmp_X_test.npy')
    os.remove('tmp_y_test.npy')
    # Reload the saved arrays in read-only memory-mapped mode
    X_train = np.load('X_train.npy', mmap_mode='r')
    y_train = np.load('y_train.npy', mmap_mode='r')
    X_test = np.load('X_test.npy', mmap_mode='r')
    y_test = np.load('y_test.npy', mmap_mode='r')
    return X_train, X_test, y_train, y_test
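# Minimal usage sketch. The file name and column layout follow the defaults
# above ('dataset.csv.bz2' with gyro/acc feature columns and an 'upright'
# label); adapt them to your copy of the dataset.
if __name__ == '__main__':
    X_train, X_test, y_train, y_test = convert_and_load_dataset('dataset.csv.bz2')
    print('X_train:', X_train.shape, X_train.dtype)
    print('y_train:', y_train.shape, y_train.dtype)
    print('X_test: ', X_test.shape, X_test.dtype)
    print('y_test: ', y_test.shape, y_test.dtype)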