import pickle

import pandas
import pyarrow
import pyarrow.parquet
# Gabe requested that the MegaScale dataset splits defined in ThermoMPNN be used as the default splits.
# ThermoMPNN/datasets.py
#   class MegaScaleDataset
#       def __init__(self, cfg, split):
#           fname = self.cfg.data_loc.megascale_csv
#           df = pd.read_csv(fname, usecols=["ddG_ML", "mut_type", "WT_name", "aa_seq", "dG_ML"])
#
#           # remove unreliable data and more complicated mutations
#           df = df.loc[df.ddG_ML != '-', :].reset_index(drop=True)
#           df = df.loc[
#               ~df.mut_type.str.contains("ins") &
#               ~df.mut_type.str.contains("del") &
#               ~df.mut_type.str.contains(":"), :].reset_index(drop=True)
#
#           splits = <load from self.cfg.data_loc.megascale_splits>
#
#           if self.split != 'all' and (cfg.reduce != 'prot' or self.split != 'train'):
#               self.wt_names = splits[split]
#
# local.yaml
#   data_loc:
#     megascale_csv: "<truncated>/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv"
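
# For reference, a minimal sketch (not called below) of the filtering step
# summarized in the comment above. The function name and csv_path argument are
# illustrative; csv_path would be whatever megascale_csv points to in local.yaml.
def _filter_megascale(csv_path):
    df = pandas.read_csv(
        csv_path,
        usecols=["ddG_ML", "mut_type", "WT_name", "aa_seq", "dG_ML"])
    # drop unreliable ddG values
    df = df.loc[df.ddG_ML != '-', :].reset_index(drop=True)
    # drop insertions, deletions, and multi-mutants
    df = df.loc[
        ~df.mut_type.str.contains("ins")
        & ~df.mut_type.str.contains("del")
        & ~df.mut_type.str.contains(":"), :].reset_index(drop=True)
    return df
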
# Export the ThermoMPNN MegaScale splits as a long-format table with one row
# per (split_name, id) pair, where id is the protein's WT_name.
with open("data/ThermoMPNN/dataset_splits/mega_splits.pkl", "rb") as f:
    mega_splits = pickle.load(f)  # dict: split name -> array of WT names

splits = []
for split_name, split_ids in mega_splits.items():
    splits.append(
        pandas.DataFrame({
            'split_name': split_name,
            'id': split_ids}))
splits = pandas.concat(splits)
splits.reset_index(drop=True, inplace=True)

pyarrow.parquet.write_table(
    pyarrow.Table.from_pandas(splits),
    where="intermediate/ThermoMPNN_splits.parquet")
parquet_file = pyarrow.parquet.ParquetFile("intermediate/ThermoMPNN_splits.parquet")
print(parquet_file.metadata)
# <pyarrow._parquet.FileMetaData object at 0x149f5d2667a0>
# created_by: parquet-cpp-arrow version 17.0.0
# num_columns: 2
# num_rows: 2020
# num_row_groups: 1
# format_version: 2.6
# serialized_size: 1881
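
# Quick sanity check that the exported table round-trips (pandas.read_parquet
# uses pyarrow under the hood here); the value_counts call is just illustrative.
check = pandas.read_parquet("intermediate/ThermoMPNN_splits.parquet")
print(check.split_name.value_counts())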