# MegaScale / src / 02.1_gather_ThermoMPNN_splits.py
# (Hugging Face page-header residue from the original upload: author "maom",
#  commit message "update readme", commit 4d350b8)
import pandas
import pyarrow.parquet
import pickle
# Gabe requested that the splits defined in ThermoMPNN of the MegaScale dataset be the default splits
# ThermoMPNN/datasets.py
#    class MegaScaleDataset
#        def __init__(self, cfg, split):
# fname = self.cfg.data_loc.megascale_csv
# df = pd.read_csv(fname, usecols=["ddG_ML", "mut_type", "WT_name", "aa_seq", "dG_ML"])
#
# # remove unreliable data and more complicated mutations
# df = df.loc[df.ddG_ML != '-', :].reset_index(drop=True)
# df = df.loc[
# ~df.mut_type.str.contains("ins") &
# ~df.mut_type.str.contains("del") &
# ~df.mut_type.str.contains(":"), :].reset_index(drop=True)
#
# splits = <load from self.cfg.data_loc.megascale_splits>
#
# if self.split != 'all' and (cfg.reduce != 'prot' or self.split != 'train'):
# self.wt_names = splits[split]
#
#
#
# local.yaml
# data_loc:
# megascale_csv: "<truncated>/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv"
# Load the ThermoMPNN MegaScale split assignments (a dict mapping
# split_name -> array of wild-type protein ids) and flatten them into a
# single long-format DataFrame with columns [split_name, id].
with open("data/ThermoMPNN/dataset_splits/mega_splits.pkl", "rb") as pkl_handle:
    mega_splits = pickle.load(pkl_handle)

per_split_frames = [
    pandas.DataFrame({
        'split_name': name,
        'id': ids})
    for name, ids in mega_splits.items()
]
# ignore_index=True gives the same clean RangeIndex as the original
# concat + reset_index(drop=True).
splits = pandas.concat(per_split_frames, ignore_index=True)
# Persist the long-format split table to parquet for downstream steps.
pyarrow.parquet.write_table(
    pyarrow.Table.from_pandas(splits),
    where = "intermediate/ThermoMPNN_splits.parquet")

# Read the file back and report its metadata (column/row counts, format
# version) as a sanity check. The original bare `parquet_file.metadata`
# expression only displayed in a REPL/notebook; as a script it silently
# discarded the value, so print it explicitly to record it in the run log.
parquet_file = pyarrow.parquet.ParquetFile('intermediate/ThermoMPNN_splits.parquet')
print(parquet_file.metadata)
# <pyarrow._parquet.FileMetaData object at 0x149f5d2667a0>
# created_by: parquet-cpp-arrow version 17.0.0
# num_columns: 2
# num_rows: 2020
# num_row_groups: 1
# format_version: 2.6
# serialized_size: 1881