haneulpark committed on
Commit
30a12b9
·
verified ·
1 Parent(s): 71a11f4

Delete Molecule3D preprocessing script.py

Browse files
Files changed (1) hide show
  1. Molecule3D preprocessing script.py +0 -257
Molecule3D preprocessing script.py DELETED
@@ -1,257 +0,0 @@
1
# This is a script for Molecule3D dataset preprocessing.

# 1. Load modules
#
# Dependencies — install from the shell BEFORE running this script:
#   pip install rdkit
#   pip install molvs
# BUG FIX: the original file contained the bare `pip install ...` lines as
# Python statements, which is a SyntaxError in a .py script; they are shell
# commands and belong in a comment (or a requirements file).

import csv
import json
import os
import urllib.request

import numpy as np
import pandas as pd
import tqdm

import molvs
import rdkit
from rdkit import Chem

# Shared MolVS helpers reused by the sanitization step (section 5).
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
21
-
22
-
23
- # 2. Download the original dataset
24
-
25
- # Original data
26
- # Molecule3D: A Benchmark for Predicting 3D Geometries from Molecular Graphs
27
- # Zhao Xu, Youzhi Luo, Xuan Zhang, Xinyi Xu, Yaochen Xie, Meng Liu, Kaleb Dickerson, Cheng Deng, Maho Nakata, Shuiwang Ji
28
-
29
- # Please download the sdf files from the link below:
30
- # https://drive.google.com/drive/u/2/folders/1y-EyoDYMvWZwClc2uvXrM4_hQBtM85BI
31
-
32
- # And please save the files to your local directory
33
-
34
-
35
# 3. This part adds SMILES in addition to SDF and saves CSV files.

# Index ranges covered by each downloaded SDF chunk.
file_ranges = [
    (0, 1000000),
    (1000001, 2000000),
    (2000001, 3000000),
    (3000001, 3899647),
]

# Base directory for input and output files.
base_dir = '/YOUR LOCAL DIRECTORY/'  # Please change this part

for start, end in file_ranges:
    sdf_file = os.path.join(base_dir, f'combined_mols_{start}_to_{end}.sdf')
    # BUG FIX: the original wrote 'smiles_{start}_{end}.csv', but the listing
    # below and every later step (sections 4-6) read
    # 'smiles_sdf_{start}_{end}.csv' — the names now agree.
    output_csv = os.path.join(base_dir, f'smiles_sdf_{start}_{end}.csv')

    # Read the SDF file; entries RDKit cannot parse come back as None.
    suppl = Chem.SDMolSupplier(sdf_file)

    # Write a CSV with a sequential index and the canonical SMILES.
    with open(output_csv, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['index', 'SMILES'])

        for idx, mol in enumerate(suppl):
            if mol is None:
                continue  # skip unparseable records rather than crash

            smiles = Chem.MolToSmiles(mol)
            writer.writerow([f'{idx + start + 1}', smiles])


''' These files would be stored:
smiles_sdf_0_1000000.csv
smiles_sdf_1000001_2000000.csv
smiles_sdf_2000001_3000000.csv
smiles_sdf_3000001_3899647.csv'''
73
-
74
-
75
# 4. Check if there are any missing SMILES or SDF entries.

# Assumes 'base_dir' was already set in section 3 above.
_csv_names = [
    'smiles_sdf_0_1000000.csv',
    'smiles_sdf_1000001_2000000.csv',
    'smiles_sdf_2000001_3000000.csv',
    'smiles_sdf_3000001_3899647.csv',
]

df1, df2, df3, df4 = (
    pd.read_csv(os.path.join(base_dir, name)) for name in _csv_names
)

# Rows containing any NaN indicate a missing SMILES (or a gap in the SDF
# source). Report them per file so gaps can be traced back to their chunk.
for _name, _df in zip(_csv_names, [df1, df2, df3, df4]):
    _missing = _df[_df.isna().any(axis=1)]
    print(f'For {_name} file : ', _missing)
91
-
92
-
93
-
94
# 5. Sanitize the molecules with MolVS.
# NOTE: this step can take a few hours over the full dataset.


def sanitize_and_report(df, output_name):
    """Standardize every SMILES in *df* and write the result to *output_name*.

    Adds an 'X' column holding the cleaned canonical SMILES. The per-molecule
    pipeline is: parse -> MolVS standardize -> remove salt fragments ->
    canonical SMILES. Any remaining MolVS validation alerts are printed
    (most are salt forms and/or molecules that are not neutralized).
    """
    df['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in df['SMILES']
    ]

    problems = []
    for index, row in tqdm.tqdm(df.iterrows()):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))

    # Most are because it includes the salt form and/or it is not neutralized.
    for result, alert in problems:
        print(f"SMILES: {result}, problem: {alert[0]}")

    df.to_csv(output_name)


# The original file repeated this ~20-line stanza verbatim for each chunk;
# a single helper removes the quadruplication.
sanitize_and_report(df1, 'smiles_sdf_0_1000000_sanitized.csv')
sanitize_and_report(df2, 'smiles_sdf_1000001_2000000_sanitized.csv')
sanitize_and_report(df3, 'smiles_sdf_2000001_3000000_sanitized.csv')
sanitize_and_report(df4, 'smiles_sdf_3000001_3899647_sanitized.csv')
187
-
188
-
189
# 6. Concatenate the four sanitized files into one long file.

_sanitized_names = [
    'smiles_sdf_0_1000000_sanitized.csv',
    'smiles_sdf_1000001_2000000_sanitized.csv',
    'smiles_sdf_2000001_3000000_sanitized.csv',
    'smiles_sdf_3000001_3899647_sanitized.csv',
]

# Stack the chunks row-wise; ignore_index renumbers rows 0..N-1.
_frames = [pd.read_csv(name) for name in _sanitized_names]
smiles_sdf_concatenated = pd.concat(_frames, ignore_index=True)

smiles_sdf_concatenated.to_csv('smiles_sdf_concatenated.csv', index=False)
199
-
200
-
201
# 7. Combine the properties file with smiles_sdf_concatenated.csv.

smiles_sdf_concatenated = pd.read_csv('smiles_sdf_concatenated.csv')

# 'properties.csv' is also from the download link provided above.
properties = pd.read_csv('properties.csv')

# Side-by-side join on row position (axis=1), not on any key column.
smiles_sdf_properties_concatenated = pd.concat(
    [smiles_sdf_concatenated, properties],
    axis=1,
)

smiles_sdf_properties_concatenated.to_csv(
    'smiles_sdf_properties.csv',
    index=False,
)
210
-
211
-
212
# 8. Select the columns of interest and rename them.

columns_selected = smiles_sdf_properties_concatenated[
    ['Unnamed: 0', 'X', 'sdf', 'cid', 'dipole x', 'dipole y', 'dipole z',
     'homo', 'lumo', 'homolumogap', 'scf energy']
]
# BUG FIX: `rename(..., inplace=True)` on a column selection triggers pandas'
# SettingWithCopyWarning (renaming a possible view/copy); assigning the
# renamed copy back is the warning-free, future-proof form.
columns_selected = columns_selected.rename(
    columns={'Unnamed: 0': 'index', 'X': 'SMILES'})

columns_selected.to_csv('Molecule3D_final.csv', index=False)
218
-
219
-
220
# 9. Split the dataset by using random split and scaffold split.

Molecule3D_final = pd.read_csv('Molecule3D_final.csv')


def export_split(df, split_json, prefix):
    """Split *df* by the index lists in *split_json* and write one CSV each.

    *split_json* must contain 'train', 'test' and 'valid' lists of values
    matched against df['index']. Output files are named
    '{prefix}_train.csv', '{prefix}_test.csv' and '{prefix}_validation.csv'.
    Prints the row count of each subset.
    """
    with open(split_json, 'r') as f:
        split_data = json.load(f)

    train_data = df[df['index'].isin(split_data['train'])]
    test_data = df[df['index'].isin(split_data['test'])]
    valid_data = df[df['index'].isin(split_data['valid'])]

    train_data.to_csv(f'{prefix}_train.csv', index=False)
    test_data.to_csv(f'{prefix}_test.csv', index=False)
    valid_data.to_csv(f'{prefix}_validation.csv', index=False)

    print(f"Length of the train dataset: {len(train_data)} rows")
    print(f"Length of the test dataset: {len(test_data)} rows")
    print(f"Length of the valid dataset: {len(valid_data)} rows")


# The original repeated the whole filter/save/print stanza for each split;
# one helper covers both.

# Random split
export_split(Molecule3D_final, 'random_split_inds.json', 'Molecule3D_random')

# Scaffold split
export_split(Molecule3D_final, 'scaffold_split_inds.json', 'Molecule3D_scaffold')