Migrated from GitHub
- .gitattributes +3 -0
- data/LICENSE +21 -0
- data/Model/GCN/callbacks.py +39 -0
- data/Model/GCN/mol2graph.py +291 -0
- data/Model/GCN/network.py +44 -0
- data/Model/GCN/utils.py +65 -0
- data/Model/QSAR/qsar_AKT1_optimized.pkl +3 -0
- data/Model/QSAR/qsar_CXCR4_optimized.pkl +3 -0
- data/Model/QSAR/qsar_DRD2_optimized.pkl +3 -0
- data/Model/Transformer/model.py +201 -0
- data/Utils/reward.py +112 -0
- data/Utils/utils.py +297 -0
- data/ckpts/GCN/GCN.pth +3 -0
- data/config/config.py +104 -0
- data/data/QSAR/AKT1/akt1_test.csv +0 -0
- data/data/QSAR/AKT1/akt1_train.csv +0 -0
- data/data/QSAR/CXCR4/cxcr4_test.csv +428 -0
- data/data/QSAR/CXCR4/cxcr4_train.csv +0 -0
- data/data/QSAR/DRD2/drd2_test.csv +0 -0
- data/data/QSAR/DRD2/drd2_train.csv +3 -0
- data/data/USPTO/src_test.txt +0 -0
- data/data/USPTO/src_train.txt +3 -0
- data/data/USPTO/src_valid.txt +0 -0
- data/data/USPTO/tgt_test.txt +0 -0
- data/data/USPTO/tgt_train.txt +3 -0
- data/data/USPTO/tgt_valid.txt +0 -0
- data/data/beamsearch_template_list.txt +575 -0
- data/data/input/init_smiles_akt1.txt +5 -0
- data/data/input/init_smiles_cxcr4.txt +5 -0
- data/data/input/init_smiles_drd2.txt +5 -0
- data/data/input/test.txt +1 -0
- data/data/input/unseen_ZINC_AKT1.txt +1 -0
- data/data/input/unseen_ZINC_CXCR4.txt +1 -0
- data/data/input/unseen_ZINC_DRD2.txt +1 -0
- data/data/label_template.json +0 -0
- data/env.yml +24 -0
- data/scripts/beam_search.py +300 -0
- data/scripts/gcn_train.py +161 -0
- data/scripts/mcts.py +395 -0
- data/scripts/preprocess.py +148 -0
- data/scripts/transformer_train.py +167 -0
- data/scripts/translate.py +193 -0
- data/set_up.sh +9 -0
- data/translation/out_beam10_best10.txt +91 -0
- data/translation/viewer.ipynb +0 -0
.gitattributes
CHANGED
@@ -57,3 +57,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+data/data/QSAR/DRD2/drd2_train.csv filter=lfs diff=lfs merge=lfs -text
+data/data/USPTO/src_train.txt filter=lfs diff=lfs merge=lfs -text
+data/data/USPTO/tgt_train.txt filter=lfs diff=lfs merge=lfs -text
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Sekijima Laboratory, Tokyo Institute of Technology

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
data/Model/GCN/callbacks.py
ADDED
@@ -0,0 +1,39 @@
import torch
import numpy as np


class EarlyStopping:

    def __init__(self, patience=5, verbose=False, path='checkpoint_model.pth'):
        self.patience = patience        # patience before stopping
        self.verbose = verbose
        self.counter = 0                # current counter
        self.best_score = None          # best score
        self.early_stop = False         # stop flag
        self.val_loss_min = np.Inf      # memorizes the previous best score
        self.path = path                # path to save the best model

    def __call__(self, val_loss, model):
        score = -val_loss

        if self.best_score is None:     # first epoch
            self.best_score = score
            self.checkpoint(val_loss, model)   # save model and show score
        elif score < self.best_score:   # the best score was not updated
            self.counter += 1           # stop counter +1
            if self.verbose:
                print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:                           # the best score was updated
            self.best_score = score
            self.checkpoint(val_loss, model)   # save model and show score
            self.counter = 0            # reset the stop counter

    def checkpoint(self, val_loss, model):
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), self.path)
        self.val_loss_min = val_loss
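For reference, a minimal sketch of how this callback wires into a training loop; the toy model and the fabricated validation-loss values below are illustrative only, not part of this commit:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # toy stand-in for the real network
stopper = EarlyStopping(patience=3, verbose=True, path='checkpoint_model.pth')

# Fabricated loss curve: improves twice, then stagnates for three epochs.
for epoch, val_loss in enumerate([1.00, 0.80, 0.79, 0.81, 0.85, 0.90]):
    stopper(val_loss, model)
    if stopper.early_stop:
        print(f'stopped at epoch {epoch}')
        break

model.load_state_dict(torch.load('checkpoint_model.pth'))  # restore the best weights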
data/Model/GCN/mol2graph.py
ADDED
@@ -0,0 +1,291 @@
import numpy as np
from rdkit import Chem
import torch
from torch_geometric.data import Data


def one_of_k_encoding(x, allowable_set):
    """
    Encodes an element of a provided set as a one-hot list.

    Parameters
    ----------
    x: object
        Must be present in `allowable_set`.
    allowable_set: list
        List of allowable quantities.

    Example
    -------
    >>> import deepchem as dc
    >>> dc.feat.graph_features.one_of_k_encoding("a", ["a", "b", "c"])
    [True, False, False]

    Raises
    ------
    `ValueError` if `x` is not in `allowable_set`.
    """
    if x not in allowable_set:
        raise ValueError(f"input {x} not in allowable set {allowable_set}")
    # One-hot vector that is True only at the position of x in allowable_set.
    return list(map(lambda s: x == s, allowable_set))


def one_of_k_encoding_unk(x, allowable_set):
    """
    Maps inputs not in the allowable set to the last element.

    Unlike `one_of_k_encoding`, if `x` is not in `allowable_set`, this method
    pretends that `x` is the last element of `allowable_set`.

    Parameters
    ----------
    x: object
        Must be present in `allowable_set`.
    allowable_set: list
        List of allowable quantities.

    Examples
    --------
    >>> dc.feat.graph_features.one_of_k_encoding_unk("s", ["a", "b", "c"])
    [False, False, True]
    """
    # Same as one_of_k_encoding, but with an "unknown" catch-all as the last element.
    if x not in allowable_set:
        x = allowable_set[-1]
    return list(map(lambda s: x == s, allowable_set))


def get_intervals(l):
    """For a list of lists, gets the cumulative products of the lengths."""
    intervals = len(l) * [0]  # [0, 0, ..., 0]
    intervals[0] = 1  # Initialize with 1
    for k in range(1, len(l)):
        intervals[k] = (len(l[k]) + 1) * intervals[k - 1]
    return intervals


def safe_index(l, e):
    """Gets the index of e in l, providing an index of len(l) if not found."""
    try:
        return l.index(e)
    except ValueError:
        return len(l)


class GraphConvConstants(object):
    """This class defines a collection of constants which are useful for graph convolutions on molecules."""
    possible_atom_list = [
        'C', 'N', 'O', 'S', 'F', 'P', 'Cl', 'Mg', 'Na', 'Br', 'Fe', 'Ca', 'Cu', 'Mc', 'Pd', 'Pb', 'K', 'I', 'Al', 'Ni', 'Mn'
    ]
    """Allowed numbers of hydrogens."""
    possible_numH_list = [0, 1, 2, 3, 4]
    """Allowed valences for atoms."""
    possible_valence_list = [0, 1, 2, 3, 4, 5, 6]
    """Allowed formal charges for atoms."""
    possible_formal_charge_list = [-3, -2, -1, 0, 1, 2, 3]
    """These will be replaced with the corresponding values of the rdkit HybridizationType."""
    possible_hybridization_list = ["SP", "SP2", "SP3", "SP3D", "SP3D2"]
    """Allowed numbers of radical electrons."""
    possible_number_radical_e_list = [0, 1, 2]
    """Allowed types of chirality."""
    possible_chirality_list = ['R', 'S']
    """The set of all values allowed."""
    reference_lists = [
        possible_atom_list, possible_numH_list, possible_valence_list,
        possible_formal_charge_list, possible_number_radical_e_list,
        possible_hybridization_list, possible_chirality_list
    ]
    """The number of different values that can be taken. See `get_intervals()`."""
    intervals = get_intervals(reference_lists)
    """Possible stereochemistry. We use E-Z notation for stereochemistry
    https://en.wikipedia.org/wiki/E%E2%80%93Z_notation"""
    possible_bond_stereo = ["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]
    """Number of different bond types, not counting stereochemistry."""
    bond_fdim_base = 6


def get_feature_list(atom):
    possible_atom_list = GraphConvConstants.possible_atom_list
    possible_numH_list = GraphConvConstants.possible_numH_list
    possible_valence_list = GraphConvConstants.possible_valence_list
    possible_formal_charge_list = GraphConvConstants.possible_formal_charge_list
    possible_number_radical_e_list = GraphConvConstants.possible_number_radical_e_list
    # Replace the hybridization strings with the corresponding rdkit HybridizationType values.
    possible_hybridization_list = [
        Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
        Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,
        Chem.rdchem.HybridizationType.SP3D2
    ]

    # Build the feature vector from the six atom-feature categories defined above.
    features = 6 * [0]
    features[0] = safe_index(possible_atom_list, atom.GetSymbol())
    features[1] = safe_index(possible_numH_list, atom.GetTotalNumHs())
    features[2] = safe_index(possible_valence_list, atom.GetImplicitValence())
    features[3] = safe_index(possible_formal_charge_list, atom.GetFormalCharge())
    features[4] = safe_index(possible_number_radical_e_list,
                             atom.GetNumRadicalElectrons())
    features[5] = safe_index(possible_hybridization_list, atom.GetHybridization())
    return features


def features_to_id(features, intervals):
    """Convert a list of features into an index using the spacings provided in intervals."""
    id = 0
    for k in range(len(intervals) - 1):  # the original `len(intervals-1)` was a TypeError
        id += features[k] * intervals[k]
    # Allow the 0 index to correspond to the null molecule.
    id = id + 1
    return id


def id_to_features(id, intervals):
    features = 6 * [0]
    # Correct for the null molecule.
    id -= 1
    for k in range(0, 6 - 1):
        features[6 - k - 1] = id // intervals[6 - k - 1]
        id -= features[6 - k - 1] * intervals[6 - k - 1]
    # Correct for the last one.
    features[0] = id
    return features


def atom_to_id(atom):
    """Return a unique id corresponding to the atom type."""
    features = get_feature_list(atom)
    return features_to_id(features, GraphConvConstants.intervals)


def atom_features(atom, bool_id_feat=False, explicit_H=False, use_chirality=False):
    if bool_id_feat:
        return np.array([atom_to_id(atom)])
    else:
        # Concatenate all atom features.
        results = one_of_k_encoding_unk(
            atom.GetSymbol(),
            [
                'C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na',
                'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb',
                'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge',
                'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg',
                'Pb', 'Unknown'
            ]  # allowable set
        ) + one_of_k_encoding(
            atom.GetDegree(),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        ) + one_of_k_encoding_unk(
            atom.GetImplicitValence(),
            [0, 1, 2, 3, 4, 5, 6]
        ) + [
            atom.GetFormalCharge(), atom.GetNumRadicalElectrons()
        ] + one_of_k_encoding_unk(
            atom.GetHybridization().name,
            [
                Chem.rdchem.HybridizationType.SP.name,
                Chem.rdchem.HybridizationType.SP2.name,
                Chem.rdchem.HybridizationType.SP3.name,
                Chem.rdchem.HybridizationType.SP3D.name,
                Chem.rdchem.HybridizationType.SP3D2.name
            ]
        ) + [atom.GetIsAromatic()]
        # In the case of explicit hydrogens (QM8, QM9), avoid calling `GetTotalNumHs`.
        if not explicit_H:
            results = results + one_of_k_encoding_unk(
                atom.GetTotalNumHs(),
                [0, 1, 2, 3, 4]
            )
        if use_chirality:
            try:
                results = results + one_of_k_encoding_unk(
                    atom.GetProp('_CIPCode'),
                    ['R', 'S']) + [atom.HasProp('_ChiralityPossible')]
            except KeyError:
                results = results + [False, False] + [atom.HasProp('_ChiralityPossible')]

        return results


def bond_features(bond, use_chirality=False):
    bt = bond.GetBondType()
    bond_feats = [
        bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE,
        bt == Chem.rdchem.BondType.TRIPLE, bt == Chem.rdchem.BondType.AROMATIC,
        bond.GetIsConjugated(),
        bond.IsInRing()
    ]  # e.g. a C-C single bond in cyclopropane: [1, 0, 0, 0, 0, 1]
    if use_chirality:
        bond_feats = bond_feats + one_of_k_encoding_unk(
            str(bond.GetStereo()),
            ["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]
        )
    return bond_feats


def get_bond_pair(mol):
    bonds = mol.GetBonds()
    res = [[], []]
    for bond in bonds:
        res[0] += [bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]
        res[1] += [bond.GetEndAtomIdx(), bond.GetBeginAtomIdx()]
    return res


def mol2vec(mol):
    atoms = mol.GetAtoms()
    bonds = mol.GetBonds()
    node_f = [atom_features(atom) for atom in atoms]
    edge_index = get_bond_pair(mol)
    # Each bond appears twice in edge_index (once per direction), so its feature
    # vector is appended twice, keeping edge_attr aligned with edge_index.
    edge_attr = []
    for bond in bonds:
        edge_attr.append(bond_features(bond, use_chirality=False))
        edge_attr.append(bond_features(bond, use_chirality=False))
    data = Data(
        x=torch.tensor(node_f, dtype=torch.float),               # node-feature matrix of shape [num_nodes, num_node_features]
        edge_index=torch.tensor(edge_index, dtype=torch.long),   # graph connectivity in COO format, shape [2, num_edges]
        edge_attr=torch.tensor(edge_attr, dtype=torch.float)     # edge-feature matrix of shape [num_edges, num_edge_features]
    )
    return data
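A quick sanity check of `mol2vec` (aspirin is an arbitrary example; the expected shapes follow from the 75-dimensional atom featurization and the bidirectional edge list above):

from rdkit import Chem

mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')  # aspirin: 13 heavy atoms, 13 bonds
data = mol2vec(mol)
print(data.x.shape)           # torch.Size([13, 75]) node features
print(data.edge_index.shape)  # torch.Size([2, 26])  each bond stored in both directions
print(data.edge_attr.shape)   # torch.Size([26, 6])  one 6-entry feature vector per directed edge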
data/Model/GCN/network.py
ADDED
@@ -0,0 +1,44 @@
from torch_geometric.nn import GCNConv
from torch_geometric.nn import global_add_pool
import torch.nn.functional as F
from torch.nn import ModuleList, Linear, BatchNorm1d
import torch


class MolecularGCN(torch.nn.Module):
    def __init__(self, dim, n_conv_hidden, n_mlp_hidden, dropout):
        super(MolecularGCN, self).__init__()
        self.n_features = 75  # this value is specific to mol2graph.py
        self.n_conv_hidden = n_conv_hidden
        self.n_mlp_hidden = n_mlp_hidden
        self.dim = dim
        self.dropout = dropout
        self.graphconv1 = GCNConv(self.n_features, self.dim, cached=False)
        self.bn1 = BatchNorm1d(self.dim)
        self.graphconv_hidden = ModuleList(
            [GCNConv(self.dim, self.dim, cached=False) for _ in range(self.n_conv_hidden)]
        )
        self.bn_conv = ModuleList(
            [BatchNorm1d(self.dim) for _ in range(self.n_conv_hidden)]
        )
        self.mlp_hidden = ModuleList(
            [Linear(self.dim, self.dim) for _ in range(self.n_mlp_hidden)]
        )
        self.bn_mlp = ModuleList(
            [BatchNorm1d(self.dim) for _ in range(self.n_mlp_hidden)]
        )
        self.mlp_out = Linear(self.dim, 1000)  # classification over 1000 templates

    def forward(self, x, edge_index, batch, edge_weight=None):
        x = F.relu(self.graphconv1(x, edge_index, edge_weight))
        x = self.bn1(x)
        for graphconv, bn_conv in zip(self.graphconv_hidden, self.bn_conv):
            x = graphconv(x, edge_index, edge_weight)
            x = bn_conv(x)
        x = global_add_pool(x, batch)
        for fc_mlp, bn_mlp in zip(self.mlp_hidden, self.bn_mlp):
            x = F.relu(fc_mlp(x))
            x = bn_mlp(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.log_softmax(self.mlp_out(x), dim=-1)
        return x
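A hypothetical smoke test of the network, assuming `mol2vec` from `mol2graph.py` above is importable (the import path mirrors the one used in `utils.py`):

import torch
from rdkit import Chem
from torch_geometric.loader import DataLoader
from Model.GCN.mol2graph import mol2vec  # path assumed from this repo's layout

net = MolecularGCN(dim=256, n_conv_hidden=1, n_mlp_hidden=3, dropout=0.1)  # GCN_TrainConfig defaults
net.eval()
graphs = [mol2vec(Chem.MolFromSmiles(s)) for s in ('CCO', 'c1ccccc1')]
batch = next(iter(DataLoader(graphs, batch_size=2)))
with torch.no_grad():
    out = net(batch.x, batch.edge_index, batch.batch)
print(out.shape)  # torch.Size([2, 1000]) log-probabilities over the template classes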
data/Model/GCN/utils.py
ADDED
@@ -0,0 +1,65 @@
from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
import torch
from Model.GCN import mol2graph

def get_data(data_path):
    mols, labels = [], []
    with open(data_path, 'r') as f:
        smis = f.read().splitlines()
    for smi in smis:
        smi = smi.split(' ')
        labels.append(int(smi[0].strip('[]')))
        smi = ''.join(smi[1:])
        mols.append(Chem.MolFromSmiles(smi))
    return np.array(mols), np.array(labels)

"""
get_neg_sample: selects a negative sample according to the frequency distribution of the library.
Correct fragments (y) and fragments that could not be connected to the target (y_mask) are masked.
"""
@torch.no_grad()
def get_neg_sample(freq, y):
    # y: (batch_size, )
    # freq: (1, ), frequency of templates
    batch_size = y.size(0)
    freq = freq.repeat(batch_size, 1)
    freq.scatter_(1, y.unsqueeze(1), 0)
    neg_idxs = torch.multinomial(freq, 1, True).view(-1)
    return neg_idxs

def template_prediction(GCN_model, input_smi, num_sampling, GCN_device=None):
    mol = Chem.MolFromSmiles(input_smi)
    data = mol2graph.mol2vec(mol).to(GCN_device)
    with torch.no_grad():
        output = GCN_model.forward(data.x, data.edge_index, data.batch).squeeze()  # shape (1, 1000) -> (1000,)
    try:
        _, indices = torch.topk(output, num_sampling)
    except RuntimeError:  # e.g. num_sampling exceeds the number of classes
        indices = None
    return indices

def batch_template_prediction(GCN_model, input_smi, num_sampling=5, GCN_device=None):
    mol = Chem.MolFromSmiles(input_smi)
    data = mol2graph.mol2vec(mol).to(GCN_device)
    output = GCN_model.forward(data.x, data.edge_index, data.batch).squeeze()  # shape (1, 1000) -> (1000,)
    _, indices = torch.topk(output, num_sampling)
    return indices

def check_templates(indices, input_smi, r_dict):
    matched_indices = []
    molecule = Chem.MolFromSmiles(input_smi)
    for i in indices:
        idx = str(i.item())
        rsmi = r_dict[idx]
        rxn = AllChem.ReactionFromSmarts(rsmi)
        reactants = rxn.GetReactants()
        flag = False
        for reactant in reactants:
            if molecule.HasSubstructMatch(reactant):
                flag = True
        if flag:
            matched_indices.append(f'[{i.item()}]')
    return matched_indices  # list of strings, e.g. ['[0]', '[123]', ..., '[742]']
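To illustrate `check_templates`, a toy call with a made-up one-entry template dictionary; the amide-disconnection SMARTS below is illustrative only, not one of the real USPTO templates in `label_template.json`:

import torch

# Made-up retro template: amide -> acid + amine (the left side is the "reactant" of the retro reaction).
r_dict = {'0': '[C:1](=[O:2])[NH1:3]>>[C:1](=[O:2])[OH1].[NH2:3]'}
indices = torch.tensor([0])
print(check_templates(indices, 'CC(=O)Nc1ccc(O)cc1', r_dict))  # ['[0]'] — paracetamol matches the amide pattern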
data/Model/QSAR/qsar_AKT1_optimized.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06b15206f5a79076646130dd07bea5bc9111b278a27eb55c978d23f57855c56f
size 8643648
data/Model/QSAR/qsar_CXCR4_optimized.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29e8e71fb9791bcdc99a339357ac8e5b2a3660e5f1743c011b55cea61d202e4a
size 2125387
data/Model/QSAR/qsar_DRD2_optimized.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2db876d2c1d2831594021419fdf8b037c624e68126b3cc240116f99b3c1609aa
size 49644044
data/Model/Transformer/model.py
ADDED
@@ -0,0 +1,201 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import math
from typing import Optional, Union, Callable

import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import _LRScheduler
from torch.nn.init import xavier_uniform_
import torchtext.vocab.vocab as Vocab


class PositionalEncoding(nn.Module):
    # PE(pos, 2i) = sin(pos / 10000**(2i/D)), where i indexes the embedding dimension and D = d_model
    def __init__(self, d_model: int, pad_idx: int = 1, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.d_model = d_model
        self.pad_idx = pad_idx

        position = torch.arange(max_len).unsqueeze(1)  # column vector of shape (max_len, 1)
        div_term = torch.exp(torch.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model))
        # torch.arange(start, stop, step) -> 1-D vector of shape (d_model/2,)
        pe = torch.zeros(max_len, 1, self.d_model)  # (seq_length, 1, emb_dim)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x: Tensor, pad_mask=None) -> Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        if pad_mask is not None:
            mask = pad_mask.permute(1, 0).unsqueeze(-1).repeat(1, 1, self.d_model)  # True at padding positions
        # Make the embeddings relatively larger.
        x = x * math.sqrt(self.d_model)
        x = x + self.pe[:x.size(0)]  # even if max_len is e.g. 5000, only the first seq_len positions are sliced and added

        if pad_mask is not None:
            x = torch.where(mask, 0, x)  # zero out padding positions, leave the rest untouched
        return self.dropout(x)


# Learning-rate scheduler
class TransformerLR(_LRScheduler):
    """TransformerLR class for adjustment of the learning rate.

    The scheduling is based on the method proposed in 'Attention Is All You Need'.
    """

    def __init__(self, optimizer, warmup_epochs=8000, last_epoch=-1, verbose=False):
        """Initialize class."""
        self.warmup_epochs = warmup_epochs
        self.normalize = self.warmup_epochs**0.5
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """Return the adjusted learning rate."""
        step = self.last_epoch + 1
        scale = self.normalize * min(step**-0.5, step * self.warmup_epochs**-1.5)
        return [base_lr * scale for base_lr in self.base_lrs]


# Transformer model
class Transformer(nn.Module):
    def __init__(self, d_model: int = 256, nhead: int = 8, num_encoder_layers: int = 4, num_decoder_layers: int = 4,
                 dim_feedforward: int = 2048, dropout: float = 0.1, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
                 vocab: Vocab = None, layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()

        if vocab is None:
            raise RuntimeError("set vocab: torchtext.vocab.vocab")

        # INFO
        self.model_type = "Transformer"
        self.vocab = vocab
        num_tokens = len(vocab)

        self.positional_encoder = PositionalEncoding(d_model=d_model,
                                                     pad_idx=self.vocab['<pad>'],
                                                     dropout=dropout,
                                                     max_len=5000)
        self.embedding = nn.Embedding(num_tokens, d_model, padding_idx=self.vocab['<pad>'])

        encoder_layer = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                                   activation, layer_norm_eps, batch_first, norm_first,
                                                   **factory_kwargs)
        encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                                   activation, layer_norm_eps, batch_first, norm_first,
                                                   **factory_kwargs)
        decoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)

        self.out = nn.Linear(d_model, num_tokens)

        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead
        self.batch_first = batch_first

    # Transformer blocks - output size = (seq_length, batch_size, num_tokens)
    # input src, tgt must be (seq_length, batch_size)
    def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None,
                tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,
                src_pad_mask: bool = False, tgt_pad_mask: bool = False,
                memory_pad_mask: bool = False) -> Tensor:

        if src_pad_mask is True:
            src_pad_mask = (src == self.vocab['<pad>']).permute(1, 0)
        else:
            src_pad_mask = None

        if tgt_pad_mask is True:
            tgt_pad_mask = (tgt == self.vocab['<pad>']).permute(1, 0)
        else:
            tgt_pad_mask = None

        if memory_pad_mask is True:
            memory_pad_mask = (src == self.vocab['<pad>']).permute(1, 0)
        else:
            memory_pad_mask = None

        # Embedding
        src = self.embedding(src)
        tgt = self.embedding(tgt)
        src = self.positional_encoder(src, src_pad_mask)
        tgt = self.positional_encoder(tgt, tgt_pad_mask)

        # Transformer layers
        is_batched = src.dim() == 3
        if not self.batch_first and src.size(1) != tgt.size(1) and is_batched:
            raise RuntimeError("the batch number of src and tgt must be equal")
        elif self.batch_first and src.size(0) != tgt.size(0) and is_batched:
            raise RuntimeError("the batch number of src and tgt must be equal")
        if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model:
            raise RuntimeError("the feature number of src and tgt must be equal to d_model")

        memory = self.encoder(src=src, mask=src_mask, src_key_padding_mask=src_pad_mask)
        transformer_out = self.decoder(tgt=tgt, memory=memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
                                       tgt_key_padding_mask=tgt_pad_mask, memory_key_padding_mask=memory_pad_mask)
        out = self.out(transformer_out)

        return out

    def encode(self, src: Tensor, src_mask: Optional[Tensor] = None,
               src_pad_mask: bool = False) -> Tensor:

        if src_pad_mask is True:
            src_pad_mask = (src == self.vocab['<pad>']).permute(1, 0)
        else:
            src_pad_mask = None

        # Embedding + PE
        src = self.embedding(src)
        src = self.positional_encoder(src, src_pad_mask)

        # Transformer encoder
        memory = self.encoder(src=src, mask=src_mask, src_key_padding_mask=src_pad_mask)

        return memory, src_pad_mask

    def decode(self, memory: Tensor, tgt: Tensor, tgt_mask: Optional[Tensor] = None,
               memory_mask: Optional[Tensor] = None, tgt_pad_mask: bool = False,
               memory_pad_mask: Optional[Tensor] = None) -> Tensor:

        if tgt_pad_mask is True:
            tgt_pad_mask = (tgt == self.vocab['<pad>']).permute(1, 0)
        else:
            tgt_pad_mask = None

        tgt = self.embedding(tgt)
        tgt = self.positional_encoder(tgt, tgt_pad_mask)
        transformer_out = self.decoder(tgt=tgt, memory=memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
                                       tgt_key_padding_mask=tgt_pad_mask, memory_key_padding_mask=memory_pad_mask)
        out = self.out(transformer_out)

        return out

    def embed(self, src):
        src_embed = self.embedding(src)
        return src_embed

    def _reset_parameters(self):
        r"""Initiate parameters in the transformer model."""
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
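A minimal round trip through `encode`/`decode` with a toy three-token vocabulary; the real vocabulary is built from the tokenized USPTO corpus, so everything below is a hypothetical smoke test:

import torch
from torchtext.vocab import build_vocab_from_iterator

v = build_vocab_from_iterator([['C', 'O', 'N']], specials=['<bos>', '<pad>', '<eos>'])
net = Transformer(d_model=64, nhead=4, num_encoder_layers=2, num_decoder_layers=2,
                  dim_feedforward=128, vocab=v)
net.eval()
src = torch.tensor([[v['<bos>']], [v['C']], [v['O']], [v['<eos>']]])  # (seq_len=4, batch=1)
memory, src_pad_mask = net.encode(src, src_pad_mask=True)
tgt = torch.tensor([[v['<bos>']]])  # decoding starts from <bos>
logits = net.decode(memory, tgt, memory_pad_mask=src_pad_mask)
print(logits.shape)  # torch.Size([1, 1, 6]) — scores over the 6-token vocabulary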
data/Utils/reward.py
ADDED
@@ -0,0 +1,112 @@
import numpy as np
import pandas as pd
import pickle
import hydra

import warnings
warnings.filterwarnings('ignore')

import rdkit.Chem as Chem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
from rdkit.Chem import AllChem, QED

def getReward(name,
              receptor_path=None,
              pdbqt_path=None,
              VinaGPU_path=None,
              VinaGPU_config=None):
    if name == "QED":
        return QEDReward()
    elif name == 'DRD2':
        with open(hydra.utils.get_original_cwd() + '/Model/QSAR/drd2_qsar_optimized.pkl', mode='rb') as f:
            qsar_model = pickle.load(f)
        return QSAR_Reward(qsar_model)
    elif name == 'AKT1':
        with open(hydra.utils.get_original_cwd() + '/Model/QSAR/akt1_qsar_optimized.pkl', mode='rb') as f:
            qsar_model = pickle.load(f)
        return QSAR_Reward(qsar_model)


class Reward:
    def __init__(self):
        self.vmin = -100
        self.max_r = -10000
        return

    def reward(self):
        raise NotImplementedError()

class QSAR_Reward(Reward):
    def __init__(self, qsar_model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.qsar_model = qsar_model

    def reward(self, score_que: list = None):
        max_smi = None
        scores = []
        mols = [Chem.MolFromSmiles(smi) for smi in score_que]
        ecfps = []
        None_indices = []
        for i, mol in enumerate(mols):
            if mol is not None:
                ecfps.append(AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=2048))
            else:
                None_indices.append(i)
                ecfps.append([0] * 2048)
        if len(ecfps) == 0:
            return [], None, None
        ecfp6_array = np.array(ecfps)
        X = pd.DataFrame(ecfp6_array, columns=[f'bit_{i}' for i in range(2048)])
        y_pred = self.qsar_model.predict_proba(X)[:, 1]
        for None_idx in None_indices:
            y_pred[None_idx] = np.nan
        max_score = np.nanmax(y_pred)
        for smi, score in zip(score_que, y_pred):
            if np.isnan(score):  # the original `score == np.nan` is always False
                pass
            elif score == max_score:
                max_smi = smi
            scores.append((smi, score))
        return scores, max_smi, max_score

    def reward_remove_nan(self, score_que: list = None):
        max_smi = None
        scores = []
        # Convert SMILES to mols, keeping only the valid ones.
        valid_smiles = []
        mols = []
        for smi in score_que:
            mol = Chem.MolFromSmiles(smi)
            if mol is not None:
                valid_smiles.append(smi)
                mols.append(mol)
        ecfps = [AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=2048) for mol in mols]
        if len(ecfps) == 0:
            return [], None, None
        ecfp6_array = np.array(ecfps)
        X = pd.DataFrame(ecfp6_array, columns=[f'bit_{i}' for i in range(2048)])
        y_pred = self.qsar_model.predict_proba(X)[:, 1]
        max_score = np.nanmax(y_pred)
        for smi, score in zip(valid_smiles, y_pred):
            if score == max_score:
                max_smi = smi
            scores.append((smi, score))
        return scores, max_smi, max_score

class QEDReward(Reward):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vmin = 0

    def reward(self, smi):
        mol = Chem.MolFromSmiles(smi)
        try:
            score = QED.qed(mol)
        except Exception:
            score = None

        return score
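Usage of the QED branch (celecoxib is an arbitrary test molecule; the QSAR branches additionally need hydra's working directory and the pickled models):

r = getReward('QED')
print(r.reward('Cc1ccc(-c2cc(C(F)(F)F)nn2-c2ccc(S(N)(=O)=O)cc2)cc1'))  # celecoxib: a float in (0, 1)
print(r.reward('not a smiles'))  # None — QED.qed fails on an invalid molecule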
data/Utils/utils.py
ADDED
@@ -0,0 +1,297 @@
import math
import random
import re
from copy import deepcopy

import numpy as np
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')

from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
from rdkit.Chem import Descriptors

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def smi_tokenizer(smi):
    '''
    Tokenize a SMILES molecule or reaction.
    '''
    pattern = '(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])'
    regex = re.compile(pattern)
    tokens = [token for token in regex.findall(smi)]
    assert smi == ''.join(tokens)
    return ' '.join(tokens)

class Node:
    def __init__(self):
        self.parent = None
        self.template = None
        self.path = []
        self.depth = -100
        self.visit = 1
        self.children = []
        self.imm_score = 0
        self.cum_score = 0
        self.c = 1
        self.id = -1
        self.rollout_result = ('None', -1000)

    def add_Node(self, c):
        c.parent = self
        c.depth = self.depth + 1
        self.children.append(c)

    def calc_UCB(self):
        if self.visit == 0:
            ucb = 1e+6
        else:
            ucb = self.cum_score / self.visit + self.c * math.sqrt(2 * math.log(self.parent.visit) / self.visit)
        return ucb

    def select_children(self):
        children_ucb = []
        for cn in self.children:
            children_ucb.append(cn.calc_UCB())
        max_ind = np.random.choice(np.where(np.array(children_ucb) == max(children_ucb))[0])
        return self.children[max_ind]

    def select_children_rand(self):
        indices = list(range(0, len(self.children)))
        ind = np.random.choice(indices)
        return self.children[ind]


class RootNode(Node):
    def __init__(self, c=1/np.sqrt(2)):
        super().__init__()
        self.smi = '&&'
        self.depth = 0
        self.c = c

class NormalNode(Node):
    def __init__(self, smi, c=1/np.sqrt(2)):
        super().__init__()
        self.smi = smi
        self.c = c
        self.template = None

    def remove_Node(self):
        self.parent.children.remove(self)

def read_smilesset(path):
    smiles_list = []
    with open(path) as f:
        for smiles in f:
            smiles_list.append(smiles.rstrip())

    return smiles_list

# Calculate the number of parameters.
def tally_parameters(model):
    n_params = sum([p.nelement() for p in model.parameters()])
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:  # the original `'decoder' or 'generator' in name` was always truthy
            dec += param.nelement()
    return n_params, enc, dec


class EarlyStopping:
    def __init__(self, patience=10, ckpt_dir=None):
        '''Args: patience counter for steps without improvement, and the checkpoint directory.'''
        self.patience = patience        # patience before stopping
        self.counter = 0                # current counter value
        self.best_score = None          # best score
        self.early_stop = False         # stop flag
        self.val_loss_min = np.Inf      # memorizes the previous best score
        self.path = ckpt_dir            # directory to save the best model

    def __call__(self, val_loss, step, optimizer, cur_loss, model):
        '''
        Called inside the training loop to check whether the minimum loss was updated.
        '''
        score = -val_loss

        if self.best_score is None:      # first validation step
            self.best_score = score      # record it as the best score as-is
            self.checkpoint(val_loss, step, optimizer, cur_loss, model)  # save the model and show the score
        elif score < self.best_score:    # the best score was not updated
            self.counter += 1            # stop counter +1
            print(f'Validation loss increased ({self.val_loss_min:.6f} --> {val_loss:.6f}).')
            self.checkpoint(val_loss, step, optimizer, cur_loss, model)
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')  # show the current counter
            if self.counter >= self.patience:  # set the stop flag once the patience is exceeded
                self.early_stop = True
        else:                            # the best score was updated
            self.best_score = score      # overwrite the best score
            print(f'Validation loss decreased! ({self.val_loss_min:.6f} --> {val_loss:.6f}) Saving model ...')
            self.checkpoint(val_loss, step, optimizer, cur_loss, model)  # save the model and show the score
            self.counter = 0             # reset the stop counter

    def checkpoint(self, val_loss, step, optimizer, cur_loss, model):
        torch.save({'step': step,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': cur_loss,}, f'{self.path}/ckpt_{step+1}.pth')
        self.val_loss_min = val_loss  # record the loss at this point

class AverageMeter(object):
    '''Computes and stores the average and current value.'''
    def __init__(self):
        self.reset()

    def reset(self):
        self.value = 0  # latest value
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, value, n=1):
        self.value = value
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, batch_size, v=None):
    '''
    Computes the accuracy of the top-1 prediction.

    output: (seq_length*batch_size, num_tokens)
    target: (seq_length*batch_size)
    '''
    pad_mask = (target != v['<pad>'])  # False at <pad> positions, True elsewhere
    true_pos = torch.nonzero(pad_mask).squeeze().tolist()
    out_extracted = output[true_pos]
    t_extracted = target[true_pos]
    _, pred = out_extracted.topk(1, 1, True, True)  # args of topk: (k, dim=1, largest=True, sorted=True)
    pred = pred.t()  # (seq*batch, maxk) -> (maxk, seq*batch)
    correct = pred.eq(t_extracted.reshape(1, -1).expand_as(pred))  # target: (seq*batch, 1) -> (1, seq*batch) -> (maxk, seq*batch)
    # Tensor.eq computes element-wise equality; correct is a bool matrix.
    correct_rate = (correct[0].float().sum(0, keepdim=True)) / len(t_extracted)

    # Compute the accuracy per whole molecule.
    target = target.reshape(-1, batch_size)
    output = output.reshape(-1, batch_size, len(v))
    _, pred = output.topk(1, 2, True, True)
    pred = pred.squeeze()  # (seq, batch, 1) -> (seq, batch)
    correct_cum = 0
    EOS_token = v['<eos>']
    for i in range(batch_size):
        t = target[:, i].tolist()
        eos_idx = t.index(EOS_token)
        t = t[0:eos_idx]
        p = pred[:, i].tolist()
        p = p[0:len(t)]
        if t == p:
            correct_cum += 1
    perfect_acc = correct_cum / batch_size
    return correct_rate.item(), perfect_acc

def calc_topk_perfect_acc(x, target, batch_size, EOS):
    '''
    x: predicted tensor of shape (seq, batch, k)
    target: (seq, batch)
    '''
    correct_cum = 0
    if x.dim() < 3:
        x = x.unsqueeze(-1)
    for i in range(batch_size):
        t = target[:, i].tolist()
        eos_idx = t.index(EOS)
        t = t[0:eos_idx]
        for j in range(x.size(2)):
            p = x[:, i, j].tolist()
            p = p[0:len(t)]
            if t == p:
                correct_cum += 1
                break
    return correct_cum / batch_size


def MW_checker(mol, threshold: int = 500):
    MW = Descriptors.ExactMolWt(mol)
    if MW > threshold:
        return False
    else:
        return True

def is_empty(li):
    return all(not sublist for sublist in li)

def torch_fix_seed(seed=42):
    # Python random
    random.seed(seed)
    # NumPy
    np.random.seed(seed)
    # PyTorch
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.use_deterministic_algorithms(True)  # the original assigned True to the function itself

# When imported (e.g. `import utils`), __name__ holds the module name 'utils';
# when this file is executed directly from the command line, __name__ is '__main__',
# so the block below runs only on direct execution.
if __name__ == '__main__':
    smiles_list = read_smilesset('Data/input/250k_rndm_zinc_drugs_clean.smi')
    vocab = []
    for smiles in tqdm(smiles_list):
        p = smi_tokenizer(smiles).split(' ')  # the original called an undefined `parse_smiles`
        vocab.extend(p)

    vocab = list(set(vocab))
    vocab.sort()
    print(vocab)
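Two small checks of the pieces above — the SMILES tokenizer and one UCB selection step on a hand-built two-node tree (the scores are fabricated):

print(smi_tokenizer('CC(=O)O[C@@H]1CCCC1'))  # 'C C ( = O ) O [C@@H] 1 C C C C 1'

root = RootNode()
root.visit = 2  # pretend both children have been visited once
for smi, score in [('CCO', 0.9), ('CCN', 0.1)]:
    child = NormalNode(smi)
    child.cum_score = score
    root.add_Node(child)
print(root.select_children().smi)  # 'CCO' — higher mean reward wins at equal visit counts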
data/ckpts/GCN/GCN.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c6b85821672caa8cf4b35d11c1241e76fe0109ee635a9a5fe1549e3fd10b92a
size 2188609
data/config/config.py
ADDED
@@ -0,0 +1,104 @@
import math

from hydra.core.config_store import ConfigStore
from dataclasses import dataclass

@dataclass
class PreProcess:
    augm_size: int = 1
    src_train: str = '/data/USPTO/src_train.txt'
    tgt_train: str = '/data/USPTO/tgt_train.txt'
    src_valid: str = '/data/USPTO/src_valid.txt'
    tgt_valid: str = '/data/USPTO/tgt_valid.txt'
    batch_size: int = 256

@dataclass
class ModelConfig:
    dim_model: int = 512
    num_encoder_layers: int = 6
    num_decoder_layers: int = 6
    nhead: int = 8
    dropout: float = 0.1
    dim_ff: int = 2048
    ckpt: str = '/ckpts/Transformer/ckpt_conditional.pth'

@dataclass
class TrainConfig:
    src_train: str = '/data/USPTO/src_train.txt'
    tgt_train: str = '/data/USPTO/tgt_train.txt'
    src_valid: str = '/data/USPTO/src_valid.txt'
    tgt_valid: str = '/data/USPTO/tgt_valid.txt'
    batch_size: int = 128
    label_smoothing: float = 0.0
    lr: float = 0.001
    betas: tuple = (0.9, 0.998)
    step_num: int = 500000  # set training steps
    patience: int = 10
    log_interval: int = 100
    val_interval: int = 1000
    save_interval: int = 10000

@dataclass
class TranslateConfig:
    src_train: str = '/data/USPTO/src_train.txt'
    tgt_train: str = '/data/USPTO/tgt_train.txt'
    src_valid: str = '/data/USPTO/src_valid.txt'
    tgt_valid: str = '/data/USPTO/tgt_valid.txt'
    GCN_ckpt: str = '/ckpts/GCN/GCN.pth'
    out_dir: str = '/translation'
    src_test_path: str = '/data/input/test.txt'
    annotated_templates: str = '/data/beamsearch_template_list.txt'
    filename: str = 'test'
    GCN_num_sampling: int = 10
    inf_max_len: int = 256
    nbest: int = 10
    beam_size: int = 10

@dataclass
class GCN_TrainConfig:
    train: str = '/data/USPTO/src_train.txt'
    valid: str = '/data/USPTO/src_valid.txt'
    test: str = '/data/USPTO/src_test.txt'
    batch_size: int = 256
    dim: int = 256
    n_conv_hidden: int = 1
    n_mlp_hidden: int = 3
    dropout: float = 0.1
    lr: float = 0.0004
    epochs: int = 100
    patience: int = 5
    save_path: str = '/ckpts/GCN'


@dataclass
class MCTSConfig:
    src_train: str = '/data/USPTO/src_train.txt'
    tgt_train: str = '/data/USPTO/tgt_train.txt'
    src_valid: str = '/data/USPTO/src_valid.txt'
    tgt_valid: str = '/data/USPTO/tgt_valid.txt'
    n_step: int = 200
    max_depth: int = 10
    in_smiles_file: str = '/data/input/init_smiles_drd2.txt'
    out_dir: str = '/mcts_out'
    ucb_c: float = 1/math.sqrt(2)
    reward_name: str = 'DRD2'  # 'DRD2' or 'QED'
    ckpt_Transformer: str = '/ckpts/Transformer/ckpt_conditional.pth'
    ckpt_GCN: str = '/ckpts/GCN/GCN.pth'
    beam_width: int = 10
    nbest: int = 10
    exp_num_sampling: int = 10
    rollout_depth: int = 2
    roll_num_sampling: int = 5

@dataclass
class Config:
    prep: PreProcess = PreProcess()
    model: ModelConfig = ModelConfig()
    train: TrainConfig = TrainConfig()
    translate: TranslateConfig = TranslateConfig()
    GCN_train: GCN_TrainConfig = GCN_TrainConfig()
    mcts: MCTSConfig = MCTSConfig()

cs = ConfigStore.instance()
cs.store(name="config", node=Config)
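A sketch of how a script would consume the registered config node through hydra; the real entry points live in data/scripts/, so this standalone `main` and its import path are assumptions based on the repo layout:

import hydra
from omegaconf import DictConfig
from config.config import Config  # registers "config" in the ConfigStore on import (path assumed)

@hydra.main(config_name='config')
def main(cfg: DictConfig) -> None:
    print(cfg.model.dim_model)   # 512
    print(cfg.mcts.reward_name)  # 'DRD2'

if __name__ == '__main__':
    main()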
data/data/QSAR/AKT1/akt1_test.csv
ADDED
The diff for this file is too large to render.
data/data/QSAR/AKT1/akt1_train.csv
ADDED
The diff for this file is too large to render.
data/data/QSAR/CXCR4/cxcr4_test.csv
ADDED
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
canonical,activity
CCCCCCNC(=NC1CCCCC1)SCC1=CSC2=NCCN12,1
c1ccc(CNCc2ccc(CN(Cc3nc4ccccc4[nH]3)C3CCCc4cccnc43)cc2)nc1,1
C1=C(CSC(=NC2CCCCC2)NC23CC4CC(CC(C4)C2)C3)N2CCN=C2S1,1
CC1C(=O)NC(CCCN=C(N)N)C(=O)NC(Cc2ccc3ccccc3c2)C(=O)NCC(=O)NC(Cc2ccc(O)cc2)C(=O)N1C,1
CC(=O)NCCCC1C(=O)NC(CCCNC(N)=O)C(=O)NC(Cc2ccc3ccccc3c2)C(=O)NCC(=O)NC(Cc2ccc(O)cc2)C(=O)N1C,1
CN1C(=O)C(Cc2ccc3ccccc3c2)NC(=O)CNC(=O)C(Cc2ccc(O)cc2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCNC(=N)N,1
CC(C)CC(NC(=N)N)C(=O)Nc1ccc2c(c1)cc(C(=O)NCCc1c[nH]c3ccccc13)n2CCCNC(=N)N,1
COC(=O)C1C(O)CCC2CN(C#N)C(c3[nH]c4ccccc4c3CCn3cc(-c4cccc(Cl)c4)nn3)CC21,1
N=C(N)NCCCC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CCC(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
N=C(N)NCCCC1NC(=O)C(NC(=O)CCNC(=N)N)CSSCC(C(N)=O)NC(=O)C(Cc2ccc3ccccc3c2)NC1=O,1
Cc1cc(NC2CCN(C(=O)CCNCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
N=C(N)NCCCn1c(C(=O)NCCc2c[nH]c3ccccc23)cc2cc(NC(=O)C(Cc3ccccc3)NC(=N)N)ccc21,1
Oc1cccc(CN(CCN2CCCCC2)CC2CCCN(C3CCCC3)C2)c1,1
c1ccc(CNCc2ccc(CN(Cc3nc4ccccc4[nH]3)C3CCCc4cccnc43)cc2)nc1,1
CN1C(=O)C(CCCNC(=N)N)NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)CNC(=O)C(Cc2ccc(O)cc2)NC(=O)C1CCCNC(=N)N,1
N=C(N)NCCCn1c(C(=O)NCCc2c[nH]c3ccccc23)cc2cc(NC(=O)CNC(=N)N)ccc21,1
N=C(N)NCCCC1NC(=O)C(NC(=O)CCNC(=N)N)CC=CCC(C(N)=O)NC(=O)C(Cc2ccc3ccccc3c2)NC1=O,1
N=C(N)NCCC(=O)NC1CC=CCCC(C(N)=O)NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
CCCCCCNC(=NC1CCCCC1)SCC1=CSC2=NCCN12,1
CCCCCCNC(=NC1CCCCC1)SCC1=CSC2=NCCN12,1
Cc1cc(N(C)C2CCN(C(=O)CCNCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
CCc1cc(NC2CCN(C(=O)CCNCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
c1ccc(CNCc2ccc(CN(Cc3nc4ccccc4[nH]3)C3CCCc4cccnc43)cc2)nc1,1
C1=C(CSC(=NC2CCCCC2)NC23CC4CC(CC(C4)C2)C3)N2CCN=C2S1,1
Cc1cc(NC2CCN(C(=O)CCNCCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
Cc1cc(NC2CCN(CCCCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
N=C(N)NCCC(=O)NC1CCC=CCCC(C(N)=O)NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
N=C(N)NCCCn1c(C(=O)NCCc2c[nH]c3ccccc23)cc2cc(NC(=O)C(Cc3ccccc3)NC(=N)N)ccc21,1
N=C(N)NCCc1cc2cc(NC(=O)CCc3ccc(O)cc3)ccc2n1CCc1ccc2ccccc2c1,1
Cc1cc(NC2CCN(C(=O)CCCNCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
N=C(N)NCCCn1c(C(=O)Nc2cccc3ccccc23)cc2cc(NC(=O)Cc3ccc(O)cc3)ccc21,1
CN1C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCNC(=N)N,1
CC(=O)NCCCC1C(=O)NC(CCCNC(=N)N)C(=O)NC(Cc2ccc3ccccc3c2)C(=O)NCC(=O)NC(Cc2ccc(O)cc2)C(=O)N1C,1
N=C(N)NCCCC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC1=N,1
CN1C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCNC(=N)N,1
N=C(N)NCCCn1c(C(=O)NCCc2c[nH]c3ccccc23)cc2cc(NC(=O)C3CCN(C(=N)N)CC3)ccc21,1
Cc1cc(NC2CCN(CCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
Cc1cc(NC2CCN(CCC(N)=O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
CCCN1CCC(Nc2cc(C)nc(NCc3cn(CCCNCCCNC4CCCCC4)nn3)n2)CC1,1
N=C(N)NCCCC1NC(=O)C2CC(N=C(N)N)CN2C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC1=O,1
N=C(N)NCCCC1NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)CCNC(=O)C(Cc2ccc(O)cc2)NC(=O)C(CCCNC(=N)N)NC1=O,1
N=C(N)NCCCC1NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)CCNC(=O)C(Cc2ccc(O)cc2)NC(=O)C(CCCNC(=N)N)NC1=O,1
NCCCCC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
CN1C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCNC(=N)N,1
CN1C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCNC(=N)N,1
CC(C)CC(NC(=N)N)C(=O)Nc1ccc2c(c1)cc(C(=O)NCCc1c[nH]c3ccccc13)n2CCCNC(=N)N,1
O=C(O)CNCCC(=O)N1CCC(Nc2ccnc(NCc3cn(CCCNCCCNC4CCCCC4)nn3)n2)CC1,1
Cc1cc(NC2CCN(CCC#N)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
Cc1cc(NC2CCN(C(=O)CCCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
CCc1cc(NC2CCN(C(=O)CCNCCP(=O)(O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
N=C(N)NCCCC1NC(=O)C(CCCNC(=N)N)NC(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC1=N,1
Cc1cc(NC2CCN(C(=O)CCC(N)C(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
Cc1cc(NC2CCN(CCCCCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
N=C(N)NCCCC1NC(=O)C2CC(N=C(N)N)CN2C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC1=O,1
NC(=O)CCC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
CN1C(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCN,1
N=C(N)NCCCNC1CSSCC(C(N)=O)NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
N=C(N)NCCCC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CCNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCNC(=N)N)NC1=O,1
CN1C(=O)C(Cc2ccc3ccccc3c2)NC(=O)CNC(=O)C(Cc2ccc(O)cc2)NC(=O)C(CCCNC(=N)N)NC(=O)C1CCCNC(=N)N,1
CC1C(=O)NC(CCCN=C(N)N)C(=O)NC(Cc2ccc3ccccc3c2)C(=O)NCC(=O)NC(Cc2ccc(O)cc2)C(=O)N1C,1
CN1C(=O)C(CCCNC(=N)N)NC(=O)C(Cc2ccc3ccccc3c2)NC(=O)CNC(=O)C(Cc2ccc(O)cc2)NC(=O)C1CCCNC(=N)N,1
NCCC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
NC(=O)CC1NC(=O)C(Cc2ccc(O)cc2)NC(=O)CNC(=O)C(Cc2ccc3ccccc3c2)NC(=O)C(CCCN=C(N)N)NC1=O,1
Cc1cc(NC2CCN(C(=O)CNCCC(=O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
C1=C(CSC(=NC2CCCCC2)NC23CC4CC(CC(C4)C2)C3)N2CCN=C2S1,1
CCCCN(c1nc2ccccc2o1)C(CC)c1ccc(-c2ccccc2-c2nnn[nH]2)cc1,1
CCCCN(c1nc2ccccc2o1)C(C)c1ccc(-c2ccccc2-c2nnn[nH]2)cc1,1
Cc1cc(NC2CCN(C(=O)CCNCP(=O)(O)O)CC2)nc(NCc2cn(CCCNCCCNC3CCCCC3)nn2)n1,1
CCCCN(c1nc2ccccc2o1)C(C)c1ccc(-c2ccccc2-c2nnn[nH]2)cc1,1
CCCCN(c1nc2ccccc2o1)C(C)c1ccc(-c2ccccc2-c2nnn[nH]2)cc1,1
CCN(CC)CCCC(C)Nc1cc(C=Cc2ccc([N+](=O)[O-])cc2)nc2cc(OC)ccc12,1
Oc1cccc(CN(CCN2CCCCC2)CC2CCCN(C3CCCC3)C2)c1,1
CCC(C)N1C(=O)C2C(Cc3c[nH]c4ccccc34)[NH2+]C3(C(=O)Nc4ccc(C)cc43)C2C1=O,0
COc1cccc(NC(=O)c2sc3[nH+]c(N4CCCC4)c4c(c3c2N)CC(C)(C)OC4)c1,0
CC[NH+]1CCN(C(c2cccs2)C(C)NC(=O)C(=O)NCCc2c[nH]c3ccccc23)CC1,0
CSC1=CC2CC3COP(=O)(c4ccccc4O)N[NH+]3CC2C=C1c1ccccc1O,0
CC(C)S(=O)(=O)C1=CC2CC3COP(=O)(c4ccccc4O)N[NH+]3CC2C=C1c1ccccc1O,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)c4ccccc4C)CC34CC[NH2+]CC4)c2c1,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)c4cc(Cl)cc(Cl)c4)CC34CC[NH2+]CC4)c2c1,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)c4cccc(Cl)c4F)CC34CC[NH2+]CC4)c2c1,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)C4CC=CCC4)CC34CC[NH2+]CC4)c2c1,0
CC(CCCC(C)(C)O)NC(=O)C1CN(C(=O)c2c(F)cccc2F)CC12CC[NH2+]CC2,0
CC(CCCC(C)(C)O)NC(=O)C1CN(C(=O)c2cc(Cl)cc(Cl)c2)CC12CC[NH2+]CC2,0
CC(CCCC(C)(C)O)NC(=O)C1CN(C(=O)c2ccc(Br)cc2)CC12CC[NH2+]CC2,0
COc1c(Cl)cc(C(=O)N2CC(C(=O)NC(C)CCCC(C)(C)O)C3(CC[NH2+]CC3)C2)cc1Cl,0
CCCCC(C)C(=O)N1CC(C(=O)NCCc2c[nH]c3ccc(Cl)cc23)C2(CC[NH2+]CC2)C1,0
Cc1cccc2c(CCNC(=O)C3CN(C(=O)C(Oc4ccccc4)C(C)C)CC34CC[NH2+]CC4)c[nH]c12,0
CCc1cccc2c1=[NH+]CC=2C(=O)CSC1NNC(c2ccc(F)cc2)[NH+]1CCC(=O)[O-],0
Cc1n[nH]c(C)c1C(C)NC(=O)C1CN(C(=O)c2ccc(C(F)(F)F)cc2)CC12CC[NH2+]CC2,0
Cc1n[nH]c(C)c1C(C)NC(=O)C1CN(C(=O)c2cc(Cl)ccc2F)CC12CC[NH2+]CC2,0
Cc1cc(C)c2c(N)c(C(=O)OCC(=O)Nc3sc4c(c3C#N)CCCCC4)sc2[nH+]1,0
CCC(Sc1nc2n[nH]c(C)c2c(N)[n+]1-c1ccc(C)c(Cl)c1)C(=O)NCc1ccco1,0
Cc1ccc(C2CC(C(F)(F)F)N3NC=C(C(=O)Nc4cc5c(cc4C(C)O)OCO5)C3=[NH+]2)cc1,0
CC(O)c1cc2c(cc1NC(=O)C1=CNN3C1=[NH+]C(c1ccco1)CC3C(F)(F)F)OCO2,0
COc1cc(-c2cc(=[NH+]C(Cc3c[nH]c[nH+]3)C(=O)[O-])c3cc(Cl)c(C)cc3o2)ccc1O,0
O=C(NCCC1=c2cc(Cl)ccc2=[NH+]C1)C1C=C2NC(=C3CC3)C=C(C(F)(F)F)N2N1,0
O=C(NCCC1=c2cc(Cl)ccc2=[NH+]C1)C1C=C2NC(=C3CC3)C=C(C(F)(F)F)N2N1,0
CCOCCC1NNC(NC(=O)C2CC(=O)N(CCC3=c4cc(F)ccc4=[NH+]C3)C2)S1,0
[H]N=C(Nc1nc(C)cc(C)n1)N(CCC1=c2ccccc2=[NH+]C1)C(=S)Nc1ccc(F)cc1,0
COc1ccc(NC(=O)CSC2NC(=CC(=O)NCCC3=c4ccccc4=[NH+]C3)CS2)cc1,0
O=C(C=C1CSC(SCC(=O)Nc2ccc(Cl)cc2)N1)NCCC1=c2ccccc2=[NH+]C1,0
CNS(=O)(=O)c1ccc(NC(=O)C(CC2=c3ccccc3=[NH+]C2)NC(=O)c2cccs2)cc1,0
COC12CCC3(CC1C(C)(O)C(C)(C)C)C1C(O)c4ccc(O)c5c4C3(CC[NH+]1CC1CC1)C2O5,0
[NH3+]C(CO)C(=O)NC(Cc1ccc(O)cc1)C(=O)[NH+]=c1ccc2c(C(F)(F)F)cc(O)oc-2c1,0
CC1=CC(C(=O)N2CC(C(=O)NCCC3=c4cc(C#N)ccc4=[NH+]C3C)C3(CC[NH2+]CC3)C2)NN1C,0
CC1=CC(C(=O)N2CC(C(=O)NCCC3=c4cc(C#N)ccc4=[NH+]C3C)C3(CC[NH2+]CC3)C2)NN1C,0
Cc1n[nH]c2ncc(-c3cc(OCC([NH3+])CC4=c5ccccc5=[NH+]C4)cnc3-c3ccoc3)cc12,0
Cc1n[nH]c2ccc(-c3cc(OCC([NH3+])CC4=c5ccccc5=[NH+]C4)cnc3-c3ccoc3)nc12,0
Cc1n[nH]c2ccc(-c3cc(OCC([NH3+])CC4=c5cccnc5=[NH+]C4)cnc3-c3ccoc3)cc12,0
Cc1occc1-c1nc(N)c(OCC([NH3+])CC2=c3ccccc3=[NH+]C2)cc1-c1cc2c(C)n[nH]c2cn1,0
CC(C)(CC1=CN=CC1)C1C(=O)Nc2ccc(-c3cncc(OCC([NH3+])CC4=c5ccccc5=[NH+]C4)c3)cc21,0
Cc1ccc2c(c1)=[NH+]C(SC(C(=O)Nc1ccc(S(N)(=O)=O)cc1)c1ccccc1)[NH+]=2,0
Cc1ccc2c(c1)=[NH+]C(SC(C(=O)Nc1ccc(S(N)(=O)=O)cc1)c1ccccc1)[NH+]=2,0
Cc1ccc2c(c1)=[NH+]C(CNC(=O)C1[NH+]=C(Cn3nc(-c4ccc(F)cc4)ccc3=O)NO1)[NH+]=2,0
Cc1ccc2c(c1)=[NH+]C(CNC(=O)C1[NH+]=C(Cn3nc(-c4ccc(F)cc4)ccc3=O)NO1)[NH+]=2,0
Cn1c(SCC(=O)NCCC2[NH+]=c3ccccc3=[NH+]2)nnc1C1[NH+]=C(c2ccc(F)cc2)NO1,0
Cc1ccc(C2=[NH+]C(c3nnc(SCC(=O)NCCC4[NH+]=c5ccccc5=[NH+]4)n3C)ON2)cc1,0
COc1cc(C2CC(=NNC3[NH+]=c4ccccc4=[NH+]3)c3ccc(C)cc3O2)ccc1O,0
CCN1NC(C)=C(NC(=O)CSC2[NH+]=c3cc(C)cc(C)c3=[NH+]2)C1C,0
COc1ccc2c(c1)=CC(C[NH2+]CC1CN(c3cccc(F)c3)NC1c1ccc(C)o1)[NH+]=2,0
O=C1CCC(CC[NH2+]CCOc2ccc(F)cc2)N1CC1([NH+]2CCOCC2)CCCCC1,0
COC(=O)C1[NH+]=c2ccc(Cl)cc2=C1NC(=O)C(C)[NH+]1CCN(c2cccc(OC)c2)C(C)C1,0
COc1ccc(C2=CN=[NH+]C2c2ccc(OCC[NH+]3CCN(c4ccc(F)cc4)CC3)cc2O)cc1,0
CCOC(=O)c1cccc(NC2[NH+]=c3ccccc3=[NH+]C2=NS(=O)(=O)c2ccc(F)cc2)c1,0
O=C(Nc1ccc(N2CCC([NH2+]C(CCn3cc[nH+]c3)c3ccccc3)CC2)cc1)c1ccccc1F,0
COc1ccc2c(c1)=C(CCNC(=O)C1C=C3[NH+]=C(c4ccco4)C=C(C(F)(F)F)N3N1)C[NH+]=2,0
CCC(C)Nc1c[nH+]c2c(c1)C(NC(C)=O)C(C(=O)OC)N2CCC1=c2ccccc2=[NH+]C1,0
COc1cccc(Cn2nc(C(=O)NC3CC3)c3c2CCN(CC2[NH+]=c4ccccc4=[NH+]2)C3)c1,0
COC(=O)C1C(NC(C)=O)c2cc(NC(C)CC(C)C)c[nH+]c2N1CCC1=c2ccccc2=[NH+]C1,0
COc1cc(CC2CC(C[NH2+]CCCC3=c4ccccc4=[NH+]C3C)=NO2)c(OC)c2c1OCO2,0
CCc1ccccc1NC(=O)CSC1N=NC(C([NH3+])CC2=c3ccccc3=[NH+]C2)O1,0
C#CCN(C)C(=O)CC1CC[NH2+]CC1Cc1cc(-c2ccc(F)cc2)on1,0
CCCNC(=O)C1CCCN(Cc2coc(-c3ccc(C)cc3)[nH+]2)C1,0
CCCCNC(=O)C1CCC(C)[NH+](Cc2c(C)nn(C)c2Cl)C1,0
C[NH2+]C(C)C(=O)NC1C=C(C(=O)NC(Cc2ccc(OC)cc2)C(N)=O)CC(O)C1O,0
COc1cccc2c1C(=O)c1c(O)c3c(c(O)c1C2=O)CC(O)(C(C)=O)CC3OC(=O)CC[NH3+],0
COC(=O)C(Cc1c[nH]c2ccc(F)cc12)NC(=O)C([NH3+])Cc1c[nH]c2ccccc12,0
Cc1ccc(Nc2nc(N)nc(C[NH2+]CC(O)c3ccc([N+](=O)[O-])cc3)n2)cc1,0
C=C1c2cccc(O)c2C(=O)C2=C(O)C3(O)C(=O)C(C(N)=O)=C(O)C([NH+](C)C)C3C(O)C12,0
CC(=O)C1(O)Cc2c(O)c3c(c(O)c2C(OC2CC([NH3+])C(O)C(C)O2)C1)C(=O)c1c(O)cccc1C3=O,0
CN1Cc2c(N3CCOCC3)[nH+]c3sc(C(=O)NN)c(N)c3c2CC1(C)C,0
CC(=O)C1([NH3+])Cc2c(O)c3c(c(O)c2C(OC2CC(O)C(O)CO2)C1)C(=O)c1ccccc1C3=O,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
Cc1cc(O)oc2cc(=[NH+]C(=O)C(CCCNC(N)=[NH2+])NC(=O)c3ccccc3)ccc1-2,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
CC(=O)C1([NH3+])Cc2c(O)c3c(c(O)c2C(OC2CC(O)C(O)CO2)C1)C(=O)c1ccccc1C3=O,0
COc1ccc(-c2nc3n(c(=[NH2+])c2C#N)NC(=[NH2+])C3=NNc2ccc3c(c2)OCO3)cc1,0
Cc1cc(C)c2c(N)c(C(=O)OCC(=O)Nc3sc4c(c3C(N)=O)CCC4)sc2[nH+]1,0
O=C1[NH+]=c2ccccc2=[NH+]C1C(=NNc1ccc([N+](=O)[O-])cc1)C(O)C(O)CO,0
CC1(O)c2c(Cl)ccc(O)c2C(=O)C2C1CC1C([NH3+])C(O)C(C(N)=O)C(=O)C1(O)C2O,0
CC(O)C1([NH3+])Cc2c(O)c3c(c(O)c2C(OC2CC(O)C(O)CO2)C1)C(=O)c1ccccc1C3=O,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
CC(=O)C1([NH3+])Cc2c(O)c3c(c(O)c2C(OC2CC(O)C(O)CO2)C1)C(=O)c1ccccc1C3=O,0
Cc1cc(O)oc2cc(=[NH+]C(=O)C(CCC[NH+]=C(N)N)NC(=O)C[NH3+])ccc1-2,0
CC([NH3+])C(=O)NC(CCCNC(N)=[NH2+])C(=O)[NH+]=c1ccc2c(C(F)(F)F)cc(O)oc-2c1,0
CC(NC(=O)C([NH3+])CCCC[NH3+])C(=O)[NH+]=c1ccc2c(C(F)(F)F)cc(O)oc-2c1,0
[NH3+]C(COc1cncc(-c2ccc3[nH]nc(Cl)c3c2)c1)CC1=c2ccccc2=[NH+]C1,0
CCN1NC(C)=C(NC(=O)CSC2[NH+]=c3cc(C)cc(C)c3=[NH+]2)C1C,0
Cc1cccc(NC(=O)CSC2N=NC(C([NH3+])CC3=c4ccccc4=[NH+]C3)O2)c1C,0
Cc1cccc(NC(=O)CSC2N=NC(C([NH3+])CC3=c4ccccc4=[NH+]C3)O2)c1C,0
Cc1ccc2c(c1)=[NH+]C(CNC(=O)c1ccccc1C1NC(c3ccc(F)cc3)=NO1)[NH+]=2,0
Cc1ccc2c(c1)=[NH+]C(CNC(=O)c1ccccc1C1NC(c3ccc(F)cc3)=NO1)[NH+]=2,0
CC1[NH+]=c2ccc(C#N)cc2=C1CCNC(=O)CC1C(=O)NCC[NH+]1C(C)C,0
Cc1ccn2cc(CNC(=O)C3CN(C(=O)C4CC=CCC4)CC34CC[NH2+]CC4)[nH+]c2c1,0
Cc1ccc2[nH+]c(CNC(=O)C3CN(C(=O)C4CC=CCC4)CC34CC[NH2+]CC4)cn2c1,0
Cc1ccc(C(=O)N2CC(C(=O)NCCC3=c4cc(C#N)ccc4=[NH+]C3C)C3(CC[NH2+]CC3)C2)cc1,0
CC1[NH+]=c2ccc(C#N)cc2=C1CCNC(=O)C1CN(C(=O)C2CC=CCC2)CC12CC[NH2+]CC2,0
Cc1cccc(C2=c3cc(S(N)(=O)=O)ccc3=[NH+]C2C(=O)N[n+]2c(C)cc(C)cc2C)c1,0
COc1ccc(N2CC(=O)C(C3[NH+]=c4ccc(C)cc4=[NH+]3)=C2N)cc1Cl,0
COCC(=O)N1CC2C[NH2+]CC2(C(=O)NC(C)(C)C[NH+]2CCCC2)C1,0
COc1ccc2c(c1)=[NH+]C(C(C#N)=Cc1cc(C)n(C3SC4CCCCC4=C3C#N)c1C)[NH+]=2,0
COc1ccc(C(=O)C2CC2C[NH+]2CC=C(C3=c4c(Br)cccc4=[NH+]C3)CC2)cc1,0
Cc1ccc(OC2N=C3C=CC=CN3C(=O)C2C=C(C#N)C2[NH+]=c3ccccc3=[NH+]2)cc1,0
N#CC(=CC1C(=O)N2C=CC=CC2=NC1Oc1ccc(Cl)cc1)C1[NH+]=c2ccccc2=[NH+]1,0
Cc1cc(OC2N=C3C=CC=CN3C(=O)C2C=C(C#N)C2[NH+]=c3ccccc3=[NH+]2)ccc1Cl,0
Cc1cc(C)cc(OC2N=C3C=CC=CN3C(=O)C2C=C(C#N)C2[NH+]=c3ccccc3=[NH+]2)c1,0
COCCN1C(=O)c2ccccc2C(C(=O)N=C2[NH+]=c3ccccc3=[NH+]2)C12CCCCC2,0
O=C(C1CCCC1)N(CC[NH+]1CCCCC1)CC1CCC[NH+](C2CCCC2)C1,0
COc1ccc(-c2nn(-c3ccccc3)cc2C[NH+]2CCC3(CC[NH+](C)C3)C2)c(F)c1,0
CC1=C(C#N)C(n2c(C)cc(C=C(C#N)C3[NH+]=c4ccccc4=[NH+]3)c2C)OC1C,0
CC1=C(C#N)C(n2c(C)cc(C=C(C#N)C3[NH+]=c4ccccc4=[NH+]3)c2C)OC1C,0
CC1C2CCC3C4CC=C5CC([NH+](C)C)CCC5(C)C4CCC32C[NH+]1C,0
COc1c2c(cc3c1C(C#CC[NH+](C)C)[NH+](C)CC3)OCO2,0
C[NH+](CC#N)C1CCC2C3CCC4CC(OC(=O)C[N+](C)(C)C)CCC4(C)C3CCC21C,0
CC1C2CCC3C4CC=C5CC([NH+](C)C)CCC5(C)C4CCC32C[NH+]1C,0
Fc1ccc(C=[NH+]C2=CC(c3ccc(Br)cc3)[NH+]=N2)cc1Oc1ccccc1,0
CCCC[NH+]1CCCCC1CNC(=O)NC(C)Cn1cc[nH+]c1,0
CCCC[NH+]1CCCCC1CNC(=O)NC(C)Cn1cc[nH+]c1,0
C[NH+]1CCN(C2N=C3C=CC=CN3C(=O)C2C=C(C#N)C2[NH+]=c3ccccc3=[NH+]2)CC1,0
C[NH+](CC1=CN=[NH+]C1c1ccc2c(c1)OCO2)CC1C=c2cc(F)ccc2=[NH+]1,0
CC([NH2+]CC1C=c2ccccc2=[NH+]1)C1C=NN(c2cccc(F)c2)C1C,0
O=C(C1CCCC1)N(CC[NH+]1CCCCC1)CC1CCC[NH+](C2CCCC2)C1,0
CCCC[NH+](CC(=O)N1CCSC1c1ccc(F)cc1)Cc1ccccc1F,0
Cc1cccc(C(=O)N2CC(C[NH+]3CCCC3)C(c3cccc(C(F)(F)F)c3)C2)c1,0
N#Cc1cccc(C[NH+](Cc2cccn2Cc2cccc(C(F)(F)F)c2)C2CC2)c1,0
O=C(c1ccco1)N1CC(C[NH+]2CCCC2)C(c2cccc(C(F)(F)F)c2)C1,0
COc1ccc(C2c3cccn3CCC[NH+]2Cc2cccc(Cl)c2)cc1OC,0
COc1cc(C[NH+]2CCCn3cccc3C2c2cc(C)ccc2C)cc(OC)c1,0
CN(C(=O)CCC1=c2ccccc2=[NH+]C1c1ccc(F)cc1)c1cccc(C#N)c1,0
COc1cccc(C(CNC(=O)CCc2[nH+]cc(-c3ccc(F)cc3)o2)[NH+](C)C)c1,0
CCCCc1[nH]cc(C[NH2+]CCC2C(C)=Nc3ccc(OC)cc32)[nH+]1,0
O=S(=O)(c1ccccc1)N1CC[NH+](CC[NH+]=c2cc(O)oc3ccccc23)CC1,0
COC(=O)C1[NH+]=c2ccccc2=C1NC(=O)C(C)[NH+]1CCN(c2ccccc2)CC1,0
COC(=O)C1[NH+]=c2ccc(Cl)cc2=C1NC(=O)C(C)[NH+]1CCN(c2ccccc2)CC1,0
CCOC(=O)C1[NH+]=c2ccc(Br)cc2=C1NC(=O)CC[NH+]1CCCC(C)(C)C1,0
CCOc1ccc2c(c1)=CC(C1N=C3C=CC=CN3C1[NH2+]Cc1ccco1)C(=O)[NH+]=2,0
CCOc1ccc2c(c1)=CC(C1N=C3C=CC=CN3C1[NH2+]Cc1ccco1)C(=O)[NH+]=2,0
CCOc1ccc2c(c1)=CC(c1[nH+]c3cc(C)ccn3c1NCCOC)C(=O)[NH+]=2,0
COc1ccc(C2CC(C(=O)NCCC3=c4ccccc4=[NH+]C3)N=[NH+]2)c(OC)c1,0
COc1ccc(C2CC(C(=O)NCCC3=c4ccccc4=[NH+]C3)N=[NH+]2)c(OC)c1,0
CCCSc1ccc2c(c1)=[NH+]C(NC(=O)CSc1nc(C)cc(C)n1)[NH+]=2,0
O=C(Cc1ccc(Cl)cc1)Nc1nnc(SCC2[NH+]=c3ccccc3=[NH+]C2=O)s1,0
Cc1cc2c(cc1C)=[NH+]C(CSc1nnc(NC(=O)C(C)(C)C)s1)C(=O)[NH+]=2,0
CCCC(=O)Nc1nnc(SCC2[NH+]=c3cc(C)c(C)cc3=[NH+]C2=O)s1,0
CCn1nc(C(=O)N2CCOCC2)c2c1CCC([NH2+]CCC1=c3ccccc3=[NH+]C1)C2,0
Cc1ccc2c(c1)=[NH+]C(SCc1noc(CCC(=O)Nc3c(F)cccc3F)n1)[NH+]=2,0
Cc1ccc2c(c1)=[NH+]C(SCc1noc(CCC(=O)Nc3ccc(F)cc3F)n1)[NH+]=2,0
COc1ccc(C(=O)CC2(O)C(=O)N(C[NH+]3CCCCCC3)c3ccccc32)cc1,0
Cc1ccsc1C[NH+](C)CC(=O)Nc1c(C#N)c(C)c(C)n1CC1CCCO1,0
COc1ccc(N2CC[NH+](CCNS(=O)(=O)c3ccc(Cl)s3)CC2)cc1,0
CCOc1cc2c(cc1C[NH+]1CCC(NC(C)=O)(c3ccccc3F)CC1)OCO2,0
Cc1ccc(Cn2cnc3sc4c(c3c2=O)CCC([NH2+]CCCn2ccnc2)C4)cc1,0
CC(C)Cn1nc(C(=O)N2CCOCC2)c2c1CCC([NH2+]CCc1ccccc1F)C2,0
CCOC(=O)N1CCC([NH2+]Cc2cc3ccc(F)cc3n(CC(C)C)c2=O)CC1,0
Cc1ccc(NC(=O)COc2coc(C[NH+]3CCc4ccccc4C3)cc2=O)c(C)c1,0
O=C(Cn1c(=O)oc2ccccc21)NCC(c1ccsc1)[NH+]1CCCCCC1,0
Cc1ccc(N2CC(C(=O)NCC(c3cccn3C)[NH+]3CCCCCC3)CC2=O)cc1C,0
COc1ccc(-n2c(C)cc(C=C(C#N)C(=O)NCC[NH+]3CCCCC3)c2C)cc1,0
COc1ccc2c(c1)OC(=O)C(CCC(=O)NCCC1=c3cc(F)ccc3=[NH+]C1)C2C,0
CCOc1ccccc1NC(=O)C1CC(=O)N(CCC2=c3cc(F)ccc3=[NH+]C2)C1,0
CN(CC1C=c2cc(F)ccc2=[NH+]1)C(=O)C1C=C(COc2c(F)cccc2F)ON1,0
O=C(C1CC1)N(CC[NH+]1CCCCC1)CC1CCC[NH+](C2CCCC2)C1,0
Cc1cc(C=C(C#N)C2[NH+]=c3ccccc3=[NH+]2)c(C)n1C1SC2CCCCC2=C1C#N,0
CC12CCC3C(CCC4(O)CC(O)CCC34C=[NH+]Cc3ccncc3)C1(O)CCC2C1=CC(=O)OC1,0
CCOC(=O)c1ccc(N2C(=O)C3C(CC(N)=O)[NH2+]C4(C(=O)Nc5ccc(Cl)cc54)C3C2=O)cc1,0
COc1ccc2c3c1OC1C4(OC)C=CC5(c6c(O)cc(SCCO)c(O)c64)C(C2)[NH+](C)CCC315,0
COC(C[NH+]=CC12CCC(O)CC1(O)CCC1C2CCC2(C)C(C3=CC(=O)OC3)CCC12O)OC,0
O=C(CSc1nnc(-c2ccc(F)cc2)n1CC[NH+]1CCOCC1)Nc1ccc2[nH]c(=O)[nH]c2c1,0
O=C(CCC1CNC(=O)C2CC(NC(=O)c3ccc(F)c(Cl)c3)C[NH+]12)NCc1ccc(F)cc1,0
CCOc1ccc(C=NNC(=O)c2ccc(N)cc2)cc1C[NH+]1CC2CC(C1)c1cccc(=O)n1C2,0
Cc1nn(C(C)C)c(C)c1C=NNc1nc(SCC(=O)NC2CC(C)(C)[NH2+]C(C)(C)C2)n[nH]1,0
CC(c1ccc(S(N)(=O)=O)cc1)[NH+](C)CC(=O)Nc1cc(C(F)(F)F)ccc1-n1cncn1,0
COc1cccc(C=[NH+]CCNC(=O)c2cc3n(n2)C(C(F)(F)C(F)(F)F)CC(C)N3)c1O,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)c4ccccc4OC(C)=O)CC34CC[NH2+]CC4)c2c1,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)c4c(Cl)cccc4Cl)CC34CC[NH2+]CC4)c2c1,0
COc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)C(C)Oc4ccccc4)CC34CC[NH2+]CC4)c2c1,0
CC(CCCC(C)(C)O)NC(=O)C1CN(C(=O)c2ccccc2C(F)(F)F)CC12CC[NH2+]CC2,0
Cc1cc(C(=O)N2CC(C(=O)NC(C)CCCC(C)(C)O)C3(CC[NH2+]CC3)C2)ccc1Br,0
Cc1cc(-c2onc(C)c2C(=O)N2CC(C(=O)NC(C)CCCC(C)(C)O)C3(CC[NH2+]CC3)C2)no1,0
Cc1nn(C)c2ncc(C(=O)N3CC(C(=O)NC(C)CCCC(C)(C)O)C4(CC[NH2+]CC4)C3)c(Cl)c12,0
Cc1ccc2[nH]cc(CCNC(=O)C3CN(C(=O)c4ccc(N5CCOCC5)nc4)CC34CC[NH2+]CC4)c2c1,0
CC1=C(C(=O)N2CC(C(=O)NCCc3c[nH]c4ccc(C)cc34)C3(CC[NH2+]CC3)C2)C(c2ccccc2)NO1,0
CC1[NH+]=c2ccc(C#N)cc2=C1CCNC(=O)C1CN(C(=O)C2(C)CCCCC2)CC12CC[NH2+]CC2,0
CC(=O)N1CCC(C(=O)N2CC(C(=O)NCCc3c[nH]c4ccc(Cl)cc34)C3(CC[NH2+]CC3)C2)CC1,0
Cc1ccc(F)cc1C(=O)N1CC(C(=O)NCCc2c[nH]c3ccc(Cl)cc23)C2(CC[NH2+]CC2)C1,0
CC1=CC(C(=O)N2CC(C(=O)NCCc3c[nH]c4ccc(Cl)cc34)C3(CC[NH2+]CC3)C2)NN1C,0
CC(C)(C)OC(=O)NCCNC(=O)C1CN(C(=O)C2CCCN2C(=O)C(F)(F)F)CC12CC[NH2+]CC2,0
CC(C)(C)OC(=O)NCCNC(=O)C1CN(C(=O)c2cc(Cl)ccc2F)CC12CC[NH2+]CC2,0
Cc1n[nH]c(C)c1C(C)NC(=O)C1CN(C(=O)c2cc(F)cc(C(F)(F)F)c2)CC12CC[NH2+]CC2,0
COc1ccc(C(=O)N2CC(C(=O)NC(C)c3c(C)n[nH]c3C)C3(CC[NH2+]CC3)C2)cc1C(F)(F)F,0
Cc1ccc2nc(C(C)NC(=O)C3CN(C(=O)c4ccc(C)c(F)c4F)CC34CC[NH2+]CC4)[nH]c2c1,0
COc1ccc(OC)c(C(=O)N2CC(C(=O)NC(C)c3nc4ccc(C)cc4[nH]3)C3(CC[NH2+]CC3)C2)c1,0
Cc1ccc(N2C(=O)C3C(c4ccccc4O)[NH2+]C(CC4=c5ccccc5=[NH+]C4)(C(=O)[O-])C3C2=O)cc1C,0
COc1ccc2c(c1)=C(CCN1CC(C(=O)NC3NNC(Cc4ccccc4)S3)CC1=O)C[NH+]=2,0
CNS(=O)(=O)c1ccc(NC(=O)C(CC2=c3ccccc3=[NH+]C2)NC(=O)c2cccs2)cc1,0
CCC(Sc1nc2n[nH]c(C)c2c(N)[n+]1-c1ccc(Br)cc1)C(=O)NCc1ccco1,0
COc1ccc2c(c1)OC1(CC[NH+](CC(O)c3c[nH]c4cc(C)ccc34)CC1)CC2O,0
Cc1ccc(F)cc1CC([NH3+])COc1cncc(-c2ccc3[nH]nc(C)c3c2)c1,0
CCC(=O)Nc1ccc(C(C)[NH2+]C(C)C(=O)Nc2cccc(Cl)c2C)cc1,0
CCC(=O)Nc1ccc(C(C)[NH2+]C(C)C(=O)Nc2cccc(Cl)c2C)cc1,0
CCC(C)C(=O)N1CC(C(=O)NCCc2c[nH]c3ccc(C)cc23)C2(CC[NH2+]CC2)C1,0
Cc1n[nH]c2cnc(-c3cncc(OCC([NH3+])Cc4ccc(F)c(F)c4F)c3)cc12,0
O=C(NC1Nc2ccc(NC(=O)C3C=c4ccccc4=[NH+]3)cc2S1)c1ccccc1,0
CC[NH+]1CCN(c2c(Cl)cccc2NC(=S)NC(=O)c2ccc(OC)c(Br)c2)CC1,0
CC(C)c1ccc(NC(=O)C(=O)NCC(c2ccc3c(c2)CCN3C)N2CC[NH+](C)CC2)cc1,0
FC(F)(F)c1ccc2c(NC3CCC([NH2+]Cc4cccnc4)CC3)ncc(-c3ccsc3)c2n1,0
COc1ccc(C[NH+](C)Cc2nc3sc(C(=O)Nc4ccc(C)cc4C)c(C)c3c(=O)[nH]2)cc1F,0
Cc1ccccc1-c1csc2nc(C[NH+](CC(O)COc3ccccc3)C(C)C)[nH]c(=O)c12,0
CC(C)OCC(O)C[NH+](Cc1ccccc1)Cc1nc2sc3c(c2c(=O)[nH]1)CCCCC3,0
CCOc1ccc(C2CC(c3ccc(C)cc3C)=NN2C(=O)C[NH+]2CCC(C(N)=O)CC2)cc1,0
CC(=O)C1CCC2C3CCC4=CC(=NOCC(=O)NCCc5c[nH]c[nH+]5)CCC4(C)C3CCC12C,0
CC[NH+]1CCN(c2c(Cl)cccc2NC(=S)NC(=O)c2cc(Br)ccc2OC)CC1,0
Cc1ccc(C(=O)NC(=CC2CN(C)c3ccccc32)C(=O)NCCC2=c3ccccc3=[NH+]C2)cc1,0
COc1ccc2cc(C(=O)N3CCC(CC(=O)NCCC4=c5cc(C)ccc5=[NH+]C4)CC3)[nH]c2c1,0
COc1cccc(CNC2=[NH+]CCNC23CCN(S(=O)(=O)c2ccc(C(C)C)cc2)CC3)c1,0
CCC([NH2+]CCc1c[nH]c2cc(F)ccc12)C(=O)Nc1cc(C)on1,0
CCC([NH2+]CCc1c[nH]c2cc(F)ccc12)C(=O)Nc1cc(C)on1,0
COc1cccc(NC(=O)N(Cc2ccsc2)Cc2ccco2)c1,0
COc1ccc(CC(NC(=O)C2=CC(NC(=O)C([NH3+])C(C)C)C(O)C(O)C2)C(N)=O)cc1,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
OCC1OC(CO)(Oc2cc3c(O)cc(O)cc3[o+]c2-c2cc(O)c(O)c(O)c2)C(O)C1O,0
COc1cccc2c1C(=O)c1c(O)c3c(c(O)c1C2=O)CC(O)(C(C)=O)CC3SCC[NH3+],0
COC(=O)C(Cc1c[nH]c2ccc(F)cc12)NC(=O)C([NH3+])Cc1c[nH]c2ccccc12,0
CC=C(C)C(=O)OC1CCC2(C)C3CCC4C5(O)CC(O)C6(O)C(C[NH+]7CC(C)CCC7C6(C)O)C5(O)CC42OC13O,0
N#CC(=CNc1ccc(S(=O)(=O)NC(N)=[NH2+])cc1)c1nc(-c2cccc(Br)c2)cs1,0
C[NH+](C)C1C(O)=C(C(N)=O)C(=O)C2(O)C(O)=C3C(=O)c4c(O)cccc4C(C)(O)C3C(O)C12,0
C[NH+](C)C1C(O)=C(C(N)=O)C(=O)C2(O)C(O)=C3C(=O)c4c(O)cccc4C(C)(O)C3C(O)C12,0
Cc1cc(NC(=O)NC(=O)c2ccccc2NC(=O)C(C)[NH3+])ccc1Oc1ncc(Br)cn1,0
CCOc1cc(C=NNc2nc(SCC(=O)NC3CC(C)(C)[NH2+]C(C)(C)C3)n[nH]2)ccc1O,0
CC(=O)C1([NH3+])Cc2c(O)c3c(c(O)c2C(OC2CC(O)C(O)CO2)C1)C(=O)c1ccccc1C3=O,0
CC1(O)c2c(Cl)ccc(O)c2C(=O)C2C1CC1C([NH3+])C(O)C(C(N)=O)C(=O)C1(O)C2O,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
CCC1CC[NH2+]C(C(=O)NC(C(C)Cl)C2OC(SC)C(O)C(O)C2O)C1,0
CC1(O)c2c(Cl)ccc(O)c2C(=O)C2C1CC1C([NH3+])C(O)C(C(N)=O)C(=O)C1(O)C2O,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
O=C1[NH+]=c2ccccc2=[NH+]C1C(=NNc1ccc([N+](=O)[O-])cc1)C(O)C(O)CO,0
CCCC1CC(C(=O)NC(C(C)O)C2OC(SC)C(O)C(O)C2O)[NH+](C)C1,0
CCC1CC[NH2+]C(C(=O)NC(C(C)Cl)C2OC(SC)C(O)C(O)C2O)C1,0
CC(C)(C)OC(=O)NC(CC1=c2ccccc2=[NH+]C1)C(=O)OCC1OC(O)C(O)C(O)C1O,0
O=C(CSC1NC(c2ccco2)Nc2cc(=O)[nH]n21)NCC1[NH+]=c2ccccc2=[NH+]1,0
O=C(CSC1NC(c2ccco2)Nc2cc(=O)[nH]n21)NCCC1[NH+]=c2ccccc2=[NH+]1,0
CC(C)(C)OC(=O)NC(CC1=c2ccccc2=[NH+]C1)C(=O)OCC1OC(O)C(O)C(O)C1O,0
Cc1ccccc1-[n+]1c(SCC(=O)NCCc2ccc(S(N)(=O)=O)cc2)nc2[nH]nc(C)c2c1N,0
CCC(=O)Nc1cc(C(O)C[NH+]2CCc3[nH]c4c(C)cccc4c3C2)ccc1OC,0
NC(=O)CN1CCCC([NH2+]Cc2ccc(-c3cccc(Cl)c3Cl)o2)C1,0
CCOC(=O)c1c[nH+]c2c(C)cc(Cl)cc2c1NCCC[NH+]1CCOCC1,0
COc1ccc(C[NH+]2CCN(Cc3[nH+]ccn3-c3cccc(C)c3)CC2CCO)c(F)c1,0
C[NH+]1CCC[NH+](CC(=O)N2CC(c3ccc(Cl)cc3)C(c3csnn3)C2)CC1,0
CCC(C)c1ccccc1NC(=O)C(C)SC1=[NH+]c2ccccc2C(=O)[N+]1=c1cc(C)cc[nH]1,0
C=C1CCCC2(C)CC3OC(=O)C(C[NH2+]CCC4=c5cc(O)ccc5=[NH+]C4)C3CC12,0
CCOC(=O)C1[NH+]=c2ccc(Br)cc2=C1NC(=O)CC[NH+]1CCC(C)CC1,0
Cc1ccc2c(c1C)=[NH+]C(=O)C(CCNC(=O)C(=O)C1=c3ccccc3=[NH+]C1C)C=2,0
CC1CCCC[NH+]1CCCNC(=O)C1CN(C(=O)C2CCC2)CC12CC[NH2+]CC2,0
CCC(CC)C(=O)N1CC(C(=O)NCCC[NH+]2CCCCC2C)C2(CC[NH2+]CC2)C1,0
CSCCC(=O)N1CC(C(=O)NCCC[NH+]2CCCCC2C)C2(CC[NH2+]CC2)C1,0
C[NH+](C)C(CNC(=O)CCc1[nH+]cc(-c2ccc(F)cc2)o1)c1ccsc1,0
N#Cc1cccc2c1=[NH+]CC=2C1=CC[NH+](CCC2OCCc3cc(C(N)=O)ccc32)CC1,0
Cc1ccc2cc(CNCCC[NH+]3CC4C=CC3C4)c(N3CCC(O)CC3)[nH+]c2c1,0
O=C(NCC1CCC[NH+](Cc2cccc3c2OCO3)C1)C1C=c2ccccc2=[NH+]1,0
CNc1cc(C(=O)N2CCC3(CCC[NH+](Cc4cc(OC)ccc4F)C3)C2)cc[nH+]1,0
Cc1ccc2c(c1C)=[NH+]C(=O)C(CCNC(=O)C(=O)C1=c3ccccc3=[NH+]C1C)C=2,0
CCC[NH+]1CCC(N2CC([NH2+]Cc3ccc(F)cc3F)CC2C(=O)NCC)CC1,0
C[NH+](C)CCCn1c(=O)[nH]c2sc3c(c2c1=[NH2+])CC(C)(C)OC3,0
COc1ccc2c(c1)=CC(CNC(=O)C(C)n1cc[nH+]c1C(C)C)[NH+]=2,0
Cc1ccc(Cn2nc(C)c(C[NH2+]CC(C)Cn3cc[nH+]c3)c2C)cc1,0
CC1CC(C)C[NH+](CCCNC(=O)C2C=C3C(=O)[NH+]=c4ccccc4=C3N2C)C1,0
CCc1ccccc1N1C(=O)CC([NH2+]CCC2=c3cc(OC)ccc3=[NH+]C2)C1=O,0
O=C(NCC1CCC[NH+](Cc2cccc3c2OCO3)C1)C1C=c2ccccc2=[NH+]1,0
CC1(C2CC[NH+](CC3C=c4ccccc4=[NH+]3)CC2)NC(=O)N(Cc2ccsc2)C1=O,0
O=C(C[NH+]1CCCC1C(=O)Nc1ccccc1)C1=c2ccccc2=[NH+]C1,0
Cn1cnnc1CC1CC[NH+](CC2C=c3ccc(F)cc3=[NH+]C2=O)CC1,0
CC[NH+]1CCN(C(=O)c2ccc(N3CCC([NH+]4CCc5sccc5C4)CC3)cc2)CC1,0
CC1CCCC[NH+]1CCC[n+]1c(-c2ccccc2)csc1Nc1cc(F)ccc1F,0
OC1C[NH+](CC=Cc2ccco2)CCC1N1CCN(c2cccc[nH+]2)CC1,0
Cc1ccc2c(c1)=C1SC(C(=O)NCCC[NH+]3CCC(C)CC3)C=C1C(=O)[NH+]=2,0
Cc1ccc2c(c1)=C1SC(C(=O)NCCC[NH+]3C(C)CCCC3C)C=C1C(=O)[NH+]=2,0
O=C(CC1=c2ccccc2=[NH+]C1)N1CCCC(C2[NH+]=C(c3ccc(Cl)cc3)NO2)C1,0
O=C1CC([NH2+]CCC2=c3ccccc3=[NH+]C2)C(=O)N1c1cccc(C(F)(F)F)c1,0
Cc1[nH+]ccn1CC1CC[NH+](Cc2c(-c3ccc(F)cc3)nc3sccn23)CC1,0
Cc1cccc(C(=O)N2CC[NH+](CC[NH+]=c3cc(O)oc4ccccc34)CC2)c1,0
COCCNc1c(C2C=c3cc(C)ccc3=[NH+]C2=O)[nH+]c2cc(C)ccn12,0
Cc1cc(=O)oc2c3c(c(Cl)cc12)OC[NH+](CCCn1cc[nH+]c1)C3,0
Cc1ccc(Cn2nc(C)c(C[NH2+]CC(C)Cn3cc[nH+]c3)c2Cl)cc1,0
COc1ccc(F)c(C[NH+]2CCCC3(CCN(C(=O)C4C=c5ccccc5=[NH+]4)C3)C2)c1,0
CCC1C(C(=O)N2CCC3=c4ccccc4=[NH+]C3C2)C(=O)C=C(C)N1CC1CCC[NH+]1CC,0
CC(=O)Oc1c(C)cc(C[NH2+]CC2C=c3ccc(F)cc3=[NH+]C2=O)cc1C,0
Cc1ccccc1-n1nc(C)c(C[NH+](C)Cc2cn3ccccc3[nH+]2)c1C,0
O=c1c(-c2ccccc2C[NH+]2CCOCC2)ccc2n1CC1CC2C[NH+](CC2CCCCC2)C1,0
Cc1ccc(OCCN(C)C(=O)C2CC23CC[NH+](CC2C=c4ccccc4=[NH+]C2=O)CC3)cc1,0
CCc1cccc2c1=[NH+]CC=2C(=O)CSC1N=NC(C(C)[NH+](C)C)N1c1ccc(F)cc1,0
CC1=CN2C(=NC(C3C=c4cc(C)cc(C)c4=[NH+]C3=O)C2[NH2+]Cc2ccco2)C=C1,0
COc1ccc2c(c1)=[NH+]C(=O)C(C1N=C3C=CC(C)=CN3C1[NH2+]Cc1ccccc1)C=2,0
Cc1c(Cc2ccccc2)c(=O)oc2c3c(c(Cl)cc12)OC[NH+](CCCn1cc[nH+]c1)C3,0
CCOc1cc(C[NH2+]CC2C=c3ccc(F)cc3=[NH+]C2=O)cc(Cl)c1OCC,0
O=C(CC[NH+]1Cc2ccccc2OC(c2ccccc2)C1)NCCC1=c2ccccc2=[NH+]C1,0
Cc1[nH+]ccn1CCC(=O)N1CCC2=c3ccccc3=[NH+]C2C1C1CCCCC1,0
CCOC(=O)C1[NH+]=c2ccc(OC)cc2=C1NC(=O)C(C)[NH+]1CCC(C(N)=O)CC1,0
CCOC(=O)C1[NH+]=c2ccc(OC)cc2=C1NC(=O)C(C)[NH+]1CCC(C(N)=O)CC1,0
Cc1cccc2c1=[NH+]C(CN(C)C(=O)CN1N[NH2+]NC1C[NH+](C)C(C)C)[NH+]=2,0
O=c1c(-c2ccccc2C[NH+]2CCOCC2)ccc2n1CC1CC2C[NH+](C2CCOCC2)C1,0
COc1cc(C[NH+]2CCC3(CCC[NH+](CC4CCC4)C3)C2)cc2c1OCO2,0
Cc1ccc2c(c1)C[NH+](CC(=O)N1CCC([NH+]3CCOCC3)CC1)CC(c1ccsc1)O2,0
C[NH+]1CCC2(CCN(C(=O)c3ccc(OC4CC[NH+](CCc5ccccc5)CC4)cc3)C2)C1,0
CCOC(=O)C1[NH+]=c2ccc(F)cc2=C1NC(=O)C(C)[NH+]1CCCCC1,0
O=C(CCC1=c2ccccc2=[NH+]C1)NCc1ccc(N2CCOCC2)[nH+]c1,0
Cc1ccc2c(c1)=C1SC(C(=O)NCCC[NH+](C)Cc3ccccc3)C=C1C(=O)[NH+]=2,0
COc1ccc2c(c1)=C(CC[NH2+]C1CC(=O)N(c3cccc4ccccc34)C1=O)C[NH+]=2,0
O=C(NCC1CCC[NH+](C2CSCCSC2)C1)C1C=c2ccccc2=[NH+]1,0
Cc1ccc2occ(C[NH+]3CCCC(c4ccnc(SCC[NH+]5CCCC5)n4)C3)c(=O)c2c1,0
COc1ccc(-c2nn(-c3ccccc3C)cc2C[NH2+]CCC2CCC[NH+]2C)cc1F,0
CCC[NH+]1CCC(N2CCC3(CCC[NH+](Cc4cccc(F)c4F)C3)C2)CC1,0
O=C(c1ccc(Cl)cc1)N1CC[NH+](CC[NH+]=c2cc(O)oc3ccccc23)CC1,0
O=c1cc(C[NH+]2CCN(c3ccc(O)cc3)CC2)[nH+]c2scc(-c3ccccc3)n12,0
O=C(CSC1N[NH+]=C(N2CCCCC2)N1c1ccccc1)C1=c2ccccc2=[NH+]C1,0
CC([NH2+]CC(C1=c2ccccc2=[NH+]C1)C1CC=CS1)C(=O)N1CCc2ccccc21,0
O=C(CC1=c2ccccc2=[NH+]C1)NCC[NH+]1Cc2ccccc2OC(c2ccccc2)C1,0
Cc1[nH+]ccn1CCC(=O)N1CCC2=c3ccccc3=[NH+]C2C1C1CCCCC1,0
O=C1C2C3=C(CC([NH2+]CCC4=c5ccccc5=[NH+]C4)CC3)SC2N=CN1Cc1cccc(F)c1,0
COc1ccc2c(c1)=[NH+]C(CN1CCN(C(=O)C3(c4cccs4)CCCC3)CC1)[NH+]=2,0
CCC1CCCCN1C(=O)C(C)[NH2+]CC(C1=c2ccccc2=[NH+]C1)C1CC=CS1,0
COc1ccc(-c2nn(-c3ccccc3)cc2C[NH+]2CCN(c3cc[nH+]cc3)CC2)c(F)c1,0
O=C(NCCCN1CCC([NH+]2CCCCC2)CC1)C1CCCN(c2[nH+]ccc3sccc23)C1,0
Nc1c2c(-c3ccccc3)c(-c3ccccc3)n(Cc3ccccc3)c2nc[n+]1CCCn1cc[nH+]c1,0
Cc1ccc2c(c1)=C1SC(C(=O)NCCC[NH+]3CC(C)CC(C)C3)C=C1C(=O)[NH+]=2,0
CC1(C2CC[NH+](CC3C=c4ccccc4=[NH+]3)CC2)NC(=O)N(Cc2ccsc2)C1=O,0
O=C(Nc1cccc(C2C=c3ccccc3=[NH+]2)c1)C1CCC[NH+](Cc2ccc(CO)o2)C1,0
Cc1c(C[NH+]2CCCC(C(=O)Nc3cccc(C4C=c5ccccc5=[NH+]4)c3)C2)cnn1C,0
COc1ccc(C[NH+]2Cc3cc(C)ccc3OC(c3ccsc3)C2)c(OC)c1OC,0
COc1ccccc1-c1nn(C[NH+](Cc2c(F)cccc2Cl)C2CC2)c(=S)n1C,0
CC(C)c1ccccc1-n1c(-c2cccnc2)nn(C[NH+]2CCc3sccc3C2)c1=S,0
CC1(C)CC(C[NH+]2CCC(Oc3cc(Cl)ccc3C(=O)N3CCCCC3)CC2)CCO1,0
COc1ccc(C[NH+]2CCCn3cccc3C2c2cccc(Cl)c2)c(OC)c1OC,0
CC(c1cccs1)[NH+](C)C1CCC(=O)N(Cc2cc3c(cc2Cl)OCO3)CC1,0
COc1cc2c(cc1OC)C(c1cccs1)N(Cc1[nH+]cc(C)c(OC)c1C)CC2,0
CCOC(=O)C1CCCN(C(=O)CC2=c3ccccc3=[NH+]C2Sc2ccccc2)C1,0
C[NH+](C)CCN1C(=O)c2oc3ccc(Cl)cc3c(=O)c2C1c1cccs1,0
N#Cc1cccc(C[NH+]2CC3CC(C2)c2c(-c4ccco4)ccc(=O)n2C3)c1,0
C[NH+](C)CCN1C(=O)c2oc3ccc(Cl)cc3c(=O)c2C1c1cccc(F)c1,0
O=C(C=Cc1ccco1)N1CCCC([NH+]2CCN(c3ccccc3F)CC2)C1,0
COc1cc(-c2nc(C[NH+]3CCCCC3c3nccs3)c(C)o2)ccc1F,0
COC(=O)C(c1ccc(Cl)cc1)[NH+]1CCN(C(=O)c2sccc2C)CC1,0
Cn1c[nH+]cc1CN1CCC2=NN=C(C3(c4ccc(Cl)cc4)CCCC3)C2C1,0
CCOc1ccc(C2C3N=C4C=CC=CC4C3CC[NH+]2CC2C=CN=N2)cc1,0
Cn1cc(C[NH+]2CC3(C)CC2CC(C)(C)C3)c(-c2ccc3c(c2)OCCO3)n1,0
C=C1CCCC2(C)CC3OC(=O)C(C[NH+]4CCC(C(=O)OCC)CC4)C3CC12,0
Cn1c(-c2ccco2)nn(C[NH+]2CCCC2Cc2cccc(F)c2)c1=S,0
Cc1ccc(C)c(OC2CC[NH+](C(C)c3nnc(-c4cccs4)o3)CC2)c1,0
Cc1c(CCC(C)C)c(NCCC[NH+]2CCOCC2)[n+]2c([nH]c3ccccc32)c1C#N,0
COc1cc(C(=O)NCCC2CCC[NH+]2C)ccc1OC1CC[NH+](C2CCCC2)CC1,0
CCOC(=O)C1[NH+]=c2ccc(Br)cc2=C1NC(=O)CC[NH+](C)C,0
CC[NH+](CC)CCCNC(=O)C1C=C2C(=O)[NH+]=c3ccccc3=C2N1C,0
CN1C2=c3ccccc3=[NH+]C(=O)C2=CC1C(=O)NCCC[NH+]1CCCC1,0
O=C1CC(C(F)F)=NC(c2ccccc2C[NH+]2CCC(Cn3cc[nH+]c3)CC2)=N1,0
C[NH+](C)CCN1CC(C(=O)NCC2C=c3cc(F)ccc3=[NH+]2)CCC1=O,0
O=C(C[NH+]1CCCC1C(=O)Nc1ccccc1)C1=c2ccccc2=[NH+]C1,0
Cn1cnnc1CC1CC[NH+](CC2C=c3ccc(F)cc3=[NH+]C2=O)CC1,0
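The QSAR files above are two-column CSVs: a canonical SMILES and a binary activity label. A minimal loading sketch, assuming only the pandas, numpy, and RDKit versions pinned in data/env.yml; the Morgan-fingerprint featurization here is illustrative, not necessarily what the repository's QSAR pickles were trained with:

# Sketch: load a QSAR CSV and featurize with 2048-bit Morgan fingerprints.
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem

df = pd.read_csv('data/data/QSAR/CXCR4/cxcr4_test.csv')  # columns: canonical, activity
mols = [Chem.MolFromSmiles(s) for s in df['canonical']]
assert all(m is not None for m in mols), 'every row should parse as a molecule'
X = np.array([list(AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=2048)) for m in mols])
y = df['activity'].values  # 1 = active, 0 = inactive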
data/data/QSAR/CXCR4/cxcr4_train.csv ADDED (diff too large to render; see raw diff)
data/data/QSAR/DRD2/drd2_test.csv ADDED (diff too large to render; see raw diff)
data/data/QSAR/DRD2/drd2_train.csv ADDED @@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:47c9146a2736905da6cb08178d330ac7167f53989459b28ad5581fc1f7cb040b
size 12970112
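The three-line entries above and below are Git LFS pointer files: the repository stores only the object's SHA-256 id and byte size, and the real data is fetched by Git LFS as configured in .gitattributes at the top of this commit. A small sketch, hypothetical but using only standard-library calls, for checking a downloaded file against its pointer:

# Sketch: verify a fetched LFS object matches the oid/size in its pointer.
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    h = hashlib.sha256()
    size = 0
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):  # 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

# e.g. verify_lfs_object('data/data/QSAR/DRD2/drd2_train.csv',
#                        '47c9146a2736905da6cb08178d330ac7167f53989459b28ad5581fc1f7cb040b',
#                        12970112)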
data/data/USPTO/src_test.txt ADDED (diff too large to render; see raw diff)
data/data/USPTO/src_train.txt ADDED @@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3984cf01980610f69e07336dfcc32de8268e075c678c5b46a1fda6c01b66349d
size 28402794
data/data/USPTO/src_valid.txt ADDED (diff too large to render; see raw diff)
data/data/USPTO/tgt_test.txt ADDED (diff too large to render; see raw diff)
data/data/USPTO/tgt_train.txt ADDED @@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ebb47dc821e1f27079c97b8aab778ab2de874badee069a248cd7f45be809f14
size 30955321
data/data/USPTO/tgt_valid.txt ADDED (diff too large to render; see raw diff)
data/data/beamsearch_template_list.txt ADDED @@ -0,0 +1,575 @@
9
10
11
15
16
17
18
26
31
33
34
35
36
37
38
39
42
43
44
45
49
50
51
52
54
55
57
60
61
62
63
64
67
68
70
73
74
75
76
77
78
79
83
85
94
95
99
101
102
104
105
107
108
110
111
113
115
119
120
123
124
125
126
127
129
130
131
132
133
134
135
137
138
144
145
146
147
148
149
153
154
156
159
160
161
162
165
166
167
169
170
174
176
184
185
187
194
196
197
198
201
206
208
209
210
211
213
214
216
219
220
221
227
230
232
233
235
236
238
240
241
244
248
250
252
256
257
258
261
267
270
271
272
273
275
276
277
279
280
284
285
289
294
295
296
297
298
299
302
304
305
308
309
310
314
315
321
323
326
327
329
330
331
332
333
334
336
338
340
342
346
347
349
351
352
353
354
355
359
364
365
367
368
371
375
379
380
382
385
389
392
393
398
399
405
406
407
411
413
414
416
422
425
426
427
429
433
434
435
437
438
439
440
441
442
443
446
450
454
455
458
459
460
461
462
463
464
467
468
469
470
473
474
478
479
482
483
484
485
487
488
493
494
495
496
498
499
501
502
503
505
508
509
510
511
512
513
514
515
516
517
519
521
523
524
525
526
527
528
530
534
536
539
540
541
542
544
547
548
549
550
551
553
554
556
557
558
559
563
564
565
566
567
572
577
578
579
580
583
584
585
586
587
589
592
593
596
597
598
599
600
601
603
604
605
606
607
608
609
611
612
613
614
616
621
623
627
629
633
635
636
637
640
644
645
646
647
648
649
651
652
653
656
657
658
659
662
663
664
666
667
668
669
673
674
675
676
678
679
681
683
685
688
690
691
692
693
696
697
699
700
701
702
709
710
712
714
715
716
718
719
721
722
723
724
725
727
728
729
733
734
736
737
738
739
741
744
747
749
751
753
756
757
759
760
763
764
765
766
767
769
770
773
774
775
776
777
778
780
785
787
788
790
791
792
794
795
797
798
799
801
802
804
808
809
810
811
814
816
817
818
819
821
822
823
824
828
829
833
835
836
837
842
844
845
846
851
853
855
857
858
859
860
861
862
863
864
865
866
868
869
870
871
872
873
874
875
876
878
879
880
882
883
884
886
888
889
890
891
892
893
894
895
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
916
917
918
919
920
921
922
924
925
926
927
928
929
931
932
933
936
937
938
939
941
944
946
947
948
950
951
952
953
954
955
957
958
960
961
962
963
965
967
968
969
970
971
972
973
974
975
976
977
980
982
984
985
986
987
989
990
991
992
993
994
995
996
997
998
999
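beamsearch_template_list.txt holds the indices of the reaction templates annotated for wide beam search. scripts/beam_search.py (included later in this commit) reads it through cfg['translate']['annotated_templates'] and narrows the beam to beam_width=5, nbest=1 for any predicted template not in this list. A minimal sketch of that gating logic:

# Sketch mirroring the template gate in beam_decode() (data/scripts/beam_search.py).
with open('data/data/beamsearch_template_list.txt') as f:
    beam_templates = f.read().splitlines()  # e.g. ['9', '10', '11', ...]

template_idx = '26'  # digits extracted from a '[26]' template tag
beam_width, nbest = (10, 5) if template_idx in beam_templates else (5, 1)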
data/data/input/init_smiles_akt1.txt ADDED @@ -0,0 +1,5 @@
CC1CCCN1C1CCNC1
N#Cc1ncc(C(F)(F)F)cc1N
N#Cc1cnc2c(c1)CCC2=O
Nc1ccc(Oc2cccnc2)cc1
N#Cc1ccc(-c2cnco2)cc1
data/data/input/init_smiles_cxcr4.txt ADDED @@ -0,0 +1,5 @@
NCC(O)c1ccccc1
Cc1cn(C)c(CSCCN)n1
NCc1ccc2ccccc2c1
NNc1cccc2ncccc12
NCCc1ccccn1
data/data/input/init_smiles_drd2.txt ADDED @@ -0,0 +1,5 @@
OC1CCc2cc(F)ccc21
c1ccc2c(c1)CC1CNCCN21
NC1CCN(CCc2ccccc2)C1
Oc1ccc2c(c1)CNCC2
N#Cc1cccc(N2CCNCC2)c1
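Each init_smiles_*.txt file lists five starting molecules for generation. A small sanity-check sketch (an assumption, not repository code) that they parse under the RDKit version pinned in data/env.yml and shows their canonical forms:

# Sketch: parse and canonicalize the initial SMILES with RDKit.
from rdkit import Chem

with open('data/data/input/init_smiles_drd2.txt') as f:
    smiles = [line.strip() for line in f if line.strip()]

for smi in smiles:
    mol = Chem.MolFromSmiles(smi)
    assert mol is not None, f'unparsable SMILES: {smi}'
    print(smi, '->', Chem.MolToSmiles(mol))  # canonical SMILES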
data/data/input/test.txt ADDED @@ -0,0 +1 @@
C c 1 c c c 2 [nH] c3 c ( c 2 c 1 ) C N ( C ) C C 3
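test.txt stores a SMILES pre-tokenized with spaces, the input format the Transformer scripts build from smi_tokenizer (imported from Utils/utils.py in beam_search.py below). That implementation is not shown in this excerpt; a common regex-based SMILES tokenizer that produces this kind of spacing looks like the following sketch (an assumption, not the repository's code):

# Sketch: regex-based SMILES tokenizer producing space-joined tokens.
import re

SMI_PATTERN = re.compile(
    r"(\[[^\]]+\]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|/|:|~|@|\?|>|\*|\$|%[0-9]{2}|[0-9])"
)

def smi_tokenizer_sketch(smi: str) -> str:
    tokens = SMI_PATTERN.findall(smi)
    assert smi == ''.join(tokens), 'tokenization must be lossless'
    return ' '.join(tokens)

print(smi_tokenizer_sketch('Cc1ccc2[nH]c3c(c2c1)CN(C)CC3'))
# -> C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3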
data/data/input/unseen_ZINC_AKT1.txt ADDED @@ -0,0 +1 @@
Brc1ccccn1
data/data/input/unseen_ZINC_CXCR4.txt ADDED @@ -0,0 +1 @@
O=C1N=C(O)C(Br)N1
data/data/input/unseen_ZINC_DRD2.txt ADDED @@ -0,0 +1 @@
Brc1ccccn1
data/data/label_template.json ADDED (diff too large to render; see raw diff)
data/env.yml ADDED @@ -0,0 +1,24 @@
name: tracer
channels:
  - pyg
  - pytorch
  - nvidia
  - conda-forge
  - defaults
dependencies:
  - mkl==2024.0
  - numpy=1.23.5
  - pandas=1.5.3
  - pip=23.0.1
  - pyg=2.3.0=py310_torch_2.0.0_cu118
  - python=3.10.10
  - pytorch=2.0.1=py3.10_cuda11.8_cudnn8.7.0_0
  - pytorch-cuda=11.8
  - rdkit=2022.03.2
  - torchtext=0.15.2
  - tqdm=4.65.0
  - pip:
    - hydra-core==1.3.2
    - json5==0.9.14
    - omegaconf==2.3.0
    - scikit-learn==1.2.2
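The environment above can be reproduced with conda, e.g. conda env create -f data/env.yml followed by conda activate tracer (the name field). The pytorch and pyg packages are pinned to CUDA 11.8 builds, so a compatible NVIDIA driver is assumed; on CPU-only machines those pins would need to be swapped for CPU builds.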
data/scripts/beam_search.py ADDED @@ -0,0 +1,300 @@
1 |
+
import os
|
2 |
+
import operator
|
3 |
+
import itertools
|
4 |
+
import re
|
5 |
+
import json
|
6 |
+
import hydra
|
7 |
+
from tqdm.auto import tqdm
|
8 |
+
from config.config import cs
|
9 |
+
from omegaconf import DictConfig
|
10 |
+
|
11 |
+
import rdkit.Chem as Chem
|
12 |
+
from rdkit.Chem import AllChem
|
13 |
+
|
14 |
+
import torch
|
15 |
+
import torchtext.vocab.vocab as Vocab
|
16 |
+
import torch.nn.functional as F
|
17 |
+
|
18 |
+
from Model.Transformer.model import Transformer
|
19 |
+
from scripts.preprocess import make_counter ,make_transforms
|
20 |
+
from Utils.utils import smi_tokenizer
|
21 |
+
from Model.GCN import network
|
22 |
+
from Model.GCN.utils import template_prediction, check_templates
|
23 |
+
|
24 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
25 |
+
|
26 |
+
with open('./data/label_template.json') as f:
|
27 |
+
r_dict = json.load(f)
|
28 |
+
|
29 |
+
class BeamSearchNode(object):
|
30 |
+
def __init__(self, previousNode, decoder_input, logProb, length):
|
31 |
+
self.prevNode = previousNode
|
32 |
+
self.dec_in = decoder_input
|
33 |
+
self.logp = logProb
|
34 |
+
self.leng = length
|
35 |
+
|
36 |
+
def eval(self, alpha=0.6):
|
37 |
+
return self.logp / (((5 + self.leng) / (5 + 1)) ** alpha)
|
38 |
+
|
39 |
+
def check_templates(indices, input_smi):
|
40 |
+
matched_indices = []
|
41 |
+
input_smi = input_smi.replace(' ','')
|
42 |
+
molecule = Chem.MolFromSmiles(input_smi)
|
43 |
+
for i in indices:
|
44 |
+
idx = str(i.item())
|
45 |
+
rsmi = r_dict[idx]
|
46 |
+
rxn = AllChem.ReactionFromSmarts(rsmi)
|
47 |
+
reactants = rxn.GetReactants()
|
48 |
+
flag = False
|
49 |
+
for reactant in reactants:
|
50 |
+
if molecule.HasSubstructMatch(reactant):
|
51 |
+
flag = True
|
52 |
+
if flag == True:
|
53 |
+
matched_indices.append(f'[{i.item()}]')
|
54 |
+
return matched_indices # ['[0]', '[123]', ... '[742]']
|
55 |
+
|
56 |
+
def beam_decode(v:Vocab, model=None, input_tokens=None, template_idx=None,
|
57 |
+
device=None, inf_max_len=None, beam_width=10, nbest=5, Temp=None,
|
58 |
+
beam_templates:list=None):
|
59 |
+
|
60 |
+
SOS_token = v['<bos>']
|
61 |
+
EOS_token = v['<eos>']
|
62 |
+
if template_idx is not None:
|
63 |
+
template_idx = re.sub(r'\D', '', template_idx)
|
64 |
+
if template_idx not in beam_templates:
|
65 |
+
beam_width = 5
|
66 |
+
nbest = 1
|
67 |
+
|
68 |
+
# A batch of one input for Encoder
|
69 |
+
encoder_input = input_tokens
|
70 |
+
|
71 |
+
# Generate encoded features
|
72 |
+
with torch.no_grad():
|
73 |
+
encoder_input = encoder_input.unsqueeze(-1) # (seq, 1), batch_size=1
|
74 |
+
encoder_output, memory_pad_mask = model.encode(encoder_input, src_pad_mask=True) # encoder_output.shape: (seq, 1, d_model)
|
75 |
+
|
76 |
+
# Start with the start of the sentence token
|
77 |
+
decoder_input = torch.tensor([[SOS_token]]) # (1,1)
|
78 |
+
|
79 |
+
# Starting node
|
80 |
+
counter = itertools.count()
|
81 |
+
|
82 |
+
node = BeamSearchNode(previousNode=None,
|
83 |
+
decoder_input=decoder_input,
|
84 |
+
logProb=0, length=0)
|
85 |
+
|
86 |
+
with torch.no_grad():
|
87 |
+
tgt_mask = torch.nn.Transformer.generate_square_subsequent_mask(decoder_input.size(1)).to(device)
|
88 |
+
logits = model.decode(memory=encoder_output, tgt=decoder_input.permute(1, 0).to(device), tgt_mask=tgt_mask, memory_pad_mask=memory_pad_mask)
|
89 |
+
logits = logits.permute(1, 0, 2) # logits: (seq, 1, vocab) -> (1, seq, vocab), batch=1
|
90 |
+
decoder_output = torch.log_softmax(logits[:, -1, :]/Temp, dim=1).to('cpu') # (1, vocab)
|
91 |
+
|
92 |
+
tmp_beam_width = min(beam_width, decoder_output.size(1))
|
93 |
+
log_prob, indices = torch.topk(decoder_output, tmp_beam_width) # (tmp_beam_with,)
|
94 |
+
nextnodes = []
|
95 |
+
for new_k in range(tmp_beam_width):
|
96 |
+
decoded_t = indices[0][new_k].view(1, -1)
|
97 |
+
log_p = log_prob[0][new_k].item()
|
98 |
+
next_decoder_input = torch.cat([node.dec_in, decoded_t],dim=1) # dec_in:(1, seq)
|
99 |
+
nn = BeamSearchNode(previousNode=node,
|
100 |
+
decoder_input=next_decoder_input,
|
101 |
+
logProb=node.logp + log_p,
|
102 |
+
length=node.leng + 1)
|
103 |
+
score = -nn.eval()
|
104 |
+
count = next(counter)
|
105 |
+
nextnodes.append((score, count, nn))
|
106 |
+
|
107 |
+
# start beam search
|
108 |
+
for i in range(inf_max_len - 1):
|
109 |
+
# fetch the best node
|
110 |
+
if i == 0:
|
111 |
+
current_nodes = sorted(nextnodes)[:tmp_beam_width]
|
112 |
+
else:
|
113 |
+
current_nodes = sorted(nextnodes)[:beam_width]
|
114 |
+
|
115 |
+
nextnodes=[]
|
116 |
+
# current_nodes = [(score, count, node), (score, count, node)...], shape:(beam_width,)
|
117 |
+
scores, counts, nodes, decoder_inputs = [], [], [], []
|
118 |
+
|
119 |
+
for score, count, node in current_nodes:
|
120 |
+
if node.dec_in[0][-1].item() == EOS_token:
|
121 |
+
nextnodes.append((score, count, node))
|
122 |
+
else:
|
123 |
+
scores.append(score)
|
124 |
+
counts.append(count)
|
125 |
+
nodes.append(node)
|
126 |
+
decoder_inputs.append(node.dec_in)
|
127 |
+
if not bool(decoder_inputs):
|
128 |
+
break
|
129 |
+
|
130 |
+
decoder_inputs = torch.vstack(decoder_inputs) # (batch=beam, seq)
|
131 |
+
|
132 |
+
# adjust batch_size
|
133 |
+
enc_out = encoder_output.repeat(1, decoder_inputs.size(0), 1)
|
134 |
+
mask = memory_pad_mask.repeat(decoder_inputs.size(0), 1)
|
135 |
+
|
136 |
+
with torch.no_grad():
|
137 |
+
tgt_mask = torch.nn.Transformer.generate_square_subsequent_mask(decoder_inputs.size(1)).to(device)
|
138 |
+
logits = model.decode(memory=enc_out, tgt=decoder_inputs.permute(1, 0).to(device), tgt_mask=tgt_mask, memory_pad_mask=mask)
|
139 |
+
logits = logits.permute(1, 0, 2) # logits: (seq, batch, vocab) -> (batch, seq, vocab)
|
140 |
+
decoder_output = torch.log_softmax(logits[:, -1, :]/Temp, dim=1).to('cpu') # extract log_softmax of last token
|
141 |
+
# decoder_output.shape = (batch, vocab)
|
142 |
+
|
143 |
+
for beam, score in enumerate(scores):
|
144 |
+
for token in range(EOS_token, decoder_output.size(-1)): # remove unk, pad, bosは最初から捨てる
|
145 |
+
decoded_t = torch.tensor([[token]])
|
146 |
+
log_p = decoder_output[beam, token].item()
|
147 |
+
next_decoder_input = torch.cat([nodes[beam].dec_in, decoded_t],dim=1)
|
148 |
+
node = BeamSearchNode(previousNode=nodes[beam],
|
149 |
+
decoder_input=next_decoder_input,
|
150 |
+
logProb=nodes[beam].logp + log_p,
|
151 |
+
length=nodes[beam].leng + 1)
|
152 |
+
score = -node.eval()
|
153 |
+
count = next(counter)
|
154 |
+
nextnodes.append((score, count, node))
|
155 |
+
|
156 |
+
outputs = []
|
157 |
+
for score, _, n in sorted(nextnodes, key=operator.itemgetter(0))[:nbest]:
|
158 |
+
# endnodes = [(score, node), (score, node)...]
|
159 |
+
output = n.dec_in.squeeze(0).tolist()[1:-1] # remove bos and eos
|
160 |
+
output = v.lookup_tokens(output)
|
161 |
+
output = ''.join(output)
|
162 |
+
outputs.append(output)
|
163 |
+
return outputs
|
164 |
+
|
165 |
+
def greedy_translate(v:Vocab, model=None, input_tokens=None, device=None, inf_max_len=None):
|
166 |
+
'''
|
167 |
+
in:
|
168 |
+
input_tokens: (seq, batch)
|
169 |
+
|
170 |
+
out:
|
171 |
+
outputs: list of SMILES(str).
|
172 |
+
'''
|
173 |
+
|
174 |
+
SOS_token = v['<bos>']
|
175 |
+
EOS_token = v['<eos>']
|
176 |
+
|
177 |
+
# A batch of one input for Encoder
|
178 |
+
encoder_input = input_tokens.permute(1, 0) # (batch,seq) -> (seq, batch)
|
179 |
+
|
180 |
+
# Generate encoded features
|
181 |
+
with torch.no_grad():
|
182 |
+
enc_out, memory_pad_mask = model.encode(encoder_input, src_pad_mask=True) # encoder_output.shape: (seq, 1, d_model)
|
183 |
+
|
184 |
+
# Start with the SOS token
|
185 |
+
dec_inp = torch.tensor([[SOS_token]]).expand(1, encoder_input.size(1)).to(device) # (1, batch)
|
186 |
+
EOS_dic = {i:False for i in range(encoder_input.size(1))}
|
187 |
+
|
188 |
+
for i in range(inf_max_len - 1):
|
189 |
+
tgt_mask = torch.nn.Transformer.generate_square_subsequent_mask(dec_inp.size(0)).to(device)
|
190 |
+
        logits = model.decode(memory=enc_out, tgt=dec_inp, tgt_mask=tgt_mask, memory_pad_mask=memory_pad_mask)
        dec_out = F.softmax(logits[-1, :, :], dim=1)  # softmax of the last token only, (batch, vocab)
        next_items = dec_out.topk(1)[1].permute(1, 0)  # (seq, batch) -> (batch, seq)
        EOS_indices = (next_items == EOS_token)
        # update EOS_dic
        for j, EOS in enumerate(EOS_indices[0]):
            if EOS:
                EOS_dic[j] = True

        dec_inp = torch.cat([dec_inp, next_items], dim=0)
        if sum(list(EOS_dic.values())) == encoder_input.size(1):
            break
    out = dec_inp.permute(1, 0).to('cpu')  # (seq, batch) -> (batch, seq)
    outputs = []
    for i in range(out.size(0)):
        out_tokens = v.lookup_tokens(out[i].tolist())
        try:
            eos_idx = out_tokens.index('<eos>')
            out_tokens = out_tokens[1:eos_idx]
            outputs.append(''.join(out_tokens))
        except ValueError:
            continue

    return outputs


def translate(cfg: DictConfig):
    print('Loading...')
    # make transforms and vocabulary
    src_train_path = hydra.utils.get_original_cwd() + cfg['translate']['src_train']
    tgt_train_path = hydra.utils.get_original_cwd() + cfg['translate']['tgt_train']
    src_valid_path = hydra.utils.get_original_cwd() + cfg['translate']['src_valid']
    tgt_valid_path = hydra.utils.get_original_cwd() + cfg['translate']['tgt_valid']
    data_dict = make_counter(src_train_path=src_train_path,
                             tgt_train_path=tgt_train_path,
                             src_valid_path=src_valid_path,
                             tgt_valid_path=tgt_valid_path)
    src_transforms, _, v = make_transforms(data_dict=data_dict, make_vocab=True, vocab_load_path=None)

    # load model
    d_model = cfg['model']['dim_model']
    num_encoder_layers = cfg['model']['num_encoder_layers']
    num_decoder_layers = cfg['model']['num_decoder_layers']
    nhead = cfg['model']['nhead']
    dropout = cfg['model']['dropout']
    dim_ff = cfg['model']['dim_ff']
    model = Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers,
                        num_decoder_layers=num_decoder_layers, dim_feedforward=dim_ff,
                        vocab=v, dropout=dropout, device=device).to(device)
    ckpt = torch.load(hydra.utils.get_original_cwd() + cfg['model']['ckpt'], map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    model.eval()

    # make dataset
    src = []
    src_test_path = hydra.utils.get_original_cwd() + cfg['translate']['src_test_path']
    with open(src_test_path, 'r') as f:
        for line in f:
            src.append(line.rstrip())

    dim_GCN = cfg['GCN_train']['dim']
    n_conv_hidden = cfg['GCN_train']['n_conv_hidden']
    n_mlp_hidden = cfg['GCN_train']['n_mlp_hidden']
    GCN_model = network.MolecularGCN(dim=dim_GCN,
                                     n_conv_hidden=n_conv_hidden,
                                     n_mlp_hidden=n_mlp_hidden,
                                     dropout=dropout).to(device)
    GCN_ckpt = hydra.utils.get_original_cwd() + cfg['translate']['GCN_ckpt']
    GCN_model.load_state_dict(torch.load(GCN_ckpt))
    GCN_model.eval()

    out_dir = cfg['translate']['out_dir']
    beam_width = cfg['translate']['beam_size']
    nbest = cfg['translate']['nbest']
    inf_max_len = cfg['translate']['inf_max_len']
    GCN_num_sampling = cfg['translate']['GCN_num_sampling']
    with open(hydra.utils.get_original_cwd() + cfg['translate']['annotated_templates'], 'r') as f:
        beam_templates = f.read().splitlines()
    print(f'Number of GCN samplings: {GCN_num_sampling}')
    print('Start translation...')
    rsmis = []
    for input_smi in tqdm(src):
        input_smi = input_smi.replace(' ', '')
        indices = template_prediction(GCN_model=GCN_model, input_smi=input_smi,
                                      num_sampling=GCN_num_sampling, GCN_device=device)
        matched_indices = check_templates(indices, input_smi)
        print(f"{len(matched_indices)} reaction templates are matched for '{input_smi}'.")
        with torch.no_grad():
            for i in matched_indices:
                input_conditional = smi_tokenizer(i + input_smi).split(' ')
                input_tokens = src_transforms(input_conditional).to(device)
                outputs = beam_decode(v=v, model=model, input_tokens=input_tokens, template_idx=i,
                                      device=device, inf_max_len=inf_max_len, beam_width=beam_width,
                                      nbest=nbest, Temp=1, beam_templates=beam_templates)
                for output in outputs:
                    output = smi_tokenizer(output)
                    rsmis.append(i + ' ' + smi_tokenizer(input_smi) + ' >> ' + output)

    # set output file name
    os.makedirs(hydra.utils.get_original_cwd() + out_dir, exist_ok=True)
    with open(hydra.utils.get_original_cwd() + f'{out_dir}/out_beam{beam_width}_best{nbest}2.txt', 'w') as f:
        for rsmi in rsmis:
            f.write(rsmi + '\n')


@hydra.main(config_path=None, config_name='config', version_base=None)
def main(cfg: DictConfig):
    translate(cfg)


if __name__ == '__main__':
    main()
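An aside on the greedy decoder above: generation only stops once every sequence in the batch has emitted <eos>, which is what the EOS_dic bookkeeping tracks. A minimal, self-contained sketch of that logic in isolation (the batch size and token ids here are hypothetical, not the repository's vocabulary):

import torch

EOS_token = 3                     # hypothetical <eos> id
batch_size = 3
EOS_dic = {j: False for j in range(batch_size)}

# Pretend the decoder just emitted these token ids, shape (1, batch).
next_items = torch.tensor([[3, 5, 3]])
for j, EOS in enumerate(next_items[0] == EOS_token):
    if EOS:
        EOS_dic[j] = True

# Decoding stops only when every sequence has produced <eos>.
done = sum(EOS_dic.values()) == batch_size
print(EOS_dic, done)  # {0: True, 1: False, 2: True} False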
data/scripts/gcn_train.py
ADDED
@@ -0,0 +1,161 @@
import os
import torch
from torch_geometric.data import DataLoader
import torch.nn.functional as F

from Model.GCN import mol2graph
from Model.GCN.callbacks import EarlyStopping
from Model.GCN.network import MolecularGCN
from Model.GCN.utils import get_data

import hydra
import datetime
from config.config import cs
from omegaconf import DictConfig, OmegaConf
from tqdm.auto import tqdm


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
date = datetime.datetime.now().strftime('%Y%m%d')


def train(model, optimizer, loader):
    model.train()
    loss_all = 0
    for data in loader:
        optimizer.zero_grad()
        data = data.to(device)
        output = model.forward(data.x, data.edge_index, data.batch).squeeze(1)
        loss = F.cross_entropy(output, data.y)
        loss.backward()
        loss_all += loss.item() * data.num_graphs
        optimizer.step()

    return loss_all / len(loader)


def eval(model, loader, ks=None):
    model.eval()
    score_list = []
    with torch.no_grad():
        loss_all = 0
        for data in loader:
            data = data.to(device)
            output = model.forward(data.x, data.edge_index, data.batch)  # output.shape = (batch_size, vocab_size)
            loss = F.cross_entropy(output, data.y)
            loss_all += loss.item() * data.num_graphs
            if ks is not None:
                for k in ks:
                    score_list.append(topk_accuracy(data, output, k))
    return loss_all / len(loader), score_list


def topk_accuracy(data, output, k: int):
    _, pred = output.topk(k, 1, True, True)  # (k, dim=1, largest=True, sorted=True)
    pred = pred.t()  # (batch, maxk) -> (maxk, batch)
    correct = pred.eq(data.y.unsqueeze(0).expand_as(pred))  # target: (batch,) -> (1, batch) -> (maxk, batch)
    # Tensor.eq computes element-wise equality; correct is a bool matrix
    score = correct.float().sum() / len(data)
    score = score.detach().item()
    return score


@hydra.main(config_path=None, config_name='config', version_base=None)
def main(cfg: DictConfig):
    print('Loading data...')
    train_path = cfg['GCN_train']['train']
    valid_path = cfg['GCN_train']['valid']
    test_path = cfg['GCN_train']['test']
    batch_size = cfg['GCN_train']['batch_size']
    dim = cfg['GCN_train']['dim']
    n_conv_hidden = cfg['GCN_train']['n_conv_hidden']
    n_mlp_hidden = cfg['GCN_train']['n_mlp_hidden']
    dropout = cfg['GCN_train']['dropout']
    lr = cfg['GCN_train']['lr']
    epochs = cfg['GCN_train']['epochs']
    patience = cfg['GCN_train']['patience']
    save_path = cfg['GCN_train']['save_path']
    ks = [1, 3, 5, 10]

    mols_train, y_train = get_data(hydra.utils.get_original_cwd() + train_path)
    mols_valid, y_valid = get_data(hydra.utils.get_original_cwd() + valid_path)

    print('-' * 100)
    print('Training: ', mols_train.shape)
    print('Validation: ', mols_valid.shape)
    print('-' * 100)

    labels = y_train.tolist() + y_valid.tolist()

    # Mol to Graph
    print('Converting mol to graph...')
    X_train = [mol2graph.mol2vec(m) for m in tqdm(mols_train.tolist())]
    for i, data in enumerate(X_train):
        data.y = torch.LongTensor([y_train[i]]).to(device)
    X_valid = [mol2graph.mol2vec(m) for m in tqdm(mols_valid.tolist())]
    for i, data in enumerate(X_valid):
        data.y = torch.LongTensor([y_valid[i]]).to(device)
    train_loader = DataLoader(X_train, batch_size=batch_size, shuffle=True, drop_last=True)
    valid_loader = DataLoader(X_valid, batch_size=batch_size, shuffle=True, drop_last=True)
    print('completed.')
    print('-' * 100)

    # find an unused checkpoint directory
    num = 1
    while True:
        ckpt_dir = hydra.utils.get_original_cwd() + f'{save_path}/checkpoints_{date}_{num}'
        try:
            if any(os.scandir(ckpt_dir)):
                num += 1
                continue
            else:
                break
        except FileNotFoundError:
            os.makedirs(ckpt_dir, exist_ok=True)
            break

    # Model instance construction
    print('Model instance construction')
    model = MolecularGCN(dim=dim,
                         n_conv_hidden=n_conv_hidden,
                         n_mlp_hidden=n_mlp_hidden,
                         dropout=dropout).to(device)
    print(model)
    print('-' * 100)

    # Training
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    earlystopping = EarlyStopping(patience=patience, path=ckpt_dir + '/ckpt.pth', verbose=True)
    for epoch in range(1, epochs + 1):
        # training
        train_loss = train(model, optimizer, train_loader)

        # performance evaluation
        loss_train, _ = eval(model, train_loader)
        loss_valid, score_list = eval(model, valid_loader, ks=ks)
        top1acc, top3acc, top5acc, top10acc = score_list

        print(f'Epoch: {epoch}/{epochs}, loss_train: {loss_train:.5}, loss_valid: {loss_valid:.5}')
        print(f'top k accuracy: top1={top1acc:.2}, top3={top3acc:.2}, top5={top5acc:.2}, top10={top10acc:.2}')
        # early stopping detection
        earlystopping(loss_valid, model)
        if earlystopping.early_stop:
            print('Early Stopping!')
            print('-' * 100)
            break


if __name__ == '__main__':
    main()
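A quick worked example of the topk_accuracy logic in gcn_train.py, on a hypothetical 2-sample, 4-class batch (plain tensors stand in for the PyG data object):

import torch

output = torch.tensor([[0.1, 0.2, 0.9, 0.0],
                       [0.8, 0.1, 0.0, 0.1]])  # (batch, n_classes) scores
y = torch.tensor([2, 0])                       # true template labels

k = 2
_, pred = output.topk(k, 1, True, True)        # top-k class ids per sample
pred = pred.t()                                # (batch, k) -> (k, batch)
correct = pred.eq(y.unsqueeze(0).expand_as(pred))
print((correct.float().sum() / len(y)).item())  # 1.0: both labels fall in the top-2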
data/scripts/mcts.py
ADDED
@@ -0,0 +1,395 @@
import os
import numpy as np
import pandas as pd
import json
import pickle
import datetime

import hydra
from config.config import cs
from omegaconf import DictConfig

import torch
import torch.nn.functional as F

import time

import warnings
warnings.filterwarnings('ignore')

import rdkit.Chem as Chem
from rdkit import RDLogger
from rdkit.Chem import Descriptors
RDLogger.DisableLog('rdApp.*')

from Model.Transformer.model import Transformer
from scripts.preprocess import make_counter, make_transforms
from Model.GCN import network
from Model.GCN.utils import template_prediction, check_templates
from scripts.beam_search import beam_decode, greedy_translate

from Utils.utils import read_smilesset, RootNode, NormalNode, smi_tokenizer, MW_checker, is_empty
from Utils.reward import getReward


class MCTS():
    def __init__(self, init_smiles, model, GCN_model, vocab, Reward, max_depth=10, c=1, step=0, n_valid=0,
                 n_invalid=0, max_r=-1000, r_dict=None, src_transforms=None, beam_width=10, nbest=5,
                 inf_max_len=256, beam_templates: list = None, rollout_depth=None, device=None, GCN_device=None,
                 exp_num_sampling=None, roll_num_sampling=None):
        self.init_smiles = init_smiles
        self.model = model
        self.GCN_model = GCN_model
        self.vocab = vocab
        self.Reward = Reward
        self.max_depth = max_depth
        self.valid_smiles = {}
        self.terminate_smiles = {}
        self.c = c
        self.count = 0
        self.max_score = max_r
        self.step = step
        self.n_valid = n_valid
        self.n_invalid = n_invalid
        self.total_nodes = 0
        self.expand_max = 0
        self.r_dict = r_dict
        self.transforms = src_transforms
        self.beam_width = beam_width
        self.nbest = nbest
        self.inf_max_len = inf_max_len
        self.beam_templates = beam_templates
        self.rollout_depth = rollout_depth
        self.device = device
        self.GCN_device = GCN_device
        self.gen_templates = []
        self.num_sampling = exp_num_sampling
        self.roll_num_sampling = roll_num_sampling
        self.no_template = False
        self.smi_to_template = {}
        self.accum_time = 0

    def select(self):
        raise NotImplementedError()

    def expand(self):
        raise NotImplementedError()

    def simulate(self):
        raise NotImplementedError()

    def backprop(self):
        raise NotImplementedError()

    def search(self, n_step):
        raise NotImplementedError()


class ParseSelectMCTS(MCTS):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.root = RootNode()
        self.current_node = None
        self.next_smiles = {}
        self.rollout_result = {}
        scores, _, _ = self.Reward.reward([self.init_smiles])
        _, self.init_score = scores[0]

    def select(self):
        '''
        Search for a node with no child nodes and the maximum UCB score.
        '''
        self.current_node = self.root
        while len(self.current_node.children) != 0:
            self.current_node = self.current_node.select_children()
            if self.current_node.depth + 1 > self.max_depth:
                tmp = self.current_node
                # penalize the whole path to this over-deep node, then restart from the root
                while self.current_node is not None:
                    self.current_node.cum_score += -1
                    self.current_node.visit += 1
                    self.current_node = self.current_node.parent
                tmp.remove_Node()
                self.current_node = self.root

    def expand(self):
        '''
        self.no_template: True if template_prediction for the selected node yields no usable template.
        self.next_smiles: key = SMILES, value = reward score
        '''
        self.next_smiles = {}
        self.smi_to_template = {}
        self.expand_max = 0

        # prediction of reaction templates
        matched_indices = []
        input_smi = self.current_node.smi
        self.no_template = False
        indices = template_prediction(GCN_model=self.GCN_model, input_smi=input_smi,
                                      num_sampling=self.num_sampling, GCN_device=self.GCN_device)
        matched_indices = check_templates(indices, input_smi, self.r_dict)
        if len(matched_indices) != 0:
            self.gen_templates.extend(matched_indices)
            # prediction of products
            with torch.no_grad():
                for i in matched_indices:
                    input_conditional = smi_tokenizer(i + input_smi).split(' ')
                    input_tokens = self.transforms(input_conditional).to(self.device)
                    outputs = beam_decode(v=self.vocab, model=self.model, input_tokens=input_tokens, template_idx=i,
                                          device=self.device, inf_max_len=self.inf_max_len, beam_width=self.beam_width,
                                          nbest=self.nbest, Temp=1, beam_templates=self.beam_templates)
                    for output in outputs:
                        self.next_smiles[output] = 0
                        self.smi_to_template[output] = i
            self.check()
        else:
            self.no_template = True
            while (len(self.current_node.children) == 0) or (min([cn.visit for cn in self.current_node.children]) >= 10000):
                self.current_node.cum_score = -10000
                self.current_node.visit = 10000
                self.current_node = self.current_node.parent

    def check(self):
        valid_list = []
        invalid_list = []
        score_que = []
        score = None
        reaction_path = []
        tmp = self.current_node

        if len(self.next_smiles) == 0:
            self.current_node.cum_score = -100000
            self.current_node.visit = 100000
            self.current_node.remove_Node()
            print('0 molecules are expanded.')

        else:
            # make reaction path
            while self.current_node.depth > 0:
                reaction_path.insert(0, f'{self.current_node.template}.{self.current_node.smi}')
                self.current_node = self.current_node.parent
            self.current_node = tmp

            # scoring
            for smi in self.next_smiles.keys():
                mol = Chem.MolFromSmiles(smi)
                if mol is None:
                    self.n_invalid += 1
                    invalid_list.append(smi)
                elif (mol is not None) and (MW_checker(mol, 600) == True):
                    score_que.append(smi)
                    self.n_valid += 1
                else:
                    invalid_list.append(smi)

            scores, _, _ = self.Reward.reward(score_que)
            if len(scores) != 0:
                valid_scores = []
                for smi, score in scores:
                    template = self.smi_to_template[smi]
                    path = reaction_path.copy()
                    path.append(f'{template}.{smi}')
                    path = '.'.join(path)
                    if score is not None:
                        self.valid_smiles[self.step, smi, path] = score
                        valid_list.append((score, smi))
                        valid_scores.append(score)
                        self.max_score = max(self.max_score, score)
                        self.expand_max = max(self.expand_max, score)
                for smi in invalid_list:
                    self.next_smiles.pop(smi)
                print(f'{len(self.next_smiles)} molecules are expanded.')
            else:
                self.no_template = True
                while (len(self.current_node.children) == 0) or (min([cn.visit for cn in self.current_node.children]) >= 100000):
                    self.current_node.cum_score = -100000
                    self.current_node.visit = 100000
                    self.current_node = self.current_node.parent

    def simulate(self):
        '''rollout'''
        self.rollout_result = {}  # key: expanded SMILES, value: (best rollout SMILES, max score)
        for orig_smi in self.next_smiles:
            depth = 0
            smi_que = [orig_smi]
            max_smi = None
            max_score = -10000
            while depth < self.rollout_depth:
                input_conditional = []
                for next_smi in smi_que:
                    if Chem.MolFromSmiles(next_smi) is not None:
                        indices = template_prediction(self.GCN_model, next_smi, num_sampling=self.roll_num_sampling,
                                                      GCN_device=self.GCN_device)
                        matched_indices = check_templates(indices, next_smi, self.r_dict)
                        for t in matched_indices:
                            input_conditional.append(smi_tokenizer(t + next_smi).split(' '))
                if is_empty(input_conditional) == False:
                    with torch.no_grad():
                        input_tokens = self.transforms(input_conditional).to(self.device)
                        output = greedy_translate(v=self.vocab, model=self.model, input_tokens=input_tokens,
                                                  inf_max_len=self.inf_max_len, device=self.device)  # output: list of SMILES
                    scores, max_smi_tmp, max_score_tmp = self.Reward.reward_remove_nan(output)
                    if max_score_tmp is None:
                        max_score_tmp = -10000
                    elif max_score < max_score_tmp:
                        max_score = max_score_tmp
                        max_smi = max_smi_tmp
                else:
                    break
                depth += 1
                smi_que = output
            if max_score > 0:
                self.next_smiles[orig_smi] = max_score
                self.rollout_result[orig_smi] = (max_smi, max_score)
            else:
                self.next_smiles[orig_smi] = 0

    def backprop(self):
        for key, value in self.next_smiles.items():
            child = NormalNode(smi=key, c=self.c)
            child.template = self.smi_to_template[key]
            child.cum_score += value
            child.imm_score = value
            child.id = self.total_nodes
            self.total_nodes += 1
            try:
                child.rollout_result = self.rollout_result[key]
            except KeyError:
                child.rollout_result = ('Termination', -10000)
            self.current_node.add_Node(child)
        max_reward = max(self.next_smiles.values())
        self.max_score = max(self.max_score, max_reward)
        while self.current_node is not None:
            self.current_node.visit += 1
            self.current_node.cum_score += max_reward
            self.current_node.imm_score = max(self.current_node.imm_score, max_reward)
            self.current_node = self.current_node.parent

    def search(self, n_step):
        n = NormalNode(self.init_smiles)
        self.root.add_Node(n)
        while self.step < n_step:
            self.step += 1
            if self.n_valid + self.n_invalid == 0:
                valid_rate = 0
            else:
                valid_rate = self.n_valid / (self.n_valid + self.n_invalid)
            print(f'step:{self.step}, INIT_SCORE:{self.init_score}, MAX_SCORE:{self.max_score}, VALIDITY:{valid_rate}')
            self.select()
            print(f'selected_score:{self.current_node.imm_score}')
            self.expand()
            expand_max = self.expand_max if self.expand_max != 0 else None
            if self.no_template == True:
                print('no template')
                continue
            if len(self.next_smiles) != 0:
                self.simulate()
                self.backprop()


@hydra.main(config_path=None, config_name='config', version_base=None)
def main(cfg: DictConfig):
    date = datetime.datetime.now().strftime('%Y%m%d')
    num = 1
    while True:
        out_dir = hydra.utils.get_original_cwd() + f"{cfg['mcts']['out_dir']}/{date}_{num}"
        if os.path.isdir(out_dir):
            num += 1
            continue
        else:
            os.makedirs(out_dir, exist_ok=True)
            break
    print(f'{out_dir} was created.')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # preprocess
    src_train_path = hydra.utils.get_original_cwd() + cfg['mcts']['src_train']
    tgt_train_path = hydra.utils.get_original_cwd() + cfg['mcts']['tgt_train']
    src_valid_path = hydra.utils.get_original_cwd() + cfg['mcts']['src_valid']
    tgt_valid_path = hydra.utils.get_original_cwd() + cfg['mcts']['tgt_valid']
    data_dict = make_counter(src_train_path=src_train_path,
                             tgt_train_path=tgt_train_path,
                             src_valid_path=src_valid_path,
                             tgt_valid_path=tgt_valid_path)
    src_transforms, _, v = make_transforms(data_dict=data_dict, make_vocab=True)

    # input smiles set
    init_smiles = read_smilesset(hydra.utils.get_original_cwd() + cfg['mcts']['in_smiles_file'])
    n_valid = 0
    n_invalid = 0
    mcts = None

    # load model
    d_model = cfg['model']['dim_model']
    num_encoder_layers = cfg['model']['num_encoder_layers']
    num_decoder_layers = cfg['model']['num_decoder_layers']
    nhead = cfg['model']['nhead']
    dropout = cfg['model']['dropout']
    dim_ff = cfg['model']['dim_ff']
    ckpt_Transformer = cfg['mcts']['ckpt_Transformer']
    model = Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers,
                        num_decoder_layers=num_decoder_layers, dim_feedforward=dim_ff,
                        vocab=v, dropout=dropout, device=device).to(device)
    # load the Transformer checkpoint specified for MCTS
    ckpt = torch.load(hydra.utils.get_original_cwd() + ckpt_Transformer, map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    model.eval()

    # load GCN model
    dim_GCN = cfg['GCN_train']['dim']
    n_conv_hidden = cfg['GCN_train']['n_conv_hidden']
    n_mlp_hidden = cfg['GCN_train']['n_mlp_hidden']
    ckpt_GCN = cfg['mcts']['ckpt_GCN']
    GCN_model = network.MolecularGCN(dim=dim_GCN,
                                     n_conv_hidden=n_conv_hidden,
                                     n_mlp_hidden=n_mlp_hidden,
                                     dropout=dropout).to(device)
    GCN_model.load_state_dict(torch.load(hydra.utils.get_original_cwd() + ckpt_GCN))
    GCN_model.eval()

    # MCTS
    reward = getReward(name=cfg['mcts']['reward_name'])
    print('REWARD:', cfg['mcts']['reward_name'])
    with open(hydra.utils.get_original_cwd() + '/data/label_template.json') as f:
        r_dict = json.load(f)
    with open(hydra.utils.get_original_cwd() + '/data/beamsearch_template_list.txt', 'r') as f:
        beam_templates = f.read().splitlines()
    for start_smiles in init_smiles:
        input_smiles = start_smiles
        start = time.time()
        mcts = ParseSelectMCTS(input_smiles, model=model, GCN_model=GCN_model, vocab=v, Reward=reward,
                               max_depth=cfg['mcts']['max_depth'], step=0, n_valid=n_valid, n_invalid=n_invalid,
                               c=cfg['mcts']['ucb_c'], max_r=reward.max_r, r_dict=r_dict, src_transforms=src_transforms,
                               beam_width=cfg['mcts']['beam_width'], nbest=cfg['mcts']['nbest'],
                               beam_templates=beam_templates, rollout_depth=cfg['mcts']['rollout_depth'],
                               roll_num_sampling=cfg['mcts']['roll_num_sampling'], device=device,
                               GCN_device=device, exp_num_sampling=cfg['mcts']['exp_num_sampling'])
        mcts.search(n_step=cfg['mcts']['n_step'])
        reward.max_r = mcts.max_score
        n_valid += mcts.n_valid
        n_invalid += mcts.n_invalid
        end = time.time()
        print('Elapsed Time: %f' % (end - start))

        generated_smiles = pd.DataFrame(columns=['SMILES', 'Reward', 'Imp', 'MW', 'step', 'reaction_path'])
        scores, _, _ = reward.reward([start_smiles])
        start_reward = scores[0][1]
        rows = []
        for kv in mcts.valid_smiles.items():
            step, smi, path = kv[0]
            step = int(step)
            try:
                w = Descriptors.MolWt(Chem.MolFromSmiles(smi))
            except Exception:
                w = 0
            if (kv[1] is None) or (start_reward is None):
                Imp = None
            else:
                Imp = kv[1] - start_reward
            rows.append({'SMILES': smi, 'Reward': kv[1], 'Imp': Imp,
                         'MW': w, 'step': step, 'reaction_path': path})
        # pandas >= 2.0 removed DataFrame.append, so build the frame with concat
        generated_smiles = pd.concat([generated_smiles, pd.DataFrame(rows)], ignore_index=True)
        generated_smiles = generated_smiles.sort_values('Reward', ascending=False)
        generated_smiles.to_csv(out_dir + f'/{start_smiles}.csv', index=False)


if __name__ == '__main__':
    main()
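select_children, along with the cum_score/visit/c bookkeeping it relies on, is defined in Utils/utils.py, which is not part of this section. A sketch of the standard UCB1 rule that such a method typically implements, offered under that assumption rather than as the repository's exact formula:

import math

def ucb_select(parent, c=1.0):
    # Pick the child maximizing mean reward plus an exploration bonus.
    def ucb(child):
        if child.visit == 0:
            return float('inf')  # always try unvisited children first
        exploit = child.cum_score / child.visit
        explore = c * math.sqrt(2 * math.log(parent.visit) / child.visit)
        return exploit + explore
    return max(parent.children, key=ucb)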
data/scripts/preprocess.py
ADDED
@@ -0,0 +1,148 @@
import warnings
import hydra
import os
from config.config import cs
from omegaconf import DictConfig
warnings.filterwarnings('ignore')
from collections import Counter

import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchtext.vocab import vocab
import torchtext.transforms as T


class smi_Dataset(Dataset):
    def __init__(self, src, tgt):
        super().__init__()
        self.src = src
        self.tgt = tgt

    def __getitem__(self, i):
        src = self.src[i]
        tgt = self.tgt[i]
        return src, tgt

    def __len__(self):
        return len(self.src)


def make_smi_list(path, counter):
    smi_list = []
    max_length = 0
    with open(path, 'r') as f:
        for line in f:
            smi_list.append(line.rstrip().split(' '))
    for i in smi_list:
        counter.update(i)
        if len(i) > max_length:
            max_length = len(i)
    return smi_list, max_length


def make_counter(src_train_path, tgt_train_path, src_valid_path, tgt_valid_path) -> dict:
    src_counter = Counter()
    tgt_counter = Counter()
    src_train, max_src_train = make_smi_list(src_train_path, src_counter)
    tgt_train, max_tgt_train = make_smi_list(tgt_train_path, tgt_counter)
    src_valid, max_src_valid = make_smi_list(src_valid_path, src_counter)
    tgt_valid, max_tgt_valid = make_smi_list(tgt_valid_path, tgt_counter)

    src_max_length = max([max_src_train, max_src_valid])
    tgt_max_length = max([max_tgt_train, max_tgt_valid])
    tgt_max_length = tgt_max_length + 2  # add room for the bos and eos tokens

    datasets = []
    datasets.append(src_train)
    datasets.append(tgt_train)
    datasets.append(src_valid)
    datasets.append(tgt_valid)

    return {'src_counter': src_counter, 'tgt_counter': tgt_counter,
            'src_max_len': src_max_length, 'tgt_max_len': tgt_max_length, 'datasets': datasets}


def make_transforms(data_dict, make_vocab: bool = False, vocab_load_path=None):
    if make_vocab == False and vocab_load_path is None:
        raise ValueError('The make_transforms function is not being passed the vocab_load_path.')
    if make_vocab:
        counter = data_dict['src_counter'] + data_dict['tgt_counter']
        v = vocab(counter, min_freq=5, specials=(['<unk>', '<pad>', '<bos>', '<eos>']))
        v.set_default_index(v['<unk>'])
    else:
        v = torch.load(vocab_load_path)

    src_transforms = T.Sequential(
        T.VocabTransform(v),
        T.ToTensor(padding_value=v['<pad>']),
        T.PadTransform(max_length=data_dict['src_max_len'], pad_value=v['<pad>'])  # src needs no bos/eos
    )

    tgt_transforms = T.Sequential(
        T.VocabTransform(v),
        T.AddToken(token=v['<bos>'], begin=True),
        T.AddToken(token=v['<eos>'], begin=False),
        T.ToTensor(padding_value=v['<pad>']),
        T.PadTransform(max_length=data_dict['tgt_max_len'], pad_value=v['<pad>'])
    )

    return src_transforms, tgt_transforms, v


def make_dataloader(datasets, src_transforms, tgt_transforms, batch_size):
    '''
    datasets: output of make_counter()
    transforms: output of make_transforms()
    '''
    src_train = datasets[0]
    tgt_train = datasets[1]
    src_valid = datasets[2]
    tgt_valid = datasets[3]

    src_train, src_valid = src_transforms(src_train), src_transforms(src_valid)
    tgt_train, tgt_valid = tgt_transforms(tgt_train), tgt_transforms(tgt_valid)

    train_dataset = smi_Dataset(src=src_train, tgt=tgt_train)
    valid_dataset = smi_Dataset(src=src_valid, tgt=tgt_valid)

    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  drop_last=True,
                                  shuffle=True,
                                  num_workers=2,
                                  pin_memory=True)
    valid_dataloader = DataLoader(dataset=valid_dataset,
                                  batch_size=batch_size,
                                  drop_last=False,
                                  shuffle=False,
                                  num_workers=2,
                                  pin_memory=True)

    return train_dataloader, valid_dataloader


@hydra.main(config_path=None, config_name='config', version_base=None)
def main(cfg: DictConfig):
    # Loading data
    print('Saving vocabulary...')
    src_train_path = hydra.utils.get_original_cwd() + cfg['prep']['src_train']
    tgt_train_path = hydra.utils.get_original_cwd() + cfg['prep']['tgt_train']
    src_valid_path = hydra.utils.get_original_cwd() + cfg['prep']['src_valid']
    tgt_valid_path = hydra.utils.get_original_cwd() + cfg['prep']['tgt_valid']

    data_dict = make_counter(src_train_path=src_train_path,
                             tgt_train_path=tgt_train_path,
                             src_valid_path=src_valid_path,
                             tgt_valid_path=tgt_valid_path)

    _, _, v = make_transforms(data_dict=data_dict, make_vocab=True, vocab_load_path=None)
    torch.save(v, hydra.utils.get_original_cwd() + '/vocab.pth')
    print('done.')


if __name__ == '__main__':
    main()
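A minimal sketch of what the src_transforms pipeline in preprocess.py does to a tokenized batch, using a toy vocabulary rather than the USPTO one:

from collections import Counter
import torchtext.transforms as T
from torchtext.vocab import vocab

counter = Counter({'C': 10, 'O': 8, '(': 6, ')': 6, '=': 5})
v = vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])
v.set_default_index(v['<unk>'])

transforms = T.Sequential(
    T.VocabTransform(v),                                  # tokens -> ids
    T.ToTensor(padding_value=v['<pad>']),                 # pad within the batch
    T.PadTransform(max_length=6, pad_value=v['<pad>']),   # pad to a fixed length
)
batch = [['C', 'C', 'O'], ['C', '(', '=', 'O', ')']]
print(transforms(batch))  # (2, 6) LongTensor, short rows padded with <pad>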
data/scripts/transformer_train.py
ADDED
@@ -0,0 +1,167 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))


import time
import math
import hydra
from config.config import cs
from omegaconf import DictConfig, OmegaConf

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp

from Model.Transformer.model import TransformerLR, Transformer
from scripts.preprocess import make_counter, make_transforms, make_dataloader
from Utils.utils import tally_parameters, EarlyStopping, AverageMeter, accuracy, torch_fix_seed

import datetime
date = datetime.datetime.now().strftime('%Y%m%d')

torch_fix_seed()


def train(cfg):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    num = 1
    while True:
        ckpt_dir = hydra.utils.get_original_cwd() + f'/ckpts/checkpoints_{date}_{num}'
        if os.path.isdir(ckpt_dir):
            num += 1
            continue
        else:
            os.makedirs(ckpt_dir, exist_ok=True)
            break
    print(f'{ckpt_dir} was created.')

    data_dict = make_counter(src_train_path=hydra.utils.get_original_cwd() + cfg['train']['src_train'],
                             tgt_train_path=hydra.utils.get_original_cwd() + cfg['train']['tgt_train'],
                             src_valid_path=hydra.utils.get_original_cwd() + cfg['train']['src_valid'],
                             tgt_valid_path=hydra.utils.get_original_cwd() + cfg['train']['tgt_valid'])
    print('making dataloader...')
    src_transforms, tgt_transforms, v = make_transforms(data_dict=data_dict, make_vocab=True, vocab_load_path=None)
    train_dataloader, valid_dataloader = make_dataloader(datasets=data_dict['datasets'], src_transforms=src_transforms,
                                                         tgt_transforms=tgt_transforms, batch_size=cfg['train']['batch_size'])
    print('max length of src sentence:', data_dict['src_max_len'])
    d_model = cfg['model']['dim_model']
    nhead = cfg['model']['nhead']
    dropout = cfg['model']['dropout']
    dim_ff = cfg['model']['dim_ff']
    num_encoder_layers = cfg['model']['num_encoder_layers']
    num_decoder_layers = cfg['model']['num_decoder_layers']
    model = Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers,
                        num_decoder_layers=num_decoder_layers, dim_feedforward=dim_ff,
                        vocab=v, dropout=dropout, device=device).to(device)
    cudnn.benchmark = True
    if device == 'cuda':
        model = torch.nn.DataParallel(model)  # make parallel
        torch.backends.cudnn.benchmark = True

    # count the number of parameters
    n_params, enc, dec = tally_parameters(model)
    print('encoder: %d' % enc)
    print('decoder: %d' % dec)
    print('* number of parameters: %d' % n_params)

    lr = cfg['train']['lr']
    betas = cfg['train']['betas']
    patience = cfg['train']['patience']
    optimizer = optim.Adam(model.parameters(), lr=lr, betas=betas)
    scheduler = TransformerLR(optimizer, warmup_epochs=8000)
    label_smoothing = cfg['train']['label_smoothing']
    criterion = nn.CrossEntropyLoss(label_smoothing=label_smoothing,
                                    reduction='none',
                                    ignore_index=v['<pad>'])
    earlystopping = EarlyStopping(patience=patience, ckpt_dir=ckpt_dir)

    step_num = cfg['train']['step_num']
    log_interval_step = cfg['train']['log_interval']
    valid_interval_steps = cfg['train']['val_interval']
    save_interval_steps = cfg['train']['save_interval']
    accum_count = 1

    valid_len = 0
    for _, d in enumerate(valid_dataloader):
        valid_len += len(d[0])

    step = 0
    tgt_mask = nn.Transformer.generate_square_subsequent_mask(data_dict['tgt_max_len'] - 1).to(device)
    scaler = torch.cuda.amp.GradScaler()
    total_loss = 0
    accum_loss = 0
    model.train()
    start_time = time.time()
    print('start training...')
    while step < step_num:
        for i, data in enumerate(train_dataloader):
            src, tgt = data[0].to(device).permute(1, 0), data[1].to(device).permute(1, 0)
            tgt_input = tgt[:-1, :]  # (seq, batch)
            tgt_output = tgt[1:, :]  # shifted right
            with torch.amp.autocast('cuda'):
                outputs = model(src=src, tgt=tgt_input, tgt_mask=tgt_mask,
                                src_pad_mask=True, tgt_pad_mask=True, memory_pad_mask=True)  # (seq, batch, vocab)
                loss = (criterion(outputs.reshape(-1, len(v)), tgt_output.reshape(-1)).sum() / len(data[0])) / accum_count
            scaler.scale(loss).backward()
            accum_loss += loss.detach().item()
            if ((i + 1) % accum_count == 0) or ((i + 1) == len(train_dataloader)):
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
                scaler.step(optimizer)
                scaler.update()
                scheduler.step()
                optimizer.zero_grad()
                step += 1
                total_loss += accum_loss
                accum_loss = 0

                if (step + 1) % log_interval_step == 0:
                    lr = scheduler.get_last_lr()[0]
                    cur_loss = total_loss / log_interval_step
                    ppl = math.exp(cur_loss)
                    end_time = time.time()
                    print(f'| step {step+1} | lr {lr:03.5f} | loss {cur_loss:5.5f} | ppl {ppl:8.5f} | time per {log_interval_step} step {end_time - start_time:3.1f}|')
                    total_loss = 0
                    start_time = time.time()

                # validation step
                if (step + 1) % valid_interval_steps == 0:
                    model.eval()
                    top1 = AverageMeter()
                    perfect_acc_top1 = AverageMeter()
                    eval_total_loss = 0.
                    with torch.no_grad():
                        for val_i, val_data in enumerate(valid_dataloader):
                            src, tgt = val_data[0].to(device).permute(1, 0), val_data[1].to(device).permute(1, 0)
                            tgt_input = tgt[:-1, :]
                            tgt_output = tgt[1:, :]
                            outputs = model(src=src, tgt=tgt_input, tgt_mask=tgt_mask,
                                            src_pad_mask=True, tgt_pad_mask=True, memory_pad_mask=True)
                            tmp_eval_loss = criterion(outputs.reshape(-1, len(v)), tgt_output.reshape(-1)).sum() / len(val_data[0])
                            eval_total_loss += tmp_eval_loss.detach().item()
                            partial_top1, perfect_acc = accuracy(outputs.reshape(-1, len(v)), tgt_output.reshape(-1),
                                                                 batch_size=tgt_output.size(1), v=v)
                            top1.update(partial_top1, src.size(1))
                            perfect_acc_top1.update(perfect_acc, src.size(1))
                    eval_loss = eval_total_loss / (val_i + 1)
                    print(f'validation step {step+1} | validation loss {eval_loss:5.5f} | partial top1 accuracy {top1.avg:.3f} | perfect top1 accuracy {perfect_acc_top1.avg:.3f}')
                    if (step + 1) % save_interval_steps == 0:
                        earlystopping(val_loss=eval_loss, step=step, optimizer=optimizer, cur_loss=cur_loss, model=model)
                    model.train()
                    start_time = time.time()
                    if earlystopping.early_stop:
                        print('Early Stopping!')
                        break
        if earlystopping.early_stop:
            break


@hydra.main(config_path=None, config_name='config', version_base=None)
def main(cfg: DictConfig):
    train(cfg)


if __name__ == '__main__':
    main()
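TransformerLR is defined in Model/Transformer/model.py, outside this section; schedulers taking a warmup_epochs-style argument usually implement the inverse-square-root schedule from "Attention Is All You Need". A sketch assuming that formula, not a copy of the repository's implementation:

def transformer_lr(step, d_model=512, warmup=8000):
    # Linear warmup, then inverse square-root decay; peaks near step == warmup.
    step = max(step, 1)
    return d_model ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)

print(transformer_lr(1), transformer_lr(8000), transformer_lr(80000))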
data/scripts/translate.py
ADDED
@@ -0,0 +1,193 @@
import operator
import torch
import torchtext.vocab.vocab as Vocab
import rdkit.Chem as Chem
import hydra
from config.config import cs
from omegaconf import DictConfig

from Model.Transformer.model import Transformer
from scripts.preprocess import make_counter, make_transforms

import itertools
import os

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class BeamSearchNode(object):
    def __init__(self, previousNode, decoder_input, logProb, length):
        self.prevNode = previousNode
        self.dec_in = decoder_input.to('cpu')
        self.logp = logProb
        self.leng = length

    def eval(self, alpha=0.6):
        return self.logp / (((5 + self.leng) / (5 + 1)) ** alpha)


def beam_decode(cfg: DictConfig, v: Vocab, model=None, input_tokens=None, Temp=1):
    global beam_width, nbest
    SOS_token = v['<bos>']
    EOS_token = v['<eos>']
    beam_width = cfg['translate']['beam_size']
    nbest = cfg['translate']['nbest']
    inf_max_len = cfg['translate']['inf_max_len']

    # A batch of one input for the encoder
    encoder_input = input_tokens

    # Generate encoded features
    with torch.no_grad():
        encoder_input = encoder_input.unsqueeze(-1)  # (seq, 1), batch_size=1
        encoder_output, memory_pad_mask = model.encode(encoder_input, src_pad_mask=True)  # encoder_output: (seq, 1, d_model)

    # Start with the start-of-sentence token
    decoder_input = torch.tensor([[SOS_token]]).to(device)  # (1, 1)

    # Starting node
    counter = itertools.count()

    node = BeamSearchNode(previousNode=None,
                          decoder_input=decoder_input,
                          logProb=0, length=0)

    with torch.no_grad():
        tgt_mask = torch.nn.Transformer.generate_square_subsequent_mask(decoder_input.size(1)).to(device)
        logits = model.decode(memory=encoder_output, tgt=decoder_input.permute(1, 0), tgt_mask=tgt_mask, memory_pad_mask=memory_pad_mask)
        logits = logits.permute(1, 0, 2)  # (seq, 1, vocab) -> (1, seq, vocab); the batch size is 1 here
        decoder_output = torch.log_softmax(logits[:, -1, :] / Temp, dim=1)  # log_softmax of the last position only, (1, vocab)

    tmp_beam_width = min(beam_width, decoder_output.size(1))
    log_prob, indexes = torch.topk(decoder_output, tmp_beam_width)  # (tmp_beam_width,)
    nextnodes = []
    for new_k in range(tmp_beam_width):
        decoded_t = indexes[0][new_k].view(1, -1).to('cpu')  # token index, shape (1, 1)
        log_p = log_prob[0][new_k].item()  # its log-probability
        next_decoder_input = torch.cat([node.dec_in, decoded_t], dim=1)  # dec_in: (1, seq)
        nn = BeamSearchNode(previousNode=node,
                            decoder_input=next_decoder_input,
                            logProb=node.logp + log_p,
                            length=node.leng + 1)
        score = -nn.eval()
        count = next(counter)
        nextnodes.append((score, count, nn))

    # start beam search
    for i in range(inf_max_len - 1):
        # fetch the best nodes
        if i == 0:
            current_nodes = sorted(nextnodes)[:tmp_beam_width]
        else:
            current_nodes = sorted(nextnodes)[:beam_width]

        nextnodes = []
        # current_nodes = [(score, count, node), ...], shape: (beam_width,)
        scores, counts, nodes, decoder_inputs = [], [], [], []
        for score, count, node in current_nodes:
            if node.dec_in[0][-1].item() == EOS_token:
                nextnodes.append((score, count, node))
            else:
                scores.append(score)
                counts.append(count)
                nodes.append(node)
                decoder_inputs.append(node.dec_in)
        if not bool(decoder_inputs):
            break

        decoder_inputs = torch.vstack(decoder_inputs)  # (batch=beam, seq)

        # adjust batch_size
        enc_out = encoder_output.repeat(1, decoder_inputs.size(0), 1)
        mask = memory_pad_mask.repeat(decoder_inputs.size(0), 1)

        with torch.no_grad():
            tgt_mask = torch.nn.Transformer.generate_square_subsequent_mask(decoder_inputs.size(1)).to(device)
            logits = model.decode(memory=enc_out, tgt=decoder_inputs.permute(1, 0).to(device), tgt_mask=tgt_mask, memory_pad_mask=mask)
            logits = logits.permute(1, 0, 2)  # (seq, batch, vocab) -> (batch, seq, vocab)
            decoder_output = torch.log_softmax(logits[:, -1, :] / Temp, dim=1)  # log_softmax of the last token
            # decoder_output.shape = (batch, vocab)

        for beam, score in enumerate(scores):
            for token in range(EOS_token, decoder_output.size(-1)):  # discard unk, pad and bos from the start
                decoded_t = torch.tensor([[token]])
                log_p = decoder_output[beam, token].item()
                next_decoder_input = torch.cat([nodes[beam].dec_in, decoded_t], dim=1)
                node = BeamSearchNode(previousNode=nodes[beam],
                                      decoder_input=next_decoder_input,
                                      logProb=nodes[beam].logp + log_p,
                                      length=nodes[beam].leng + 1)
                score = -node.eval()
                count = next(counter)
                nextnodes.append((score, count, node))

    outputs = []
    for score, _, n in sorted(nextnodes, key=operator.itemgetter(0))[:nbest]:
        # nextnodes holds (score, count, node) tuples, so itemgetter(0) sorts by score
        output = n.dec_in.squeeze(0).tolist()[1:-1]  # strip bos and eos
        output = v.lookup_tokens(output)
        output = ' '.join(output)
        outputs.append(output)

    return outputs


def translation(cfg: DictConfig):
    # make transforms and vocabulary
    src_train_path = hydra.utils.get_original_cwd() + cfg['translate']['src_train']
    tgt_train_path = hydra.utils.get_original_cwd() + cfg['translate']['tgt_train']
    src_valid_path = hydra.utils.get_original_cwd() + cfg['translate']['src_valid']
    tgt_valid_path = hydra.utils.get_original_cwd() + cfg['translate']['tgt_valid']
    data_dict = make_counter(src_train_path=src_train_path,
                             tgt_train_path=tgt_train_path,
                             src_valid_path=src_valid_path,
                             tgt_valid_path=tgt_valid_path)
    src_transforms, _, v = make_transforms(data_dict=data_dict, make_vocab=True, vocab_load_path=None)

    # load model
    d_model = cfg['model']['dim_model']
    num_encoder_layers = cfg['model']['num_encoder_layers']
    num_decoder_layers = cfg['model']['num_decoder_layers']
    nhead = cfg['model']['nhead']
    dropout = cfg['model']['dropout']
    dim_ff = cfg['model']['dim_ff']
    model = Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers,
                        num_decoder_layers=num_decoder_layers, dim_feedforward=dim_ff,
                        vocab=v, dropout=dropout, device=device).to(device)
    ckpt = torch.load(hydra.utils.get_original_cwd() + cfg['model']['ckpt'], map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    model.eval()

    # make dataset
    src = []
    src_test_path = hydra.utils.get_original_cwd() + cfg['translate']['src_test_path']
    with open(src_test_path, 'r') as f:
        for line in f:
            src.append(line.rstrip().split(' '))
    src = src_transforms(src).to(device)

    rsmis = []
    for i, input_tokens in enumerate(src):
        outputs = beam_decode(cfg=cfg, v=v, model=model, input_tokens=input_tokens)
        input_tokens = input_tokens.tolist()
        input_smi = input_tokens[0:input_tokens.index(v['<pad>'])]
        input_smi = v.lookup_tokens(input_smi)
        input_smi = ' '.join(input_smi)
        for output in outputs:
            rsmis.append(input_smi + ' >> ' + output)

    out_dir = cfg['translate']['out_dir']
    filename = cfg['translate']['filename']

    # set output file name
    os.makedirs(hydra.utils.get_original_cwd() + out_dir, exist_ok=True)
    with open(hydra.utils.get_original_cwd() + f'{out_dir}/out_beam{beam_width}_best{nbest}_file_{filename}.txt', 'w') as f:
        for rsmi in rsmis:
            f.write(rsmi + '\n')


@hydra.main(config_path=None, config_name='config', version_base=None)
def main(cfg: DictConfig):
    translation(cfg)


if __name__ == '__main__':
    main()
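BeamSearchNode.eval above is the GNMT length penalty of Wu et al. (2016): logp / ((5 + len) / 6) ** alpha, which keeps short, confident hypotheses from crowding longer ones out of the beam. A small worked comparison:

def length_penalized(logp, length, alpha=0.6):
    return logp / (((5 + length) / 6) ** alpha)

print(length_penalized(-4.0, 5))    # ~ -2.94 (short hypothesis)
print(length_penalized(-10.0, 20))  # ~ -4.25 (longer one stays competitive:
                                    #  the raw gap of 6 nats shrinks to ~1.3)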
data/set_up.sh
ADDED
@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# You should source this script to get the correct additions to Python path

# Directory of script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Set up the python paths
export PYTHONPATH=${PYTHONPATH}:${DIR}/
export PYTHONPATH=${PYTHONPATH}:${DIR}/Model/
data/translation/out_beam10_best10.txt
ADDED
@@ -0,0 +1,91 @@
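Each row below pairs the GCN-predicted template index with the tokenized source SMILES and the tokenized product emitted by beam search, in the `template src >> tgt` layout written by translate in scripts/beam_search.py. A sketch of parsing one row back into plain SMILES (the example row is a short hypothetical one in the same format):

line = '[749] C C O >> C C ( = O ) O'   # hypothetical row, same layout as below
template, rest = line.split(' ', 1)
src_tok, tgt_tok = rest.split(' >> ')
print(template, src_tok.replace(' ', ''), '->', tgt_tok.replace(' ', ''))
# [749] CCO -> CC(=O)O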
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c c n c c 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c c c n c 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c c n c n 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( O ) c 2 c c n c c 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c n c n c 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c n c c n 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C O c 1 c c c ( C ( O ) C n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c n 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c n n ( C ) c 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c c c ( C ) n c 2 ) C C N ( C ) C 1
[749] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( O ) c 2 c c c c n 2 ) C C N ( C ) C 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c ( F ) c ( F ) c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c ( F ) c c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c ( C ) n c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C O c 1 c c c ( C ( C ) = C n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c ( Cl ) c ( Cl ) c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c c ( F ) c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c n c c 1
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c ( F ) c c 1 F
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c c c 1 F
[987] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C ( = C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2 ) c 1 c c c ( Cl ) c c 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c c c ( C ( F ) ( F ) F ) n c 2 ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c c c ( C ) n c 2 ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c n c 3 c c c c c 3 c 2 ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c n c c ( F ) c 2 ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C c 1 c c c ( C C n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c n 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c c c ( N ( C ) C ) n c 2 ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c n c c c 2 C ( F ) ( F ) F ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c n c ( C ) c c 2 C ( F ) ( F ) F ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c n c c ( Br ) c 2 ) C C N ( C ) C 1
[986] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c c c ( = O ) [nH] c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c 3 n c c c c 3 c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c 3 c c n c c 3 c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c c c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c c ( Br ) c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c ( - n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c c 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c 3 [nH] c c c 3 c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c 3 c n c c c 3 c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c 3 n c n c c 3 c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 - c 2 c c c 3 c n n c c 3 c 2 ) C C N ( C ) C 1
[349] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C O c 1 c c c ( - n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c c 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 Cl ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c ( Cl ) c c 2 ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c n c c 2 ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C O c 1 c c c ( C n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c c 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c ( F ) c c 2 ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 C ( F ) ( F ) F ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 F ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c ( Cl ) c 2 ) C C N ( C ) C 1
[556] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c ( F ) c c c c 2 F ) C C N ( C ) C 1
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c c c 2 ) C C N ( C ) C 1
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c ( Cl ) c c 2 ) C C N ( C ) C 1
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c c ( Cl ) c 2 ) C C N ( C ) C 1
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c ( F ) c c 2 ) C C N ( C ) C 1
|
55 |
+
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c ( C ) c c 2 ) C C N ( C ) C 1
|
56 |
+
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c c c 2 Cl ) C C N ( C ) C 1
|
57 |
+
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c c 3 c c c c c 2 3 ) C C N ( C ) C 1
|
58 |
+
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c c c 2 F ) C C N ( C ) C 1
|
59 |
+
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C O c 1 c c c ( S ( = O ) ( = O ) n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c c 1
|
60 |
+
[648] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 S ( = O ) ( = O ) c 2 c c c 3 c c n c c 3 c 2 ) C C N ( C ) C 1
|
61 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2
|
62 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2
|
63 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) C ) C C N ( C ) C 1
|
64 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C C C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2
|
65 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C C ( F ) ( F ) F ) C C N ( C ) C 1
|
66 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C ( C ) ( C ) C ) C C N ( C ) C 1
|
67 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C c 2 c c c c c 2 ) C C N ( C ) C 1
|
68 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C C C ( C ) C ) C C N ( C ) C 1
|
69 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C C C C C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2
|
70 |
+
[302] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C O C C n 1 c 2 c ( c 3 c c ( C ) c c c 3 1 ) C N ( C ) C C 2
|
71 |
+
[191] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ) C C N ( C ) C 1
|
72 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c c c c 2 ) C C N ( C ) C 1
|
73 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c c ( Cl ) c c 2 ) C C N ( C ) C 1
|
74 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c c ( F ) c c 2 ) C C N ( C ) C 1
|
75 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c ( C ( = O ) n 2 c 3 c ( c 4 c c ( C ) c c c 4 2 ) C N ( C ) C C 3 ) c c 1
|
76 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c ( Cl ) c c c c 2 Cl ) C C N ( C ) C 1
|
77 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c n c c 2 ) C C N ( C ) C 1
|
78 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c ( Cl ) c c ( Cl ) c 2 ) C C N ( C ) C 1
|
79 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c c ( C ) c c 2 ) C C N ( C ) C 1
|
80 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c c ( Br ) c c 2 ) C C N ( C ) C 1
|
81 |
+
[232] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ( = O ) c 2 c c c c c 2 Cl ) C C N ( C ) C 1
|
82 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 ) C C N ( C ) C 1
|
83 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c ( Br ) c 2 ) C C N ( C ) C 1
|
84 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c ( F ) c c 2 ) C C N ( C ) C 1
|
85 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 C # N ) C C N ( C ) C 1
|
86 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 Cl ) C C N ( C ) C 1
|
87 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 F ) C C N ( C ) C 1
|
88 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c ( F ) c 2 ) C C N ( C ) C 1
|
89 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c ( Br ) c c 2 ) C C N ( C ) C 1
|
90 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c c c 2 Br ) C C N ( C ) C 1
|
91 |
+
[528] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 >> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C c 2 c c c ( Cl ) c c 2 ) C C N ( C ) C 1
|
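Each added line above has a fixed shape: a reaction-template index in square brackets, the space-tokenized SMILES of the shared starting material, the ">>" separator, and the space-tokenized SMILES of a predicted product. The sketch below shows one way to read such records back into molecules; it is a minimal illustration, assuming Python with RDKit installed, and the helper name parse_entry and its regular expression are illustrative rather than repository code.

import re

from rdkit import Chem  # assumed dependency, used only to validate SMILES

# One record per added line: "[template_id] tokenized_src >> tokenized_tgt".
RECORD = re.compile(r"\[(\d+)\]\s+(.+?)\s+>>\s+(.+)")

def parse_entry(line):
    """Return (template_id, source_smiles, product_smiles), or None when
    the line (a separator or line-number artifact) is not a record."""
    match = RECORD.match(line.strip())
    if match is None:
        return None
    template_id = int(match.group(1))
    # Detokenize: tokens are separated by single spaces, and multi-character
    # tokens such as [nH], Cl and Br are kept whole, so deleting the spaces
    # restores a parseable SMILES string.
    source = match.group(2).replace(" ", "")
    product = match.group(3).replace(" ", "")
    return template_id, source, product

# Record 71 above: template [191], N-methylation at the indole nitrogen.
tid, src, tgt = parse_entry(
    "[191] C c 1 c c c 2 [nH] c 3 c ( c 2 c 1 ) C N ( C ) C C 3 "
    ">> C c 1 c c c 2 c ( c 1 ) c 1 c ( n 2 C ) C C N ( C ) C 1"
)
assert Chem.MolFromSmiles(src) is not None   # starting material is valid
assert Chem.MolFromSmiles(tgt) is not None   # predicted product is valid
print(tid, src, tgt)

Running the sketch prints 191 together with the detokenized pair Cc1ccc2[nH]c3c(c2c1)CN(C)CC3 and Cc1ccc2c(c1)c1c(n2C)CCN(C)C1; filtering on MolFromSmiles in this way is a cheap sanity check before passing beam-search outputs to any downstream scoring.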
data/translation/viewer.ipynb
ADDED
The diff for this file is too large to render.