Migrated from GitHub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- data/LICENSE +21 -0
- data/configs/protein_localization/eval_full_prot.json +11 -0
- data/configs/protein_localization/full_prot_comp_pred.json +55 -0
- data/data/dataset.json +3 -0
- data/data/mmseqs_row2cluster_30seq_80cov.p +0 -0
- data/environment.yml +133 -0
- data/notebook/Analysis.ipynb +0 -0
- data/protgps/__init__.py +51 -0
- data/protgps/callbacks/__init__.py +0 -0
- data/protgps/callbacks/basic.py +26 -0
- data/protgps/callbacks/swa.py +55 -0
- data/protgps/datasets/__init__.py +0 -0
- data/protgps/datasets/abstract.py +271 -0
- data/protgps/datasets/disprot.py +169 -0
- data/protgps/datasets/protein_compartments.py +412 -0
- data/protgps/datasets/reverse_homology.py +216 -0
- data/protgps/learning/losses/__init__.py +0 -0
- data/protgps/learning/losses/basic.py +156 -0
- data/protgps/learning/metrics/__init__.py +0 -0
- data/protgps/learning/metrics/basic.py +359 -0
- data/protgps/learning/optimizers/__init__.py +0 -0
- data/protgps/learning/optimizers/basic.py +29 -0
- data/protgps/learning/schedulers/__init__.py +0 -0
- data/protgps/learning/schedulers/basic.py +53 -0
- data/protgps/learning/searchers/__init__.py +0 -0
- data/protgps/learning/searchers/basic.py +16 -0
- data/protgps/learning/utils.py +5 -0
- data/protgps/lightning/__init__.py +0 -0
- data/protgps/lightning/base.py +455 -0
- data/protgps/loggers/__init__.py +0 -0
- data/protgps/loggers/comet.py +25 -0
- data/protgps/loggers/tensorboard.py +25 -0
- data/protgps/loggers/wandb.py +25 -0
- data/protgps/models/__init__.py +0 -0
- data/protgps/models/abstract.py +13 -0
- data/protgps/models/classifier.py +123 -0
- data/protgps/models/fair_esm.py +585 -0
- data/protgps/utils/__init__.py +0 -0
- data/protgps/utils/callbacks.py +57 -0
- data/protgps/utils/classes.py +126 -0
- data/protgps/utils/debug.py +13 -0
- data/protgps/utils/download.py +37 -0
- data/protgps/utils/loading.py +235 -0
- data/protgps/utils/messages.py +3 -0
- data/protgps/utils/parsing.py +597 -0
- data/protgps/utils/registry.py +44 -0
- data/protgps/utils/sampler.py +100 -0
- data/pyproject.toml +25 -0
- data/scripts/dispatcher.py +146 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
57 |
# Video files - compressed
|
58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
57 |
# Video files - compressed
|
58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
60 |
+
data/data/dataset.json filter=lfs diff=lfs merge=lfs -text
|
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) 2024 Peter Mikhael
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
data/configs/protein_localization/eval_full_prot.json
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"script": "main",
|
3 |
+
"train_config": "configs/protein_localization/full_prot_comp_pred.json",
|
4 |
+
"log_dir": "protgps/logs/",
|
5 |
+
"available_gpus": ["0,1,2,3,4,5,6,7"],
|
6 |
+
"eval_args": {
|
7 |
+
"test": [true],
|
8 |
+
"from_checkpoint": [true],
|
9 |
+
"checkpoint_path": ["checkpoints/prtogps/32bf44b16a4e770a674896b81dfb3729epoch=26.ckpt"]
|
10 |
+
}
|
11 |
+
}
|
data/configs/protein_localization/full_prot_comp_pred.json
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"script": "main",
|
3 |
+
"paired_hyperparams": {
|
4 |
+
"dataset_name": ["protein_condensates_combined"],
|
5 |
+
"protein_encoder_type": ["fair_esm_fast"]
|
6 |
+
},
|
7 |
+
"cartesian_hyperparams": {
|
8 |
+
"batch_size": [10],
|
9 |
+
"freeze_encoder": [false],
|
10 |
+
"freeze_esm": [false],
|
11 |
+
"logger_tags": ["full_prot_pred"],
|
12 |
+
"max_prot_len": [1800],
|
13 |
+
"accumulate_grad_batches": [1],
|
14 |
+
"precision": [16],
|
15 |
+
"dataset_file_path": ["data/dataset.json"],
|
16 |
+
"assign_splits": [true],
|
17 |
+
"split_seed": [0],
|
18 |
+
"split_probs": ["0.7 0.15 0.15"],
|
19 |
+
"lightning_name": ["base"],
|
20 |
+
"weight_decay": [0],
|
21 |
+
"momentum": [0.9],
|
22 |
+
"max_epochs": [30],
|
23 |
+
"lr_decay": [0.91],
|
24 |
+
"scheduler_name": ["reduce_on_plateau"],
|
25 |
+
"lr": [1e-3],
|
26 |
+
"model_name": ["protein_encoder"],
|
27 |
+
"pretrained_hub_dir": ["checkpoints/esm2"],
|
28 |
+
"output_residue_hiddens": [true],
|
29 |
+
"esm_hidden_layer": [6],
|
30 |
+
"esm_name": ["esm2_t6_8M_UR50D"],
|
31 |
+
"protein_hidden_dim" : [320],
|
32 |
+
"mlp_layer_configuration": ["512 512"],
|
33 |
+
"mlp_use_batch_norm": [true],
|
34 |
+
"loss_names": ["binary_cross_entropy"],
|
35 |
+
"metric_names": ["multilabel_classification"],
|
36 |
+
"checkpoint_dir": ["checkpoints/protgps"],
|
37 |
+
"checkpoint_save_last": [true],
|
38 |
+
"monitor": ["val_roc_auc"],
|
39 |
+
"num_workers": [30],
|
40 |
+
"optimizer_name": ["adam"],
|
41 |
+
"patience": [5],
|
42 |
+
"train": [true],
|
43 |
+
"dev": [true],
|
44 |
+
"test": [true],
|
45 |
+
"callback_names": ["checkpointer lr_monitor"],
|
46 |
+
"num_sanity_val_steps": [0],
|
47 |
+
"profiler": ["simple"],
|
48 |
+
"logger_name": ["tensorboard"],
|
49 |
+
"gpus": [1],
|
50 |
+
"val_check_interval": [1.0],
|
51 |
+
"ignore_warnings": [false],
|
52 |
+
"dropout": [0.1]
|
53 |
+
},
|
54 |
+
"available_gpus": ["0"]
|
55 |
+
}
|
data/data/dataset.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:02c2cf66a58f5ed3a30650da9d9d9b71c2b0700005e93aac48c7ef7bc9f05d60
|
3 |
+
size 13875542
|
data/data/mmseqs_row2cluster_30seq_80cov.p
ADDED
Binary file (32.4 kB). View file
|
|
data/environment.yml
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: protgps
|
2 |
+
channels:
|
3 |
+
- pytorch
|
4 |
+
- nvidia
|
5 |
+
- conda-forge
|
6 |
+
- defaults
|
7 |
+
dependencies:
|
8 |
+
- _libgcc_mutex=0.1=conda_forge
|
9 |
+
- _openmp_mutex=4.5=2_gnu
|
10 |
+
- blas=2.16=mkl
|
11 |
+
- bzip2=1.0.8=hd590300_5
|
12 |
+
- ca-certificates=2024.2.2=hbcca054_0
|
13 |
+
- captum==0.7.0
|
14 |
+
- cuda-cudart=11.7.99=0
|
15 |
+
- cuda-cupti=11.7.101=0
|
16 |
+
- cuda-libraries=11.7.1=0
|
17 |
+
- cuda-nvrtc=11.7.99=0
|
18 |
+
- cuda-nvtx=11.7.91=0
|
19 |
+
- cuda-runtime=11.7.1=0
|
20 |
+
- filelock=3.13.1=pyhd8ed1ab_0
|
21 |
+
- gmp=6.3.0=h59595ed_0
|
22 |
+
- gmpy2=2.1.2=py38h793c122_1
|
23 |
+
- intel-openmp=2023.1.0=hdb19cb5_46306
|
24 |
+
- jinja2=3.1.3=pyhd8ed1ab_0
|
25 |
+
- ld_impl_linux-64=2.40=h41732ed_0
|
26 |
+
- libblas=3.8.0=16_mkl
|
27 |
+
- libcblas=3.8.0=16_mkl
|
28 |
+
- libcublas=11.10.3.66=0
|
29 |
+
- libcufft=10.7.2.124=h4fbf590_0
|
30 |
+
- libcufile=1.8.1.2=0
|
31 |
+
- libcurand=10.3.4.107=0
|
32 |
+
- libcusolver=11.4.0.1=0
|
33 |
+
- libcusparse=11.7.4.91=0
|
34 |
+
- libffi=3.4.2=h7f98852_5
|
35 |
+
- libgcc-ng=13.2.0=h807b86a_5
|
36 |
+
- libgfortran-ng=7.5.0=h14aa051_20
|
37 |
+
- libgfortran4=7.5.0=h14aa051_20
|
38 |
+
- libgomp=13.2.0=h807b86a_5
|
39 |
+
- liblapack=3.8.0=16_mkl
|
40 |
+
- liblapacke=3.8.0=16_mkl
|
41 |
+
- libnpp=11.7.4.75=0
|
42 |
+
- libnsl=2.0.1=hd590300_0
|
43 |
+
- libnvjpeg=11.8.0.2=0
|
44 |
+
- libsqlite=3.45.1=h2797004_0
|
45 |
+
- libstdcxx-ng=13.2.0=h7e041cc_5
|
46 |
+
- libuuid=2.38.1=h0b41bf4_0
|
47 |
+
- libzlib=1.2.13=hd590300_5
|
48 |
+
- markupsafe=2.1.5=py38h01eb140_0
|
49 |
+
- mkl=2020.2=256
|
50 |
+
- mpc=1.3.1=hfe3b2da_0
|
51 |
+
- mpfr=4.2.1=h9458935_0
|
52 |
+
- mpmath=1.3.0=pyhd8ed1ab_0
|
53 |
+
- ncurses=6.4=h59595ed_2
|
54 |
+
- networkx=3.1=pyhd8ed1ab_0
|
55 |
+
- openssl=3.2.1=hd590300_0
|
56 |
+
- pip=24.0=pyhd8ed1ab_0
|
57 |
+
- python=3.8.15=he550d4f_1_cpython
|
58 |
+
- python_abi=3.8=4_cp38
|
59 |
+
- pytorch=2.0.0=py3.8_cuda11.7_cudnn8.5.0_0
|
60 |
+
- pytorch-cuda=11.7=h778d358_5
|
61 |
+
- pytorch-mutex=1.0=cuda
|
62 |
+
- readline=8.2=h8228510_1
|
63 |
+
- setuptools=69.1.0=pyhd8ed1ab_1
|
64 |
+
- sympy=1.12=pypyh9d50eac_103
|
65 |
+
- torchtriton=2.0.0=py38
|
66 |
+
- typing_extensions=4.9.0=pyha770c72_0
|
67 |
+
- wheel=0.42.0=pyhd8ed1ab_0
|
68 |
+
- xz=5.2.6=h166bdaf_0
|
69 |
+
- zlib=1.2.13=hd590300_5
|
70 |
+
- pip:
|
71 |
+
- absl-py==2.1.0
|
72 |
+
- cachetools==5.3.2
|
73 |
+
- certifi==2024.2.2
|
74 |
+
- charset-normalizer==3.3.2
|
75 |
+
- click==8.1.7
|
76 |
+
- comet-ml==3.28.1
|
77 |
+
- configobj==5.0.8
|
78 |
+
- docker-pycreds==0.4.0
|
79 |
+
- dulwich==0.21.7
|
80 |
+
- everett==3.3.0
|
81 |
+
- gitdb==4.0.11
|
82 |
+
- gitpython==3.1.42
|
83 |
+
- google-auth==2.28.1
|
84 |
+
- google-auth-oauthlib==1.0.0
|
85 |
+
- grpcio==1.62.0
|
86 |
+
- idna==3.6
|
87 |
+
- importlib-metadata==7.0.1
|
88 |
+
- importlib-resources==6.1.1
|
89 |
+
- joblib==1.3.2
|
90 |
+
- markdown==3.5.2
|
91 |
+
- numpy==1.23.4
|
92 |
+
- nvidia-ml-py3==7.352.0
|
93 |
+
- oauthlib==3.2.2
|
94 |
+
- openpyxl==3.1.2
|
95 |
+
- packaging==23.2
|
96 |
+
- pandas==2.0.3
|
97 |
+
- pathtools==0.1.2
|
98 |
+
- pillow==10.2.0
|
99 |
+
- pkgutil-resolve-name==1.3.10
|
100 |
+
- promise==2.3
|
101 |
+
- protobuf==3.20.1
|
102 |
+
- protpy==1.2.1
|
103 |
+
- psutil==5.9.8
|
104 |
+
- pyasn1==0.5.1
|
105 |
+
- pyasn1-modules==0.3.0
|
106 |
+
- pydeprecate==0.3.2
|
107 |
+
- python-dateutil==2.8.2
|
108 |
+
- pytorch-lightning==1.6.4
|
109 |
+
- pytz==2024.1
|
110 |
+
- pyyaml==6.0.1
|
111 |
+
- rdkit==2022.9.5
|
112 |
+
- requests-oauthlib==1.3.1
|
113 |
+
- requests-toolbelt==1.0.0
|
114 |
+
- rsa==4.9
|
115 |
+
- scikit-learn==1.0.1
|
116 |
+
- scipy==1.10.1
|
117 |
+
- semantic-version==2.10.0
|
118 |
+
- sentry-sdk==1.40.5
|
119 |
+
- setproctitle==1.3.3
|
120 |
+
- shortuuid==1.0.11
|
121 |
+
- six==1.16.0
|
122 |
+
- smmap==5.0.1
|
123 |
+
- tensorboard==2.14.0
|
124 |
+
- tensorboard-data-server==0.7.2
|
125 |
+
- threadpoolctl==3.3.0
|
126 |
+
- tqdm==4.62.3
|
127 |
+
- urllib3==2.2.1
|
128 |
+
- wandb==0.12.19
|
129 |
+
- websocket-client==1.7.0
|
130 |
+
- werkzeug==3.0.1
|
131 |
+
- wrapt==1.16.0
|
132 |
+
- wurlitzer==3.0.3
|
133 |
+
- zipp==3.17.0
|
data/notebook/Analysis.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/protgps/__init__.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# type: ignore
|
2 |
+
|
3 |
+
import sys
|
4 |
+
|
5 |
+
if sys.version_info[:2] >= (3, 8):
|
6 |
+
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
|
7 |
+
from importlib.metadata import PackageNotFoundError, version # pragma: no cover
|
8 |
+
else:
|
9 |
+
from importlib_metadata import PackageNotFoundError, version # pragma: no cover
|
10 |
+
|
11 |
+
try:
|
12 |
+
# Change here if project is renamed and does not equal the package name
|
13 |
+
dist_name = __name__
|
14 |
+
__version__ = version(dist_name)
|
15 |
+
except PackageNotFoundError: # pragma: no cover
|
16 |
+
__version__ = "unknown"
|
17 |
+
finally:
|
18 |
+
del version, PackageNotFoundError
|
19 |
+
|
20 |
+
|
21 |
+
# data
|
22 |
+
import protgps.datasets.protein_compartments
|
23 |
+
import protgps.datasets.reverse_homology
|
24 |
+
|
25 |
+
# lightning
|
26 |
+
import protgps.lightning.base
|
27 |
+
|
28 |
+
# optimizers
|
29 |
+
import protgps.learning.optimizers.basic
|
30 |
+
|
31 |
+
# scheduler
|
32 |
+
import protgps.learning.schedulers.basic
|
33 |
+
|
34 |
+
# losses
|
35 |
+
import protgps.learning.losses.basic
|
36 |
+
|
37 |
+
# metrics
|
38 |
+
import protgps.learning.metrics.basic
|
39 |
+
|
40 |
+
# callbacks
|
41 |
+
import protgps.callbacks.basic
|
42 |
+
import protgps.callbacks.swa
|
43 |
+
|
44 |
+
# models
|
45 |
+
import protgps.models.classifier
|
46 |
+
import protgps.models.fair_esm
|
47 |
+
|
48 |
+
# comet
|
49 |
+
import protgps.loggers.comet
|
50 |
+
import protgps.loggers.wandb
|
51 |
+
import protgps.loggers.tensorboard
|
data/protgps/callbacks/__init__.py
ADDED
File without changes
|
data/protgps/callbacks/basic.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from protgps.utils.registry import register_object
|
3 |
+
from protgps.utils.classes import ProtGPS
|
4 |
+
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
|
5 |
+
|
6 |
+
# TODO: add args for various callbacks -- currently hardcoded
|
7 |
+
|
8 |
+
|
9 |
+
@register_object("checkpointer", "callback")
|
10 |
+
class Checkpoint(ModelCheckpoint, ProtGPS):
|
11 |
+
def __init__(self, args) -> None:
|
12 |
+
super().__init__(
|
13 |
+
monitor=args.monitor,
|
14 |
+
dirpath=os.path.join(args.checkpoint_dir, args.experiment_name),
|
15 |
+
mode="min" if "loss" in args.monitor else "max",
|
16 |
+
filename="{}".format(args.experiment_name) + "{epoch}",
|
17 |
+
every_n_epochs=1,
|
18 |
+
save_top_k=args.checkpoint_save_top_k,
|
19 |
+
save_last=args.checkpoint_save_last,
|
20 |
+
)
|
21 |
+
|
22 |
+
|
23 |
+
@register_object("lr_monitor", "callback")
|
24 |
+
class LRMonitor(LearningRateMonitor, ProtGPS):
|
25 |
+
def __init__(self, args) -> None:
|
26 |
+
super().__init__(logging_interval="step")
|
data/protgps/callbacks/swa.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from protgps.utils.registry import register_object
|
2 |
+
from pytorch_lightning.callbacks import StochasticWeightAveraging
|
3 |
+
from protgps.utils.classes import ProtGPS
|
4 |
+
|
5 |
+
|
6 |
+
@register_object("swa", "callback")
|
7 |
+
class SWA(StochasticWeightAveraging, ProtGPS):
|
8 |
+
def __init__(self, args) -> None:
|
9 |
+
if "." in args.swa_epoch:
|
10 |
+
swa_epoch = float(args.swa_epoch)
|
11 |
+
else:
|
12 |
+
swa_epoch = int(args.swa_epoch)
|
13 |
+
|
14 |
+
super().__init__(
|
15 |
+
swa_epoch_start=swa_epoch,
|
16 |
+
swa_lrs=args.swa_lr,
|
17 |
+
annealing_epochs=args.swa_annealing_epochs,
|
18 |
+
annealing_strategy=args.swa_annealing_strategy,
|
19 |
+
avg_fn=None,
|
20 |
+
)
|
21 |
+
|
22 |
+
@staticmethod
|
23 |
+
def add_args(parser) -> None:
|
24 |
+
"""Add class specific args
|
25 |
+
|
26 |
+
Args:
|
27 |
+
parser (argparse.ArgumentParser): argument parser
|
28 |
+
"""
|
29 |
+
# stochastic weight averaging
|
30 |
+
parser.add_argument(
|
31 |
+
"--swa_epoch",
|
32 |
+
type=str,
|
33 |
+
default="0.8",
|
34 |
+
help="when to start swa",
|
35 |
+
)
|
36 |
+
|
37 |
+
parser.add_argument(
|
38 |
+
"--swa_lr",
|
39 |
+
type=float,
|
40 |
+
default=None,
|
41 |
+
help="lr for swa. None will use existing lr",
|
42 |
+
)
|
43 |
+
parser.add_argument(
|
44 |
+
"--swa_annealing_epochs",
|
45 |
+
type=int,
|
46 |
+
default=10,
|
47 |
+
help="number of epochs in the annealing phase",
|
48 |
+
)
|
49 |
+
parser.add_argument(
|
50 |
+
"--swa_annealing_strategy",
|
51 |
+
type=str,
|
52 |
+
choices=["cos", "linear"],
|
53 |
+
default="cos",
|
54 |
+
help="lr annealing strategy",
|
55 |
+
)
|
data/protgps/datasets/__init__.py
ADDED
File without changes
|
data/protgps/datasets/abstract.py
ADDED
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import traceback, warnings
|
2 |
+
import argparse
|
3 |
+
from typing import List, Literal
|
4 |
+
from abc import ABCMeta, abstractmethod
|
5 |
+
import json
|
6 |
+
from collections import Counter
|
7 |
+
import numpy as np
|
8 |
+
from torch.utils import data
|
9 |
+
from protgps.utils.classes import ProtGPS, set_protgps_type, classproperty
|
10 |
+
from protgps.utils.messages import METAFILE_NOTFOUND_ERR, LOAD_FAIL_MSG
|
11 |
+
import pickle
|
12 |
+
|
13 |
+
|
14 |
+
class AbstractDataset(data.Dataset, ProtGPS):
|
15 |
+
def __init__(self, args: argparse.ArgumentParser, split_group: str) -> None:
|
16 |
+
"""
|
17 |
+
Abstract Dataset
|
18 |
+
params: args - config.
|
19 |
+
params: split_group - ['train'|'dev'|'test'].
|
20 |
+
|
21 |
+
constructs: standard pytorch Dataset obj, which can be fed in a DataLoader for batching
|
22 |
+
"""
|
23 |
+
__metaclass__ = ABCMeta
|
24 |
+
|
25 |
+
super(AbstractDataset, self).__init__()
|
26 |
+
|
27 |
+
self.split_group = split_group
|
28 |
+
self.args = args
|
29 |
+
|
30 |
+
self.init_class(args, split_group)
|
31 |
+
|
32 |
+
self.dataset = self.create_dataset(split_group)
|
33 |
+
if len(self.dataset) == 0:
|
34 |
+
return
|
35 |
+
|
36 |
+
self.set_sample_weights(args)
|
37 |
+
|
38 |
+
self.print_summary_statement(self.dataset, split_group)
|
39 |
+
|
40 |
+
def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
|
41 |
+
"""Perform Class-Specific init methods
|
42 |
+
Default is to load JSON dataset
|
43 |
+
|
44 |
+
Args:
|
45 |
+
args (argparse.ArgumentParser)
|
46 |
+
split_group (str)
|
47 |
+
"""
|
48 |
+
self.load_dataset(args)
|
49 |
+
|
50 |
+
def load_dataset(self, args: argparse.ArgumentParser) -> None:
|
51 |
+
"""Loads dataset file
|
52 |
+
|
53 |
+
Args:
|
54 |
+
args (argparse.ArgumentParser)
|
55 |
+
|
56 |
+
Raises:
|
57 |
+
Exception: Unable to load
|
58 |
+
"""
|
59 |
+
try:
|
60 |
+
self.metadata_json = json.load(open(args.dataset_file_path, "r"))
|
61 |
+
except Exception as e:
|
62 |
+
raise Exception(METAFILE_NOTFOUND_ERR.format(args.dataset_file_path, e))
|
63 |
+
|
64 |
+
@abstractmethod
|
65 |
+
def create_dataset(
|
66 |
+
self, split_group: Literal["train", "dev", "test"]
|
67 |
+
) -> List[dict]:
|
68 |
+
"""
|
69 |
+
Creates the dataset of samples from json metadata file.
|
70 |
+
"""
|
71 |
+
pass
|
72 |
+
|
73 |
+
@abstractmethod
|
74 |
+
def skip_sample(self, sample) -> bool:
|
75 |
+
"""
|
76 |
+
Return True if sample should be skipped and not included in data
|
77 |
+
"""
|
78 |
+
return False
|
79 |
+
|
80 |
+
@abstractmethod
|
81 |
+
def check_label(self, sample) -> bool:
|
82 |
+
"""
|
83 |
+
Return True if the row contains a valid label for the task
|
84 |
+
"""
|
85 |
+
pass
|
86 |
+
|
87 |
+
@abstractmethod
|
88 |
+
def get_label(self, sample):
|
89 |
+
"""
|
90 |
+
Get task specific label for a given sample
|
91 |
+
"""
|
92 |
+
pass
|
93 |
+
|
94 |
+
@property
|
95 |
+
@abstractmethod
|
96 |
+
def SUMMARY_STATEMENT(self) -> None:
|
97 |
+
"""
|
98 |
+
Prints summary statement with dataset stats
|
99 |
+
"""
|
100 |
+
pass
|
101 |
+
|
102 |
+
def print_summary_statement(self, dataset, split_group):
|
103 |
+
statement = "{} DATASET CREATED FOR {}.\n{}".format(
|
104 |
+
split_group.upper(), self.args.dataset_name.upper(), self.SUMMARY_STATEMENT
|
105 |
+
)
|
106 |
+
print(statement)
|
107 |
+
|
108 |
+
def __len__(self) -> int:
|
109 |
+
return len(self.dataset)
|
110 |
+
|
111 |
+
@abstractmethod
|
112 |
+
def __getitem__(self, index):
|
113 |
+
"""
|
114 |
+
Fetch single sample from dataset
|
115 |
+
|
116 |
+
Args:
|
117 |
+
index (int): random index of sample from dataset
|
118 |
+
|
119 |
+
Returns:
|
120 |
+
sample (dict): a sample
|
121 |
+
"""
|
122 |
+
sample = self.dataset[index]
|
123 |
+
try:
|
124 |
+
return sample
|
125 |
+
except Exception:
|
126 |
+
warnings.warn(
|
127 |
+
LOAD_FAIL_MSG.format(sample["sample_id"], traceback.print_exc())
|
128 |
+
)
|
129 |
+
|
130 |
+
def assign_splits(self, metadata_json, split_probs, seed=0) -> None:
|
131 |
+
"""
|
132 |
+
Assign samples to data splits
|
133 |
+
|
134 |
+
Args:
|
135 |
+
metadata_json (dict): raw json dataset loaded
|
136 |
+
"""
|
137 |
+
np.random.seed(seed)
|
138 |
+
if self.args.split_type == "random":
|
139 |
+
for idx in range(len(metadata_json)):
|
140 |
+
if metadata_json[idx] is None:
|
141 |
+
continue
|
142 |
+
metadata_json[idx]["split"] = np.random.choice(
|
143 |
+
["train", "dev", "test"], p=split_probs
|
144 |
+
)
|
145 |
+
elif self.args.split_type == "mmseqs":
|
146 |
+
# mmseqs easy-cluster --min-seq-id 0.3 -c 0.8
|
147 |
+
# get all samples
|
148 |
+
to_split = {}
|
149 |
+
|
150 |
+
row2clust = pickle.load(
|
151 |
+
open(
|
152 |
+
"data/mmseqs_row2cluster_30seq_80cov.p",
|
153 |
+
"rb",
|
154 |
+
)
|
155 |
+
)
|
156 |
+
# rule id
|
157 |
+
clusters = list(row2clust.values())
|
158 |
+
clust2count = Counter(clusters)
|
159 |
+
samples = sorted(list(set(clusters)))
|
160 |
+
np.random.shuffle(samples)
|
161 |
+
samples_cumsum = np.cumsum([clust2count[s] for s in samples])
|
162 |
+
# Find the indices for each quantile
|
163 |
+
split_indices = [
|
164 |
+
np.searchsorted(
|
165 |
+
samples_cumsum, np.round(q, 3) * samples_cumsum[-1], side="right"
|
166 |
+
)
|
167 |
+
for q in np.cumsum(split_probs)
|
168 |
+
]
|
169 |
+
split_indices[-1] = len(samples)
|
170 |
+
split_indices = np.concatenate([[0], split_indices])
|
171 |
+
for i in range(len(split_indices) - 1):
|
172 |
+
to_split.update(
|
173 |
+
{
|
174 |
+
sample: ["train", "dev", "test"][i]
|
175 |
+
for sample in samples[split_indices[i] : split_indices[i + 1]]
|
176 |
+
}
|
177 |
+
)
|
178 |
+
for idx in range(len(metadata_json)):
|
179 |
+
metadata_json[idx]["split"] = to_split[row2clust[idx]]
|
180 |
+
|
181 |
+
def set_sample_weights(self, args: argparse.ArgumentParser) -> None:
|
182 |
+
"""
|
183 |
+
Set weights for each sample
|
184 |
+
|
185 |
+
Args:
|
186 |
+
args (argparse.ArgumentParser)
|
187 |
+
"""
|
188 |
+
if args.class_bal:
|
189 |
+
label_dist = [str(d[args.class_bal_key]) for d in self.dataset]
|
190 |
+
label_counts = Counter(label_dist)
|
191 |
+
weight_per_label = 1.0 / len(label_counts)
|
192 |
+
label_weights = {
|
193 |
+
label: weight_per_label / count for label, count in label_counts.items()
|
194 |
+
}
|
195 |
+
|
196 |
+
print("Class counts are: {}".format(label_counts))
|
197 |
+
print("Label weights are {}".format(label_weights))
|
198 |
+
self.weights = [
|
199 |
+
label_weights[str(d[args.class_bal_key])] for d in self.dataset
|
200 |
+
]
|
201 |
+
else:
|
202 |
+
pass
|
203 |
+
|
204 |
+
@classproperty
|
205 |
+
def DATASET_ITEM_KEYS(cls) -> list:
|
206 |
+
"""
|
207 |
+
List of keys to be included in sample when being batched
|
208 |
+
|
209 |
+
Returns:
|
210 |
+
list
|
211 |
+
"""
|
212 |
+
standard = ["sample_id"]
|
213 |
+
return standard
|
214 |
+
|
215 |
+
@staticmethod
|
216 |
+
def add_args(parser) -> None:
|
217 |
+
"""Add class specific args
|
218 |
+
|
219 |
+
Args:
|
220 |
+
parser (argparse.ArgumentParser): argument parser
|
221 |
+
"""
|
222 |
+
parser.add_argument(
|
223 |
+
"--class_bal", action="store_true", default=False, help="class balance"
|
224 |
+
)
|
225 |
+
parser.add_argument(
|
226 |
+
"--class_bal_key",
|
227 |
+
type=str,
|
228 |
+
default="y",
|
229 |
+
help="dataset key to use for class balancing",
|
230 |
+
)
|
231 |
+
parser.add_argument(
|
232 |
+
"--dataset_file_path",
|
233 |
+
type=str,
|
234 |
+
default=None,
|
235 |
+
help="Path to dataset file",
|
236 |
+
)
|
237 |
+
parser.add_argument(
|
238 |
+
"--data_dir",
|
239 |
+
type=str,
|
240 |
+
default=None,
|
241 |
+
help="Path to dataset directory",
|
242 |
+
)
|
243 |
+
parser.add_argument(
|
244 |
+
"--num_classes", type=int, default=6, help="Number of classes to predict"
|
245 |
+
)
|
246 |
+
# Alternative training/testing schemes
|
247 |
+
parser.add_argument(
|
248 |
+
"--assign_splits",
|
249 |
+
action="store_true",
|
250 |
+
default=False,
|
251 |
+
help="Whether to assign different splits than those predetermined in dataset",
|
252 |
+
)
|
253 |
+
parser.add_argument(
|
254 |
+
"--split_type",
|
255 |
+
type=str,
|
256 |
+
default="random",
|
257 |
+
help="How to split dataset if assign_split = True..",
|
258 |
+
)
|
259 |
+
parser.add_argument(
|
260 |
+
"--split_probs",
|
261 |
+
type=float,
|
262 |
+
nargs="+",
|
263 |
+
default=[0.6, 0.2, 0.2],
|
264 |
+
help="Split probs for datasets without fixed train dev test. ",
|
265 |
+
)
|
266 |
+
parser.add_argument(
|
267 |
+
"--split_seed",
|
268 |
+
type=int,
|
269 |
+
default=0,
|
270 |
+
help="seed for consistent randomization",
|
271 |
+
)
|
data/protgps/datasets/disprot.py
ADDED
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# dataset utils
|
2 |
+
from random import sample
|
3 |
+
import warnings
|
4 |
+
from typing import Literal, List
|
5 |
+
from protgps.datasets.abstract import AbstractDataset
|
6 |
+
from protgps.utils.registry import register_object, get_object
|
7 |
+
from protgps.utils.classes import set_protgps_type
|
8 |
+
from tqdm import tqdm
|
9 |
+
import argparse
|
10 |
+
import torch
|
11 |
+
|
12 |
+
|
13 |
+
@register_object("disprot", "dataset")
|
14 |
+
class Disprot(AbstractDataset):
|
15 |
+
"""A pytorch Dataset for the classifying protein intrinsically disordered regions from Disprot DB."""
|
16 |
+
|
17 |
+
def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
|
18 |
+
"""Perform Class-Specific init methods
|
19 |
+
Default is to load JSON dataset
|
20 |
+
|
21 |
+
Args:
|
22 |
+
args (argparse.ArgumentParser)
|
23 |
+
split_group (str)
|
24 |
+
"""
|
25 |
+
self.load_dataset(args)
|
26 |
+
if args.assign_splits:
|
27 |
+
self.assign_splits(
|
28 |
+
self.metadata_json, split_probs=args.split_probs, seed=args.split_seed
|
29 |
+
)
|
30 |
+
|
31 |
+
if args.precomputed_protein_embeddings:
|
32 |
+
self.protein_encoder = get_object(self.args.protein_encoder_name, "model")(
|
33 |
+
args
|
34 |
+
).to("cuda")
|
35 |
+
self.protein_encoder.eval()
|
36 |
+
|
37 |
+
def skip_sample(self, sample, split_group) -> bool:
|
38 |
+
"""
|
39 |
+
Return True if sample should be skipped and not included in data
|
40 |
+
"""
|
41 |
+
if sample["split"] != split_group:
|
42 |
+
return True
|
43 |
+
return False
|
44 |
+
|
45 |
+
def create_dataset(
|
46 |
+
self, split_group: Literal["train", "dev", "test"]
|
47 |
+
) -> List[dict]:
|
48 |
+
|
49 |
+
sequences = []
|
50 |
+
dataset = []
|
51 |
+
for protein_dict in tqdm(self.metadata_json["data"]):
|
52 |
+
if self.skip_sample(protein_dict, split_group):
|
53 |
+
continue
|
54 |
+
|
55 |
+
item = {
|
56 |
+
"x": protein_dict["sequence"],
|
57 |
+
"y": self.get_label(sample_dict),
|
58 |
+
"sample_id": protein_dict["disprot_id"],
|
59 |
+
}
|
60 |
+
sequences.append(protein_dict["sequence"])
|
61 |
+
dataset.append(item)
|
62 |
+
|
63 |
+
|
64 |
+
if args.precomputed_protein_embeddings:
|
65 |
+
# this batches protein sequences and then converts to features
|
66 |
+
batch_size = 10
|
67 |
+
hiddens = []
|
68 |
+
for i in tqdm(range(0, len(ids), batch_size)):
|
69 |
+
preds = self.protein_encoder(sequences[i : i + batch_size])
|
70 |
+
hiddens.append( preds["hidden"].cpu() )
|
71 |
+
hiddens = torch.stack(hiddens)
|
72 |
+
|
73 |
+
for i, h in enumerate(hiddens):
|
74 |
+
dataset[i]["sequence"] = dataset[i]["x"]
|
75 |
+
dataset[i]["x"] = h
|
76 |
+
|
77 |
+
return dataset
|
78 |
+
|
79 |
+
def get_label(self, protein_dict):
|
80 |
+
"""
|
81 |
+
Get task specific label for a given sample
|
82 |
+
"""
|
83 |
+
y = torch.zeros(len(protein_dict["sequence"]))
|
84 |
+
for disordered_region in protein_dict["regions"]:
|
85 |
+
start = disordered_region["start"] - 1
|
86 |
+
end = disordered_region["end"]
|
87 |
+
y[start:end] = 1
|
88 |
+
return y
|
89 |
+
|
90 |
+
def __getitem__(self, index):
|
91 |
+
try:
|
92 |
+
return self.dataset[index]
|
93 |
+
|
94 |
+
except Exception:
|
95 |
+
warnings.warn("Could not load sample")
|
96 |
+
|
97 |
+
@property
|
98 |
+
def SUMMARY_STATEMENT(self) -> None:
|
99 |
+
"""
|
100 |
+
Prints summary statement with dataset stats
|
101 |
+
"""
|
102 |
+
return f"{len(self.dataset)} Proteins."
|
103 |
+
|
104 |
+
@staticmethod
|
105 |
+
def set_args(args) -> None:
|
106 |
+
args.num_classes = 1
|
107 |
+
|
108 |
+
|
109 |
+
@staticmethod
|
110 |
+
def add_args(parser) -> None:
|
111 |
+
"""Add class specific args
|
112 |
+
|
113 |
+
Args:
|
114 |
+
parser (argparse.ArgumentParser): argument parser
|
115 |
+
"""
|
116 |
+
super(Disprot, Disprot).add_args(parser)
|
117 |
+
parser.add_argument(
|
118 |
+
"--precomputed_protein_embeddings",
|
119 |
+
default=False,
|
120 |
+
action="store_true",
|
121 |
+
help="whether to use precomputed embeddings",
|
122 |
+
)
|
123 |
+
|
124 |
+
|
125 |
+
|
126 |
+
@register_object("protein_compartment_precomputed", "dataset")
class Protein_Compartments_Precomputed(Protein_Compartments):
    """Compartment-classification dataset backed by precomputed ESM2 embeddings."""

    def create_dataset(
        self, split_group: Literal["train", "dev", "test"]
    ) -> List[dict]:
        """Assemble (embedding, label) samples for the requested split."""
        samples = []
        for row in tqdm(self.metadata_json):
            if self.skip_sample(row, split_group):
                continue
            samples.append(
                {
                    "sequence": row["Sequence"],
                    # Embedding was computed offline (key "esm2_embedding").
                    "x": torch.tensor(row["esm2_embedding"]),
                    "y": self.get_label(row),
                    "sample_id": row["Entry"],
                }
            )
        return samples

    @staticmethod
    def set_args(args) -> None:
        # Five target compartments; the classifier head consumes the raw
        # protein embedding, so its input width equals the embedding size.
        args.num_classes = 5
        args.mlp_input_dim = args.protein_hidden_dim

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        super(
            Protein_Compartments_Precomputed, Protein_Compartments_Precomputed
        ).add_args(parser)
        parser.add_argument(
            "--protein_hidden_dim",
            type=int,
            default=1280,
            help="hidden dimension of the protein",
        )
|
data/protgps/datasets/protein_compartments.py
ADDED
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# dataset utils
|
2 |
+
import warnings
|
3 |
+
from typing import Literal, List
|
4 |
+
from protgps.datasets.abstract import AbstractDataset
|
5 |
+
from protgps.utils.registry import register_object
|
6 |
+
from tqdm import tqdm
|
7 |
+
import argparse
|
8 |
+
import torch
|
9 |
+
|
10 |
+
|
11 |
+
@register_object("protein_compartment", "dataset")
class Protein_Compartments(AbstractDataset):
    """A pytorch Dataset for the classifying proteins into compartment."""

    def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
        """Perform Class-Specific init methods
        Default is to load JSON dataset

        Args:
            args (argparse.ArgumentParser)
            split_group (str)
        """
        self.load_dataset(args)
        if args.assign_splits:
            self.assign_splits(
                self.metadata_json, split_probs=args.split_probs, seed=args.split_seed
            )

    @property
    def COMPARTMENTS(self):
        # Order matters: get_label emits one binary entry per compartment in
        # this order.
        return ["cytosol", "nucleoli", "nucleoplasm", "ER", "mitochondria"]

    def skip_sample(self, sample, split_group) -> bool:
        """
        Return True if sample should be skipped and not included in data

        Drops samples from other splits and sequences (stored under either
        "Sequence" or "sequence") shorter than 10 residues or longer than
        ``args.max_prot_len``.
        """
        if sample["split"] != split_group:
            return True
        # The metadata stores the sequence under either capitalization.
        for key in ("Sequence", "sequence"):
            if key in sample:
                if len(sample[key]) < 10:
                    return True
                if len(sample[key]) > self.args.max_prot_len:
                    return True
        return False

    def create_dataset(
        self, split_group: Literal["train", "dev", "test"]
    ) -> List[dict]:
        """Build (sequence, label) samples for the requested split."""
        dataset = []
        for sample_dict in tqdm(self.metadata_json):
            if self.skip_sample(sample_dict, split_group):
                continue
            dataset.append(
                {
                    "x": sample_dict["Sequence"],
                    "y": self.get_label(sample_dict),
                    "sample_id": sample_dict["Entry"],
                }
            )
        return dataset

    def get_label(self, sample):
        """
        Get task specific label for a given sample

        Returns None when any compartment key is missing, which makes the
        sample unusable for supervised training.
        """
        try:
            return torch.tensor([sample[c] for c in self.COMPARTMENTS])
        except Exception:  # fix: was a bare except (also caught Ctrl-C)
            return None

    def __getitem__(self, index):
        """Return the sample at ``index``; warn and yield None on failure."""
        try:
            return self.dataset[index]
        except Exception:
            warnings.warn("Could not load sample")

    @property
    def SUMMARY_STATEMENT(self) -> str:
        """Return a summary string with per-compartment positive counts.

        Note: annotation corrected from ``-> None``; this property returns a
        string.
        """
        try:
            compartment_counts = (
                torch.stack([d["y"] for d in self.dataset]).sum(0).tolist()
            )
            compartment_str = ""
            for i, (c, count) in enumerate(zip(self.COMPARTMENTS, compartment_counts)):
                compartment_str += f"{count} {c.upper()}"
                if i < len(self.COMPARTMENTS) - 1:
                    compartment_str += " -- "
            return f"* {len(self.dataset)} Proteins.\n* {compartment_str}"
        except Exception:  # fix: was a bare except
            return "Could not produce summary statement"

    @staticmethod
    def set_args(args) -> None:
        # One binary output per entry of COMPARTMENTS.
        args.num_classes = 5

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        super(Protein_Compartments, Protein_Compartments).add_args(parser)
        parser.add_argument(
            "--max_prot_len",
            type=int,
            default=2000,
            help="len above which to skip prots",
        )
|
119 |
+
|
120 |
+
|
121 |
+
@register_object("protein_compartment_guy", "dataset")
class ProteinCompartmentsGuy(AbstractDataset):
    """A pytorch Dataset for the classifying proteins into compartment."""

    def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
        """Perform Class-Specific init methods
        Default is to load JSON dataset

        Args:
            args (argparse.ArgumentParser)
            split_group (str)
        """
        self.load_dataset(args)
        if args.assign_splits:
            self.assign_splits(
                self.metadata_json, split_probs=args.split_probs, seed=args.split_seed
            )

    @property
    def COMPARTMENTS(self):
        # Order matters: get_label emits one binary entry per compartment in
        # this order.
        return [
            "Nucleus",
            "Cytoplasm",
            "Secreted",
            "Mitochondrion",
            "Membrane",
            "Endoplasmic",
            "Plastid",
            "Golgi_apparatus",
            "Lysosome",
            "Peroxisome",
        ]

    @property
    def esm_tokens(self):
        # Residue alphabet accepted by the ESM tokenizer; sequences containing
        # any other character are skipped.
        return [
            "L",
            "A",
            "G",
            "V",
            "S",
            "E",
            "R",
            "T",
            "I",
            "D",
            "P",
            "K",
            "Q",
            "N",
            "F",
            "Y",
            "M",
            "H",
            "W",
            "C",
            "X",
            "B",
            "U",
            "Z",
            "O",
            ".",
            "-",
        ]

    def target_index(self, target):
        """Return the position of ``target`` in COMPARTMENTS."""
        return self.COMPARTMENTS.index(target)

    def skip_sample(self, sample, split_group) -> bool:
        """
        Return True if sample should be skipped and not included in data
        """
        if sample is None:
            return True
        if self.get_label(sample) is None:
            return True
        if self.args.drop_multilabel:
            if self.get_label(sample).sum() > 1:  # skip multi-compartment samples
                print("Skipped because multi label")
                return True
        if split_group in ["train", "dev", "test"]:
            if sample["split"] != split_group:
                return True
        # The metadata stores the sequence under either capitalization; drop
        # sequences that are too short/long or contain non-ESM characters.
        for key in ("sequence", "Sequence"):
            if key in sample:
                seq = sample[key]
                if len(seq) < 10:
                    return True
                if len(seq) > self.args.max_prot_len:
                    return True
                if not set(seq).issubset(self.esm_tokens):
                    return True
        return False

    def skip_idr_sample(self, sample, split_group) -> bool:
        """Extra filtering for IDR-based samples on top of skip_sample."""
        if self.skip_sample(sample, split_group):
            return True

        if all(len(s) < 10 for s in sample["idrs"]):  # if all IDRs are small
            print("Skipped because all IDRs are len 10 or less")
            return True

        if len(sample["idrs"]) == 0:  # if there are no idrs
            print("Skipped because no IDRs")
            return True
        # Fix: the original fell off the end and implicitly returned None;
        # the "keep" decision is now explicit.
        return False

    def create_dataset(
        self, split_group: Literal["train", "dev", "test"]
    ) -> List[dict]:
        """Build the list of model-ready samples for ``split_group``."""
        dataset = []
        for sample_dict in tqdm(self.metadata_json):
            if self.skip_sample(sample_dict, split_group):
                continue
            seq_key = "sequence" if "sequence" in sample_dict else "Sequence"
            entry_key = "entry" if "entry" in sample_dict else "Entry"
            dataset.append(
                {
                    "x": sample_dict[seq_key],
                    "y": self.get_label(sample_dict),
                    "entry_id": sample_dict[entry_key],
                }
            )
        return dataset

    def get_label(self, sample):
        """
        Get task specific label for a given sample

        Returns None when the sample's "labels" dict is missing or lacks a
        compartment key, signalling skip_sample to drop it.
        """
        try:
            return torch.tensor([sample["labels"][c] for c in self.COMPARTMENTS])
        except Exception:  # fix: was a bare except
            return None

    def __getitem__(self, index):
        """Return the sample at ``index``; warn and yield None on failure."""
        try:
            return self.dataset[index]
        except Exception:
            warnings.warn("Could not load sample")

    @property
    def SUMMARY_STATEMENT(self) -> str:
        """Return a summary string with per-compartment positive counts.

        Note: annotation corrected from ``-> None``; this property returns a
        string.
        """
        try:
            compartment_counts = (
                torch.stack([d["y"] for d in self.dataset]).sum(0).tolist()
            )
            compartment_str = ""
            for i, (c, count) in enumerate(zip(self.COMPARTMENTS, compartment_counts)):
                compartment_str += f"{count} {c.upper()}"
                if i < len(self.COMPARTMENTS) - 1:
                    compartment_str += " -- "
            return f"* {len(self.dataset)} Proteins.\n* {compartment_str}"
        except Exception:  # fix: was a bare except
            return "Could not produce summary statement"

    @staticmethod
    def set_args(args) -> None:
        # One binary output per entry of COMPARTMENTS.
        args.num_classes = 10

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        Protein_Compartments.add_args(parser)
        # Fix: Protein_Compartments.add_args already registers --max_prot_len
        # (same type and default); re-adding it here raised
        # argparse.ArgumentError for a conflicting option string, so the
        # duplicate definition was removed.
        parser.add_argument(
            "--drop_multilabel",
            # NOTE(review): argparse's type=bool treats ANY non-empty string
            # (including "False") as True. Kept for CLI compatibility, but
            # confirm callers only ever rely on the default.
            type=bool,
            default=False,
            help="whether to drop multilabel samples",
        )
|
312 |
+
|
313 |
+
|
314 |
+
@register_object("protein_compartment_uniprot_combined", "dataset")
class ProteinCompartmentsUniprotCombined(ProteinCompartmentsGuy):
    """Compartment dataset whose label space merges alternative spellings.

    Entries of COMPARTMENTS that are sets group synonymous label keys; a
    sample is positive for such an entry if any member key is positive.
    """

    def get_label(self, sample):
        """
        Get task specific label for a given sample

        Returns None when the sample has no positive compartment at all (or
        its "labels" dict is missing/malformed), which causes skip_sample to
        drop it.
        """
        try:
            label = []
            for compartment in self.COMPARTMENTS:
                if isinstance(compartment, str):
                    label.append(sample["labels"].get(compartment, 0))
                else:
                    # Set entry: positive if any synonym key is positive.
                    hit = 0
                    for key in compartment:
                        if sample["labels"].get(key) == 1:
                            hit = 1
                            break
                    label.append(hit)
            if sum(label) > 0:
                return torch.tensor(label)
            else:
                return None
        except Exception:  # fix: was a bare except
            return None

    def target_index(self, target):
        """Map a label name (or a member of a synonym group) to its index.

        Returns None when no entry matches.
        """
        for i, compartment in enumerate(self.COMPARTMENTS):
            if isinstance(compartment, str):
                if isinstance(target, str):
                    if compartment == target:
                        return i
                # NOTE(review): non-str targets are never matched against
                # plain string compartments — mirrors the original logic.
            else:
                if target in compartment:
                    return i
                elif next(iter(target)) in compartment:
                    # NOTE(review): for a str target this tests its FIRST
                    # CHARACTER against the synonym set — presumably intended
                    # for iterable targets; confirm before relying on it.
                    return i
        return None

    @property
    def COMPARTMENTS(self):
        # 22 entries; order defines the layout of the label vector. Set
        # entries group alternative spellings of the same label.
        return [
            "nuclear_membrane",
            "rough_endoplasmic_reticulum",
            "vacuole",
            "nucleus",
            "inflammasome",
            {"endplasmic_reticulum", "endoplasmic_reticulum"},
            "cytoplasm",
            "nuclear_gem",
            {"membrane", "cell_membrane"},
            "mitochondrion",
            {"vesicle", "vesicles"},
            "cell_projection",
            "lipid_droplet",
            "sarcoplasmic_reticulum",
            "endosome",
            "centromere",
            "nuclear_body",
            "nucleoplasm",
            "golgi_apparatus",
            {"excretion_vesicles", "excretion_vesicle"},
            "peroxisome",
            "lysosome",
        ]

    @staticmethod
    def set_args(args) -> None:
        # Matches len(COMPARTMENTS) above.
        args.num_classes = 22
|
388 |
+
|
389 |
+
|
390 |
+
# USE THIS
@register_object("protein_condensates_combined", "dataset")
class ProteinCondensatesCombined(ProteinCompartmentsUniprotCombined):
    """Condensate-focused label space.

    Inherits label construction (synonym sets collapse to one binary entry)
    from ProteinCompartmentsUniprotCombined.
    """

    @property
    def COMPARTMENTS(self):
        # 12 entries; set entries group alternative spellings of one label.
        # Order defines the layout of the label vector.
        # NOTE(review): "pml-bdoy" looks like a typo for "pml-body", but it is
        # a runtime key — confirm against the metadata before changing it.
        return [
            {"nuclear_speckles", "nuclear_speckle"},
            {"pbody", "p-body"},
            {"pml_body", "pml-bdoy"},
            "post_synaptic_density",
            "stress_granule",
            {"chromosomes", "chromosome"},
            "nucleolus",
            "nuclear_pore_complex",
            "cajal_body",
            "rna_granule",
            "cell_junction",
            "transcriptional",
        ]

    @staticmethod
    def set_args(args) -> None:
        # Matches len(COMPARTMENTS) above.
        args.num_classes = 12
|
data/protgps/datasets/reverse_homology.py
ADDED
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# dataset utils
|
2 |
+
from random import sample
|
3 |
+
import warnings
|
4 |
+
from typing import Literal, List
|
5 |
+
from protgps.datasets.abstract import AbstractDataset
|
6 |
+
from protgps.utils.registry import register_object, get_object
|
7 |
+
from protgps.utils.classes import set_protgps_type
|
8 |
+
from tqdm import tqdm
|
9 |
+
import argparse
|
10 |
+
import torch
|
11 |
+
import os, glob
|
12 |
+
import re
|
13 |
+
import numpy as np
|
14 |
+
from argparse import Namespace
|
15 |
+
import copy
|
16 |
+
@register_object("reverse_homology", "dataset")
class ReverseHomology(AbstractDataset):
    """A pytorch Dataset for contrastive learning over homology sets of IDRs."""

    def load_homology_dataset(self, args: argparse.ArgumentParser) -> None:
        """Loads fasta files from dataset folder
        Args:
            args (argparse.ArgumentParser)
        Raises:
            Exception: Unable to load
        """
        data_folders = args.homology_dataset_folder.split(",")
        fasta_paths = []
        for folder_path in data_folders:
            fasta_paths.extend(glob.glob(os.path.join(folder_path, "*.fasta")))
        print("Loading fasta files...")
        for fasta in tqdm(fasta_paths):
            idrs = []
            # Fix: context manager guarantees the handle is closed — the
            # original opened each fasta file without ever closing it.
            with open(fasta, "r") as f:
                lines = f.readlines()
            for line in lines:
                if re.search(">", line):
                    continue  # FASTA header line
                s = line.replace("-", "").strip()  # drop alignment gaps
                if len(s) <= self.args.max_idr_len:  # skip long sequences
                    idrs.append(s)
            # A homology set is only usable if it can supply one anchor plus
            # the requested number of positives.
            if len(idrs) >= self.args.pos_samples + 1:
                self.homology_sets.append(np.array(idrs))

    def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
        """Perform Class-Specific init methods
        Default is to load JSON dataset

        Args:
            args (argparse.ArgumentParser)
            split_group (str)
        """
        self.homology_sets = []
        if self.args.homology_dataset_folder and self.args.use_homology_dataset:
            self.load_homology_dataset(args)
        if self.args.compartment_dataset_file and self.args.use_compartment_dataset:
            # Deep-copy args: the compartment loader mutates them locally.
            self.load_compartment_dataset(copy.deepcopy(args))

    def load_compartment_dataset(self, args: argparse.ArgumentParser) -> None:
        """Build homology sets from a compartment dataset's IDR annotations.

        Args:
            args (argparse.ArgumentParser): a deep copy of the run args;
                mutated locally to point at the compartment dataset.

        Raises:
            ValueError: if no compartment dataset name was provided.
        """
        if self.args.compartment_dataset_name:
            args.dataset_file_path = self.args.compartment_dataset_file
            args.drop_multilabel = False
            args.max_prot_len = np.inf
            dataset = get_object(self.args.compartment_dataset_name, "dataset")(
                args, "train"
            )
            comp_dict = {}
            for sample_dict in tqdm(dataset.metadata_json):
                idrs = "".join(sample_dict["idrs"])
                if len(idrs) <= self.args.max_idr_len:
                    label = dataset.get_label(sample_dict)
                    # Every positive compartment receives this protein's
                    # concatenated IDR string.
                    for l in torch.argwhere(label == 1).T[0]:
                        comp_dict.setdefault(l.item(), []).append(idrs)

            for label in comp_dict:
                if len(comp_dict[label]) >= self.args.pos_samples + 1:
                    self.homology_sets.append(np.array(comp_dict[label]))
        else:
            # ValueError (an Exception subclass, so existing handlers still
            # catch it) is more precise than the original bare Exception.
            raise ValueError("No compartment dataset name provided")

    def create_dataset(
        self, split_group: Literal["train", "dev", "test"]
    ) -> List[dict]:
        """Generate contrastive samples for a split.

        Each split uses its own RNG seed, and a share of homology_multiple
        proportional to its split probability.
        """
        dataset = []
        print(f"Creating '{split_group}' dataset...")
        if split_group == "train":
            hom_mult = self.args.homology_multiple * self.args.split_probs[0]
            rng = np.random.default_rng(self.args.dataset_seed)
        elif split_group == "dev":
            hom_mult = self.args.homology_multiple * self.args.split_probs[1]
            rng = np.random.default_rng(self.args.dataset_seed + 1)
        elif split_group == "test":
            hom_mult = self.args.homology_multiple * self.args.split_probs[2]
            rng = np.random.default_rng(self.args.dataset_seed + 2)
        else:
            # Fix: the original fell through with hom_mult/rng unbound and
            # crashed with an opaque NameError for unexpected split names.
            raise ValueError(f"Unknown split group: {split_group}")

        for _ in tqdm(range(int(hom_mult * len(self.homology_sets)))):
            sample, rng = self.generate_sample(rng)
            dataset.append(sample)
        return dataset

    def generate_sample(self, rng):
        """Generates sample for contrastive learning of homology sets
        Args:
            rng: numpy random generator
        Returns:
            tuple: ({"x": [anchor, *positives, *negatives]}, rng)
        """
        # Clamp neg_samples when there are too few homology sets to draw from.
        if len(self.homology_sets) < self.args.neg_samples + 1:
            self.args.neg_samples = len(self.homology_sets) - 1
        neg_idx = rng.choice(
            len(self.homology_sets), size=self.args.neg_samples + 1, replace=False
        )
        pos_idx, neg_idx = neg_idx[0], neg_idx[1:]
        pos_samples = rng.choice(
            self.homology_sets[pos_idx], size=self.args.pos_samples + 1, replace=False
        )
        anchor, pos_samples = pos_samples[0], pos_samples[1:]
        neg_samples = np.array(
            [
                rng.choice(self.homology_sets[i], size=self.args.neg_multiple)
                for i in neg_idx
            ]
        ).flatten()
        return {"x": [anchor, *pos_samples, *neg_samples]}, rng

    def __getitem__(self, index):
        """Return the pre-generated sample at ``index``; warn on failure."""
        try:
            return self.dataset[index]
        except Exception:
            warnings.warn("Could not load sample")

    @property
    def SUMMARY_STATEMENT(self) -> str:
        """Return a short description of the generated dataset.

        Note: annotation corrected from ``-> None``; this returns a string.
        """
        try:
            return (
                f"Reverse Homology Dataset with {len(self.dataset)} samples\n"
                + f"Using Homology sets: {len(self.homology_sets)}\n"
                + f"Using {self.args.pos_samples} positive samples and {self.args.neg_samples*self.args.neg_multiple} negative samples\n"
            )
        except Exception:  # fix: was a bare except
            return "Could not produce summary statement"

    @staticmethod
    def set_args(args) -> None:
        # Binary "same homology set" decision.
        args.num_classes = 2

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        super(ReverseHomology, ReverseHomology).add_args(parser)
        parser.add_argument(
            "--homology_dataset_folder",
            type=str,
            help="folders containing fasta files seperated by comma",
        )
        parser.add_argument(
            "--dataset_seed",
            type=int,
            help="seed for dataset generation",
        )
        parser.add_argument(
            "--homology_multiple",
            type=float,
            default=1,
            help="the expected number of times to use each homology set as a positive example",
        )
        parser.add_argument(
            "--pos_samples",
            type=int,
            help="number of positive samples to use from the anchor homology set",
        )
        parser.add_argument(
            "--neg_samples",
            type=int,
            help="number of homology sets to draw negative samples from",
        )
        parser.add_argument(
            "--max_idr_len",
            type=int,
            help="max total length of idrs in a protein",
        )
        parser.add_argument(
            "--compartment_dataset_file",
            type=str,
            help="json file containing compartment dataset",
        )
        parser.add_argument(
            "--compartment_dataset_name",
            type=str,
            help="protgps name of compartment dataset object",
        )
        parser.add_argument(
            "--use_compartment_dataset",
            action="store_true",
            default=False,
            help="use compartment dataset to generate homology sets",
        )
        parser.add_argument(
            "--use_homology_dataset",
            action="store_true",
            default=False,
            help="use homology dataset to generate homology sets",
        )
        parser.add_argument(
            "--neg_multiple",
            type=int,
            default=1,
            help="number of negative samples to draw from each negative homology set",
        )
|
data/protgps/learning/losses/__init__.py
ADDED
File without changes
|
data/protgps/learning/losses/basic.py
ADDED
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from protgps.utils.registry import register_object
|
2 |
+
import torch
|
3 |
+
import torch.nn.functional as F
|
4 |
+
import torch.nn as nn
|
5 |
+
from collections import OrderedDict
|
6 |
+
import pdb
|
7 |
+
from protgps.utils.classes import ProtGPS
|
8 |
+
|
9 |
+
|
10 |
+
@register_object("cross_entropy", "loss")
class CrossEntropyLoss(ProtGPS):
    """Multi-class cross-entropy loss, weighted by --ce_loss_lambda."""

    def __init__(self) -> None:
        super().__init__()

    def __call__(self, model_output, batch, model, args):
        """Return (loss, logging dict, prediction dict) for one batch."""
        logits = model_output["logit"]
        loss = F.cross_entropy(logits, batch["y"].long()) * args.ce_loss_lambda

        logging_dict = OrderedDict(cross_entropy_loss=loss.detach())

        probs = F.softmax(logits, dim=-1).detach()
        predictions = OrderedDict()
        predictions["probs"] = probs
        predictions["golds"] = batch["y"]
        # Hard prediction = argmax class, flattened to one index per sample.
        predictions["preds"] = probs.argmax(axis=-1).reshape(-1)
        return loss, logging_dict, predictions

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        parser.add_argument(
            "--ce_loss_lambda",
            type=float,
            default=1.0,
            help="Lambda to weigh the cross-entropy loss.",
        )
|
38 |
+
|
39 |
+
|
40 |
+
@register_object("binary_cross_entropy", "loss")
class BinaryCrossEntropyLoss(ProtGPS):
    """Multi-label BCE-with-logits loss, weighted by --bce_loss_lambda."""

    def __init__(self) -> None:
        super().__init__()

    def __call__(self, model_output, batch, model, args):
        """Return (loss, logging dict, prediction dict) for one batch."""
        logits = model_output["logit"]
        loss = (
            F.binary_cross_entropy_with_logits(logits, batch["y"].float())
            * args.bce_loss_lambda
        )

        logging_dict = OrderedDict(binary_cross_entropy_loss=loss.detach())

        probs = torch.sigmoid(logits).detach()
        predictions = OrderedDict()
        predictions["probs"] = probs
        predictions["golds"] = batch["y"]
        # Each label is thresholded independently at 0.5.
        predictions["preds"] = (probs > 0.5).int()
        return loss, logging_dict, predictions

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        parser.add_argument(
            "--bce_loss_lambda",
            type=float,
            default=1.0,
            help="Lambda to weigh the binary cross-entropy loss.",
        )
|
71 |
+
|
72 |
+
|
73 |
+
@register_object("survival", "loss")
class SurvivalLoss(ProtGPS):
    """Masked per-interval BCE loss for discrete survival prediction."""

    def __init__(self) -> None:
        super().__init__()

    def __call__(self, model_output, batch, model, args):
        """Average BCE over the unmasked positions of the survival sequence."""
        logits = model_output["logit"]
        target_seq, target_mask = batch["y_seq"], batch["y_mask"]
        mask = target_mask.float()
        # Sum-then-normalize so masked-out positions contribute nothing.
        loss = (
            F.binary_cross_entropy_with_logits(
                logits, target_seq.float(), weight=mask, reduction="sum"
            )
            / torch.sum(mask)
        )

        logging_dict = OrderedDict(survival_loss=loss.detach())
        predictions = OrderedDict()
        predictions["probs"] = torch.sigmoid(logits).detach()
        predictions["golds"] = batch["y"]
        predictions["censors"] = batch["time_at_event"]
        return loss, logging_dict, predictions
|
90 |
+
|
91 |
+
|
92 |
+
@register_object("ordinal_cross_entropy", "loss")
class RankConsistentLoss(ProtGPS):
    # Ordinal-regression loss: each logit acts as a threshold classifier and
    # class probabilities come from cumulative sums of log-sigmoids.

    def __init__(self) -> None:
        super().__init__()

    def __call__(self, model_output, batch, model, args):
        """
        Computes masked binary cross-entropy over per-threshold logits.

        Expects model_output to contain 'logit' and batch to contain 'yseq'
        (binary per-threshold targets), 'ymask' (which thresholds count
        toward the loss) and 'y' (gold class indices).

        Returns:
            loss: masked BCE averaged over unmasked thresholds
            l_dict (dict): logging dictionary (left empty here)
            p_dict (dict): model predictions and ground truth labels
                (logits, probs, preds, golds)
        """
        loss = 0
        l_dict, p_dict = OrderedDict(), OrderedDict()
        logit = model_output["logit"]
        yseq = batch["yseq"]
        ymask = batch["ymask"]

        # Sum-reduce then divide by the mask total so that masked thresholds
        # neither contribute loss nor dilute the average.
        loss = F.binary_cross_entropy_with_logits(
            logit, yseq.float(), weight=ymask.float(), reduction="sum"
        ) / torch.sum(ymask.float())

        # Cumulative probabilities: summing log-sigmoids over a
        # lower-triangular copy gives, per threshold k, the product of
        # sigmoid(logit_j) for j <= k (zeroed entries contribute factor 1).
        probs = F.logsigmoid(logit)  # log_sum to add probs
        probs = probs.unsqueeze(1).repeat(1, len(args.rank_thresholds), 1)
        probs = torch.tril(probs).sum(2)
        probs = torch.exp(probs)

        p_dict["logits"] = logit.detach()
        p_dict["probs"] = probs.detach()
        # Predicted class = number of cumulative probabilities above 0.5.
        preds = probs > 0.5  # class = last prob > 0.5
        preds = preds.sum(-1)
        p_dict["preds"] = preds
        p_dict["golds"] = batch["y"]

        return loss, l_dict, p_dict
|
132 |
+
|
133 |
+
@register_object("contrastive", "loss")
class ContrastiveLoss(ProtGPS):
    # Softmax-style contrastive loss over one anchor / positives / negatives
    # group packed along dim 0 of model_output["hidden"].

    def __init__(self) -> None:
        super().__init__()

    def __call__(self, model_output, batch, model, args):
        """Compute -log P(positive | anchor) for a single contrastive group.

        Expects model_output["hidden"] rows laid out as
        [anchor, pos_samples positives, negatives...].

        Returns:
            loss: contrastive loss for the group
            logging_dict: contrastive_loss (detached)
            predictions: probs/preds/golds for the binary "anchor matches
                its positive set" decision
        """
        logging_dict, predictions = OrderedDict(), OrderedDict()
        logit = model_output["hidden"]
        # NOTE(review): this layout assumes one negative per negative set;
        # with --neg_multiple > 1 the tensor holds
        # 1 + pos_samples + neg_samples * neg_multiple rows and the assert
        # below would fire — confirm intended usage.
        sample_size = 1 + args.pos_samples + args.neg_samples
        idx = 0  # NOTE: this is for batch size 1
        anchor = logit[idx]
        # Positives are mean-pooled into a single vector before scoring.
        pos_samples = logit[idx+1:idx+args.pos_samples+1].mean(0)
        neg_samples = logit[idx+args.pos_samples+1:idx+sample_size]
        assert neg_samples.shape[0] == args.neg_samples
        # Exponentiated dot-product similarities (softmax numerator/denominator).
        pos_score = torch.exp(torch.dot(pos_samples, anchor))
        neg_score = torch.exp(torch.matmul(neg_samples, anchor)).sum()
        prob = pos_score/(pos_score + neg_score)
        loss = -torch.log(prob)
        logging_dict["contrastive_loss"] = loss.detach()
        probs = torch.Tensor([prob, 1-prob])
        predictions["probs"] = probs.detach()
        predictions["preds"] = (probs > 0.5).int()
        predictions["golds"] = torch.Tensor([1,0]).int()
        return loss, logging_dict, predictions
|
data/protgps/learning/metrics/__init__.py
ADDED
File without changes
|
data/protgps/learning/metrics/basic.py
ADDED
@@ -0,0 +1,359 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict
|
2 |
+
from protgps.utils.registry import register_object
|
3 |
+
from collections import OrderedDict
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
import numpy as np
|
6 |
+
import pdb
|
7 |
+
from torchmetrics.functional import (
|
8 |
+
accuracy,
|
9 |
+
auroc,
|
10 |
+
precision,
|
11 |
+
recall,
|
12 |
+
confusion_matrix,
|
13 |
+
f1_score,
|
14 |
+
precision_recall_curve,
|
15 |
+
average_precision,
|
16 |
+
)
|
17 |
+
from torchmetrics.utilities.compute import auc
|
18 |
+
import torch
|
19 |
+
import copy
|
20 |
+
|
21 |
+
EPSILON = 1e-6
|
22 |
+
BINARY_CLASSIF_THRESHOLD = 0.5
|
23 |
+
|
24 |
+
def precision_recall(probs, golds, **kwargs):
    """Return ``(precision, recall)`` computed with identical arguments.

    Replaces the previous ``precision_recall = lambda ...`` binding (PEP 8
    E731 discourages assigning a lambda to a name); the callable interface is
    unchanged, so all call sites keep working.

    Args:
        probs: predicted probabilities/scores passed to torchmetrics.
        golds: ground-truth labels.
        **kwargs: forwarded verbatim to both ``precision`` and ``recall``
            (e.g. ``num_classes``, ``average``, ``task``).
    """
    return precision(probs, golds, **kwargs), recall(probs, golds, **kwargs)
|
28 |
+
|
29 |
+
|
30 |
+
@register_object("classification", "metric")
class BaseClassification(ProtGPS):
    """Standard single-label classification metrics (accuracy, P/R, F1, AUCs)."""

    def __init__(self, args) -> None:
        super().__init__()

    @property
    def metric_keys(self):
        # Keys this metric reads from the predictions dict built by the loss.
        return ["probs", "preds", "golds"]

    def __call__(self, predictions_dict, args) -> Dict:
        """
        Computes standard classification metrics

        Args:
            predictions_dict: dictionary obtained from computing loss and model outputs
                * should contain the keys ['probs', 'preds', 'golds']
            args: argparser Namespace

        Returns:
            stats_dict (dict): contains (where applicable) values for accuracy, confusion matrix, precision, recall, f1, precision-recall auc, roc auc

        Note:
            In multiclass setting (>2), accuracy, and micro-f1, micro-recall, micro-precision are equivalent
            Macro: calculates metric per class then averages
        """
        stats_dict = OrderedDict()

        probs = predictions_dict["probs"]  # B, C (float)
        preds = predictions_dict["preds"]  # B
        golds = predictions_dict["golds"]  # B
        stats_dict["accuracy"] = accuracy(golds, preds)
        stats_dict["confusion_matrix"] = confusion_matrix(
            preds, golds, args.num_classes
        )
        if args.num_classes == 2:
            # Binary case: probs may be either a single positive-class score
            # (1-D) or a two-column distribution (2-D); branch accordingly.
            if len(probs.shape) == 1:
                stats_dict["precision"], stats_dict["recall"] = precision_recall(
                    probs, golds
                )
                stats_dict["f1"] = f1_score(probs, golds)
                pr, rc, _ = precision_recall_curve(probs, golds)
                stats_dict["pr_auc"] = auc(rc, pr)
                try:
                    stats_dict["roc_auc"] = auroc(probs, golds, pos_label=1)
                except:
                    # AUROC computation can fail (e.g. when only one class is
                    # observed in golds); it is silently omitted in that case.
                    pass
            else:
                stats_dict["precision"], stats_dict["recall"] = precision_recall(
                    probs, golds, multiclass=False, num_classes=2
                )
                stats_dict["f1"] = f1_score(
                    probs, golds, multiclass=False, num_classes=2
                )
                pr, rc, _ = precision_recall_curve(probs, golds, num_classes=2)
                # Per-class curves are returned; index -1 is the positive class.
                stats_dict["pr_auc"] = auc(rc[-1], pr[-1])
                try:
                    stats_dict["roc_auc"] = auroc(probs, golds, num_classes=2)
                except:
                    pass
        else:
            # Multiclass: macro-averaged P/R/F1 plus micro-F1 (== accuracy).
            stats_dict["precision"], stats_dict["recall"] = precision_recall(
                probs, golds, num_classes=args.num_classes, average="macro"
            )
            stats_dict["f1"] = f1_score(
                probs, golds, num_classes=args.num_classes, average="macro"
            )
            stats_dict["micro_f1"] = f1_score(
                probs, golds, num_classes=args.num_classes, average="micro"
            )
            # AUC-style metrics only make sense when every class is observed.
            if len(torch.unique(golds)) == args.num_classes:
                pr, rc, _ = precision_recall_curve(
                    probs, golds, num_classes=args.num_classes
                )
                stats_dict["pr_auc"] = torch.mean(
                    torch.stack([auc(rc[i], pr[i]) for i in range(args.num_classes)])
                )
                stats_dict["roc_auc"] = auroc(
                    probs, golds, num_classes=args.num_classes, average="macro"
                )

        if args.store_classwise_metrics:
            # Optionally compute un-averaged (per-class) variants and log them
            # under "class{i}_<metric>" keys (1-indexed class numbering).
            classwise_metrics = {}
            (
                classwise_metrics["precisions"],
                classwise_metrics["recalls"],
            ) = precision_recall(
                probs, golds, num_classes=args.num_classes, average="none"
            )
            classwise_metrics["f1s"] = f1_score(
                probs, golds, num_classes=args.num_classes, average="none"
            )
            pr, rc, _ = precision_recall_curve(
                probs, golds, num_classes=args.num_classes
            )
            classwise_metrics["pr_aucs"] = [
                auc(rc[i], pr[i]) for i in range(args.num_classes)
            ]
            classwise_metrics["accs"] = accuracy(
                golds, preds, num_classes=args.num_classes, average="none"
            )
            try:
                classwise_metrics["rocaucs"] = auroc(
                    probs, golds, num_classes=args.num_classes, average="none"
                )
            except:
                pass

            for metricname in [
                "precisions",
                "recalls",
                "f1s",
                "rocaucs",
                "pr_aucs",
                "accs",
            ]:
                if metricname in classwise_metrics:
                    stats_dict.update(
                        {
                            "class{}_{}".format(i + 1, metricname): v
                            for i, v in enumerate(classwise_metrics[metricname])
                        }
                    )
        return stats_dict

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        parser.add_argument(
            "--store_classwise_metrics",
            action="store_true",
            default=False,
            help="Whether to log metrics per class or just log average across classes",
        )
|
167 |
+
|
168 |
+
|
169 |
+
@register_object("multilabel_classification", "metric")
class MultiLabelClassification(ProtGPS):
    """Macro/micro metrics for multi-label (independent sigmoid) prediction."""

    def __init__(self, args) -> None:
        super().__init__()

    @property
    def metric_keys(self):
        # Keys this metric reads from the predictions dict built by the loss.
        return ["probs", "preds", "golds"]

    def __call__(self, predictions_dict, args) -> Dict:
        """
        Computes classification metrics for multi-label predictions (i.e., predicting multiple categories independently -- sigmoid outputs)

        Args:
            predictions_dict: dictionary obtained from computing loss and model outputs
                * should contain the keys ['probs', 'preds', 'golds']
            args: argparser Namespace

        Returns:
            stats_dict (dict): contains (where applicable) values for accuracy, confusion matrix, precision, recall, f1, precision-recall auc, roc auc

        """
        stats_dict = OrderedDict()

        probs = predictions_dict["probs"]  # B, C
        preds = predictions_dict["preds"]  # B, C
        # torchmetrics' multilabel task requires integer targets.
        golds = predictions_dict["golds"].int()  # B, C
        stats_dict["accuracy"] = accuracy(
            golds, preds, task="multilabel", num_labels=args.num_classes
        )

        stats_dict["precision"], stats_dict["recall"] = precision_recall(
            probs,
            golds,
            average="macro",
            task="multilabel",
            num_labels=args.num_classes,
        )
        stats_dict["f1"] = f1_score(
            probs,
            golds,
            num_labels=args.num_classes,
            average="macro",
            task="multilabel",
        )
        stats_dict["micro_f1"] = f1_score(
            probs,
            golds,
            num_labels=args.num_classes,
            average="micro",
            task="multilabel",
        )
        stats_dict["ap_score"] = average_precision(
            probs,
            golds,
            num_labels=args.num_classes,
            average="macro",
            task="multilabel",
        )

        stats_dict["roc_auc"] = auroc(
            probs,
            golds,
            num_labels=args.num_classes,
            average="macro",
            task="multilabel",
        )

        return stats_dict
|
238 |
+
|
239 |
+
|
240 |
+
@register_object("ordinal_classification", "metric")
class Ordinal_Classification(BaseClassification):
    """Classification metrics for ordinal (cumulative-threshold) predictions."""

    def __call__(self, predictions_dict, args) -> Dict:
        """
        Computes classification for metrics when predicting multiple independent classes

        Args:
            predictions_dict: dictionary obtained from computing loss and model outputs
            args: argparser Namespace

        Returns:
            stats_dict (dict): contains (where applicable) values for accuracy, confusion matrix, precision, recall, f1, precision-recall auc, roc auc, prefixed by col index
        """
        stats_dict = OrderedDict()

        probs = predictions_dict["probs"]  # B, C (float)
        preds = predictions_dict["preds"]  # B
        golds = predictions_dict["golds"]  # B
        stats_dict["accuracy"] = accuracy(golds, preds)
        # num_classes + 1: ordinal preds count thresholds exceeded, so the
        # prediction range is [0, num_classes] inclusive.
        stats_dict["confusion_matrix"] = confusion_matrix(
            preds, golds, args.num_classes + 1
        )

        # NOTE(review): each iteration calls the metric functions on the full
        # (probs, golds) tensors without indexing by `classindex`, so every
        # "class{i}_*" entry receives identical values.  If per-threshold
        # metrics were intended, probs/golds likely need per-column slicing —
        # confirm intended semantics before changing.
        for classindex in range(golds.shape[-1]):
            (
                stats_dict["class{}_precision".format(classindex)],
                stats_dict["class{}_recall".format(classindex)],
            ) = precision_recall(probs, golds)
            stats_dict["class{}_f1".format(classindex)] = f1_score(probs, golds)
            pr, rc, _ = precision_recall_curve(probs, golds)
            stats_dict["class{}_pr_auc".format(classindex)] = auc(rc, pr)
            try:
                stats_dict["class{}_roc_auc".format(classindex)] = auroc(
                    probs, golds, pos_label=1
                )
            except:
                # AUROC may be undefined (e.g. one observed class); skip it.
                pass

        return stats_dict
|
279 |
+
|
280 |
+
|
281 |
+
@register_object("survival_classification", "metric")
class Survival_Classification(BaseClassification):
    """Binary metrics for survival-style outputs, scored on the last column."""

    def __call__(self, predictions_dict, args):
        stats_dict = OrderedDict()

        golds = predictions_dict["golds"]
        probs = predictions_dict["probs"]
        # The last probability column is treated as the positive-event score.
        preds = probs[:, -1].view(-1) > 0.5
        probs = probs.reshape((-1, probs.shape[-1]))[:, -1]

        stats_dict["accuracy"] = accuracy(golds, preds)

        # Only compute binary metrics when labels/preds are strictly {0, 1}.
        if (args.num_classes == 2) and not (
            np.unique(golds)[-1] > 1 or np.unique(preds)[-1] > 1
        ):
            stats_dict["precision"], stats_dict["recall"] = precision_recall(
                probs, golds
            )
            stats_dict["f1"] = f1_score(probs, golds)
            num_pos = golds.sum()
            # AUC-style metrics require both classes to be present.
            if num_pos > 0 and num_pos < len(golds):
                stats_dict["auc"] = auroc(probs, golds, pos_label=1)
                stats_dict["ap_score"] = average_precision(probs, golds)
                precision, recall, _ = precision_recall_curve(probs, golds)
                stats_dict["prauc"] = auc(recall, precision)
        return stats_dict
|
307 |
+
|
308 |
+
|
309 |
+
@register_object("discrim_classification", "metric")
class Discriminator_Classification(BaseClassification):
    """Standard classification metrics for a discriminator head.

    Reads "discrim_probs"/"discrim_golds" from the predictions dict, derives
    hard predictions via argmax, delegates to BaseClassification, and returns
    every resulting metric renamed with a "discrim_" prefix.
    """

    def __init__(self, args) -> None:
        super().__init__(args)

    @property
    def metric_keys(self):
        return ["discrim_probs", "discrim_golds"]

    def __call__(self, predictions_dict, args):
        golds = predictions_dict["discrim_golds"]
        probs = predictions_dict["discrim_probs"]
        preds = probs.argmax(axis=-1).reshape(-1)

        # The discriminator's class count is inferred from the probability
        # tensor so the parent metric sees the correct num_classes.
        nargs = copy.deepcopy(args)
        nargs.num_classes = probs.shape[-1]

        base_stats = super().__call__(
            {"golds": golds, "probs": probs, "preds": preds}, nargs
        )
        return {"discrim_{}".format(k): v for k, v in base_stats.items()}
|
333 |
+
|
334 |
+
|
335 |
+
@register_object("multi_discrim_classification", "metric")
class MultiDiscriminator_Classification(BaseClassification):
    """Classification metrics computed independently per discriminator head.

    For each head ("device", "thickness") the standard BaseClassification
    metrics are computed and stored under "<head>_<metric>" keys.
    """

    def __init__(self, args) -> None:
        super().__init__(args)

    @property
    def metric_keys(self):
        return ["device_probs", "device_golds", "thickness_probs", "thickness_golds"]

    def __call__(self, predictions_dict, args):
        """Return metrics for every head.

        Args:
            predictions_dict: must contain "<head>_probs" and "<head>_golds"
                for each head in ("device", "thickness").
            args: argparser Namespace (num_classes is overridden per head).
        """
        stats_dict = OrderedDict()

        for key in ["device", "thickness"]:
            golds = predictions_dict["{}_golds".format(key)]
            probs = predictions_dict["{}_probs".format(key)]
            preds = predictions_dict["{}_probs".format(key)].argmax(axis=-1).reshape(-1)

            # Each head may have its own class count; infer it from probs.
            nargs = copy.deepcopy(args)
            nargs.num_classes = probs.shape[-1]
            head_stats = super().__call__(
                {"golds": golds, "probs": probs, "preds": preds}, nargs
            )
            # Bug fix: accumulate every head's metrics with update() — the
            # previous code reassigned stats_dict each iteration, so only the
            # last head ("thickness") was ever returned and the "device"
            # metrics were silently discarded.
            stats_dict.update(
                {"{}_{}".format(key, k): v for k, v in head_stats.items()}
            )

        return stats_dict
|
data/protgps/learning/optimizers/__init__.py
ADDED
File without changes
|
data/protgps/learning/optimizers/basic.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from torch import optim
|
3 |
+
from protgps.utils.registry import register_object
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
|
6 |
+
|
7 |
+
@register_object("sgd", "optimizer")
class SGD(optim.SGD, ProtGPS):
    """
    https://pytorch.org/docs/stable/generated/torch.optim.SGD.html#torch.optim.SGD
    """

    def __init__(self, params, args):
        # Thin registry wrapper: maps argparse fields (lr, momentum,
        # weight_decay) onto torch.optim.SGD's constructor.
        super().__init__(
            params=params,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
        )
|
20 |
+
|
21 |
+
|
22 |
+
@register_object("adam", "optimizer")
class Adam(optim.Adam, ProtGPS):
    """
    https://pytorch.org/docs/stable/generated/torch.optim.Adam.html#torch.optim.Adam
    """

    def __init__(self, params, args):
        # Thin registry wrapper: maps argparse fields onto torch.optim.Adam.
        super().__init__(params=params, lr=args.lr, weight_decay=args.weight_decay)
|
data/protgps/learning/schedulers/__init__.py
ADDED
File without changes
|
data/protgps/learning/schedulers/basic.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from torch import optim
|
3 |
+
from protgps.utils.registry import register_object
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
|
6 |
+
|
7 |
+
@register_object("reduce_on_plateau", "scheduler")
class ReduceLROnPlateau(optim.lr_scheduler.ReduceLROnPlateau, ProtGPS):
    """
    https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ReduceLROnPlateau.html#torch.optim.lr_scheduler.ReduceLROnPlateau
    """

    def __init__(self, optimizer, args):
        # Direction is inferred from the monitored quantity's name: anything
        # containing "loss" should decrease ("min"); all other monitored
        # metrics are assumed to be maximized ("max").
        super().__init__(
            optimizer,
            patience=args.patience,
            factor=args.lr_decay,
            mode="min" if "loss" in args.monitor else "max",
        )
|
20 |
+
|
21 |
+
|
22 |
+
@register_object("exponential_decay", "scheduler")
class ExponentialLR(optim.lr_scheduler.ExponentialLR, ProtGPS):
    """
    https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ExponentialLR.html#torch.optim.lr_scheduler.ExponentialLR
    """

    def __init__(self, optimizer, args):
        # args.lr_decay is reused here as the per-epoch multiplicative gamma.
        super().__init__(optimizer, gamma=args.lr_decay)
|
30 |
+
|
31 |
+
|
32 |
+
@register_object("cosine_annealing", "scheduler")
class CosineAnnealingLR(optim.lr_scheduler.CosineAnnealingLR, ProtGPS):
    """
    https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingLR.html
    """

    def __init__(self, optimizer, args):
        # args.cosine_annealing_period is T_max (iterations per half-cycle).
        super().__init__(optimizer, args.cosine_annealing_period)
|
40 |
+
|
41 |
+
|
42 |
+
@register_object("cosine_annealing_restarts", "scheduler")
class CosineAnnealingWarmRestarts(optim.lr_scheduler.CosineAnnealingWarmRestarts, ProtGPS):
    """
    https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.html#torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
    """

    def __init__(self, optimizer, args):
        # T_0: epochs until the first restart; T_mult: restart period scaling.
        super().__init__(
            optimizer,
            T_0=args.cosine_annealing_period,
            T_mult=args.cosine_annealing_period_scaling,
        )
|
data/protgps/learning/searchers/__init__.py
ADDED
File without changes
|
data/protgps/learning/searchers/basic.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from torch import optim
|
3 |
+
from protgps.utils.registry import register_object
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
from ray.tune.suggest import BasicVariantGenerator
|
6 |
+
|
7 |
+
|
8 |
+
@register_object("basic", "searcher")
class BasicSearch(BasicVariantGenerator, ProtGPS):
    """Description

    Registry wrapper around Ray Tune's default grid/random-search generator;
    args is accepted for interface uniformity but unused.

    See: https://docs.ray.io/en/releases-0.8.4/tune-searchalg.html#variant-generation-grid-search-random-search
    """

    def __init__(self, args):
        super().__init__()
|
data/protgps/learning/utils.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def off_diagonal(x):
    """Return all off-diagonal elements of the square matrix ``x`` as a 1-D tensor."""
    rows, cols = x.shape
    assert rows == cols
    # Dropping the final element and viewing as (n-1, n+1) aligns every
    # diagonal entry into column 0; slicing that column away leaves exactly
    # the off-diagonal entries, in row-major order.
    trimmed = x.flatten()[:-1]
    return trimmed.view(rows - 1, rows + 1)[:, 1:].flatten()
|
5 |
+
|
data/protgps/lightning/__init__.py
ADDED
File without changes
|
data/protgps/lightning/base.py
ADDED
@@ -0,0 +1,455 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import pytorch_lightning as pl
|
3 |
+
import torch.nn.functional as F
|
4 |
+
import numpy as np
|
5 |
+
from collections import OrderedDict
|
6 |
+
import pickle
|
7 |
+
import os
|
8 |
+
from protgps.utils.registry import get_object, register_object
|
9 |
+
from protgps.utils.classes import ProtGPS, set_protgps_type
|
10 |
+
|
11 |
+
|
12 |
+
@register_object("base", "lightning")
|
13 |
+
class Base(pl.LightningModule, ProtGPS):
|
14 |
+
"""
|
15 |
+
PyTorch Lightning module used as base for running training and test loops
|
16 |
+
|
17 |
+
Args:
|
18 |
+
args: argparser Namespace
|
19 |
+
"""
|
20 |
+
|
21 |
+
def __init__(self, args):
|
22 |
+
super(Base, self).__init__()
|
23 |
+
self.save_hyperparameters()
|
24 |
+
self.args = args
|
25 |
+
self.model = get_object(args.model_name, "model")(args)
|
26 |
+
|
27 |
+
def setup(self, stage):
|
28 |
+
self.loss_fns = {
|
29 |
+
"train": [get_object(l, "loss")() for l in self.args.loss_names]
|
30 |
+
}
|
31 |
+
self.loss_fns["val"] = self.loss_fns["train"]
|
32 |
+
self.loss_fns["test"] = (
|
33 |
+
self.loss_fns["train"]
|
34 |
+
if self.args.loss_names_for_eval is None
|
35 |
+
else [get_object(l, "loss")() for l in self.args.loss_names_for_eval]
|
36 |
+
)
|
37 |
+
self.metrics = [
|
38 |
+
get_object(m, "metric")(self.args) for m in self.args.metric_names
|
39 |
+
]
|
40 |
+
self.metric_keys = list(
|
41 |
+
set([key for metric in self.metrics for key in metric.metric_keys])
|
42 |
+
)
|
43 |
+
|
44 |
+
@property
|
45 |
+
def LOG_KEYS(self):
|
46 |
+
return [
|
47 |
+
"loss",
|
48 |
+
"accuracy",
|
49 |
+
"mean",
|
50 |
+
"std",
|
51 |
+
"precision",
|
52 |
+
"recall",
|
53 |
+
"f1",
|
54 |
+
"auc",
|
55 |
+
"similarity",
|
56 |
+
"tau",
|
57 |
+
"mse",
|
58 |
+
"mae",
|
59 |
+
"r2",
|
60 |
+
"c_index",
|
61 |
+
"hit",
|
62 |
+
"pearson",
|
63 |
+
"spearman",
|
64 |
+
]
|
65 |
+
|
66 |
+
@property
|
67 |
+
def UNLOG_KEYS(self):
|
68 |
+
default = ["activ", "hidden"]
|
69 |
+
keys_to_unlog = []
|
70 |
+
for k in default:
|
71 |
+
if k not in self.metric_keys:
|
72 |
+
keys_to_unlog.append(k)
|
73 |
+
return keys_to_unlog
|
74 |
+
|
75 |
+
def step(self, batch, batch_idx, optimizer_idx):
|
76 |
+
"""
|
77 |
+
Defines a single training or validation step:
|
78 |
+
Computes losses given batch and model outputs
|
79 |
+
|
80 |
+
Returns:
|
81 |
+
logged_output: dict with losses and predictions
|
82 |
+
|
83 |
+
Args:
|
84 |
+
batch: dict obtained from DataLoader. batch must contain they keys ['x', 'sample_id']
|
85 |
+
"""
|
86 |
+
logged_output = OrderedDict()
|
87 |
+
model_output = self.model(batch)
|
88 |
+
loss, logging_dict, predictions_dict = self.compute_loss(model_output, batch)
|
89 |
+
predictions_dict = self.store_in_predictions(predictions_dict, batch)
|
90 |
+
predictions_dict = self.store_in_predictions(predictions_dict, model_output)
|
91 |
+
|
92 |
+
logged_output["loss"] = loss
|
93 |
+
logged_output.update(logging_dict)
|
94 |
+
logged_output["preds_dict"] = predictions_dict
|
95 |
+
|
96 |
+
if (
|
97 |
+
(self.args.log_gen_image)
|
98 |
+
and (self.trainer.is_global_zero)
|
99 |
+
and (batch_idx == 0)
|
100 |
+
and (self.current_epoch % 100 == 0)
|
101 |
+
):
|
102 |
+
self.log_image(model_output, batch)
|
103 |
+
|
104 |
+
return logged_output
|
105 |
+
|
106 |
+
def forward(self, batch, batch_idx=0):
|
107 |
+
"""
|
108 |
+
Forward defines the prediction/inference actions
|
109 |
+
Similar to self.step() but also allows for saving predictions and hiddens
|
110 |
+
Computes losses given batch and model outputs
|
111 |
+
|
112 |
+
Returns:
|
113 |
+
logged_output: dict with losses and predictions
|
114 |
+
|
115 |
+
Args:
|
116 |
+
batch: dict obtained from DataLoader. batch must contain they keys ['x', 'sample_id']
|
117 |
+
"""
|
118 |
+
logged_output = OrderedDict()
|
119 |
+
model_output = self.model(batch)
|
120 |
+
if not self.args.predict:
|
121 |
+
loss, logging_dict, predictions_dict = self.compute_loss(
|
122 |
+
model_output, batch
|
123 |
+
)
|
124 |
+
predictions_dict = self.store_in_predictions(predictions_dict, batch)
|
125 |
+
predictions_dict = self.store_in_predictions(predictions_dict, model_output)
|
126 |
+
logged_output["loss"] = loss
|
127 |
+
logged_output.update(logging_dict)
|
128 |
+
logged_output["preds_dict"] = predictions_dict
|
129 |
+
if self.args.save_hiddens:
|
130 |
+
logged_output["preds_dict"].update(model_output)
|
131 |
+
|
132 |
+
if (self.args.log_gen_image) and (batch_idx == 0):
|
133 |
+
self.log_image(model_output, batch)
|
134 |
+
return logged_output
|
135 |
+
|
136 |
+
def training_step(self, batch, batch_idx, optimizer_idx=None):
|
137 |
+
"""
|
138 |
+
Single training step
|
139 |
+
"""
|
140 |
+
self.phase = "train"
|
141 |
+
output = self.step(batch, batch_idx, optimizer_idx)
|
142 |
+
return output
|
143 |
+
|
144 |
+
def validation_step(self, batch, batch_idx, optimizer_idx=None):
|
145 |
+
"""
|
146 |
+
Single validation step
|
147 |
+
"""
|
148 |
+
self.phase = "val"
|
149 |
+
output = self.step(batch, batch_idx, optimizer_idx)
|
150 |
+
return output
|
151 |
+
|
152 |
+
def test_step(self, batch, batch_idx):
|
153 |
+
"""
|
154 |
+
Single testing step
|
155 |
+
|
156 |
+
* save_predictions will save the dictionary output['preds_dict'], which typically includes sample_ids, probs, predictions, etc.
|
157 |
+
* save_hiddens: will save the value of output['preds_dict']['hidden']
|
158 |
+
"""
|
159 |
+
self.phase = "test"
|
160 |
+
output = self.forward(batch, batch_idx)
|
161 |
+
if self.args.save_predictions:
|
162 |
+
self.save_predictions(output["preds_dict"])
|
163 |
+
elif self.args.save_hiddens:
|
164 |
+
self.save_hiddens(output["preds_dict"])
|
165 |
+
output = {k: v for k, v in output.items() if k not in self.UNLOG_KEYS}
|
166 |
+
output["preds_dict"] = {
|
167 |
+
k: v for k, v in output["preds_dict"].items() if k not in self.UNLOG_KEYS
|
168 |
+
}
|
169 |
+
return output
|
170 |
+
|
171 |
+
def training_epoch_end(self, outputs):
|
172 |
+
"""
|
173 |
+
End of single training epoch
|
174 |
+
- Aggregates predictions and losses from all steps
|
175 |
+
- Computes the metric (auc, accuracy, etc.)
|
176 |
+
"""
|
177 |
+
if len(outputs) == 0:
|
178 |
+
return
|
179 |
+
outputs = gather_step_outputs(outputs)
|
180 |
+
outputs["loss"] = outputs["loss"].mean()
|
181 |
+
outputs.update(self.compute_metric(outputs["preds_dict"]))
|
182 |
+
self.log_outputs(outputs, "train")
|
183 |
+
return
|
184 |
+
|
185 |
+
def validation_epoch_end(self, outputs):
|
186 |
+
"""
|
187 |
+
End of single validation epoch
|
188 |
+
- Aggregates predictions and losses from all steps
|
189 |
+
- Computes the metric (auc, accuracy, etc.)
|
190 |
+
"""
|
191 |
+
if len(outputs) == 0:
|
192 |
+
return
|
193 |
+
outputs = gather_step_outputs(outputs)
|
194 |
+
outputs["loss"] = outputs["loss"].mean()
|
195 |
+
outputs.update(self.compute_metric(outputs["preds_dict"]))
|
196 |
+
self.log_outputs(outputs, "val")
|
197 |
+
return
|
198 |
+
|
199 |
+
def test_epoch_end(self, outputs):
|
200 |
+
"""
|
201 |
+
End of testing
|
202 |
+
- Aggregates predictions and losses from all batches
|
203 |
+
- Computes the metric if defined in args
|
204 |
+
"""
|
205 |
+
if len(outputs) == 0:
|
206 |
+
return
|
207 |
+
outputs = gather_step_outputs(outputs)
|
208 |
+
if isinstance(outputs.get("loss", 0), torch.Tensor):
|
209 |
+
outputs["loss"] = outputs["loss"].mean()
|
210 |
+
if not self.args.predict:
|
211 |
+
outputs.update(self.compute_metric(outputs["preds_dict"]))
|
212 |
+
self.log_outputs(outputs, "test")
|
213 |
+
return
|
214 |
+
|
215 |
+
def configure_optimizers(self):
|
216 |
+
"""
|
217 |
+
Obtain optimizers and hyperparameter schedulers for model
|
218 |
+
|
219 |
+
"""
|
220 |
+
optimizer = get_object(self.args.optimizer_name, "optimizer")(
|
221 |
+
self.parameters(), self.args
|
222 |
+
)
|
223 |
+
schedule = get_object(self.args.scheduler_name, "scheduler")(
|
224 |
+
optimizer, self.args
|
225 |
+
)
|
226 |
+
|
227 |
+
scheduler = {
|
228 |
+
"scheduler": schedule,
|
229 |
+
"monitor": self.args.monitor,
|
230 |
+
"interval": "epoch",
|
231 |
+
"frequency": 1,
|
232 |
+
}
|
233 |
+
return [optimizer], [scheduler]
|
234 |
+
|
235 |
+
    def compute_loss(self, model_output, batch):
        """
        Compute model loss:
            Iterates through loss functions defined in args and computes losses and predictions
            Adds losses and stores predictions for batch in dictionary

        Returns:
            total_loss (torch.Tensor): aggregate loss value that is propagated backwards for gradient computation
            logging_dict: dict of losses (and other metrics)
            predictions: dict of predictions (preds, probs, etc.)
        """
        total_loss = 0
        logging_dict, predictions = OrderedDict(), OrderedDict()
        # self.phase is set by the current step ("train"/"val"/"test") and
        # selects which configured loss functions apply.
        for loss_fn in self.loss_fns[self.phase]:
            loss, l_dict, p_dict = loss_fn(model_output, batch, self, self.args)
            # Losses are summed unweighted; later dicts overwrite earlier
            # entries on key collisions.
            total_loss += loss
            logging_dict.update(l_dict)
            predictions.update(p_dict)
        return total_loss, logging_dict, predictions
|
254 |
+
|
255 |
+
def compute_metric(self, predictions):
|
256 |
+
logging_dict = OrderedDict()
|
257 |
+
for metric_fn in self.metrics:
|
258 |
+
l_dict = metric_fn(predictions, self.args)
|
259 |
+
logging_dict.update(l_dict)
|
260 |
+
return logging_dict
|
261 |
+
|
262 |
+
    def store_in_predictions(self, preds, storage_dict):
        """Copy dataset-item keys and metric keys from storage_dict into preds.

        Tensors that still carry gradients are detached before storing so the
        prediction dict never keeps the autograd graph alive.
        """
        # Dataset-declared keys (e.g. sample ids) are copied through as-is.
        for m in get_object(self.args.dataset_name, "dataset").DATASET_ITEM_KEYS:
            if m in storage_dict:
                preds[m] = storage_dict[m]

        for m in self.metric_keys:
            if m in storage_dict:
                if torch.is_tensor(storage_dict[m]) and storage_dict[m].requires_grad:
                    preds[m] = storage_dict[m].detach()
                else:
                    preds[m] = storage_dict[m]
        return preds
|
274 |
+
|
275 |
+
def log_outputs(self, outputs, key):
|
276 |
+
"""
|
277 |
+
Compute performance metrics after epoch ends:
|
278 |
+
Iterates through metric functions defined in args and computes metrics
|
279 |
+
Logs the metric values into logger (Comet, Tensorboard, etc.)
|
280 |
+
"""
|
281 |
+
logging_dict = {}
|
282 |
+
for k, v in outputs.items():
|
283 |
+
if isinstance(v, torch.Tensor) and any([i in k for i in self.LOG_KEYS]):
|
284 |
+
logging_dict["{}_{}".format(key, k)] = v.mean()
|
285 |
+
# log clocktime of methods for epoch
|
286 |
+
if (self.args.profiler is not None) and (self.args.log_profiler):
|
287 |
+
logging_dict.update(self.get_time_profile(key))
|
288 |
+
self.log_dict(logging_dict, prog_bar=True, logger=True)
|
289 |
+
|
290 |
+
def get_time_profile(self, key):
|
291 |
+
"""Obtain trainer method times
|
292 |
+
|
293 |
+
Args:
|
294 |
+
key (str): one of ['train', 'val', 'test]
|
295 |
+
|
296 |
+
Returns:
|
297 |
+
dict: mean of clocktime of each method for past epoch
|
298 |
+
"""
|
299 |
+
if key == "train":
|
300 |
+
num_steps = self.trainer.num_training_batches
|
301 |
+
if key == "val":
|
302 |
+
num_steps = self.trainer.num_val_batches[0]
|
303 |
+
if key == "test":
|
304 |
+
num_steps = self.trainer.num_test_batches[0]
|
305 |
+
|
306 |
+
time_profile = {}
|
307 |
+
for k, v in self.trainer.profiler.recorded_durations.items():
|
308 |
+
time_profile[k] = np.mean(v[-num_steps:])
|
309 |
+
return time_profile
|
310 |
+
|
311 |
+
def save_predictions(self, outputs):
|
312 |
+
"""
|
313 |
+
Saves model predictions as pickle files
|
314 |
+
Makes a directory under /inference_dir/experiment_name/
|
315 |
+
Stores predictions for each sample individually under /inference_dir/experiment_name/sample_[sample_id].predictions
|
316 |
+
|
317 |
+
* Requires outputs to contain the keys ['sample_id']
|
318 |
+
"""
|
319 |
+
experiment_name = (
|
320 |
+
os.path.splitext(os.path.basename(self.args.checkpoint_path))[0]
|
321 |
+
if (self.args.from_checkpoint and not self.args.train)
|
322 |
+
else self.args.experiment_name
|
323 |
+
)
|
324 |
+
for idx, sampleid in enumerate(outputs["sample_id"]):
|
325 |
+
sampledict = {
|
326 |
+
k: v[idx]
|
327 |
+
for k, v in outputs.items()
|
328 |
+
if (len(v) == len(outputs["sample_id"]))
|
329 |
+
}
|
330 |
+
if "nodeid2nodeidx" in outputs:
|
331 |
+
sampledict["nodeid2nodeidx"] = outputs["nodeid2nodeidx"]
|
332 |
+
for k, v in sampledict.items():
|
333 |
+
if isinstance(v, torch.Tensor) and v.is_cuda:
|
334 |
+
sampledict[k] = v.cpu()
|
335 |
+
predictions_filename = os.path.join(
|
336 |
+
self.args.inference_dir,
|
337 |
+
experiment_name,
|
338 |
+
"sample_{}.predictions".format(sampleid),
|
339 |
+
)
|
340 |
+
dump_pickle(sampledict, predictions_filename)
|
341 |
+
|
342 |
+
def save_hiddens(self, outputs):
|
343 |
+
"""
|
344 |
+
Saves the model's hidden layer outputs as pickle files
|
345 |
+
Makes a directory under /inference_dir/experiment_name/
|
346 |
+
Stores predictions for each sample individually under /inference_dir/experiment_name/sample_[sample_id].hiddens
|
347 |
+
|
348 |
+
* Requires outputs to contain the keys ['sample_id', 'hidden]
|
349 |
+
"""
|
350 |
+
experiment_name = (
|
351 |
+
os.path.splitext(os.path.basename(self.args.checkpoint_path))[0]
|
352 |
+
if (self.args.from_checkpoint and not self.args.train)
|
353 |
+
else self.args.experiment_name
|
354 |
+
)
|
355 |
+
idx = outputs["sample_id"]
|
356 |
+
# hiddens = nn.functional.normalize(outputs['hidden'], dim = 1)
|
357 |
+
hiddens = [
|
358 |
+
{
|
359 |
+
k: v[i].cpu() if v.is_cuda else v[i]
|
360 |
+
for k, v in outputs.items()
|
361 |
+
if ("hidden" in k) and (len(v) == len(idx))
|
362 |
+
}
|
363 |
+
for i in range(len(idx))
|
364 |
+
]
|
365 |
+
for i, h in zip(idx, hiddens):
|
366 |
+
predictions_filename = os.path.join(
|
367 |
+
self.args.inference_dir, experiment_name, "sample_{}.hiddens".format(i)
|
368 |
+
)
|
369 |
+
dump_pickle(h, predictions_filename)
|
370 |
+
|
371 |
+
def log_image(self, model_output, batch):
|
372 |
+
# log one sample from each epoch
|
373 |
+
sid = batch["sample_id"][0]
|
374 |
+
for k, v in model_output.items():
|
375 |
+
if "reconstruction" in k:
|
376 |
+
img = model_output[k][0].detach().cpu()
|
377 |
+
if img.shape[0] != 3:
|
378 |
+
img = img.numpy()
|
379 |
+
for cid, chan in enumerate(img):
|
380 |
+
self.logger.log_image(
|
381 |
+
chan,
|
382 |
+
"Sample{}_{}_Chan{}_Epoch{}_Step{}".format(
|
383 |
+
sid, k, cid, self.current_epoch, self.global_step
|
384 |
+
),
|
385 |
+
)
|
386 |
+
else:
|
387 |
+
img = img.permute(1, 2, 0).numpy()
|
388 |
+
self.logger.log_image(
|
389 |
+
img,
|
390 |
+
"Sample{}_{}_Epoch{}_Step{}".format(
|
391 |
+
sid, k, self.current_epoch, self.global_step
|
392 |
+
),
|
393 |
+
)
|
394 |
+
|
395 |
+
@staticmethod
|
396 |
+
def add_args(parser) -> None:
|
397 |
+
"""Add class specific args
|
398 |
+
|
399 |
+
Args:
|
400 |
+
parser (argparse.ArgumentParser): argument parser
|
401 |
+
"""
|
402 |
+
parser.add_argument(
|
403 |
+
"--model_name",
|
404 |
+
type=str,
|
405 |
+
action=set_protgps_type("model"),
|
406 |
+
default="classifier",
|
407 |
+
help="Name of parent model",
|
408 |
+
)
|
409 |
+
|
410 |
+
|
411 |
+
def gather_step_outputs(outputs):
|
412 |
+
"""
|
413 |
+
Collates the dictionary outputs from each step into a single dictionary
|
414 |
+
|
415 |
+
Returns:
|
416 |
+
output_dict (dict): dictionary mapping step output keys to lists or tensors
|
417 |
+
"""
|
418 |
+
|
419 |
+
output_dict = OrderedDict()
|
420 |
+
if isinstance(outputs[-1], list): # adversarial setting with two optimizers
|
421 |
+
outputs = outputs[0]
|
422 |
+
|
423 |
+
for k in outputs[-1].keys():
|
424 |
+
if k == "preds_dict":
|
425 |
+
output_dict[k] = gather_step_outputs(
|
426 |
+
[output["preds_dict"] for output in outputs]
|
427 |
+
)
|
428 |
+
elif (
|
429 |
+
isinstance(outputs[-1][k], torch.Tensor) and len(outputs[-1][k].shape) == 0
|
430 |
+
):
|
431 |
+
output_dict[k] = torch.stack([output[k] for output in outputs])
|
432 |
+
elif isinstance(outputs[-1][k], torch.Tensor):
|
433 |
+
output_dict[k] = torch.cat([output[k] for output in outputs], dim=0)
|
434 |
+
else:
|
435 |
+
output_dict[k] = [output[k] for output in outputs]
|
436 |
+
return output_dict
|
437 |
+
|
438 |
+
|
439 |
+
def dump_pickle(file_obj, file_name):
|
440 |
+
"""
|
441 |
+
Saves object as a binary pickle file
|
442 |
+
Creates directory of file
|
443 |
+
Saves file
|
444 |
+
|
445 |
+
Args:
|
446 |
+
file_obj: object
|
447 |
+
file_name: path to file
|
448 |
+
"""
|
449 |
+
if not os.path.exists(os.path.dirname(file_name)):
|
450 |
+
try:
|
451 |
+
os.makedirs(os.path.dirname(file_name))
|
452 |
+
except OSError as exc: # Guard against race condition
|
453 |
+
if exc.errno != exc.errno.EEXIST:
|
454 |
+
raise
|
455 |
+
pickle.dump(file_obj, open(file_name, "wb"))
|
data/protgps/loggers/__init__.py
ADDED
File without changes
|
data/protgps/loggers/comet.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from protgps.utils.registry import register_object
|
2 |
+
import pytorch_lightning as pl
|
3 |
+
import os
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
|
6 |
+
|
7 |
+
@register_object("comet", "logger")
|
8 |
+
class COMET(pl.loggers.CometLogger, ProtGPS):
|
9 |
+
def __init__(self, args) -> None:
|
10 |
+
super().__init__(
|
11 |
+
api_key=os.environ.get("COMET_API_KEY"),
|
12 |
+
project_name=args.project_name,
|
13 |
+
experiment_name=args.experiment_name,
|
14 |
+
workspace=args.workspace,
|
15 |
+
log_env_details=True,
|
16 |
+
log_env_cpu=True,
|
17 |
+
)
|
18 |
+
|
19 |
+
def setup(self, **kwargs):
|
20 |
+
self.experiment.set_model_graph(kwargs["model"])
|
21 |
+
self.experiment.add_tags(kwargs["args"].logger_tags)
|
22 |
+
self.experiment.log_parameters(kwargs["args"])
|
23 |
+
|
24 |
+
def log_image(self, image, name):
|
25 |
+
self.experiment.log_image(image, name)
|
data/protgps/loggers/tensorboard.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from protgps.utils.registry import register_object
|
2 |
+
import pytorch_lightning as pl
|
3 |
+
import os
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
|
6 |
+
|
7 |
+
@register_object("tensorboard", "logger")
|
8 |
+
class PLTensorBoardLogger(pl.loggers.TensorBoardLogger, ProtGPS):
|
9 |
+
def __init__(self, args) -> None:
|
10 |
+
super().__init__(args.logger_dir)
|
11 |
+
|
12 |
+
def setup(self, **kwargs):
|
13 |
+
pass
|
14 |
+
|
15 |
+
def log_image(self, image, name):
|
16 |
+
pass
|
17 |
+
|
18 |
+
@staticmethod
|
19 |
+
def add_args(parser) -> None:
|
20 |
+
parser.add_argument(
|
21 |
+
"--logger_dir",
|
22 |
+
type=str,
|
23 |
+
default=".",
|
24 |
+
help="directory to save tensorboard logs",
|
25 |
+
)
|
data/protgps/loggers/wandb.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from protgps.utils.registry import register_object
|
2 |
+
import pytorch_lightning as pl
|
3 |
+
import os
|
4 |
+
from protgps.utils.classes import ProtGPS
|
5 |
+
|
6 |
+
|
7 |
+
@register_object("wandb", "logger")
|
8 |
+
class WandB(pl.loggers.WandbLogger, ProtGPS):
|
9 |
+
def __init__(self, args) -> None:
|
10 |
+
super().__init__(
|
11 |
+
project=args.project_name,
|
12 |
+
name=args.experiment_name,
|
13 |
+
entity=args.workspace,
|
14 |
+
tags = args.logger_tags
|
15 |
+
)
|
16 |
+
|
17 |
+
def setup(self, **kwargs):
|
18 |
+
# "gradients", "parameters", "all", or None
|
19 |
+
# # change "log_freq" log frequency of gradients and parameters (100 steps by default)
|
20 |
+
if kwargs["args"].local_rank == 0:
|
21 |
+
self.watch(kwargs["model"], log="all")
|
22 |
+
self.experiment.config.update(kwargs["args"])
|
23 |
+
|
24 |
+
def log_image(self, image, name):
|
25 |
+
self.log_image(images=[image], caption=[name])
|
data/protgps/models/__init__.py
ADDED
File without changes
|
data/protgps/models/abstract.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch.nn as nn
|
2 |
+
from protgps.utils.classes import ProtGPS
|
3 |
+
from abc import ABCMeta, abstractmethod
|
4 |
+
|
5 |
+
# from efficientnet_pytorch import EfficientNet
|
6 |
+
import math
|
7 |
+
|
8 |
+
|
9 |
+
class AbstractModel(nn.Module, ProtGPS):
|
10 |
+
__metaclass__ = ABCMeta
|
11 |
+
|
12 |
+
def __init__(self):
|
13 |
+
super(AbstractModel, self).__init__()
|
data/protgps/models/classifier.py
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import copy
|
4 |
+
from protgps.utils.registry import register_object, get_object
|
5 |
+
from protgps.utils.classes import set_protgps_type
|
6 |
+
from protgps.models.abstract import AbstractModel
|
7 |
+
|
8 |
+
|
9 |
+
@register_object("classifier", "model")
|
10 |
+
class Classifier(AbstractModel):
|
11 |
+
def __init__(self, args):
|
12 |
+
super(Classifier, self).__init__()
|
13 |
+
|
14 |
+
self.args = args
|
15 |
+
self.encoder = get_object(args.model_name_for_encoder, "model")(args)
|
16 |
+
cargs = copy.deepcopy(args)
|
17 |
+
self.mlp = get_object("mlp_classifier", "model")(cargs)
|
18 |
+
|
19 |
+
def forward(self, batch=None):
|
20 |
+
output = {}
|
21 |
+
output["encoder_hidden"] = self.encoder(batch)["hidden"]
|
22 |
+
output.update(self.mlp({"x": output["encoder_hidden"]}))
|
23 |
+
return output
|
24 |
+
|
25 |
+
@staticmethod
|
26 |
+
def add_args(parser) -> None:
|
27 |
+
"""Add class specific args
|
28 |
+
|
29 |
+
Args:
|
30 |
+
parser (argparse.ArgumentParser): argument parser
|
31 |
+
"""
|
32 |
+
parser.add_argument(
|
33 |
+
"--model_name_for_encoder",
|
34 |
+
type=str,
|
35 |
+
action=set_protgps_type("model"),
|
36 |
+
default="resnet18",
|
37 |
+
help="Name of encoder to use",
|
38 |
+
)
|
39 |
+
parser.add_argument(
|
40 |
+
"--mlp_input_dim", type=int, default=512, help="Dim of input to mlp"
|
41 |
+
)
|
42 |
+
parser.add_argument(
|
43 |
+
"--mlp_layer_configuration",
|
44 |
+
type=int,
|
45 |
+
nargs="*",
|
46 |
+
default=[128, 128],
|
47 |
+
help="MLP layer dimensions",
|
48 |
+
)
|
49 |
+
parser.add_argument(
|
50 |
+
"--mlp_use_batch_norm",
|
51 |
+
action="store_true",
|
52 |
+
default=False,
|
53 |
+
help="Use batchnorm in mlp",
|
54 |
+
)
|
55 |
+
parser.add_argument(
|
56 |
+
"--mlp_use_layer_norm",
|
57 |
+
action="store_true",
|
58 |
+
default=False,
|
59 |
+
help="Use LayerNorm in mlp",
|
60 |
+
)
|
61 |
+
|
62 |
+
|
63 |
+
@register_object("mlp_classifier", "model")
|
64 |
+
class MLPClassifier(AbstractModel):
|
65 |
+
def __init__(self, args):
|
66 |
+
super(MLPClassifier, self).__init__()
|
67 |
+
|
68 |
+
self.args = args
|
69 |
+
|
70 |
+
model_layers = []
|
71 |
+
cur_dim = args.mlp_input_dim
|
72 |
+
for layer_size in args.mlp_layer_configuration:
|
73 |
+
model_layers.extend(self.append_layer(cur_dim, layer_size, args))
|
74 |
+
cur_dim = layer_size
|
75 |
+
|
76 |
+
self.mlp = nn.Sequential(*model_layers)
|
77 |
+
self.predictor = nn.Linear(cur_dim, args.num_classes)
|
78 |
+
|
79 |
+
def append_layer(self, cur_dim, layer_size, args, with_dropout=True):
|
80 |
+
linear_layer = nn.Linear(cur_dim, layer_size)
|
81 |
+
bn = nn.BatchNorm1d(layer_size)
|
82 |
+
ln = nn.LayerNorm(layer_size)
|
83 |
+
if args.mlp_use_batch_norm:
|
84 |
+
seq = [linear_layer, bn, nn.ReLU()]
|
85 |
+
elif args.mlp_use_layer_norm:
|
86 |
+
seq = [linear_layer, ln, nn.ReLU()]
|
87 |
+
else:
|
88 |
+
seq = [linear_layer, nn.ReLU()]
|
89 |
+
if with_dropout:
|
90 |
+
seq.append(nn.Dropout(p=args.dropout))
|
91 |
+
return seq
|
92 |
+
|
93 |
+
def forward(self, batch=None):
|
94 |
+
output = {}
|
95 |
+
z = self.mlp(batch["x"])
|
96 |
+
output["logit"] = self.predictor(z)
|
97 |
+
output["hidden"] = z
|
98 |
+
return output
|
99 |
+
|
100 |
+
@staticmethod
|
101 |
+
def add_args(parser):
|
102 |
+
parser.add_argument(
|
103 |
+
"--mlp_input_dim", type=int, default=512, help="Dim of input to mlp"
|
104 |
+
)
|
105 |
+
parser.add_argument(
|
106 |
+
"--mlp_layer_configuration",
|
107 |
+
type=int,
|
108 |
+
nargs="*",
|
109 |
+
default=[128, 128],
|
110 |
+
help="MLP layer dimensions",
|
111 |
+
)
|
112 |
+
parser.add_argument(
|
113 |
+
"--mlp_use_batch_norm",
|
114 |
+
action="store_true",
|
115 |
+
default=False,
|
116 |
+
help="Use batchnorm in mlp",
|
117 |
+
)
|
118 |
+
parser.add_argument(
|
119 |
+
"--mlp_use_layer_norm",
|
120 |
+
action="store_true",
|
121 |
+
default=False,
|
122 |
+
help="Use LayerNorm in mlp",
|
123 |
+
)
|
data/protgps/models/fair_esm.py
ADDED
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import copy
|
4 |
+
from protgps.models.abstract import AbstractModel
|
5 |
+
from protgps.utils.classes import set_protgps_type
|
6 |
+
from protgps.utils.registry import register_object, get_object
|
7 |
+
from torch.nn.utils.rnn import pad_sequence
|
8 |
+
import functools
|
9 |
+
|
10 |
+
|
11 |
+
@register_object("fair_esm", "model")
|
12 |
+
class FairEsm(AbstractModel):
|
13 |
+
"""
|
14 |
+
Refer to https://github.com/facebookresearch/esm#available-models
|
15 |
+
"""
|
16 |
+
|
17 |
+
def __init__(self, args):
|
18 |
+
super(FairEsm, self).__init__()
|
19 |
+
self.args = args
|
20 |
+
torch.hub.set_dir(args.pretrained_hub_dir)
|
21 |
+
self.model, self.alphabet = torch.hub.load(
|
22 |
+
"facebookresearch/esm:main", args.esm_name
|
23 |
+
)
|
24 |
+
self.batch_converter = (
|
25 |
+
self.alphabet.get_batch_converter()
|
26 |
+
) # TODO: Move to dataloader, so that we can batch in parallel
|
27 |
+
self.register_buffer("devicevar", torch.zeros(1, dtype=torch.int8))
|
28 |
+
if args.freeze_esm:
|
29 |
+
self.model.eval()
|
30 |
+
|
31 |
+
self.repr_layer = args.esm_hidden_layer
|
32 |
+
print("Using ESM hidden layers", self.repr_layer)
|
33 |
+
|
34 |
+
def forward(self, x, tokens=False, soft=False):
|
35 |
+
"""
|
36 |
+
x: list of str (protein sequences)
|
37 |
+
tokens: tokenized or tensorized input
|
38 |
+
soft: embeddings precomputed
|
39 |
+
"""
|
40 |
+
output = {}
|
41 |
+
if tokens:
|
42 |
+
batch_tokens = x.unsqueeze(0)
|
43 |
+
else:
|
44 |
+
fair_x = self.truncate_protein(x, self.args.max_prot_len)
|
45 |
+
batch_labels, batch_strs, batch_tokens = self.batch_converter(fair_x)
|
46 |
+
|
47 |
+
batch_tokens = batch_tokens.to(self.devicevar.device)
|
48 |
+
|
49 |
+
# use partial for cleanness
|
50 |
+
model_func = functools.partial(
|
51 |
+
self.model,
|
52 |
+
repr_layers=[self.repr_layer],
|
53 |
+
return_contacts=False,
|
54 |
+
)
|
55 |
+
if soft:
|
56 |
+
model_func = functools.partial(model_func, soft=soft)
|
57 |
+
|
58 |
+
if self.args.freeze_esm:
|
59 |
+
with torch.no_grad():
|
60 |
+
result = model_func(batch_tokens)
|
61 |
+
else:
|
62 |
+
result = model_func(batch_tokens)
|
63 |
+
|
64 |
+
# Generate per-sequence representations via averaging
|
65 |
+
hiddens = []
|
66 |
+
for sample_num, sample in enumerate(x):
|
67 |
+
# breakpoint()
|
68 |
+
hiddens.append(
|
69 |
+
result["representations"][self.repr_layer][
|
70 |
+
sample_num, 1 : len(sample) + 1
|
71 |
+
].mean(0)
|
72 |
+
)
|
73 |
+
if self.args.output_residue_hiddens:
|
74 |
+
output["residues"] = result["representations"][self.repr_layer]
|
75 |
+
|
76 |
+
output["hidden"] = torch.stack(hiddens)
|
77 |
+
|
78 |
+
return output
|
79 |
+
|
80 |
+
def truncate_protein(self, x, max_length=None):
|
81 |
+
# max length allowed is 1024
|
82 |
+
return [
|
83 |
+
(i, s[: max_length - 2])
|
84 |
+
if not isinstance(x[0], list)
|
85 |
+
else (i, s[0][: max_length - 2])
|
86 |
+
for i, s in enumerate(x)
|
87 |
+
]
|
88 |
+
|
89 |
+
@staticmethod
|
90 |
+
def add_args(parser) -> None:
|
91 |
+
"""Add class specific args
|
92 |
+
|
93 |
+
Args:
|
94 |
+
parser (argparse.ArgumentParser): argument parser
|
95 |
+
"""
|
96 |
+
parser.add_argument(
|
97 |
+
"--pretrained_hub_dir",
|
98 |
+
type=str,
|
99 |
+
default="/home/protgps/esm_models",
|
100 |
+
help="directory to torch hub where pretrained models are saved",
|
101 |
+
)
|
102 |
+
parser.add_argument(
|
103 |
+
"--esm_name",
|
104 |
+
type=str,
|
105 |
+
default="esm2_t12_35M_UR50D",
|
106 |
+
help="directory to torch hub where pretrained models are saved",
|
107 |
+
)
|
108 |
+
parser.add_argument(
|
109 |
+
"--freeze_esm",
|
110 |
+
action="store_true",
|
111 |
+
default=False,
|
112 |
+
help="do not update encoder weights",
|
113 |
+
)
|
114 |
+
parser.add_argument(
|
115 |
+
"--esm_hidden_layer",
|
116 |
+
type=int,
|
117 |
+
default=12,
|
118 |
+
help="do not update encoder weights",
|
119 |
+
)
|
120 |
+
parser.add_argument(
|
121 |
+
"--output_residue_hiddens",
|
122 |
+
action="store_true",
|
123 |
+
default=False,
|
124 |
+
help="do not return residue-level hiddens, only sequence average",
|
125 |
+
)
|
126 |
+
|
127 |
+
|
128 |
+
@register_object("fair_esm2", "model")
|
129 |
+
class FairEsm2(FairEsm):
|
130 |
+
# def forward(self, x):
|
131 |
+
# """
|
132 |
+
# x: list of str (protein sequences)
|
133 |
+
# """
|
134 |
+
# output = {}
|
135 |
+
# fair_x = self.truncate_protein(x)
|
136 |
+
# batch_labels, batch_strs, batch_tokens = self.batch_converter(fair_x)
|
137 |
+
# batch_tokens = batch_tokens.to(self.devicevar.device)
|
138 |
+
|
139 |
+
# if self.args.freeze_esm:
|
140 |
+
# with torch.no_grad():
|
141 |
+
# result = self.model(
|
142 |
+
# batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
|
143 |
+
# )
|
144 |
+
# else:
|
145 |
+
# result = self.model(
|
146 |
+
# batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
|
147 |
+
# )
|
148 |
+
|
149 |
+
# # Generate per-sequence representations via averaging
|
150 |
+
# hiddens = []
|
151 |
+
# for sample_num, sample in enumerate(x):
|
152 |
+
# hiddens.append(
|
153 |
+
# result["representations"][self.repr_layer][
|
154 |
+
# sample_num, 1 : len(sample) + 1
|
155 |
+
# ]
|
156 |
+
# )
|
157 |
+
# if self.args.output_residue_hiddens:
|
158 |
+
# output["residues"] = result["representations"][self.repr_layer]
|
159 |
+
|
160 |
+
# output["hidden"] = hiddens
|
161 |
+
# return output
|
162 |
+
|
163 |
+
def truncate_protein(self, x, max_length=torch.inf):
|
164 |
+
return [
|
165 |
+
(i, s) if not isinstance(x[0], list) else (i, s[0]) for i, s in enumerate(x)
|
166 |
+
]
|
167 |
+
|
168 |
+
|
169 |
+
@register_object("fair_esm_fast", "model")
|
170 |
+
class FairEsmFast(FairEsm):
|
171 |
+
def forward(self, x, tokens=False, soft=False):
|
172 |
+
"""
|
173 |
+
x: list of str (protein sequences)
|
174 |
+
"""
|
175 |
+
output = {}
|
176 |
+
if tokens:
|
177 |
+
batch_tokens = x.unsqueeze(0)
|
178 |
+
else:
|
179 |
+
fair_x = [(i, v) for i, v in enumerate(x)]
|
180 |
+
batch_labels, batch_strs, batch_tokens = self.batch_converter(fair_x)
|
181 |
+
batch_tokens = batch_tokens.to(self.devicevar.device)
|
182 |
+
|
183 |
+
# use partial for cleanness
|
184 |
+
model_func = functools.partial(
|
185 |
+
self.model,
|
186 |
+
repr_layers=[self.repr_layer],
|
187 |
+
return_contacts=False,
|
188 |
+
)
|
189 |
+
if soft:
|
190 |
+
model_func = functools.partial(model_func, soft=soft)
|
191 |
+
|
192 |
+
if self.args.freeze_esm:
|
193 |
+
with torch.no_grad():
|
194 |
+
result = model_func(batch_tokens)
|
195 |
+
else:
|
196 |
+
result = model_func(batch_tokens)
|
197 |
+
|
198 |
+
if self.args.output_residue_hiddens:
|
199 |
+
output["residues"] = result["representations"][self.repr_layer]
|
200 |
+
|
201 |
+
output["hidden"] = result["representations"][self.repr_layer].mean(axis=1)
|
202 |
+
return output
|
203 |
+
|
204 |
+
|
205 |
+
import numpy as np
|
206 |
+
|
207 |
+
|
208 |
+
@register_object("reverse_hom", "model")
|
209 |
+
class ReverseHomology(FairEsm):
|
210 |
+
def forward(self, batch):
|
211 |
+
"""
|
212 |
+
x: list of str (protein sequences)
|
213 |
+
"""
|
214 |
+
output = {}
|
215 |
+
x = np.array(batch["x"]).reshape(-1, order="F")
|
216 |
+
fair_x = [(i, v) for i, v in enumerate(x)]
|
217 |
+
_, _, batch_tokens = self.batch_converter(fair_x)
|
218 |
+
batch_tokens = batch_tokens.to(self.devicevar.device)
|
219 |
+
if self.args.freeze_esm:
|
220 |
+
with torch.no_grad():
|
221 |
+
result = self.model(
|
222 |
+
batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
|
223 |
+
)
|
224 |
+
else:
|
225 |
+
result = self.model(
|
226 |
+
batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
|
227 |
+
)
|
228 |
+
if self.args.output_residue_hiddens:
|
229 |
+
output["residues"] = result["representations"][self.repr_layer]
|
230 |
+
|
231 |
+
# NOTE: works for batch size of 1 only (otherwise need to reshape)
|
232 |
+
output["hidden"] = result["representations"][self.repr_layer].mean(axis=1)
|
233 |
+
|
234 |
+
return output
|
235 |
+
|
236 |
+
|
237 |
+
@register_object("protein_encoder", "model")
|
238 |
+
class ProteinEncoder(AbstractModel):
|
239 |
+
def __init__(self, args):
|
240 |
+
super(ProteinEncoder, self).__init__()
|
241 |
+
self.args = args
|
242 |
+
self.encoder = get_object(args.protein_encoder_type, "model")(args)
|
243 |
+
cargs = copy.deepcopy(args)
|
244 |
+
cargs.mlp_input_dim = args.protein_hidden_dim
|
245 |
+
args.freeze_esm = args.freeze_encoder
|
246 |
+
self.mlp = get_object(args.protein_classifer, "model")(cargs)
|
247 |
+
if self.args.freeze_encoder:
|
248 |
+
self.encoder.eval()
|
249 |
+
|
250 |
+
def forward(self, batch, tokens=False, soft=False):
|
251 |
+
output = {}
|
252 |
+
if self.args.freeze_encoder:
|
253 |
+
with torch.no_grad():
|
254 |
+
output_esm = self.encoder(batch["x"], tokens=tokens, soft=soft)
|
255 |
+
else:
|
256 |
+
output_esm = self.encoder(batch["x"], tokens=tokens, soft=soft)
|
257 |
+
# output["protein_hidden"] = output_esm["hidden"]
|
258 |
+
output.update(self.mlp({"x": output_esm["hidden"]}))
|
259 |
+
return output
|
260 |
+
|
261 |
+
@staticmethod
|
262 |
+
def add_args(parser) -> None:
|
263 |
+
"""Add class specific args
|
264 |
+
|
265 |
+
Args:
|
266 |
+
parser (argparse.ArgumentParser): argument parser
|
267 |
+
"""
|
268 |
+
parser.add_argument(
|
269 |
+
"--protein_encoder_type",
|
270 |
+
type=str,
|
271 |
+
default="fair_esm2",
|
272 |
+
help="name of the protein encoder",
|
273 |
+
action=set_protgps_type("model"),
|
274 |
+
)
|
275 |
+
parser.add_argument(
|
276 |
+
"--freeze_encoder",
|
277 |
+
action="store_true",
|
278 |
+
default=False,
|
279 |
+
help="do not update encoder weights",
|
280 |
+
)
|
281 |
+
parser.add_argument(
|
282 |
+
"--protein_hidden_dim",
|
283 |
+
type=int,
|
284 |
+
default=480,
|
285 |
+
help="hidden dimension of the protein",
|
286 |
+
)
|
287 |
+
parser.add_argument(
|
288 |
+
"--protein_classifer",
|
289 |
+
type=str,
|
290 |
+
default="mlp_classifier",
|
291 |
+
help="name of classifier",
|
292 |
+
action=set_protgps_type("model"),
|
293 |
+
)
|
294 |
+
|
295 |
+
|
296 |
+
@register_object("protein_encoder_attention", "model")
|
297 |
+
class ProteinEncoderAttention(ProteinEncoder):
|
298 |
+
def __init__(self, args):
|
299 |
+
super(ProteinEncoder, self).__init__()
|
300 |
+
self.args = args
|
301 |
+
self.encoder = get_object(args.protein_encoder_type, "model")(args)
|
302 |
+
cargs = copy.deepcopy(args)
|
303 |
+
cargs.mlp_input_dim = args.protein_hidden_dim
|
304 |
+
args.freeze_esm = args.freeze_encoder
|
305 |
+
self.mlp = get_object(args.protein_classifer, "model")(cargs)
|
306 |
+
if self.args.freeze_encoder:
|
307 |
+
self.encoder.eval()
|
308 |
+
|
309 |
+
heads = 8
|
310 |
+
encoder_layer = nn.TransformerEncoderLayer(
|
311 |
+
d_model=args.protein_hidden_dim, nhead=heads
|
312 |
+
)
|
313 |
+
self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
|
314 |
+
|
315 |
+
def forward(self, batch):
|
316 |
+
output = {}
|
317 |
+
if self.args.freeze_encoder:
|
318 |
+
with torch.no_grad():
|
319 |
+
output_esm = self.encoder(batch["x"])
|
320 |
+
else:
|
321 |
+
output_esm = self.encoder(batch["x"])
|
322 |
+
|
323 |
+
v_attention = []
|
324 |
+
for v in output_esm["hidden"]:
|
325 |
+
v = self.transformer_encoder(v)
|
326 |
+
v_attention.append(v.mean(0))
|
327 |
+
|
328 |
+
output.update(self.mlp({"x": torch.stack(v_attention)}))
|
329 |
+
return output
|
330 |
+
|
331 |
+
|
332 |
+
@register_object("protein_encoder_esm_embeddings", "model")
|
333 |
+
class ProteinEncoderESMEmbeddings(ProteinEncoder):
|
334 |
+
def forward(self, batch):
|
335 |
+
output = {}
|
336 |
+
|
337 |
+
fair_x = self.encoder.truncate_protein(batch["x"])
|
338 |
+
_, _, batch_tokens = self.encoder.batch_converter(fair_x)
|
339 |
+
batch_tokens = batch_tokens.to(self.encoder.devicevar.device)
|
340 |
+
esm_embedded = self.encoder.model.embed_tokens(batch_tokens).mean(1)
|
341 |
+
|
342 |
+
# output["protein_hidden"] = output_esm["hidden"]
|
343 |
+
output.update(self.mlp({"x": esm_embedded}))
|
344 |
+
return output
|
345 |
+
|
346 |
+
|
347 |
+
@register_object("idr_encoder", "model")
|
348 |
+
class IDREncoder(ProteinEncoder):
|
349 |
+
def forward(self, batch):
|
350 |
+
output = {}
|
351 |
+
|
352 |
+
if self.args.freeze_encoder:
|
353 |
+
with torch.no_grad():
|
354 |
+
idr_embeddings = self._forward_function(batch)
|
355 |
+
else:
|
356 |
+
idr_embeddings = self._forward_function(batch)
|
357 |
+
|
358 |
+
output.update(self.mlp({"x": torch.stack(idr_embeddings)}))
|
359 |
+
return output
|
360 |
+
|
361 |
+
def _forward_function(self, batch) -> list:
|
362 |
+
output_esm = self.encoder(batch["x"])
|
363 |
+
# mask out non-idr residues and average
|
364 |
+
B, N, H = output_esm["residues"].shape
|
365 |
+
mask = torch.zeros(B, N)
|
366 |
+
for i in range(B):
|
367 |
+
mask[i, batch["start_idx"][i] : batch["end_idx"][i]] = 1
|
368 |
+
|
369 |
+
idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
|
370 |
+
output_esm["residues"].device
|
371 |
+
)
|
372 |
+
idr_embeddings = []
|
373 |
+
for idx, sample in enumerate(idr_residue_embeddings):
|
374 |
+
avg_sample = sample.sum(0) / mask[idx].sum()
|
375 |
+
idr_embeddings.append(avg_sample)
|
376 |
+
|
377 |
+
return idr_embeddings
|
378 |
+
|
379 |
+
@staticmethod
|
380 |
+
def set_args(args) -> None:
|
381 |
+
args.output_residue_hiddens = True
|
382 |
+
|
383 |
+
|
384 |
+
@register_object("all_idr_encoder", "model")
|
385 |
+
class AllIDREncoder(ProteinEncoder):
|
386 |
+
def forward(self, batch):
|
387 |
+
output = {}
|
388 |
+
|
389 |
+
if self.args.freeze_encoder:
|
390 |
+
with torch.no_grad():
|
391 |
+
idr_embeddings = self._forward_function(batch)
|
392 |
+
else:
|
393 |
+
idr_embeddings = self._forward_function(batch)
|
394 |
+
|
395 |
+
output.update(self.mlp({"x": torch.stack(idr_embeddings)}))
|
396 |
+
return output
|
397 |
+
|
398 |
+
def _forward_function(self, batch) -> list:
|
399 |
+
output_esm = self.encoder(batch["x"])
|
400 |
+
|
401 |
+
# mask out non-idr residues and average
|
402 |
+
B, N, H = output_esm["residues"].shape
|
403 |
+
mask = torch.zeros(B, N)
|
404 |
+
|
405 |
+
for i in range(B):
|
406 |
+
start_indices = [int(n) for n in batch["start_indices"][i].split("_")]
|
407 |
+
end_indices = [int(n) for n in batch["end_indices"][i].split("_")]
|
408 |
+
for idr_idx in range(len(start_indices)):
|
409 |
+
mask[i, start_indices[idr_idx] : end_indices[idr_idx]] = 1
|
410 |
+
|
411 |
+
idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
|
412 |
+
output_esm["residues"].device
|
413 |
+
)
|
414 |
+
idr_embeddings = []
|
415 |
+
for idx, sample in enumerate(idr_residue_embeddings):
|
416 |
+
avg_sample = sample.sum(0) / mask[idx].sum()
|
417 |
+
idr_embeddings.append(avg_sample)
|
418 |
+
|
419 |
+
return idr_embeddings
|
420 |
+
|
421 |
+
@staticmethod
|
422 |
+
def set_args(args) -> None:
|
423 |
+
args.output_residue_hiddens = True
|
424 |
+
|
425 |
+
|
426 |
+
@register_object("all_idr_esm_embeddings_encoder", "model")
class AllIDRESMEmbeddingsEncoder(ProteinEncoder):
    """Classifies a protein from raw ESM token embeddings averaged over IDR residues.

    Uses only the encoder's token embedding layer (encoder.model.embed_tokens)
    rather than the full encoder forward pass.
    """

    def forward(self, batch):
        # Tokenize and embed directly from the embedding table.
        truncated = self.encoder.truncate_protein(batch["x"])
        _, _, tokens = self.encoder.batch_converter(truncated)
        tokens = tokens.to(self.encoder.devicevar.device)
        embedded = self.encoder.model.embed_tokens(tokens)

        batch_size, seq_len, _ = embedded.shape

        # Binary mask keeping only IDR residues; spans are "_"-joined
        # start/end index strings per sample.
        mask = torch.zeros(batch_size, seq_len)
        for row in range(batch_size):
            starts = [int(tok) for tok in batch["start_indices"][row].split("_")]
            ends = [int(tok) for tok in batch["end_indices"][row].split("_")]
            for span, lo in enumerate(starts):
                mask[row, lo : ends[span]] = 1

        masked = embedded * mask.unsqueeze(-1).to(embedded.device)
        # Mean over IDR positions only.
        pooled = [emb.sum(0) / row_mask.sum() for emb, row_mask in zip(masked, mask)]

        out = {}
        out.update(self.mlp({"x": torch.stack(pooled)}))
        return out
|
456 |
+
|
457 |
+
|
458 |
+
@register_object("all_not_idr_esm_embeddings_encoder", "model")
class AllNotIDRESMEmbeddingsEncoder(ProteinEncoder):
    """Classifies a protein from raw ESM token embeddings averaged over the
    residues OUTSIDE the IDR spans (the complement of the IDR mask).
    """

    def forward(self, batch):
        # Tokenize and embed directly from the embedding table.
        truncated = self.encoder.truncate_protein(batch["x"])
        _, _, tokens = self.encoder.batch_converter(truncated)
        tokens = tokens.to(self.encoder.devicevar.device)
        embedded = self.encoder.model.embed_tokens(tokens)

        batch_size, seq_len, _ = embedded.shape

        # Start from all-ones and zero out IDR spans, keeping only
        # non-IDR residues.
        mask = torch.ones(batch_size, seq_len)
        for row in range(batch_size):
            starts = [int(tok) for tok in batch["start_indices"][row].split("_")]
            ends = [int(tok) for tok in batch["end_indices"][row].split("_")]
            for span, lo in enumerate(starts):
                mask[row, lo : ends[span]] = 0

        masked = embedded * mask.unsqueeze(-1).to(embedded.device)
        # Mean over the retained (non-IDR) positions.
        pooled = [emb.sum(0) / row_mask.sum() for emb, row_mask in zip(masked, mask)]

        out = {}
        out.update(self.mlp({"x": torch.stack(pooled)}))
        return out
|
488 |
+
|
489 |
+
|
490 |
+
@register_object("all_not_idr_encoder", "model")
class AllNotIDREncoder(AllIDREncoder):
    """Variant of AllIDREncoder that averages encoder embeddings over the
    residues OUTSIDE the IDR spans."""

    def _forward_function(self, batch) -> list:
        """Return one mean-over-non-IDR-residues embedding tensor per sample."""
        encoded = self.encoder(batch["x"])
        residues = encoded["residues"]
        batch_size, seq_len, _ = residues.shape

        # All-ones mask with the IDR spans zeroed out: keeps non-IDR residues.
        mask = torch.ones(batch_size, seq_len)
        for row in range(batch_size):
            starts = [int(tok) for tok in batch["start_indices"][row].split("_")]
            ends = [int(tok) for tok in batch["end_indices"][row].split("_")]
            for span, lo in enumerate(starts):
                mask[row, lo : ends[span]] = 0

        masked = residues * mask.unsqueeze(-1).to(residues.device)
        # Mean over the retained (non-IDR) positions.
        return [emb.sum(0) / row_mask.sum() for emb, row_mask in zip(masked, mask)]
|
514 |
+
|
515 |
+
|
516 |
+
@register_object("context_idr_hiddens", "model")
class ContextIDREncoder(ProteinEncoder):
    """Produces one hidden vector per sample by averaging the encoder's residue
    embeddings over a single [start_idx, end_idx) window.

    Returns {"hidden": Tensor} (no classification head is applied here).
    """

    def forward(self, batch):
        output = {}

        if self.args.freeze_encoder:
            with torch.no_grad():
                idr_embeddings = self._forward_function(batch)
        else:
            # BUG FIX: the original else-branch was the bare expression
            # `idr_embeddings`, which raised NameError whenever
            # args.freeze_encoder was False. Compute the embeddings here too.
            idr_embeddings = self._forward_function(batch)

        output["hidden"] = torch.stack(idr_embeddings)
        return output

    def _forward_function(self, batch) -> list:
        """Return a list of per-sample mean embeddings over the index window."""
        output_esm = self.encoder(batch["x"])
        # mask out non-idr residues and average
        B, N, H = output_esm["residues"].shape
        mask = torch.zeros(B, N)
        for i in range(B):
            mask[i, batch["start_idx"][i] : batch["end_idx"][i]] = 1

        idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
            output_esm["residues"].device
        )
        idr_embeddings = []
        for idx, sample in enumerate(idr_residue_embeddings):
            # Sum over the sequence dimension, divide by window length.
            avg_sample = sample.sum(0) / mask[idx].sum()
            idr_embeddings.append(avg_sample)

        return idr_embeddings

    @staticmethod
    def set_args(args) -> None:
        # Masking requires per-residue hidden states from the encoder.
        args.output_residue_hiddens = True
|
551 |
+
|
552 |
+
|
553 |
+
@register_object("fair_esm_hiddens", "model")
class FairEsmHiddens(AbstractModel):
    """Thin wrapper that runs a registry-selected FairESM encoder and returns
    its raw output dict unchanged."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        # Encoder class is looked up from the model registry by name.
        self.encoder = get_object(args.fair_esm_type, "model")(args)
        if self.args.freeze_esm:
            self.encoder.eval()

    def forward(self, batch):
        if self.args.freeze_esm:
            with torch.no_grad():
                return self.encoder(batch["x"])
        return self.encoder(batch["x"])

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """
        parser.add_argument(
            "--fair_esm_type",
            type=str,
            default="fair_esm2",
            help="name of the protein encoder",
            action=set_protgps_type("model"),
        )
|
data/protgps/utils/__init__.py
ADDED
File without changes
|
data/protgps/utils/callbacks.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pytorch_lightning.callbacks import Callback
|
2 |
+
from protgps.utils.registry import get_object
|
3 |
+
|
4 |
+
|
5 |
+
def set_callbacks(trainer, args):
    """
    Assemble the callback list for a trainer, merging manually requested
    callbacks with those the trainer preset from its own args.

    A preset callback is dropped when a manually selected callback serves the
    same role, judged by both sharing the same first-level Callback subclass.

    Parameters
    ----------
    trainer : pl.Trainer
        lightning trainer
    args : Namespace
        global args

    Returns
    -------
    callbacks: list
        complete list of callbacks to be used by trainer
    """
    chosen = [get_object(name, "callback")(args) for name in args.callback_names]

    # Presets whose role duplicates a manually chosen callback.
    redundant = [
        preset
        for preset in trainer.callbacks
        if any(
            get_callback_parent_class(preset) == get_callback_parent_class(new)
            for new in chosen
        )
    ]

    chosen.extend(preset for preset in trainer.callbacks if preset not in redundant)
    return chosen
|
41 |
+
|
42 |
+
|
43 |
+
def get_callback_parent_class(obj):
    """
    Parameters
    ----------
    obj : Callback
        instance of a callback class

    Returns
    -------
    class
        the ancestor immediately before Callback in obj's MRO, i.e. the
        first-level child of the Callback base class
    """
    mro = obj.__class__.__mro__
    # Position of Callback itself in the MRO; raises ValueError if absent,
    # matching the original list.index(True) behavior.
    callback_pos = [klass == Callback for klass in mro].index(True)
    return mro[callback_pos - 1]
|
data/protgps/utils/classes.py
ADDED
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from abc import ABCMeta
|
2 |
+
import argparse
|
3 |
+
from protgps.utils.registry import get_object
|
4 |
+
|
5 |
+
INITED_OBJ = []
|
6 |
+
|
7 |
+
|
8 |
+
class classproperty(object):
    """
    Descriptor combining @classmethod with @property: the wrapped getter is
    invoked with the owning class, whether accessed on the class or an instance.
    """

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, instance, owner):
        # Always dispatch on the owning class, ignoring the instance.
        return self.fget(owner)
|
18 |
+
|
19 |
+
|
20 |
+
class ProtGPS(object):
    """Base class for registrable ProtGPS components; subclasses override the
    arg hooks below as needed."""

    # NOTE(review): Py2-style metaclass declaration — has no effect under
    # Python 3; kept for byte-compatibility with existing code.
    __metaclass__ = ABCMeta

    def __init__(self, **kwargs) -> None:
        super().__init__()

    @staticmethod
    def add_args(parser) -> None:
        """Add class specific args

        Args:
            parser (argparse.ArgumentParser): argument parser
        """

    @staticmethod
    def set_args(args) -> None:
        """Set values for class specific args

        Args:
            args (argparse.Namespace): arguments
        """
|
43 |
+
|
44 |
+
|
45 |
+
def set_protgps_type(object_name):
    """
    Build an argparse Action class bound to a registry category.
    The action stores the parsed value and exposes hooks that add and set
    object-level args for the named registry objects.

    Args:
        object_name (str): kind of protgps class used (e.g., dataset, model, lightning)

    Returns:
        argparse.Action: action for the specific protgps class
    """

    class ProtGPSAction(argparse.Action):
        def __init__(
            self,
            option_strings,
            dest,
            nargs=None,
            const=None,
            default=None,
            type=None,
            choices=None,
            required=False,
            help=None,
            metavar=None,
        ):
            super().__init__(
                option_strings=option_strings,
                dest=dest,
                nargs=nargs,
                const=const,
                default=default,
                type=type,
                choices=choices,
                required=required,
                help=help,
                metavar=metavar,
            )
            # Marker flags consumed by the arg-parsing machinery elsewhere.
            self.is_protgps_action = True
            self.object_name = object_name

        def __call__(self, parser, namespace, values, option_string=None) -> None:
            setattr(namespace, self.dest, values)

        def add_args(self, parser, values) -> None:
            """
            Add object-level args when an add_argument is called

            Args:
                parser (argparse.parser): protgps parser object
                values (Union[list, str]): argument values inputted
            """
            if isinstance(values, list):
                names = values
            elif isinstance(values, str):
                names = [values]
            else:
                return
            for name in names:
                key = f"{name}_{object_name}"
                # Guard against repeated add_args calls for the same object,
                # which would make argparse raise on duplicate flags.
                if key not in INITED_OBJ:
                    get_object(name, object_name).add_args(parser)
                    INITED_OBJ.append(key)

        def set_args(self, args, val) -> None:
            """
            Call object-level set_args method

            Args:
                args (argparse.namespace): global args
                val (Union[list,str]): value for argument
            """
            if isinstance(val, list):
                for name in val:
                    get_object(name, object_name).set_args(args)
            elif isinstance(val, str):
                get_object(val, object_name).set_args(args)

    return ProtGPSAction
|
data/protgps/utils/debug.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def debug_vscode():
    """
    Block until a VSCode remote debugger attaches on localhost:5678.

    Since this requires listening on a local port, this will only work when VSCode is connected to the same machine.
    Before running, add the following to your launch.json:
    {"version":"0.2.0","configurations":[{"name":"Python: Remote Attach","type":"python","request":"attach","connect":{"host":"localhost","port":5678},"pathMappings":[{"localRoot":"${workspaceFolder}",
    "remoteRoot":"."}],"justMyCode":true}]}
    """
    # Imported lazily so debugpy is only required when debugging is requested.
    import debugpy

    print("Waiting for VSCode debugger to attach...")
    debugpy.listen(5678)
    # Blocks the process until a client connects.
    debugpy.wait_for_client()
    print("VSCode debugger attached!")
|
data/protgps/utils/download.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Code taken from
|
2 |
+
# https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
|
3 |
+
|
4 |
+
|
5 |
+
import requests
|
6 |
+
|
7 |
+
|
8 |
+
def download_file_from_google_drive(id, destination):
    """Download a Google Drive file by id, streaming it to *destination*.

    Handles Drive's "can't virus-scan large files" confirmation cookie by
    re-issuing the request with the confirm token when one is present.
    """
    URL = "https://docs.google.com/uc?export=download"

    session = requests.Session()
    response = session.get(URL, params={"id": id}, stream=True)

    token = get_confirm_token(response)
    if token:
        response = session.get(
            URL, params={"id": id, "confirm": token}, stream=True
        )

    save_response_content(response, destination)
|
21 |
+
|
22 |
+
|
23 |
+
def get_confirm_token(response):
    """Return the Google Drive download-warning cookie value, or None."""
    return next(
        (
            value
            for key, value in response.cookies.items()
            if key.startswith("download_warning")
        ),
        None,
    )
|
29 |
+
|
30 |
+
|
31 |
+
def save_response_content(response, destination):
    """Stream a response body to *destination* in 32 KiB chunks."""
    chunk_size = 32768

    with open(destination, "wb") as sink:
        for piece in response.iter_content(chunk_size):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if piece:
                sink.write(piece)
|
data/protgps/utils/loading.py
ADDED
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from argparse import Namespace, FileType
|
2 |
+
import pickle
|
3 |
+
import collections.abc as container_abcs
|
4 |
+
import re
|
5 |
+
from tabnanny import check
|
6 |
+
from typing import Literal, Optional
|
7 |
+
from protgps.utils.registry import get_object
|
8 |
+
import torch
|
9 |
+
from torch.utils import data
|
10 |
+
from protgps.utils.sampler import DistributedWeightedSampler
|
11 |
+
from pytorch_lightning.utilities.cloud_io import load as pl_load
|
12 |
+
|
13 |
+
|
14 |
+
string_classes = (str, bytes)
|
15 |
+
int_classes = int
|
16 |
+
np_str_obj_array_pattern = re.compile(r"[SaUO]")
|
17 |
+
|
18 |
+
default_collate_err_msg_format = (
|
19 |
+
"default_collate: batch must contain tensors, numpy arrays, numbers, PyG Data or HeteroData, "
|
20 |
+
"dicts, or lists; found {}"
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
def default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""
    # NOTE(review): appears adapted from torch.utils.data's default_collate —
    # verify `_typed_storage` against the torch version in use.

    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = elem._typed_storage()._new_shared(numel, device=elem.device)
            out = elem.new(storage).view(-1, *list(elem.size()))
        return torch.stack(batch, 0, out=out)
    elif (
        elem_type.__module__ == "numpy"
        and elem_type.__name__ != "str_"
        and elem_type.__name__ != "string_"
    ):
        if elem_type.__name__ == "ndarray" or elem_type.__name__ == "memmap":
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))

            # Convert numpy arrays to tensors and recurse.
            return default_collate([torch.as_tensor(b) for b in batch])
        elif elem.shape == ():  # scalars
            return torch.as_tensor(batch)
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int_classes):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        # Strings are returned as a plain list, not tensorized.
        return batch
    elif isinstance(elem, container_abcs.Mapping):
        # Collate each dict field independently, keyed like the first element.
        return {key: default_collate([d[key] for d in batch]) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, "_fields"):  # namedtuple
        return elem_type(*(default_collate(samples) for samples in zip(*batch)))
    elif isinstance(elem, container_abcs.Sequence):
        # check to make sure that the elements in batch have consistent size
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(elem) == elem_size for elem in it):
            raise RuntimeError("each element in list of batch should be of equal size")
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError(default_collate_err_msg_format.format(elem_type))
|
71 |
+
|
72 |
+
|
73 |
+
def ignore_None_collate(batch):
    """
    default_collate wrapper that creates batches only of not None values.
    Useful for cases when the dataset.__getitem__ can return None because of some
    exception and then we will want to exclude that sample from the batch.
    """
    kept = [sample for sample in batch if sample is not None]
    if not kept:
        # An all-None batch yields no batch at all.
        return None
    return default_collate(kept)
|
83 |
+
|
84 |
+
|
85 |
+
def get_train_dataset_loader(args: Namespace, split: Optional[str] = "train"):
    """Given arg configuration, return the torch.DataLoader for training.

    Sampler choice: class-balanced weighted sampling when args.class_bal is
    set, otherwise random shuffling — each with a distributed variant when
    args.strategy == "ddp".

    Args:
        args (Namespace): args
        split (str, optional): dataset split. Defaults to "train".

    Returns:
        train_data_loader: iterator that returns batches
    """
    dataset = get_object(args.dataset_name, "dataset")(args, split)

    if args.class_bal:
        if args.strategy == "ddp":
            sampler = DistributedWeightedSampler(
                dataset,
                weights=dataset.weights,
                replacement=True,
                rank=args.global_rank,
                num_replicas=args.world_size,
            )
        else:
            sampler = data.sampler.WeightedRandomSampler(
                weights=dataset.weights,
                num_samples=len(dataset),
                replacement=True,
            )
    elif args.strategy == "ddp":
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset,
            shuffle=True,
            rank=args.global_rank,
            num_replicas=args.world_size,
        )
    else:
        sampler = data.sampler.RandomSampler(dataset)

    return data.DataLoader(
        dataset,
        num_workers=args.num_workers,
        sampler=sampler,
        pin_memory=True,
        batch_size=args.batch_size,
        collate_fn=ignore_None_collate,
        drop_last=True,
    )
|
135 |
+
|
136 |
+
|
137 |
+
def get_eval_dataset_loader(
    args: Namespace, split: Literal["train", "dev", "test"], shuffle=False
):
    """Build a torch.DataLoader for evaluating on the given split.

    Args:
        args (Namespace): args
        split (Literal["train", "dev", "test"]): dataset split.
        shuffle (bool, optional): whether to shuffle dataset. Defaults to False.

    Returns:
        data_loader: iterator that returns batches
    """
    dataset = get_object(args.dataset_name, "dataset")(args, split)

    if args.strategy == "ddp":
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset,
            shuffle=shuffle,
            rank=args.global_rank,
            num_replicas=args.world_size,
        )
    elif shuffle:
        sampler = torch.utils.data.sampler.RandomSampler(dataset)
    else:
        sampler = torch.utils.data.sampler.SequentialSampler(dataset)

    return torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        collate_fn=ignore_None_collate,
        pin_memory=True,
        drop_last=False,
        sampler=sampler,
    )
|
177 |
+
|
178 |
+
|
179 |
+
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Gather *tensor* from every distributed rank and concatenate along dim 0.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
|
192 |
+
|
193 |
+
|
194 |
+
def get_lightning_model(args: Namespace):
    """Create new model or load from checkpoint

    Args:
        args (Namespace): global args

    Raises:
        ValueError: checkpoint_path must be a ".args" or ".ckpt" file

    Returns:
        model: pl.LightningModule instance
    """
    if args.from_checkpoint:
        if args.checkpoint_path.endswith(".args"):
            # FIX: close the pickle file deterministically (was a bare open()).
            with open(args.checkpoint_path, "rb") as f:
                snargs = Namespace(**pickle.load(f))
            # update saved args with new arguments
            for k, v in vars(args).items():
                if k not in snargs:
                    setattr(snargs, k, v)
            model = get_object(snargs.lightning_name, "lightning")(snargs)
            modelpath = snargs.model_path
        elif args.checkpoint_path.endswith(".ckpt"):
            model = get_object(args.lightning_name, "lightning")(args)
            modelpath = args.checkpoint_path
            checkpoint = pl_load(
                args.checkpoint_path, map_location=lambda storage, loc: storage
            )
            snargs = checkpoint["hyper_parameters"]["args"]
        else:
            # BUG FIX: the original raised argparse.FileType, which is not an
            # exception class — raising it itself fails with
            # "TypeError: exceptions must derive from BaseException".
            raise ValueError("checkpoint_path should be an args or ckpt file.")
        # update args with old args if not found
        for k, v in vars(snargs).items():
            if k not in args:
                setattr(args, k, v)
        model = model.load_from_checkpoint(
            checkpoint_path=modelpath,
            strict=not args.relax_checkpoint_matching,
            **{"args": args},
        )
    else:
        model = get_object(args.lightning_name, "lightning")(args)
    return model
|
data/protgps/utils/messages.py
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# Error Messages
# Both templates take two format slots: the offending path and the caught exception.
METAFILE_NOTFOUND_ERR = "Metadata file {} could not be parsed! Exception: {}!"
LOAD_FAIL_MSG = "Failed to load image: {}\nException: {}"
|
data/protgps/utils/parsing.py
ADDED
@@ -0,0 +1,597 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
import pwd
|
4 |
+
from pytorch_lightning import Trainer
|
5 |
+
import itertools
|
6 |
+
from protgps.utils.registry import md5
|
7 |
+
import json
|
8 |
+
import copy
|
9 |
+
from protgps.utils.classes import set_protgps_type
|
10 |
+
|
11 |
+
EMPTY_NAME_ERR = 'Name of augmentation or one of its arguments cant be empty\n\
|
12 |
+
Use "name/arg1=value/arg2=value" format'
|
13 |
+
POSS_VAL_NOT_LIST = (
|
14 |
+
"Flag {} has an invalid list of values: {}. Length of list must be >=1"
|
15 |
+
)
|
16 |
+
|
17 |
+
|
18 |
+
class GlobalNamespace(argparse.Namespace):
    # Distinct Namespace subclass with no added behavior — presumably lets
    # globally-scoped args be told apart from object-level ones by type;
    # verify against callers.
    pass
|
20 |
+
|
21 |
+
|
22 |
+
def parse_dispatcher_config(config):
    """
    Parses an experiment config, and creates jobs. For flags that are expected to be a single item,
    but the config contains a list, this will return one job for each item in the list.
    :config - experiment_config

    returns: jobs - a list of flag strings, each of which encapsulates one job.
        *Example: --train --cuda --dropout=0.1 ...
    returns: experiment_axies - axies that the grid search is searching over
    """
    allowed_keys = [
        "script",
        "available_gpus",
        "cartesian_hyperparams",
        "paired_hyperparams",
        "tune_hyperparams",
    ]
    assert all(key in allowed_keys for key in config.keys())

    def render_flag(name, value):
        # Booleans become bare switches (omitted entirely when False);
        # everything else renders as "--name value ".
        if isinstance(value, bool):
            return "--{} ".format(name) if value else ""
        return "--{} {} ".format(name, value)

    cartesian_space = config["cartesian_hyperparams"]
    paired_space = config.get("paired_hyperparams", [])
    flags = []
    experiment_axies = []

    # Anything outside the search-space keys is a fixed flag shared by all jobs.
    fixed_args = "".join(
        render_flag(key, val)
        for key, val in config.items()
        if key
        not in ("script", "cartesian_hyperparams", "paired_hyperparams", "available_gpus")
    )

    # Paired hyperparams vary together: one suffix per zipped value tuple.
    paired_suffixes = [""]
    if len(paired_space) > 0:
        paired_keys = list(paired_space.keys())
        flags.extend(paired_keys)
        paired_suffixes = [
            "".join(render_flag(k, v) for k, v in zip(paired_keys, combo))
            for combo in zip(*paired_space.values())
        ]

    # Cartesian hyperparams: full product; multi-valued keys are search axes.
    grid_keys = []
    grid_values = []
    for key, values in cartesian_space.items():
        flags.append(key)
        grid_keys.append(key)
        grid_values.append(values)
        if len(values) > 1:
            experiment_axies.append(key)

    experiments = []
    for combo in itertools.product(*grid_values):
        core = "".join(render_flag(k, v) for k, v in zip(grid_keys, combo)) + fixed_args
        for suffix in paired_suffixes:
            experiments.append(core + suffix)

    return experiments, flags, experiment_axies
|
118 |
+
|
119 |
+
|
120 |
+
def prepare_training_config_for_eval(train_config):
    """Convert training config to an eval config for testing.

    Re-parses the original training dispatcher config to recover the md5
    stems of the training runs (used to locate each run's saved args file),
    then builds a copy of the config with eval-mode overrides applied.

    Parameters
    ----------
    train_config: dict
        config with the following structure:
        {
            "train_config": ,  # path to train config
            "log_dir": ,  # log directory used by dispatcher during training
            "eval_args": {} # test set-specific arguments beyond default
        }

    Returns
    -------
    experiments: list
    flags: list
    experiment_axies: list
    """

    train_args = json.load(open(train_config["train_config"], "r"))

    # Parse the *training* config first so stem_names matches the hashes the
    # dispatcher used when launching training (one md5 per experiment string).
    experiments, _, _ = parse_dispatcher_config(train_args)
    stem_names = [md5(e) for e in experiments]
    eval_args = copy.deepcopy(train_args)
    eval_args["cartesian_hyperparams"].update(train_config["eval_args"])

    # reset defaults: eval runs test-only, from a checkpoint, on a single gpu
    eval_args["cartesian_hyperparams"]["train"] = [False]
    eval_args["cartesian_hyperparams"]["test"] = [True]
    eval_args["cartesian_hyperparams"]["from_checkpoint"] = [True]
    eval_args["cartesian_hyperparams"]["gpus"] = [1]
    # eval_args["cartesian_hyperparams"]["comet_tags"][0] += " eval"
    eval_args["available_gpus"] = train_config["available_gpus"]
    eval_args["script"] = train_config["script"]

    experiments, flags, experiment_axies = parse_dispatcher_config(eval_args)

    # NOTE(review): since eval_args is a deepcopy of train_args, the second
    # clause implies the first is False whenever it is True, making this
    # condition almost always satisfied — confirm whether "and not" was meant.
    if ("checkpoint_path" not in eval_args["cartesian_hyperparams"]) or (
        "checkpoint_path" in train_args["cartesian_hyperparams"]
    ):
        # Point each eval experiment at the args file of its training run.
        # `e` (the experiment string) is unpacked but unused here.
        for (idx, e), s in zip(enumerate(experiments), stem_names):
            experiments[idx] += " --checkpoint_path {}".format(
                os.path.join(train_config["log_dir"], "{}.args".format(s))
            )

    return experiments, flags, experiment_axies
|
167 |
+
|
168 |
+
|
169 |
+
def get_parser():
    """Build the ProtGPS argument parser.

    Constructs an :class:`argparse.ArgumentParser` with all standard run,
    data, loss, metric, training, scheduling, callback, checkpointing,
    output, system, and logging arguments. Arguments registered with
    ``set_protgps_type`` are "protgps actions": after an initial
    ``parse_known_args`` pass, :func:`add_class_args` (defined below) asks
    each selected object to add its own class-specific arguments, recursing
    until no new arguments appear.

    Returns
    -------
    argparse.ArgumentParser
        Parser with both standard and object-level arguments attached.
    """
    global_namespace = GlobalNamespace(allow_abbrev=False)

    parser = argparse.ArgumentParser(
        description="ProtGPS Standard Args.", allow_abbrev=False
    )

    # -------------------------------------
    # Run Setup
    # -------------------------------------
    parser.add_argument(
        "--train",
        action="store_true",
        default=False,
        help="Whether or not to train model",
    )
    parser.add_argument(
        "--dev",
        action="store_true",
        default=False,
        help="Whether or not to run model on dev set",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        default=False,
        help="Whether or not to run model on test set",
    )
    parser.add_argument(
        "--predict",
        action="store_true",
        default=False,
        help="Whether to run model for pure prediction where labels are not known",
    )
    parser.add_argument(
        "--eval_on_train",
        action="store_true",
        default=False,
        help="Whether or not to evaluate model on train split",
    )

    # -------------------------------------
    # Data
    # -------------------------------------
    parser.add_argument(
        "--dataset_name",
        type=str,
        action=set_protgps_type("dataset"),
        default="mnist",
        help="Name of dataset",
    )
    parser.add_argument(
        "--img_size",
        type=int,
        nargs="+",
        default=[256, 256],
        help="Width and height of image in pixels. [default: [256,256]]",
    )
    parser.add_argument(
        "--num_chan", type=int, default=3, help="Number of channels for input image"
    )
    parser.add_argument(
        "--img_mean",
        type=float,
        nargs="+",
        default=[128.1722],
        help="Mean of image per channel",
    )
    parser.add_argument(
        "--img_std",
        type=float,
        nargs="+",
        default=[87.1849],
        help="Standard deviation of image per channel",
    )
    parser.add_argument(
        "--img_file_type",
        type=str,
        default="png",
        choices=["png", "dicom"],
        help="Type of image. one of [png, dicom]",
    )

    # -------------------------------------
    # Losses
    # -------------------------------------

    # losses and metrics
    parser.add_argument(
        "--loss_names",
        type=str,
        action=set_protgps_type("loss"),
        nargs="*",
        default=[],
        help="Name of loss",
    )
    parser.add_argument(
        "--loss_names_for_eval",
        type=str,
        action=set_protgps_type("loss"),
        nargs="*",
        default=None,
        help="Name of loss",
    )

    # -------------------------------------
    # Metrics
    # -------------------------------------

    parser.add_argument(
        "--metric_names",
        type=str,
        action=set_protgps_type("metric"),
        nargs="*",
        default=[],
        help="Name of performance metric",
    )

    # -------------------------------------
    # Training Module
    # -------------------------------------

    parser.add_argument(
        "--lightning_name",
        type=str,
        action=set_protgps_type("lightning"),
        default="base",
        help="Name of lightning module",
    )

    # -------------------------------------
    # Hyper parameters
    # -------------------------------------
    # learning
    # NOTE(review): help text says 128 but the actual default is 32 — confirm.
    parser.add_argument(
        "--batch_size",
        type=int,
        default=32,
        help="Batch size for training [default: 128]",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.001,
        help="Initial learning rate [default: 0.001]",
    )
    parser.add_argument(
        "--dropout",
        type=float,
        default=0.25,
        help="Amount of dropout to apply on last hidden layer [default: 0.25]",
    )
    parser.add_argument(
        "--optimizer_name",
        type=str,
        action=set_protgps_type("optimizer"),
        default="adam",
        help="Optimizer to use [default: adam]",
    )
    parser.add_argument(
        "--momentum", type=float, default=0, help="Momentum to use with SGD"
    )
    # NOTE(review): help text says 0.5 but the actual default is 0.1 — confirm.
    parser.add_argument(
        "--lr_decay",
        type=float,
        default=0.1,
        help="Initial learning rate [default: 0.5]",
    )
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0,
        help="L2 Regularization penaty [default: 0]",
    )

    # tune
    parser.add_argument(
        "--tune_hyperopt",
        action="store_true",
        default=False,
        help="Whether to run hyper-parameter optimization",
    )
    parser.add_argument(
        "--tune_search_alg",
        type=str,
        default="search",
        help="Optimization algorithm",
    )
    parser.add_argument(
        "--tune_hyperparam_names",
        type=str,
        nargs="*",
        default=[],
        help="Name of parameters being optimized",
    )

    # -------------------------------------
    # Schedule
    # -------------------------------------
    parser.add_argument(
        "--scheduler_name",
        type=str,
        action=set_protgps_type("scheduler"),
        default="reduce_on_plateau",
        help="Name of scheduler",
    )
    parser.add_argument(
        "--cosine_annealing_period",
        type=int,
        default=10,
        help="length of period of lr cosine anneal",
    )
    parser.add_argument(
        "--cosine_annealing_period_scaling",
        type=int,
        default=2,
        help="how much to multiply each period in successive annealing",
    )
    parser.add_argument(
        "--patience",
        type=int,
        default=5,
        help="Number of epochs without improvement on dev before halving learning rate and reloading best model [default: 5]",
    )
    parser.add_argument(
        "--num_adv_steps",
        type=int,
        default=1,
        help="Number of steps for domain adaptation discriminator per one step of encoding model [default: 5]",
    )

    # -------------------------------------
    # Callbacks
    # -------------------------------------

    parser.add_argument(
        "--callback_names",
        type=str,
        action=set_protgps_type("callback"),
        nargs="*",
        default=["checkpointer", "lr_monitor"],
        help="Lightning callbacks",
    )

    parser.add_argument(
        "--monitor",
        type=str,
        default=None,
        help="Name of metric to use to decide when to save model",
    )

    parser.add_argument(
        "--checkpoint_save_top_k",
        type=int,
        default=1,
        help="the best k models according to the quantity monitored will be saved",
    )
    parser.add_argument(
        "--checkpoint_save_last",
        action="store_true",
        default=False,
        help="save the last model to last.ckpt",
    )

    # -------------------------------------
    # Model checkpointing
    # -------------------------------------

    parser.add_argument(
        "--checkpoint_dir", type=str, default="snapshot", help="Where to dump the model"
    )
    parser.add_argument(
        "--from_checkpoint",
        action="store_true",
        default=False,
        help="Whether loading a model from a saved checkpoint",
    )
    parser.add_argument(
        "--relax_checkpoint_matching",
        action="store_true",
        default=False,
        help="Do not enforce that the keys in checkpoint_path match the keys returned by this module’s state dict",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default=None,
        help="Filename of model snapshot to load[default: None]",
    )

    # -------------------------------------
    # Storing model outputs
    # -------------------------------------
    parser.add_argument(
        "--save_hiddens",
        action="store_true",
        default=False,
        help="Save hidden repr from each image to an npz based off results path, git hash and exam name",
    )
    parser.add_argument(
        "--save_predictions",
        action="store_true",
        default=False,
        help="Save hidden repr from each image to an npz based off results path, git hash and exam name",
    )
    parser.add_argument(
        "--inference_dir",
        type=str,
        default="hiddens/test_run",
        help='Dir to store hiddens npy"s when store_hiddens is true',
    )

    # -------------------------------------
    # Run outputs
    # -------------------------------------
    parser.add_argument(
        "--results_path",
        type=str,
        default="logs/test.args",
        help="Where to save the result logs",
    )
    parser.add_argument(
        "--experiment_name",
        type=str,
        help="defined either automatically by dispatcher.py or time in main.py. Keep without default",
    )

    # -------------------------------------
    # System
    # -------------------------------------
    # NOTE(review): help text says 4 but the actual default is 8 — confirm.
    parser.add_argument(
        "--num_workers",
        type=int,
        default=8,
        help="Num workers for each data loader [default: 4]",
    )

    # cache
    parser.add_argument(
        "--cache_path", type=str, default=None, help="Dir to cache images."
    )

    # -------------------------------------
    # Logging
    # -------------------------------------

    parser.add_argument(
        "--logger_name",
        type=str,
        action=set_protgps_type("logger"),
        choices=["tensorboard", "comet", "wandb"],
        default="tensorboard",
        help="experiment logger to use",
    )
    parser.add_argument(
        "--logger_tags", nargs="*", default=[], help="List of tags for logger"
    )
    parser.add_argument("--project_name", default="CancerCures", help="Comet project")
    parser.add_argument("--workspace", default="pgmikhael", help="Comet workspace")
    parser.add_argument(
        "--log_gen_image",
        action="store_true",
        default=False,
        help="Whether to log sample generated image to comet",
    )
    parser.add_argument(
        "--log_profiler",
        action="store_true",
        default=False,
        help="Log profiler times to logger",
    )

    # -------------------------------------
    # Add object-level args
    # -------------------------------------

    def add_class_args(args_as_dict, parser):
        """Recursively let each selected protgps object extend the parser.

        For every (argname, argval) already parsed, if argname is a
        protgps action, call its add_args hook; any arguments that hook
        introduced (present after but not before the call) are recursed on,
        so nested objects can add their own arguments too.
        """
        for argname, argval in args_as_dict.items():
            # Recompute each iteration: add_args may register new actions.
            args_for_protgpss = {
                a.dest: a for a in parser._actions if hasattr(a, "is_protgps_action")
            }
            old_args = vars(parser.parse_known_args()[0])
            if argname in args_for_protgpss:
                args_for_protgpss[argname].add_args(parser, argval)
                newargs = vars(parser.parse_known_args()[0])
                # keep only the arguments introduced by this add_args call
                newargs = {k: v for k, v in newargs.items() if k not in old_args}
                add_class_args(newargs, parser)

    # First pass fills global_namespace with the standard args, then the
    # object-level args are attached based on the chosen objects.
    parser.parse_known_args(namespace=global_namespace)
    add_class_args(vars(global_namespace), parser)

    return parser
|
562 |
+
|
563 |
+
|
564 |
+
def parse_args(args_strings=None):
    """Parse command-line (or supplied) arguments into a run namespace.

    Wraps :func:`get_parser` with PyTorch Lightning's Trainer arguments,
    derives the distributed strategy from the requested gpus, records the
    unix username and initial step index, and finally lets every protgps
    action post-process its own parsed value via ``set_args``.

    Parameters
    ----------
    args_strings : list[str] | None
        Argument tokens to parse; ``None`` parses ``sys.argv``.

    Returns
    -------
    argparse.Namespace
        The fully populated run arguments.
    """
    parser = Trainer.add_argparse_args(get_parser())
    args = (
        parser.parse_args() if args_strings is None else parser.parse_args(args_strings)
    )

    # using gpus: more than one requested (as "0,1" string or an int count)
    # means distributed data parallel.
    multi_gpu = False
    if isinstance(args.gpus, str):
        multi_gpu = len(args.gpus.split(",")) > 1
    elif isinstance(args.gpus, int):
        multi_gpu = args.gpus > 1
    args.strategy = "ddp" if multi_gpu else None
    # custom samplers are used, so Lightning must not replace them
    args.replace_sampler_ddp = False

    # username
    args.unix_username = pwd.getpwuid(os.getuid())[0]

    # learning initial state
    args.step_indx = 1

    # let each protgps action finalize the value it parsed
    protgps_actions = {
        action.dest: action
        for action in parser._actions
        if hasattr(action, "is_protgps_action")
    }
    for name, value in vars(args).items():
        if name in protgps_actions:
            protgps_actions[name].set_args(args, value)

    # parse tune parameters
    # args = parse_tune_params(args)

    return args
|
data/protgps/utils/registry.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import hashlib
|
2 |
+
|
3 |
+
# Global lookup tables mapping registered names to objects, one registry per
# object category. Populated via the register_object decorator and queried
# with get_object (both defined in this module).
REGISTRIES = {
    "LIGHTNING_REGISTRY": {},
    "DATASET_REGISTRY": {},
    "MODEL_REGISTRY": {},
    "LOSS_REGISTRY": {},
    "METRIC_REGISTRY": {},
    "OPTIMIZER_REGISTRY": {},
    "SCHEDULER_REGISTRY": {},
    "SEARCHER_REGISTRY": {},
    "CALLBACK_REGISTRY": {},
    "INPUT_LOADER_REGISTRY": {},
    "AUGMENTATION_REGISTRY": {},
    "LOGGER_REGISTRY": {},
}
|
17 |
+
|
18 |
+
|
19 |
+
def get_object(object_name, object_type):
    """Fetch the object registered as *object_name* in the *object_type* registry.

    Raises an Exception listing the available names when the lookup fails.
    """
    registry = REGISTRIES["{}_REGISTRY".format(object_type.upper())]
    if object_name not in registry:
        raise Exception(
            "INVALID {} NAME: {}. AVAILABLE {}".format(
                object_type.upper(), object_name, registry.keys()
            )
        )
    return registry[object_name]
|
29 |
+
|
30 |
+
|
31 |
+
def register_object(object_name, object_type):
    """Return a decorator that registers its target under *object_name*.

    The decorated object is stored in the registry for *object_type* and
    tagged with a ``name`` attribute before being returned unchanged.
    """
    registry = REGISTRIES["{}_REGISTRY".format(object_type.upper())]

    def decorator(obj):
        registry[object_name] = obj
        obj.name = object_name
        return obj

    return decorator
|
38 |
+
|
39 |
+
|
40 |
+
def md5(key):
    """Return the hexadecimal MD5 digest of the string ``key``."""
    digest = hashlib.md5()
    digest.update(key.encode())
    return digest.hexdigest()
|
data/protgps/utils/sampler.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
from typing import TypeVar, Optional, Iterator, Sequence
|
3 |
+
|
4 |
+
import torch
|
5 |
+
from torch.utils.data import Dataset
|
6 |
+
import torch.distributed as dist
|
7 |
+
|
8 |
+
|
9 |
+
T_co = TypeVar('T_co', covariant=True)
|
10 |
+
|
11 |
+
|
12 |
+
class DistributedWeightedSampler(torch.utils.data.distributed.DistributedSampler):
    r"""Extension of pytorch's native distributed sampler, but supports weighted sampling.

    Each replica receives an evenly-sized, interleaved shard of the dataset
    indices and draws ``num_samples`` indices from that shard with
    probability proportional to the corresponding entries of ``weights``.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        weights: per-example sampling weights, one per dataset element.
        replacement: whether to sample with replacement. Default: ``True``.
        generator: optional ``torch.Generator`` controlling the draw.
        num_replicas (int, optional): Number of processes participating in
            distributed training. By default retrieved from the current
            distributed group.
        rank (int, optional): Rank of the current process within
            :attr:`num_replicas`. By default retrieved from the current
            distributed group.
        seed (int, optional): random seed used to shuffle the sampler if
            :attr:`shuffle=True`. This number should be identical across all
            processes in the distributed group. Default: ``0``.
        drop_last (bool, optional): if ``True``, then the sampler will drop the
            tail of the data to make it evenly divisible across the number of
            replicas. If ``False``, the sampler will add extra indices to make
            the data evenly divisible across the replicas. Default: ``False``.

    .. warning::
        In distributed mode, calling the :meth:`set_epoch` method at
        the beginning of each epoch **before** creating the :class:`DataLoader`
        iterator is necessary to make shuffling work properly across multiple
        epochs. Otherwise, the same ordering will be always used.

    Example::
        >>> sampler = DistributedSampler(dataset) if is_distributed else None
        >>> loader = DataLoader(dataset, shuffle=(sampler is None),
        ...                     sampler=sampler)
        >>> for epoch in range(start_epoch, n_epochs):
        ...     if is_distributed:
        ...         sampler.set_epoch(epoch)
        ...     train(loader)
    """

    def __init__(self, dataset: Dataset, weights: Sequence[float],
                 replacement: bool = True, generator=None, num_replicas: Optional[int] = None,
                 rank: Optional[int] = None,
                 seed: int = 0, drop_last: bool = False) -> None:
        # Resolve replica count / rank from the default process group only
        # when they were not supplied explicitly.
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last
        self.weights = torch.Tensor(weights)
        self.replacement = replacement
        self.generator = generator
        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        if self.drop_last and len(self.dataset) % self.num_replicas != 0:
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil(
                (len(self.dataset) - self.num_replicas) / self.num_replicas
            )
        else:
            self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas
        self.seed = seed

    def __iter__(self) -> Iterator[T_co]:
        """Yield ``num_samples`` dataset indices for this rank, drawn from
        this rank's shard with probability proportional to ``weights``.
        """
        indices = list(range(len(self.dataset)))

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            indices += indices[: (self.total_size - len(indices))]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample: this rank's interleaved shard of the (padded) indices
        shard = torch.tensor(indices[self.rank : self.total_size : self.num_replicas])
        assert len(shard) == self.num_samples

        # Gather the weights through the shard indices so padded (wrapped)
        # entries get their correct weight too.
        shard_weights = self.weights[shard]

        # BUG FIX: the original sampled from self.weights (the full weight
        # vector) and returned the raw multinomial draws, ignoring the
        # rank-local slice it had just computed — so every rank sampled from
        # the whole dataset instead of its own shard. Sample from the shard
        # weights and map the draws back to dataset indices.
        # NOTE(review): the generator is not re-seeded with self.seed/epoch
        # here, so epoch-to-epoch variation depends on the caller — confirm.
        rand_tensor = torch.multinomial(
            shard_weights, self.num_samples, self.replacement, generator=self.generator
        )
        return iter(shard[rand_tensor].tolist())
|
100 |
+
|
data/pyproject.toml
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[tool.poetry]
|
2 |
+
name = "protgps"
|
3 |
+
version = "0.0.1"
|
4 |
+
description = ""
|
5 |
+
authors = ["Peter G Mikhael <[email protected]>"]
|
6 |
+
license = "MIT"
|
7 |
+
readme = "README.md"
|
8 |
+
repository = "https://github.com/pgmikhael/protgps"
|
9 |
+
|
10 |
+
|
11 |
+
[tool.poetry.dependencies]
|
12 |
+
python = "3.8"
|
13 |
+
|
14 |
+
[tool.poetry.group.ci.dependencies]
|
15 |
+
black = "^23.3.0"
|
16 |
+
mypy = "^1.1.1"
|
17 |
+
pylint = "^2.13.0"
|
18 |
+
pytest = "^7.1.2"
|
19 |
+
pytest-cov = "^3.0.0"
|
20 |
+
rstcheck = { version = "^6.1.2", python = "<4" }
|
21 |
+
ruff = "^0.0.291"
|
22 |
+
|
23 |
+
[build-system]
|
24 |
+
requires = ["poetry-core"]
|
25 |
+
build-backend = "poetry.core.masonry.api"
|
data/scripts/dispatcher.py
ADDED
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# append project root to system path
|
2 |
+
import sys, os
|
3 |
+
from os.path import dirname, realpath
|
4 |
+
|
5 |
+
sys.path.append((dirname(dirname(realpath(__file__)))))
|
6 |
+
import argparse
|
7 |
+
import subprocess
|
8 |
+
import multiprocessing
|
9 |
+
import pickle
|
10 |
+
import json
|
11 |
+
import protgps.utils.parsing as parsing
|
12 |
+
from protgps.utils.registry import md5
|
13 |
+
|
14 |
+
# Message templates used when reporting job status below.
EXPERIMENT_CRASH_MSG = "ALERT! job:[{}] has crashed! Check logfile at:[{}]"
CONFIG_NOT_FOUND_MSG = "ALERT! {} config {} file does not exist!"
SUCESSFUL_SEARCH_STR = "SUCCESS! Grid search results dumped to {}."

# Command-line interface for the dispatcher script.
parser = argparse.ArgumentParser(description="Dispatcher.")
parser.add_argument(
    "--config_path",
    "-c",
    type=str,
    required=True,
    default="configs/config_file.json",
    help="path to model configurations json file",
)
parser.add_argument(
    "--log_dir",
    "-l",
    type=str,
    default="logs",
    help="path to store logs and detailed job level result files",
)
parser.add_argument(
    "--dry_run",
    "-n",
    action="store_true",
    default=False,
    help="print out commands without running",
)
parser.add_argument(
    "--eval_train_config",
    "-e",
    action="store_true",
    default=False,
    help="create evaluation run from a training config",
)
|
48 |
+
|
49 |
+
|
50 |
+
def launch_experiment(script, gpu, flag_string):
    """
    Launch an experiment and direct logs and results to a unique filepath.

    Reads the module-level ``args`` namespace (log_dir, dry_run) parsed in
    the ``__main__`` block.

    Args:
        script (str): file name to run as main
        gpu (str): gpu this worker can access.
        flag_string (str): arguments and values as a single blob.

    Returns:
        results_path (str): path to saved args pickle file
        log_path (str): path to logs
    """
    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)

    # The md5 of the flag string uniquely identifies this experiment's files.
    log_name = md5(flag_string)
    log_stem = os.path.join(args.log_dir, log_name)
    log_path = "{}.txt".format(log_stem)
    results_path = "{}.args".format(log_stem)

    experiment_string = f"CUDA_VISIBLE_DEVICES={gpu} python -u scripts/{script}.py {flag_string} --results_path {log_stem} --experiment_name {log_name}"  # use log_stem instead of results_path, add extensions in main/learn.py

    # forward logs to logfile; append when resuming, truncate otherwise
    if "--resume" in flag_string:
        pipe_str = ">>"
    else:
        pipe_str = ">"

    shell_cmd = f"{experiment_string} {pipe_str} {log_path} 2>&1"
    print("Launched exp: {}".format(shell_cmd))

    # Skip experiments whose results file already exists (resume-friendly).
    # NOTE(review): shell=True with an interpolated command string — safe only
    # because flag_string comes from a local config file, not untrusted input.
    if not os.path.exists(results_path) and (not args.dry_run):
        subprocess.call(shell_cmd, shell=True)

    return results_path, log_path
|
86 |
+
|
87 |
+
|
88 |
+
def worker(script, gpu, job_queue, done_queue):
    """
    Worker thread for each gpu. Consumes all jobs and pushes results to done_queue.

    Args:
        script (str): file name to run as main
        gpu (str): gpu this worker can access.
        job_queue (Queue): queue of available jobs.
        done_queue (Queue): queue where to push results.
    """

    # NOTE(review): Queue.empty() is not reliable across processes; two
    # workers can both see a non-empty queue and race for the last item,
    # so get() may block — confirm acceptable given one job per entry.
    while not job_queue.empty():
        params = job_queue.get()
        if params is None:
            # None acts as a shutdown sentinel for this worker.
            return
        done_queue.put(launch_experiment(script, gpu, params))
|
104 |
+
|
105 |
+
|
106 |
+
if __name__ == "__main__":

    args = parser.parse_args()
    if not os.path.exists(args.config_path):
        print(CONFIG_NOT_FOUND_MSG.format("experiment", args.config_path))
        sys.exit(1)
    experiment_config = json.load(open(args.config_path, "r"))

    # Build the list of experiment flag-strings either from a training config
    # converted for evaluation, or directly from the dispatcher config.
    if args.eval_train_config:
        experiments, flags, experiment_axies = parsing.prepare_training_config_for_eval(
            experiment_config
        )
    else:
        experiments, flags, experiment_axies = parsing.parse_dispatcher_config(
            experiment_config
        )

    job_queue = multiprocessing.Queue()
    done_queue = multiprocessing.Queue()

    for job in experiments:
        job_queue.put(job)
    print("Launching Dispatcher with {} jobs!".format(len(experiments)))
    print()

    # One worker process per configured gpu; each drains the shared job queue.
    for gpu in experiment_config["available_gpus"]:
        print("Start gpu worker {}".format(gpu))
        multiprocessing.Process(
            target=worker,
            args=(experiment_config["script"], gpu, job_queue, done_queue),
        ).start()
    print()

    # Collect one result per launched experiment; unpickling the results file
    # doubles as a success check (a crashed job leaves no readable .args file).
    for i in range(len(experiments)):
        result_path, log_path = done_queue.get()  # .rslt and .txt (stderr/out) files
        try:
            result_dict = pickle.load(open(result_path, "rb"))
            dump_result_string = SUCESSFUL_SEARCH_STR.format(result_path)
            print("({}/{}) \t {}".format(i + 1, len(experiments), dump_result_string))
        except Exception:
            print("Experiment failed! Logs are located at: {}".format(log_path))
|