+        "[HTML display of five designed sequences (unique IDR followed by a shared C-terminal sequence) with predicted localization: nuc1 nucleolus (0.90), nuc2 nucleolus (0.93), nuc7 nucleolus (0.95), spk2 nuclear_speckle (0.97), spk3 nuclear_speckle (0.91)]"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "html = visualize_text(records)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "id": "a5d1c9ec-8131-4592-abe3-ce8e6fc34014",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open('html_file.html', 'w') as f:\n",
+ " f.write(html.data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "id": "95c3a4ba-378e-4d24-a057-30d1eb14493d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for sequence_dict in sequence_dict_copy:\n",
+ " seq = sequence_dict['sequence']\n",
+ " sequence_dict['full_sequence'] = seq + mcherry"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "id": "16d48495-6d1b-47a1-8fae-f8d07c0adaa9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pd.DataFrame(sequence_dict_copy).to_csv('attributions.csv', index=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a713920b-7b97-4f6c-a01d-2b584d43ac1f",
+ "metadata": {
+ "jp-MarkdownHeadingCollapsed": true
+ },
+ "source": [
+ "## Trajectories"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "id": "2ff5a4fc-9c7c-4fdd-b85e-e306a05d077f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "paths = []\n",
+ "for root,_,files in os.walk(os.path.join(esm_directory, \"trajectories\")):\n",
+ " paths.extend([ os.path.join(root, f) for f in files if f.endswith('.txt')])\n",
+ "\n",
+ "\n",
+ "idr2scores = defaultdict(list)\n",
+ "preds = []\n",
+ "for p in paths:\n",
+ " config = os.path.join(os.path.dirname(p), \".hydra/config.yaml\")\n",
+ " with open(config, 'r') as file:\n",
+ " config = yaml.safe_load(file)\n",
+ " condensate = p.split('/')[-1].split('.')[0]\n",
+ " with open(p, 'r') as f:\n",
+ " preds = f.readlines()\n",
+ " preds = [p.strip('\\n') for p in preds]\n",
+ " idr2scores[f\"{condensate}_{config['seed']}\"].extend([p.split('\\t') for p in preds])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "id": "4f093844-c7db-4727-9a8f-c161df9547b6",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "dict_keys(['nuclear_speckle_7', 'nucleolus_7', 'nuclear_speckle_8', 'nucleolus_6', 'nucleolus_1'])"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "idr2scores.keys()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "4e5d07a9-1d80-4355-9eb7-c802db5b9539",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "trajectories = defaultdict(list)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "id": "3f494d6e-c5b9-4d13-bcc3-5249c99a9527",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for generated, traj in idr2scores.items():\n",
+ " \n",
+ " target, seed = generated[:-2], generated[-1]\n",
+ " trajectory = [(int(i.split(':')[0]), s, float(p)) for i,s,p in traj ]\n",
+ " trajectory = sorted(trajectory, key = lambda x: x[0])\n",
+ " \n",
+ " steps = [s[0] for s in trajectory]\n",
+ " seqs = [s[1] for s in trajectory]\n",
+ " scores = [s[2] for s in trajectory]\n",
+ " \n",
+ " trajectories[\"Target Compartment\"].extend( [target] * len(steps) )\n",
+ " trajectories[\"Seed\"].extend([seed] * len(steps))\n",
+ " trajectories[\"Step\"].extend(steps)\n",
+ " trajectories[\"IDR Sequence\"].extend(seqs)\n",
+ " trajectories[\"Localization Score\"].extend(scores)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "id": "4c63c113-49e6-4e27-8d8f-834aa18d1b5a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pd.DataFrame(trajectories).to_csv('trajectories.csv', index=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 271,
+ "id": "4ec952fd-c3ff-4104-b00b-4794145551a7",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAikAAAGdCAYAAADXIOPgAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAA9hAAAPYQGoP6dpAABsQ0lEQVR4nO2de5xUdf3/X7N3FpblvtwWXLxBooJLKgjeMkzNbpaWJlraLyJR5FspWllmUX2LL5WBlprfygtf85IZqWshouCFlVUUEBDkusuyXHYX2NvMnN8fs+fM51znnDPnzJwz83o+HrAzZ87lM5+Z+Xxe5337RCRJkkAIIYQQEjAKst0AQgghhBAjKFIIIYQQEkgoUgghhBASSChSCCGEEBJIKFIIIYQQEkgoUgghhBASSChSCCGEEBJIKFIIIYQQEkiKst0AO8TjcezduxcVFRWIRCLZbg4hhBBCbCBJEtrb2zFy5EgUFDi3i4RCpOzduxfV1dXZbgYhhBBCXLBr1y6MHj3a8XGhECkVFRUAEm+yf//+WW4NIYQQQuzQ1taG6upqZR53SihEiuzi6d+/P0UKIYQQEjLchmowcJYQQgghgYQihRBCCCGBhCKFEEIIIYGEIoUQQgghgYQihRBCCCGBhCKFEEIIIYGEIoUQQgghgYQihRBCCCGBhCKFEEIIIYHEsUh55ZVXcPnll2PkyJGIRCJ45plnUh6zcuVK1NbWoqysDOPGjcN9993npq2EEEIIySMci5SjR4/i9NNPx7333mtr/+3bt+PSSy/FjBkzsG7dOtxxxx24+eab8eSTTzpuLCGEEELyB8dr91xyySW45JJLbO9/3333YcyYMVi8eDEAYMKECVi7di1+9atf4YorrnB6eUIIIYTkCb4vMLhmzRrMnDlTte3iiy/Ggw8+iJ6eHhQXF+uO6erqQldXl/K8ra3N72YSQkhgaNh1GOv3tGL3wWOYfuIQzDhxaLabREhW8D1wtqmpCVVVVaptVVVViEajaGlpMTxm4cKFqKysVP5VV1f73UxCCPGdhf/aiNl/qceBI12m+xw82o0r71+DHzzzHu5/ZRu+/cjbiMelDLaSkOCQkewe7RLNkiQZbpdZsGABWltblX+7du3yvY2EEOInja0duH/lNjz/fhNe2rjPdL97ntuA7mhced7WGcXr2w9koommSJKE3/57C+54ej2OdEWz2haSX/guUoYPH46mpibVtubmZhQVFWHw4MGGx5SWlqJ///6qf4QQEmaa25LWk9aOHtP96nceAgB8+rQRGD2wDwDggVXb/W1cCjbvO4JFdZvx6Bs78eL7TakPIMQjfBcpU6dORV1dnWrbiy++iClTphjGoxBCSC7SIrh49hzqwIpNzThqYJVo6xUwc84/ARdNSLjKe2Jx3X6Z5ObH1imP97WZu6oI8RrHIuXIkSNoaGhAQ0MDgESKcUNDA3bu3Akg4aqZNWuWsv/s2bOxY8cOzJ8/Hxs3bsRDDz2EBx98EN/5zne8eQeEEBJwdh08hu/+7V3l+f+u2YGvPfwW7nr2fdV+kiShvTMhXAb2LUbt2IEAsi9SDh3rVh63WMTTEOI1jkXK2rVrMXnyZEyePBkAMH/+fEyePBk//OEPAQCNjY2KYAGAmpoaLF++HC+//DImTZqEn/zkJ/jtb3/L9GNCSN7w839twsGj3brtf6vfrXreFY0j2hskW1FWjOLCxBDdE8tu4GyXECOz4oPmLLaE5BuOU5DPP/98JfDViIcffli37bzzzsPbb7/t9FKEEJITbGhMXUYhGotj96FjAIBIBCgvLkRxYUR5LZuIgbzb9h9FY2sHRlT2yWKLSL7AtXsIIcRH4nEJ21uOWu7T3tmDc3+5AhctegUAUFQQQUFBRLGkvLO7FZ+591W0d5oH3PqFJEnoisZU2771V950ksxAkUIIIT4iphsP6Veqe71uwz7U7ziEva2dyjbZvSOLFAB4d3crXv5gP+p3HMyoZSUalyCXafn6OTUAgM372i0t6oR4BUUKIST07G/vwl9e34FdB49luyk6PmhqVx6v/f5FOGPMANXrKz5oVmXPAMCVU0YDgOLukZn72DpcsXQNfvLcBn8aa4Do6pl74QkAgGPdMbR1sl4K8R+KFEJI6Pn+M+vxg2few7ceqc92U3Q0tyeyYeQJ/tqpYyHWsXz0jZ3KhP/rL52ODXdfjF9+8XQAakuKyP+u2YHN+9oNX/MaMWi2f59iDChPlI6o33EwI9cn+Q1FCiEk9KzYtB8A8N6e4K3z9X9rExWzh/UvAwB8fvJoNPxgJt688xOq/fqVFuELZ4xCeUkyn8FMpADAln1HfGitHtmSUlwYQWFBBNUDywEAN/7v2qynRpPchyKFEEJ84qOWo4olYvSAZDZMZXkxhlWU4VvnH69sO35YP91SIVp3j0g0nhmBIAfNlvQKpv937jgAQFwC/vluY0baQLxn/e5W/PrFDwLpIhXxfRVkQgjxGwnBDOL8cH/S2jHjxCG612+YXoOjXVEc647hK2fqF1K1sqTEMrTooCyySosLAQCXnz4Sc3tjaPa2dmSkDcR75j72Nj46cAwNuw7jLzecle3mmEKRQgghPvGfTYnCZxefUoUiA8ExpF8p7v7sRNPjiywsKZkSKbK7p7Qo2f5rzx6Lv7y+A53dMbPDSMD56EDCgrJqS0uWW2IN3T2EkNAT1GzYx99KxKNU9cajOKUkEJaUXnePIFL6lCSsKp1RxqQQf6FIIYQQnyjsjTG5/PSRro63cvdEMyRS/vp6YpkTUTCV9QqWDlpSQosoOl/bGlxrCkUKIST0BNGQEo9L6O7Nfhk3pK+rcwTB3SOLIVEUlcmWlB6KlLAypG+J8vjRN3da7JldKFIIIaEniNVPxfoiZb1Bp04JQuCs3Lezpo5VtpUV0d0TdmLCb+af7zaiub3TYu/sQZFCCCE+IFoZxKBTJwRCpPT+FW06ckwK3T3hRVviJlN1d5xCkUIICT3Bs6MkLSlFBRHDzB47FBZEcNGEYago1SdiZiomRe5csYZLWXHi/by0cR+ueeB1/GfTPqMjSYCJ9dbZGVaRWE9qf29l5KBBkUIIyVvu/c8WnPGTOvy9YY/n55YtKW6tKDIPXPdxvHPXTN32WIaKuck1aMQ6c30E99VrWw/gnuc2ZqQtxDtkkTuiMpF59veGPYEUKhQphJCcwkl8yq9e3IyDR7vxy+c/8LwdsiXFbTyKSEGBPoBWNNe3dvTgn+82YsWmZt9WSBZbUKp5T4eOdftyTeIfcUWkJCohr/hgP9ZsO5DNJhnCYm6EkNAj6pJYXLLMijHiwFHv7yC9sqSYIVpS5i9rwL97C8d96/zjcdunxnt2HSPNd1JVher5kH6lnl2PZAbZkvLF2tFobu/Eka4oKsqCJwmC1yJCCEmDaFxCkUPjRU/M+/gOWaR4YUkxQoxJkQUKAPzxlW3+iBTB3zNKWIcIADqjDKANG/HeD/aUUf3x1Jxzstwac+juIYTkFG6yXvzIlJHdPSUeWVJOHVWpem7WZq8DapW
YFM32QsEF1dnDVOSwIX9PCg1ciUGCIoUQklNEfbCKOGV7y1HMeuhNAN5ZUhZ+4VRcccZoTKoeACCTdVISfyMWcxmLuoWLeFxSPtdCqw82AFCkEEJCjTZQNpqhrBcr/vVeo/L4tNGVFnvaZ+KoSvz6ytMx/YTEasoZS0HuJaKzpSTpoiUlVIiF3IoKgi0Dgt06QghJgTaeJNOTtxGx3jZNHjMAP/7MKZ6eWzbPZ7qYmxXdsXjG2kPSR/ysCh0GmWcaihRCSKg51h1VPY/GJWxqasPvV2xFU6v9Ut+tHT1eNw3jh/dXFUHzAlmkZEqM2XH3AMnVkknwUYkUunsIIcQ/nnu3UfU8FpNw4/+uxX+/8AHuevY90+O0bqJ/as6TDn7KB1mkxDNmuTAOnNVyjCXyQ4MocBk4SwghPnKkS21J6YnHsftQBwDgrY8OmR6ndRNpLTLpYNf64IaigFpSptzzkiPLFckecYoUQgjJDNpYiBVCzZAei+qr2gBbPyZ9P4b/ZExKpsriJ7AKnJVZ+vJWfxtDPEH8rgdco1CkEELCjVakyFYUo9dEeqLq17wM/JR8dPhkOiZFIeCTGbFPTKiR4nXMlNdQpBBCQo3VZG31Wo/WkuJhfZVMuHsyVyfF/nWY3xMO5BTkoLt6AIoUQkjIsQogtVpsT+sK8sN9YsdF4pTC3roWmSqglnT3kFxBTpEPemYPQJFCCAk5WmuJeOdvZWzQunt6PHX3+IdsSVnxwX7TmJvVH7bgaJc3gcBJq1DqCS34Ux4BkpaUIlpSCCHEX+IO3BEiWnePp+6T3jb5caN6Zs0g5fHBo92G7pir//gGbnr0bU+u58SSQndPOJCthkEv5AZQpBBCQo42lsTuRLm/vUt3HkmS0B31zu3jxxRw3JC+KO1dtDAal0zjblZ8sN/T64bAM0BsoiwuGIIPlSKFEBJqtJYUu4aVZ9btUT2PxuO47k9v4dQfvYCVm9Ob4P22KCjBszHJ9wUVnQTOknAgZvcEHYoUQkio0dY7sZv+W6Qxde9v78Irm/ejKxrHa1tb0mqTkzgONyTTkOMZW1AxBDfdxCZ1G/YBoEghhBDfcXtXKAuJkl7Xyb/ea1JeC/rQXVSYaHMsnglLSuKvH5lKJDvsa0u4OhtDUCGYIoUQEmq0IsWpd6LYQNx0W6Qu28HPYm6AuqCb30Xd5PdCS0oukfhM53/ypCy3IzUUKYSQUCNP0rLYsDtly/vJVgnVOdO0TvhZzA1QF3TLlLuH5A7y9zME3h6KFEJIuJGLucliw7ElxSAN06uJ3y8XiWhJ2dTY7ss1ZMz6MwTzGzHB75gpL6FIIYSEGtmSkixMZU+lyAO1USxLdzRNS0paR6dGWQk5Fsfr2w/4eq0wTWjEHn67I72EIoUQEmriaa5DUlRg4O5J05Lit7tHtKT4HdCqxKTotpOw4vf300soUgghoUaOHykSAmfFct/mdT56Y1kM3D1m5ead4tccIAurWFzyvY5JmCY0Yg/5G1MQgg+VIoUQEmpkS4oYkyJaVT7cf8TyeCMLTE+6gbMZzO7x26KRLIsf/AmN2EP+zYThE6VIIYSEGiUmpVDO7pFQUVakvL7mQ+uYjWKD7J60LSl+Z/cUytk9cVNLipGFKB3svBcWpw0JIbKOUaQQQkJNTBc4qw7yNHX29L6grTwLpJ+CbNQOL1EsKTHJVBgYxdq4gsIj5wiTdYwihRASapLF3JLuHid39EaTefrF3PxFrJNidi0j8eUGs8BZo7OH4c6cJOO0wvB5UaQQQkKNNgVZEv4HzAWLYkkxiEmJpitSfPb5q2JSTN6fkRvLDU4CZ+nuCQdh+pgoUgghoSaujUlxaEkZUF6i25Z24GzSnu4LYnaPdhXo5D5eWVJkQnDbTWwRpto3Ral3IYSQ4JIsi9/r7tHk1pgFlsp7TR4zAB8/biCee7cRhQURNOw67GEKsv8VZ83wypIio53PjK4cgjmPIJndw7L4hBDiM0bF3ERhksomEokA3zzvePxj7nTcfsl4ANaTvx0yF5Nint3jWUyKA7MU3T3hwGdDn6dQpBBCQo1SzE2elCV7IsFoQpXTdrc2H8G8x9fhlc37XbUpkxVnzd5riVcxKb1/wzChEZuEyN1DkUIICS2dPTFsaGwDkJy4JagFSKq7e9ElI7pInmnYi/964p202udbxVmlToo+cHb6CUMS1/bo4mGKXyD2UDK2QvCRUqQQQkLLxl6BAgA1Q/oCSLgn7LgojPY4YVg/1fP97V2u2uV/xdnE0N0T0wfO9i0tTLTBoyaYWVJCML8RExThmd1m2IIihRASWuQJtLykEKMG9LHcxwzxbrK8pAjfmFGjPO9TXOiuXX5XnBVjUjSv+RWsG4a7bpKaRXWb8a/3mhJPQvChUqQQQkKLLAaG9CtNbgPsZfeYqJeLJlQpj0cOKEurfX4JBlmkPP7mLt378HzeYTRszrD3cAd+++8tynNm9xBCSAaIRJIxE5JWpaQ6VvP8rHGD8cCsKQCCu0qsnHzUFY1D+2YjyfhhT1DcPcHsCuKAY90x1XOWxSeEkAwhD7d2NYpV3Ejf0iLlXG7wu+z41WdVAwAKCvw3dCTjF9Rvxuiy/q/JTNJBW/8nDMLTlUhZsmQJampqUFZWhtraWqxatcpy/0ceeQSnn346ysvLMWLECHzta1/DgQPWK5MSQkhqhHqosgXBZuCs9jijbU7OY3jutI42p0CwGuncPZBf80YwKMIjABOaJEn485qPUL/jYLabEkq0C2cG4CNNiWORsmzZMsybNw933nkn1q1bhxkzZuCSSy7Bzp07Dfd/9dVXMWvWLNxwww14//338cQTT+Ctt97CjTfemHbjCSEESAy24oCrjkkxOchiDo+k3sUSv+0JomtLVxbfp5nHzmn9dh+s+KAZP/z7+7hi6Rpfr5OraBfOzElLyqJFi3DDDTfgxhtvxIQJE7B48WJUV1dj6dKlhvu//vrrOO6443DzzTejpqYG06dPxze/+U2sXbs27cYTQvIbIwGiq5PiQjJE0gzskHwO5DATZOJrnsWkmJzI6J357e75sPmor+fPdXTunhDYUhyJlO7ubtTX12PmzJmq7TNnzsTq1asNj5k2bRp2796N5cuXQ5Ik7Nu3D3/7299w2WWXmV6nq6sLbW1tqn+EEGJGJBJRCQs7k2Wy/od+oPYq+DQTU4A+u8fbq7KYW+6gdfeEQKM4EyktLS2IxWKoqqpSba+qqkJTU5PhMdOmTcMjjzyCq666CiUlJRg+fDgGDBiA3/3ud6bXWbhwISorK5V/1dXVTppJCMlDksJCXYXVTWiGYo1wGdfht0VBFX+jze5JNsITWBY/d9BaUoKavSbiKnBWq6glSTJV2Rs2bMDNN9+MH/7wh6ivr8fzzz+P7du3Y/bs2abnX7BgAVpbW5V/u3btctNMQkiOI87DVi4Qw2MtMnDStaT4XcxNCY5V/hNe8/iafmcqkcyhd/cEnyInOw8ZMgSFhYU6q0lzc7POuiKzcOFCnHPOOfjud78LADjttNPQt29fzJgxA/fccw9GjBihO6
a0tBSlpaW67YQQYoRKoGhykN0JDaHmShr4Xf3VKHDWr4knDPELxJoebXZPCD5SR5aUkpIS1NbWoq6uTrW9rq4O06ZNMzzm2LFjKChQX6awUF5bgjn1hBCPENNyBWniyt0juI7ckMmRTRc4G4kYbk+XMExoxJpoPA+ye+bPn48HHngADz30EDZu3Ihbb70VO3fuVNw3CxYswKxZs5T9L7/8cjz11FNYunQptm3bhtdeew0333wzzjzzTIwcOdK7d0IIyTtEAZLMatGvDGx4rMVryZiU9Nrl9yRg972mdQ3eS+YM3dHwZfc4cvcAwFVXXYUDBw7g7rvvRmNjIyZOnIjly5dj7NixAIDGxkZVzZTrr78e7e3tuPfee/Ff//VfGDBgAC688EL84he/8O5dEELym4jaBaKqk5LCpmAUT6cqsZ9es3zB7L2K1/S6mFvwpzOSijC6exyLFACYM2cO5syZY/jaww8/rNs2d+5czJ07182lCCHEFHEiFu8K7UzQVrukP3b7nN0jBM7q3qvngbP+nJdkHr27J/gfKtfuIYSEHm1mj62KswbHKtvSLIvve3aPpSXF25gUq3oyJFzo3T3BhyKFEJITqCbutGNSvJnoM3GnqhVSfgsjmepB5Wmdrzsax29e2oK/vL4jrfMQ+0Tj4XP3UKQQQkKLcZ0UZ9LCeoFBF41K4zi7JNusD5z1et4xsyY9cN0UXHDyUMw4cYir867+sAX/89Jm/OCZ97DncEc6TSQ26Qlh4CxFCiEk9CTK4icea+dUs0nWVtyK6xTkDMWkGK2CnKbA0mIWknL80H7409fOxOTqAa7O29EdUx4f64q6Ogdxxn0rP1Q9pyWFEEJ8xGyBQScYjdNyufB4mhO97zEp8F8QQYmv8W9GS7efiT36lBSqnodAo1CkEELCTwRJ64K2AquZRcEyJiXo7h7lOkbuHjmexqsU5N7zms1oLsWL2LqYTZXiuyDLccysbkGGIoUQkhuYuXtSHWZYJ8X66Ide3Y4fPfu+uStJaZL/s4B2fvdvvaDUOBFn4r5aYUn8QbeEQghUiqs6KYQQEgTEO2t5uLU94VnWSbEu5nb3cxsAAJ+dNBKTxww0P08G3D3aN+J5TEoGBITdzywMgZ5BRidos9MMR9CSQggJPZGIeZXYlHVS0lgF+ZgQ/OnkmuljHjjr9dSTyt0jbnYiykSBadfdQ9IjjJYUihRCSHgxDJxNP74h3dLyfpeSF4vN6RcY9PZayS5IfWK6e4KN3+nqfkCRQggJPRHBEaCp/O0q2NKuJcXuebwmuZiiQTE3+TXPUpB7BZfX4kd4TENKZshU4T8voUghhOQE8oBrO7tHTq01PpvlsSnJ4KTr96Ws+8mbiY7unsyQqSBrL6FIIYSEFlXFWQ8DRtNeu0c+j08G9Yhg6slYdo/HJxb7Nk6RkhHCGJPC7B5CSOgRx1qte8ds+kuu7muQgpziWDft8hJrd496NehtLUchSRKOH9rP1aSUiXARapTMEMaYFIoUQkhOkCzm5sG50gxK8Ttt18rSI772p9c+UtKl/+uTJ2HuJ050f03T7elPdTEGzmaEMFpS6O4hhIQWccw1i0kxMwVYZeBYWVLsrfnjL+IqzVZ3x5v3tSuPtzQfcXUt+f36lzVEd0+m0ImULLXDCRQphJCcQjvfuZn+rCwVTm76/b5TlSS9e0u8ZlTojHRjgL2OrxHbbTcFmWXx08PvdHU/oEghhIQWw0nLoevAsJibYKnQX9NGu/xeu0eMwbFYm8hLC4VpMTeXE53Ybmb3+I/VOk9BhiKFEBJ6IpGIYkF4Z3er6rVUKcjG5zPfR7zrT7V4od9TgATJ0gqhsqS4zVTKSOAsy+L7jVEXF4SgOylSCCE5gdvx1mriM7LUOKmO6vvaPQZl8cXXYp64e+wf6eQa6oqzDg4krjD83lKkEEKIfxgFzur2MZk6reZFK0uKHY3if3aPEDirfU2YeVRuFNeZSvI13R1vel7hMd09/mPUxWGwTFGkEEJCTwTmA667BQYtYlLsiBShXb5ioFKS2dOSJnDWn8J0bt+jqpgbU5B9x6iPGThLCCE+ks7UZhmTYnEBJ5O9X9k94ll12T3CYzvxM7av6eOERpGSHUKgUShSCCHhJxKxcvekONbkfIljjWJSbDQoQ9k9icBZ49cAbeCsu2v5pR/U7h5/rkGSGFtSgi9TKFIIITmBi4LvFucyX2DQXjE3fwqgyYjt069sm3wtJiwJ7b7GiPV7cf0eWcwtoxjGpARfo1CkEELCizhBm1pSXMWk9B5rsL+T+dSvOUBsnz5wNknMQ0uKn0GWdPf4j1EfMwWZEEIygJs7QjsxKYZWEzsZRRmac40KdEEQMN6kIPee1vPsnmSLuHaP/0iGLrXgqxSKFEJIaLGzEokrN4elJcWGu8evmV0+rXgti/28sKQYXVO13eV7dLN2D8viu4fZPYQQkiUiiJgPuCmrwuoPtIxJcdQunxBFlG7RODEmRXzNbcVZf4SBeFaGpPiPoUjJQjucQpFCCMkJvBxwre4wjdJ673luA77x57U4eLQ7sd3nO35RiPie3WNwXq+xW8wtDMXHgopRDzO7hxBC/EQVH2Lm7jE5NBkRqiNitJ/yXDy3hJYjXXjg1e2o27APL3/QrNrH77L4chtUrwmPPYlJUQ70eBVkVVl8mlL8hpYUQgjJEl6LAVHwaMd2daVUIBpLPu/RFPzIxJ2/2do9gKSJSUnP3eNn4CxFirfE4xIee3Mnnn1nr7LNeIHB4MuUomw3gBBCvMA0JMVkArQqXW8VmBq3sADI7hW/p1x1VVnta8Zr96Sd3ePyeDuwmJu3rN1xCAueWg8AOGPMAIweWM7AWUIIyTTi3biX6cAqd4rW3SNO95LGIiCLFN/dPaKlR1vMTd6uTu1Nvyy+ydo9Lt8j3T3+0d7ZIzyOAghvcDJFCiEk9ERgPVkueXkrrnngdXy4/4iyLSkkzLN7AHeWFKVKa6qGu8RsfR4toivK9Rzl4EAnWkOV3RPWGTSgqOKmeh8b9TEtKYQQ4iN2VyT+5fMf4LWtB/Cn17bbO7HKkqK9pto6Ib6uzVLJROCs3t3T2zZoM5FcxqRozusZQntYzM1b1Inn5n0bhmwpihRCSPiJRGwNuE2tXcpjy5gUi+wZKzeF/DyTc67OkiI0PuqBhSJV4KzY725FGYu5+YdiSWFMCiGEZBEbMSltHT3GO1mcSm9JER5DfdeqDZz1605V5Y4ysaQA9uuPWOHkvbh291B7eIrW2geEd4FBZvcQQkKLOCmaF5xN7nS4o1v3uvECg+ajt9aFokpJNvO9eI1FYG8ycFZSTUzpB86md7wWsT3v72319uR5jpG7x3iBweCrFFpSCCGhJxE4m3rA3bzviDKpW8VoWFpSLF5TLCk++3vEt6qN5zBPQXZbJ8V+W9yy4oP96Z+EGCJ/fkbfyeBLFIoUQkiIsTPtasfmhl2HVc+NLSniNYzjThKPjRfK87u2iCq7R1NjRLGkADjSFVW2uy+L74/gEifNAeXFvlwjX9G6JIHwunsoUgghoScSs
S8Imlo7U5/PIuZDWxZfRBuomom1UcysNoeP9Wj2c3v+xF/vK84S/xCFtFUwd/BVCkUKISQnsDuJtnWqJ2/DVZBVlhQ12rL4IjHLCcE7RPFjNwzGtbvH4Jp2rpfyvC7iZcKQMhs0mN1DCCFZQhQMZhOYdnBu64j2Huv8GonzqV9T1UmJZd7do1tg0MPKu2bX9Bq/Y3jyDXV3mgfOhkCjUKQQQsKP1WCrHZzbO1OnIVtaUoQt2nFfF8SagWJu2nVvnK4GnRKf9INk8pikj1Fwd1gXGKRIIYSEFnHctWtB2HnwWO+x5kXKrGJSxEDVuCSpREssU9k9qvbZNQm5u5ZVP1ltT3leWk8yQjJwlu4eQgjJCpGIebSCNmbjmYa9KYucRdT+FBWWlpQMlcUX0U4+pmItzRRkX+NBqFc8xSjjzDC7JwQOH4oUQkhuYDre6kfn7mjcMkbDKuZDWxbfcu0evyrOWq7dY+LucW1J0V8z9d7urpF6P6oZO6iEdO/fsFpSWHGWEBJa1BVnTQJn44abLRHjOuRrtHX24Mv3v44NjW3J16CeYJPuHufXdItZxVndfmme3+v5TJ3dQ/HhF1YxKWGAlhRCSOhxEjgLJO40k/U/DFKQVfsmWL+7VSVQAP3kqoiUFHEc6aIK7LWbguxTeo9aHNp/w0Z3+8QbjGr5GFacDYElhSKFEJITOLEgOCn1niyjb3wecfDXZvf4har0vc1rurak+Lg38QeDDGSTirPBVykUKYSQECPWSTHG2JIiWDsMjlG5e5S/+vNoB/5oFtw9tgNn3cak+BQ466aYG3FOXBEp+k4uCL5GYUwKIST8RCIWd4WGFhD7s6KVT1/SSJe4RqT4dafqJHC2INK7xpCH17SzPRXqOilUKV4ifr+1qyD3Ky3CqAF9MGpgH1RVlGWlfU6gSCGEhBY7WsPUkqIICePjIpFed47FBPr+3jZFmAAGa/ekbp4r1Ks0W1tSIsobcS4E1BV9/YNl8f1DK7JHDeiDF249N3sNcghFCiEk9EQQMRUbRr54O5NiBL13+/Igb7DPo2/sVF9LEzjrF6KFpieWouaLD9f0Arp4MoPirkwhyoMKY1IIITmB+cJ6xhuV+h8mR8qTspO5VBuT4lt2j9VrEa27x/n7kPFTSDC7xz+M0rtli2IYSuGLuBIpS5YsQU1NDcrKylBbW4tVq1ZZ7t/V1YU777wTY8eORWlpKY4//ng89NBDrhpMCCEy4uRmbkkxTkFOhXy6pLk89TG6INYMuSf+csOZwjU19G5wIzhU/ev8cOtzq4NS7B1DOWMLbVXkptZOfOuv9QCAgpCZJhy7e5YtW4Z58+ZhyZIlOOecc3D//ffjkksuwYYNGzBmzBjDY6688krs27cPDz74IE444QQ0NzcjGo2m3XhCCAHQO4M6rGJmIyYlsZv9iXHVlhbLS3qFts2D+paY7lvg4n3IqGJSfK2KT/HhFxIkPPn2bhztjgEAoincg0HDsUhZtGgRbrjhBtx4440AgMWLF+OFF17A0qVLsXDhQt3+zz//PFauXIlt27Zh0KBBAIDjjjsuvVYTQgjSCJy1FZOSiEpRLCk229TU2plSAKWL1qUjWmy011TcPWlbUqzdYunA+BRv0aZ3t3akXvk7qDgy/HR3d6O+vh4zZ85UbZ85cyZWr15teMyzzz6LKVOm4Je//CVGjRqFk046Cd/5znfQ0dHhvtWEECIQgVN3j/pY05PCuVWkvTM5IWTD+6+9ptZt5dmJ04Sl8P1DK1I6e2LZa0yaOLKktLS0IBaLoaqqSrW9qqoKTU1Nhsds27YNr776KsrKyvD000+jpaUFc+bMwcGDB03jUrq6utDV1aU8b2trM9yPEEJktHPokH6laDnSZVIpVlvhRI/sJlFSjG3OqT2x1Of2GlGghSVwVnWdzFwmL5EQbpHiKoRG+yOQJMnU5BePxxGJRPDII4/gzDPPxKWXXopFixbh4YcfNrWmLFy4EJWVlcq/6upqN80khOQ4ohgQx6DvXnwyvj79OAAmKcjCY9OYFJemg2g8ntV0T32dlMRfN5YLdf+aXM/xWeX2iI8pU7xEVWBQktAVdbHKZkBwJFKGDBmCwsJCndWkublZZ12RGTFiBEaNGoXKykpl24QJEyBJEnbv3m14zIIFC9Da2qr827Vrl5NmEkLyDCsxsOKDZt02O7XNkpN771+b9/s9MXFCyIxKUVlSdK+5b4N6lWln+6fc1+QxSR9VxdmQu3sciZSSkhLU1tairq5Otb2urg7Tpk0zPOacc87B3r17ceTIEWXb5s2bUVBQgNGjRxseU1paiv79+6v+EUKIFeIkWhCJKJaQWFxvDXCUguxwCk24e7KIzt2T+Jv2Isgs5hZSJHT25IklBQDmz5+PBx54AA899BA2btyIW2+9FTt37sTs2bMBJKwgs2bNUva/+uqrMXjwYHzta1/Dhg0b8Morr+C73/0uvv71r6NPnz7evRNCSN6hutOPqB8bzanKJtVaNimKuWnKiqeiJxZX7mSDUDcrGZPiXBWs39OqPDZ7K9p+dwPL4nuLykolAV3R8FpSHKcgX3XVVThw4ADuvvtuNDY2YuLEiVi+fDnGjh0LAGhsbMTOnclS0f369UNdXR3mzp2LKVOmYPDgwbjyyitxzz33ePcuCCF5TQQR1QQmL6qn2keY3+zMiUlLijPEOhSZmlJVKcja19KwpGzYm0xaKC8pTLm/M3cPTSm+IakfipaUsFmwXK3dM2fOHMyZM8fwtYcffli3bfz48ToXESGEpItZAGxCsugX+1MW2wNSWzs0Aad2B/fuWDyr06/hAoNwm92TOOqyU0f47u6xSsAg7olLEt4TLGJhI2QFcgkhRI9ZRot6W8RRzZB0LCnJ7J5sBM6axaS4ye7Rn193bZN2+AGtL/YQ+2nv4Q7V6txh04EUKYSQnCMS0a+KLBZ8EyuZmMdaaGJSbF47Gk+a1rNSzE33vtOvk2JXbLnN7nF6LLFG7MvG1s7sNcQDKFIIIaHFbG2ZCPQWhUhEmLDtWFLEKFsHdEcz7+6JmDwGkpYUN41KJeTSQvMhUKP4Q9yoUFCIoEghhIQeUYAAwsQs7oOIqtR9KiuBm1WQASRM61nM7vEjJsXS3ePyTeotKeGeTIOE2JOxkPcrRQohJOcwcvcIGsXWhOh2cheLuWVFpBhYkID0REAm3obT1lHUmCN2TYyWFEIIyS4RqEWJsSVFnY6bMiYFyX0B+5NoNoq5RSz8PV6s3eNHADA1RmagSCGEkACgnqj1k6rTodpJdVqRRDG33nMEoPiYF/rC6hSuC7hp+tWpaKHIMUfs21h4i80CoEghhIQY9doy6pgUo7t/VeBsyngLdxVno+KskDGNYlHMrfevm0k9kzYhphd7h/hZx0Ou5ihSCCGhR1sG39CCITm763dbqfW1rQeyOuFqxVk6ZfGlVD6xNNAXc0t9TBAsU2GD7h5CCMkSZhNvQcR4XhUXDUxVqEy/wKC9wb5+xyHB3ZMZrFZBhkuxBYhxO+bvxO17dBUjIxwV7qnXX1TZPRQphBCSfexU
PtUWaLM8n2ZytzvJ9y1NrnGTjTLv2ksWOHjPWpKBs2k2yuLcxAeEzqVIIYSQAKBejVefgixBUpW6TxXc6ta1IJRJyRhWAs0o08kusuXC7inSed/OA2fDPflmCm2dlLB1G0UKISS0qAdcdfCo4cTqoGaIPNn/4929iMftR3SI+2alLL62TooSAOw+JsUXS4o2u4cOHM8QezLsFWddrYJMCCFBQutWMXKzSJKzRQO7ooksnftXbkP1wHIM6ltiqy3inWsQFnNLplKncQ4rueU+B1n91Kklxd1V8wJVMTdNxwbhO+kEWlIIITmBtpibYQqyEJ+h3LmbDNqjBvRRHtdt2JdyEi3s9avE4lJGXRERzXv1MibF7JxeQJGRGbQxKXT3EEJIhlDXSREep5xUU4/UMz9W5eB8QKGBGMhEymyqK7gtSgdkNu7DeVl8X5qRE0gMnCWEkOCQKHkvFnMzCpxVi41UacJGgbdWFPSOppl290QiEY1AM6mT4lN2j+sUZF1AZ7gn0yBhlYJMdw8hhGQIcfg1q7RqtM1eCrKz0byoV6Uk3D2ODk2LVO+7QLGkOEeQWy6OTnFubUyK0+PpMLIFK84SQkiWMYvDEJEkSbWycarF87TnSDXWi6m+8sSQiZtWXbVdnWoJZp0U4h/iZx2lu4cQQrJPuVBErW9poeHM6sySoj/OikJBpSgm9ky4e0xSjmWSzXIRk2KjTopHyT0si+8huVRxlinIhJDQIsYxDKsowy+/eBpajnRh+glDsWztLt3+YhBpqlom2iJoqYb6woLkPZ827dNXUszboak4a6N9qrL44Z57MwZFCiGEZBl5Dr1ySrVum4xoE7AzwRm5jKwoFOzS8sSQqewe8TraZjupDaPFzto9bmExN/9gdg8hhAQAq+HXSGOI6/HIA7ldLZIq+6Qwonf3ZCa7R/Nc83rSkuLelGKd3UMXTJBh4CwhhGQZO5k44lht567dqSWlwCAmJSOBs4hYBs56U3HWe3TZPeGeSwMLLSmEEBJAjO7wjQJnzSwBBaqJP/U0XSQckMm7V33TtEsEuD93Jqc3FnPzDlVZfFacJYSQLOFwwHUyYYuWETuHiftHFXdP5ivO6t0/XgTOmr8Pr94ii7l5h2gpzGgQtw9QpBBCQo9h4TajmBRhz1SZK7rA25R1UpJul8zGpGhTkDXt6h3lXa2C7FAFOtlfV3HW0ZUYaGuXeFz9PGw1byhSCCGhxWqishIuflScBZJxLPEMxgEklgQQnutES7KAnVN8TUH2/pSkF7p7CCEkQNidRJPpuMlKKeZ1UjQVZ1NMqxEkM3yiGQyc1V7E1P3jxt2jnNPC3aN6bP8dpxs4G7bJNpOoirmFvKMoUgghOYlxCrL9+AzLUvMmKIsMZtLdk+J1JQXZxbmdWlLSccHQfeMPXGCQEEKyhNubRHHtHjN0FWdT7B+JJAVBJu9eI5GIysVjmoKcRkyKLynIWlHCsvieQXcPIXlOLC7h0NHubDeDKBilG5tPaJI6B9n4jC5uOQt1lpoMZPeYiBKZdCwpZue0+5oVrrKNxLL47i6bF4j9lMn4KD+gSCHEBbMeegOTf1KH9/e2ZrspxAyrirNIPck5ze6JIKJKQxav5ycRWMeFGNWGsU0G57dwT6XBRWvVo7uHkDzgta0HAACPv6lfxI5kDqcTm1F2j3kxN2ejeSSiXgkZAAaWlzg6hxt0Fh+dZUW2pLhx95hcwwPcrIKs3p+yxgwrd0/YoEghhIQe45ooRtvsT7YFqtExYmuK18ax1Azpa/t6btGlIGteLzAQZnZR1jeyvL47AaPL7qEtxRcYk0IIIVnC6YCbnMyllAsMFmiCUe3cuYvHTB4zwFnjXKKPSTF2OaWT3eNPaE16k2fI5tqswRRkQvIY3v0FA+PCbfbW7vES0d1TUpip4dVaQRSkoVLs1EkhwUMU1NrvOmNSCCEkAFgJFzFw1n4xtxTXi0RUx5QUZWZ4jUTUIkLv7kkjJsVOnRSPsnu4wKB3WPVN2PqNIoUQElqcTrxOLCmOA2ehjmMpzZRI0T7XbUj8SSfl15c6KbqKsyGbPUlGoEghhIQe4+qy5vtLkqTcuptlruiCUW3MoYXZsqSo2qp+PxmtOJuGznB8LDWNKVZdQ3cPIYRkCMcTm4PwDG2mTspTR6Cqk5KpmBRdXRRdMbfE33QsFX7EpDCeyz/o7iGEkABhNIlapSULhhTTO0uthcXOpJo1S4r4XPt671+/5ia38iVskyXJDhQphJC8wUlhM20KspZ+pUWacyMrgbPFWouNWVn8dOqk+OAiSLuYGy0xplj1Dd09hOQRvBvMLpa+d8P1fIRjUxQqS1UWf82CC/H8vBmq66ndPYUWrUufq6ZUAwC+MaNG1Vjd+05jUvK1TIruWvwxeUUuuXuKUu9CCCHBxm7gbMSB76MgxS1cRVkxBvSJqbaJRg2/LSk/v+JU3HHZBFT2KUZze6fpfunEk0ipfGJwXzJfn92T3vEkN6ElhZA0CJvpNOdwOFPJE7aqToqNmJSISVl8VVaN5jx+i5RIJILKPsWWbQLUAcBOg2d9TUGm5cQ3mN1DCCEBwu7Aa7TAoOm+ds6neT73whOVx5mqk5JoR+piboB7a0UmJjZmIHuIxQcdNgsURQohaRC2HzxJIEFK+dnpKs6m2D8C4LyThirPh/Yrddm69DBbuwdwLwSsXEau9QuLuREbMCaFEBJarM3a5tOnej403s9WxVlV7m8EZcWF+Pu3z8G2liO49NQRqY/3CCu3U0RlSZHgRlZkJLvH6fEUNabkUs9QpBBCQk+qTB5lm2rtHuuhXDvxG+1vdN3Tqwfg9OoBluf2E727J/k4SOvjUGT4h1XXMiaFEEICSrKYW3IUNw+ctXE+ben8LGG5/p8qcNbpmVMHzno16Tmvk0JSMbBcH1gdNm1IkUIICS1O7xiVwNkUxwLOY1KCivg+jnVHHR3rZ+CsvjtD2sEBRLb6FTpd2yGAUKQQQsKPw+weO4emqjjr4LIZRdtWMcvoxff3OTpXUqT4sHZPmnVSiDlyXxrFVdHdQwghGcIqrsE4TiVpSkmZraM53LhOSjBGfKt2lBYVom9JovptR0/MdD8jglbLRPzMKGpSYyRSwtZvFCmEpEHIfu85i3GQrPk2cfI1m+DdrIIcDPQNOf/kYQBcFHNz6O5xcvZ0s3uIOXJf0t1DCCEhQlwF2f7evRYYg4PUawGl07L0SBnY6nKuslMnxS1awWSn/yTVY8oaMxR3j8EMHxwxbQ+KFEJIaLFeYNDiOOFAi2oqKa8fxAFfVydFeOw2Bdl2RV8n59Y9p+jwmrx19yxZsgQ1NTUoKytDbW0tVq1aZeu41157DUVFRZg0aZKbyxISOAI4R+UltmNDhDopTvjn+kbsPtTh8KjMkSoV2pkFyfz4VKQz/9mypKhNKcQEJbsniCraIY5FyrJlyzBv3jzceeedWLduHWbMmIFLLrkEO3futDyutbUVs2bNwic+8QnXjSUkaHCczC6OU5CV4yTF3WA+jqtfuP+VbQZ7hGASiERUReyc4Kt1gz8
e/1DcPXmY3bNo0SLccMMNuPHGGzFhwgQsXrwY1dXVWLp0qeVx3/zmN3H11Vdj6tSprhtLCCFGGI+75gO0vfnRo1UIs4wYkuK4yqsNd4/bSU8rgOzFpEjCY5IKo7jZnHb3dHd3o76+HjNnzlRtnzlzJlavXm163J/+9Cd8+OGHuOuuu2xdp6urC21tbap/hBCSLqLbw8+g0EyT6j24FxKpz+/VpMeYFO+Qe9LW+lMBx5FIaWlpQSwWQ1VVlWp7VVUVmpqaDI/ZsmULbr/9djzyyCMoKrK3VNDChQtRWVmp/KuurnbSTEJInmC9wKDRNieDdup9gzgHGK2C7LaZqV1i7nFTzI11Uuwhf255m4Ks/RFIkmT444/FYrj66qvx4x//GCeddJLt8y9YsACtra3Kv127drlpJiEkT3CefSJ5UvI9ncwZT7Fbx8SH9XFcW2koMnwnFywpjlZBHjJkCAoLC3VWk+bmZp11BQDa29uxdu1arFu3DjfddBMAIB6PQ5IkFBUV4cUXX8SFF16oO660tBSlpaVOmkYIyUOsK84abJNjUjyaIINScVZE26IIxMBZt8XcvHf30L3jH8k6KcH7fjrFkSWlpKQEtbW1qKurU22vq6vDtGnTdPv3798f69evR0NDg/Jv9uzZOPnkk9HQ0ICzzjorvdYTQogJlhMrvJ8ks7oKcsT4sfzcbQpyMibFf5y3jSLHDKXibPg1ijNLCgDMnz8f1157LaZMmYKpU6fiD3/4A3bu3InZs2cDSLhq9uzZgz//+c8oKCjAxIkTVccPGzYMZWVluu2EEOIWu2OxHADqmSVFeBzoKdNRVpPB4X5k92hjUoLdg6EkFxYYdCxSrrrqKhw4cAB33303GhsbMXHiRCxfvhxjx44FADQ2NqasmUJIrkC/enDRjsWFBRFhspZsfHbhqThrWRYf7jOY7KQsu3f3OD+P2B7+9syxcveErd9cBc7OmTMHH330Ebq6ulBfX49zzz1Xee3hhx/Gyy+/bHrsj370IzQ0NLi5LCGEGGInNuSuyz9m6PYIitDwi0jEfSxOJt09xDvyuuIsIYSEAXF8fubb52DW1ONUxdxs2Els7BG8ScCypolTl4qNwFnv3D3OjgmZQSArcIFBQgjJInbL4stWb6MJPB2hEZQB31pERNIInE1dJ8W9+0BbcZaywysUd0++LjBICCFBIpVWkKfppNvDTkxK7pCutcP2AoMO+tSVJUV1fB59gC7J22JuhBASBKzcF6KFRJ6kjavQGh9fXlKYTtMyil2R5nRiV3b32GQkSRL+vanZ03MSPblQzI0ihRASfhyOxYnJ13rCHj+8AmXF1kNkUOcAbd2UdBcB9PptHjzarb8Wy+J7hixGKVIIyXs4UgYVMf1SNnsrFgXhczMbxyORCH746VP8a6CHWNYxQcR9do8HSwcYEYsnGzJmULl8NW8vkscoxdxyYIZ3XCeFEEKCgtWkWzt2IKafMATlJYU4cVg/AOpUXDsTdqrJWRV0G4Jbe9fF3Dy2pcjtKCpwL6BIanLBkkKRQkhahH8QyAWMJtF+pUX4643GS2+o6qRYfIap4g6DOgdEINQ4ichb/Flg0A1xYXVlJ13IqrT2yNu1ewghJEg4nbKSC+3ZXOE3JCLUqp0RCBYk1wsMumxYyvNGVJ8J8QYWcyOE9MKhNQjYHYuT9UJSx6SoDnD3ctbQ1k1xWycFPgXOKpYU8UoMnPWcHDCkUKQQQvIHpzeWqXz6dsrxZ4JUCwCmXSfFJ0tKgarQHFWHV9DdQ0gew8E0ODj9KJQJEcnPMQ1DSnAtKarHESGryRnJtXu8rpPSe97E6oeqa9lpT+Ixf4dmKNk9ARHR6UCRQohDqFGCh+Oh2OZnaLT2SahxXMzNYclZu+dFso5H+KfR4JIL2T259hMkxHfiVCmhJRmkKWmyX0z2TzGFBnUOMCvm5t6S4i1xA+1j62cl7MSfoTl09xCSx3BsDA5OTf5OA0hT1kkJiEpJXc8lgduJ3e77tPt5SGIKsiAciVf0ZvfkwAyfA2+BkMzCO7jgYTu7R7Ao2PkcS4tyY4h0KwT8+q4rlhTR3WMnu8fkMTGG7h5C8hBxoKdgyS7O+9+oqJn5QD79xKH4wuRR9tritCk+ol5c0f1E5dTdYz/CRI5JCa7LLMyI2VNhhyKFEIdQmAQPu5OjWNTMTpZWv9Ii/OKLp6XTtIxgu5ib47V7km4ZW/vblGpqSwqLuXmN/DkXMiaFEELCg1FsRqoJ2O7daKCmA5PGuF67x7c6Kfptdo5LPKasSUUOaBSKFEKcImb35IA1Na8QPy+7U5zdgT6bU2bKYm5u1+5RsnDsCjV7+yV/QxGVdYt4g5LinQMqhSKFEIewNHfwcCoWxY8tZcG2ECpRbYtdr90Df9w9ri0pYjyYvSblJYq7J4TfXS0UKYQ4hINjcHBq8lfu9CXJ0QcZ9LE+VdVct833L7tHn4JMnPGfTfvw7Dt7LffJhcDZomw3gJCwQV948HCagvyTf25EdzTeuy31wQWRCGIh/dwTKw33PnHr7vFpshMnUYe13PLaiilJEr7+8FoAwJnHDcLwyjL1671/6e4hJA/J47Ex9AwoLwYARaDYxc5QH6RJU6spknVSXJ4vveboEFdB5gKDzukSvr+HjnXrXqe7h5A8hmNpcHD6Wcy76CSUFauHPTvDeNDN5laWDlWwsNO1e3yS5KKFxknJfsniWT7R2ROztV8OGFIoUghxCu/4goi90biqfxm+dd4JaZ++xKASbZB0jKqYG9yXxVetVuwh6pgUe8es2rIfS1/+0NuGhJTOnqQlJRrTf6jM7iEkj6FGCQ5uPgrteiZ2JkntWP+Pm6br25LF70XKt5D2AoPeTnZyMTeVhSpF4+Y+tk71PJ9/hx2CJaXDyKrCYm6E5C95PDYGFid3+m7uLsXJ9JqzxuDk4RWOz5FJVP0hVHV1jE+WFAipzcmKs9a/rMPHerxuRGgR3T1Wrp8c0CgUKYQ4RXT35PPdXFjRxpfYmcCDPtZbFnMTXnfs7pHFhLtmmSJaUty3LX9JJVL8soBlA4oUQhyiXok1n4fK7ONGJLrJeBCFTZBiT+ySzEB2twqyX2Xx04mXyWdSuXucrrkUZChSCHEIazUEDydjsT491+MLZAFtdo/G26Pg3lrh7doAYuBsTsykGaZLCJwVHxvxqVOG4/TqAT63yD9YzI0Qh6jcPVlsB3GHm2BClSUl6IpFQ0RYH8f1OfyypAgnbm7vcnWOfES0nnRGzd09AHDftbWQJAk1C5ZnoGXeQ0sKIQ5RuXvyeKAMAm7cbW5qnoQtAFFvWZEXGHTq7vGrTkpvimwE6OiOAgCeadjjy7Vykf9d/ZHyuKPbyN2T+Ct/D8K89ABFCiEO4XLxwcPv7J5IiGNSnNQi0ZIMwPQWMbBzcvVAAED/MmeG/XyOB+uJxQ0f5yIUKYQ4hCuxBgc3GtGNVSR0lhTNY5dL9/i2do8Yk1J73MDebZ5eIqfpEQq4/a1+N87/7xXY2nxE2eaXuMwGFCmEOISWlODhJE5Em91jb/6NGDwKEf
LaPS4DZz23pAjiR3a/xR27ojxuVIgQrScfHTiGjw4cw21PvqvbL2xWPyMoUghxiDg28u4vfKQbk2JmVQiU+0HV3uRjx230KZU1LsSkyH3L35J9ogadJdZLyaWbJ4oUQhwSj9PdExTc9L82JsWOKyPoCwxaIdabdW1J8Tq7RzhvgWLloSXFLkZxKEYrDIT3W5uEIoWQNMilO5Yw42QS1a7d0680dcBmiDUKgDQCZ5Wia952QDK7J5ke7dTdk88YLSpoRJizemQoUghxiComJXvNIICr22nRKtK/rAiVfYodHRNUqvqXoiACnFjVTy0pVOvjOENxD/lZcVaOSXGYpBIo91qGMbKkqH4KOdQ1LOZGiENU2T28+wsETuZQUXCcWTPI+bUCqldeve1CxOISyooLVdvTWbtHPIeXyB7TROBs4nE+iw6npEo7VtZcCuh31QkUKYQ4hGXxw40oUuxWny0Igc25uLAAsj4xL5Nv/wu79OUP8d6eNk/apkVcWyaZ3ePLpXIS2+4en9uRCULw0yMkWLDibHBw0/1iTEqRTfUR6rL4QtudfF9/8fwm5bHX7i5xFWTFksLAWdt0p3D35FLfUKQQ4hAxwI8m6mDgJEAw4sKSEi5Zojbze+HuKfK8ml2vJQVCTAp/SrYxSkEWOdKVWGogF/w9FCmEOEQc6DmwZhc3k65YzM3u5KuypIRw3E83y8PNUgJWqC0p7oq55SvxuISYwcAjf8QL/7URq7a0JLZlsmE+QZFCiGPEwNksNoO4QvTwFBXaHMZt7BbU70Jaxdx6sSvm7J5d6SsWc3NMT4o0qH+tb1Ief2xkf7+b4zsMnCXEIerJiCNr2FAHzrqJSQk+qrV7Is7dPXGNYvDekiJWnGUxNyeYBc3K/XG019Xz3NzpmDiqMlPN8g1aUghxCANng4Mby0CBK3dP6n2C7AZyWidF63qx2092u0BcBRmKJYU/JjukyuyR41Hs1P8JAxQphDhE9AdzYA0GzirOugmcTR2TEqSvQsQkG8luG2OaHe1m99h39/RaUgqEFGSHxdyufegNvPB+U+odcwyjzB6ZaCyOrmjidTuVlMMARQohDvlb/W7lcYDmJWITcb61bSEIsJXEDoq7x+Y3VitmbMfu2EQst58s5uaMw8d6cNOjb3varjAQtVBzR7uSiwz2pUghJD8RLSlBunvOR9LN7im0Ofmqs3uCr1h0MSkOj9dmjxR6/J5lC2Q6CwwCQI/Noma5REt7t+lrR7oTrp6SwgKUFOXG9J4b74KQDCIOpnT3BAMnBdZEF49dS0pHTyz1TgFG0Rgu3T123WJ2USwpXGDQMX9v2GP62o4DRwEA/cpyw4oCUKQQ4hgOpcHBzWcRcZHdc6qQJRF8O4oep4Gzksaj4LVIMcruYQqyPazW7Xnq7T0p9wkbFCmEOERVcZYDayBwGzhr15Jy1jjnCxFmE1XFWcFaYdel4taSYvf8QpkUFnNzSGdPQoBos3ckJLPQascOzHCr/IMihRCHqNbIoF0ldIjzre0FBrV15kOE2Fz52/ru7sOo33HQ9BitYPDe3SNbUsS1ezy9RM4iux77lhTqXpOtUWePG5zJJvkKRQohDhHHUqdpk8Rb3Exs6dZJMYt/CdYka97GPYc78Jl7X8MVS9dg8752w/20xdxsW1Jsti4ZkyKu3ROoDgwsskgpN8jeEd1ouYIrkbJkyRLU1NSgrKwMtbW1WLVqlem+Tz31FD75yU9i6NCh6N+/P6ZOnYoXXnjBdYMJyTaiSZuWlGDgZExWV5y1m4IcrlFfjEkQhQAA7GvrVB43tXbCCG18iPfZPXLbGDjrlE4LS4rchV6vWp1NHIuUZcuWYd68ebjzzjuxbt06zJgxA5dccgl27txpuP8rr7yCT37yk1i+fDnq6+txwQUX4PLLL8e6devSbjwh2SBXl0QPI25EoihMigtdlMU3Gf+DNC8M6luiPC6MJG0/EtRWEjNhoI1JKbIZYGwX+XMTY1L4W7JHR3evJaXE3JISNlFtheNv3qJFi3DDDTfgxhtvxIQJE7B48WJUV1dj6dKlhvsvXrwY3/ve9/Dxj38cJ554In72s5/hxBNPxD/+8Y+0G09INlDHpJAg4GRMdheTknqfIE2yn500UnlcWKAOnI3aECn6tXvsXdf22kCqVZCdHZvvdEZlkaK3pMj1bfLW3dPd3Y36+nrMnDlTtX3mzJlYvXq1rXPE43G0t7dj0CDzaPmuri60tbWp/hESFMS7dzcFqEh2KXCR3RO2BQYLNS4tU0uKSUyVfu0ej8MXDYq50d1jD9mS0ofuHj0tLS2IxWKoqqpSba+qqkJTk701FH7961/j6NGjuPLKK033WbhwISorK5V/1dXVTppJiK/E6e4JDmkGztqPSXF+nWwiVtJVvUcJtiwp2oqzti0pNtsnWlIYk2Kf+h0H8eH+RMG2vhbuHq9Xrc4mruSx1t8lSZItH9hjjz2GH/3oR1i2bBmGDRtmut+CBQvQ2tqq/Nu1a5ebZhLiC3T3BA8nPnjRymB3TRo7MSlBQmdJEZ6L8Sam7h7t2j0eW1KU67KYm206umO4Yuka5bnWkiJJUk5m9ziqnTtkyBAUFhbqrCbNzc0664qWZcuW4YYbbsATTzyBiy66yHLf0tJSlJaWOmkaIRmD7p7g4Kb3K8uTRbCG9iuzdYwdYRKktVK0Kz3L7W/t6EFPNOnjMRMGWvHi9aQnuiXSWbsnnzjSFVU9LzX4vsXz3d1TUlKC2tpa1NXVqbbX1dVh2rRppsc99thjuP766/Hoo4/isssuc9dSQgKCOJby7i8YOBmSK/sU4+k50/DgdVMw7Xh7Ra/UMSnqq33tnOPQp7gQ3zx3nINW+ItKpAjZPa9ubcH/+0u98prWrSOjFSl2LVV2hYaShYKkAOJvyRpdnJCBFTCeg4Gzjlchmj9/Pq699lpMmTIFU6dOxR/+8Afs3LkTs2fPBpBw1ezZswd//vOfASQEyqxZs/Cb3/wGZ599tmKF6dOnDyorK02vQ0hQUddJIWFk8hhnZcOtBv27Lj8FP/z0xwKV9ikGBBcVRkxNQXZjUvyiQGhaqpiUSCS/Y8C0/WOUPm8nBdnr6sF+49g+edVVV2Hx4sW4++67MWnSJLzyyitYvnw5xo4dCwBobGxU1Uy5//77EY1G8e1vfxsjRoxQ/t1yyy3evQtCMohqqMjnUTMAZMpFEEkRkxIkgQKoAycLIuZrRJsJA7+7VZxMlYqzKYRRsHo482iFo7FISfw1cvf88ounYWRlGRZddbov7fMLV+s5z5kzB3PmzDF87eGHH1Y9f/nll91cgpDAEqe7J3j4PIOFzcevsqRYBL2apSC7taS4KYuvxKSkOCaS56YU7WdllD4viz+jGoVXTqnGlVPClykbnEgvQkICy+LnH+q1e4KPKKoKCswDf7WVZWX8TgdWyuLDfjG3MPS7n2g/KyNLSt7XSSGEqO/48vjGLhBkqv/DNuiLQZVFBQUWiyJ6LFJsHra1+QiAhPhjMTd76N095paUoLkf04EihRCnsJhb4DCPuvDo/BGzJ
8FEXbDOvMlmXp2Yz6t7P/9eIwCgOxZ3FDibz+ize/TTd96XxSeEqAcL3v1ll0z1fugsKUIcSmFBgamEs5OCPFCoK+MVg/sl6mBdcPIw28Xc/BaiQcdO4CzdPYQQWk8CiN9jcujW7hFG9kKh9LwWU3dP74RY2acYq2//hO3r2o3Rkifc44b0tV3MLYfmXVc4cffQkkJIHqOuOJvFhpCMEbZBv1C0pFiU/je3pCT+jqgsM1zILl1kQVIorILsNlMuXyrV2ln0MSlSQvaFtYAihRCHqBYYZHZPVsnU/JSqTkrQ0FlSTOw/pjEpLic7u5+Hkt0TgWKaSuU6NXs1X8oA2LOkJP5SpBCSx7AsfvDwe0hWpyAHfwJQWVIKIqYdZCYMmlo7ksf6gHjHn3T3pDio9/WKMnV5r3yJC7NVFl9ZBTkjTcoIOfRWCMkUXGAwKGTKkhW2lE5RWxQWOKs4+6/1jbjtyfW689jhX+81pd4Jwh1/gfqu3+r3JH/W2iJm+SNS1M+130lJYgoyIQRadw8JAv4HzmbuWl6TWAXZuNFGqcZrth1QHkddmAqPalbrNUJSWVKS260uJ2uRQo2ZwKxqbq6hdfcYuXTkvqC7h5A8RnW3R5WSF4T5ztSpJaVfadKd0trR4/h6j725E4tf2oyO7pjpPmIWiti3ZlaR1mM9imDKW0uKTqQY7JOD2T2u1u4hJJ8Rh4p8GSCDSuYqziYfh238N1rjRcbIvSJuaT3mXKTc88+NAIATh1XgstNGGO6TDJzVWlKMP9CbHntbeayNkzEr7Z9raN+n1loiLm1UGGJRrYWWFEIcQndP8PA7mDXM5vMCizopRu4e8Y693Ybrxoyj3VHTlY2NAmcBc9G5akuL8lgbMCrlqbtH+5kyJoUQAkCzwCBVSl5QELIUZJGiAnORYmS5ELdNqh7g+rqLXtyM8T94Hn99fYfutWRlVHXf2rFMaq0E+WLN1L5PI+Ecy0F3D0UKIWmQLwNkvhM2YSJSUGBVJ0X//RWtK7/84mmur9vU1onuWBy/evEDg2skLSkRlbsn9Xnz1t2jsRgZiRRF/OWQSqFIIcQhqrhZB+NjPC5ha3O7aZVP4p6MlsUPoWJxY0m56YITcFJVRdrXbu/Uu4ySbgloRIoNS0qeBs7qs3vUr3+wrx07Dhw1fC3MUKQQ4hC3g+JPl2/ERYtewe1Pvutxi/KXTNWpyaXiWCKGMSkeuwyMfi/iQnh2YlJEtDEp+ZKCrO3HQX1LDPZJ/A2jkDYjR396hPiH2pJif5J88NXtAIAn6nd73aS8x/+Ks+Ea9O0GEht9f70OvjT6iZgFzm5tbk95Pl2dlDy1pIwb2s9037B9X62gSCHEIWKVU3pu8oOwmc/PrBmEk6uSKcBmgsMqJsWvkvjidROBs8ntb2w/mPLY4jx19xi9zwHlxYb7MgWZkDyGCwwGh0z1ftgWGCwpKsDz82bg91efAcDc0mTk7pF8yBC58X/fQnc0ebFkWfxENdzLTh3Re+3U59IGheaLu8dJLFsYvqN2oUghxCkuA2eJj/g8KofRfG5HWBlbUuRF6rx7zy9tbMb6Pa0A1C4muV/79ylWXduKvK046+BthvH7agZFCiEOEa0n+TE8BpdMzU8R1ePwTQBOUpAVK4fHE51sSREnW1lvFPbORHZESr6mIJsVxjMilwK9c+itEJIZVO6ePBkggw4DZ61xk4LsdVxDNC6LlOQ1ZWuPfC07VhGtJeXrD7/lao2hsOFEjIX9+ypCkUKIQ1hxNv8Qx/wcGv8tU5C9fp/RmKQ6PyBaUhJTkZ1Vl7XZPTsOHMOaDw+Y7J07OIlJCVugtxUUKYQ4RDJ5TDJPpgKXw17B06z1RpZAeTL0Orunp1cRSSp3T68lpXcmsuPSMFowscdIbeUYorg7q2aQ5b60pBCSx7itkyJTXJg7A0hQ8L/irHAtfy/lC+YLDFoXWvMS2UoiXlO+hiwCncakHD+0r+3jwo78Hs+qGYRHv3E2APPvIkUKIXmMKEzsjo3iMUW5FNWWZTLlbgv/oG8WOKvf5kd2D5C0dqhjUhJ/5ZgUO3EXokgpLrTvJgo78ucyvLIspZUr/N/XJBwtCXGIyt1jc5bsFszR2rLeJH38zrgJe0yKWZutKs567eFKxqQktyXdPb2Bsw7dPbJIiXlULEWSJLyz67CyBk6QePLtPQDUAc1mvRXG76gZFCmEOETl7rF5zN7DncpjeWAl6fGPd/bikTd2ZuRaYb8zNWv9U+v26Lb5ld2TjEnRB84WuLSkyILfK0vKG9sP4rO/fw2f+PVKHOnSL4yYTY72tqfHxnsNewyVCEdLQhwSt5nds+bDA7ho0Uq8se0Alq9vVLaHfcILCnc8vT5j13K6CF6YOKqZjP2qkyJPrlaWFDvxr0WCyC8ukC0p3nwo21sSFpRoXMKho92enNNrZk0dqzw2j0nJTFsyAUUKIQ6xGzj7lT++jq3NR3DVH15HV1QcfXNslssS7Z3JyTWTgbNh/PSsFgvUWgzkCd/7FGSLmBQH7h7REFlc1GtJiXnzqbR3JuutBK2Srfy52LHEcu0eQvIYNynIXdGY8jgfMhFyDXGSD9rkZQejKat/WREAtdgDBHePx7fj2jopkUiyX524e8T4I3nC9uozOSL0RdCCceVxwygFW8vQilK/m5MxKFIIcYibYm7i4moUKd5QUVqUsWupLCkh/PiMbqwryhLr5WgtKcnAWa/dPeo6KeL5ixykIIufRZGDInB2aBNEipMy9JkgZvNz+eykkZaWs7BBkUKIQ8RJyu4d3Ivv7zM8nrhHXpQOSLoS/EIdkxK+D9BozurXK/KOaC0pvV3pdfCl1pIiuiSc1EkpUGX32D/ODqJgC9qaQLJoEi1cRmKkT3FhxtqUCShSCHGImwUG9xzuUB4HbfALK+UlycHY72yGSNgtKQYOn3697h5zS4q3bUjGpPS2STi/nJXv1t3jVUzKik3NyuOgWTxjihvOer8yihRC8hvVOGowjj3/XhNuevRt0+ODNviFFbkXjxtcjq+cOcbXa4kF+CrLiy32DA99ey0pr27dDwDo7ImhYddhbOvNcPE8BVnO7onr3RZOAmdV7h7FkpK+Je3wsW4cEDJ6PCq94hkxg34zsur1KcktkZI5py4hOYIqBdlApcz+a73l8WG8Ew8i8qD9yy+ejqr+Zb5eq6SoAPdfW4sPmtrx+cmjfL2WH4wYYN4/r21NLM5306Nv46WNSUuC13EN2uweUWw4cfeIzSrxsOLsoWPqlZSDZvFMBs5a2xbKaUkhJL8Rhy43Y2PQBr+w4tdCeGZcfMpw3PyJE5WA0zAxfnh//OOm6aptn5s0EkAyaFUUKIAfCwyq66SoLCm9j+3EeIniKVlxNv3flDauKWgWz+RyBdb75ZolhSKFEBtIkoSPWo6iub1TpVLcBFEGbfALK5kWKWHn1NGVquejB5YDMLdCeN2t/7d2FwB1CrJyLQtLyoEjXarnYrOKPAyc7dHEtQQt1dwoNdzI2pVrMSl09xBig3v/sxW/rtus2+52GJMkKafSBLOBX+Xb
8wU5M6bHJDPKq2DkWy86Cf/z0mYc646hJxZXhL14/uQCg/rjf/efreoNQrO8XGAwqglC8SoY1ysUUZ7i+87sHkLykMfeNF4jxu3NFq0p6RO1af4mxsgTvCgcRLyqk3L1Wcmg5ub2LmN3j0XgrLZsf4lYFj9PLCmSJCX7LYV4HNg3fO5IK/jzJsQGVlYPcYC3WwCKcSnpE7cZSEiMEdN3uw2sKV5ZqPqUFGLUgD4AgH1tnYaBs7JIeXVrC77x57Vobu80daV++rSROHFYP3z549UoLUpYDTyxpAQ4JkVsi1XF2ZOq+mH6CUMz0aSMwV83ITaQfd9GiGOp0WCf6hjiDrt1I4gx8ne6OxZHR3dM97pXMSkFEaCqf6JM++KXtiipvREDSwoA1G3YhzN/+m98/Kf/VtUXkjl5eAXq5p+Hn19xmrAwYfr5wlqhE6QbCbEtVpaUOeefgJKi3PpB5Na7IcQnrO4qxaFMvZCgOUG6SwsrRnUjiDUnV1UAAD42on8yfTcm4aiBSPEqZqogEsGYQYkg3d0HjxmnIBtcq+VIF6594A08Ub/b9NyyVcELS4r2BiNIZfFFDWY1FuXiT4EihRAbWGWQiL7rbo1IkScFLUG6SwsrzO5xzm++MgnfPHccFn95kmJJ6eiJYf6yBt2+XvVrQSSCOy6dAADY1nJUKRZnFJOiRd7XjEIH9VVSoQ2UDdKNhBjUm2/fd4oUQmxgNTBYuXumHj8YXzvnOP0xAatmGUZoSXHO+OH9seDSCTipqkKJSQGAN7Yf1O3rpbtnaEWpsiDkzY+t690uihR35/ZWpGgsKQG6kRAtKfn2fadIIcQGliJFcPh09ajN5hVlRUpwnwgtKekjTyJW8ULEnOIUAcduUpC/ed44/XkiEUQiEcz75Ema86v3cYOT1ZNT0aM5h1crK3uBOF6IgbP58M2nSCHEAkmSsPTlD/H+3jaLfZKPtZaUfqVFKDGYRINkSg4rdutGEGOKi6z7zY1wWHDJBPziilPV5+mdVL827TjT87t1YRT2Kh27sWBWhCW7x+/FNIMGRQohFny4/yh+8fwmy31UIkUzWPYtLTIMQHRTqZYkcVI3ghiTKnW7xKUPRhQf2vV5rheEilFZfKfIVoX/bGo2LUpnF21MSpDcPfkcf0WRQogFbZ09KfeRIKErGsOX7luNz9z7mrJ99MA+OOeEIYYR93T3pId4Z0lLijuKLdxkk6oHYPxw46DvVKhFivoaowf2UR4blcV3ypTjBiqP97V1ujqHTE9ca0lJ63SeEsvj6soUKYRY0NWTeqTa19aFO556D299dEjZNqhvCV697ULUDOmLiOA5lmsYBMmUHEbs1o0g5lilGN/zuYmu+1UVa6I5x6cmDscZYwagelAfXHPWWGX7+OEVGFpRqjy/4ozRmDiqv+7cp2nWHxo3tB8GlicqrBrVenFCTzTIKcj5W12Za/cQYkFXNPXA98O/v4dVW1pU28zGd/lOiIaU9BBveq0qcBJ3pONWMHP3AIlFDZ+ac47umAHlJVhz+4XojMbRp7hQdf1oLI7OaBwFEeN1acpLinDoWI9hrRcA+Fv9brzwfhNuvegkfGykXvgo1wlyMTeb1ZVzcT2wPNRlhNin04YlRStQEogrlSa3ymPvluZ2W64kYkw+143IBOkIPyt3j+U1CwvQr7RI93nK28tLjOO7yksSwuVYd1T3GgDc8fR61G3Yh0t/uwo//Pt7ptfXrt0jWjvnPrYOk+5+Eas/NPqt+4+yTlUeftUpUgixwI4lxQmy+fvrD6/Fhb9aic4eb8+fL+Rz3YhMUJTGWgP9ypIG+mNpumDsoIiULv21uqIxVTD7n9fswNbmdt1+3dG4ToDIgbMtR7rwj3f24vCxHvzz3UYvm24bZcXvPFQpFCmEWOBFaqM4rBwRVnRtOdKFptb0gv3yFdEUn48Dt1cM6VdiuD0dS8r0E4a4PtYN5SUJUXTMQPAfONKt29bRrf9N/+L5TTqLaDQmYcWmZky55yVl2/72rnSb6wq72T2Dyo0/zzBDkUKIBdribCKXnjpc9fz5eTMM9xslZDQsuGS86rWWI9kZ9ILMpqY2/PrFD/Dh/iOm+6jqRlCjuOaB6z6O7158sm57OsKvuLDAVPz4QdKSonf3vGrgijWyjhpZSDp6Yvjaw2+ptr24YR86e2Lo7Inhnuc24I+vbHPbbEeYVVcWn35jRg3OOWFwRtqTSVyJlCVLlqCmpgZlZWWora3FqlWrLPdfuXIlamtrUVZWhnHjxuG+++5z1VhCMo2VJaWyT7Hqed8SMQ49OYl+dtIofPPccXjo+inoX6Y+psXgTi+oSJKE/35hE/7xzl7D1xtbO9JOAwWA7z/9Hn73n634xK9X6gpsyYjm71wMFswUk6oH4NsXnKDbnm4w8pB+pal38ojy3nL79TsOqbav392K7z35rm7/dgMxI7qoZESBc+/Vk5XH43/wPMb/4Hk88Op2/HT5RnyUYn0hL1CqK1t8Lnde9rGc/C04FinLli3DvHnzcOedd2LdunWYMWMGLrnkEuzcudNw/+3bt+PSSy/FjBkzsG7dOtxxxx24+eab8eSTT6bdeEL8RhQpS685Q3Xn0l8jUvqVFuELk0cBAG4SBv7CgggWXDoBF46v0g2G97/yoQ+t9odXt7bg9ys+xNzetVdEtrccxfRfrMD0X/wHew53mJ7j8LFufPb3r+HzS15Du0ng8FphsnnsTfW4sn53K/6nbjN2HjwGID/rRmSCdNO6px2fcPmMrCzzojmW9ClOTGPPCuL5o5ajuPzeVw33P9KpFyli+vIZYwYASIhuABjctwSfPm2kad2Y7T6LlGgsjsff2gVA/7kEKAHJNxynIC9atAg33HADbrzxRgDA4sWL8cILL2Dp0qVYuHChbv/77rsPY8aMweLFiwEAEyZMwNq1a/GrX/0KV1xxRXqtJ8RnZHfPrKljccmpI3DTBSfgd//ZioHlxTh+aD/Vvn1Li/CLL56Gb5w7znRA006q21uOQpKkUNwB7RXEx0ctR9HU1olhFaV47M2deKJ+N2JxCTEAb24/gL837MW+ti7c/dlT0NkTw5HOKO5dsVW1vMCCp9bjtk+Nx9CKUqz96BD+vWkfJgzvj0gkOfj+4O/vY9TAPrhwfBVicQlfWPoaemISfvPvLQDys25EJkj323jrJ0/ECcP6qYqt+cWXzxyD/1u7G92xODq6Y+hTUoh397Sq9rl+2nF4ePVHAID2zii6ojG0HOnG8P5leHf3YUVY/+4rk/Ha1ha8vfMwmnqtgp+YMAwA8JUzx+CuZ9/XXf9nyzdixolDDIONe2Jx1UKOQKLmiRMR+Ow7e/HoGwmxno/p9o5ESnd3N+rr63H77berts+cOROrV682PGbNmjWYOXOmatvFF1+MBx98ED09PSguLtYd09XVha6upK++rc183ZR0eLJ+N9ZrvszpYqfceVwCPmhqx6C+JRjWP7VZ1IlaFhe78+6cNvdzpOp9aKfNfe32EQC8uzvx/SjtLcL2XzNPxlfPHou+pUUq8/LQilKlUNuEEea
1GLQcPtaDWx5vsB58LF8yf9FK91gNdWbH/d/a3crj83/1sunxv35xM3YfSgz6X7pvjel+z73biOdsZEt8/eG1OKmqHzbv08eouC3dTtRMO34wVn94QHmeqh5HKirKinH1WWPSbZYtJlcPQHlJIY51xzDhh88rj2VGDeiD+TNPUkTKHU+vx/efWQ+jWm3nnzwUa7Yl+kEuPyC7rr5YOxqHj/VgRGUZVm1tUdyeW5qP4KTv/wu1YwdiQHkJ9rV14v29bUocycjKMkTjEspLCtHWGcXBo90YN6Qvzj5+sK14qr++nrQmfu2cGtVrIbi3SRtHIqWlpQWxWAxVVVWq7VVVVWhqajI8pqmpyXD/aDSKlpYWjBgxQnfMwoUL8eMf/9hJ01yxcvN+lYmQEDPEaphV/RMm7CljB+LM4wahrbMHD17/cVvnOXtcIrCtZkhflBYVYFNTe058B8cMKldcMLJA0TK0ohS/+tLp6InG8YdV27DnUIfONXTGmAEoiETw6dNG4JRRlYrIkQVKWXEBJCnphrtes2gdccfSr9bi5Q+a0bDrMPoUF6KyXH/zaMbp1QP8a5gNIpEIJlUPUESWKFCGVZTisW+cjf5lxbjj0vH42fLEOlxGAmXquMGoKCvGAI0bt3pQOYCEpfSWi04EAFz58Wr85qpJ+NyS1/Du7lbEJagqTovsNcjg29ZyFNscuon+csOZmHHiUNW2r549Fotf2oIZJ2Y2oyqTuKo4qzVNpzJXG+1vtF1mwYIFmD9/vvK8ra0N1dXVbppqycxTqjCm9wvohnRU7I4DxzCobwn6GwRsub2Q3ebYOZ3VHbrTcyXO5+G5PLx9sHOqitIifP6M0brtfUuL8H+zpzq63sC+JXjnhzNRVlKA3Yc6sGJTs6kFyMriY2U1SmUnsj7W/MWCSARjB5XjwNFuFBdGcNGEKsSlRHZF39IidHTH8NS63WjvjKK8pBCfmzwKTa2diMYkHO2OYsrYgcpnd9HHEjcuPbE4OnpiOHy0B8P6l6JMU1F01fcuwN8b9qCkqAC1YwfitNEDUFQQQd2GfSgvKcL0HB6cM0lln2J8dtIofHbSKMfHHj+0H56bOz2jwbJaln61Fu/taUV3LI7SogKUlxShraMH044frLhhvjFjHE4ZWYk9hztw3klDEZck/Gt9E/qUFCICYOYpiWy9G6bXoKKsGB3dUQwoL8HnTPqkoCCCZ+acg72tHdjSfATNbZ1K0bW4BBw/pC8qyorR1tmDyj7F6Oh1e5YUFaBh12HbCyIWRiIYP6K/YWr3TRecgLNqBuP06kqDI3ODiORgOdbu7m6Ul5fjiSeewOc//3ll+y233IKGhgasXLlSd8y5556LyZMn4ze/+Y2y7emnn8aVV16JY8eOGbp7tLS1taGyshKtra3o39++KZ0QQggh2SPd+duR47GkpAS1tbWoq6tTba+rq8O0adMMj5k6dapu/xdffBFTpkyxJVAIIYQQkp84jo6aP38+HnjgATz00EPYuHEjbr31VuzcuROzZ88GkHDVzJo1S9l/9uzZ2LFjB+bPn4+NGzfioYcewoMPPojvfOc73r0LQgghhOQcjmNSrrrqKhw4cAB33303GhsbMXHiRCxfvhxjxyaW3W5sbFTVTKmpqcHy5ctx66234ve//z1GjhyJ3/72t0w/JoQQQogljmJSsgVjUgghhJDwkdGYFEIIIYSQTEGRQgghhJBAQpFCCCGEkEBCkUIIIYSQQEKRQgghhJBAQpFCCCGEkEBCkUIIIYSQQEKRQgghhJBAQpFCCCGEkEDiuCx+NpCL4ra1tWW5JYQQQgixizxvuy1uHwqR0t7eDgCorq7OcksIIYQQ4pT29nZUVlY6Pi4Ua/fE43Hs3bsXFRUViEQinp23ra0N1dXV2LVrF9cEsgn7zDnsM+ewz5zDPnMO+8w5TvtMkiS0t7dj5MiRKChwHmESCktKQUEBRo8e7dv5+/fvzy+oQ9hnzmGfOYd95hz2mXPYZ85x0mduLCgyDJwlhBBCSCChSCGEEEJIIMlrkVJaWoq77roLpaWl2W5KaGCfOYd95hz2mXPYZ85hnzkn030WisBZQgghhOQfeW1JIYQQQkhwoUghhBBCSCChSCGEEEJIIKFIIYQQQkggyVuRsmTJEtTU1KCsrAy1tbVYtWpVtpuUNRYuXIiPf/zjqKiowLBhw/C5z30OH3zwgWofSZLwox/9CCNHjkSfPn1w/vnn4/3331ft09XVhblz52LIkCHo27cvPvOZz2D37t2ZfCtZYeHChYhEIpg3b56yjf1lzJ49e/DVr34VgwcPRnl5OSZNmoT6+nrldfabmmg0iu9///uoqalBnz59MG7cONx9992Ix+PKPvneZ6+88gouv/xyjBw5EpFIBM8884zqda/659ChQ7j22mtRWVmJyspKXHvttTh8+LDP784frPqsp6cHt912G0499VT07dsXI0eOxKxZs7B3717VOTLWZ1Ie8vjjj0vFxcXSH//4R2nDhg3SLbfcIvXt21fasWNHtpuWFS6++GLpT3/6k/Tee+9JDQ0N0mWXXSaNGTNGOnLkiLLPz3/+c6miokJ68sknpfXr10tXXXWVNGLECKmtrU3ZZ/bs2dKoUaOkuro66e2335YuuOAC6fTTT5ei0Wg23lZGePPNN6XjjjtOOu2006RbbrlF2c7+0nPw4EFp7Nix0vXXXy+98cYb0vbt26WXXnpJ2rp1q7IP+03NPffcIw0ePFh67rnnpO3bt0tPPPGE1K9fP2nx4sXKPvneZ8uXL5fuvPNO6cknn5QASE8//bTqda/651Of+pQ0ceJEafXq1dLq1auliRMnSp/+9Kcz9TY9xarPDh8+LF100UXSsmXLpE2bNklr1qyRzjrrLKm2tlZ1jkz1WV6KlDPPPFOaPXu2atv48eOl22+/PUstChbNzc0SAGnlypWSJElSPB6Xhg8fLv385z9X9uns7JQqKyul++67T5KkxBe7uLhYevzxx5V99uzZIxUUFEjPP/98Zt9Ahmhvb5dOPPFEqa6uTjrvvPMUkcL+Mua2226Tpk+fbvo6+03PZZddJn39619XbfvCF74gffWrX5UkiX2mRTvhetU/GzZskABIr7/+urLPmjVrJADSpk2bfH5X/mIk7LS8+eabEgDlRj6TfZZ37p7u7m7U19dj5syZqu0zZ87E6tWrs9SqYNHa2goAGDRoEABg+/btaGpqUvVZaWkpzjvvPKXP6uvr0dPTo9pn5MiRmDhxYs7267e//W1cdtlluOiii1Tb2V/GPPvss5gyZQq+9KUvYdiwYZg8eTL++Mc/Kq+z3/RMnz4d//73v7F582YAwDvvvINXX30Vl156KQD2WSq86p81a9agsrISZ511lrLP2WefjcrKypzvQyAxJ0QiEQwYMABAZvssFAsMeklLSwtisRiqqqpU26uqqtDU1JSlVgUHSZIwf/58TJ8+HRMnTgQApV+M+mzHjh3KPiUlJRg4cKBun1zs18cffxxvv/023nrrLd1r7C9jtm3bhqVLl2L+/Pm444478Oabb+
Lmm29GaWkpZs2axX4z4LbbbkNrayvGjx+PwsJCxGIx/PSnP8VXvvIVAPyupcKr/mlqasKwYcN05x82bFjO92FnZyduv/12XH311cqCgpnss7wTKTKRSET1XJIk3bZ85KabbsK7776LV199Vfeamz7LxX7dtWsXbrnlFrz44osoKysz3Y/9pSYej2PKlCn42c9+BgCYPHky3n//fSxduhSzZs1S9mO/JVm2bBn++te/4tFHH8Upp5yChoYGzJs3DyNHjsR1112n7Mc+s8aL/jHaP9f7sKenB1/+8pcRj8exZMmSlPv70Wd55+4ZMmQICgsLdUquublZp7bzjblz5+LZZ5/FihUrMHr0aGX78OHDAcCyz4YPH47u7m4cOnTIdJ9cob6+Hs3NzaitrUVRURGKioqwcuVK/Pa3v0VRUZHyftlfakaMGIGPfexjqm0TJkzAzp07AfB7ZsR3v/td3H777fjyl7+MU089Fddeey1uvfVWLFy4EAD7LBVe9c/w4cOxb98+3fn379+fs33Y09ODK6+8Etu3b0ddXZ1iRQEy22d5J1JKSkpQW1uLuro61fa6ujpMmzYtS63KLpIk4aabbsJTTz2F//znP6ipqVG9XlNTg+HDh6v6rLu7GytXrlT6rLa2FsXFxap9Ghsb8d577+Vcv37iE5/A+vXr0dDQoPybMmUKrrnmGjQ0NGDcuHHsLwPOOeccXWr75s2bMXbsWAD8nhlx7NgxFBSoh+nCwkIlBZl9Zo1X/TN16lS0trbizTffVPZ544030NrampN9KAuULVu24KWXXsLgwYNVr2e0z2yH2OYQcgrygw8+KG3YsEGaN2+e1LdvX+mjjz7KdtOywre+9S2psrJSevnll6XGxkbl37Fjx5R9fv7zn0uVlZXSU089Ja1fv176yle+YpjGN3r0aOmll16S3n77benCCy/MmTTHVIjZPZLE/jLizTfflIqKiqSf/vSn0pYtW6RHHnlEKi8vl/76178q+7Df1Fx33XXSqFGjlBTkp556ShoyZIj0ve99T9kn3/usvb1dWrdunbRu3ToJgLRo0SJp3bp1SiaKV/3zqU99SjrttNOkNWvWSGvWrJFOPfXU0KYgW/VZT0+P9JnPfEYaPXq01NDQoJoTurq6lHNkqs/yUqRIkiT9/ve/l8aOHSuVlJRIZ5xxhpJum48AMPz3pz/9SdknHo9Ld911lzR8+HCptLRUOvfcc6X169erztPR0SHddNNN0qBBg6Q+ffpIn/70p6WdO3dm+N1kB61IYX8Z849//EOaOHGiVFpaKo0fP176wx/+oHqd/aamra1NuuWWW6QxY8ZIZWVl0rhx46Q777xTNVnke5+tWLHCcPy67rrrJEnyrn8OHDggXXPNNVJFRYVUUVEhXXPNNdKhQ4cy9C69xarPtm/fbjonrFixQjlHpvosIkmSZN/uQgghhBCSGfIuJoUQQggh4YAihRBCCCGBhCKFEEIIIYGEIoUQQgghgYQihRBCCCGBhCKFEEIIIYGEIoUQQgghgYQihRBCCCGBhCKFEEIIIYGEIoUQQgghgYQihRBCCCGBhCKFEEIIIYHk/wNeIIJFqDVg8wAAAABJRU5ErkJggg==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plt.plot( [i[0] for i in trajectory], [i[-1] for i in trajectory])\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4817474e-f9ee-4d78-a327-4b6eb15f55ac",
+ "metadata": {},
+ "source": [
+ "# Revisions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "f4b449fd-720f-4198-8114-6dd7d9f48efa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# model\n",
+ "from sklearn.ensemble import RandomForestClassifier \n",
+ "from sklearn.linear_model import LogisticRegression\n",
+ "from sklearn.svm import SVC\n",
+ "from sklearn.multioutput import MultiOutputClassifier, ClassifierChain\n",
+ "from sklearn.metrics import roc_auc_score"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "1df9bb79-38cc-4fc5-b560-41e796ad7e81",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def make_features(sequence):\n",
+ " features = [protpy.amino_acid_composition(sequence)]\n",
+ " for key in [\"hydrophobicity\", \"polarity\", \"charge\", \"solvent_accessibility\", \"polarizability\"]:\n",
+ " features.extend([\n",
+ " protpy.ctd_composition(sequence, property=key),\n",
+ " protpy.ctd_transition(sequence, property=key),\n",
+ " protpy.ctd_distribution(sequence, property=key)\n",
+ " ]) \n",
+ " features = pd.concat(features, axis=1)\n",
+ " features = np.array(features)\n",
+ " return features"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "52cccf01-2fa4-4467-8209-f783059fe5ba",
+ "metadata": {
+ "jp-MarkdownHeadingCollapsed": true
+ },
+ "source": [
+ "## Classic Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "436f8366-1965-4a56-b5e2-839f7087ea12",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "args = Namespace(**pickle.load(open(os.path.join(PROTGPS_PARENT_DIR, 'checkpoints/protgps/32bf44b16a4e770a674896b81dfb3729.args'),'rb')))\n",
+ "args.pretrained_hub_dir = \"/home/protgps/esm_models/esm2\"\n",
+ "args.dataset_file_path = os.path.join(PROTGPS_PARENT_DIR, \"data/new_condensate_dataset_m3_c5_mmseqs.json\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "a0cf3bd2-da42-4e21-a848-cabbe5de3e6e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 19211.00it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "TRAIN DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 31098.76it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "DEV DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 24179.16it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "TEST DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "train_dataset = get_object(args.dataset_name, \"dataset\")(args, \"train\")\n",
+ "dev_dataset = get_object(args.dataset_name, \"dataset\")(args, \"dev\")\n",
+ "test_dataset = get_object(args.dataset_name, \"dataset\")(args, \"test\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "id": "34b4bb62-802c-49a1-afb2-030616cfe448",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████████████████████████████████████████████████| 3766/3766 [01:29<00:00, 42.18it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "train_data_classic = []\n",
+ "for sample in tqdm(train_dataset.dataset, ncols=100):\n",
+ " if any(k not in protpyAA for k in sample['x']):\n",
+ " continue\n",
+ " train_data_classic.append({\n",
+ " \"x\": make_features(sample['x']),\n",
+ " 'y': sample['y'],\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 64,
+ "id": "29508cdd-3b6d-4704-908c-7c6c9d1dc319",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "trainX = np.concatenate([d['x'] for d in train_data_classic])\n",
+ "trainY = np.stack([d['y'] for d in train_data_classic])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 68,
+ "id": "94b80947-defb-48fc-9867-4774440c9ff9",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|█████████████████████████████████████████████████████████████| 803/803 [00:18<00:00, 43.05it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_data_classic = []\n",
+ "for sample in tqdm(test_dataset.dataset, ncols=100):\n",
+ " if any(k not in protpyAA for k in sample['x']):\n",
+ " continue\n",
+ " test_data_classic.append({\n",
+ " \"x\": make_features(sample['x']),\n",
+ " 'y': sample['y'],\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 69,
+ "id": "46ec0a65-c274-4274-ae38-de3469d23bf5",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(799, 803)"
+ ]
+ },
+ "execution_count": 69,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(test_data_classic), len(test_dataset.dataset)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 70,
+ "id": "662568f6-1b26-46ca-9281-b43afd1e4c7b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "testX = np.concatenate([d['x'] for d in test_data_classic])\n",
+ "testY = np.stack([d['y'] for d in test_data_classic])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 109,
+ "id": "d6a2e588-03dd-4502-9d55-02789ad46b5e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ROC-AUC nuclear_speckle: 0.8611388611388612\n",
+ "ROC-AUC p-body: 0.7530266343825666\n",
+ "ROC-AUC pml-bdoy: 0.5554846938775511\n",
+ "ROC-AUC post_synaptic_density: 0.7673996302165874\n",
+ "ROC-AUC stress_granule: 0.6580655957161983\n",
+ "ROC-AUC chromosome: 0.7941110917301395\n",
+ "ROC-AUC nucleolus: 0.825681905493661\n",
+ "ROC-AUC nuclear_pore_complex: 0.8468906842942833\n",
+ "ROC-AUC cajal_body: 0.6639344262295082\n",
+ "ROC-AUC rna_granule: 1.0\n",
+ "ROC-AUC cell_junction: 0.8344669543382335\n",
+ "ROC-AUC transcriptional: 0.7089917825537295\n"
+ ]
+ }
+ ],
+ "source": [
+ "# RANDOM FOREST\n",
+ "rf = RandomForestClassifier(\n",
+ " n_estimators=100,\n",
+ " max_depth=400, \n",
+ " random_state=0, \n",
+ ")\n",
+ "\n",
+ "multi_target_rf = ClassifierChain(rf)\n",
+ "multi_target_rf.fit(trainX, trainY)\n",
+ "predY = multi_target_rf.predict_proba(testX)\n",
+ "\n",
+ "for i, c in enumerate(OLDCOMPS):\n",
+ " auc = roc_auc_score(testY[:,i], predY[:,i])\n",
+ " print(f\"ROC-AUC {c}: {auc}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 113,
+ "id": "8afdd96a-7ea9-4a5b-916e-11b5ee401b02",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ROC-AUC nuclear_speckle: 0.7397768897768897\n",
+ "ROC-AUC p-body: 0.6447119264915877\n",
+ "ROC-AUC pml-bdoy: 0.5536139455782313\n",
+ "ROC-AUC post_synaptic_density: 0.7415525243377858\n",
+ "ROC-AUC stress_granule: 0.6493250780901383\n",
+ "ROC-AUC chromosome: 0.692142954047716\n",
+ "ROC-AUC nucleolus: 0.6520745293891663\n",
+ "ROC-AUC nuclear_pore_complex: 0.8626820908311912\n",
+ "ROC-AUC cajal_body: 0.8215636822194199\n",
+ "ROC-AUC rna_granule: 0.9943538268506901\n",
+ "ROC-AUC cell_junction: 0.7129988541895219\n",
+ "ROC-AUC transcriptional: 0.5559418457648546\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Logistic Regression\n",
+ "logreg = LogisticRegression(solver=\"liblinear\", random_state=0)\n",
+ "\n",
+ "multi_target_lr = ClassifierChain(logreg)\n",
+ "multi_target_lr.fit(trainX, trainY)\n",
+ "predY = multi_target_lr.predict_proba(testX)\n",
+ "\n",
+ "for i, c in enumerate(OLDCOMPS):\n",
+ " auc = roc_auc_score(testY[:,i], predY[:,i])\n",
+ " print(f\"ROC-AUC {c}: {auc}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2598c0d0-16ff-49d8-855f-ac29f368dc5a",
+ "metadata": {},
+ "source": [
+ "## MMSeqs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "600ecbfb-b8a5-4115-b689-5dd5602cbbec",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "args = Namespace(**pickle.load(open(os.path.join(PROTGPS_PARENT_DIR, 'checkpoints/protgps/7c4853cd22080b250ef89af2a1b25102.args'),'rb')))\n",
+ "args.from_checkpoint = True\n",
+ "args.checkpoint_path = os.path.join(PROTGPS_PARENT_DIR,\"checkpoints/protgps/7c4853cd22080b250ef89af2a1b25102epoch=3.ckpt\")\n",
+ "args.model_path = args.checkpoint_path\n",
+ "args.pretrained_hub_dir = \"/home/protgps/esm_models/esm2\"\n",
+ "args.dataset_file_path = os.path.join(PROTGPS_PARENT_DIR, \"data/new_condensate_dataset_m3_c5_mmseqs.json\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9d8bac71",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = load_model(args)\n",
+ "model = model[0]\n",
+ "model.eval()\n",
+ "print()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 269,
+ "id": "3f597c74-e592-4c46-972e-4cd5f1cb953f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 28547.10it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "TEST DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_dataset = get_object(args.dataset_name, \"dataset\")(args, \"test\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 270,
+ "id": "35edb2f9-49d5-4b62-ab1a-bb7b4c7455cc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "test_x = [s['x'] for s in test_dataset.dataset]\n",
+ "test_y = [s['y'] for s in test_dataset.dataset]\n",
+ "test_id = [s['entry_id'] for s in test_dataset.dataset]\n",
+ "test_y = torch.vstack(test_y)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 280,
+ "id": "b55ce365-3753-493c-879e-3981bf28e47b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|█████████████████████████████████████████████████████████████| 803/803 [01:24<00:00, 9.50it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_preds = predict_condensates(model, test_x, 1, round=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 283,
+ "id": "1bc32ae8-f0cb-4956-87f3-545e2d0cddaf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "nuclear_speckle:\t0.759\n",
+ "p-body:\t0.688\n",
+ "pml-bdoy:\t0.614\n",
+ "post_synaptic_density:\t0.766\n",
+ "stress_granule:\t0.619\n",
+ "chromosome:\t0.663\n",
+ "nucleolus:\t0.796\n",
+ "nuclear_pore_complex:\t0.855\n",
+ "cajal_body:\t0.646\n",
+ "rna_granule:\t0.964\n",
+ "cell_junction:\t0.727\n",
+ "transcriptional:\t0.605\n"
+ ]
+ }
+ ],
+ "source": [
+ "for j,condensate in enumerate(OLDCOMPS):\n",
+ " auc = roc_auc_score(test_y[:,j], test_preds[:,j])\n",
+ " print(f\"{condensate}:\\t{round(auc,3)}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9e495d6d-53ea-49b9-80d4-d1bb8a8d1bfe",
+ "metadata": {},
+ "source": [
+ "### classical models"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "6a2530fd-fca1-41bc-8a36-a4e4eb8b5e73",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 17356.74it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "TRAIN DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 42154.20it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "DEV DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5480/5480 [00:00<00:00, 31194.95it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "TEST DATASET CREATED FOR PROTEIN_CONDENSATES_COMBINED.\n",
+ "Could not produce summary statement\n"
+ ]
+ }
+ ],
+ "source": [
+ "train_dataset = get_object(args.dataset_name, \"dataset\")(args, \"train\")\n",
+ "dev_dataset = get_object(args.dataset_name, \"dataset\")(args, \"dev\")\n",
+ "test_dataset = get_object(args.dataset_name, \"dataset\")(args, \"test\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "79fe2356-a094-4a0e-af03-86fb15a6bb4c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████████████████████████████████████████████████| 3737/3737 [01:10<00:00, 53.36it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "train_data_classic = []\n",
+ "for sample in tqdm(train_dataset.dataset, ncols=100):\n",
+ " if any(k not in protpyAA for k in sample['x']):\n",
+ " continue\n",
+ " train_data_classic.append({\n",
+ " \"x\": make_features(sample['x']),\n",
+ " 'y': sample['y'],\n",
+ " })\n",
+ "\n",
+ "trainX = np.concatenate([d['x'] for d in train_data_classic])\n",
+ "trainY = np.stack([d['y'] for d in train_data_classic])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "6fe84d2c-ca33-4640-9a48-0aa166e03626",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|█████████████████████████████████████████████████████████████| 810/810 [00:14<00:00, 55.78it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_data_classic = []\n",
+ "for sample in tqdm(test_dataset.dataset, ncols=100):\n",
+ " if any(k not in protpyAA for k in sample['x']):\n",
+ " continue\n",
+ " test_data_classic.append({\n",
+ " \"x\": make_features(sample['x']),\n",
+ " 'y': sample['y'],\n",
+ " })\n",
+ "\n",
+ "testX = np.concatenate([d['x'] for d in test_data_classic])\n",
+ "testY = np.stack([d['y'] for d in test_data_classic])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "fa387bed-a645-4d3e-8958-8daa44953568",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(807, 810)"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(test_data_classic), len(test_dataset.dataset)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "7fb5eddc-c211-44f1-bac3-7867aa834adb",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ROC-AUC nuclear_speckle: 0.744546080832823\n",
+ "ROC-AUC p-body: 0.6188677361379219\n",
+ "ROC-AUC pml-bdoy: 0.5492851084454138\n",
+ "ROC-AUC post_synaptic_density: 0.7177075855467816\n",
+ "ROC-AUC stress_granule: 0.6676669599463718\n",
+ "ROC-AUC chromosome: 0.674722594935361\n",
+ "ROC-AUC nucleolus: 0.7367739782768779\n",
+ "ROC-AUC nuclear_pore_complex: 0.9280735107731305\n",
+ "ROC-AUC cajal_body: 0.6202311333890281\n",
+ "ROC-AUC rna_granule: 0.9978260869565218\n",
+ "ROC-AUC cell_junction: 0.7836348434174522\n",
+ "ROC-AUC transcriptional: 0.7077568134171908\n"
+ ]
+ }
+ ],
+ "source": [
+ "# RANDOM FOREST\n",
+ "rf = RandomForestClassifier(\n",
+ " n_estimators=100,\n",
+ " max_depth=400, \n",
+ " random_state=0, \n",
+ ")\n",
+ "\n",
+ "multi_target_rf = ClassifierChain(rf)\n",
+ "multi_target_rf.fit(trainX, trainY)\n",
+ "predY = multi_target_rf.predict_proba(testX)\n",
+ "\n",
+ "for i, c in enumerate(OLDCOMPS):\n",
+ " auc = roc_auc_score(testY[:,i], predY[:,i])\n",
+ " print(f\"ROC-AUC {c}: {auc}\")"
+ ]
+ },
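+ {
+ "cell_type": "markdown",
+ "id": "c3d4e5f6",
+ "metadata": {},
+ "source": [
+ "`ClassifierChain` fits one binary classifier per condensate and feeds the predictions for earlier labels in the chain to later ones as extra features, so correlated localizations can inform each other. `predict_proba` returns an `(n_samples, n_labels)` array with columns in the same order as the label columns of `trainY`."
+ ]
+ },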
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "153be311-27dc-44f7-b3a9-7edb428e879c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ROC-AUC nuclear_speckle: 0.7686007348438457\n",
+ "ROC-AUC p-body: 0.5427843972575385\n",
+ "ROC-AUC pml-bdoy: 0.5874530473767114\n",
+ "ROC-AUC post_synaptic_density: 0.759069155300311\n",
+ "ROC-AUC stress_granule: 0.6214544578515167\n",
+ "ROC-AUC chromosome: 0.6326699039464997\n",
+ "ROC-AUC nucleolus: 0.6008318759204713\n",
+ "ROC-AUC nuclear_pore_complex: 0.7758766370933672\n",
+ "ROC-AUC cajal_body: 0.4980506822612085\n",
+ "ROC-AUC rna_granule: 0.9677018633540373\n",
+ "ROC-AUC cell_junction: 0.7314685314685316\n",
+ "ROC-AUC transcriptional: 0.67479035639413\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Logistic Regression\n",
+ "logreg = LogisticRegression(solver=\"liblinear\", random_state=0)\n",
+ "\n",
+ "multi_target_lr = ClassifierChain(logreg)\n",
+ "multi_target_lr.fit(trainX, trainY)\n",
+ "predY = multi_target_lr.predict_proba(testX)\n",
+ "\n",
+ "for i, c in enumerate(OLDCOMPS):\n",
+ " auc = roc_auc_score(testY[:,i], predY[:,i])\n",
+ " print(f\"ROC-AUC {c}: {auc}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "de2d5188-d373-4b09-8c4e-87275ea5b9db",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'CTD_D_01_001_hydrophobicity': {0: 0.719},\n",
+ " 'CTD_D_01_025_hydrophobicity': {0: 19.964},\n",
+ " 'CTD_D_01_050_hydrophobicity': {0: 37.41},\n",
+ " 'CTD_D_01_075_hydrophobicity': {0: 67.266},\n",
+ " 'CTD_D_01_100_hydrophobicity': {0: 99.82},\n",
+ " 'CTD_D_02_001_hydrophobicity': {0: 0.36},\n",
+ " 'CTD_D_02_025_hydrophobicity': {0: 24.281},\n",
+ " 'CTD_D_02_050_hydrophobicity': {0: 54.137},\n",
+ " 'CTD_D_02_075_hydrophobicity': {0: 77.338},\n",
+ " 'CTD_D_02_100_hydrophobicity': {0: 99.64},\n",
+ " 'CTD_D_03_001_hydrophobicity': {0: 0.18},\n",
+ " 'CTD_D_03_025_hydrophobicity': {0: 34.173},\n",
+ " 'CTD_D_03_050_hydrophobicity': {0: 58.453},\n",
+ " 'CTD_D_03_075_hydrophobicity': {0: 79.137},\n",
+ " 'CTD_D_03_100_hydrophobicity': {0: 100.0}}"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "protpy.ctd_distribution(sample['x']).to_dict()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "protgps",
+ "language": "python",
+ "name": "protgps"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.15"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/data/protgps/__init__.py b/data/protgps/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbb38b383c54875ffceb00ada2ef3ecc2c93db38
--- /dev/null
+++ b/data/protgps/__init__.py
@@ -0,0 +1,51 @@
+# type: ignore
+
+import sys
+
+if sys.version_info[:2] >= (3, 8):
+ # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
+ from importlib.metadata import PackageNotFoundError, version # pragma: no cover
+else:
+ from importlib_metadata import PackageNotFoundError, version # pragma: no cover
+
+try:
+ # Change here if project is renamed and does not equal the package name
+ dist_name = __name__
+ __version__ = version(dist_name)
+except PackageNotFoundError: # pragma: no cover
+ __version__ = "unknown"
+finally:
+ del version, PackageNotFoundError
+
+
+# data
+import protgps.datasets.protein_compartments
+import protgps.datasets.reverse_homology
+
+# lightning
+import protgps.lightning.base
+
+# optimizers
+import protgps.learning.optimizers.basic
+
+# scheduler
+import protgps.learning.schedulers.basic
+
+# losses
+import protgps.learning.losses.basic
+
+# metrics
+import protgps.learning.metrics.basic
+
+# callbacks
+import protgps.callbacks.basic
+import protgps.callbacks.swa
+
+# models
+import protgps.models.classifier
+import protgps.models.fair_esm
+
+# comet
+import protgps.loggers.comet
+import protgps.loggers.wandb
+import protgps.loggers.tensorboard
diff --git a/data/protgps/callbacks/__init__.py b/data/protgps/callbacks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/callbacks/basic.py b/data/protgps/callbacks/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca4eff6f5dbb525b72da9a7f32b203eeb288c568
--- /dev/null
+++ b/data/protgps/callbacks/basic.py
@@ -0,0 +1,26 @@
+import os
+from protgps.utils.registry import register_object
+from protgps.utils.classes import ProtGPS
+from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
+
+# TODO: add args for various callbacks -- currently hardcoded
+
+
+@register_object("checkpointer", "callback")
+class Checkpoint(ModelCheckpoint, ProtGPS):
+ def __init__(self, args) -> None:
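+        # Monitor mode is inferred from the metric name: metrics containing
+        # "loss" are minimized, all others are maximized.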
+ super().__init__(
+ monitor=args.monitor,
+ dirpath=os.path.join(args.checkpoint_dir, args.experiment_name),
+ mode="min" if "loss" in args.monitor else "max",
+ filename="{}".format(args.experiment_name) + "{epoch}",
+ every_n_epochs=1,
+ save_top_k=args.checkpoint_save_top_k,
+ save_last=args.checkpoint_save_last,
+ )
+
+
+@register_object("lr_monitor", "callback")
+class LRMonitor(LearningRateMonitor, ProtGPS):
+ def __init__(self, args) -> None:
+ super().__init__(logging_interval="step")
diff --git a/data/protgps/callbacks/swa.py b/data/protgps/callbacks/swa.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed4043a24d023167e5bdf0716add896f55c1463e
--- /dev/null
+++ b/data/protgps/callbacks/swa.py
@@ -0,0 +1,55 @@
+from protgps.utils.registry import register_object
+from pytorch_lightning.callbacks import StochasticWeightAveraging
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("swa", "callback")
+class SWA(StochasticWeightAveraging, ProtGPS):
+ def __init__(self, args) -> None:
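+        # A swa_epoch containing "." is parsed as a float fraction of training;
+        # otherwise it is an absolute epoch index, matching Lightning's
+        # swa_epoch_start semantics in StochasticWeightAveraging.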
+ if "." in args.swa_epoch:
+ swa_epoch = float(args.swa_epoch)
+ else:
+ swa_epoch = int(args.swa_epoch)
+
+ super().__init__(
+ swa_epoch_start=swa_epoch,
+ swa_lrs=args.swa_lr,
+ annealing_epochs=args.swa_annealing_epochs,
+ annealing_strategy=args.swa_annealing_strategy,
+ avg_fn=None,
+ )
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ # stochastic weight averaging
+ parser.add_argument(
+ "--swa_epoch",
+ type=str,
+ default="0.8",
+ help="when to start swa",
+ )
+
+ parser.add_argument(
+ "--swa_lr",
+ type=float,
+ default=None,
+ help="lr for swa. None will use existing lr",
+ )
+ parser.add_argument(
+ "--swa_annealing_epochs",
+ type=int,
+ default=10,
+ help="number of epochs in the annealing phase",
+ )
+ parser.add_argument(
+ "--swa_annealing_strategy",
+ type=str,
+ choices=["cos", "linear"],
+ default="cos",
+ help="lr annealing strategy",
+ )
diff --git a/data/protgps/datasets/__init__.py b/data/protgps/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/datasets/abstract.py b/data/protgps/datasets/abstract.py
new file mode 100644
index 0000000000000000000000000000000000000000..07ec79eb79402a51fa29854869135ef8da0b0c1e
--- /dev/null
+++ b/data/protgps/datasets/abstract.py
@@ -0,0 +1,271 @@
+import traceback, warnings
+import argparse
+from typing import List, Literal
+from abc import abstractmethod
+import json
+from collections import Counter
+import numpy as np
+from torch.utils import data
+from protgps.utils.classes import ProtGPS, set_protgps_type, classproperty
+from protgps.utils.messages import METAFILE_NOTFOUND_ERR, LOAD_FAIL_MSG
+import pickle
+
+
+class AbstractDataset(data.Dataset, ProtGPS):
+ def __init__(self, args: argparse.ArgumentParser, split_group: str) -> None:
+ """
+ Abstract Dataset
+ params: args - config.
+ params: split_group - ['train'|'dev'|'test'].
+
+ constructs: standard pytorch Dataset obj, which can be fed in a DataLoader for batching
+ """
+
+ super(AbstractDataset, self).__init__()
+
+ self.split_group = split_group
+ self.args = args
+
+ self.init_class(args, split_group)
+
+ self.dataset = self.create_dataset(split_group)
+ if len(self.dataset) == 0:
+ return
+
+ self.set_sample_weights(args)
+
+ self.print_summary_statement(self.dataset, split_group)
+
+ def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
+ """Perform Class-Specific init methods
+ Default is to load JSON dataset
+
+ Args:
+ args (argparse.ArgumentParser)
+ split_group (str)
+ """
+ self.load_dataset(args)
+
+ def load_dataset(self, args: argparse.ArgumentParser) -> None:
+ """Loads dataset file
+
+ Args:
+ args (argparse.ArgumentParser)
+
+ Raises:
+ Exception: Unable to load
+ """
+ try:
+ self.metadata_json = json.load(open(args.dataset_file_path, "r"))
+ except Exception as e:
+ raise Exception(METAFILE_NOTFOUND_ERR.format(args.dataset_file_path, e))
+
+ @abstractmethod
+ def create_dataset(
+ self, split_group: Literal["train", "dev", "test"]
+ ) -> List[dict]:
+ """
+ Creates the dataset of samples from json metadata file.
+ """
+ pass
+
+ @abstractmethod
+ def skip_sample(self, sample) -> bool:
+ """
+ Return True if sample should be skipped and not included in data
+ """
+ return False
+
+ @abstractmethod
+ def check_label(self, sample) -> bool:
+ """
+ Return True if the row contains a valid label for the task
+ """
+ pass
+
+ @abstractmethod
+ def get_label(self, sample):
+ """
+ Get task specific label for a given sample
+ """
+ pass
+
+ @property
+ @abstractmethod
+ def SUMMARY_STATEMENT(self) -> None:
+ """
+ Prints summary statement with dataset stats
+ """
+ pass
+
+ def print_summary_statement(self, dataset, split_group):
+ statement = "{} DATASET CREATED FOR {}.\n{}".format(
+ split_group.upper(), self.args.dataset_name.upper(), self.SUMMARY_STATEMENT
+ )
+ print(statement)
+
+ def __len__(self) -> int:
+ return len(self.dataset)
+
+ @abstractmethod
+ def __getitem__(self, index):
+ """
+ Fetch single sample from dataset
+
+ Args:
+ index (int): random index of sample from dataset
+
+ Returns:
+ sample (dict): a sample
+ """
+ sample = self.dataset[index]
+ try:
+ return sample
+ except Exception:
+ warnings.warn(
+ LOAD_FAIL_MSG.format(sample["sample_id"], traceback.print_exc())
+ )
+
+ def assign_splits(self, metadata_json, split_probs, seed=0) -> None:
+ """
+ Assign samples to data splits
+
+ Args:
+ metadata_json (dict): raw json dataset loaded
+ """
+ np.random.seed(seed)
+ if self.args.split_type == "random":
+ for idx in range(len(metadata_json)):
+ if metadata_json[idx] is None:
+ continue
+ metadata_json[idx]["split"] = np.random.choice(
+ ["train", "dev", "test"], p=split_probs
+ )
+ elif self.args.split_type == "mmseqs":
+ # mmseqs easy-cluster --min-seq-id 0.3 -c 0.8
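+            # Split at the cluster level: all sequences in an mmseqs cluster land
+            # in the same split, so near-homologs cannot leak between train, dev,
+            # and test.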
+ # get all samples
+ to_split = {}
+
+ row2clust = pickle.load(
+ open(
+ "data/mmseqs_row2cluster_30seq_80cov.p",
+ "rb",
+ )
+ )
+            # cluster id assigned to each dataset row
+ clusters = list(row2clust.values())
+ clust2count = Counter(clusters)
+ samples = sorted(list(set(clusters)))
+ np.random.shuffle(samples)
+ samples_cumsum = np.cumsum([clust2count[s] for s in samples])
+ # Find the indices for each quantile
+ split_indices = [
+ np.searchsorted(
+ samples_cumsum, np.round(q, 3) * samples_cumsum[-1], side="right"
+ )
+ for q in np.cumsum(split_probs)
+ ]
+ split_indices[-1] = len(samples)
+ split_indices = np.concatenate([[0], split_indices])
+ for i in range(len(split_indices) - 1):
+ to_split.update(
+ {
+ sample: ["train", "dev", "test"][i]
+ for sample in samples[split_indices[i] : split_indices[i + 1]]
+ }
+ )
+ for idx in range(len(metadata_json)):
+ metadata_json[idx]["split"] = to_split[row2clust[idx]]
+
+ def set_sample_weights(self, args: argparse.ArgumentParser) -> None:
+ """
+ Set weights for each sample
+
+ Args:
+ args (argparse.ArgumentParser)
+ """
+ if args.class_bal:
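+            # Inverse-frequency weighting: each distinct label value receives
+            # equal total weight, shared evenly across its samples (e.g. for a
+            # weighted sampler).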
+ label_dist = [str(d[args.class_bal_key]) for d in self.dataset]
+ label_counts = Counter(label_dist)
+ weight_per_label = 1.0 / len(label_counts)
+ label_weights = {
+ label: weight_per_label / count for label, count in label_counts.items()
+ }
+
+ print("Class counts are: {}".format(label_counts))
+ print("Label weights are {}".format(label_weights))
+ self.weights = [
+ label_weights[str(d[args.class_bal_key])] for d in self.dataset
+ ]
+ else:
+ pass
+
+ @classproperty
+ def DATASET_ITEM_KEYS(cls) -> list:
+ """
+ List of keys to be included in sample when being batched
+
+ Returns:
+ list
+ """
+ standard = ["sample_id"]
+ return standard
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--class_bal", action="store_true", default=False, help="class balance"
+ )
+ parser.add_argument(
+ "--class_bal_key",
+ type=str,
+ default="y",
+ help="dataset key to use for class balancing",
+ )
+ parser.add_argument(
+ "--dataset_file_path",
+ type=str,
+ default=None,
+ help="Path to dataset file",
+ )
+ parser.add_argument(
+ "--data_dir",
+ type=str,
+ default=None,
+ help="Path to dataset directory",
+ )
+ parser.add_argument(
+ "--num_classes", type=int, default=6, help="Number of classes to predict"
+ )
+ # Alternative training/testing schemes
+ parser.add_argument(
+ "--assign_splits",
+ action="store_true",
+ default=False,
+ help="Whether to assign different splits than those predetermined in dataset",
+ )
+ parser.add_argument(
+ "--split_type",
+ type=str,
+ default="random",
+            help="How to split dataset if assign_splits is set.",
+ )
+ parser.add_argument(
+ "--split_probs",
+ type=float,
+ nargs="+",
+ default=[0.6, 0.2, 0.2],
+ help="Split probs for datasets without fixed train dev test. ",
+ )
+ parser.add_argument(
+ "--split_seed",
+ type=int,
+ default=0,
+ help="seed for consistent randomization",
+ )
diff --git a/data/protgps/datasets/disprot.py b/data/protgps/datasets/disprot.py
new file mode 100644
index 0000000000000000000000000000000000000000..67ffbe3bf55f16752b642a7194c08ed0c6dc8160
--- /dev/null
+++ b/data/protgps/datasets/disprot.py
@@ -0,0 +1,169 @@
+# dataset utils
+from random import sample
+import warnings
+from typing import Literal, List
+from protgps.datasets.abstract import AbstractDataset
+from protgps.utils.registry import register_object, get_object
+from protgps.utils.classes import set_protgps_type
+from tqdm import tqdm
+import argparse
+import torch
+from protgps.datasets.protein_compartments import Protein_Compartments
+
+
+@register_object("disprot", "dataset")
+class Disprot(AbstractDataset):
+    """A pytorch Dataset for classifying intrinsically disordered protein regions from the DisProt database."""
+
+ def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
+ """Perform Class-Specific init methods
+ Default is to load JSON dataset
+
+ Args:
+ args (argparse.ArgumentParser)
+ split_group (str)
+ """
+ self.load_dataset(args)
+ if args.assign_splits:
+ self.assign_splits(
+ self.metadata_json, split_probs=args.split_probs, seed=args.split_seed
+ )
+
+ if args.precomputed_protein_embeddings:
+ self.protein_encoder = get_object(self.args.protein_encoder_name, "model")(
+ args
+ ).to("cuda")
+ self.protein_encoder.eval()
+
+ def skip_sample(self, sample, split_group) -> bool:
+ """
+ Return True if sample should be skipped and not included in data
+ """
+ if sample["split"] != split_group:
+ return True
+ return False
+
+ def create_dataset(
+ self, split_group: Literal["train", "dev", "test"]
+ ) -> List[dict]:
+
+ sequences = []
+ dataset = []
+ for protein_dict in tqdm(self.metadata_json["data"]):
+ if self.skip_sample(protein_dict, split_group):
+ continue
+
+ item = {
+ "x": protein_dict["sequence"],
+                "y": self.get_label(protein_dict),
+ "sample_id": protein_dict["disprot_id"],
+ }
+ sequences.append(protein_dict["sequence"])
+ dataset.append(item)
+
+
+        if self.args.precomputed_protein_embeddings:
+            # batch the protein sequences, then precompute encoder features
+            batch_size = 10
+            hiddens = []
+            for i in tqdm(range(0, len(sequences), batch_size)):
+                preds = self.protein_encoder(sequences[i : i + batch_size])
+                hiddens.append(preds["hidden"].cpu())
+            hiddens = torch.cat(hiddens)
+
+ for i, h in enumerate(hiddens):
+ dataset[i]["sequence"] = dataset[i]["x"]
+ dataset[i]["x"] = h
+
+ return dataset
+
+ def get_label(self, protein_dict):
+ """
+ Get task specific label for a given sample
+ """
+ y = torch.zeros(len(protein_dict["sequence"]))
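+        # DisProt regions are 1-based and inclusive; shift start to 0-based so
+        # the half-open slice [start:end] masks exactly the disordered residues.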
+ for disordered_region in protein_dict["regions"]:
+ start = disordered_region["start"] - 1
+ end = disordered_region["end"]
+ y[start:end] = 1
+ return y
+
+ def __getitem__(self, index):
+ try:
+ return self.dataset[index]
+
+ except Exception:
+ warnings.warn("Could not load sample")
+
+ @property
+ def SUMMARY_STATEMENT(self) -> None:
+ """
+ Prints summary statement with dataset stats
+ """
+ return f"{len(self.dataset)} Proteins."
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 1
+
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ super(Disprot, Disprot).add_args(parser)
+ parser.add_argument(
+ "--precomputed_protein_embeddings",
+ default=False,
+ action="store_true",
+ help="whether to use precomputed embeddings",
+ )
+
+
+
+@register_object("protein_compartment_precomputed", "dataset")
+class Protein_Compartments_Precomputed(Protein_Compartments):
+    """A pytorch Dataset for classifying proteins into compartments from precomputed ESM embeddings."""
+
+ def create_dataset(
+ self, split_group: Literal["train", "dev", "test"]
+ ) -> List[dict]:
+
+ dataset = []
+ for sample_dict in tqdm(self.metadata_json):
+ if self.skip_sample(sample_dict, split_group):
+ continue
+
+ item = {
+ "sequence": sample_dict["Sequence"],
+ "x": torch.tensor(sample_dict["esm2_embedding"]),
+ "y": self.get_label(sample_dict),
+ "sample_id": sample_dict["Entry"],
+ }
+ dataset.append(item)
+
+
+ return dataset
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 5
+ args.mlp_input_dim = args.protein_hidden_dim
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ super(Protein_Compartments_Precomputed, Protein_Compartments_Precomputed).add_args(parser)
+ parser.add_argument(
+ "--protein_hidden_dim",
+ type=int,
+ default=1280,
+ help="hidden dimension of the protein",
+ )
diff --git a/data/protgps/datasets/protein_compartments.py b/data/protgps/datasets/protein_compartments.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ed5033dd7534e28f1dc0f995a6cac3c3ce75a98
--- /dev/null
+++ b/data/protgps/datasets/protein_compartments.py
@@ -0,0 +1,412 @@
+# dataset utils
+import warnings
+from typing import Literal, List
+from protgps.datasets.abstract import AbstractDataset
+from protgps.utils.registry import register_object
+from tqdm import tqdm
+import argparse
+import torch
+
+
+@register_object("protein_compartment", "dataset")
+class Protein_Compartments(AbstractDataset):
+    """A pytorch Dataset for classifying proteins into cellular compartments."""
+
+ def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
+ """Perform Class-Specific init methods
+ Default is to load JSON dataset
+
+ Args:
+ args (argparse.ArgumentParser)
+ split_group (str)
+ """
+ self.load_dataset(args)
+ if args.assign_splits:
+ self.assign_splits(
+ self.metadata_json, split_probs=args.split_probs, seed=args.split_seed
+ )
+
+ @property
+ def COMPARTMENTS(self):
+ return ["cytosol", "nucleoli", "nucleoplasm", "ER", "mitochondria"]
+
+ def skip_sample(self, sample, split_group) -> bool:
+ """
+ Return True if sample should be skipped and not included in data
+ """
+ if sample["split"] != split_group:
+ return True
+ if "Sequence" in sample and len(sample["Sequence"]) < 10:
+ return True
+ if "sequence" in sample and len(sample["sequence"]) < 10:
+ return True
+
+ if "Sequence" in sample and len(sample["Sequence"]) > self.args.max_prot_len:
+ return True
+ if "sequence" in sample and len(sample["sequence"]) > self.args.max_prot_len:
+ return True
+
+ return False
+
+ def create_dataset(
+ self, split_group: Literal["train", "dev", "test"]
+ ) -> List[dict]:
+ dataset = []
+ for sample_dict in tqdm(self.metadata_json):
+ if self.skip_sample(sample_dict, split_group):
+ continue
+
+ item = {
+ "x": sample_dict["Sequence"],
+ "y": self.get_label(sample_dict),
+ "sample_id": sample_dict["Entry"],
+ }
+ dataset.append(item)
+ return dataset
+
+ def get_label(self, sample):
+ """
+ Get task specific label for a given sample
+ """
+ try:
+ return torch.tensor([sample[c] for c in self.COMPARTMENTS])
+        except Exception:
+ return None
+
+ def __getitem__(self, index):
+ try:
+ return self.dataset[index]
+
+ except Exception:
+ warnings.warn("Could not load sample")
+
+ @property
+ def SUMMARY_STATEMENT(self) -> None:
+ """
+ Prints summary statement with dataset stats
+ """
+ try:
+ compartment_counts = (
+ torch.stack([d["y"] for d in self.dataset]).sum(0).tolist()
+ )
+ compartment_str = ""
+ for i, (c, count) in enumerate(zip(self.COMPARTMENTS, compartment_counts)):
+ compartment_str += f"{count} {c.upper()}"
+ if i < len(self.COMPARTMENTS) - 1:
+ compartment_str += " -- "
+ return f"* {len(self.dataset)} Proteins.\n* {compartment_str}"
+        except Exception:
+ return "Could not produce summary statement"
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 5
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ super(Protein_Compartments, Protein_Compartments).add_args(parser)
+ parser.add_argument(
+ "--max_prot_len",
+ type=int,
+ default=2000,
+            help="sequence length above which proteins are skipped",
+ )
+
+
+@register_object("protein_compartment_guy", "dataset")
+class ProteinCompartmentsGuy(AbstractDataset):
+    """A pytorch Dataset for classifying proteins into cellular compartments."""
+
+ def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
+ """Perform Class-Specific init methods
+ Default is to load JSON dataset
+
+ Args:
+ args (argparse.ArgumentParser)
+ split_group (str)
+ """
+ self.load_dataset(args)
+ if args.assign_splits:
+ self.assign_splits(
+ self.metadata_json, split_probs=args.split_probs, seed=args.split_seed
+ )
+
+ @property
+ def COMPARTMENTS(self):
+ return [
+ "Nucleus",
+ "Cytoplasm",
+ "Secreted",
+ "Mitochondrion",
+ "Membrane",
+ "Endoplasmic",
+ "Plastid",
+ "Golgi_apparatus",
+ "Lysosome",
+ "Peroxisome",
+ ]
+
+ @property
+ def esm_tokens(self):
+ return [
+ "L",
+ "A",
+ "G",
+ "V",
+ "S",
+ "E",
+ "R",
+ "T",
+ "I",
+ "D",
+ "P",
+ "K",
+ "Q",
+ "N",
+ "F",
+ "Y",
+ "M",
+ "H",
+ "W",
+ "C",
+ "X",
+ "B",
+ "U",
+ "Z",
+ "O",
+ ".",
+ "-",
+ ]
+
+ def target_index(self, target):
+ return self.COMPARTMENTS.index(target)
+
+ def skip_sample(self, sample, split_group) -> bool:
+ """
+ Return True if sample should be skipped and not included in data
+ """
+ if sample is None:
+ return True
+ if self.get_label(sample) is None:
+ # print("Skipped because no label")
+ return True
+ if self.args.drop_multilabel:
+ if self.get_label(sample).sum() > 1: # skip multi-compartment samples
+ print("Skipped because multi label")
+ return True
+ if split_group in ["train", "dev", "test"]:
+ if sample["split"] != split_group:
+ return True
+ if "sequence" in sample and len(sample["sequence"]) < 10:
+ return True
+ if "sequence" in sample and len(sample["sequence"]) > self.args.max_prot_len:
+ return True
+ if "Sequence" in sample and len(sample["Sequence"]) < 10:
+ return True
+ if "Sequence" in sample and len(sample["Sequence"]) > self.args.max_prot_len:
+ return True
+ if "sequence" in sample and not set(sample["sequence"]).issubset(
+ self.esm_tokens
+ ):
+ return True
+ if "Sequence" in sample and not set(sample["Sequence"]).issubset(
+ self.esm_tokens
+ ):
+ return True
+ return False
+
+ def skip_idr_sample(self, sample, split_group) -> bool:
+ if self.skip_sample(sample, split_group):
+ return True
+
+        if len(sample["idrs"]) == 0:  # if there are no IDRs
+            print("Skipped because no IDRs")
+            return True
+
+        if all(len(s) < 10 for s in sample["idrs"]):  # if all IDRs are short
+            print("Skipped because all IDRs are shorter than 10 residues")
+            return True
+
+        return False
+
+ def create_dataset(
+ self, split_group: Literal["train", "dev", "test"]
+ ) -> List[dict]:
+ dataset = []
+ for sample_dict in tqdm(self.metadata_json):
+ if self.skip_sample(sample_dict, split_group):
+ continue
+ sss = "sequence" if "sequence" in sample_dict else "Sequence"
+ eid = "entry" if "entry" in sample_dict else "Entry"
+ item = {
+ "x": sample_dict[sss],
+ "y": self.get_label(sample_dict),
+ "entry_id": sample_dict[eid],
+ # "sample_id": sample_dict["Entry"],
+ }
+ dataset.append(item)
+ return dataset
+
+ def get_label(self, sample):
+ """
+ Get task specific label for a given sample
+ """
+ try:
+ return torch.tensor([sample["labels"][c] for c in self.COMPARTMENTS])
+ except:
+ return None
+
+ def __getitem__(self, index):
+ try:
+ return self.dataset[index]
+
+ except Exception:
+ warnings.warn("Could not load sample")
+
+ @property
+ def SUMMARY_STATEMENT(self) -> None:
+ """
+ Prints summary statement with dataset stats
+ """
+ try:
+ compartment_counts = (
+ torch.stack([d["y"] for d in self.dataset]).sum(0).tolist()
+ )
+ compartment_str = ""
+ for i, (c, count) in enumerate(zip(self.COMPARTMENTS, compartment_counts)):
+ compartment_str += f"{count} {c.upper()}"
+ if i < len(self.COMPARTMENTS) - 1:
+ compartment_str += " -- "
+ return f"* {len(self.dataset)} Proteins.\n* {compartment_str}"
+        except Exception:
+ return "Could not produce summary statement"
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 10
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ Protein_Compartments.add_args(parser)
+ parser.add_argument(
+ "--max_prot_len",
+ type=int,
+ default=2000,
+            help="sequence length above which proteins are skipped",
+ )
+
+        parser.add_argument(
+            "--drop_multilabel",
+            action="store_true",
+            default=False,
+            help="whether to drop multilabel samples",
+        )
+
+
+@register_object("protein_compartment_uniprot_combined", "dataset")
+class ProteinCompartmentsUniprotCombined(ProteinCompartmentsGuy):
+ def get_label(self, sample):
+ """
+ Get task specific label for a given sample
+ """
+ try:
+ label = []
+ for c in self.COMPARTMENTS:
+ if isinstance(c, str):
+ if c in sample["labels"]:
+ label.append(sample["labels"][c])
+ else:
+ label.append(0)
+ else:
+ l = 0
+ for c_ in c:
+ if c_ in sample["labels"]:
+ if sample["labels"][c_] == 1:
+ l = 1
+ break
+ else:
+ continue
+ label.append(l)
+ if sum(label) > 0:
+ return torch.tensor(label)
+ else:
+ return None
+        except Exception:
+ return None
+
+ def target_index(self, target):
+ for i, c in enumerate(self.COMPARTMENTS):
+ if isinstance(c, str):
+ if isinstance(target, str):
+ if c == target:
+ return i
+ else:
+ if target in c:
+ return i
+ elif next(iter(target)) in c:
+ return i
+ return None
+
+ @property
+ def COMPARTMENTS(self):
+ return [
+ "nuclear_membrane",
+ "rough_endoplasmic_reticulum",
+ "vacuole",
+ "nucleus",
+ "inflammasome",
+ {"endplasmic_reticulum", "endoplasmic_reticulum"},
+ "cytoplasm",
+ "nuclear_gem",
+ {"membrane", "cell_membrane"},
+ "mitochondrion",
+ {"vesicle", "vesicles"},
+ "cell_projection",
+ "lipid_droplet",
+ "sarcoplasmic_reticulum",
+ "endosome",
+ "centromere",
+ "nuclear_body",
+ "nucleoplasm",
+ "golgi_apparatus",
+ {"excretion_vesicles", "excretion_vesicle"},
+ "peroxisome",
+ "lysosome",
+ ]
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 22
+
+
+# USE THIS
+@register_object("protein_condensates_combined", "dataset")
+class ProteinCondensatesCombined(ProteinCompartmentsUniprotCombined):
+ @property
+ def COMPARTMENTS(self):
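+        # Each entry is a label key or a set of alias keys collapsed into one
+        # class; list order fixes the output index of each condensate, and
+        # "pml-bdoy" mirrors the spelling used in the dataset keys.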
+ return [
+ {"nuclear_speckles", "nuclear_speckle"},
+ {"pbody", "p-body"},
+ {"pml_body", "pml-bdoy"},
+ "post_synaptic_density",
+ "stress_granule",
+ {"chromosomes", "chromosome"},
+ "nucleolus",
+ "nuclear_pore_complex",
+ "cajal_body",
+ "rna_granule",
+ "cell_junction",
+ "transcriptional",
+ ]
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 12
diff --git a/data/protgps/datasets/reverse_homology.py b/data/protgps/datasets/reverse_homology.py
new file mode 100644
index 0000000000000000000000000000000000000000..43cd54efd4fd32f1940c051137fe8357886ed462
--- /dev/null
+++ b/data/protgps/datasets/reverse_homology.py
@@ -0,0 +1,216 @@
+# dataset utils
+from random import sample
+import warnings
+from typing import Literal, List
+from protgps.datasets.abstract import AbstractDataset
+from protgps.utils.registry import register_object, get_object
+from protgps.utils.classes import set_protgps_type
+from tqdm import tqdm
+import argparse
+import torch
+import os, glob
+import re
+import numpy as np
+from argparse import Namespace
+import copy
+
+
+@register_object("reverse_homology", "dataset")
+class ReverseHomology(AbstractDataset):
+    """A pytorch Dataset for contrastive reverse-homology training over sets of homologous IDRs."""
+
+    def load_homology_dataset(self, args: argparse.ArgumentParser) -> None:
+ """Loads fasta files from dataset folder
+ Args:
+ args (argparse.ArgumentParser)
+ Raises:
+ Exception: Unable to load
+ """
+ data_folders = args.homology_dataset_folder.split(",")
+ fasta_paths = []
+ for folder_path in data_folders:
+ fasta_paths.extend(glob.glob(os.path.join(folder_path, '*.fasta')))
+ print("Loading fasta files...")
+        for fasta in tqdm(fasta_paths):
+            idrs = []
+            with open(fasta, "r") as f:
+                lines = f.readlines()
+            for line in lines:
+                if line.startswith(">"):  # skip fasta header lines
+                    continue
+                s = line.replace("-", "").strip()  # drop alignment gaps
+                if len(s) <= self.args.max_idr_len:  # skip long sequences
+                    idrs.append(s)
+            if len(idrs) >= self.args.pos_samples + 1:
+                self.homology_sets.append(np.array(idrs))
+
+ def init_class(self, args: argparse.ArgumentParser, split_group: str) -> None:
+ """Perform Class-Specific init methods
+ Default is to load JSON dataset
+
+ Args:
+ args (argparse.ArgumentParser)
+ split_group (str)
+ """
+ self.homology_sets = []
+ if self.args.homology_dataset_folder and self.args.use_homology_dataset:
+ self.load_homology_dataset(args)
+ if self.args.compartment_dataset_file and self.args.use_compartment_dataset:
+ self.load_compartment_dataset(copy.deepcopy(args))
+
+ def load_compartment_dataset(self, args: argparse.ArgumentParser) -> None:
+ """Loads dataset from json file
+ Args:
+ args (argparse.ArgumentParser)
+ """
+ if self.args.compartment_dataset_name:
+ args.dataset_file_path = self.args.compartment_dataset_file
+ args.drop_multilabel = False
+ args.max_prot_len = np.inf
+ dataset = get_object(self.args.compartment_dataset_name, "dataset")(
+ args, "train"
+ )
+ comp_dict = {}
+ for sample_dict in tqdm(dataset.metadata_json):
+ idrs = "".join(sample_dict["idrs"])
+ if len(idrs) <= self.args.max_idr_len:
+ label = dataset.get_label(sample_dict)
+ for l in torch.argwhere(label == 1).T[0]:
+ l = l.item()
+ if l in comp_dict:
+ comp_dict[l].append(idrs)
+ else:
+ comp_dict[l] = [idrs]
+
+ for label in comp_dict:
+ if len(comp_dict[label]) >= self.args.pos_samples+1:
+ self.homology_sets.append(np.array(comp_dict[label]))
+ else:
+ raise Exception("No compartment dataset name provided")
+
+
+ def create_dataset(
+ self, split_group: Literal["train", "dev", "test"]
+ ) -> List[dict]:
+ dataset = []
+ print(f"Creating '{split_group}' dataset...")
+ if split_group == "train":
+ hom_mult = self.args.homology_multiple*self.args.split_probs[0]
+ rng = np.random.default_rng(self.args.dataset_seed)
+ elif split_group == "dev":
+ hom_mult = self.args.homology_multiple*self.args.split_probs[1]
+ rng = np.random.default_rng(self.args.dataset_seed+1)
+ elif split_group == "test":
+ hom_mult = self.args.homology_multiple*self.args.split_probs[2]
+ rng = np.random.default_rng(self.args.dataset_seed+2)
+
+ for _ in tqdm(range(int(hom_mult*len(self.homology_sets)))):
+ sample, rng = self.generate_sample(rng)
+ dataset.append(sample)
+ return dataset
+
+    def generate_sample(self, rng) -> tuple:
+ """Generates sample for contrastive learning of homology sets
+ Args:
+ rng: numpy random generator
+ Returns:
+            tuple: ({"x": [anchor, *positives, *negatives]}, rng)
+ """
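+        # Draw neg_samples + 1 distinct homology sets without replacement; the
+        # first supplies the anchor and its positives, the rest supply negatives.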
+ if len(self.homology_sets) < self.args.neg_samples+1:
+ self.args.neg_samples = len(self.homology_sets)-1
+ neg_idx = rng.choice(len(self.homology_sets), size=self.args.neg_samples+1, replace=False)
+ pos_idx, neg_idx = neg_idx[0], neg_idx[1:]
+ pos_samples = rng.choice(self.homology_sets[pos_idx], size=self.args.pos_samples+1, replace=False)
+ anchor, pos_samples = pos_samples[0], pos_samples[1:]
+ neg_samples = np.array([rng.choice(self.homology_sets[i],size=self.args.neg_multiple) for i in neg_idx]).flatten()
+ return {"x":[anchor, *pos_samples, *neg_samples]}, rng
+
+ def __getitem__(self, index):
+ # rng = np.random.default_rng(self.args.dataset_seed)
+ try:
+ return self.dataset[index]
+ except Exception:
+ warnings.warn("Could not load sample")
+
+ @property
+ def SUMMARY_STATEMENT(self) -> None:
+ """
+ Prints summary statement with dataset stats
+ """
+ try:
+ return f"Reverse Homology Dataset with {len(self.dataset)} samples\n"\
+ + f"Using Homology sets: {len(self.homology_sets)}\n"\
+ + f"Using {self.args.pos_samples} positive samples and {self.args.neg_samples*self.args.neg_multiple} negative samples\n"
+        except Exception:
+ return "Could not produce summary statement"
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.num_classes = 2
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ super(ReverseHomology, ReverseHomology).add_args(parser)
+ parser.add_argument(
+ "--homology_dataset_folder",
+ type=str,
+        help="folders containing fasta files, separated by commas",
+ )
+ parser.add_argument(
+ "--dataset_seed",
+ type=int,
+ help="seed for dataset generation",
+ )
+ parser.add_argument(
+ "--homology_multiple",
+ type=float,
+ default=1,
+ help="the expected number of times to use each homology set as a positive example",
+ )
+ parser.add_argument(
+ "--pos_samples",
+ type=int,
+ help="number of positive samples to use from the anchor homology set",
+ )
+ parser.add_argument(
+ "--neg_samples",
+ type=int,
+ help="number of homology sets to draw negative samples from",
+ )
+ parser.add_argument(
+ "--max_idr_len",
+ type=int,
+ help="max total length of idrs in a protein",
+ )
+ parser.add_argument(
+ "--compartment_dataset_file",
+ type=str,
+ help="json file containing compartment dataset",
+ )
+ parser.add_argument(
+ "--compartment_dataset_name",
+ type=str,
+ help="protgps name of compartment dataset object",
+ )
+ parser.add_argument(
+ "--use_compartment_dataset",
+ action="store_true",
+ default=False,
+ help="use compartment dataset to generate homology sets",
+ )
+ parser.add_argument(
+ "--use_homology_dataset",
+ action="store_true",
+ default=False,
+ help="use homology dataset to generate homology sets",
+ )
+ parser.add_argument(
+ "--neg_multiple",
+ type=int,
+ default=1,
+ help="number of negative samples to draw from each negative homology set",
+ )
\ No newline at end of file
diff --git a/data/protgps/learning/losses/__init__.py b/data/protgps/learning/losses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/learning/losses/basic.py b/data/protgps/learning/losses/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..357008486dce9093982ceae67a796807c717fc28
--- /dev/null
+++ b/data/protgps/learning/losses/basic.py
@@ -0,0 +1,156 @@
+from protgps.utils.registry import register_object
+import torch
+import torch.nn.functional as F
+import torch.nn as nn
+from collections import OrderedDict
+import pdb
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("cross_entropy", "loss")
+class CrossEntropyLoss(ProtGPS):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def __call__(self, model_output, batch, model, args):
+ logging_dict, predictions = OrderedDict(), OrderedDict()
+ logit = model_output["logit"]
+ loss = F.cross_entropy(logit, batch["y"].long()) * args.ce_loss_lambda
+ logging_dict["cross_entropy_loss"] = loss.detach()
+ predictions["probs"] = F.softmax(logit, dim=-1).detach()
+ predictions["golds"] = batch["y"]
+ predictions["preds"] = predictions["probs"].argmax(axis=-1).reshape(-1)
+ return loss, logging_dict, predictions
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--ce_loss_lambda",
+ type=float,
+ default=1.0,
+ help="Lambda to weigh the cross-entropy loss.",
+ )
+
+
+@register_object("binary_cross_entropy", "loss")
+class BinaryCrossEntropyLoss(ProtGPS):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def __call__(self, model_output, batch, model, args):
+ logging_dict, predictions = OrderedDict(), OrderedDict()
+ logit = model_output["logit"]
+ loss = (
+ F.binary_cross_entropy_with_logits(logit, batch["y"].float())
+ * args.bce_loss_lambda
+ )
+ logging_dict["binary_cross_entropy_loss"] = loss.detach()
+ predictions["probs"] = torch.sigmoid(logit).detach()
+ predictions["golds"] = batch["y"]
+ predictions["preds"] = (predictions["probs"] > 0.5).int()
+ return loss, logging_dict, predictions
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--bce_loss_lambda",
+ type=float,
+ default=1.0,
+ help="Lambda to weigh the binary cross-entropy loss.",
+ )
+
+
+@register_object("survival", "loss")
+class SurvivalLoss(ProtGPS):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def __call__(self, model_output, batch, model, args):
+ logging_dict, predictions = OrderedDict(), OrderedDict()
+ logit = model_output["logit"]
+ y_seq, y_mask = batch["y_seq"], batch["y_mask"]
+ loss = F.binary_cross_entropy_with_logits(
+ logit, y_seq.float(), weight=y_mask.float(), reduction="sum"
+ ) / torch.sum(y_mask.float())
+ logging_dict["survival_loss"] = loss.detach()
+ predictions["probs"] = torch.sigmoid(logit).detach()
+ predictions["golds"] = batch["y"]
+ predictions["censors"] = batch["time_at_event"]
+ return loss, logging_dict, predictions
+
+
+@register_object("ordinal_cross_entropy", "loss")
+class RankConsistentLoss(ProtGPS):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def __call__(self, model_output, batch, model, args):
+ """
+ Computes cross-entropy loss
+
+ If batch contains they key 'has_y', the cross entropy loss will be computed for samples where batch['has_y'] = 1
+ Expects model_output to contain 'logit'
+
+ Returns:
+ loss: cross entropy loss
+ l_dict (dict): dictionary containing cross_entropy_loss detached from computation graph
+ p_dict (dict): dictionary of model predictions and ground truth labels (preds, probs, golds)
+ """
+ loss = 0
+ l_dict, p_dict = OrderedDict(), OrderedDict()
+ logit = model_output["logit"]
+ yseq = batch["yseq"]
+ ymask = batch["ymask"]
+
+ loss = F.binary_cross_entropy_with_logits(
+ logit, yseq.float(), weight=ymask.float(), reduction="sum"
+ ) / torch.sum(ymask.float())
+
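+        # Each logit k scores P(y > k); summing log-sigmoids under a lower-
+        # triangular mask accumulates log P(y > 0..k), so exp() yields a
+        # monotonically non-increasing, rank-consistent probability per threshold.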
+ probs = F.logsigmoid(logit) # log_sum to add probs
+ probs = probs.unsqueeze(1).repeat(1, len(args.rank_thresholds), 1)
+ probs = torch.tril(probs).sum(2)
+ probs = torch.exp(probs)
+
+ p_dict["logits"] = logit.detach()
+ p_dict["probs"] = probs.detach()
+ preds = probs > 0.5 # class = last prob > 0.5
+ preds = preds.sum(-1)
+ p_dict["preds"] = preds
+ p_dict["golds"] = batch["y"]
+
+ return loss, l_dict, p_dict
+
+@register_object("contrastive", "loss")
+class ContrastiveLoss(ProtGPS):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def __call__(self, model_output, batch, model, args):
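+        # InfoNCE-style objective for batch size 1: the anchor is scored against
+        # the mean positive embedding and each negative embedding; the loss is
+        # -log of the softmax weight assigned to the positive score.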
+ logging_dict, predictions = OrderedDict(), OrderedDict()
+ logit = model_output["hidden"]
+ sample_size = 1 + args.pos_samples + args.neg_samples
+ idx = 0 #NOTE: this is for batch size 1
+ anchor = logit[idx]
+ pos_samples = logit[idx+1:idx+args.pos_samples+1].mean(0)
+ neg_samples = logit[idx+args.pos_samples+1:idx+sample_size]
+ assert neg_samples.shape[0] == args.neg_samples
+ pos_score = torch.exp(torch.dot(pos_samples, anchor))
+ neg_score = torch.exp(torch.matmul(neg_samples, anchor)).sum()
+ prob = pos_score/(pos_score + neg_score)
+ loss = -torch.log(prob)
+ logging_dict["contrastive_loss"] = loss.detach()
+ probs = torch.Tensor([prob, 1-prob])
+ predictions["probs"] = probs.detach()
+ predictions["preds"] = (probs > 0.5).int()
+ predictions["golds"] = torch.Tensor([1,0]).int()
+ return loss, logging_dict, predictions
\ No newline at end of file
diff --git a/data/protgps/learning/metrics/__init__.py b/data/protgps/learning/metrics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/learning/metrics/basic.py b/data/protgps/learning/metrics/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dbc216ada9b2ce369e9a04c9fb01b6c51e30f95
--- /dev/null
+++ b/data/protgps/learning/metrics/basic.py
@@ -0,0 +1,359 @@
+from typing import Dict
+from protgps.utils.registry import register_object
+from collections import OrderedDict
+from protgps.utils.classes import ProtGPS
+import numpy as np
+import pdb
+from torchmetrics.functional import (
+ accuracy,
+ auroc,
+ precision,
+ recall,
+ confusion_matrix,
+ f1_score,
+ precision_recall_curve,
+ average_precision,
+)
+from torchmetrics.utilities.compute import auc
+import torch
+import copy
+
+EPSILON = 1e-6
+BINARY_CLASSIF_THRESHOLD = 0.5
+
+precision_recall = lambda probs, golds, **kwargs: (
+ precision(probs, golds, **kwargs),
+ recall(probs, golds, **kwargs),
+)
+
+
+@register_object("classification", "metric")
+class BaseClassification(ProtGPS):
+ def __init__(self, args) -> None:
+ super().__init__()
+
+ @property
+ def metric_keys(self):
+ return ["probs", "preds", "golds"]
+
+ def __call__(self, predictions_dict, args) -> Dict:
+ """
+ Computes standard classification metrics
+
+ Args:
+ predictions_dict: dictionary obtained from computing loss and model outputs
+ * should contain the keys ['probs', 'preds', 'golds']
+ args: argparser Namespace
+
+ Returns:
+ stats_dict (dict): contains (where applicable) values for accuracy, confusion matrix, precision, recall, f1, precision-recall auc, roc auc
+
+ Note:
+            In the multiclass setting (>2 classes), accuracy, micro-f1, micro-recall, and micro-precision are equivalent
+ Macro: calculates metric per class then averages
+ """
+ stats_dict = OrderedDict()
+
+ probs = predictions_dict["probs"] # B, C (float)
+ preds = predictions_dict["preds"] # B
+ golds = predictions_dict["golds"] # B
+ stats_dict["accuracy"] = accuracy(golds, preds)
+ stats_dict["confusion_matrix"] = confusion_matrix(
+ preds, golds, args.num_classes
+ )
+ if args.num_classes == 2:
+ if len(probs.shape) == 1:
+ stats_dict["precision"], stats_dict["recall"] = precision_recall(
+ probs, golds
+ )
+ stats_dict["f1"] = f1_score(probs, golds)
+ pr, rc, _ = precision_recall_curve(probs, golds)
+ stats_dict["pr_auc"] = auc(rc, pr)
+ try:
+ stats_dict["roc_auc"] = auroc(probs, golds, pos_label=1)
+                except Exception:
+ pass
+ else:
+ stats_dict["precision"], stats_dict["recall"] = precision_recall(
+ probs, golds, multiclass=False, num_classes=2
+ )
+ stats_dict["f1"] = f1_score(
+ probs, golds, multiclass=False, num_classes=2
+ )
+ pr, rc, _ = precision_recall_curve(probs, golds, num_classes=2)
+ stats_dict["pr_auc"] = auc(rc[-1], pr[-1])
+ try:
+ stats_dict["roc_auc"] = auroc(probs, golds, num_classes=2)
+                except Exception:
+ pass
+ else:
+ stats_dict["precision"], stats_dict["recall"] = precision_recall(
+ probs, golds, num_classes=args.num_classes, average="macro"
+ )
+ stats_dict["f1"] = f1_score(
+ probs, golds, num_classes=args.num_classes, average="macro"
+ )
+ stats_dict["micro_f1"] = f1_score(
+ probs, golds, num_classes=args.num_classes, average="micro"
+ )
+ if len(torch.unique(golds)) == args.num_classes:
+ pr, rc, _ = precision_recall_curve(
+ probs, golds, num_classes=args.num_classes
+ )
+ stats_dict["pr_auc"] = torch.mean(
+ torch.stack([auc(rc[i], pr[i]) for i in range(args.num_classes)])
+ )
+ stats_dict["roc_auc"] = auroc(
+ probs, golds, num_classes=args.num_classes, average="macro"
+ )
+
+ if args.store_classwise_metrics:
+ classwise_metrics = {}
+ (
+ classwise_metrics["precisions"],
+ classwise_metrics["recalls"],
+ ) = precision_recall(
+ probs, golds, num_classes=args.num_classes, average="none"
+ )
+ classwise_metrics["f1s"] = f1_score(
+ probs, golds, num_classes=args.num_classes, average="none"
+ )
+ pr, rc, _ = precision_recall_curve(
+ probs, golds, num_classes=args.num_classes
+ )
+ classwise_metrics["pr_aucs"] = [
+ auc(rc[i], pr[i]) for i in range(args.num_classes)
+ ]
+ classwise_metrics["accs"] = accuracy(
+ golds, preds, num_classes=args.num_classes, average="none"
+ )
+ try:
+ classwise_metrics["rocaucs"] = auroc(
+ probs, golds, num_classes=args.num_classes, average="none"
+ )
+            except Exception:
+ pass
+
+ for metricname in [
+ "precisions",
+ "recalls",
+ "f1s",
+ "rocaucs",
+ "pr_aucs",
+ "accs",
+ ]:
+ if metricname in classwise_metrics:
+ stats_dict.update(
+ {
+ "class{}_{}".format(i + 1, metricname): v
+ for i, v in enumerate(classwise_metrics[metricname])
+ }
+ )
+ return stats_dict
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--store_classwise_metrics",
+ action="store_true",
+ default=False,
+ help="Whether to log metrics per class or just log average across classes",
+ )
+
+
+@register_object("multilabel_classification", "metric")
+class MultiLabelClassification(ProtGPS):
+ def __init__(self, args) -> None:
+ super().__init__()
+
+ @property
+ def metric_keys(self):
+ return ["probs", "preds", "golds"]
+
+ def __call__(self, predictions_dict, args) -> Dict:
+ """
+ Computes classification metrics for multi-label predictions (i.e., predicting multiple categories independently -- sigmoid outputs)
+
+ Args:
+ predictions_dict: dictionary obtained from computing loss and model outputs
+ * should contain the keys ['probs', 'preds', 'golds']
+ args: argparser Namespace
+
+ Returns:
+ stats_dict (dict): contains (where applicable) values for accuracy, confusion matrix, precision, recall, f1, precision-recall auc, roc auc
+
+ """
+ stats_dict = OrderedDict()
+
+ probs = predictions_dict["probs"] # B, C
+ preds = predictions_dict["preds"] # B, C
+ golds = predictions_dict["golds"].int() # B, C
+ stats_dict["accuracy"] = accuracy(
+ golds, preds, task="multilabel", num_labels=args.num_classes
+ )
+
+ stats_dict["precision"], stats_dict["recall"] = precision_recall(
+ probs,
+ golds,
+ average="macro",
+ task="multilabel",
+ num_labels=args.num_classes,
+ )
+ stats_dict["f1"] = f1_score(
+ probs,
+ golds,
+ num_labels=args.num_classes,
+ average="macro",
+ task="multilabel",
+ )
+ stats_dict["micro_f1"] = f1_score(
+ probs,
+ golds,
+ num_labels=args.num_classes,
+ average="micro",
+ task="multilabel",
+ )
+ stats_dict["ap_score"] = average_precision(
+ probs,
+ golds,
+ num_labels=args.num_classes,
+ average="macro",
+ task="multilabel",
+ )
+
+ stats_dict["roc_auc"] = auroc(
+ probs,
+ golds,
+ num_labels=args.num_classes,
+ average="macro",
+ task="multilabel",
+ )
+
+ return stats_dict
+
+
+@register_object("ordinal_classification", "metric")
+class Ordinal_Classification(BaseClassification):
+ def __call__(self, predictions_dict, args) -> Dict:
+ """
+        Computes classification metrics when predicting multiple independent classes
+
+ Args:
+ predictions_dict: dictionary obtained from computing loss and model outputs
+ args: argparser Namespace
+
+ Returns:
+ stats_dict (dict): contains (where applicable) values for accuracy, confusion matrix, precision, recall, f1, precision-recall auc, roc auc, prefixed by col index
+ """
+ stats_dict = OrderedDict()
+
+ probs = predictions_dict["probs"] # B, C (float)
+ preds = predictions_dict["preds"] # B
+ golds = predictions_dict["golds"] # B
+ stats_dict["accuracy"] = accuracy(golds, preds)
+ stats_dict["confusion_matrix"] = confusion_matrix(
+ preds, golds, args.num_classes + 1
+ )
+
+ for classindex in range(golds.shape[-1]):
+ (
+ stats_dict["class{}_precision".format(classindex)],
+ stats_dict["class{}_recall".format(classindex)],
+ ) = precision_recall(probs, golds)
+ stats_dict["class{}_f1".format(classindex)] = f1_score(probs, golds)
+ pr, rc, _ = precision_recall_curve(probs, golds)
+ stats_dict["class{}_pr_auc".format(classindex)] = auc(rc, pr)
+ try:
+ stats_dict["class{}_roc_auc".format(classindex)] = auroc(
+ probs, golds, pos_label=1
+ )
+            except Exception:
+ pass
+
+ return stats_dict
+
+
+@register_object("survival_classification", "metric")
+class Survival_Classification(BaseClassification):
+ def __call__(self, predictions_dict, args):
+ stats_dict = OrderedDict()
+
+ golds = predictions_dict["golds"]
+ probs = predictions_dict["probs"]
+ preds = probs[:, -1].view(-1) > 0.5
+ probs = probs.reshape((-1, probs.shape[-1]))[:, -1]
+
+ stats_dict["accuracy"] = accuracy(golds, preds)
+
+ if (args.num_classes == 2) and not (
+ np.unique(golds)[-1] > 1 or np.unique(preds)[-1] > 1
+ ):
+ stats_dict["precision"], stats_dict["recall"] = precision_recall(
+ probs, golds
+ )
+ stats_dict["f1"] = f1_score(probs, golds)
+ num_pos = golds.sum()
+ if num_pos > 0 and num_pos < len(golds):
+ stats_dict["auc"] = auroc(probs, golds, pos_label=1)
+ stats_dict["ap_score"] = average_precision(probs, golds)
+ precision, recall, _ = precision_recall_curve(probs, golds)
+ stats_dict["prauc"] = auc(recall, precision)
+ return stats_dict
+
+
+@register_object("discrim_classification", "metric")
+class Discriminator_Classification(BaseClassification):
+ def __init__(self, args) -> None:
+ super().__init__(args)
+
+ @property
+ def metric_keys(self):
+ return ["discrim_probs", "discrim_golds"]
+
+ def __call__(self, predictions_dict, args):
+ stats_dict = OrderedDict()
+
+ golds = predictions_dict["discrim_golds"]
+ probs = predictions_dict["discrim_probs"]
+ preds = predictions_dict["discrim_probs"].argmax(axis=-1).reshape(-1)
+
+ nargs = copy.deepcopy(args)
+ nargs.num_classes = probs.shape[-1]
+ stats_dict = super().__call__(
+ {"golds": golds, "probs": probs, "preds": preds}, nargs
+ )
+ stats_dict = {"discrim_{}".format(k): v for k, v in stats_dict.items()}
+
+ return stats_dict
+
+
+@register_object("multi_discrim_classification", "metric")
+class MultiDiscriminator_Classification(BaseClassification):
+ def __init__(self, args) -> None:
+ super().__init__(args)
+
+ @property
+ def metric_keys(self):
+ return ["device_probs", "device_golds", "thickness_probs", "thickness_golds"]
+
+ def __call__(self, predictions_dict, args):
+ stats_dict = OrderedDict()
+
+ for key in ["device", "thickness"]:
+ golds = predictions_dict["{}_golds".format(key)]
+ probs = predictions_dict["{}_probs".format(key)]
+ preds = predictions_dict["{}_probs".format(key)].argmax(axis=-1).reshape(-1)
+
+ nargs = copy.deepcopy(args)
+ nargs.num_classes = probs.shape[-1]
+ stats_dict = super().__call__(
+ {"golds": golds, "probs": probs, "preds": preds}, nargs
+ )
+ stats_dict = {"{}_{}".format(key, k): v for k, v in stats_dict.items()}
+
+ return stats_dict
diff --git a/data/protgps/learning/optimizers/__init__.py b/data/protgps/learning/optimizers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/learning/optimizers/basic.py b/data/protgps/learning/optimizers/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a0c8e8cfafefa8de4e435f18a85995099bde027
--- /dev/null
+++ b/data/protgps/learning/optimizers/basic.py
@@ -0,0 +1,29 @@
+import torch
+from torch import optim
+from protgps.utils.registry import register_object
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("sgd", "optimizer")
+class SGD(optim.SGD, ProtGPS):
+ """
+ https://pytorch.org/docs/stable/generated/torch.optim.SGD.html#torch.optim.SGD
+ """
+
+ def __init__(self, params, args):
+ super().__init__(
+ params=params,
+ lr=args.lr,
+ momentum=args.momentum,
+ weight_decay=args.weight_decay,
+ )
+
+
+@register_object("adam", "optimizer")
+class Adam(optim.Adam, ProtGPS):
+ """
+ https://pytorch.org/docs/stable/generated/torch.optim.Adam.html#torch.optim.Adam
+ """
+
+ def __init__(self, params, args):
+ super().__init__(params=params, lr=args.lr, weight_decay=args.weight_decay)
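+
+
+# Usage sketch: optimizers are looked up by name at train time, e.g.
+#   get_object(args.optimizer_name, "optimizer")(model.parameters(), args)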
diff --git a/data/protgps/learning/schedulers/__init__.py b/data/protgps/learning/schedulers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/learning/schedulers/basic.py b/data/protgps/learning/schedulers/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d24f7a9482a6586ea82737b09d0f2d4e4357c0e
--- /dev/null
+++ b/data/protgps/learning/schedulers/basic.py
@@ -0,0 +1,53 @@
+import torch
+from torch import optim
+from protgps.utils.registry import register_object
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("reduce_on_plateau", "scheduler")
+class ReduceLROnPlateau(optim.lr_scheduler.ReduceLROnPlateau, ProtGPS):
+ """
+ https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ReduceLROnPlateau.html#torch.optim.lr_scheduler.ReduceLROnPlateau
+ """
+
+ def __init__(self, optimizer, args):
+ super().__init__(
+ optimizer,
+ patience=args.patience,
+ factor=args.lr_decay,
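+            # minimize when monitoring a loss, maximize metrics such as accuracy or auc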
+ mode="min" if "loss" in args.monitor else "max",
+ )
+
+
+@register_object("exponential_decay", "scheduler")
+class ExponentialLR(optim.lr_scheduler.ExponentialLR, ProtGPS):
+ """
+ https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ExponentialLR.html#torch.optim.lr_scheduler.ExponentialLR
+ """
+
+ def __init__(self, optimizer, args):
+ super().__init__(optimizer, gamma=args.lr_decay)
+
+
+@register_object("cosine_annealing", "scheduler")
+class CosineAnnealingLR(optim.lr_scheduler.CosineAnnealingLR, ProtGPS):
+ """
+ https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingLR.html
+ """
+
+ def __init__(self, optimizer, args):
+ super().__init__(optimizer, args.cosine_annealing_period)
+
+
+@register_object("cosine_annealing_restarts", "scheduler")
+class CosineAnnealingWarmRestarts(optim.lr_scheduler.CosineAnnealingWarmRestarts, ProtGPS):
+ """
+ https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.html#torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
+ """
+
+ def __init__(self, optimizer, args):
+ super().__init__(
+ optimizer,
+ T_0=args.cosine_annealing_period,
+ T_mult=args.cosine_annealing_period_scaling,
+ )
diff --git a/data/protgps/learning/searchers/__init__.py b/data/protgps/learning/searchers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/learning/searchers/basic.py b/data/protgps/learning/searchers/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..58ca1904f4f42d6d4e4dcb8b47399b29e2a4e706
--- /dev/null
+++ b/data/protgps/learning/searchers/basic.py
@@ -0,0 +1,16 @@
+import torch
+from torch import optim
+from protgps.utils.registry import register_object
+from protgps.utils.classes import ProtGPS
+from ray.tune.suggest import BasicVariantGenerator
+
+
+@register_object("basic", "searcher")
+class BasicSearch(BasicVariantGenerator, ProtGPS):
+ """Description
+
+ See: https://docs.ray.io/en/releases-0.8.4/tune-searchalg.html#variant-generation-grid-search-random-search
+ """
+
+ def __init__(self, args):
+ super().__init__()
diff --git a/data/protgps/learning/utils.py b/data/protgps/learning/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fee4c724ba464389f0a36306a53ead153c2ebcd
--- /dev/null
+++ b/data/protgps/learning/utils.py
@@ -0,0 +1,5 @@
+def off_diagonal(x):
+ n, m = x.shape
+ assert n == m
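+    # e.g. off_diagonal(torch.arange(9).view(3, 3)) -> tensor([1, 2, 3, 5, 6, 7])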
+ return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
+
\ No newline at end of file
diff --git a/data/protgps/lightning/__init__.py b/data/protgps/lightning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/lightning/base.py b/data/protgps/lightning/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b2e7694821788a6c93795fde86dc134962a17f2
--- /dev/null
+++ b/data/protgps/lightning/base.py
@@ -0,0 +1,455 @@
+import torch
+import pytorch_lightning as pl
+import torch.nn.functional as F
+import numpy as np
+from collections import OrderedDict
+import pickle
+import os
+import errno
+from protgps.utils.registry import get_object, register_object
+from protgps.utils.classes import ProtGPS, set_protgps_type
+
+
+@register_object("base", "lightning")
+class Base(pl.LightningModule, ProtGPS):
+ """
+ PyTorch Lightning module used as base for running training and test loops
+
+ Args:
+ args: argparser Namespace
+ """
+
+ def __init__(self, args):
+ super(Base, self).__init__()
+ self.save_hyperparameters()
+ self.args = args
+ self.model = get_object(args.model_name, "model")(args)
+
+ def setup(self, stage):
+ self.loss_fns = {
+ "train": [get_object(l, "loss")() for l in self.args.loss_names]
+ }
+ self.loss_fns["val"] = self.loss_fns["train"]
+ self.loss_fns["test"] = (
+ self.loss_fns["train"]
+ if self.args.loss_names_for_eval is None
+ else [get_object(l, "loss")() for l in self.args.loss_names_for_eval]
+ )
+ self.metrics = [
+ get_object(m, "metric")(self.args) for m in self.args.metric_names
+ ]
+ self.metric_keys = list(
+ set([key for metric in self.metrics for key in metric.metric_keys])
+ )
+
+ @property
+ def LOG_KEYS(self):
+ return [
+ "loss",
+ "accuracy",
+ "mean",
+ "std",
+ "precision",
+ "recall",
+ "f1",
+ "auc",
+ "similarity",
+ "tau",
+ "mse",
+ "mae",
+ "r2",
+ "c_index",
+ "hit",
+ "pearson",
+ "spearman",
+ ]
+
+ @property
+ def UNLOG_KEYS(self):
+ default = ["activ", "hidden"]
+ keys_to_unlog = []
+ for k in default:
+ if k not in self.metric_keys:
+ keys_to_unlog.append(k)
+ return keys_to_unlog
+
+ def step(self, batch, batch_idx, optimizer_idx):
+ """
+ Defines a single training or validation step:
+ Computes losses given batch and model outputs
+
+ Returns:
+ logged_output: dict with losses and predictions
+
+ Args:
+            batch: dict obtained from DataLoader. batch must contain the keys ['x', 'sample_id']
+ """
+ logged_output = OrderedDict()
+ model_output = self.model(batch)
+ loss, logging_dict, predictions_dict = self.compute_loss(model_output, batch)
+ predictions_dict = self.store_in_predictions(predictions_dict, batch)
+ predictions_dict = self.store_in_predictions(predictions_dict, model_output)
+
+ logged_output["loss"] = loss
+ logged_output.update(logging_dict)
+ logged_output["preds_dict"] = predictions_dict
+
+ if (
+ (self.args.log_gen_image)
+ and (self.trainer.is_global_zero)
+ and (batch_idx == 0)
+ and (self.current_epoch % 100 == 0)
+ ):
+ self.log_image(model_output, batch)
+
+ return logged_output
+
+ def forward(self, batch, batch_idx=0):
+ """
+ Forward defines the prediction/inference actions
+ Similar to self.step() but also allows for saving predictions and hiddens
+ Computes losses given batch and model outputs
+
+ Returns:
+ logged_output: dict with losses and predictions
+
+ Args:
+            batch: dict obtained from DataLoader. batch must contain the keys ['x', 'sample_id']
+ """
+ logged_output = OrderedDict()
+ model_output = self.model(batch)
+ if not self.args.predict:
+ loss, logging_dict, predictions_dict = self.compute_loss(
+ model_output, batch
+ )
+ predictions_dict = self.store_in_predictions(predictions_dict, batch)
+ predictions_dict = self.store_in_predictions(predictions_dict, model_output)
+ logged_output["loss"] = loss
+ logged_output.update(logging_dict)
+ logged_output["preds_dict"] = predictions_dict
+ if self.args.save_hiddens:
+ logged_output["preds_dict"].update(model_output)
+
+ if (self.args.log_gen_image) and (batch_idx == 0):
+ self.log_image(model_output, batch)
+ return logged_output
+
+ def training_step(self, batch, batch_idx, optimizer_idx=None):
+ """
+ Single training step
+ """
+ self.phase = "train"
+ output = self.step(batch, batch_idx, optimizer_idx)
+ return output
+
+ def validation_step(self, batch, batch_idx, optimizer_idx=None):
+ """
+ Single validation step
+ """
+ self.phase = "val"
+ output = self.step(batch, batch_idx, optimizer_idx)
+ return output
+
+ def test_step(self, batch, batch_idx):
+ """
+ Single testing step
+
+ * save_predictions will save the dictionary output['preds_dict'], which typically includes sample_ids, probs, predictions, etc.
+ * save_hiddens: will save the value of output['preds_dict']['hidden']
+ """
+ self.phase = "test"
+ output = self.forward(batch, batch_idx)
+ if self.args.save_predictions:
+ self.save_predictions(output["preds_dict"])
+ elif self.args.save_hiddens:
+ self.save_hiddens(output["preds_dict"])
+ output = {k: v for k, v in output.items() if k not in self.UNLOG_KEYS}
+ output["preds_dict"] = {
+ k: v for k, v in output["preds_dict"].items() if k not in self.UNLOG_KEYS
+ }
+ return output
+
+ def training_epoch_end(self, outputs):
+ """
+ End of single training epoch
+ - Aggregates predictions and losses from all steps
+ - Computes the metric (auc, accuracy, etc.)
+ """
+ if len(outputs) == 0:
+ return
+ outputs = gather_step_outputs(outputs)
+ outputs["loss"] = outputs["loss"].mean()
+ outputs.update(self.compute_metric(outputs["preds_dict"]))
+ self.log_outputs(outputs, "train")
+ return
+
+ def validation_epoch_end(self, outputs):
+ """
+ End of single validation epoch
+ - Aggregates predictions and losses from all steps
+ - Computes the metric (auc, accuracy, etc.)
+ """
+ if len(outputs) == 0:
+ return
+ outputs = gather_step_outputs(outputs)
+ outputs["loss"] = outputs["loss"].mean()
+ outputs.update(self.compute_metric(outputs["preds_dict"]))
+ self.log_outputs(outputs, "val")
+ return
+
+ def test_epoch_end(self, outputs):
+ """
+ End of testing
+ - Aggregates predictions and losses from all batches
+ - Computes the metric if defined in args
+ """
+ if len(outputs) == 0:
+ return
+ outputs = gather_step_outputs(outputs)
+ if isinstance(outputs.get("loss", 0), torch.Tensor):
+ outputs["loss"] = outputs["loss"].mean()
+ if not self.args.predict:
+ outputs.update(self.compute_metric(outputs["preds_dict"]))
+ self.log_outputs(outputs, "test")
+ return
+
+ def configure_optimizers(self):
+ """
+ Obtain optimizers and hyperparameter schedulers for model
+
+ """
+ optimizer = get_object(self.args.optimizer_name, "optimizer")(
+ self.parameters(), self.args
+ )
+ schedule = get_object(self.args.scheduler_name, "scheduler")(
+ optimizer, self.args
+ )
+
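+        # Lightning consumes this dict; "monitor" matters for metric-driven
+        # schedulers such as reduce_on_plateau and is ignored by the others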
+ scheduler = {
+ "scheduler": schedule,
+ "monitor": self.args.monitor,
+ "interval": "epoch",
+ "frequency": 1,
+ }
+ return [optimizer], [scheduler]
+
+ def compute_loss(self, model_output, batch):
+ """
+ Compute model loss:
+ Iterates through loss functions defined in args and computes losses and predictions
+ Adds losses and stores predictions for batch in dictionary
+
+ Returns:
+ total_loss (torch.Tensor): aggregate loss value that is propagated backwards for gradient computation
+ logging_dict: dict of losses (and other metrics)
+ predictions: dict of predictions (preds, probs, etc.)
+ """
+ total_loss = 0
+ logging_dict, predictions = OrderedDict(), OrderedDict()
+ for loss_fn in self.loss_fns[self.phase]:
+ loss, l_dict, p_dict = loss_fn(model_output, batch, self, self.args)
+ total_loss += loss
+ logging_dict.update(l_dict)
+ predictions.update(p_dict)
+ return total_loss, logging_dict, predictions
+
+ def compute_metric(self, predictions):
+ logging_dict = OrderedDict()
+ for metric_fn in self.metrics:
+ l_dict = metric_fn(predictions, self.args)
+ logging_dict.update(l_dict)
+ return logging_dict
+
+ def store_in_predictions(self, preds, storage_dict):
+ for m in get_object(self.args.dataset_name, "dataset").DATASET_ITEM_KEYS:
+ if m in storage_dict:
+ preds[m] = storage_dict[m]
+
+ for m in self.metric_keys:
+ if m in storage_dict:
+ if torch.is_tensor(storage_dict[m]) and storage_dict[m].requires_grad:
+ preds[m] = storage_dict[m].detach()
+ else:
+ preds[m] = storage_dict[m]
+ return preds
+
+ def log_outputs(self, outputs, key):
+ """
+ Compute performance metrics after epoch ends:
+ Iterates through metric functions defined in args and computes metrics
+ Logs the metric values into logger (Comet, Tensorboard, etc.)
+ """
+ logging_dict = {}
+ for k, v in outputs.items():
+ if isinstance(v, torch.Tensor) and any([i in k for i in self.LOG_KEYS]):
+ logging_dict["{}_{}".format(key, k)] = v.mean()
+ # log clocktime of methods for epoch
+ if (self.args.profiler is not None) and (self.args.log_profiler):
+ logging_dict.update(self.get_time_profile(key))
+ self.log_dict(logging_dict, prog_bar=True, logger=True)
+
+ def get_time_profile(self, key):
+ """Obtain trainer method times
+
+ Args:
+            key (str): one of ['train', 'val', 'test']
+
+ Returns:
+ dict: mean of clocktime of each method for past epoch
+ """
+ if key == "train":
+ num_steps = self.trainer.num_training_batches
+ if key == "val":
+ num_steps = self.trainer.num_val_batches[0]
+ if key == "test":
+ num_steps = self.trainer.num_test_batches[0]
+
+ time_profile = {}
+ for k, v in self.trainer.profiler.recorded_durations.items():
+ time_profile[k] = np.mean(v[-num_steps:])
+ return time_profile
+
+ def save_predictions(self, outputs):
+ """
+ Saves model predictions as pickle files
+ Makes a directory under /inference_dir/experiment_name/
+ Stores predictions for each sample individually under /inference_dir/experiment_name/sample_[sample_id].predictions
+
+ * Requires outputs to contain the keys ['sample_id']
+ """
+ experiment_name = (
+ os.path.splitext(os.path.basename(self.args.checkpoint_path))[0]
+ if (self.args.from_checkpoint and not self.args.train)
+ else self.args.experiment_name
+ )
+ for idx, sampleid in enumerate(outputs["sample_id"]):
+ sampledict = {
+ k: v[idx]
+ for k, v in outputs.items()
+ if (len(v) == len(outputs["sample_id"]))
+ }
+ if "nodeid2nodeidx" in outputs:
+ sampledict["nodeid2nodeidx"] = outputs["nodeid2nodeidx"]
+ for k, v in sampledict.items():
+ if isinstance(v, torch.Tensor) and v.is_cuda:
+ sampledict[k] = v.cpu()
+ predictions_filename = os.path.join(
+ self.args.inference_dir,
+ experiment_name,
+ "sample_{}.predictions".format(sampleid),
+ )
+ dump_pickle(sampledict, predictions_filename)
+
+ def save_hiddens(self, outputs):
+ """
+ Saves the model's hidden layer outputs as pickle files
+ Makes a directory under /inference_dir/experiment_name/
+ Stores predictions for each sample individually under /inference_dir/experiment_name/sample_[sample_id].hiddens
+
+    * Requires outputs to contain the keys ['sample_id', 'hidden']
+ """
+ experiment_name = (
+ os.path.splitext(os.path.basename(self.args.checkpoint_path))[0]
+ if (self.args.from_checkpoint and not self.args.train)
+ else self.args.experiment_name
+ )
+ idx = outputs["sample_id"]
+ # hiddens = nn.functional.normalize(outputs['hidden'], dim = 1)
+ hiddens = [
+ {
+ k: v[i].cpu() if v.is_cuda else v[i]
+ for k, v in outputs.items()
+ if ("hidden" in k) and (len(v) == len(idx))
+ }
+ for i in range(len(idx))
+ ]
+ for i, h in zip(idx, hiddens):
+ predictions_filename = os.path.join(
+ self.args.inference_dir, experiment_name, "sample_{}.hiddens".format(i)
+ )
+ dump_pickle(h, predictions_filename)
+
+ def log_image(self, model_output, batch):
+ # log one sample from each epoch
+ sid = batch["sample_id"][0]
+ for k, v in model_output.items():
+ if "reconstruction" in k:
+ img = model_output[k][0].detach().cpu()
+ if img.shape[0] != 3:
+ img = img.numpy()
+ for cid, chan in enumerate(img):
+ self.logger.log_image(
+ chan,
+ "Sample{}_{}_Chan{}_Epoch{}_Step{}".format(
+ sid, k, cid, self.current_epoch, self.global_step
+ ),
+ )
+ else:
+ img = img.permute(1, 2, 0).numpy()
+ self.logger.log_image(
+ img,
+ "Sample{}_{}_Epoch{}_Step{}".format(
+ sid, k, self.current_epoch, self.global_step
+ ),
+ )
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--model_name",
+ type=str,
+ action=set_protgps_type("model"),
+ default="classifier",
+ help="Name of parent model",
+ )
+
+
+def gather_step_outputs(outputs):
+ """
+ Collates the dictionary outputs from each step into a single dictionary
+
+ Returns:
+ output_dict (dict): dictionary mapping step output keys to lists or tensors
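+
+    Example (sketch):
+        >>> gather_step_outputs([{"loss": torch.tensor(0.5)}, {"loss": torch.tensor(0.7)}])
+        OrderedDict([('loss', tensor([0.5000, 0.7000]))])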
+ """
+
+ output_dict = OrderedDict()
+ if isinstance(outputs[-1], list): # adversarial setting with two optimizers
+ outputs = outputs[0]
+
+ for k in outputs[-1].keys():
+ if k == "preds_dict":
+ output_dict[k] = gather_step_outputs(
+ [output["preds_dict"] for output in outputs]
+ )
+ elif (
+ isinstance(outputs[-1][k], torch.Tensor) and len(outputs[-1][k].shape) == 0
+ ):
+ output_dict[k] = torch.stack([output[k] for output in outputs])
+ elif isinstance(outputs[-1][k], torch.Tensor):
+ output_dict[k] = torch.cat([output[k] for output in outputs], dim=0)
+ else:
+ output_dict[k] = [output[k] for output in outputs]
+ return output_dict
+
+
+def dump_pickle(file_obj, file_name):
+ """
+ Saves object as a binary pickle file
+ Creates directory of file
+ Saves file
+
+ Args:
+ file_obj: object
+ file_name: path to file
+ """
+ if not os.path.exists(os.path.dirname(file_name)):
+ try:
+ os.makedirs(os.path.dirname(file_name))
+ except OSError as exc: # Guard against race condition
+            if exc.errno != errno.EEXIST:
+ raise
+ pickle.dump(file_obj, open(file_name, "wb"))
diff --git a/data/protgps/loggers/__init__.py b/data/protgps/loggers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/loggers/comet.py b/data/protgps/loggers/comet.py
new file mode 100644
index 0000000000000000000000000000000000000000..34a337ae64c21c4f190b8bfdf817846846d26895
--- /dev/null
+++ b/data/protgps/loggers/comet.py
@@ -0,0 +1,25 @@
+from protgps.utils.registry import register_object
+import pytorch_lightning as pl
+import os
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("comet", "logger")
+class COMET(pl.loggers.CometLogger, ProtGPS):
+ def __init__(self, args) -> None:
+ super().__init__(
+ api_key=os.environ.get("COMET_API_KEY"),
+ project_name=args.project_name,
+ experiment_name=args.experiment_name,
+ workspace=args.workspace,
+ log_env_details=True,
+ log_env_cpu=True,
+ )
+
+ def setup(self, **kwargs):
+ self.experiment.set_model_graph(kwargs["model"])
+ self.experiment.add_tags(kwargs["args"].logger_tags)
+ self.experiment.log_parameters(kwargs["args"])
+
+ def log_image(self, image, name):
+ self.experiment.log_image(image, name)
diff --git a/data/protgps/loggers/tensorboard.py b/data/protgps/loggers/tensorboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94ae7b54d59aa179c55435da1789610b5e670c8
--- /dev/null
+++ b/data/protgps/loggers/tensorboard.py
@@ -0,0 +1,25 @@
+from protgps.utils.registry import register_object
+import pytorch_lightning as pl
+import os
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("tensorboard", "logger")
+class PLTensorBoardLogger(pl.loggers.TensorBoardLogger, ProtGPS):
+ def __init__(self, args) -> None:
+ super().__init__(args.logger_dir)
+
+ def setup(self, **kwargs):
+ pass
+
+ def log_image(self, image, name):
+ pass
+
+ @staticmethod
+ def add_args(parser) -> None:
+ parser.add_argument(
+ "--logger_dir",
+ type=str,
+ default=".",
+ help="directory to save tensorboard logs",
+ )
diff --git a/data/protgps/loggers/wandb.py b/data/protgps/loggers/wandb.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e0472f75dd726e4fe6e0f4955535eef258aa414
--- /dev/null
+++ b/data/protgps/loggers/wandb.py
@@ -0,0 +1,25 @@
+from protgps.utils.registry import register_object
+import pytorch_lightning as pl
+import os
+from protgps.utils.classes import ProtGPS
+
+
+@register_object("wandb", "logger")
+class WandB(pl.loggers.WandbLogger, ProtGPS):
+ def __init__(self, args) -> None:
+ super().__init__(
+ project=args.project_name,
+ name=args.experiment_name,
+ entity=args.workspace,
+            tags=args.logger_tags,
+ )
+
+ def setup(self, **kwargs):
+ # "gradients", "parameters", "all", or None
+ # # change "log_freq" log frequency of gradients and parameters (100 steps by default)
+ if kwargs["args"].local_rank == 0:
+ self.watch(kwargs["model"], log="all")
+ self.experiment.config.update(kwargs["args"])
+
+    def log_image(self, image, name):
+        # delegate to WandbLogger.log_image; calling self.log_image here would recurse forever
+        super().log_image(key=name, images=[image])
diff --git a/data/protgps/models/__init__.py b/data/protgps/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/models/abstract.py b/data/protgps/models/abstract.py
new file mode 100644
index 0000000000000000000000000000000000000000..46a2845a019c9860ef419ccbe86ab3f89f703dde
--- /dev/null
+++ b/data/protgps/models/abstract.py
@@ -0,0 +1,13 @@
+import torch.nn as nn
+from protgps.utils.classes import ProtGPS
+from abc import ABCMeta, abstractmethod
+
+# from efficientnet_pytorch import EfficientNet
+import math
+
+
+class AbstractModel(nn.Module, ProtGPS):
+ __metaclass__ = ABCMeta
+
+ def __init__(self):
+ super(AbstractModel, self).__init__()
diff --git a/data/protgps/models/classifier.py b/data/protgps/models/classifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e6554861fb5fa5b214b9ec9c118e8b499a25f3e
--- /dev/null
+++ b/data/protgps/models/classifier.py
@@ -0,0 +1,123 @@
+import torch
+import torch.nn as nn
+import copy
+from protgps.utils.registry import register_object, get_object
+from protgps.utils.classes import set_protgps_type
+from protgps.models.abstract import AbstractModel
+
+
+@register_object("classifier", "model")
+class Classifier(AbstractModel):
+ def __init__(self, args):
+ super(Classifier, self).__init__()
+
+ self.args = args
+ self.encoder = get_object(args.model_name_for_encoder, "model")(args)
+ cargs = copy.deepcopy(args)
+ self.mlp = get_object("mlp_classifier", "model")(cargs)
+
+ def forward(self, batch=None):
+ output = {}
+ output["encoder_hidden"] = self.encoder(batch)["hidden"]
+ output.update(self.mlp({"x": output["encoder_hidden"]}))
+ return output
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--model_name_for_encoder",
+ type=str,
+ action=set_protgps_type("model"),
+ default="resnet18",
+ help="Name of encoder to use",
+ )
+ parser.add_argument(
+ "--mlp_input_dim", type=int, default=512, help="Dim of input to mlp"
+ )
+ parser.add_argument(
+ "--mlp_layer_configuration",
+ type=int,
+ nargs="*",
+ default=[128, 128],
+ help="MLP layer dimensions",
+ )
+ parser.add_argument(
+ "--mlp_use_batch_norm",
+ action="store_true",
+ default=False,
+ help="Use batchnorm in mlp",
+ )
+ parser.add_argument(
+ "--mlp_use_layer_norm",
+ action="store_true",
+ default=False,
+ help="Use LayerNorm in mlp",
+ )
+
+
+@register_object("mlp_classifier", "model")
+class MLPClassifier(AbstractModel):
+ def __init__(self, args):
+ super(MLPClassifier, self).__init__()
+
+ self.args = args
+
+ model_layers = []
+ cur_dim = args.mlp_input_dim
+ for layer_size in args.mlp_layer_configuration:
+ model_layers.extend(self.append_layer(cur_dim, layer_size, args))
+ cur_dim = layer_size
+
+ self.mlp = nn.Sequential(*model_layers)
+ self.predictor = nn.Linear(cur_dim, args.num_classes)
+
+ def append_layer(self, cur_dim, layer_size, args, with_dropout=True):
+ linear_layer = nn.Linear(cur_dim, layer_size)
+ bn = nn.BatchNorm1d(layer_size)
+ ln = nn.LayerNorm(layer_size)
+ if args.mlp_use_batch_norm:
+ seq = [linear_layer, bn, nn.ReLU()]
+ elif args.mlp_use_layer_norm:
+ seq = [linear_layer, ln, nn.ReLU()]
+ else:
+ seq = [linear_layer, nn.ReLU()]
+ if with_dropout:
+ seq.append(nn.Dropout(p=args.dropout))
+ return seq
+
+ def forward(self, batch=None):
+ output = {}
+ z = self.mlp(batch["x"])
+ output["logit"] = self.predictor(z)
+ output["hidden"] = z
+ return output
+
+ @staticmethod
+ def add_args(parser):
+ parser.add_argument(
+ "--mlp_input_dim", type=int, default=512, help="Dim of input to mlp"
+ )
+ parser.add_argument(
+ "--mlp_layer_configuration",
+ type=int,
+ nargs="*",
+ default=[128, 128],
+ help="MLP layer dimensions",
+ )
+ parser.add_argument(
+ "--mlp_use_batch_norm",
+ action="store_true",
+ default=False,
+ help="Use batchnorm in mlp",
+ )
+ parser.add_argument(
+ "--mlp_use_layer_norm",
+ action="store_true",
+ default=False,
+ help="Use LayerNorm in mlp",
+ )
diff --git a/data/protgps/models/fair_esm.py b/data/protgps/models/fair_esm.py
new file mode 100644
index 0000000000000000000000000000000000000000..879fb714e3346ef123943a57d15c9ae819c98697
--- /dev/null
+++ b/data/protgps/models/fair_esm.py
@@ -0,0 +1,585 @@
+import torch
+import torch.nn as nn
+import copy
+from protgps.models.abstract import AbstractModel
+from protgps.utils.classes import set_protgps_type
+from protgps.utils.registry import register_object, get_object
+from torch.nn.utils.rnn import pad_sequence
+import functools
+import numpy as np
+
+
+@register_object("fair_esm", "model")
+class FairEsm(AbstractModel):
+ """
+ Refer to https://github.com/facebookresearch/esm#available-models
+ """
+
+ def __init__(self, args):
+ super(FairEsm, self).__init__()
+ self.args = args
+ torch.hub.set_dir(args.pretrained_hub_dir)
+ self.model, self.alphabet = torch.hub.load(
+ "facebookresearch/esm:main", args.esm_name
+ )
+ self.batch_converter = (
+ self.alphabet.get_batch_converter()
+ ) # TODO: Move to dataloader, so that we can batch in parallel
+ self.register_buffer("devicevar", torch.zeros(1, dtype=torch.int8))
+ if args.freeze_esm:
+ self.model.eval()
+
+ self.repr_layer = args.esm_hidden_layer
+ print("Using ESM hidden layers", self.repr_layer)
+
+ def forward(self, x, tokens=False, soft=False):
+ """
+ x: list of str (protein sequences)
+ tokens: tokenized or tensorized input
+ soft: embeddings precomputed
+ """
+ output = {}
+ if tokens:
+ batch_tokens = x.unsqueeze(0)
+ else:
+ fair_x = self.truncate_protein(x, self.args.max_prot_len)
+ batch_labels, batch_strs, batch_tokens = self.batch_converter(fair_x)
+
+ batch_tokens = batch_tokens.to(self.devicevar.device)
+
+ # use partial for cleanness
+ model_func = functools.partial(
+ self.model,
+ repr_layers=[self.repr_layer],
+ return_contacts=False,
+ )
+ if soft:
+ model_func = functools.partial(model_func, soft=soft)
+
+ if self.args.freeze_esm:
+ with torch.no_grad():
+ result = model_func(batch_tokens)
+ else:
+ result = model_func(batch_tokens)
+
+ # Generate per-sequence representations via averaging
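+        # the batch converter prepends a BOS token, so residue i sits at index i + 1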
+ hiddens = []
+ for sample_num, sample in enumerate(x):
+ hiddens.append(
+ result["representations"][self.repr_layer][
+ sample_num, 1 : len(sample) + 1
+ ].mean(0)
+ )
+ if self.args.output_residue_hiddens:
+ output["residues"] = result["representations"][self.repr_layer]
+
+ output["hidden"] = torch.stack(hiddens)
+
+ return output
+
+ def truncate_protein(self, x, max_length=None):
+ # max length allowed is 1024
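+        # keep max_length - 2 residues to leave room for the BOS/EOS tokens added on tokenization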
+ return [
+ (i, s[: max_length - 2])
+ if not isinstance(x[0], list)
+ else (i, s[0][: max_length - 2])
+ for i, s in enumerate(x)
+ ]
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--pretrained_hub_dir",
+ type=str,
+ default="/home/protgps/esm_models",
+ help="directory to torch hub where pretrained models are saved",
+ )
+ parser.add_argument(
+ "--esm_name",
+ type=str,
+ default="esm2_t12_35M_UR50D",
+ help="directory to torch hub where pretrained models are saved",
+ )
+ parser.add_argument(
+ "--freeze_esm",
+ action="store_true",
+ default=False,
+ help="do not update encoder weights",
+ )
+ parser.add_argument(
+ "--esm_hidden_layer",
+ type=int,
+ default=12,
+ help="do not update encoder weights",
+ )
+ parser.add_argument(
+ "--output_residue_hiddens",
+ action="store_true",
+ default=False,
+ help="do not return residue-level hiddens, only sequence average",
+ )
+
+
+@register_object("fair_esm2", "model")
+class FairEsm2(FairEsm):
+ # def forward(self, x):
+ # """
+ # x: list of str (protein sequences)
+ # """
+ # output = {}
+ # fair_x = self.truncate_protein(x)
+ # batch_labels, batch_strs, batch_tokens = self.batch_converter(fair_x)
+ # batch_tokens = batch_tokens.to(self.devicevar.device)
+
+ # if self.args.freeze_esm:
+ # with torch.no_grad():
+ # result = self.model(
+ # batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
+ # )
+ # else:
+ # result = self.model(
+ # batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
+ # )
+
+ # # Generate per-sequence representations via averaging
+ # hiddens = []
+ # for sample_num, sample in enumerate(x):
+ # hiddens.append(
+ # result["representations"][self.repr_layer][
+ # sample_num, 1 : len(sample) + 1
+ # ]
+ # )
+ # if self.args.output_residue_hiddens:
+ # output["residues"] = result["representations"][self.repr_layer]
+
+ # output["hidden"] = hiddens
+ # return output
+
+ def truncate_protein(self, x, max_length=torch.inf):
+ return [
+ (i, s) if not isinstance(x[0], list) else (i, s[0]) for i, s in enumerate(x)
+ ]
+
+
+@register_object("fair_esm_fast", "model")
+class FairEsmFast(FairEsm):
+ def forward(self, x, tokens=False, soft=False):
+ """
+ x: list of str (protein sequences)
+ """
+ output = {}
+ if tokens:
+ batch_tokens = x.unsqueeze(0)
+ else:
+ fair_x = [(i, v) for i, v in enumerate(x)]
+ batch_labels, batch_strs, batch_tokens = self.batch_converter(fair_x)
+ batch_tokens = batch_tokens.to(self.devicevar.device)
+
+ # use partial for cleanness
+ model_func = functools.partial(
+ self.model,
+ repr_layers=[self.repr_layer],
+ return_contacts=False,
+ )
+ if soft:
+ model_func = functools.partial(model_func, soft=soft)
+
+ if self.args.freeze_esm:
+ with torch.no_grad():
+ result = model_func(batch_tokens)
+ else:
+ result = model_func(batch_tokens)
+
+ if self.args.output_residue_hiddens:
+ output["residues"] = result["representations"][self.repr_layer]
+
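+        # note: this mean runs over every token position (BOS/EOS and padding included),
+        # which is faster than FairEsm's per-sequence slicing but slightly less exact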
+ output["hidden"] = result["representations"][self.repr_layer].mean(axis=1)
+ return output
+
+
+@register_object("reverse_hom", "model")
+class ReverseHomology(FairEsm):
+ def forward(self, batch):
+ """
+ x: list of str (protein sequences)
+ """
+ output = {}
+ x = np.array(batch["x"]).reshape(-1, order="F")
+ fair_x = [(i, v) for i, v in enumerate(x)]
+ _, _, batch_tokens = self.batch_converter(fair_x)
+ batch_tokens = batch_tokens.to(self.devicevar.device)
+ if self.args.freeze_esm:
+ with torch.no_grad():
+ result = self.model(
+ batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
+ )
+ else:
+ result = self.model(
+ batch_tokens, repr_layers=[self.repr_layer], return_contacts=False
+ )
+ if self.args.output_residue_hiddens:
+ output["residues"] = result["representations"][self.repr_layer]
+
+ # NOTE: works for batch size of 1 only (otherwise need to reshape)
+ output["hidden"] = result["representations"][self.repr_layer].mean(axis=1)
+
+ return output
+
+
+@register_object("protein_encoder", "model")
+class ProteinEncoder(AbstractModel):
+ def __init__(self, args):
+ super(ProteinEncoder, self).__init__()
+ self.args = args
+ self.encoder = get_object(args.protein_encoder_type, "model")(args)
+ cargs = copy.deepcopy(args)
+ cargs.mlp_input_dim = args.protein_hidden_dim
+ args.freeze_esm = args.freeze_encoder
+ self.mlp = get_object(args.protein_classifer, "model")(cargs)
+ if self.args.freeze_encoder:
+ self.encoder.eval()
+
+ def forward(self, batch, tokens=False, soft=False):
+ output = {}
+ if self.args.freeze_encoder:
+ with torch.no_grad():
+ output_esm = self.encoder(batch["x"], tokens=tokens, soft=soft)
+ else:
+ output_esm = self.encoder(batch["x"], tokens=tokens, soft=soft)
+ # output["protein_hidden"] = output_esm["hidden"]
+ output.update(self.mlp({"x": output_esm["hidden"]}))
+ return output
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--protein_encoder_type",
+ type=str,
+ default="fair_esm2",
+ help="name of the protein encoder",
+ action=set_protgps_type("model"),
+ )
+ parser.add_argument(
+ "--freeze_encoder",
+ action="store_true",
+ default=False,
+ help="do not update encoder weights",
+ )
+ parser.add_argument(
+ "--protein_hidden_dim",
+ type=int,
+ default=480,
+ help="hidden dimension of the protein",
+ )
+ parser.add_argument(
+ "--protein_classifer",
+ type=str,
+ default="mlp_classifier",
+ help="name of classifier",
+ action=set_protgps_type("model"),
+ )
+
+
+@register_object("protein_encoder_attention", "model")
+class ProteinEncoderAttention(ProteinEncoder):
+ def __init__(self, args):
+ super(ProteinEncoder, self).__init__()
+ self.args = args
+ self.encoder = get_object(args.protein_encoder_type, "model")(args)
+ cargs = copy.deepcopy(args)
+ cargs.mlp_input_dim = args.protein_hidden_dim
+ args.freeze_esm = args.freeze_encoder
+ self.mlp = get_object(args.protein_classifer, "model")(cargs)
+ if self.args.freeze_encoder:
+ self.encoder.eval()
+
+ heads = 8
+ encoder_layer = nn.TransformerEncoderLayer(
+ d_model=args.protein_hidden_dim, nhead=heads
+ )
+ self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
+
+ def forward(self, batch):
+ output = {}
+ if self.args.freeze_encoder:
+ with torch.no_grad():
+ output_esm = self.encoder(batch["x"])
+ else:
+ output_esm = self.encoder(batch["x"])
+
+ v_attention = []
+ for v in output_esm["hidden"]:
+ v = self.transformer_encoder(v)
+ v_attention.append(v.mean(0))
+
+ output.update(self.mlp({"x": torch.stack(v_attention)}))
+ return output
+
+
+@register_object("protein_encoder_esm_embeddings", "model")
+class ProteinEncoderESMEmbeddings(ProteinEncoder):
+ def forward(self, batch):
+ output = {}
+
+ fair_x = self.encoder.truncate_protein(batch["x"])
+ _, _, batch_tokens = self.encoder.batch_converter(fair_x)
+ batch_tokens = batch_tokens.to(self.encoder.devicevar.device)
+ esm_embedded = self.encoder.model.embed_tokens(batch_tokens).mean(1)
+
+ # output["protein_hidden"] = output_esm["hidden"]
+ output.update(self.mlp({"x": esm_embedded}))
+ return output
+
+
+@register_object("idr_encoder", "model")
+class IDREncoder(ProteinEncoder):
+ def forward(self, batch):
+ output = {}
+
+ if self.args.freeze_encoder:
+ with torch.no_grad():
+ idr_embeddings = self._forward_function(batch)
+ else:
+ idr_embeddings = self._forward_function(batch)
+
+ output.update(self.mlp({"x": torch.stack(idr_embeddings)}))
+ return output
+
+ def _forward_function(self, batch) -> list:
+ output_esm = self.encoder(batch["x"])
+ # mask out non-idr residues and average
+ B, N, H = output_esm["residues"].shape
+ mask = torch.zeros(B, N)
+ for i in range(B):
+ mask[i, batch["start_idx"][i] : batch["end_idx"][i]] = 1
+
+ idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
+ output_esm["residues"].device
+ )
+ idr_embeddings = []
+ for idx, sample in enumerate(idr_residue_embeddings):
+ avg_sample = sample.sum(0) / mask[idx].sum()
+ idr_embeddings.append(avg_sample)
+
+ return idr_embeddings
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.output_residue_hiddens = True
+
+
+@register_object("all_idr_encoder", "model")
+class AllIDREncoder(ProteinEncoder):
+ def forward(self, batch):
+ output = {}
+
+ if self.args.freeze_encoder:
+ with torch.no_grad():
+ idr_embeddings = self._forward_function(batch)
+ else:
+ idr_embeddings = self._forward_function(batch)
+
+ output.update(self.mlp({"x": torch.stack(idr_embeddings)}))
+ return output
+
+ def _forward_function(self, batch) -> list:
+ output_esm = self.encoder(batch["x"])
+
+ # mask out non-idr residues and average
+ B, N, H = output_esm["residues"].shape
+ mask = torch.zeros(B, N)
+
+ for i in range(B):
+ start_indices = [int(n) for n in batch["start_indices"][i].split("_")]
+ end_indices = [int(n) for n in batch["end_indices"][i].split("_")]
+ for idr_idx in range(len(start_indices)):
+ mask[i, start_indices[idr_idx] : end_indices[idr_idx]] = 1
+
+ idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
+ output_esm["residues"].device
+ )
+ idr_embeddings = []
+ for idx, sample in enumerate(idr_residue_embeddings):
+ avg_sample = sample.sum(0) / mask[idx].sum()
+ idr_embeddings.append(avg_sample)
+
+ return idr_embeddings
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.output_residue_hiddens = True
+
+
+@register_object("all_idr_esm_embeddings_encoder", "model")
+class AllIDRESMEmbeddingsEncoder(ProteinEncoder):
+ def forward(self, batch):
+ output = {}
+
+ fair_x = self.encoder.truncate_protein(batch["x"])
+ _, _, batch_tokens = self.encoder.batch_converter(fair_x)
+ batch_tokens = batch_tokens.to(self.encoder.devicevar.device)
+ esm_embedded = self.encoder.model.embed_tokens(batch_tokens)
+
+ # mask out non-idr residues and average
+ B, N, H = esm_embedded.shape
+ mask = torch.zeros(B, N)
+
+ for i in range(B):
+ start_indices = [int(n) for n in batch["start_indices"][i].split("_")]
+ end_indices = [int(n) for n in batch["end_indices"][i].split("_")]
+ for idr_idx in range(len(start_indices)):
+ mask[i, start_indices[idr_idx] : end_indices[idr_idx]] = 1
+
+ idr_residue_embeddings = esm_embedded * mask.unsqueeze(-1).to(
+ esm_embedded.device
+ )
+ idr_embeddings = []
+ for idx, sample in enumerate(idr_residue_embeddings):
+ avg_sample = sample.sum(0) / mask[idx].sum()
+ idr_embeddings.append(avg_sample)
+
+ output.update(self.mlp({"x": torch.stack(idr_embeddings)}))
+ return output
+
+
+@register_object("all_not_idr_esm_embeddings_encoder", "model")
+class AllNotIDRESMEmbeddingsEncoder(ProteinEncoder):
+ def forward(self, batch):
+ output = {}
+
+ fair_x = self.encoder.truncate_protein(batch["x"])
+ _, _, batch_tokens = self.encoder.batch_converter(fair_x)
+ batch_tokens = batch_tokens.to(self.encoder.devicevar.device)
+ esm_embedded = self.encoder.model.embed_tokens(batch_tokens)
+
+ # mask out non-idr residues and average
+ B, N, H = esm_embedded.shape
+ mask = torch.ones(B, N)
+
+ for i in range(B):
+ start_indices = [int(n) for n in batch["start_indices"][i].split("_")]
+ end_indices = [int(n) for n in batch["end_indices"][i].split("_")]
+ for idr_idx in range(len(start_indices)):
+ mask[i, start_indices[idr_idx] : end_indices[idr_idx]] = 0
+
+ idr_residue_embeddings = esm_embedded * mask.unsqueeze(-1).to(
+ esm_embedded.device
+ )
+ idr_embeddings = []
+ for idx, sample in enumerate(idr_residue_embeddings):
+ avg_sample = sample.sum(0) / mask[idx].sum()
+ idr_embeddings.append(avg_sample)
+
+ output.update(self.mlp({"x": torch.stack(idr_embeddings)}))
+ return output
+
+
+@register_object("all_not_idr_encoder", "model")
+class AllNotIDREncoder(AllIDREncoder):
+ def _forward_function(self, batch) -> list:
+ output_esm = self.encoder(batch["x"])
+
+ # mask out non-idr residues and average
+ B, N, H = output_esm["residues"].shape
+ mask = torch.ones(B, N)
+
+ for i in range(B):
+ start_indices = [int(n) for n in batch["start_indices"][i].split("_")]
+ end_indices = [int(n) for n in batch["end_indices"][i].split("_")]
+ for idr_idx in range(len(start_indices)):
+ mask[i, start_indices[idr_idx] : end_indices[idr_idx]] = 0
+
+ idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
+ output_esm["residues"].device
+ )
+ idr_embeddings = []
+ for idx, sample in enumerate(idr_residue_embeddings):
+ avg_sample = sample.sum(0) / mask[idx].sum()
+ idr_embeddings.append(avg_sample)
+
+ return idr_embeddings
+
+
+@register_object("context_idr_hiddens", "model")
+class ContextIDREncoder(ProteinEncoder):
+ def forward(self, batch):
+ output = {}
+
+ if self.args.freeze_encoder:
+ with torch.no_grad():
+ idr_embeddings = self._forward_function(batch)
+ else:
+            idr_embeddings = self._forward_function(batch)
+
+ output["hidden"] = torch.stack(idr_embeddings)
+ return output
+
+ def _forward_function(self, batch) -> list:
+ output_esm = self.encoder(batch["x"])
+ # mask out non-idr residues and average
+ B, N, H = output_esm["residues"].shape
+ mask = torch.zeros(B, N)
+ for i in range(B):
+ mask[i, batch["start_idx"][i] : batch["end_idx"][i]] = 1
+
+ idr_residue_embeddings = output_esm["residues"] * mask.unsqueeze(-1).to(
+ output_esm["residues"].device
+ )
+ idr_embeddings = []
+ for idx, sample in enumerate(idr_residue_embeddings):
+ avg_sample = sample.sum(0) / mask[idx].sum()
+ idr_embeddings.append(avg_sample)
+
+ return idr_embeddings
+
+ @staticmethod
+ def set_args(args) -> None:
+ args.output_residue_hiddens = True
+
+
+@register_object("fair_esm_hiddens", "model")
+class FairEsmHiddens(AbstractModel):
+ def __init__(self, args):
+ super(FairEsmHiddens, self).__init__()
+ self.args = args
+ self.encoder = get_object(args.fair_esm_type, "model")(args)
+ if self.args.freeze_esm:
+ self.encoder.eval()
+
+ def forward(self, batch):
+ output = {}
+ if self.args.freeze_esm:
+ with torch.no_grad():
+ output_esm = self.encoder(batch["x"])
+ else:
+ output_esm = self.encoder(batch["x"])
+
+ return output_esm
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ parser.add_argument(
+ "--fair_esm_type",
+ type=str,
+ default="fair_esm2",
+ help="name of the protein encoder",
+ action=set_protgps_type("model"),
+ )
diff --git a/data/protgps/utils/__init__.py b/data/protgps/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/protgps/utils/callbacks.py b/data/protgps/utils/callbacks.py
new file mode 100644
index 0000000000000000000000000000000000000000..efdc678bb57e0939990b818e28ab84ca6667fb4d
--- /dev/null
+++ b/data/protgps/utils/callbacks.py
@@ -0,0 +1,57 @@
+from pytorch_lightning.callbacks import Callback
+from protgps.utils.registry import get_object
+
+
+def set_callbacks(trainer, args):
+ """
+ Set callbacks for trainer, taking into consideration callbacks already set by trainer args.
+ Callbacks that are preset by args and perform the same function as those manually selected
+ are removed by comparing parent classes between callbacks.
+
+ Parameters
+ ----------
+ trainer : pl.Trainer
+ lightning trainer
+ args : Namespace
+ global args
+
+ Returns
+ -------
+ callbacks: list
+ complete list of callbacks to be used by trainer
+ """
+ callbacks = []
+ for cback in args.callback_names:
+ callbacks.append(get_object(cback, "callback")(args))
+
+ # remove callbacks that are set manually
+ redundant_callbacks = []
+ for cback in trainer.callbacks:
+ parent_cls_preset = get_callback_parent_class(cback)
+ for new_cback in callbacks:
+ parent_cls_postset = get_callback_parent_class(new_cback)
+ if parent_cls_preset == parent_cls_postset:
+ redundant_callbacks.append(cback)
+
+ for cback in trainer.callbacks:
+ if cback not in redundant_callbacks:
+ callbacks.append(cback)
+
+ return callbacks
+
+
+def get_callback_parent_class(obj):
+ """
+ Parameters
+ ----------
+ obj : Callback
+ instance of a callback class
+
+ Returns
+ -------
+ class
+ parent class of callback that is the first child of the Callback class
+ """
+ parent_id = [cls == Callback for cls in obj.__class__.__mro__].index(True)
+ parent_cls = obj.__class__.__mro__[parent_id - 1]
+ return parent_cls
diff --git a/data/protgps/utils/classes.py b/data/protgps/utils/classes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f30c976511fdbaf5a62e814e6e55e22115e60973
--- /dev/null
+++ b/data/protgps/utils/classes.py
@@ -0,0 +1,126 @@
+from abc import ABCMeta
+import argparse
+from protgps.utils.registry import get_object
+
+INITED_OBJ = []
+
+
+class classproperty(object):
+ """
+ Method decorator behaves as @classmethod + @property
+ """
+
+ def __init__(self, fget):
+ self.fget = fget
+
+ def __get__(self, owner_self, owner_cls):
+ return self.fget(owner_cls)
+
+
+class ProtGPS(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, **kwargs) -> None:
+ super(ProtGPS, self).__init__()
+
+ @staticmethod
+ def add_args(parser) -> None:
+ """Add class specific args
+
+ Args:
+ parser (argparse.ArgumentParser): argument parser
+ """
+ pass
+
+ @staticmethod
+ def set_args(args) -> None:
+ """Set values for class specific args
+
+ Args:
+ args (argparse.Namespace): arguments
+ """
+ pass
+
+
+def set_protgps_type(object_name):
+ """
+ Build argparse action class for registry items
+ Used to add and set object-level args
+
+ Args:
+ object_name (str): kind of protgps class uses (e.g., dataset, model, lightning)
+
+ Returns:
+ argparse.Action: action for specific protgps class
+ """
+
+ class ProtGPSAction(argparse.Action):
+ def __init__(
+ self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None,
+ ):
+ super().__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar,
+ )
+ self.is_protgps_action = True
+ self.object_name = object_name
+
+ def __call__(self, parser, namespace, values, option_string=None) -> None:
+ setattr(namespace, self.dest, values)
+
+ def add_args(self, parser, values) -> None:
+ """
+ Add object-level args when an add_argument is called
+
+ Args:
+ parser (argparse.parser): protgps parser object
+ values (Union[list, str]): argument values inputted
+ """
+ if isinstance(values, list):
+ for v in values:
+ obj_val_str = f"{v}_{object_name}"
+ # if object has already been called, conflict arises with add parse called multiple times
+ if obj_val_str not in INITED_OBJ:
+ get_object(v, object_name).add_args(parser)
+ INITED_OBJ.append(obj_val_str)
+
+ elif isinstance(values, str):
+ obj_val_str = f"{values}_{object_name}"
+ # if object has already been called, conflict arises with add parse called multiple times
+ if obj_val_str not in INITED_OBJ:
+ get_object(values, object_name).add_args(parser)
+ INITED_OBJ.append(obj_val_str)
+
+ def set_args(self, args, val) -> None:
+ """
+ Call object-level set_args method
+
+ Args:
+ args (argparse.namespace): global args
+ val (Union[list,str]): value for argument
+ """
+ if isinstance(val, list):
+ for v in val:
+ get_object(v, object_name).set_args(args)
+ elif isinstance(val, str):
+ get_object(val, object_name).set_args(args)
+
+ return ProtGPSAction
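+
+
+# Usage sketch (mirrors the add_args methods elsewhere in this repo):
+#   parser.add_argument("--model_name", action=set_protgps_type("model"), default="classifier")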
diff --git a/data/protgps/utils/debug.py b/data/protgps/utils/debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bb6e8acfe5aa88a7752452fc91c898bf3e1f12c
--- /dev/null
+++ b/data/protgps/utils/debug.py
@@ -0,0 +1,13 @@
+def debug_vscode():
+ """
+ Since this requires listening on a local port, this will only work when VSCode is connected to the same machine.
+ Before running, add the following to your launch.json:
+ {"version":"0.2.0","configurations":[{"name":"Python: Remote Attach","type":"python","request":"attach","connect":{"host":"localhost","port":5678},"pathMappings":[{"localRoot":"${workspaceFolder}",
+ "remoteRoot":"."}],"justMyCode":true}]}
+ """
+ import debugpy
+
+ print("Waiting for VSCode debugger to attach...")
+ debugpy.listen(5678)
+ debugpy.wait_for_client()
+ print("VSCode debugger attached!")
diff --git a/data/protgps/utils/download.py b/data/protgps/utils/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..b54d5760a6477c0a95daf3fc4b17f288368d7c27
--- /dev/null
+++ b/data/protgps/utils/download.py
@@ -0,0 +1,37 @@
+# Code taken from
+# https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
+
+
+import requests
+
+
+def download_file_from_google_drive(id, destination):
+ URL = "https://docs.google.com/uc?export=download"
+
+ session = requests.Session()
+
+ response = session.get(URL, params={"id": id}, stream=True)
+ token = get_confirm_token(response)
+
+ if token:
+ params = {"id": id, "confirm": token}
+ response = session.get(URL, params=params, stream=True)
+
+ save_response_content(response, destination)
+
+
+def get_confirm_token(response):
+ for key, value in response.cookies.items():
+ if key.startswith("download_warning"):
+ return value
+
+ return None
+
+
+def save_response_content(response, destination):
+ CHUNK_SIZE = 32768
+
+ with open(destination, "wb") as f:
+ for chunk in response.iter_content(CHUNK_SIZE):
+ if chunk: # filter out keep-alive new chunks
+ f.write(chunk)
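+
+
+# Usage sketch ("FILE_ID" is a placeholder, not a real Drive id):
+#   download_file_from_google_drive("FILE_ID", "/tmp/checkpoint.ckpt")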
diff --git a/data/protgps/utils/loading.py b/data/protgps/utils/loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..55f54fb28274c6b88705114e83993eac544927d8
--- /dev/null
+++ b/data/protgps/utils/loading.py
@@ -0,0 +1,235 @@
+from argparse import Namespace
+import pickle
+import collections.abc as container_abcs
+import re
+from typing import Literal, Optional
+from protgps.utils.registry import get_object
+import torch
+from torch.utils import data
+from protgps.utils.sampler import DistributedWeightedSampler
+from pytorch_lightning.utilities.cloud_io import load as pl_load
+
+
+string_classes = (str, bytes)
+int_classes = int
+np_str_obj_array_pattern = re.compile(r"[SaUO]")
+
+default_collate_err_msg_format = (
+ "default_collate: batch must contain tensors, numpy arrays, numbers, PyG Data or HeteroData, "
+ "dicts, or lists; found {}"
+)
+
+
+def default_collate(batch):
+ r"""Puts each data field into a tensor with outer dimension batch size"""
+
+ elem = batch[0]
+ elem_type = type(elem)
+ if isinstance(elem, torch.Tensor):
+ out = None
+ if torch.utils.data.get_worker_info() is not None:
+ # If we're in a background process, concatenate directly into a
+ # shared memory tensor to avoid an extra copy
+ numel = sum([x.numel() for x in batch])
+ storage = elem._typed_storage()._new_shared(numel, device=elem.device)
+ out = elem.new(storage).view(-1, *list(elem.size()))
+ return torch.stack(batch, 0, out=out)
+ elif (
+ elem_type.__module__ == "numpy"
+ and elem_type.__name__ != "str_"
+ and elem_type.__name__ != "string_"
+ ):
+ if elem_type.__name__ == "ndarray" or elem_type.__name__ == "memmap":
+ # array of string classes and object
+ if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
+ raise TypeError(default_collate_err_msg_format.format(elem.dtype))
+
+ return default_collate([torch.as_tensor(b) for b in batch])
+ elif elem.shape == (): # scalars
+ return torch.as_tensor(batch)
+ elif isinstance(elem, float):
+ return torch.tensor(batch, dtype=torch.float64)
+ elif isinstance(elem, int_classes):
+ return torch.tensor(batch)
+ elif isinstance(elem, string_classes):
+ return batch
+ elif isinstance(elem, container_abcs.Mapping):
+ return {key: default_collate([d[key] for d in batch]) for key in elem}
+ elif isinstance(elem, tuple) and hasattr(elem, "_fields"): # namedtuple
+ return elem_type(*(default_collate(samples) for samples in zip(*batch)))
+ elif isinstance(elem, container_abcs.Sequence):
+ # check to make sure that the elements in batch have consistent size
+ it = iter(batch)
+ elem_size = len(next(it))
+ if not all(len(elem) == elem_size for elem in it):
+ raise RuntimeError("each element in list of batch should be of equal size")
+ transposed = zip(*batch)
+ return [default_collate(samples) for samples in transposed]
+
+ raise TypeError(default_collate_err_msg_format.format(elem_type))
+
+
+def ignore_None_collate(batch):
+ """
+    default_collate wrapper that batches only the non-None values.
+    Useful when dataset.__getitem__ can return None because of some
+    exception, in which case we want to exclude that sample from the batch.
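+
+    Example (sketch):
+        >>> ignore_None_collate([{"y": 1}, None, {"y": 0}])
+        {'y': tensor([1, 0])}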
+ """
+ batch = [x for x in batch if x is not None]
+ if len(batch) == 0:
+ return None
+ return default_collate(batch)
+
+
+def get_train_dataset_loader(args: Namespace, split: Optional[str] = "train"):
+ """Given arg configuration, return appropriate torch.DataLoader
+ for train data loader
+
+ Args:
+ args (Namespace): args
+ split (str, optional): dataset split. Defaults to "train".
+
+ Returns:
+ train_data_loader: iterator that returns batches
+ """
+ train_data = get_object(args.dataset_name, "dataset")(args, split)
+
+ if args.class_bal:
+ if args.strategy == "ddp":
+ sampler = DistributedWeightedSampler(
+ train_data,
+ weights=train_data.weights,
+ replacement=True,
+ rank=args.global_rank,
+ num_replicas=args.world_size,
+ )
+ else:
+ sampler = data.sampler.WeightedRandomSampler(
+ weights=train_data.weights,
+ num_samples=len(train_data),
+ replacement=True,
+ )
+ else:
+ if args.strategy == "ddp":
+ sampler = torch.utils.data.distributed.DistributedSampler(
+ train_data,
+ shuffle=True,
+ rank=args.global_rank,
+ num_replicas=args.world_size,
+ )
+ else:
+ sampler = data.sampler.RandomSampler(train_data)
+
+ train_data_loader = data.DataLoader(
+ train_data,
+ num_workers=args.num_workers,
+ sampler=sampler,
+ pin_memory=True,
+ batch_size=args.batch_size,
+ collate_fn=ignore_None_collate,
+ drop_last=True,
+ )
+
+ return train_data_loader
+
+
+def get_eval_dataset_loader(
+ args: Namespace, split: Literal["train", "dev", "test"], shuffle=False
+):
+ """_summary_
+
+ Args:
+ args (Namespace): args
+ split (Literal["train", "dev", "test"]): dataset split.
+ shuffle (bool, optional): whether to shuffle dataset. Defaults to False.
+
+ Returns:
+ data_loader: iterator that returns batches
+ """
+
+ eval_data = get_object(args.dataset_name, "dataset")(args, split)
+
+ if args.strategy == "ddp":
+ sampler = torch.utils.data.distributed.DistributedSampler(
+ eval_data,
+ shuffle=shuffle,
+ rank=args.global_rank,
+ num_replicas=args.world_size,
+ )
+ else:
+ sampler = (
+ torch.utils.data.sampler.RandomSampler(eval_data)
+ if shuffle
+ else torch.utils.data.sampler.SequentialSampler(eval_data)
+ )
+ data_loader = torch.utils.data.DataLoader(
+ eval_data,
+ batch_size=args.batch_size,
+ num_workers=args.num_workers,
+ collate_fn=ignore_None_collate,
+ pin_memory=True,
+ drop_last=False,
+ sampler=sampler,
+ )
+
+ return data_loader
+
+
+@torch.no_grad()
+def concat_all_gather(tensor):
+ """
+ Performs all_gather operation on the provided tensors.
+ *** Warning ***: torch.distributed.all_gather has no gradient.
+ """
+
+ tensors_gather = [
+ torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())
+ ]
+ torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
+ output = torch.cat(tensors_gather, dim=0)
+ return output
+
+
+def get_lightning_model(args: Namespace):
+ """Create new model or load from checkpoint
+
+ Args:
+ args (Namespace): global args
+
+ Raises:
+        ValueError: checkpoint_path must be a ".args" or ".ckpt" file
+
+ Returns:
+ model: pl.LightningModule instance
+ """
+ if args.from_checkpoint:
+ if args.checkpoint_path.endswith(".args"):
+ snargs = Namespace(**pickle.load(open(args.checkpoint_path, "rb")))
+ # update saved args with new arguments
+ for k, v in vars(args).items():
+ if k not in snargs:
+ setattr(snargs, k, v)
+ model = get_object(snargs.lightning_name, "lightning")(snargs)
+ modelpath = snargs.model_path
+ elif args.checkpoint_path.endswith(".ckpt"):
+ model = get_object(args.lightning_name, "lightning")(args)
+ modelpath = args.checkpoint_path
+ checkpoint = pl_load(
+ args.checkpoint_path, map_location=lambda storage, loc: storage
+ )
+ snargs = checkpoint["hyper_parameters"]["args"]
+ else:
+ raise FileType("checkpoint_path should be an args or ckpt file.")
+ # update args with old args if not found
+ for k, v in vars(snargs).items():
+ if k not in args:
+ setattr(args, k, v)
+ model = model.load_from_checkpoint(
+ checkpoint_path=modelpath,
+ strict=not args.relax_checkpoint_matching,
+ **{"args": args},
+ )
+ else:
+ model = get_object(args.lightning_name, "lightning")(args)
+ return model
diff --git a/data/protgps/utils/messages.py b/data/protgps/utils/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..14c9ddc0abe77c0cd7eaa09c173de41ca6256700
--- /dev/null
+++ b/data/protgps/utils/messages.py
@@ -0,0 +1,3 @@
+# Error Messages
+METAFILE_NOTFOUND_ERR = "Metadata file {} could not be parsed! Exception: {}!"
+LOAD_FAIL_MSG = "Failed to load image: {}\nException: {}"
diff --git a/data/protgps/utils/parsing.py b/data/protgps/utils/parsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fd78428a184aa749c501fba3ca24354a0d37dbe
--- /dev/null
+++ b/data/protgps/utils/parsing.py
@@ -0,0 +1,597 @@
+import argparse
+import os
+import pwd
+from pytorch_lightning import Trainer
+import itertools
+from protgps.utils.registry import md5
+import json
+import copy
+from protgps.utils.classes import set_protgps_type
+
+EMPTY_NAME_ERR = 'Name of augmentation or one of its arguments can\'t be empty\n\
+ Use "name/arg1=value/arg2=value" format'
+POSS_VAL_NOT_LIST = (
+ "Flag {} has an invalid list of values: {}. Length of list must be >=1"
+)
+
+
+class GlobalNamespace(argparse.Namespace):
+ pass
+
+
+def parse_dispatcher_config(config):
+ """
+    Parses an experiment config and creates jobs. For flags that are expected to be a single item,
+    but the config contains a list, this will return one job for each item in the list.
+    :config - experiment_config
+
+    returns: jobs - a list of flag strings, each of which encapsulates one job.
+        *Example: --train --cuda --dropout=0.1 ...
+    returns: experiment_axies - axes that the grid search is searching over
+ """
+
+ assert all(
+ [
+ k
+ in [
+ "script",
+ "available_gpus",
+ "cartesian_hyperparams",
+ "paired_hyperparams",
+ "tune_hyperparams",
+ ]
+ for k in config.keys()
+ ]
+ )
+
+    cartesian_hyperparams = config["cartesian_hyperparams"]
+ paired_hyperparams = config.get("paired_hyperparams", [])
+ flags = []
+ arguments = []
+ experiment_axies = []
+
+ # add anything outside search space as fixed
+ fixed_args = ""
+ for arg in config:
+ if arg not in [
+ "script",
+ "cartesian_hyperparams",
+ "paired_hyperparams",
+ "available_gpus",
+ ]:
+ if type(config[arg]) is bool:
+ if config[arg]:
+ fixed_args += "--{} ".format(str(arg))
+ else:
+ continue
+ else:
+ fixed_args += "--{} {} ".format(arg, config[arg])
+
+ # add paired combo of search space
+ paired_args_list = [""]
+ if len(paired_hyperparams) > 0:
+ paired_args_list = []
+ paired_keys = list(paired_hyperparams.keys())
+ paired_vals = list(paired_hyperparams.values())
+ flags.extend(paired_keys)
+ for paired_combo in zip(*paired_vals):
+ paired_args = ""
+ for i, flg_value in enumerate(paired_combo):
+ if type(flg_value) is bool:
+ if flg_value:
+ paired_args += "--{} ".format(str(paired_keys[i]))
+ else:
+ continue
+ else:
+ paired_args += "--{} {} ".format(
+ str(paired_keys[i]), str(flg_value)
+ )
+ paired_args_list.append(paired_args)
+
+ # add every combo of search space
+ product_flags = []
+    for key, value in cartesian_hyperparams.items():
+ flags.append(key)
+ product_flags.append(key)
+ arguments.append(value)
+ if len(value) > 1:
+ experiment_axies.append(key)
+
+ experiments = []
+ exps_combs = list(itertools.product(*arguments))
+
+ for tpl in exps_combs:
+ exp = ""
+ for idx, flg in enumerate(product_flags):
+ if type(tpl[idx]) is bool:
+ if tpl[idx]:
+ exp += "--{} ".format(str(flg))
+ else:
+ continue
+ else:
+ exp += "--{} {} ".format(str(flg), str(tpl[idx]))
+ exp += fixed_args
+ for paired_args in paired_args_list:
+ experiments.append(exp + paired_args)
+
+ return experiments, flags, experiment_axies
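+
+# Illustrative example (hypothetical values): a config such as
+#   {"script": "main", "available_gpus": [0],
+#    "cartesian_hyperparams": {"lr": [0.001, 0.0001], "dropout": [0.25]}}
+# expands to two jobs, "--lr 0.001 --dropout 0.25 " and "--lr 0.0001 --dropout 0.25 ",
+# with flags ["lr", "dropout"] and experiment_axies ["lr"], since only "lr"
+# takes more than one value.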
+
+
+def prepare_training_config_for_eval(train_config):
+ """Convert training config to an eval config for testing.
+
+ Parameters
+ ----------
+ train_config: dict
+ config with the following structure:
+ {
+ "train_config": , # path to train config
+ "log_dir": , # log directory used by dispatcher during training
+ "eval_args": {} # test set-specific arguments beyond default
+ }
+
+ Returns
+ -------
+ experiments: list
+ flags: list
+ experiment_axies: list
+ """
+
+ train_args = json.load(open(train_config["train_config"], "r"))
+
+ experiments, _, _ = parse_dispatcher_config(train_args)
+ stem_names = [md5(e) for e in experiments]
+ eval_args = copy.deepcopy(train_args)
+ eval_args["cartesian_hyperparams"].update(train_config["eval_args"])
+
+ # reset defaults
+ eval_args["cartesian_hyperparams"]["train"] = [False]
+ eval_args["cartesian_hyperparams"]["test"] = [True]
+ eval_args["cartesian_hyperparams"]["from_checkpoint"] = [True]
+ eval_args["cartesian_hyperparams"]["gpus"] = [1]
+ # eval_args["cartesian_hyperparams"]["comet_tags"][0] += " eval"
+ eval_args["available_gpus"] = train_config["available_gpus"]
+ eval_args["script"] = train_config["script"]
+
+ experiments, flags, experiment_axies = parse_dispatcher_config(eval_args)
+
+ if ("checkpoint_path" not in eval_args["cartesian_hyperparams"]) or (
+ "checkpoint_path" in train_args["cartesian_hyperparams"]
+ ):
+ for (idx, e), s in zip(enumerate(experiments), stem_names):
+ experiments[idx] += " --checkpoint_path {}".format(
+ os.path.join(train_config["log_dir"], "{}.args".format(s))
+ )
+
+ return experiments, flags, experiment_axies
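+
+# Checkpoint resolution sketch: each training job's flag string is hashed with
+# md5 to produce a stem, so the matching eval job is pointed at
+#   --checkpoint_path <log_dir>/<md5(train_flags)>.args
+# which is how eval runs locate the models trained via dispatcher.py.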
+
+
+def get_parser():
+ global_namespace = GlobalNamespace(allow_abbrev=False)
+
+ parser = argparse.ArgumentParser(
+ description="ProtGPS Standard Args.", allow_abbrev=False
+ )
+
+ # -------------------------------------
+ # Run Setup
+ # -------------------------------------
+ parser.add_argument(
+ "--train",
+ action="store_true",
+ default=False,
+ help="Whether or not to train model",
+ )
+ parser.add_argument(
+ "--dev",
+ action="store_true",
+ default=False,
+ help="Whether or not to run model on dev set",
+ )
+ parser.add_argument(
+ "--test",
+ action="store_true",
+ default=False,
+ help="Whether or not to run model on test set",
+ )
+ parser.add_argument(
+ "--predict",
+ action="store_true",
+ default=False,
+ help="Whether to run model for pure prediction where labels are not known",
+ )
+ parser.add_argument(
+ "--eval_on_train",
+ action="store_true",
+ default=False,
+ help="Whether or not to evaluate model on train split",
+ )
+
+ # -------------------------------------
+ # Data
+ # -------------------------------------
+ parser.add_argument(
+ "--dataset_name",
+ type=str,
+ action=set_protgps_type("dataset"),
+ default="mnist",
+ help="Name of dataset",
+ )
+ parser.add_argument(
+ "--img_size",
+ type=int,
+ nargs="+",
+ default=[256, 256],
+ help="Width and height of image in pixels. [default: [256,256]]",
+ )
+ parser.add_argument(
+ "--num_chan", type=int, default=3, help="Number of channels for input image"
+ )
+ parser.add_argument(
+ "--img_mean",
+ type=float,
+ nargs="+",
+ default=[128.1722],
+ help="Mean of image per channel",
+ )
+ parser.add_argument(
+ "--img_std",
+ type=float,
+ nargs="+",
+ default=[87.1849],
+ help="Standard deviation of image per channel",
+ )
+ parser.add_argument(
+ "--img_file_type",
+ type=str,
+ default="png",
+ choices=["png", "dicom"],
+ help="Type of image. one of [png, dicom]",
+ )
+
+ # -------------------------------------
+ # Losses
+ # -------------------------------------
+
+ # losses and metrics
+ parser.add_argument(
+ "--loss_names",
+ type=str,
+ action=set_protgps_type("loss"),
+ nargs="*",
+ default=[],
+ help="Name of loss",
+ )
+ parser.add_argument(
+ "--loss_names_for_eval",
+ type=str,
+ action=set_protgps_type("loss"),
+ nargs="*",
+ default=None,
+ help="Name of loss",
+ )
+
+ # -------------------------------------
+ # Metrics
+ # -------------------------------------
+
+ parser.add_argument(
+ "--metric_names",
+ type=str,
+ action=set_protgps_type("metric"),
+ nargs="*",
+ default=[],
+ help="Name of performance metric",
+ )
+
+ # -------------------------------------
+ # Training Module
+ # -------------------------------------
+
+ parser.add_argument(
+ "--lightning_name",
+ type=str,
+ action=set_protgps_type("lightning"),
+ default="base",
+ help="Name of lightning module",
+ )
+
+ # -------------------------------------
+ # Hyper parameters
+ # -------------------------------------
+ # learning
+ parser.add_argument(
+ "--batch_size",
+ type=int,
+ default=32,
+ help="Batch size for training [default: 128]",
+ )
+ parser.add_argument(
+ "--lr",
+ type=float,
+ default=0.001,
+ help="Initial learning rate [default: 0.001]",
+ )
+ parser.add_argument(
+ "--dropout",
+ type=float,
+ default=0.25,
+ help="Amount of dropout to apply on last hidden layer [default: 0.25]",
+ )
+ parser.add_argument(
+ "--optimizer_name",
+ type=str,
+ action=set_protgps_type("optimizer"),
+ default="adam",
+ help="Optimizer to use [default: adam]",
+ )
+ parser.add_argument(
+ "--momentum", type=float, default=0, help="Momentum to use with SGD"
+ )
+ parser.add_argument(
+ "--lr_decay",
+ type=float,
+ default=0.1,
+ help="Initial learning rate [default: 0.5]",
+ )
+ parser.add_argument(
+ "--weight_decay",
+ type=float,
+ default=0,
+ help="L2 Regularization penaty [default: 0]",
+ )
+
+ # tune
+ parser.add_argument(
+ "--tune_hyperopt",
+ action="store_true",
+ default=False,
+ help="Whether to run hyper-parameter optimization",
+ )
+ parser.add_argument(
+ "--tune_search_alg",
+ type=str,
+ default="search",
+ help="Optimization algorithm",
+ )
+ parser.add_argument(
+ "--tune_hyperparam_names",
+ type=str,
+ nargs="*",
+ default=[],
+ help="Name of parameters being optimized",
+ )
+
+ # -------------------------------------
+ # Schedule
+ # -------------------------------------
+ parser.add_argument(
+ "--scheduler_name",
+ type=str,
+ action=set_protgps_type("scheduler"),
+ default="reduce_on_plateau",
+ help="Name of scheduler",
+ )
+ parser.add_argument(
+ "--cosine_annealing_period",
+ type=int,
+ default=10,
+ help="length of period of lr cosine anneal",
+ )
+ parser.add_argument(
+ "--cosine_annealing_period_scaling",
+ type=int,
+ default=2,
+ help="how much to multiply each period in successive annealing",
+ )
+ parser.add_argument(
+ "--patience",
+ type=int,
+ default=5,
+ help="Number of epochs without improvement on dev before halving learning rate and reloading best model [default: 5]",
+ )
+ parser.add_argument(
+ "--num_adv_steps",
+ type=int,
+ default=1,
+ help="Number of steps for domain adaptation discriminator per one step of encoding model [default: 5]",
+ )
+
+ # -------------------------------------
+ # Callbacks
+ # -------------------------------------
+
+ parser.add_argument(
+ "--callback_names",
+ type=str,
+ action=set_protgps_type("callback"),
+ nargs="*",
+ default=["checkpointer", "lr_monitor"],
+ help="Lightning callbacks",
+ )
+
+ parser.add_argument(
+ "--monitor",
+ type=str,
+ default=None,
+ help="Name of metric to use to decide when to save model",
+ )
+
+ parser.add_argument(
+ "--checkpoint_save_top_k",
+ type=int,
+ default=1,
+ help="the best k models according to the quantity monitored will be saved",
+ )
+ parser.add_argument(
+ "--checkpoint_save_last",
+ action="store_true",
+ default=False,
+ help="save the last model to last.ckpt",
+ )
+
+ # -------------------------------------
+ # Model checkpointing
+ # -------------------------------------
+
+ parser.add_argument(
+ "--checkpoint_dir", type=str, default="snapshot", help="Where to dump the model"
+ )
+ parser.add_argument(
+ "--from_checkpoint",
+ action="store_true",
+ default=False,
+ help="Whether loading a model from a saved checkpoint",
+ )
+ parser.add_argument(
+ "--relax_checkpoint_matching",
+ action="store_true",
+ default=False,
+ help="Do not enforce that the keys in checkpoint_path match the keys returned by this module’s state dict",
+ )
+ parser.add_argument(
+ "--checkpoint_path",
+ type=str,
+ default=None,
+ help="Filename of model snapshot to load[default: None]",
+ )
+
+ # -------------------------------------
+ # Storing model outputs
+ # -------------------------------------
+ parser.add_argument(
+ "--save_hiddens",
+ action="store_true",
+ default=False,
+ help="Save hidden repr from each image to an npz based off results path, git hash and exam name",
+ )
+ parser.add_argument(
+ "--save_predictions",
+ action="store_true",
+ default=False,
+ help="Save hidden repr from each image to an npz based off results path, git hash and exam name",
+ )
+ parser.add_argument(
+ "--inference_dir",
+ type=str,
+ default="hiddens/test_run",
+        help="Dir to store hidden npy files when save_hiddens is true",
+ )
+
+ # -------------------------------------
+ # Run outputs
+ # -------------------------------------
+ parser.add_argument(
+ "--results_path",
+ type=str,
+ default="logs/test.args",
+ help="Where to save the result logs",
+ )
+ parser.add_argument(
+ "--experiment_name",
+ type=str,
+ help="defined either automatically by dispatcher.py or time in main.py. Keep without default",
+ )
+
+ # -------------------------------------
+ # System
+ # -------------------------------------
+ parser.add_argument(
+ "--num_workers",
+ type=int,
+ default=8,
+ help="Num workers for each data loader [default: 4]",
+ )
+
+ # cache
+ parser.add_argument(
+ "--cache_path", type=str, default=None, help="Dir to cache images."
+ )
+
+ # -------------------------------------
+ # Logging
+ # -------------------------------------
+
+ parser.add_argument(
+ "--logger_name",
+ type=str,
+ action=set_protgps_type("logger"),
+ choices=["tensorboard", "comet", "wandb"],
+ default="tensorboard",
+ help="experiment logger to use",
+ )
+ parser.add_argument(
+ "--logger_tags", nargs="*", default=[], help="List of tags for logger"
+ )
+ parser.add_argument("--project_name", default="CancerCures", help="Comet project")
+ parser.add_argument("--workspace", default="pgmikhael", help="Comet workspace")
+ parser.add_argument(
+ "--log_gen_image",
+ action="store_true",
+ default=False,
+ help="Whether to log sample generated image to comet",
+ )
+ parser.add_argument(
+ "--log_profiler",
+ action="store_true",
+ default=False,
+ help="Log profiler times to logger",
+ )
+
+ # -------------------------------------
+ # Add object-level args
+ # -------------------------------------
+
+ def add_class_args(args_as_dict, parser):
+        # recursively add object-level args until no new flags are introduced
+ for argname, argval in args_as_dict.items():
+ args_for_protgpss = {
+ a.dest: a for a in parser._actions if hasattr(a, "is_protgps_action")
+ }
+ old_args = vars(parser.parse_known_args()[0])
+ if argname in args_for_protgpss:
+ args_for_protgpss[argname].add_args(parser, argval)
+ newargs = vars(parser.parse_known_args()[0])
+ newargs = {k: v for k, v in newargs.items() if k not in old_args}
+ add_class_args(newargs, parser)
+
+ parser.parse_known_args(namespace=global_namespace)
+ add_class_args(vars(global_namespace), parser)
+
+ return parser
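+
+# Expansion sketch (hypothetical flag names): parsing "--dataset_name foo"
+# triggers the dataset's registered action, which may add new object-level
+# flags such as "--foo_arg"; add_class_args then re-parses and recurses on any
+# newly added args, so nested objects keep contributing flags until a fixed
+# point is reached.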
+
+
+def parse_args(args_strings=None):
+ # run
+ parser = Trainer.add_argparse_args(get_parser())
+ if args_strings is None:
+ args = parser.parse_args()
+ else:
+ args = parser.parse_args(args_strings)
+
+ # using gpus
+ if (isinstance(args.gpus, str) and len(args.gpus.split(",")) > 1) or (
+ isinstance(args.gpus, int) and args.gpus > 1
+ ):
+ args.strategy = "ddp"
+ args.replace_sampler_ddp = False
+ else:
+ args.strategy = None
+ args.replace_sampler_ddp = False
+
+ # username
+ args.unix_username = pwd.getpwuid(os.getuid())[0]
+
+ # learning initial state
+ args.step_indx = 1
+
+ # set args
+    args_for_protgpss = {
+        a.dest: a for a in parser._actions if hasattr(a, "is_protgps_action")
+    }
+ for argname, argval in vars(args).items():
+ if argname in args_for_protgpss:
+ args_for_protgpss[argname].set_args(args, argval)
+
+ # parse tune parameters
+ # args = parse_tune_params(args)
+
+ return args
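+
+# Behavior sketch: "--gpus 0,1" (comma-separated string) or "--gpus 2" (int > 1)
+# selects the "ddp" strategy above, while a single gpu leaves strategy as None;
+# in both cases replace_sampler_ddp is disabled so the samplers built in
+# loading.py are used instead of Lightning's defaults.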
diff --git a/data/protgps/utils/registry.py b/data/protgps/utils/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..84e02755e6e1b3382229ef7a9b984cdb0914c163
--- /dev/null
+++ b/data/protgps/utils/registry.py
@@ -0,0 +1,44 @@
+import hashlib
+
+REGISTRIES = {
+ "LIGHTNING_REGISTRY": {},
+ "DATASET_REGISTRY": {},
+ "MODEL_REGISTRY": {},
+ "LOSS_REGISTRY": {},
+ "METRIC_REGISTRY": {},
+ "OPTIMIZER_REGISTRY": {},
+ "SCHEDULER_REGISTRY": {},
+ "SEARCHER_REGISTRY": {},
+ "CALLBACK_REGISTRY": {},
+ "INPUT_LOADER_REGISTRY": {},
+ "AUGMENTATION_REGISTRY": {},
+ "LOGGER_REGISTRY": {},
+}
+
+
+def get_object(object_name, object_type):
+ if object_name not in REGISTRIES["{}_REGISTRY".format(object_type.upper())]:
+ raise Exception(
+ "INVALID {} NAME: {}. AVAILABLE {}".format(
+ object_type.upper(),
+ object_name,
+ REGISTRIES["{}_REGISTRY".format(object_type.upper())].keys(),
+ )
+ )
+ return REGISTRIES["{}_REGISTRY".format(object_type.upper())][object_name]
+
+
+def register_object(object_name, object_type):
+ def decorator(obj):
+ REGISTRIES["{}_REGISTRY".format(object_type.upper())][object_name] = obj
+ obj.name = object_name
+ return obj
+
+ return decorator
+
+
+def md5(key):
+ """
+ returns a hashed with md5 string of the key
+ """
+ return hashlib.md5(key.encode()).hexdigest()
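+
+# Usage sketch (hypothetical class name): objects self-register at import time
+# and are later retrieved by name.
+#
+#   @register_object("my_dataset", "dataset")
+#   class MyDataset:
+#       ...
+#
+#   dataset_cls = get_object("my_dataset", "dataset")  # -> MyDataset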
diff --git a/data/protgps/utils/sampler.py b/data/protgps/utils/sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..94e521df115f21a5b96968917b93dd24c4368756
--- /dev/null
+++ b/data/protgps/utils/sampler.py
@@ -0,0 +1,100 @@
+import math
+from typing import TypeVar, Optional, Iterator, Sequence
+
+import torch
+from torch.utils.data import Dataset
+import torch.distributed as dist
+
+
+T_co = TypeVar('T_co', covariant=True)
+
+
+class DistributedWeightedSampler(torch.utils.data.distributed.DistributedSampler):
+ r"""Extension of pytorch's native distributed sampler, but supports weighted sampling
+ .. note::
+ Dataset is assumed to be of constant size.
+ Arguments:
+ dataset: Dataset used for sampling.
+ num_replicas (int, optional): Number of processes participating in
+ distributed training. By default, :attr:`rank` is retrieved from the
+ current distributed group.
+ rank (int, optional): Rank of the current process within :attr:`num_replicas`.
+ By default, :attr:`rank` is retrieved from the current distributed
+ group.
+ indices.
+ seed (int, optional): random seed used to shuffle the sampler if
+ :attr:`shuffle=True`. This number should be identical across all
+ processes in the distributed group. Default: ``0``.
+ drop_last (bool, optional): if ``True``, then the sampler will drop the
+ tail of the data to make it evenly divisible across the number of
+ replicas. If ``False``, the sampler will add extra indices to make
+ the data evenly divisible across the replicas. Default: ``False``.
+ .. warning::
+ In distributed mode, calling the :meth:`set_epoch` method at
+ the beginning of each epoch **before** creating the :class:`DataLoader` iterator
+ is necessary to make shuffling work properly across multiple epochs. Otherwise,
+ the same ordering will be always used.
+ Example::
+ >>> sampler = DistributedSampler(dataset) if is_distributed else None
+ >>> loader = DataLoader(dataset, shuffle=(sampler is None),
+ ... sampler=sampler)
+ >>> for epoch in range(start_epoch, n_epochs):
+ ... if is_distributed:
+ ... sampler.set_epoch(epoch)
+ ... train(loader)
+ """
+
+ def __init__(self, dataset: Dataset, weights: Sequence[float],
+ replacement: bool = True, generator=None, num_replicas: Optional[int] = None,
+ rank: Optional[int] = None,
+ seed: int = 0, drop_last: bool = False) -> None:
+ if num_replicas is None:
+ if not dist.is_available():
+ raise RuntimeError("Requires distributed package to be available")
+ num_replicas = dist.get_world_size()
+ if rank is None:
+ if not dist.is_available():
+ raise RuntimeError("Requires distributed package to be available")
+ rank = dist.get_rank()
+ self.dataset = dataset
+ self.num_replicas = num_replicas
+ self.rank = rank
+ self.epoch = 0
+ self.drop_last = drop_last
+ self.weights = torch.Tensor(weights)
+ self.replacement = replacement
+ self.generator = generator
+ # If the dataset length is evenly divisible by # of replicas, then there
+ # is no need to drop any data, since the dataset will be split equally.
+ if self.drop_last and len(self.dataset) % self.num_replicas != 0:
+ # Split to nearest available length that is evenly divisible.
+ # This is to ensure each rank receives the same amount of data when
+ # using this Sampler.
+ self.num_samples = math.ceil(
+ (len(self.dataset) - self.num_replicas) / self.num_replicas
+ )
+ else:
+ self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)
+ self.total_size = self.num_samples * self.num_replicas
+ self.seed = seed
+
+    def __iter__(self) -> Iterator[T_co]:
+        indices = list(range(len(self.dataset)))
+
+        if not self.drop_last:
+            # add extra samples to make it evenly divisible
+            indices += indices[:(self.total_size - len(indices))]
+        else:
+            # remove tail of data to make it evenly divisible.
+            indices = indices[:self.total_size]
+        assert len(indices) == self.total_size
+
+        # subsample this rank's shard, with the weights that match its indices
+        indices = indices[self.rank:self.total_size:self.num_replicas]
+        weights = self.weights[indices]
+
+        assert len(indices) == self.num_samples
+
+        # draw a weighted sample within the shard, then map the draws back to
+        # global dataset indices
+        rand_tensor = torch.multinomial(
+            weights, self.num_samples, self.replacement, generator=self.generator
+        )
+        return iter(torch.tensor(indices)[rand_tensor].tolist())
+
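+# Usage sketch (hypothetical weights): pair the sampler with a DataLoader and,
+# as the class docstring recommends, call set_epoch each epoch.
+#
+#   weights = [1.0 if y == 1 else 0.2 for y in labels]  # upweight positives
+#   sampler = DistributedWeightedSampler(dataset, weights)
+#   loader = DataLoader(dataset, batch_size=32, sampler=sampler)
+#   for epoch in range(num_epochs):
+#       sampler.set_epoch(epoch)
+#       train_one_epoch(loader)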
diff --git a/data/pyproject.toml b/data/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..7bf8dc048fe4153f47978971b5ddceb02cee2e6a
--- /dev/null
+++ b/data/pyproject.toml
@@ -0,0 +1,25 @@
+[tool.poetry]
+name = "protgps"
+version = "0.0.1"
+description = ""
+authors = ["Peter G Mikhael "]
+license = "MIT"
+readme = "README.md"
+repository = "https://github.com/pgmikhael/protgps"
+
+
+[tool.poetry.dependencies]
+python = "3.8"
+
+[tool.poetry.group.ci.dependencies]
+black = "^23.3.0"
+mypy = "^1.1.1"
+pylint = "^2.13.0"
+pytest = "^7.1.2"
+pytest-cov = "^3.0.0"
+rstcheck = { version = "^6.1.2", python = "<4" }
+ruff = "^0.0.291"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/data/scripts/dispatcher.py b/data/scripts/dispatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..defb2bc037da0f435cfceb37c8140bc4735a4398
--- /dev/null
+++ b/data/scripts/dispatcher.py
@@ -0,0 +1,146 @@
+# append project root to system path
+import sys, os
+from os.path import dirname, realpath
+
+sys.path.append((dirname(dirname(realpath(__file__)))))
+import argparse
+import subprocess
+import multiprocessing
+import pickle
+import json
+import protgps.utils.parsing as parsing
+from protgps.utils.registry import md5
+
+EXPERIMENT_CRASH_MSG = "ALERT! job:[{}] has crashed! Check logfile at:[{}]"
+CONFIG_NOT_FOUND_MSG = "ALERT! {} config {} file does not exist!"
+SUCCESSFUL_SEARCH_STR = "SUCCESS! Grid search results dumped to {}."
+
+parser = argparse.ArgumentParser(description="Dispatcher.")
+parser.add_argument(
+ "--config_path",
+ "-c",
+ type=str,
+ required=True,
+ default="configs/config_file.json",
+ help="path to model configurations json file",
+)
+parser.add_argument(
+ "--log_dir",
+ "-l",
+ type=str,
+ default="logs",
+ help="path to store logs and detailed job level result files",
+)
+parser.add_argument(
+ "--dry_run",
+ "-n",
+ action="store_true",
+ default=False,
+ help="print out commands without running",
+)
+parser.add_argument(
+ "--eval_train_config",
+ "-e",
+ action="store_true",
+ default=False,
+ help="create evaluation run from a training config",
+)
+
+
+def launch_experiment(script, gpu, flag_string):
+ """
+ Launch an experiment and direct logs and results to a unique filepath.
+
+ Args:
+ script (str): file name to run as main
+ gpu (str): gpu this worker can access.
+ flag_string (str): arguments and values as a single blob.
+
+ Returns:
+ results_path (str): path to saved args pickle file
+ log_path (str): path to logs
+ """
+ if not os.path.isdir(args.log_dir):
+ os.makedirs(args.log_dir)
+
+ log_name = md5(flag_string)
+ log_stem = os.path.join(args.log_dir, log_name)
+ log_path = "{}.txt".format(log_stem)
+ results_path = "{}.args".format(log_stem)
+
+ experiment_string = f"CUDA_VISIBLE_DEVICES={gpu} python -u scripts/{script}.py {flag_string} --results_path {log_stem} --experiment_name {log_name}" # use log_stem instead of results_path, add extensions in main/learn.py
+
+ # forward logs to logfile
+ if "--resume" in flag_string:
+ pipe_str = ">>"
+ else:
+ pipe_str = ">"
+
+ shell_cmd = f"{experiment_string} {pipe_str} {log_path} 2>&1"
+ print("Launched exp: {}".format(shell_cmd))
+
+ if not os.path.exists(results_path) and (not args.dry_run):
+ subprocess.call(shell_cmd, shell=True)
+
+ return results_path, log_path
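+
+# Example of a constructed command (hypothetical hash and flags):
+#   CUDA_VISIBLE_DEVICES=0 python -u scripts/main.py --train --lr 0.001 \
+#       --results_path logs/9f1c --experiment_name 9f1c > logs/9f1c.txt 2>&1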
+
+
+def worker(script, gpu, job_queue, done_queue):
+ """
+ Worker thread for each gpu. Consumes all jobs and pushes results to done_queue.
+
+ Args:
+ script (str): file name to run as main
+ gpu (str): gpu this worker can access.
+ job_queue (Queue): queue of available jobs.
+ done_queue (Queue): queue where to push results.
+ """
+
+ while not job_queue.empty():
+ params = job_queue.get()
+ if params is None:
+ return
+ done_queue.put(launch_experiment(script, gpu, params))
+
+
+if __name__ == "__main__":
+
+ args = parser.parse_args()
+ if not os.path.exists(args.config_path):
+ print(CONFIG_NOT_FOUND_MSG.format("experiment", args.config_path))
+ sys.exit(1)
+ experiment_config = json.load(open(args.config_path, "r"))
+
+ if args.eval_train_config:
+ experiments, flags, experiment_axies = parsing.prepare_training_config_for_eval(
+ experiment_config
+ )
+ else:
+ experiments, flags, experiment_axies = parsing.parse_dispatcher_config(
+ experiment_config
+ )
+
+ job_queue = multiprocessing.Queue()
+ done_queue = multiprocessing.Queue()
+
+ for job in experiments:
+ job_queue.put(job)
+ print("Launching Dispatcher with {} jobs!".format(len(experiments)))
+ print()
+
+ for gpu in experiment_config["available_gpus"]:
+ print("Start gpu worker {}".format(gpu))
+ multiprocessing.Process(
+ target=worker,
+ args=(experiment_config["script"], gpu, job_queue, done_queue),
+ ).start()
+ print()
+
+ for i in range(len(experiments)):
+        result_path, log_path = done_queue.get()  # .args and .txt (stdout/stderr) files
+        try:
+            result_dict = pickle.load(open(result_path, "rb"))
+            dump_result_string = SUCCESSFUL_SEARCH_STR.format(result_path)
+ print("({}/{}) \t {}".format(i + 1, len(experiments), dump_result_string))
+ except Exception:
+ print("Experiment failed! Logs are located at: {}".format(log_path))
diff --git a/data/scripts/inference.py b/data/scripts/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcef1e464c318d3780e685aea864770ef400bf0b
--- /dev/null
+++ b/data/scripts/inference.py
@@ -0,0 +1,161 @@
+import sys, os
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+import argparse
+from tqdm import tqdm
+import pandas as pd
+import torch
+from pytorch_lightning.utilities.cloud_io import load as pl_load
+from typing import List
+from protgps.utils.registry import get_object
+
+COMPARTMENTS = [
+ "nuclear_speckle",
+ "p-body",
+ "pml-bdoy",
+ "post_synaptic_density",
+ "stress_granule",
+ "chromosome",
+ "nucleolus",
+ "nuclear_pore_complex",
+ "cajal_body",
+ "rna_granule",
+ "cell_junction",
+ "transcriptional",
+]
+
+
+def load_model(model_path: str, esm_dir: str) -> torch.nn.Module:
+ """
+ Load model from checkpoint
+
+ Parameters
+ ----------
+ model_path : str
+ Path to the model checkpoint
+ esm_dir : str
+ Path to ESM-2 directory
+
+ Returns
+ -------
+ torch.nn.Module
+ Model instance loaded from the checkpoint
+ """
+ checkpoint = pl_load(model_path, map_location=lambda storage, loc: storage)
+ args = checkpoint["hyper_parameters"]["args"]
+ args.pretrained_hub_dir = esm_dir
+ model = get_object(args.lightning_name, "lightning")(args)
+ model = model.load_from_checkpoint(
+ checkpoint_path=model_path,
+ **{"args": args},
+ )
+ return model
+
+
+def predict_condensates(
+ model: torch.nn.Module, sequences: List[str], batch_size: int, round: bool = True
+) -> torch.Tensor:
+ """
+ Predict condensate ID for the given sequences
+
+ Parameters
+ ----------
+ model : torch.nn.Module
+        ProtGPS model instance
+ sequences : list
+ List of sequences
+ batch_size : int
+ Batch size for inference
+ round : bool, optional
+ whether to round scores, by default True
+
+ Returns
+ -------
+ torch.Tensor
+ Predicted scores for each condensate
+ """
+ scores = []
+ for i in tqdm(range(0, len(sequences), batch_size), ncols=100):
+ batch = sequences[i : (i + batch_size)]
+ with torch.no_grad():
+ out = model.model({"x": batch})
+ s = torch.sigmoid(out["logit"]).to("cpu")
+ scores.append(s)
+ scores = torch.vstack(scores)
+ if round:
+ scores = torch.round(scores, decimals=3)
+
+ scores = scores.cpu() # move to cpu
+ return scores
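+
+# Shape note (assuming the model emits one score per compartment listed in
+# COMPARTMENTS): for N input sequences the returned tensor is [N, 12], with
+# columns aligned to COMPARTMENTS, e.g. scores[:, 6] holds nucleolus scores.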
+
+
+def get_valid_rows(df: pd.DataFrame, cols: list) -> list:
+ """
+ Get rows with valid sequence length
+
+ Parameters
+ ----------
+ df : pd.DataFrame
+ Input dataframe
+ cols : list
+        Names of the sequence columns
+
+ Returns
+ -------
+ list
+ List of row indices with valid sequence length
+ """
+ rows_with_valid_seq_len = []
+ for i in range(len(df)):
+ if all([len(df.iloc[i][c]) < 1800 for c in cols]):
+ rows_with_valid_seq_len.append(i)
+ return rows_with_valid_seq_len
+
+
+parser = argparse.ArgumentParser(description="Inference script")
+parser.add_argument("--model_path", "-m", type=str, help="Input file path")
+parser.add_argument(
+ "--esm_dir", type=str, help="Path to ESM2 directory", default="esm_checkpoint"
+)
+parser.add_argument(
+ "--device", type=str, help="Device to run inference on", default="cpu"
+)
+parser.add_argument("--input", "-i", type=str, help="Input file path")
+parser.add_argument(
+ "--colname", type=str, help="Column name of the sequences", default="Sequence"
+)
+parser.add_argument("--output", "-o", type=str, help="Output file path")
+
+if __name__ == "__main__":
+ args = parser.parse_args()
+ # load model
+ model = load_model(args.model_path, args.esm_dir)
+ model.eval()
+ print()
+
+    # move model to the requested device, falling back to cpu when cuda
+    # is unavailable
+    if args.device == "cuda" and torch.cuda.is_available():
+        device = torch.device("cuda")
+    else:
+        device = torch.device("cpu")
+    model = model.to(device)
+
+ # read input data
+ data = pd.read_excel(args.input)
+ assert (
+ args.colname in data.columns
+ ), f"Column name {args.colname} not found in the input file"
+
+ # get valid rows (sequences with length < 1800)
+ rows_with_valid_seq_len = get_valid_rows(data, [args.colname])
+    data = data.iloc[rows_with_valid_seq_len]  # positional indices from get_valid_rows
+
+ sequences = [s.upper() for s in list(data[args.colname])]
+
+ # predict condensates
+ scores = predict_condensates(model, sequences, batch_size=1, round=False)
+ for j, condensate in enumerate(COMPARTMENTS):
+ data[f"{condensate.upper()}_Score"] = scores[:, j].tolist()
+
+ # save output
+ data.to_csv(args.output, index=False)
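+
+# Example invocation (hypothetical paths): the input is read with
+# pd.read_excel, so it should be an .xlsx file, while the output is CSV.
+#
+#   python scripts/inference.py -m checkpoints/protgps.ckpt \
+#       --esm_dir esm_checkpoint -i proteins.xlsx -o scores.csv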
diff --git a/data/scripts/main.py b/data/scripts/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..eab43ce6b85182712dcee829fd08c056b7edcd08
--- /dev/null
+++ b/data/scripts/main.py
@@ -0,0 +1,129 @@
+import sys, os
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+
+import pickle
+import time
+import git
+import torch
+import pytorch_lightning as pl
+from pytorch_lightning import _logger as log
+
+from protgps.utils.parsing import parse_args
+from protgps.utils.registry import get_object
+import protgps.utils.loading as loaders
+from protgps.utils.callbacks import set_callbacks
+
+
+def train(args):
+ # Remove callbacks from args for safe pickling later
+ trainer = pl.Trainer.from_argparse_args(args)
+ args.callbacks = None
+ args.num_nodes = trainer.num_nodes
+ args.num_processes = trainer.num_devices
+ args.world_size = args.num_nodes * args.num_processes
+ args.global_rank = trainer.global_rank
+ args.local_rank = trainer.local_rank
+
+ repo = git.Repo(search_parent_directories=True)
+ commit = repo.head.object
+ log.info(
+ "\nProject main running by author: {} \ndate:{}, \nfrom commit: {} -- {}".format(
+ commit.author,
+ time.strftime("%m-%d-%Y %H:%M:%S", time.localtime(commit.committed_date)),
+ commit.hexsha,
+ commit.message,
+ )
+ )
+
+ # print args
+ for key, value in sorted(vars(args).items()):
+ print("{} -- {}".format(key.upper(), value))
+
+ # create or load lightning model from checkpoint
+ model = loaders.get_lightning_model(args)
+
+ # logger
+ trainer.logger = get_object(args.logger_name, "logger")(args)
+
+ # push to logger
+ trainer.logger.setup(**{"args": args, "model": model})
+
+ # add callbacks
+ trainer.callbacks = set_callbacks(trainer, args)
+
+ # train model
+ if args.train:
+ train_dataset = loaders.get_train_dataset_loader(args)
+ dev_dataset = loaders.get_eval_dataset_loader(args, split="dev")
+ log.info("\nTraining Phase...")
+ trainer.fit(model, train_dataset, dev_dataset)
+ if trainer.checkpoint_callback:
+ args.model_path = trainer.checkpoint_callback.best_model_path
+
+ # save args
+ if args.local_rank == 0:
+ print("Saving args to {}.args".format(args.results_path))
+ pickle.dump(vars(args), open("{}.args".format(args.results_path), "wb"))
+
+ return model, trainer.logger
+
+
+def eval(model, logger, args):
+ # reinit trainer
+ trainer = pl.Trainer(gpus=1)
+
+ # reset ddp
+ args.strategy = None
+
+ # connect to same logger as in training
+ trainer.logger = logger
+
+ # set callbacks
+ trainer.callbacks = set_callbacks(trainer, args)
+
+ # eval on train
+ if args.eval_on_train:
+ log.info("\nInference Phase on train set...")
+ train_dataset = loaders.get_eval_dataset_loader(args, split="train")
+
+ if args.train and trainer.checkpoint_callback:
+ trainer.test(model, train_dataset, ckpt_path=args.model_path)
+ else:
+ trainer.test(model, train_dataset)
+
+ # eval on dev
+ if args.dev:
+ log.info("\nValidation Phase...")
+ dev_dataset = loaders.get_eval_dataset_loader(args, split="dev")
+ if args.train and trainer.checkpoint_callback:
+ trainer.test(model, dev_dataset, ckpt_path=args.model_path)
+ else:
+ trainer.test(model, dev_dataset)
+
+ # eval on test
+ if args.test:
+ log.info("\nInference Phase on test set...")
+ test_dataset = loaders.get_eval_dataset_loader(args, split="test")
+
+ if args.train and trainer.checkpoint_callback:
+ trainer.test(model, test_dataset, ckpt_path=args.model_path)
+ else:
+ trainer.test(model, test_dataset)
+
+
+if __name__ == "__main__":
+ args = parse_args()
+ model, logger = train(args)
+
+ if args.dev or args.test or args.eval_on_train:
+ if args.strategy == "ddp":
+ torch.distributed.destroy_process_group()
+ log.info("\n\n")
+ log.info(">" * 33)
+ log.info("Destroyed process groups for eval")
+ log.info("<" * 33)
+ log.info("\n\n")
+
+ if args.global_rank == 0:
+ eval(model, logger, args)
diff --git a/data/tests/__init__.py b/data/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391