diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..35ae02a940d66a9e289d99dbde1f46d5c3dc15b0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +**/__pycache__ +/.vscode \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..90928ceb56245d311fc1e2ca6594839db1cb9ce5 --- /dev/null +++ b/app.py @@ -0,0 +1,115 @@ +import gradio as gr +import os, requests +import numpy as np +import torch +import cv2 +from cell_segmentation.inference.inference_cellvit_experiment_pannuke import InferenceCellViTParser,InferenceCellViT +from cell_segmentation.inference.inference_cellvit_experiment_monuseg import InferenceCellViTMoNuSegParser,MoNuSegInference + + +## local | remote +RUN_MODE = "remote" +if RUN_MODE != "local": + os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/model_best.pth") + ## examples + os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/1.png") + os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/2.png") + os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/3.png") + os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/4.png") + +## step 1: set up model + +device = "cpu" + +## pannuke set +pannuke_parser = InferenceCellViTParser() +pannuke_configurations = pannuke_parser.parse_arguments() +pannuke_inf = InferenceCellViT( + run_dir=pannuke_configurations["run_dir"], + checkpoint_name=pannuke_configurations["checkpoint_name"], + gpu=pannuke_configurations["gpu"], + magnification=pannuke_configurations["magnification"], + ) + +pannuke_checkpoint = torch.load( + pannuke_inf.run_dir / pannuke_inf.checkpoint_name, map_location="cpu" +) +pannuke_model = pannuke_inf.get_model(model_type=pannuke_checkpoint["arch"]) +pannuke_model.load_state_dict(pannuke_checkpoint["model_state_dict"]) +# # put model in eval mode +pannuke_model.to(device) +pannuke_model.eval() + + +## monuseg set +monuseg_parser = InferenceCellViTMoNuSegParser() +monuseg_configurations = monuseg_parser.parse_arguments() +monuseg_inf = MoNuSegInference( + model_path=monuseg_configurations["model"], + dataset_path=monuseg_configurations["dataset"], + outdir=monuseg_configurations["outdir"], + gpu=monuseg_configurations["gpu"], + patching=monuseg_configurations["patching"], + magnification=monuseg_configurations["magnification"], + overlap=monuseg_configurations["overlap"], + ) + + +def click_process(image_input , type_dataset): + if type_dataset == "pannuke": + pannuke_inf.run_single_image_inference(pannuke_model,image_input) + else: + monuseg_inf.run_single_image_inference(monuseg_inf.model, image_input) + + image_output = cv2.imread("pred_img.png") + image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2RGB) + return image_output + + +demo = gr.Blocks(title="LkCell") +with demo: + gr.Markdown(value=""" + **Gradio demo for LKCell: Efficient Cell Nuclei Instance Segmentation with Large Convolution Kernels**. Check our [Github Repo](https://github.com/ziwei-cui/LKCellv1) 😛. 
+ """) + with gr.Row(): + with gr.Column(): + with gr.Row(): + Image_input = gr.Image(type="numpy", label="Input", interactive=True,height=480) + with gr.Row(): + Type_dataset = gr.Radio(choices=["pannuke", "monuseg"], label=" input image's dataset type",value="pannuke") + + with gr.Column(): + with gr.Row(): + image_output = gr.Image(type="numpy", label="Output",height=480) + with gr.Row(): + Button_run = gr.Button("🚀 Submit (发送) ") + clear_button = gr.ClearButton(components=[Image_input,Type_dataset,image_output],value="🧹 Clear (清除)") + + Button_run.click(fn=click_process, inputs=[Image_input, Type_dataset ], outputs=[image_output]) + + ## guiline + gr.Markdown(value=""" + 🔔**Guideline** + 1. Upload your image or select one from the examples. + 2. Set up the arguments: "Type_dataset". + 3. Run the Submit button to get the output. + """) + # if RUN_MODE != "local": + gr.Examples(examples=[ + ['1.png', "pannuke"], + ['2.png', "pannuke"], + ['3.png', "monuseg"], + ['4.png', "monuseg"], + ], + inputs=[Image_input, Type_dataset], outputs=[image_output], label="Examples") + gr.HTML(value=""" +
+ """) + gr.Markdown(value=""" + Template is adapted from [Here](https://huggingface.co/spaces/menghanxia/disco) + """) + +if RUN_MODE == "local": + demo.launch(server_name='127.0.0.1',server_port=8003) +else: + demo.launch() \ No newline at end of file diff --git a/base_ml/base_cli.py b/base_ml/base_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..835edf4506ee5c87dfaa2cf97cacf506e1eb53e9 --- /dev/null +++ b/base_ml/base_cli.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# Base CLI to parse Arguments +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import argparse +import logging +from abc import ABC, abstractmethod +from typing import Tuple, Union + +import yaml +from pydantic import BaseModel + + +class ABCParser(ABC): + """Blueprint for Argument Parser""" + + @abstractmethod + def __init__(self) -> None: + pass + + @abstractmethod + def get_config(self) -> Tuple[Union[BaseModel, dict], logging.Logger]: + """Load configuration and create a logger + + Returns: + Tuple[PreProcessingConfig, logging.Logger]: Configuration and Logger + """ + pass + + @abstractmethod + def store_config(self) -> None: + """Store the config file in the logging directory to keep track of the configuration.""" + pass + + +class ExperimentBaseParser: + """Configuration Parser for Machine Learning Experiments""" + + def __init__(self) -> None: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Start an experiment with given configuration file.", + ) + requiredNamed = parser.add_argument_group("required named arguments") + requiredNamed.add_argument( + "--config", type=str, help="Path to a config file", required=True + ) + parser.add_argument("--gpu", type=int, help="Cuda-GPU ID") + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument( + "--sweep", + action="store_true", + help="Starting a sweep. For this the configuration file must be structured according to WandB sweeping. " + "Compare https://docs.wandb.ai/guides/sweeps and https://community.wandb.ai/t/nested-sweep-configuration/3369/3 " + "for further information. This parameter cannot be set in the config file!", + ) + group.add_argument( + "--agent", + type=str, + help="Add a new agent to the sweep. " + "Please pass the sweep ID as argument in the way entity/project/sweep_id, e.g., user1/test_project/v4hwbijh. " + "The agent configuration can be found in the WandB dashboard for the running sweep in the sweep overview tab " + "under launch agent. Just paste the entity/project/sweep_id given there. The provided config file must be a sweep config file." + "This parameter cannot be set in the config file!", + ) + group.add_argument( + "--checkpoint", + type=str, + help="Path to a PyTorch checkpoint file. " + "The file is loaded and continued to train with the provided settings. " + "If this is passed, no sweeps are possible. 
" + "This parameter cannot be set in the config file!", + ) + + self.parser = parser + + def parse_arguments(self) -> Tuple[Union[BaseModel, dict]]: + """Parse the arguments from CLI and load yaml config + + Returns: + Tuple[Union[BaseModel, dict]]: Parsed arguments + """ + # parse the arguments + opt = self.parser.parse_args() #定义了一个opt变量,用来存储参数 + with open(opt.config, "r") as config_file: + yaml_config = yaml.safe_load(config_file) + yaml_config_dict = dict(yaml_config) #将yaml文件转换为字典 + + opt_dict = vars(opt) #将opt转换为字典 + # check for gpu to overwrite with cli argument + if "gpu" in opt_dict: #如果gpu在opt_dict中 + if opt_dict["gpu"] is not None: + yaml_config_dict["gpu"] = opt_dict["gpu"] #将opt_dict中的gpu值赋给yaml_config_dict中的gpu + + # check if either training, sweep, checkpoint or start agent should be called + # first step: remove such keys from the config file + if "run_sweep" in yaml_config_dict: #如果yaml_config_dict中有run_sweep + yaml_config_dict.pop("run_sweep") #删除yaml_config_dict中的run_sweep + if "agent" in yaml_config_dict: + yaml_config_dict.pop("agent") + if "checkpoint" in yaml_config_dict: + yaml_config_dict.pop("checkpoint") + + # select one of the options + if "sweep" in opt_dict and opt_dict["sweep"] is True: + yaml_config_dict["run_sweep"] = True + else: + yaml_config_dict["run_sweep"] = False + if "agent" in opt_dict: + yaml_config_dict["agent"] = opt_dict["agent"] + if "checkpoint" in opt_dict: + if opt_dict["checkpoint"] is not None: + yaml_config_dict["checkpoint"] = opt_dict["checkpoint"] + + self.config = yaml_config_dict #将yaml_config_dict赋给self.config + + return self.config diff --git a/base_ml/base_early_stopping.py b/base_ml/base_early_stopping.py new file mode 100644 index 0000000000000000000000000000000000000000..e8b72f9aa384be178656129ed3337697d851ac02 --- /dev/null +++ b/base_ml/base_early_stopping.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Base Machine Learning Experiment +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import logging + +logger = logging.getLogger("__main__") +logger.addHandler(logging.NullHandler()) + +import wandb + + +class EarlyStopping: + """Early Stopping Class + + Args: + patience (int): Patience to wait before stopping + strategy (str, optional): Optimization strategy. + Please select 'minimize' or 'maximize' for strategy. Defaults to "minimize". 
+ """ + + def __init__(self, patience: int, strategy: str = "minimize"): + assert strategy.lower() in [ + "minimize", + "maximize", + ], "Please select 'minimize' or 'maximize' for strategy" + + self.patience = patience + self.counter = 0 + self.strategy = strategy.lower() + self.best_metric = None + self.best_epoch = None + self.early_stop = False + + logger.info( + f"Using early stopping with a range of {self.patience} and {self.strategy} strategy" + ) + + def __call__(self, metric: float, epoch: int) -> bool: + """Early stopping update call + + Args: + metric (float): Metric for early stopping + epoch (int): Current epoch + + Returns: + bool: Returns true if the model is performing better than the current best model, + otherwise false + """ + if self.best_metric is None: + self.best_metric = metric + self.best_epoch = epoch + return True + else: + if self.strategy == "minimize": + if self.best_metric >= metric: + self.best_metric = metric + self.best_epoch = epoch + self.counter = 0 + wandb.run.summary["Best-Epoch"] = epoch + wandb.run.summary["Best-Metric"] = metric + return True + else: + self.counter += 1 + if self.counter >= self.patience: + self.early_stop = True + return False + elif self.strategy == "maximize": + if self.best_metric <= metric: + self.best_metric = metric + self.best_epoch = epoch + self.counter = 0 + wandb.run.summary["Best-Epoch"] = epoch + wandb.run.summary["Best-Metric"] = metric + return True + else: + self.counter += 1 + if self.counter >= self.patience: + self.early_stop = True + return False diff --git a/base_ml/base_experiment.py b/base_ml/base_experiment.py new file mode 100644 index 0000000000000000000000000000000000000000..ea45913a195623d4cbc6c7281ce7714d81274dc4 --- /dev/null +++ b/base_ml/base_experiment.py @@ -0,0 +1,445 @@ +# -*- coding: utf-8 -*- +# Base Machine Learning Experiment +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import copy +import inspect +import logging +import os +import random +import sys +from abc import abstractmethod +from pathlib import Path +from typing import Tuple, Union +import argparse + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) + +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +import yaml +from pydantic import BaseModel +from torch.nn.modules.loss import _Loss +from torch.optim import Optimizer +from torch.optim.lr_scheduler import ConstantLR, _LRScheduler +from torch.utils.data import Dataset, Sampler + +from base_ml.base_optim import OPTI_DICT +from base_ml.base_validator import sweep_schema +from utils.logger import Logger +from utils.tools import flatten_dict, remove_parameter_tag, unflatten_dict + +from base_ml.optim_factory import LayerDecayValueAssigner, create_optimizer + + +class BaseExperiment: + """BaseExperiment Class + + An experiment consistsn of the follwing key methods: + + * run_experiment: Main Code for running the experiment with implemented coordinaten and training call + * + * + Args: + default_conf (dict): Default configuration + """ + + def __init__(self, default_conf: dict, checkpoint=None) -> None: + # setup configuration + self.default_conf = default_conf + self.run_conf = None + self.logger = logging.getLogger(__name__) + + # resolve_paths + self.default_conf["logging"]["log_dir"] = str( + Path(default_conf["logging"]["log_dir"]).resolve() + ) + 
self.default_conf["logging"]["wandb_dir"] = str( + Path(default_conf["logging"]["wandb_dir"]).resolve() + ) + + if checkpoint is not None: + self.checkpoint = torch.load(checkpoint, map_location="cpu") + else: + self.checkpoint = None + + # seeding + self.seed_run(seed=self.default_conf["random_seed"]) + + @abstractmethod + def run_experiment(self): + """Experiment Code + + Main Code for running the experiment. The following steps should be performed: + 1.) Set run name + 2.) Initialize WandB and update config (According to Sweep or predefined) + 3.) Create Output directory and setup logger + 4.) Machine Learning Setup + 4.1) Loss functions + 4.2) Model + 4.3) Optimizer + 4.4) Scheduler + 5.) Load and Setup Dataset + 6.) Define Trainer + 7.) trainer.fit() + + Raises: + NotImplementedError: Needs to be implemented + """ + raise NotImplementedError + + @abstractmethod + def get_train_model(self) -> nn.Module: + """Retrieve torch model for training + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + nn.Module: Torch Model + """ + raise NotImplementedError + + @abstractmethod + def get_loss_fn(self) -> _Loss: + """Retrieve torch loss function for training + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + _Loss: Loss function + """ + raise NotImplementedError + + def get_argparser(): + parser = argparse.ArgumentParser('ConvNeXt training and evaluation script for image classification', add_help=False) + + # Optimization parameters + parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight_decay', type=float, default=0.05, + help='weight decay (default: 0.05)') + parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the + weight decay. We use a cosine schedule for WD and using a larger decay by + the end of training improves performance for ViTs.""") + + parser.add_argument('--lr', type=float, default=4e-3, metavar='LR', + help='learning rate (default: 4e-3), with total batch size 4096') + parser.add_argument('--layer_decay', type=float, default=0.9999) + + + return parser + + + + + def get_optimizer( + self, model: nn.Module, opt: str, hp: dict, layer_decay:float, + ) -> Optimizer: + """Retrieve optimizer for training + + All Torch Optimizers are possible + + Args: + model (nn.Module): Training model + optimizer_name (str): Name of the optimizer, all current PyTorch Optimizer are possible + hp (dict): Hyperparameter as dictionary. For further information, + see documentation here: https://pytorch.org/docs/stable/optim.html#algorithms + + Raises: + NotImplementedError: Raises error if an undefined Optimizer differing from torch is used + + Returns: + Optimizer: PyTorch Optimizer + """ + # if optimizer_name not in OPTI_DICT: + # raise NotImplementedError("Optimizer not known") + + if layer_decay < 1.0 or layer_decay > 1.0: + num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value. 
+ + assigner = LayerDecayValueAssigner(list(layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2))) + else: + assigner = None + + #optim = OPTI_DICT[optimizer_name] + # optimizer = optim( + # params=filter(lambda p: p.requires_grad, model.parameters()), **hp + # ) + #optimizer = optim(params=model.parameters(), **hp) + + optimizer = create_optimizer( + model, weight_decay=hp["weight_decay"], lr=hp["lr"], opt=opt, get_num_layer=assigner.get_layer_id, get_layer_scale=assigner.get_scale) + + self.logger.info( + f"Loaded Optimizer with following hyperparameters:" + ) + self.logger.info(hp) + + return optimizer + + def get_scheduler(self, optimizer: Optimizer) -> _LRScheduler: + """Retrieve learning rate scheduler for training + + Currently, just constant scheduler. Should be extended to add a configurable scheduler. + Maybe reimplement in specific experiment file. + + Args: + optimizer (Optimizer): Optimizer + + Returns: + _LRScheduler: PyTorch Scheduler + """ + scheduler = ConstantLR(optimizer, factor=1, total_iters=1000) + self.logger.info("Scheduler: ConstantLR scheduler") + return scheduler + + def get_sampler(self) -> Sampler: + """Retrieve data sampler for training + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + Sampler: Training sampler + """ + raise NotImplementedError + + def get_train_dataset(self) -> Dataset: + """Retrieve training dataset + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + Dataset: Training dataset + """ + raise NotImplementedError + + def get_val_dataset(self) -> Dataset: + """Retrieve validation dataset + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + Dataset: Validation dataset + """ + raise NotImplementedError + + def load_file_split( + self, fold: int = None + ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: + """Load the file split for training, validation and test + + If no fold is provided, the current file split is loaded. Otherwise the files in the fold are loaded + + The folder (filelist_path) must be built up in the following way: + 1.) No-Multifold: + filelist_path: + train_split.csv + val_split.csv + test_split.csv + 2.) Multifold: + filelist_path: + fold1: + train_split.csv + val_split.csv + test_split.csv + fold2: + train_split.csv + val_split.csv + test_split.csv + ... + foldN: + train_split.csv + val_split.csv + test_split.csv + + Args: + fold (int, optional): Fold. Defaults to None. 
+ + Raises: + NotImplementedError: Fold selection is currently not Implemented + + Returns: + Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: Train, Val and Test split as Pandas Dataframe + """ + filelist_path = Path(self.default_conf["split_path"]).resolve() + self.logger.info(f"Loading filesplit from folder: {filelist_path}") + if fold is None: + train_split = pd.read_csv(filelist_path / "train_split.csv") + val_split = pd.read_csv(filelist_path / "val_split.csv") + test_split = pd.read_csv(filelist_path / "test_split.csv") + else: + train_split = pd.read_csv(filelist_path / f"fold{fold}" / "train_split.csv") + val_split = pd.read_csv(filelist_path / f"fold{fold}" / "val_split.csv") + test_split = None + + self.logger.info(f"Train size: {len(train_split)}") + self.logger.info(f"Val-Split: {len(val_split)}") + return train_split, val_split, test_split + + # Methods regarding logging and storing + def instantiate_logger(self) -> Logger: + """Instantiate a logger + + Returns: + Logger: Logger + """ + logger = Logger( + level=self.default_conf["logging"]["level"].upper(), + log_dir=Path(self.run_conf["logging"]["log_dir"]).resolve(), + comment="logs", + use_timestamp=False, + ) + self.logger = logger.create_logger() + return self.logger + + @staticmethod + def create_output_dir(folder_path: Union[str, Path]) -> None: + """Create folder at given path + + Args: + folder_path (Union[str, Path]): Folder that should be created + """ + folder_path = Path(folder_path).resolve() + folder_path.mkdir(parents=True, exist_ok=True) + + def store_config(self) -> None: + """Store the config file in the logging directory to keep track of the configuration.""" + # store in log directory + with open( + (Path(self.run_conf["logging"]["log_dir"]) / "config.yaml").resolve(), "w" + ) as yaml_file: + tmp_config = copy.deepcopy(self.run_conf) + tmp_config["logging"]["log_dir"] = str(tmp_config["logging"]["log_dir"]) + + yaml.dump(tmp_config, yaml_file, sort_keys=False) + + self.logger.debug( + f"Stored config under: {(Path(self.run_conf['logging']['log_dir']) / 'config.yaml').resolve()}" + ) + + @staticmethod + def extract_sweep_arguments(config: dict) -> Tuple[Union[BaseModel, dict]]: + """Extract sweep argument from the provided dictionary + + The config dictionary must contain a "sweep" entry with the sweep configuration. + The file structure is documented here: ./base_ml/base_validator.py + We follow the official sweep guidlines of WandB + Example Sweep files are provided in the ./configs/examples folder + + Args: + config (dict): Dictionary with all configurations + + Raises: + KeyError: Missing Sweep Keys + + Returns: + Tuple[Union[BaseModel, dict]]: Sweep arguments + """ + # validate sweep settings + if "sweep" not in config: + raise KeyError("No Sweep configuration provided") + sweep_schema.validate(config["sweep"]) + + sweep_conf = config["sweep"] + + # load parameters + flattened_dict = flatten_dict(config, sep=".") + filtered_dict = { + k: v for k, v in flattened_dict.items() if "parameters" in k.split(".") + } + parameters = remove_parameter_tag(filtered_dict, sep=".") + + sweep_conf["parameters"] = parameters + + return sweep_conf + + def overwrite_sweep_values(self, run_conf: dict, sweep_run_conf: dict) -> None: + """Overwrite run_conf file with the sweep values + + For the sweep, sweeping parameters are a flattened dict, with keys beeing specific with '.' separator. + These dictionary with the sweep hyperparameter selection needs to be unflattened (convert '.' 
into nested dict) + Afterward, keys are insertd in the run_conf dictionary + + Args: + run_conf (dict): Current dictionary without sweep selected parameters + sweep_run_conf (dict): Dictionary with the sweep config + """ + flattened_run_conf = flatten_dict(run_conf, sep=".") + filtered_dict = { + k: v + for k, v in flattened_run_conf.items() + if "parameters" not in k.split(".") + } + run_parameters = {**filtered_dict, **sweep_run_conf} + run_parameters = unflatten_dict(run_parameters, ".") + + self.run_conf = run_parameters + + @staticmethod + def seed_run(seed: int) -> None: + """Seed the experiment + + Args: + seed (int): Seed + """ + # seeding + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + os.environ["PYTHONHASHSEED"] = str(seed) + np.random.seed(seed) + random.seed(seed) + from packaging.version import parse, Version + + try: + import tensorflow as tf + except ImportError: + pass + else: + if parse(tf.__version__) >= Version("2.0.0"): + tf.random.set_seed(seed) + elif parse(tf.__version__) <= Version("1.13.2"): + tf.set_random_seed(seed) + else: + tf.compat.v1.set_random_seed(seed) + + @staticmethod + def seed_worker(worker_id) -> None: + """Seed a worker + + Args: + worker_id (_type_): Worker ID + """ + worker_seed = torch.initial_seed() % 2**32 + torch.manual_seed(worker_seed) + torch.cuda.manual_seed_all(worker_seed) + np.random.seed(worker_seed) + random.seed(worker_seed) + + def close_remaining_logger(self) -> None: + """Close all remaining loggers""" + logger = logging.getLogger("__main__") + for handler in logger.handlers: + logger.removeHandler(handler) + handler.close() + logger.handlers.clear() + logging.shutdown() diff --git a/base_ml/base_loss.py b/base_ml/base_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..193d62f8c23ca19dee44c9da9c01020f2204588c --- /dev/null +++ b/base_ml/base_loss.py @@ -0,0 +1,1171 @@ +# -*- coding: utf-8 -*- +# Loss functions (PyTorch and own defined) +# +# Own defined loss functions: +# xentropy_loss, dice_loss, mse_loss and msge_loss (https://github.com/vqdang/hover_net) +# WeightedBaseLoss, MAEWeighted, MSEWeighted, BCEWeighted, CEWeighted (https://github.com/okunator/cellseg_models.pytorch) +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +import torch +import torch.nn.functional as F +from typing import List, Tuple +from torch import nn +from torch.nn.modules.loss import _Loss +from base_ml.base_utils import filter2D, gaussian_kernel2d + + +class XentropyLoss(_Loss): + """Cross entropy loss""" + + def __init__(self, reduction: str = "mean") -> None: + super().__init__(size_average=None, reduce=None, reduction=reduction) + + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Assumes NCHW shape of array, must be torch.float32 dtype + + Args: + input (torch.Tensor): Ground truth array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes + target (torch.Tensor): Prediction array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes + + Returns: + torch.Tensor: Cross entropy loss, with shape () [scalar], grad_fn = MeanBackward0 + """ + # reshape + input = input.permute(0, 2, 3, 1) + target = target.permute(0, 2, 3, 1) + + epsilon = 10e-8 + # scale preds so that the class probs of each sample sum to 1 + 
pred = input / torch.sum(input, -1, keepdim=True) + # manual computation of crossentropy + pred = torch.clamp(pred, epsilon, 1.0 - epsilon) + loss = -torch.sum((target * torch.log(pred)), -1, keepdim=True) + loss = loss.mean() if self.reduction == "mean" else loss.sum() + + return loss + + +class DiceLoss(_Loss): + """Dice loss + + Args: + smooth (float, optional): Smoothing value. Defaults to 1e-3. + """ + + def __init__(self, smooth: float = 1e-3) -> None: + super().__init__(size_average=None, reduce=None, reduction="mean") + self.smooth = smooth + + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Assumes NCHW shape of array, must be torch.float32 dtype + + `pred` and `true` must be of torch.float32. Assuming of shape NxHxWxC. + + Args: + input (torch.Tensor): Prediction array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes + target (torch.Tensor): Ground truth array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes + + Returns: + torch.Tensor: Dice loss, with shape () [scalar], grad_fn=SumBackward0 + """ + input = input.permute(0, 2, 3, 1) + target = target.permute(0, 2, 3, 1) + inse = torch.sum(input * target, (0, 1, 2)) + l = torch.sum(input, (0, 1, 2)) + r = torch.sum(target, (0, 1, 2)) + loss = 1.0 - (2.0 * inse + self.smooth) / (l + r + self.smooth) + loss = torch.sum(loss) + + return loss + + +class MSELossMaps(_Loss): + """Calculate mean squared error loss for combined horizontal and vertical maps of segmentation tasks.""" + + def __init__(self) -> None: + super().__init__(size_average=None, reduce=None, reduction="mean") + + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Loss calculation + + Args: + input (torch.Tensor): Prediction of combined horizontal and vertical maps + with shape (N, 2, H, W), channel 0 is vertical and channel 1 is horizontal + target (torch.Tensor): Ground truth of combined horizontal and vertical maps + with shape (N, 2, H, W), channel 0 is vertical and channel 1 is horizontal + + Returns: + torch.Tensor: Mean squared error per pixel with shape (N, 2, H, W), grad_fn=SubBackward0 + + """ + # reshape + loss = input - target + loss = (loss * loss).mean() + return loss + + +class MSGELossMaps(_Loss): + def __init__(self) -> None: + super().__init__(size_average=None, reduce=None, reduction="mean") + + def get_sobel_kernel( + self, size: int, device: str + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Get sobel kernel with a given size. 
+ + Args: + size (int): Kernel site + device (str): Cuda device + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Horizontal and vertical sobel kernel, each with shape (size, size) + """ + assert size % 2 == 1, "Must be odd, get size=%d" % size + + h_range = torch.arange( + -size // 2 + 1, + size // 2 + 1, + dtype=torch.float32, + device=device, + requires_grad=False, + ) + v_range = torch.arange( + -size // 2 + 1, + size // 2 + 1, + dtype=torch.float32, + device=device, + requires_grad=False, + ) + h, v = torch.meshgrid(h_range, v_range, indexing="ij") + kernel_h = h / (h * h + v * v + 1.0e-15) + kernel_v = v / (h * h + v * v + 1.0e-15) + return kernel_h, kernel_v + + def get_gradient_hv(self, hv: torch.Tensor, device: str) -> torch.Tensor: + """For calculating gradient of horizontal and vertical prediction map + + + Args: + hv (torch.Tensor): horizontal and vertical map + device (str): CUDA device + + Returns: + torch.Tensor: Gradient with same shape as input + """ + kernel_h, kernel_v = self.get_sobel_kernel(5, device=device) + kernel_h = kernel_h.view(1, 1, 5, 5) # constant + kernel_v = kernel_v.view(1, 1, 5, 5) # constant + + h_ch = hv[..., 0].unsqueeze(1) # Nx1xHxW + v_ch = hv[..., 1].unsqueeze(1) # Nx1xHxW + + # can only apply in NCHW mode + h_dh_ch = F.conv2d(h_ch, kernel_h, padding=2) + v_dv_ch = F.conv2d(v_ch, kernel_v, padding=2) + dhv = torch.cat([h_dh_ch, v_dv_ch], dim=1) + dhv = dhv.permute(0, 2, 3, 1).contiguous() # to NHWC + return dhv + + def forward( + self, + input: torch.Tensor, + target: torch.Tensor, + focus: torch.Tensor, + device: str, + ) -> torch.Tensor: + """MSGE (Gradient of MSE) loss + + Args: + input (torch.Tensor): Input with shape (B, C, H, W) + target (torch.Tensor): Target with shape (B, C, H, W) + focus (torch.Tensor): Focus, type of masking (B, C, W, W) + device (str): CUDA device to work with. + + Returns: + torch.Tensor: MSGE loss + """ + input = input.permute(0, 2, 3, 1) + target = target.permute(0, 2, 3, 1) + focus = focus.permute(0, 2, 3, 1) + focus = focus[..., 1] + + focus = (focus[..., None]).float() # assume input NHW + focus = torch.cat([focus, focus], axis=-1).to(device) + true_grad = self.get_gradient_hv(target, device) + pred_grad = self.get_gradient_hv(input, device) + loss = pred_grad - true_grad + loss = focus * (loss * loss) + # artificial reduce_mean with focused region + loss = loss.sum() / (focus.sum() + 1.0e-8) + return loss + + +class FocalTverskyLoss(nn.Module): + """FocalTverskyLoss + + PyTorch implementation of the Focal Tversky Loss Function for multiple classes + doi: 10.1109/ISBI.2019.8759329 + Abraham, N., & Khan, N. M. (2019). + A Novel Focal Tversky Loss Function With Improved Attention U-Net for Lesion Segmentation. + In International Symposium on Biomedical Imaging. https://doi.org/10.1109/isbi.2019.8759329 + + @ Fabian Hörst, fabian.hoerst@uk-essen.de + Institute for Artifical Intelligence in Medicine, + University Medicine Essen + + Args: + alpha_t (float, optional): Alpha parameter for tversky loss (multiplied with false-negatives). Defaults to 0.7. + beta_t (float, optional): Beta parameter for tversky loss (multiplied with false-positives). Defaults to 0.3. + gamma_f (float, optional): Gamma Focal parameter. Defaults to 4/3. + smooth (float, optional): Smooting factor. Defaults to 0.000001. 
+ """ + + def __init__( + self, + alpha_t: float = 0.7, + beta_t: float = 0.3, + gamma_f: float = 4 / 3, + smooth: float = 1e-6, + ) -> None: + super().__init__() + self.alpha_t = alpha_t + self.beta_t = beta_t + self.gamma_f = gamma_f + self.smooth = smooth + self.num_classes = 2 + + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Loss calculation + + Args: + input (torch.Tensor): Predictions, logits (without Softmax). Shape: (B, C, H, W) + target (torch.Tensor): Targets, either flattened (Shape: (C, H, W) or as one-hot encoded (Shape: (batch-size, C, H, W)). + + Raises: + ValueError: Error if there is a shape missmatch + + Returns: + torch.Tensor: FocalTverskyLoss (weighted) + """ + input = input.permute(0, 2, 3, 1) + if input.shape[-1] != self.num_classes: + raise ValueError( + "Predictions must be a logit tensor with the last dimension shape beeing equal to the number of classes" + ) + if len(target.shape) != len(input.shape): + # convert the targets to onehot + target = F.one_hot(target, num_classes=self.num_classes) + + # flatten + target = target.permute(0, 2, 3, 1) + target = target.view(-1) + input = torch.softmax(input, dim=-1).view(-1) + + # calculate true positives, false positives and false negatives + tp = (input * target).sum() + fp = ((1 - target) * input).sum() + fn = (target * (1 - input)).sum() + + Tversky = (tp + self.smooth) / ( + tp + self.alpha_t * fn + self.beta_t * fp + self.smooth + ) + FocalTversky = (1 - Tversky) ** self.gamma_f + + return FocalTversky + + +class MCFocalTverskyLoss(FocalTverskyLoss): + """Multiclass FocalTverskyLoss + + PyTorch implementation of the Focal Tversky Loss Function for multiple classes + doi: 10.1109/ISBI.2019.8759329 + Abraham, N., & Khan, N. M. (2019). + A Novel Focal Tversky Loss Function With Improved Attention U-Net for Lesion Segmentation. + In International Symposium on Biomedical Imaging. https://doi.org/10.1109/isbi.2019.8759329 + + @ Fabian Hörst, fabian.hoerst@uk-essen.de + Institute for Artifical Intelligence in Medicine, + University Medicine Essen + + Args: + alpha_t (float, optional): Alpha parameter for tversky loss (multiplied with false-negatives). Defaults to 0.7. + beta_t (float, optional): Beta parameter for tversky loss (multiplied with false-positives). Defaults to 0.3. + gamma_f (float, optional): Gamma Focal parameter. Defaults to 4/3. + smooth (float, optional): Smooting factor. Defaults to 0.000001. + num_classes (int, optional): Number of output classes. For binary segmentation, prefer FocalTverskyLoss (speed optimized). Defaults to 2. + class_weights (List[int], optional): Weights for each class. If not provided, equal weight. Length must be equal to num_classes. Defaults to None. + """ + + def __init__( + self, + alpha_t: float = 0.7, + beta_t: float = 0.3, + gamma_f: float = 4 / 3, + smooth: float = 0.000001, + num_classes: int = 2, + class_weights: List[int] = None, + ) -> None: + super().__init__(alpha_t, beta_t, gamma_f, smooth) + self.num_classes = num_classes + if class_weights is None: + self.class_weights = [1 for i in range(self.num_classes)] + else: + assert ( + len(class_weights) == self.num_classes + ), "Please provide matching weights" + self.class_weights = class_weights + self.class_weights = torch.Tensor(self.class_weights) + + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Loss calculation + + Args: + input (torch.Tensor): Predictions, logits (without Softmax). 
Shape: (B, num_classes, H, W) + target (torch.Tensor): Targets, either flattened (Shape: (B, H, W) or as one-hot encoded (Shape: (B, num_classes, H, W)). + + Raises: + ValueError: Error if there is a shape missmatch + + Returns: + torch.Tensor: FocalTverskyLoss (weighted) + """ + input = input.permute(0, 2, 3, 1) + if input.shape[-1] != self.num_classes: + raise ValueError( + "Predictions must be a logit tensor with the last dimension shape beeing equal to the number of classes" + ) + if len(target.shape) != len(input.shape): + # convert the targets to onehot + target = F.one_hot(target, num_classes=self.num_classes) + + target = target.permute(0, 2, 3, 1) + # Softmax + input = torch.softmax(input, dim=-1) + + # Reshape + input = torch.permute(input, (3, 1, 2, 0)) + target = torch.permute(target, (3, 1, 2, 0)) + + input = torch.flatten(input, start_dim=1) + target = torch.flatten(target, start_dim=1) + + tp = torch.sum(input * target, 1) + fp = torch.sum((1 - target) * input, 1) + fn = torch.sum(target * (1 - input), 1) + + Tversky = (tp + self.smooth) / ( + tp + self.alpha_t * fn + self.beta_t * fp + self.smooth + ) + FocalTversky = (1 - Tversky) ** self.gamma_f + + self.class_weights = self.class_weights.to(FocalTversky.device) + return torch.sum(self.class_weights * FocalTversky) + + +class WeightedBaseLoss(nn.Module): + """Init a base class for weighted cross entropy based losses. + + Enables weighting for object instance edges and classes. + + Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617) + + Args: + apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the + loss matrix. Defaults to False. + apply_ls (bool, optional): If True, Label smoothing will be applied to the target.. Defaults to False. + apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False. + apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False. + class_weights (torch.Tensor, optional): Class weights. A tensor of shape (C, ). Defaults to None. + edge_weight (float, optional): Weight for the object instance border pixels. Defaults to None. + """ + + def __init__( + self, + apply_sd: bool = False, + apply_ls: bool = False, + apply_svls: bool = False, + apply_mask: bool = False, + class_weights: torch.Tensor = None, + edge_weight: float = None, + **kwargs, + ) -> None: + super().__init__() + self.apply_sd = apply_sd + self.apply_ls = apply_ls + self.apply_svls = apply_svls + self.apply_mask = apply_mask + self.class_weights = class_weights + self.edge_weight = edge_weight + + def apply_spectral_decouple( + self, loss_matrix: torch.Tensor, yhat: torch.Tensor, lam: float = 0.01 + ) -> torch.Tensor: + """Apply spectral decoupling L2 norm after the loss. + + https://arxiv.org/abs/2011.09468 + + Args: + loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W). + yhat (torch.Tensor): The pixel predictions of the model. Shape (B, C, H, W). + lam (float, optional): Lambda constant.. Defaults to 0.01. + + Returns: + torch.Tensor: SD-regularized loss matrix. Same shape as input. + """ + return loss_matrix + (lam / 2) * (yhat**2).mean(axis=1) + + def apply_ls_to_target( + self, + target: torch.Tensor, + num_classes: int, + label_smoothing: float = 0.1, + ) -> torch.Tensor: + """_summary_ + + Args: + target (torch.Tensor): Number of classes in the data. + num_classes (int): The target one hot tensor. 
Shape (B, C, H, W) + label_smoothing (float, optional): The smoothing coeff alpha. Defaults to 0.1. + + Returns: + torch.Tensor: Label smoothed target. Same shape as input. + """ + return target * (1 - label_smoothing) + label_smoothing / num_classes + + def apply_svls_to_target( + self, + target: torch.Tensor, + num_classes: int, + kernel_size: int = 5, + sigma: int = 3, + **kwargs, + ) -> torch.Tensor: + """Apply spatially varying label smoothihng to target map. + + https://arxiv.org/abs/2104.05788 + + Args: + target (torch.Tensor): The target one hot tensor. Shape (B, C, H, W). + num_classes (int): Number of classes in the data. + kernel_size (int, optional): Size of a square kernel.. Defaults to 5. + sigma (int, optional): The std of the gaussian. Defaults to 3. + + Returns: + torch.Tensor: Label smoothed target. Same shape as input. + """ + my, mx = kernel_size // 2, kernel_size // 2 + gaussian_kernel = gaussian_kernel2d( + kernel_size, sigma, num_classes, device=target.device + ) + neighborsum = (1 - gaussian_kernel[..., my, mx]) + 1e-16 + gaussian_kernel = gaussian_kernel.clone() + gaussian_kernel[..., my, mx] = neighborsum + svls_kernel = gaussian_kernel / neighborsum[0] + + return filter2D(target.float(), svls_kernel) / svls_kernel[0].sum() + + def apply_class_weights( + self, loss_matrix: torch.Tensor, target: torch.Tensor + ) -> torch.Tensor: + """Multiply pixelwise loss matrix by the class weights. + + NOTE: No normalization + + Args: + loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W). + target (torch.Tensor): The target mask. Shape (B, H, W). + + Returns: + torch.Tensor: The loss matrix scaled with the weight matrix. Shape (B, H, W). + """ + weight_mat = self.class_weights[target.long()].to(target.device) # to (B, H, W) + loss = loss_matrix * weight_mat + + return loss + + def apply_edge_weights( + self, loss_matrix: torch.Tensor, weight_map: torch.Tensor + ) -> torch.Tensor: + """Apply weights to the object boundaries. + + Basically just computes `edge_weight`**`weight_map`. + + Args: + loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W). + weight_map (torch.Tensor): Map that points to the pixels that will be weighted. Shape (B, H, W). + + Returns: + torch.Tensor: The loss matrix scaled with the nuclear boundary weights. Shape (B, H, W). + """ + return loss_matrix * self.edge_weight**weight_map + + def apply_mask_weight( + self, loss_matrix: torch.Tensor, mask: torch.Tensor, norm: bool = True + ) -> torch.Tensor: + """Apply a mask to the loss matrix. + + Args: + loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W). + mask (torch.Tensor): The mask. Shape (B, H, W). + norm (bool, optional): If True, the loss matrix will be normalized by the mean of the mask. Defaults to True. + + Returns: + torch.Tensor: The loss matrix scaled with the mask. Shape (B, H, W). + """ + loss_matrix *= mask + if norm: + norm_mask = torch.mean(mask.float()) + 1e-7 + loss_matrix /= norm_mask + + return loss_matrix + + def extra_repr(self) -> str: + """Add info to print.""" + s = "apply_sd={apply_sd}, apply_ls={apply_ls}, apply_svls={apply_svls}, apply_mask={apply_mask}, class_weights={class_weights}, edge_weight={edge_weight}" # noqa + return s.format(**self.__dict__) + + +class MAEWeighted(WeightedBaseLoss): + """Compute the MAE loss. Used in the stardist method. 
+ + Stardist: + https://arxiv.org/pdf/1806.03535.pdf + Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617) + + NOTE: We have added the option to apply spectral decoupling and edge weights + to the loss matrix. + + Args: + alpha (float, optional): Weight regulizer b/w [0,1]. In stardist repo, this is the + 'train_background_reg' parameter. Defaults to 1e-4. + apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the + loss matrix. Defaults to False. + apply_mask (bool, optional): f True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False. + edge_weight (float, optional): Weight that is added to object borders. Defaults to None. + """ + + def __init__( + self, + alpha: float = 1e-4, + apply_sd: bool = False, + apply_mask: bool = False, + edge_weight: float = None, + **kwargs, + ) -> None: + super().__init__(apply_sd, False, False, apply_mask, False, edge_weight) + self.alpha = alpha + self.eps = 1e-7 + + def forward( + self, + input: torch.Tensor, + target: torch.Tensor, + target_weight: torch.Tensor = None, + mask: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """Compute the masked MAE loss. + + Args: + input (torch.Tensor): The prediction map. Shape (B, C, H, W). + target (torch.Tensor): The ground truth annotations. Shape (B, H, W). + target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None. + mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None. + + Raises: + ValueError: Pred and target shapes must match. + + Returns: + torch.Tensor: Computed MAE loss (scalar). + """ + yhat = input + n_classes = yhat.shape[1] + if target.size() != yhat.size(): + target = target.unsqueeze(1).repeat_interleave(n_classes, dim=1) + + if not yhat.shape == target.shape: + raise ValueError( + f"Pred and target shapes must match. Got: {yhat.shape}, {target.shape}" + ) + + # compute the MAE loss with alpha as weight + mae_loss = torch.mean(torch.abs(target - yhat), axis=1) # (B, H, W) + + if self.apply_mask and mask is not None: + mae_loss = self.apply_mask_weight(mae_loss, mask, norm=True) # (B, H, W) + + # add the background regularization + if self.alpha > 0: + reg = torch.mean(((1 - mask).unsqueeze(1)) * torch.abs(yhat), axis=1) + mae_loss += self.alpha * reg + + if self.apply_sd: + mae_loss = self.apply_spectral_decouple(mae_loss, yhat) + + if self.edge_weight is not None: + mae_loss = self.apply_edge_weights(mae_loss, target_weight) + + return mae_loss.mean() + + +class MSEWeighted(WeightedBaseLoss): + """MSE-loss. + + Args: + apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the + loss matrix. Defaults to False. + apply_ls (bool, optional): If True, Label smoothing will be applied to the target. Defaults to False. + apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False. + apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False. + edge_weight (float, optional): Weight that is added to object borders. Defaults to None. + class_weights (torch.Tensor, optional): Class weights. A tensor of shape (n_classes,). Defaults to None. 
+ """ + + def __init__( + self, + apply_sd: bool = False, + apply_ls: bool = False, + apply_svls: bool = False, + apply_mask: bool = False, + edge_weight: float = None, + class_weights: torch.Tensor = None, + **kwargs, + ) -> None: + super().__init__( + apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight + ) + + @staticmethod + def tensor_one_hot(type_map: torch.Tensor, n_classes: int) -> torch.Tensor: + """Convert a segmentation mask into one-hot-format. + + I.e. Takes in a segmentation mask of shape (B, H, W) and reshapes it + into a tensor of shape (B, C, H, W). + + Args: + type_map (torch.Tensor): Multi-label Segmentation mask. Shape (B, H, W). + n_classes (int): Number of classes. (Zero-class included.) + + Raises: + TypeError: Input `type_map` should have dtype: torch.int64. + + Returns: + torch.Tensor: A one hot tensor. Shape: (B, C, H, W). Dtype: torch.FloatTensor. + """ + if not type_map.dtype == torch.int64: + raise TypeError( + f""" + Input `type_map` should have dtype: torch.int64. Got: {type_map.dtype}.""" + ) + + one_hot = torch.zeros( + type_map.shape[0], + n_classes, + *type_map.shape[1:], + device=type_map.device, + dtype=type_map.dtype, + ) + + return one_hot.scatter_(dim=1, index=type_map.unsqueeze(1), value=1.0) + 1e-7 + + def forward( + self, + input: torch.Tensor, + target: torch.Tensor, + target_weight: torch.Tensor = None, + mask: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """Compute the MSE-loss. + + Args: + input (torch.Tensor): The prediction map. Shape (B, C, H, W, C). + target (torch.Tensor): The ground truth annotations. Shape (B, H, W). + target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None. + mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None. + + Returns: + torch.Tensor: Computed MSE loss (scalar). + """ + yhat = input + target_one_hot = target + num_classes = yhat.shape[1] + + if target.size() != yhat.size(): + if target.dtype == torch.float32: + target_one_hot = target.unsqueeze(1) + else: + target_one_hot = MSEWeighted.tensor_one_hot(target, num_classes) + + if self.apply_svls: + target_one_hot = self.apply_svls_to_target( + target_one_hot, num_classes, **kwargs + ) + + if self.apply_ls: + target_one_hot = self.apply_ls_to_target( + target_one_hot, num_classes, **kwargs + ) + + mse = F.mse_loss(yhat, target_one_hot, reduction="none") # (B, C, H, W) + mse = torch.mean(mse, dim=1) # to (B, H, W) + + if self.apply_mask and mask is not None: + mse = self.apply_mask_weight(mse, mask, norm=False) # (B, H, W) + + if self.apply_sd: + mse = self.apply_spectral_decouple(mse, yhat) + + if self.class_weights is not None: + mse = self.apply_class_weights(mse, target) + + if self.edge_weight is not None: + mse = self.apply_edge_weights(mse, target_weight) + + return torch.mean(mse) + + +class BCEWeighted(WeightedBaseLoss): + def __init__( + self, + apply_sd: bool = False, + apply_ls: bool = False, + apply_svls: bool = False, + apply_mask: bool = False, + edge_weight: float = None, + class_weights: torch.Tensor = None, + **kwargs, + ) -> None: + """Binary cross entropy loss with weighting and other tricks. + + Parameters + ---------- + apply_sd : bool, default=False + If True, Spectral decoupling regularization will be applied to the + loss matrix. + apply_ls : bool, default=False + If True, Label smoothing will be applied to the target. 
+ apply_svls : bool, default=False + If True, spatially varying label smoothing will be applied to the target + apply_mask : bool, default=False + If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W) + edge_weight : float, default=None + Weight that is added to object borders. + class_weights : torch.Tensor, default=None + Class weights. A tensor of shape (n_classes,). + """ + super().__init__( + apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight + ) + self.eps = 1e-8 + + def forward( + self, + input: torch.Tensor, + target: torch.Tensor, + target_weight: torch.Tensor = None, + mask: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """Compute binary cross entropy loss. + + Parameters + ---------- + yhat : torch.Tensor + The prediction map. Shape (B, C, H, W). + target : torch.Tensor + the ground truth annotations. Shape (B, H, W). + target_weight : torch.Tensor, default=None + The edge weight map. Shape (B, H, W). + mask : torch.Tensor, default=None + The mask map. Shape (B, H, W). + + Returns + ------- + torch.Tensor: + Computed BCE loss (scalar). + """ + # Logits input + yhat = input + num_classes = yhat.shape[1] + yhat = torch.clip(yhat, self.eps, 1.0 - self.eps) + + if target.size() != yhat.size(): + target = target.unsqueeze(1).repeat_interleave(num_classes, dim=1) + + if self.apply_svls: + target = self.apply_svls_to_target(target, num_classes, **kwargs) + + if self.apply_ls: + target = self.apply_ls_to_target(target, num_classes, **kwargs) + + bce = F.binary_cross_entropy_with_logits( + yhat.float(), target.float(), reduction="none" + ) # (B, C, H, W) + bce = torch.mean(bce, dim=1) # (B, H, W) + + if self.apply_mask and mask is not None: + bce = self.apply_mask_weight(bce, mask, norm=False) # (B, H, W) + + if self.apply_sd: + bce = self.apply_spectral_decouple(bce, yhat) + + if self.class_weights is not None: + bce = self.apply_class_weights(bce, target) + + if self.edge_weight is not None: + bce = self.apply_edge_weights(bce, target_weight) + + return torch.mean(bce) + + +# class BCEWeighted(WeightedBaseLoss): +# """Binary cross entropy loss with weighting and other tricks. +# Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617) + +# Args: +# apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the +# loss matrix. Defaults to False. +# apply_ls (bool, optional): If True, Label smoothing will be applied to the target. Defaults to False. +# apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False. +# apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False. +# edge_weight (float, optional): Weight that is added to object borders. Defaults to None. +# class_weights (torch.Tensor, optional): Class weights. A tensor of shape (n_classes,). Defaults to None. +# """ + +# def __init__( +# self, +# apply_sd: bool = False, +# apply_ls: bool = False, +# apply_svls: bool = False, +# apply_mask: bool = False, +# edge_weight: float = None, +# class_weights: torch.Tensor = None, +# **kwargs, +# ) -> None: +# super().__init__( +# apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight +# ) +# self.eps = 1e-8 + +# def forward( +# self, +# input: torch.Tensor, +# target: torch.Tensor, +# target_weight: torch.Tensor = None, +# mask: torch.Tensor = None, +# **kwargs, +# ) -> torch.Tensor: +# """Compute binary cross entropy loss. 
+ +# Args: +# input (torch.Tensor): The prediction map. We internally convert back via logit function. Shape (B, C, H, W). +# target (torch.Tensor): the ground truth annotations. Shape (B, H, W). +# target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None. +# mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None. + +# Returns: +# torch.Tensor: Computed BCE loss (scalar). +# """ +# yhat = input +# yhat = torch.special.logit(yhat) +# num_classes = yhat.shape[1] +# yhat = torch.clip(yhat, self.eps, 1.0 - self.eps) + +# if target.size() != yhat.size(): +# target = target.unsqueeze(1).repeat_interleave(num_classes, dim=1) + +# if self.apply_svls: +# target = self.apply_svls_to_target(target, num_classes, **kwargs) + +# if self.apply_ls: +# target = self.apply_ls_to_target(target, num_classes, **kwargs) + +# bce = F.binary_cross_entropy_with_logits( +# yhat.float(), target.float(), reduction="none" +# ) # (B, C, H, W) +# bce = torch.mean(bce, dim=1) # (B, H, W) + +# if self.apply_mask and mask is not None: +# bce = self.apply_mask_weight(bce, mask, norm=False) # (B, H, W) + +# if self.apply_sd: +# bce = self.apply_spectral_decouple(bce, yhat) + +# if self.class_weights is not None: +# bce = self.apply_class_weights(bce, target) + +# if self.edge_weight is not None: +# bce = self.apply_edge_weights(bce, target_weight) + +# return torch.mean(bce) + + +class CEWeighted(WeightedBaseLoss): + def __init__( + self, + apply_sd: bool = False, + apply_ls: bool = False, + apply_svls: bool = False, + apply_mask: bool = False, + edge_weight: float = None, + class_weights: torch.Tensor = None, + **kwargs, + ) -> None: + """Cross-Entropy loss with weighting. + + Parameters + ---------- + apply_sd : bool, default=False + If True, Spectral decoupling regularization will be applied to the + loss matrix. + apply_ls : bool, default=False + If True, Label smoothing will be applied to the target. + apply_svls : bool, default=False + If True, spatially varying label smoothing will be applied to the target + apply_mask : bool, default=False + If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W) + edge_weight : float, default=None + Weight that is added to object borders. + class_weights : torch.Tensor, default=None + Class weights. A tensor of shape (n_classes,). + """ + super().__init__( + apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight + ) + self.eps = 1e-8 + + def forward( + self, + input: torch.Tensor, + target: torch.Tensor, + target_weight: torch.Tensor = None, + mask: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """Compute the cross entropy loss. + + Parameters + ---------- + yhat : torch.Tensor + The prediction map. Shape (B, C, H, W). + target : torch.Tensor + the ground truth annotations. Shape (B, H, W). + target_weight : torch.Tensor, default=None + The edge weight map. Shape (B, H, W). + mask : torch.Tensor, default=None + The mask map. Shape (B, H, W). + + Returns + ------- + torch.Tensor: + Computed CE loss (scalar). 
+ """ + yhat = input # TODO: remove doubled Softmax -> this function needs logits instead of softmax output + input_soft = F.softmax(yhat, dim=1) + self.eps # (B, C, H, W) + num_classes = yhat.shape[1] + if len(target.shape) != len(yhat.shape) and target.shape[1] != num_classes: + target_one_hot = MSEWeighted.tensor_one_hot( + target, num_classes + ) # (B, C, H, W) + else: + target_one_hot = target + target = torch.argmax(target, dim=1) + assert target_one_hot.shape == yhat.shape + + if self.apply_svls: + target_one_hot = self.apply_svls_to_target( + target_one_hot, num_classes, **kwargs + ) + + if self.apply_ls: + target_one_hot = self.apply_ls_to_target( + target_one_hot, num_classes, **kwargs + ) + + loss = -torch.sum(target_one_hot * torch.log(input_soft), dim=1) # (B, H, W) + + if self.apply_mask and mask is not None: + loss = self.apply_mask_weight(loss, mask, norm=False) # (B, H, W) + + if self.apply_sd: + loss = self.apply_spectral_decouple(loss, yhat) + + if self.class_weights is not None: + loss = self.apply_class_weights(loss, target) + + if self.edge_weight is not None: + loss = self.apply_edge_weights(loss, target_weight) + + return loss.mean() + + +# class CEWeighted(WeightedBaseLoss): +# """Cross-Entropy loss with weighting. +# Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617) + +# Args: +# apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the loss matrix. Defaults to False. +# apply_ls (bool, optional): If True, Label smoothing will be applied to the target. Defaults to False. +# apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False. +# apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False. +# edge_weight (float, optional): Weight that is added to object borders. Defaults to None. +# class_weights (torch.Tensor, optional): Class weights. A tensor of shape (n_classes,). Defaults to None. +# logits (bool, optional): If work on logit values. Defaults to False. Defaults to False. +# """ + +# def __init__( +# self, +# apply_sd: bool = False, +# apply_ls: bool = False, +# apply_svls: bool = False, +# apply_mask: bool = False, +# edge_weight: float = None, +# class_weights: torch.Tensor = None, +# logits: bool = False, +# **kwargs, +# ) -> None: +# super().__init__( +# apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight +# ) +# self.eps = 1e-8 +# self.logits = logits + +# def forward( +# self, +# input: torch.Tensor, +# target: torch.Tensor, +# target_weight: torch.Tensor = None, +# mask: torch.Tensor = None, +# **kwargs, +# ) -> torch.Tensor: +# """Compute the cross entropy loss. + +# Args: +# input (torch.Tensor): The prediction map. Shape (B, C, H, W). +# target (torch.Tensor): The ground truth annotations. Shape (B, H, W). +# target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None. +# mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None. + +# Returns: +# torch.Tensor: Computed CE loss (scalar). 
+# """ +# yhat = input +# if self.logits: +# input_soft = ( +# F.softmax(yhat, dim=1) + self.eps +# ) # (B, C, H, W) # check if doubled softmax +# else: +# input_soft = input + +# num_classes = yhat.shape[1] +# if len(target.shape) != len(yhat.shape) and target.shape[1] != num_classes: +# target_one_hot = MSEWeighted.tensor_one_hot( +# target, num_classes +# ) # (B, C, H, W) +# else: +# target_one_hot = target +# target = torch.argmax(target, dim=1) +# assert target_one_hot.shape == yhat.shape + +# if self.apply_svls: +# target_one_hot = self.apply_svls_to_target( +# target_one_hot, num_classes, **kwargs +# ) + +# if self.apply_ls: +# target_one_hot = self.apply_ls_to_target( +# target_one_hot, num_classes, **kwargs +# ) + +# loss = -torch.sum(target_one_hot * torch.log(input_soft), dim=1) # (B, H, W) + +# if self.apply_mask and mask is not None: +# loss = self.apply_mask_weight(loss, mask, norm=False) # (B, H, W) + +# if self.apply_sd: +# loss = self.apply_spectral_decouple(loss, yhat) + +# if self.class_weights is not None: +# loss = self.apply_class_weights(loss, target) + +# if self.edge_weight is not None: +# loss = self.apply_edge_weights(loss, target_weight) + +# return loss.mean() + + +### Stardist loss functions +class L1LossWeighted(nn.Module): + def __init__(self) -> None: + super().__init__() + + def forward( + self, + input: torch.Tensor, + target: torch.Tensor, + target_weight: torch.Tensor = None, + ) -> torch.Tensor: + l1loss = F.l1_loss(input, target, size_average=True, reduce=False) + l1loss = torch.mean(l1loss, dim=1) + if target_weight is not None: + l1loss = torch.mean(target_weight * l1loss) + else: + l1loss = torch.mean(l1loss) + return l1loss + + +def retrieve_loss_fn(loss_name: dict, **kwargs) -> _Loss: + """Return the loss function with given name defined in the LOSS_DICT and initialize with kwargs + + kwargs must match with the parameters defined in the initialization method of the selected loss object + + Args: + loss_name (dict): Name of the loss function + + Returns: + _Loss: Loss + """ + loss_fn = LOSS_DICT[loss_name] + loss_fn = loss_fn(**kwargs) + + return loss_fn + + +LOSS_DICT = { + "xentropy_loss": XentropyLoss, + "dice_loss": DiceLoss, + "mse_loss_maps": MSELossMaps, + "msge_loss_maps": MSGELossMaps, + "FocalTverskyLoss": FocalTverskyLoss, + "MCFocalTverskyLoss": MCFocalTverskyLoss, + "CrossEntropyLoss": nn.CrossEntropyLoss, # input logits, targets + "L1Loss": nn.L1Loss, + "MSELoss": nn.MSELoss, + "CTCLoss": nn.CTCLoss, # probability + "NLLLoss": nn.NLLLoss, # log-probabilities of each class + "PoissonNLLLoss": nn.PoissonNLLLoss, + "GaussianNLLLoss": nn.GaussianNLLLoss, + "KLDivLoss": nn.KLDivLoss, # argument input in log-space + "BCELoss": nn.BCELoss, # probabilities + "BCEWithLogitsLoss": nn.BCEWithLogitsLoss, # logits + "MarginRankingLoss": nn.MarginRankingLoss, + "HingeEmbeddingLoss": nn.HingeEmbeddingLoss, + "MultiLabelMarginLoss": nn.MultiLabelMarginLoss, + "HuberLoss": nn.HuberLoss, + "SmoothL1Loss": nn.SmoothL1Loss, + "SoftMarginLoss": nn.SoftMarginLoss, # logits + "MultiLabelSoftMarginLoss": nn.MultiLabelSoftMarginLoss, + "CosineEmbeddingLoss": nn.CosineEmbeddingLoss, + "MultiMarginLoss": nn.MultiMarginLoss, + "TripletMarginLoss": nn.TripletMarginLoss, + "TripletMarginWithDistanceLoss": nn.TripletMarginWithDistanceLoss, + "MAEWeighted": MAEWeighted, + "MSEWeighted": MSEWeighted, + "BCEWeighted": BCEWeighted, # logits + "CEWeighted": CEWeighted, # logits + "L1LossWeighted": L1LossWeighted, +} diff --git a/base_ml/base_optim.py 
b/base_ml/base_optim.py new file mode 100644 index 0000000000000000000000000000000000000000..653ea62bbd5a082153f5e086b785dcfa913daf33 --- /dev/null +++ b/base_ml/base_optim.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Wrappping all available PyTorch Optimizer +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +from torch.optim import ( + ASGD, + LBFGS, + SGD, + Adadelta, + Adagrad, + Adam, + Adamax, + AdamW, + RAdam, + RMSprop, + Rprop, + SparseAdam, +) + +OPTI_DICT = { + "Adadelta": Adadelta, + "Adagrad": Adagrad, + "Adam": Adam, + "AdamW": AdamW, + "SparseAdam": SparseAdam, + "Adamax": Adamax, + "ASGD": ASGD, + "LBFGS": LBFGS, + "RAdam": RAdam, + "RMSprop": RMSprop, + "Rprop": Rprop, + "SGD": SGD, +} diff --git a/base_ml/base_trainer.py b/base_ml/base_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..e1e5ded9206bf897dce54b1e1049e0e95983f703 --- /dev/null +++ b/base_ml/base_trainer.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# Base Trainer Class +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import logging +from abc import abstractmethod +from typing import Tuple, Union + +import torch +import torch.nn as nn +import wandb +from base_ml.base_early_stopping import EarlyStopping +from pathlib import Path +from torch.nn.modules.loss import _Loss +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler +from torch.utils.data import DataLoader +from utils.tools import flatten_dict + + +class BaseTrainer: + """ + Base class for all trainers with important ML components + + Args: + model (nn.Module): Model that should be trained + loss_fn (_Loss): Loss function + optimizer (Optimizer): Optimizer + scheduler (_LRScheduler): Learning rate scheduler + device (str): Cuda device to use, e.g., cuda:0. + logger (logging.Logger): Logger module + logdir (Union[Path, str]): Logging directory + experiment_config (dict): Configuration of this experiment + early_stopping (EarlyStopping, optional): Early Stopping Class. Defaults to None. + accum_iter (int, optional): Accumulation steps for gradient accumulation. + Provide a number greater than 1 for activating gradient accumulation. Defaults to 1. + mixed_precision (bool, optional): If mixed-precision should be used. Defaults to False. + log_images (bool, optional): If images should be logged to WandB. Defaults to False. 
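+
+    Example:
+        Rough usage sketch (illustrative; the subclass name and the config /
+        dataloader variables are assumptions, not part of this file). Concrete
+        trainers implement ``train_epoch``, ``validation_epoch``, ``train_step``
+        and ``validation_step``:
+
+            trainer = MyCellTrainer(
+                model=model,
+                loss_fn=loss_fn,
+                optimizer=optimizer,
+                scheduler=scheduler,
+                device="cuda:0",
+                logger=logger,
+                logdir="./logs/experiment",
+                experiment_config=config,
+            )
+            trainer.fit(
+                epochs=100,
+                train_dataloader=train_loader,
+                val_dataloader=val_loader,
+                eval_every=1,
+            )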
+ """ + + def __init__( + self, + model: nn.Module, + loss_fn: _Loss, + optimizer: Optimizer, + scheduler: _LRScheduler, + device: str, + logger: logging.Logger, + logdir: Union[Path, str], + experiment_config: dict, + early_stopping: EarlyStopping = None, + accum_iter: int = 1, + mixed_precision: bool = False, + log_images: bool = False, + #model_ema: bool = True, + ) -> None: + self.model = model + + self.loss_fn = loss_fn + self.optimizer = optimizer + self.scheduler = scheduler + self.device = device + self.logger = logger + self.logdir = Path(logdir) + self.early_stopping = early_stopping + self.accum_iter = accum_iter + self.start_epoch = 0 + self.experiment_config = experiment_config + self.log_images = log_images + self.mixed_precision = mixed_precision + if self.mixed_precision: + self.scaler = torch.cuda.amp.GradScaler(enabled=True) + else: + self.scaler = None + + @abstractmethod + def train_epoch( + self, epoch: int, train_loader: DataLoader, **kwargs + ) -> Tuple[dict, dict]: + """Training logic for a training epoch + + Args: + epoch (int): Current epoch number + train_loader (DataLoader): Train dataloader + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + Tuple[dict, dict]: wandb logging dictionaries + * Scalar metrics + * Image metrics + """ + raise NotImplementedError + + @abstractmethod + def validation_epoch( + self, epoch: int, val_dataloader: DataLoader + ) -> Tuple[dict, dict, float]: + """Training logic for an validation epoch + + Args: + epoch (int): Current epoch number + val_dataloader (DataLoader): Validation dataloader + + Raises: + NotImplementedError: Needs to be implemented + + Returns: + Tuple[dict, dict, float]: wandb logging dictionaries and early_stopping_metric + * Scalar metrics + * Image metrics + * Early Stopping metric as float + """ + raise NotImplementedError + + @abstractmethod + def train_step(self, batch: object, batch_idx: int, num_batches: int): + """Training logic for one training batch + + Args: + batch (object): A training batch + batch_idx (int): Current batch index + num_batches (int): Maximum number of batches + + Raises: + NotImplementedError: Needs to be implemented + """ + + raise NotImplementedError + + @abstractmethod + def validation_step(self, batch, batch_idx: int): + """Training logic for one validation batch + + Args: + batch (object): A training batch + batch_idx (int): Current batch index + + Raises: + NotImplementedError: Needs to be implemented + """ + + def fit( + self, + epochs: int, + train_dataloader: DataLoader, + val_dataloader: DataLoader, + metric_init: dict = None, + eval_every: int = 1, + **kwargs, + ): + """Fitting function to start training and validation of the trainer + + Args: + epochs (int): Number of epochs the network should be training + train_dataloader (DataLoader): Dataloader with training data + val_dataloader (DataLoader): Dataloader with validation data + metric_init (dict, optional): Initialization dictionary with scalar metrics that should be initialized for startup. + This is just import for logging with wandb if you want to have the plots properly scaled. + The data in the the metric dictionary is used as values for epoch 0 (before training has startetd). + If not provided, step 0 (epoch 0) is not logged. Should have the same scalar keys as training and validation epochs report. + For more information, you should have a look into the train_epoch and val_epoch methods where the wandb logging dicts are assembled. + Defaults to None. 
+ eval_every (int, optional): How often the network should be evaluated (after how many epochs). Defaults to 1. + **kwargs + """ + + self.logger.info(f"Starting training, total number of epochs: {epochs}") + if metric_init is not None and self.start_epoch == 0: + wandb.log(metric_init, step=0) + for epoch in range(self.start_epoch, epochs): + # training epoch + #train_sampler.set_epoch(epoch) # for distributed training + self.logger.info(f"Epoch: {epoch+1}/{epochs}") + train_scalar_metrics, train_image_metrics = self.train_epoch( + epoch, train_dataloader, **kwargs + ) + wandb.log(train_scalar_metrics, step=epoch + 1) + if self.log_images: + wandb.log(train_image_metrics, step=epoch + 1) + if epoch >=95 and ((epoch + 1)) % eval_every == 0: + # validation epoch + ( + val_scalar_metrics, + val_image_metrics, + early_stopping_metric, + ) = self.validation_epoch(epoch, val_dataloader) + wandb.log(val_scalar_metrics, step=epoch + 1) + if self.log_images: + wandb.log(val_image_metrics, step=epoch + 1) + + #self.save_checkpoint(epoch, f"checkpoint_{epoch}.pth") + + # log learning rate + curr_lr = self.optimizer.param_groups[0]["lr"] + wandb.log( + { + "Learning-Rate/Learning-Rate": curr_lr, + }, + step=epoch + 1, + ) + if epoch >=95 and ((epoch + 1)) % eval_every == 0: + # early stopping + if self.early_stopping is not None: + best_model = self.early_stopping(early_stopping_metric, epoch) + if best_model: + self.logger.info("New best model - save checkpoint") + self.save_checkpoint(epoch, "model_best.pth") + elif self.early_stopping.early_stop: + self.logger.info("Performing early stopping!") + break + self.save_checkpoint(epoch, "latest_checkpoint.pth") + + # scheduling + if type(self.scheduler) == torch.optim.lr_scheduler.ReduceLROnPlateau: + self.scheduler.step(float(val_scalar_metrics["Loss/Validation"])) + else: + self.scheduler.step() + new_lr = self.optimizer.param_groups[0]["lr"] + self.logger.debug(f"Old lr: {curr_lr:.6f} - New lr: {new_lr:.6f}") + + def save_checkpoint(self, epoch: int, checkpoint_name: str): + if self.early_stopping is None: + best_metric = None + best_epoch = None + else: + best_metric = self.early_stopping.best_metric + best_epoch = self.early_stopping.best_epoch + + arch = type(self.model).__name__ + state = { + "arch": arch, + "epoch": epoch, + "model_state_dict": self.model.state_dict(), + "optimizer_state_dict": self.optimizer.state_dict(), + "scheduler_state_dict": self.scheduler.state_dict(), + "best_metric": best_metric, + "best_epoch": best_epoch, + "config": flatten_dict(wandb.config), + "wandb_id": wandb.run.id, + "logdir": str(self.logdir.resolve()), + "run_name": str(Path(self.logdir).name), + "scaler_state_dict": self.scaler.state_dict() + if self.scaler is not None + else None, + } + + checkpoint_dir = self.logdir / "checkpoints" + checkpoint_dir.mkdir(exist_ok=True, parents=True) + + filename = str(checkpoint_dir / checkpoint_name) + torch.save(state, filename) + + def resume_checkpoint(self, checkpoint): + self.logger.info("Loading checkpoint") + self.logger.info("Loading Model") + self.model.load_state_dict(checkpoint["model_state_dict"]) + self.logger.info("Loading Optimizer state dict") + self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"]) + + if self.early_stopping is not None: + self.early_stopping.best_metric = checkpoint["best_metric"] + self.early_stopping.best_epoch = checkpoint["best_epoch"] + if self.scaler is not None: + 
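+            # Restore the AMP GradScaler state as well, so resumed mixed-precision
+            # training continues with the previously calibrated loss scale.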
self.scaler.load_state_dict(checkpoint["scaler_state_dict"]) + + self.logger.info(f"Checkpoint epoch: {int(checkpoint['epoch'])}") + self.start_epoch = int(checkpoint["epoch"]) + self.logger.info(f"Next epoch is: {self.start_epoch + 1}") diff --git a/base_ml/base_utils.py b/base_ml/base_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ca2cd2b585f737deec2352eb34cdeba2deed49e8 --- /dev/null +++ b/base_ml/base_utils.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +import torch +import torch.nn.functional as F + +__all__ = ["filter2D", "gaussian", "gaussian_kernel2d", "sobel_hv"] + + +def filter2D(input_tensor: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor: + """Convolves a given kernel on input tensor without losing dimensional shape. + + Parameters + ---------- + input_tensor : torch.Tensor + Input image/tensor. + kernel : torch.Tensor + Convolution kernel/window. + + Returns + ------- + torch.Tensor: + The convolved tensor of same shape as the input. + """ + (_, channel, _, _) = input_tensor.size() + + # "SAME" padding to avoid losing height and width + pad = [ + kernel.size(2) // 2, + kernel.size(2) // 2, + kernel.size(3) // 2, + kernel.size(3) // 2, + ] + pad_tensor = F.pad(input_tensor, pad, "replicate") + + out = F.conv2d(pad_tensor, kernel, groups=channel) + return out + + +def gaussian( + window_size: int, sigma: float, device: torch.device = None +) -> torch.Tensor: + """Create a gaussian 1D tensor. + + Parameters + ---------- + window_size : int + Number of elements for the output tensor. + sigma : float + Std of the gaussian distribution. + device : torch.device + Device for the tensor. + + Returns + ------- + torch.Tensor: + A gaussian 1D tensor. Shape: (window_size, ). + """ + x = torch.arange(window_size, device=device).float() - window_size // 2 + if window_size % 2 == 0: + x = x + 0.5 + + gauss = torch.exp((-x.pow(2.0) / float(2 * sigma**2))) + + return gauss / gauss.sum() + + +def gaussian_kernel2d( + window_size: int, sigma: float, n_channels: int = 1, device: torch.device = None +) -> torch.Tensor: + """Create 2D window_size**2 sized kernel a gaussial kernel. + + Parameters + ---------- + window_size : int + Number of rows and columns for the output tensor. + sigma : float + Std of the gaussian distribution. + n_channel : int + Number of channels in the image that will be convolved with + this kernel. + device : torch.device + Device for the kernel. + + Returns: + ----------- + torch.Tensor: + A tensor of shape (1, 1, window_size, window_size) + """ + kernel_x = gaussian(window_size, sigma, device=device) + kernel_y = gaussian(window_size, sigma, device=device) + + kernel_2d = torch.matmul(kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t()) + kernel_2d = kernel_2d.expand(n_channels, 1, window_size, window_size) + + return kernel_2d + + +def sobel_hv(window_size: int = 5, device: torch.device = None): + """Create a kernel that is used to compute 1st order derivatives. + + Parameters + ---------- + window_size : int + Size of the convolution kernel. + device : torch.device: + Device for the kernel. + + Returns + ------- + torch.Tensor: + the computed 1st order derivatives of the input tensor. + Shape (B, 2, H, W) + + Raises + ------ + ValueError: + If `window_size` is not an odd number. + """ + if not window_size % 2 == 1: + raise ValueError(f"window_size must be odd. 
Got: {window_size}") + + # Generate the sobel kernels + range_h = torch.arange( + -window_size // 2 + 1, window_size // 2 + 1, dtype=torch.float32, device=device + ) + range_v = torch.arange( + -window_size // 2 + 1, window_size // 2 + 1, dtype=torch.float32, device=device + ) + h, v = torch.meshgrid(range_h, range_v) + + kernel_h = h / (h * h + v * v + 1e-6) + kernel_h = kernel_h.unsqueeze(0).unsqueeze(0) + + kernel_v = v / (h * h + v * v + 1e-6) + kernel_v = kernel_v.unsqueeze(0).unsqueeze(0) + + return torch.cat([kernel_h, kernel_v], dim=0) diff --git a/base_ml/base_validator.py b/base_ml/base_validator.py new file mode 100644 index 0000000000000000000000000000000000000000..59b8ae2ce876005c16c5c4eae6198b97c74fefb0 --- /dev/null +++ b/base_ml/base_validator.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Validators +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +from schema import Schema, Or + +sweep_schema = Schema( + { + "method": Or("grid", "random", "bayes"), + "name": str, + "metric": {"name": str, "goal": Or("maximize", "minimize")}, + "run_cap": int, + }, + ignore_extra_keys=True, +) diff --git a/base_ml/optim_factory.py b/base_ml/optim_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..6133c311e206304afa85b50f544c10d7014fb967 --- /dev/null +++ b/base_ml/optim_factory.py @@ -0,0 +1,190 @@ +# UniRepLKNet: A Universal Perception Large-Kernel ConvNet for Audio, Video, Point Cloud, Time-Series and Image Recognition +# Github source: https://github.com/AILab-CVC/UniRepLKNet +# Licensed under The Apache License 2.0 License [see LICENSE for details] +# Based on RepLKNet, ConvNeXt, timm, DINO and DeiT code bases +# https://github.com/DingXiaoH/RepLKNet-pytorch +# https://github.com/facebookresearch/ConvNeXt +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import torch +from torch import optim as optim + +from timm.optim.adafactor import Adafactor +from timm.optim.adahessian import Adahessian +from timm.optim.adamp import AdamP +from timm.optim.lookahead import Lookahead +from timm.optim.nadam import Nadam +from timm.optim.radam import RAdam +from timm.optim.rmsprop_tf import RMSpropTF +from timm.optim.sgdp import SGDP + +import json + +try: + from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD + has_apex = True +except ImportError: + has_apex = False + + +def get_num_layer_for_convnext(var_name): + """ + Divide [3, 3, 27, 3] layers into 12 groups; each group is three + consecutive blocks, including possible neighboring downsample layers; + adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py + """ + num_max_layer = 12 + if var_name.startswith("downsample_layers"): + stage_id = int(var_name.split('.')[1]) + if stage_id == 0: + layer_id = 0 + elif stage_id == 1 or stage_id == 2: + layer_id = stage_id + 1 + elif stage_id == 3: + layer_id = 12 + return layer_id + + elif var_name.startswith("stages"): + stage_id = int(var_name.split('.')[1]) + block_id = int(var_name.split('.')[2]) + if stage_id == 0 or stage_id == 1: + layer_id = stage_id + 1 + elif stage_id == 2: + layer_id = 3 + block_id // 3 + elif stage_id == 3: + layer_id = 12 + return layer_id + else: + return num_max_layer + 1 + +class LayerDecayValueAssigner(object): + def __init__(self, values): + 
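+        # `values` holds one learning-rate scale per layer id; `get_scale(layer_id)`
+        # simply indexes into this table when the parameter groups are assembled.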
self.values = values + + def get_scale(self, layer_id): + return self.values[layer_id] + + def get_layer_id(self, var_name): + return get_num_layer_for_convnext(var_name) + + +def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None): + parameter_group_names = {} + parameter_group_vars = {} + + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + group_name = "no_decay" + this_weight_decay = 0. + else: + group_name = "decay" + this_weight_decay = weight_decay + if get_num_layer is not None: + layer_id = get_num_layer(name) + group_name = "layer_%d_%s" % (layer_id, group_name) + else: + layer_id = None + + if group_name not in parameter_group_names: + if get_layer_scale is not None: + scale = get_layer_scale(layer_id) + else: + scale = 1. + + parameter_group_names[group_name] = { + "weight_decay": this_weight_decay, + "params": [], + "lr_scale": scale + } + parameter_group_vars[group_name] = { + "weight_decay": this_weight_decay, + "params": [], + "lr_scale": scale + } + + parameter_group_vars[group_name]["params"].append(param) + parameter_group_names[group_name]["params"].append(name) + print("Param groups = %s" % json.dumps(parameter_group_names, indent=2)) + return list(parameter_group_vars.values()) + + +def create_optimizer(model, weight_decay, lr, opt, get_num_layer=None, opt_eps=None, opt_betas=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None, momentum = 0.9): + opt_lower = opt.lower() + weight_decay = weight_decay + # if weight_decay and filter_bias_and_bn: + if filter_bias_and_bn: + skip = {} + if skip_list is not None: + skip = skip_list + elif hasattr(model, 'no_weight_decay'): + skip = model.no_weight_decay() + parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale) + weight_decay = 0. 
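+        # Weight decay has already been assigned per parameter group above, so the
+        # global value is zeroed here to avoid applying decay a second time when
+        # the optimizer is constructed below.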
+ else: + parameters = model.parameters() + + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' + + opt_args = dict(lr=lr, weight_decay=weight_decay) + if opt_eps is not None: + opt_args['eps'] = opt_eps + if opt_betas is not None: + opt_args['betas'] = opt_betas + + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if opt_lower == 'sgd' or opt_lower == 'nesterov': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'momentum': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'adam': + optimizer = optim.Adam(parameters, **opt_args) + if opt_lower == 'adamw': + optimizer = optim.AdamW(parameters, **opt_args) + elif opt_lower == 'nadam': + optimizer = Nadam(parameters, **opt_args) + elif opt_lower == 'radam': + optimizer = RAdam(parameters, **opt_args) + elif opt_lower == 'adamp': + optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) + elif opt_lower == 'sgdp': + optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'adadelta': + optimizer = optim.Adadelta(parameters, **opt_args) + + elif opt_lower == 'adahessian': + optimizer = Adahessian(parameters, **opt_args) + elif opt_lower == 'rmsprop': + optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args) + elif opt_lower == 'rmsproptf': + optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) + elif opt_lower == 'fusedsgd': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'fusedmomentum': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'fusedadam': + optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) + elif opt_lower == 'fusedadamw': + optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) + elif opt_lower == 'fusedlamb': + optimizer = FusedLAMB(parameters, **opt_args) + elif opt_lower == 'fusednovograd': + opt_args.setdefault('betas', (0.95, 0.98)) + optimizer = FusedNovoGrad(parameters, **opt_args) + else: + assert False and "Invalid optimizer" + + if len(opt_split) > 1: + if opt_split[0] == 'lookahead': + optimizer = Lookahead(optimizer) + + return optimizer diff --git a/base_ml/unireplknet_layer_decay_optimizer_constructor.py b/base_ml/unireplknet_layer_decay_optimizer_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..97db978ab1f6085e859565a3149bbe55da59bfbd --- /dev/null +++ b/base_ml/unireplknet_layer_decay_optimizer_constructor.py @@ -0,0 +1,169 @@ +# -------------------------------------------------------- +# UniRepLKNet +# https://github.com/AILab-CVC/UniRepLKNet +# Licensed under The Apache 2.0 License [see LICENSE for details] +# -------------------------------------------------------- +import json +from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor +from mmcv.runner import get_dist_info +from mmdet.utils import get_root_logger + +def get_layer_id(var_name, max_layer_id,): + """Get the layer id to set the different learning rates in ``layer_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + max_layer_id (int): Maximum layer id. 
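+
+    Example:
+        Illustrative mapping (the parameter name follows the ``backbone.stages``
+        naming convention assumed by this constructor):
+
+            get_layer_id('backbone.stages.2.7.dwconv.weight', max_layer_id=12)
+            # stage 2, block 7  ->  layer_id = 3 + 7 // 3 = 5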
+ + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + + elif var_name.startswith('backbone.downsample_layers'): + stage_id = int(var_name.split('.')[2]) + if stage_id == 0: + layer_id = 0 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + elif stage_id == 3: + layer_id = max_layer_id + return layer_id + + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + block_id = int(var_name.split('.')[3]) + if stage_id == 0: + layer_id = 1 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + block_id // 3 + elif stage_id == 3: + layer_id = max_layer_id + return layer_id + + else: + return max_layer_id + 1 + + + +def get_stage_id(var_name, max_stage_id): + """Get the stage id to set the different learning rates in ``stage_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + max_stage_id (int): Maximum stage id. + + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + return 0 + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + return stage_id + 1 + else: + return max_stage_id - 1 + + +@OPTIMIZER_BUILDERS.register_module() +class UniRepLKNetLearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): + # Different learning rates are set for different layers of backbone. + # The design is inspired by and adapted from ConvNeXt. + + def add_params(self, params, module, **kwargs): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + """ + logger = get_root_logger() + + parameter_groups = {} + logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') + num_layers = self.paramwise_cfg.get('num_layers') + 2 + decay_rate = self.paramwise_cfg.get('decay_rate') + decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') + dw_scale = self.paramwise_cfg.get('dw_scale', 1) + logger.info('Build UniRepLKNetLearningRateDecayOptimizerConstructor ' + f'{decay_type} {decay_rate} - {num_layers}') + weight_decay = self.base_wd + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith('.bias') or name in ( + 'pos_embed', 'cls_token'): + group_name = 'no_decay' + this_weight_decay = 0. 
+ else: + group_name = 'decay' + this_weight_decay = weight_decay + if 'layer_wise' in decay_type: + layer_id = get_layer_id(name, self.paramwise_cfg.get('num_layers')) + logger.info(f'set param {name} as id {layer_id}') + elif decay_type == 'stage_wise': + layer_id = get_stage_id(name, num_layers) + logger.info(f'set param {name} as id {layer_id}') + + if dw_scale == 1 or 'dwconv' not in name: + group_name = f'layer_{layer_id}_{group_name}' + if group_name not in parameter_groups: + scale = decay_rate ** (num_layers - layer_id - 1) + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + else: + group_name = f'layer_{layer_id}_{group_name}_dwconv' + if group_name not in parameter_groups: + scale = decay_rate ** (num_layers - layer_id - 1) * dw_scale + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + logger.info(f'Param groups = {json.dumps(to_display, indent=2)}') + params.extend(parameter_groups.values()) diff --git a/cell_segmentation/__init__.py b/cell_segmentation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9084422382ae6d59db773716de20b15ee4d8fe8 --- /dev/null +++ b/cell_segmentation/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Cell Segmentation and detection using our cellvit model +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen diff --git a/cell_segmentation/datasets/base_cell.py b/cell_segmentation/datasets/base_cell.py new file mode 100644 index 0000000000000000000000000000000000000000..a5f82c118761e587ecc274f6212235444ccdaef1 --- /dev/null +++ b/cell_segmentation/datasets/base_cell.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Base cell segmentation dataset, based on torch Dataset implementation +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import logging +from typing import Callable + +import torch +from torch.utils.data import Dataset + +logger = logging.getLogger() +logger.addHandler(logging.NullHandler()) + +from abc import abstractmethod + + +class CellDataset(Dataset): + def set_transforms(self, transforms: Callable) -> None: + self.transforms = transforms + + @abstractmethod + def load_cell_count(self): + """Load Cell count from cell_count.csv file. File must be located inside the fold folder + + Example file beginning: + Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial + 0_0.png,4,2,2,0,0 + 0_1.png,8,1,1,0,0 + 0_10.png,17,0,1,0,0 + 0_100.png,10,0,11,0,0 + ... 
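+
+        The concrete implementations in this repository store the result as
+        ``self.cell_count``, a ``pandas.DataFrame`` indexed by image name with
+        one column per cell type; ``get_sampling_weights_cell`` expects it to
+        have been loaded beforehand.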
+ """ + pass + + @abstractmethod + def get_sampling_weights_tissue(self, gamma: float = 1) -> torch.Tensor: + """Get sampling weights calculated by tissue type statistics + + For this, a file named "weight_config.yaml" with the content: + tissue: + tissue_1: xxx + tissue_2: xxx (name of tissue: count) + ... + Must exists in the dataset main folder (parent path, not inside the folds) + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + + @abstractmethod + def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor: + """Get sampling weights calculated by cell type statistics + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + + def get_sampling_weights_cell_tissue(self, gamma: float = 1) -> torch.Tensor: + """Get combined sampling weights by calculating tissue and cell sampling weights, + normalizing them and adding them up to yield one score. + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + assert 0 <= gamma <= 1, "Gamma must be between 0 and 1" + tw = self.get_sampling_weights_tissue(gamma) + cw = self.get_sampling_weights_cell(gamma) + weights = tw / torch.max(tw) + cw / torch.max(cw) + + return weights diff --git a/cell_segmentation/datasets/cell_graph_datamodel.py b/cell_segmentation/datasets/cell_graph_datamodel.py new file mode 100644 index 0000000000000000000000000000000000000000..bcd48898db143c4369f4aa0fe168c9215611cdca --- /dev/null +++ b/cell_segmentation/datasets/cell_graph_datamodel.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Graph Data model +# +# For more information, please check out docs/readmes/graphs.md +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +from dataclasses import dataclass +from typing import List + +import torch + +from datamodel.graph_datamodel import GraphDataWSI + + +@dataclass +class CellGraphDataWSI(GraphDataWSI): + """Dataclass for Graph Data + + Args: + contours (List[torch.Tensor]): Contour Data for each object. 
+ """ + + contours: List[torch.Tensor] diff --git a/cell_segmentation/datasets/conic.py b/cell_segmentation/datasets/conic.py new file mode 100644 index 0000000000000000000000000000000000000000..905b9ba941144301f43c9c7fc383c9249e454744 --- /dev/null +++ b/cell_segmentation/datasets/conic.py @@ -0,0 +1,243 @@ +# -*- coding: utf-8 -*- +# PanNuke Dataset +# +# Dataset information: https://arxiv.org/abs/2108.11195 +# Please Prepare Dataset as described here: docs/readmes/pannuke.md # TODO: write own documentation +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +import logging +from pathlib import Path +from typing import Callable, Tuple, Union, List + +import numpy as np +import pandas as pd +import torch +from PIL import Image + +from cell_segmentation.datasets.base_cell import CellDataset +from cell_segmentation.datasets.pannuke import PanNukeDataset + +logger = logging.getLogger() +logger.addHandler(logging.NullHandler()) + + +class CoNicDataset(CellDataset): + """Lizzard dataset + + This dataset is always cached + + Args: + dataset_path (Union[Path, str]): Path to Lizzard dataset. Structure is described under ./docs/readmes/cell_segmentation.md + folds (Union[int, list[int]]): Folds to use for this dataset + transforms (Callable, optional): PyTorch transformations. Defaults to None. + stardist (bool, optional): Return StarDist labels. Defaults to False + regression (bool, optional): Return Regression of cells in x and y direction. Defaults to False + **kwargs are irgnored + """ + + def __init__( + self, + dataset_path: Union[Path, str], + folds: Union[int, List[int]], + transforms: Callable = None, + stardist: bool = False, + regression: bool = False, + **kwargs, + ) -> None: + if isinstance(folds, int): + folds = [folds] + + self.dataset = Path(dataset_path).resolve() + self.transforms = transforms + self.images = [] + self.masks = [] + self.img_names = [] + self.folds = folds + self.stardist = stardist + self.regression = regression + for fold in folds: + image_path = self.dataset / f"fold{fold}" / "images" + fold_images = [f for f in sorted(image_path.glob("*.png")) if f.is_file()] + + # sanity_check: mask must exist for image + for fold_image in fold_images: + mask_path = ( + self.dataset / f"fold{fold}" / "labels" / f"{fold_image.stem}.npy" + ) + if mask_path.is_file(): + self.images.append(fold_image) + self.masks.append(mask_path) + self.img_names.append(fold_image.name) + + else: + logger.debug( + "Found image {fold_image}, but no corresponding annotation file!" 
+ ) + + # load everything in advance to speedup, as the dataset is rather small + self.loaded_imgs = [] + self.loaded_masks = [] + for idx in range(len(self.images)): + img_path = self.images[idx] + img = np.array(Image.open(img_path)).astype(np.uint8) + + mask_path = self.masks[idx] + mask = np.load(mask_path, allow_pickle=True) + inst_map = mask[()]["inst_map"].astype(np.int32) + type_map = mask[()]["type_map"].astype(np.int32) + mask = np.stack([inst_map, type_map], axis=-1) + self.loaded_imgs.append(img) + self.loaded_masks.append(mask) + + logger.info(f"Created Pannuke Dataset by using fold(s) {self.folds}") + logger.info(f"Resulting dataset length: {self.__len__()}") + + def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str, str]: + """Get one dataset item consisting of transformed image, + masks (instance_map, nuclei_type_map, nuclei_binary_map, hv_map) and tissue type as string + + Args: + index (int): Index of element to retrieve + + Returns: + Tuple[torch.Tensor, dict, str, str]: + torch.Tensor: Image, with shape (3, H, W), shape is arbitrary for Lizzard (H and W approx. between 500 and 2000) + dict: + "instance_map": Instance-Map, each instance is has one integer starting by 1 (zero is background), Shape (256, 256) + "nuclei_type_map": Nuclei-Type-Map, for each nucleus (instance) the class is indicated by an integer. Shape (256, 256) + "nuclei_binary_map": Binary Nuclei-Mask, Shape (256, 256) + "hv_map": Horizontal and vertical instance map. + Shape: (H, W, 2). First dimension is horizontal (horizontal gradient (-1 to 1)), + last is vertical (vertical gradient (-1 to 1)) Shape (256, 256, 2) + "dist_map": Probability distance map. Shape (256, 256) + "stardist_map": Stardist vector map. Shape (n_rays, 256, 256) + [Optional if regression] + "regression_map": Regression map. Shape (2, 256, 256). First is vertical, second horizontal. 
+ str: Tissue type + str: Image Name + """ + img_path = self.images[index] + img = self.loaded_imgs[index] + mask = self.loaded_masks[index] + + if self.transforms is not None: + transformed = self.transforms(image=img, mask=mask) + img = transformed["image"] + mask = transformed["mask"] + + inst_map = mask[:, :, 0].copy() + type_map = mask[:, :, 1].copy() + np_map = mask[:, :, 0].copy() + np_map[np_map > 0] = 1 + hv_map = PanNukeDataset.gen_instance_hv_map(inst_map) + + # torch convert + img = torch.Tensor(img).type(torch.float32) + img = img.permute(2, 0, 1) + if torch.max(img) >= 5: + img = img / 255 + + masks = { + "instance_map": torch.Tensor(inst_map).type(torch.int64), + "nuclei_type_map": torch.Tensor(type_map).type(torch.int64), + "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64), + "hv_map": torch.Tensor(hv_map).type(torch.float32), + } + if self.stardist: + dist_map = PanNukeDataset.gen_distance_prob_maps(inst_map) + stardist_map = PanNukeDataset.gen_stardist_maps(inst_map) + masks["dist_map"] = torch.Tensor(dist_map).type(torch.float32) + masks["stardist_map"] = torch.Tensor(stardist_map).type(torch.float32) + if self.regression: + masks["regression_map"] = PanNukeDataset.gen_regression_map(inst_map) + + return img, masks, "Colon", Path(img_path).name + + def __len__(self) -> int: + """Length of Dataset + + Returns: + int: Length of Dataset + """ + return len(self.images) + + def set_transforms(self, transforms: Callable) -> None: + """Set the transformations, can be used tp exchange transformations + + Args: + transforms (Callable): PyTorch transformations + """ + self.transforms = transforms + + def load_cell_count(self): + """Load Cell count from cell_count.csv file. File must be located inside the fold folder + and named "cell_count.csv" + + Example file beginning: + Image,Neutrophil,Epithelial,Lymphocyte,Plasma,Eosinophil,Connective + consep_1_0000.png,0,117,0,0,0,0 + consep_1_0001.png,0,95,1,0,0,8 + consep_1_0002.png,0,172,3,0,0,2 + ... + """ + df_placeholder = [] + for fold in self.folds: + csv_path = self.dataset / f"fold{fold}" / "cell_count.csv" + cell_count = pd.read_csv(csv_path, index_col=0) + df_placeholder.append(cell_count) + self.cell_count = pd.concat(df_placeholder) + self.cell_count = self.cell_count.reindex(self.img_names) + + def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor: + """Get sampling weights calculated by cell type statistics + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + assert 0 <= gamma <= 1, "Gamma must be between 0 and 1" + assert hasattr(self, "cell_count"), "Please run .load_cell_count() in advance!" + binary_weight_factors = np.array([1069, 4189, 4356, 3103, 1025, 4527]) + k = np.sum(binary_weight_factors) + cell_counts_imgs = np.clip(self.cell_count.to_numpy(), 0, 1) + weight_vector = k / (gamma * binary_weight_factors + (1 - gamma) * k) + img_weight = (1 - gamma) * np.max(cell_counts_imgs, axis=-1) + gamma * np.sum( + cell_counts_imgs * weight_vector, axis=-1 + ) + img_weight[np.where(img_weight == 0)] = np.min( + img_weight[np.nonzero(img_weight)] + ) + + return torch.Tensor(img_weight) + + # def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor: + # """Get sampling weights calculated by cell type statistics + + # Args: + # gamma (float, optional): Gamma scaling factor, between 0 and 1. 
+ # 1 means total balancing, 0 means original weights. Defaults to 1. + + # Returns: + # torch.Tensor: Weights for each sample + # """ + # assert 0 <= gamma <= 1, "Gamma must be between 0 and 1" + # assert hasattr(self, "cell_count"), "Please run .load_cell_count() in advance!" + # binary_weight_factors = np.array([4012, 222017, 93612, 24793, 2999, 98783]) + # k = np.sum(binary_weight_factors) + # cell_counts_imgs = self.cell_count.to_numpy() + # weight_vector = k / (gamma * binary_weight_factors + (1 - gamma) * k) + # img_weight = (1 - gamma) * np.max(cell_counts_imgs, axis=-1) + gamma * np.sum( + # cell_counts_imgs * weight_vector, axis=-1 + # ) + # img_weight[np.where(img_weight == 0)] = np.min( + # img_weight[np.nonzero(img_weight)] + # ) + + # return torch.Tensor(img_weight) diff --git a/cell_segmentation/datasets/consep.py b/cell_segmentation/datasets/consep.py new file mode 100644 index 0000000000000000000000000000000000000000..fba812188027a34988db8613ab8849d39e907a3e --- /dev/null +++ b/cell_segmentation/datasets/consep.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# MoNuSeg Dataset +# +# Dataset information: https://monuseg.grand-challenge.org/Home/ +# Please Prepare Dataset as described here: docs/readmes/monuseg.md +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import logging +from pathlib import Path +from typing import Callable, Union, Tuple + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from cell_segmentation.datasets.pannuke import PanNukeDataset + +logger = logging.getLogger() +logger.addHandler(logging.NullHandler()) + + +class CoNSePDataset(Dataset): + def __init__( + self, + dataset_path: Union[Path, str], + transforms: Callable = None, + ) -> None: + """MoNuSeg Dataset + + Args: + dataset_path (Union[Path, str]): Path to dataset + transforms (Callable, optional): Transformations to apply on images. Defaults to None. 
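+
+        Example:
+            Rough usage sketch (the dataset path is an assumption):
+
+                dataset = CoNSePDataset("path/to/consep/test")
+                img, masks, name = dataset[0]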
+ Raises: + FileNotFoundError: If no ground-truth annotation file was found in path + """ + self.dataset = Path(dataset_path).resolve() + self.transforms = transforms + self.masks = [] + self.img_names = [] + + image_path = self.dataset / "images" + label_path = self.dataset / "labels" + self.images = [f for f in sorted(image_path.glob("*.png")) if f.is_file()] + self.masks = [f for f in sorted(label_path.glob("*.npy")) if f.is_file()] + + # sanity_check + for idx, image in enumerate(self.images): + image_name = image.stem + mask_name = self.masks[idx].stem + if image_name != mask_name: + raise FileNotFoundError(f"Annotation for file {image_name} is missing") + + def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str]: + """Get one item from dataset + + Args: + index (int): Item to get + + Returns: + Tuple[torch.Tensor, dict, str]: Trainings-Batch + * torch.Tensor: Image + * dict: Ground-Truth values: keys are "instance map", "nuclei_binary_map" and "hv_map" + * str: filename + """ + img_path = self.images[index] + img = np.array(Image.open(img_path)).astype(np.uint8) + + mask_path = self.masks[index] + mask = np.load(mask_path, allow_pickle=True) + inst_map = mask[()]["inst_map"].astype(np.int32) + type_map = mask[()]["type_map"].astype(np.int32) + mask = np.stack([inst_map, type_map], axis=-1) + + if self.transforms is not None: + transformed = self.transforms(image=img, mask=mask) + img = transformed["image"] + mask = transformed["mask"] + + inst_map = mask[:, :, 0].copy() + type_map = mask[:, :, 1].copy() + np_map = mask[:, :, 0].copy() + np_map[np_map > 0] = 1 + hv_map = PanNukeDataset.gen_instance_hv_map(inst_map) + + # torch convert + img = torch.Tensor(img).type(torch.float32) + img = img.permute(2, 0, 1) + if torch.max(img) >= 5: + img = img / 255 + + masks = { + "instance_map": torch.Tensor(inst_map).type(torch.int64), + "nuclei_type_map": torch.Tensor(type_map).type(torch.int64), + "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64), + "hv_map": torch.Tensor(hv_map).type(torch.float32), + } + + return img, masks, Path(img_path).name + + def __len__(self) -> int: + """Length of Dataset + + Returns: + int: Length of Dataset + """ + return len(self.images) + + def set_transforms(self, transforms: Callable) -> None: + """Set the transformations, can be used tp exchange transformations + + Args: + transforms (Callable): PyTorch transformations + """ + self.transforms = transforms diff --git a/cell_segmentation/datasets/dataset_coordinator.py b/cell_segmentation/datasets/dataset_coordinator.py new file mode 100644 index 0000000000000000000000000000000000000000..387f102e5ae2fb967e971980a8b3ebd37df01e71 --- /dev/null +++ b/cell_segmentation/datasets/dataset_coordinator.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Coordinate the datasets, used to select the right dataset with corresponding setting +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +from typing import Callable + +from torch.utils.data import Dataset +from cell_segmentation.datasets.conic import CoNicDataset + +from cell_segmentation.datasets.pannuke import PanNukeDataset + + +def select_dataset( + dataset_name: str, split: str, dataset_config: dict, transforms: Callable = None +) -> Dataset: + """Select a cell segmentation dataset from the provided ones, currently just PanNuke is implemented here + + Args: + dataset_name (str): Name of dataset to use. + Must be one of: [pannuke, lizzard] + split (str): Split to use. 
+ Must be one of: ["train", "val", "validation", "test"] + dataset_config (dict): Dictionary with dataset configuration settings + transforms (Callable, optional): PyTorch Image and Mask transformations. Defaults to None. + + Raises: + NotImplementedError: Unknown dataset + + Returns: + Dataset: Cell segmentation dataset + """ + assert split.lower() in [ + "train", + "val", + "validation", + "test", + ], "Unknown split type!" + + if dataset_name.lower() == "pannuke": + if split == "train": + folds = dataset_config["train_folds"] + if split == "val" or split == "validation": + folds = dataset_config["val_folds"] + if split == "test": + folds = dataset_config["test_folds"] + dataset = PanNukeDataset( + dataset_path=dataset_config["dataset_path"], + folds=folds, + transforms=transforms, + stardist=dataset_config.get("stardist", False), + regression=dataset_config.get("regression_loss", False), + ) + elif dataset_name.lower() == "conic": + if split == "train": + folds = dataset_config["train_folds"] + if split == "val" or split == "validation": + folds = dataset_config["val_folds"] + if split == "test": + folds = dataset_config["test_folds"] + dataset = CoNicDataset( + dataset_path=dataset_config["dataset_path"], + folds=folds, + transforms=transforms, + stardist=dataset_config.get("stardist", False), + regression=dataset_config.get("regression_loss", False), + # TODO: Stardist and regression loss + ) + else: + raise NotImplementedError(f"Unknown dataset: {dataset_name}") + return dataset diff --git a/cell_segmentation/datasets/monuseg.py b/cell_segmentation/datasets/monuseg.py new file mode 100644 index 0000000000000000000000000000000000000000..4db6a3f9154a4d8649a369ca02ec992eecbafb53 --- /dev/null +++ b/cell_segmentation/datasets/monuseg.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# MoNuSeg Dataset +# +# Dataset information: https://monuseg.grand-challenge.org/Home/ +# Please Prepare Dataset as described here: docs/readmes/monuseg.md +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import logging +from pathlib import Path +from typing import Callable, Union, Tuple + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from cell_segmentation.datasets.pannuke import PanNukeDataset +from einops import rearrange + +logger = logging.getLogger() +logger.addHandler(logging.NullHandler()) + + +class MoNuSegDataset(Dataset): + def __init__( + self, + dataset_path: Union[Path, str], + transforms: Callable = None, + patching: bool = False, + overlap: int = 0, + ) -> None: + """MoNuSeg Dataset + + Args: + dataset_path (Union[Path, str]): Path to dataset + transforms (Callable, optional): Transformations to apply on images. Defaults to None. + patching (bool, optional): If patches with size 256px should be used Otherwise, the entire MoNuSeg images are loaded. Defaults to False. + overlap: (bool, optional): If overlap should be used for patch sampling. Overlap in pixels. + Recommended value other than 0 is 64. Defaults to 0. 
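+
+        Example:
+            Rough usage sketch (the path is an assumption; an overlap of 64 px
+            follows the recommendation above):
+
+                dataset = MoNuSegDataset("path/to/monuseg/test", patching=True, overlap=64)
+                img, masks, name = dataset[0]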
+ Raises: + FileNotFoundError: If no ground-truth annotation file was found in path + """ + self.dataset = Path(dataset_path).resolve() + self.transforms = transforms + self.masks = [] + self.img_names = [] + self.patching = patching + self.overlap = overlap + + image_path = self.dataset / "images" + label_path = self.dataset / "labels" + self.images = [f for f in sorted(image_path.glob("*.png")) if f.is_file()] + self.masks = [f for f in sorted(label_path.glob("*.npy")) if f.is_file()] + + # sanity_check + for idx, image in enumerate(self.images): + image_name = image.stem + mask_name = self.masks[idx].stem + if image_name != mask_name: + raise FileNotFoundError(f"Annotation for file {image_name} is missing") + + def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str]: + """Get one item from dataset + + Args: + index (int): Item to get + + Returns: + Tuple[torch.Tensor, dict, str]: Trainings-Batch + * torch.Tensor: Image + * dict: Ground-Truth values: keys are "instance map", "nuclei_binary_map" and "hv_map" + * str: filename + """ + img_path = self.images[index] + img = np.array(Image.open(img_path)).astype(np.uint8) + + mask_path = self.masks[index] + mask = np.load(mask_path, allow_pickle=True) + mask = mask.astype(np.int64) + + if self.transforms is not None: + transformed = self.transforms(image=img, mask=mask) + img = transformed["image"] + mask = transformed["mask"] + + hv_map = PanNukeDataset.gen_instance_hv_map(mask) + np_map = mask.copy() + np_map[np_map > 0] = 1 + + # torch convert + img = torch.Tensor(img).type(torch.float32) + img = img.permute(2, 0, 1) + if torch.max(img) >= 5: + img = img / 255 + + if self.patching and self.overlap == 0: + img = rearrange(img, "c (h i) (w j) -> c h w i j", i=256, j=256) + if self.patching and self.overlap != 0: + img = img.unfold(1, 256, 256 - self.overlap).unfold( + 2, 256, 256 - self.overlap + ) + + masks = { + "instance_map": torch.Tensor(mask).type(torch.int64), + "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64), + "hv_map": torch.Tensor(hv_map).type(torch.float32), + } + + return img, masks, Path(img_path).name + + def __len__(self) -> int: + """Length of Dataset + + Returns: + int: Length of Dataset + """ + return len(self.images) + + def set_transforms(self, transforms: Callable) -> None: + """Set the transformations, can be used tp exchange transformations + + Args: + transforms (Callable): PyTorch transformations + """ + self.transforms = transforms diff --git a/cell_segmentation/datasets/pannuke.py b/cell_segmentation/datasets/pannuke.py new file mode 100644 index 0000000000000000000000000000000000000000..d636620f1a9b73a6361de56ca513e207ac33f980 --- /dev/null +++ b/cell_segmentation/datasets/pannuke.py @@ -0,0 +1,537 @@ +# -*- coding: utf-8 -*- +# PanNuke Dataset +# +# Dataset information: https://arxiv.org/abs/2003.10778 +# Please Prepare Dataset as described here: docs/readmes/pannuke.md +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +import logging +import sys # remove +from pathlib import Path +from typing import Callable, Tuple, Union, List + +sys.path.append("/homes/fhoerst/histo-projects/CellViT/") # remove + +import numpy as np +import pandas as pd +import torch +import yaml +from numba import njit +from PIL import Image +from scipy.ndimage import center_of_mass, distance_transform_edt + +from cell_segmentation.datasets.base_cell import CellDataset +from cell_segmentation.utils.tools import fix_duplicates, 
get_bounding_box + +logger = logging.getLogger() +logger.addHandler(logging.NullHandler()) + +from natsort import natsorted + + +class PanNukeDataset(CellDataset): + """PanNuke dataset + + Args: + dataset_path (Union[Path, str]): Path to PanNuke dataset. Structure is described under ./docs/readmes/cell_segmentation.md + folds (Union[int, list[int]]): Folds to use for this dataset + transforms (Callable, optional): PyTorch transformations. Defaults to None. + stardist (bool, optional): Return StarDist labels. Defaults to False + regression (bool, optional): Return Regression of cells in x and y direction. Defaults to False + cache_dataset: If the dataset should be loaded to host memory in first epoch. + Be careful, workers in DataLoader needs to be persistent to have speedup. + Recommended to false, just use if you have enough RAM and your I/O operations might be limited. + Defaults to False. + """ + + def __init__( + self, + dataset_path: Union[Path, str], + folds: Union[int, List[int]], + transforms: Callable = None, + stardist: bool = False, + regression: bool = False, + cache_dataset: bool = False, + ) -> None: + if isinstance(folds, int): + folds = [folds] + + self.dataset = Path(dataset_path).resolve() + self.transforms = transforms + self.images = [] + self.masks = [] + self.types = {} + self.img_names = [] + self.folds = folds + self.cache_dataset = cache_dataset + self.stardist = stardist + self.regression = regression + for fold in folds: + image_path = self.dataset / f"fold{fold}" / "images" + fold_images = [ + f for f in natsorted(image_path.glob("*.png")) if f.is_file() + ] + + # sanity_check: mask must exist for image + for fold_image in fold_images: + mask_path = ( + self.dataset / f"fold{fold}" / "labels" / f"{fold_image.stem}.npy" + ) + if mask_path.is_file(): + self.images.append(fold_image) + self.masks.append(mask_path) + self.img_names.append(fold_image.name) + + else: + logger.debug( + "Found image {fold_image}, but no corresponding annotation file!" + ) + fold_types = pd.read_csv(self.dataset / f"fold{fold}" / "types.csv") + fold_type_dict = fold_types.set_index("img")["type"].to_dict() + self.types = { + **self.types, + **fold_type_dict, + } # careful - should all be named differently + + logger.info(f"Created Pannuke Dataset by using fold(s) {self.folds}") + logger.info(f"Resulting dataset length: {self.__len__()}") + + if self.cache_dataset: + self.cached_idx = [] # list of idx that should be cached + self.cached_imgs = {} # keys: idx, values: numpy array of imgs + self.cached_masks = {} # keys: idx, values: numpy array of masks + logger.info("Using cached dataset. Cache is built up during first epoch.") + + def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str, str]: + """Get one dataset item consisting of transformed image, + masks (instance_map, nuclei_type_map, nuclei_binary_map, hv_map) and tissue type as string + + Args: + index (int): Index of element to retrieve + + Returns: + Tuple[torch.Tensor, dict, str, str]: + torch.Tensor: Image, with shape (3, H, W), in this case (3, 256, 256) + dict: + "instance_map": Instance-Map, each instance is has one integer starting by 1 (zero is background), Shape (256, 256) + "nuclei_type_map": Nuclei-Type-Map, for each nucleus (instance) the class is indicated by an integer. Shape (256, 256) + "nuclei_binary_map": Binary Nuclei-Mask, Shape (256, 256) + "hv_map": Horizontal and vertical instance map. + Shape: (2 , H, W). 
First dimension is horizontal (horizontal gradient (-1 to 1)), + last is vertical (vertical gradient (-1 to 1)) Shape (2, 256, 256) + [Optional if stardist] + "dist_map": Probability distance map. Shape (256, 256) + "stardist_map": Stardist vector map. Shape (n_rays, 256, 256) + [Optional if regression] + "regression_map": Regression map. Shape (2, 256, 256). First is vertical, second horizontal. + str: Tissue type + str: Image Name + """ + img_path = self.images[index] + + if self.cache_dataset: + if index in self.cached_idx: + img = self.cached_imgs[index] + mask = self.cached_masks[index] + else: + # cache file + img = self.load_imgfile(index) + mask = self.load_maskfile(index) + self.cached_imgs[index] = img + self.cached_masks[index] = mask + self.cached_idx.append(index) + + else: + img = self.load_imgfile(index) + mask = self.load_maskfile(index) + + if self.transforms is not None: + transformed = self.transforms(image=img, mask=mask) + img = transformed["image"] + mask = transformed["mask"] + + tissue_type = self.types[img_path.name] + inst_map = mask[:, :, 0].copy() + type_map = mask[:, :, 1].copy() + np_map = mask[:, :, 0].copy() + np_map[np_map > 0] = 1 + hv_map = PanNukeDataset.gen_instance_hv_map(inst_map) + + # torch convert + img = torch.Tensor(img).type(torch.float32) + img = img.permute(2, 0, 1) + if torch.max(img) >= 5: + img = img / 255 + + masks = { + "instance_map": torch.Tensor(inst_map).type(torch.int64), + "nuclei_type_map": torch.Tensor(type_map).type(torch.int64), + "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64), + "hv_map": torch.Tensor(hv_map).type(torch.float32), + } + + # load stardist transforms if neccessary + if self.stardist: + dist_map = PanNukeDataset.gen_distance_prob_maps(inst_map) + stardist_map = PanNukeDataset.gen_stardist_maps(inst_map) + masks["dist_map"] = torch.Tensor(dist_map).type(torch.float32) + masks["stardist_map"] = torch.Tensor(stardist_map).type(torch.float32) + if self.regression: + masks["regression_map"] = PanNukeDataset.gen_regression_map(inst_map) + + return img, masks, tissue_type, Path(img_path).name + + def __len__(self) -> int: + """Length of Dataset + + Returns: + int: Length of Dataset + """ + return len(self.images) + + def set_transforms(self, transforms: Callable) -> None: + """Set the transformations, can be used tp exchange transformations + + Args: + transforms (Callable): PyTorch transformations + """ + self.transforms = transforms + + def load_imgfile(self, index: int) -> np.ndarray: + """Load image from file (disk) + + Args: + index (int): Index of file + + Returns: + np.ndarray: Image as array with shape (H, W, 3) + """ + img_path = self.images[index] + return np.array(Image.open(img_path)).astype(np.uint8) + + def load_maskfile(self, index: int) -> np.ndarray: + """Load mask from file (disk) + + Args: + index (int): Index of file + + Returns: + np.ndarray: Mask as array with shape (H, W, 2) + """ + mask_path = self.masks[index] + mask = np.load(mask_path, allow_pickle=True) + inst_map = mask[()]["inst_map"].astype(np.int32) + type_map = mask[()]["type_map"].astype(np.int32) + mask = np.stack([inst_map, type_map], axis=-1) + return mask + + def load_cell_count(self): + """Load Cell count from cell_count.csv file. File must be located inside the fold folder + and named "cell_count.csv" + + Example file beginning: + Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial + 0_0.png,4,2,2,0,0 + 0_1.png,8,1,1,0,0 + 0_10.png,17,0,1,0,0 + 0_100.png,10,0,11,0,0 + ... 
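+
+ Usage sketch (illustrative only; assumes folds prepared as described above and a
+ cell_count.csv per fold; the path and gamma value are placeholders):
+ dataset = PanNukeDataset(dataset_path="path/to/pannuke", folds=[0, 1])
+ dataset.load_cell_count() # fills dataset.cell_count, indexed by image name
+ weights = dataset.get_sampling_weights_cell(gamma=0.85) # per-image sampling weights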
+ """ + df_placeholder = [] + for fold in self.folds: + csv_path = self.dataset / f"fold{fold}" / "cell_count.csv" + cell_count = pd.read_csv(csv_path, index_col=0) + df_placeholder.append(cell_count) + self.cell_count = pd.concat(df_placeholder) + self.cell_count = self.cell_count.reindex(self.img_names) + + def get_sampling_weights_tissue(self, gamma: float = 1) -> torch.Tensor: + """Get sampling weights calculated by tissue type statistics + + For this, a file named "weight_config.yaml" with the content: + tissue: + tissue_1: xxx + tissue_2: xxx (name of tissue: count) + ... + Must exists in the dataset main folder (parent path, not inside the folds) + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + assert 0 <= gamma <= 1, "Gamma must be between 0 and 1" + with open( + (self.dataset / "weight_config.yaml").resolve(), "r" + ) as run_config_file: + yaml_config = yaml.safe_load(run_config_file) + tissue_counts = dict(yaml_config)["tissue"] + + # calculate weight for each tissue + weights_dict = {} + k = np.sum(list(tissue_counts.values())) + for tissue, count in tissue_counts.items(): + w = k / (gamma * count + (1 - gamma) * k) + weights_dict[tissue] = w + + weights = [] + for idx in range(self.__len__()): + img_idx = self.img_names[idx] + type_str = self.types[img_idx] + weights.append(weights_dict[type_str]) + + return torch.Tensor(weights) + + def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor: + """Get sampling weights calculated by cell type statistics + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + assert 0 <= gamma <= 1, "Gamma must be between 0 and 1" + assert hasattr(self, "cell_count"), "Please run .load_cell_count() in advance!" + binary_weight_factors = np.array([4191, 4132, 6140, 232, 1528]) + k = np.sum(binary_weight_factors) + cell_counts_imgs = np.clip(self.cell_count.to_numpy(), 0, 1) + weight_vector = k / (gamma * binary_weight_factors + (1 - gamma) * k) + img_weight = (1 - gamma) * np.max(cell_counts_imgs, axis=-1) + gamma * np.sum( + cell_counts_imgs * weight_vector, axis=-1 + ) + img_weight[np.where(img_weight == 0)] = np.min( + img_weight[np.nonzero(img_weight)] + ) + + return torch.Tensor(img_weight) + + def get_sampling_weights_cell_tissue(self, gamma: float = 1) -> torch.Tensor: + """Get combined sampling weights by calculating tissue and cell sampling weights, + normalizing them and adding them up to yield one score. + + Args: + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Returns: + torch.Tensor: Weights for each sample + """ + assert 0 <= gamma <= 1, "Gamma must be between 0 and 1" + tw = self.get_sampling_weights_tissue(gamma) + cw = self.get_sampling_weights_cell(gamma) + weights = tw / torch.max(tw) + cw / torch.max(cw) + + return weights + + @staticmethod + def gen_instance_hv_map(inst_map: np.ndarray) -> np.ndarray: + """Obtain the horizontal and vertical distance maps for each + nuclear instance. + + Args: + inst_map (np.ndarray): Instance map with each instance labelled as a unique integer + Shape: (H, W) + Returns: + np.ndarray: Horizontal and vertical instance map. + Shape: (2, H, W). 
First dimension is horizontal (horizontal gradient (-1 to 1)), + last is vertical (vertical gradient (-1 to 1)) + """ + orig_inst_map = inst_map.copy() # instance ID map + + x_map = np.zeros(orig_inst_map.shape[:2], dtype=np.float32) + y_map = np.zeros(orig_inst_map.shape[:2], dtype=np.float32) + + inst_list = list(np.unique(orig_inst_map)) + inst_list.remove(0) # 0 is background + for inst_id in inst_list: + inst_map = np.array(orig_inst_map == inst_id, np.uint8) + inst_box = get_bounding_box(inst_map) + + # expand the box by 2px + # Because we first pad the ann at line 207, the bboxes + # will remain valid after expansion + if inst_box[0] >= 2: + inst_box[0] -= 2 + if inst_box[2] >= 2: + inst_box[2] -= 2 + if inst_box[1] <= orig_inst_map.shape[0] - 2: + inst_box[1] += 2 + if inst_box[3] <= orig_inst_map.shape[0] - 2: + inst_box[3] += 2 + + # improvement + inst_map = inst_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]] + + if inst_map.shape[0] < 2 or inst_map.shape[1] < 2: + continue + + # instance center of mass, rounded to nearest pixel + inst_com = list(center_of_mass(inst_map)) + + inst_com[0] = int(inst_com[0] + 0.5) + inst_com[1] = int(inst_com[1] + 0.5) + + inst_x_range = np.arange(1, inst_map.shape[1] + 1) + inst_y_range = np.arange(1, inst_map.shape[0] + 1) + # shifting center of pixels grid to instance center of mass + inst_x_range -= inst_com[1] + inst_y_range -= inst_com[0] + + inst_x, inst_y = np.meshgrid(inst_x_range, inst_y_range) + + # remove coord outside of instance + inst_x[inst_map == 0] = 0 + inst_y[inst_map == 0] = 0 + inst_x = inst_x.astype("float32") + inst_y = inst_y.astype("float32") + + # normalize min into -1 scale + if np.min(inst_x) < 0: + inst_x[inst_x < 0] /= -np.amin(inst_x[inst_x < 0]) + if np.min(inst_y) < 0: + inst_y[inst_y < 0] /= -np.amin(inst_y[inst_y < 0]) + # normalize max into +1 scale + if np.max(inst_x) > 0: + inst_x[inst_x > 0] /= np.amax(inst_x[inst_x > 0]) + if np.max(inst_y) > 0: + inst_y[inst_y > 0] /= np.amax(inst_y[inst_y > 0]) + + #### + x_map_box = x_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]] + x_map_box[inst_map > 0] = inst_x[inst_map > 0] + + y_map_box = y_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]] + y_map_box[inst_map > 0] = inst_y[inst_map > 0] + + hv_map = np.stack([x_map, y_map]) + return hv_map + + @staticmethod + def gen_distance_prob_maps(inst_map: np.ndarray) -> np.ndarray: + """Generate distance probability maps + + Args: + inst_map (np.ndarray): Instance-Map, each instance is has one integer starting by 1 (zero is background), Shape (H, W) + + Returns: + np.ndarray: Distance probability map, shape (H, W) + """ + inst_map = fix_duplicates(inst_map) + dist = np.zeros_like(inst_map, dtype=np.float64) + inst_list = list(np.unique(inst_map)) + if 0 in inst_list: + inst_list.remove(0) + + for inst_id in inst_list: + inst = np.array(inst_map == inst_id, np.uint8) + + y1, y2, x1, x2 = get_bounding_box(inst) + y1 = y1 - 2 if y1 - 2 >= 0 else y1 + x1 = x1 - 2 if x1 - 2 >= 0 else x1 + x2 = x2 + 2 if x2 + 2 <= inst_map.shape[1] - 1 else x2 + y2 = y2 + 2 if y2 + 2 <= inst_map.shape[0] - 1 else y2 + + inst = inst[y1:y2, x1:x2] + + if inst.shape[0] < 2 or inst.shape[1] < 2: + continue + + # chessboard distance map generation + # normalize distance to 0-1 + inst_dist = distance_transform_edt(inst) + inst_dist = inst_dist.astype("float64") + + max_value = np.amax(inst_dist) + if max_value <= 0: + continue + inst_dist = inst_dist / (np.max(inst_dist) + 1e-10) + + dist_map_box = dist[y1:y2, x1:x2] + 
dist_map_box[inst > 0] = inst_dist[inst > 0] + + return dist + + @staticmethod + @njit + def gen_stardist_maps(inst_map: np.ndarray) -> np.ndarray: + """Generate StarDist map with 32 nrays + + Args: + inst_map (np.ndarray): Instance-Map, each instance is has one integer starting by 1 (zero is background), Shape (H, W) + + Returns: + np.ndarray: Stardist vector map, shape (n_rays, H, W) + """ + n_rays = 32 + # inst_map = fix_duplicates(inst_map) + dist = np.empty(inst_map.shape + (n_rays,), np.float32) + + st_rays = np.float32((2 * np.pi) / n_rays) + for i in range(inst_map.shape[0]): + for j in range(inst_map.shape[1]): + value = inst_map[i, j] + if value == 0: + dist[i, j] = 0 + else: + for k in range(n_rays): + phi = np.float32(k * st_rays) + dy = np.cos(phi) + dx = np.sin(phi) + x, y = np.float32(0), np.float32(0) + while True: + x += dx + y += dy + ii = int(round(i + x)) + jj = int(round(j + y)) + if ( + ii < 0 + or ii >= inst_map.shape[0] + or jj < 0 + or jj >= inst_map.shape[1] + or value != inst_map[ii, jj] + ): + # small correction as we overshoot the boundary + t_corr = 1 - 0.5 / max(np.abs(dx), np.abs(dy)) + x -= t_corr * dx + y -= t_corr * dy + dst = np.sqrt(x**2 + y**2) + dist[i, j, k] = dst + break + + return dist.transpose(2, 0, 1) + + @staticmethod + def gen_regression_map(inst_map: np.ndarray): + n_directions = 2 + dist = np.zeros(inst_map.shape + (n_directions,), np.float32).transpose(2, 0, 1) + inst_map = fix_duplicates(inst_map) + inst_list = list(np.unique(inst_map)) + if 0 in inst_list: + inst_list.remove(0) + for inst_id in inst_list: + inst = np.array(inst_map == inst_id, np.uint8) + y1, y2, x1, x2 = get_bounding_box(inst) + y1 = y1 - 2 if y1 - 2 >= 0 else y1 + x1 = x1 - 2 if x1 - 2 >= 0 else x1 + x2 = x2 + 2 if x2 + 2 <= inst_map.shape[1] - 1 else x2 + y2 = y2 + 2 if y2 + 2 <= inst_map.shape[0] - 1 else y2 + + inst = inst[y1:y2, x1:x2] + y_mass, x_mass = center_of_mass(inst) + x_map = np.repeat(np.arange(1, x2 - x1 + 1)[None, :], y2 - y1, axis=0) + y_map = np.repeat(np.arange(1, y2 - y1 + 1)[:, None], x2 - x1, axis=1) + # we use a transposed coordinate system to align to HV-map, correct would be -1*x_dist_map and -1*y_dist_map + x_dist_map = (x_map - x_mass) * np.clip(inst, 0, 1) + y_dist_map = (y_map - y_mass) * np.clip(inst, 0, 1) + dist[0, y1:y2, x1:x2] = x_dist_map + dist[1, y1:y2, x1:x2] = y_dist_map + + return dist diff --git a/cell_segmentation/datasets/prepare_monuseg.py b/cell_segmentation/datasets/prepare_monuseg.py new file mode 100644 index 0000000000000000000000000000000000000000..240570a1cc3bb7eed0ee6a1c0d7f9944b6905da1 --- /dev/null +++ b/cell_segmentation/datasets/prepare_monuseg.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +# Prepare MoNuSeg Dataset By converting and resorting files +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +from PIL import Image +import xml.etree.ElementTree as ET +from skimage import draw +import numpy as np +from pathlib import Path +from typing import Union +import argparse + + +def convert_monuseg( + input_path: Union[Path, str], output_path: Union[Path, str] +) -> None: + """Convert the MoNuSeg dataset to a new format (1000 -> 1024, tiff to png and xml to npy) + + Args: + input_path (Union[Path, str]): Input dataset + output_path (Union[Path, str]): Output path + """ + input_path = Path(input_path) + output_path = Path(output_path) + output_path.mkdir(exist_ok=True, parents=True) + + # testing and training + parts = ["testing", 
"training"] + for part in parts: + print(f"Prepare: {part}") + input_path_part = input_path / part + output_path_part = output_path / part + output_path_part.mkdir(exist_ok=True, parents=True) + (output_path_part / "images").mkdir(exist_ok=True, parents=True) + (output_path_part / "labels").mkdir(exist_ok=True, parents=True) + + # images + images = [f for f in sorted((input_path_part / "images").glob("*.tif"))] + for img_path in images: + loaded_image = Image.open(img_path) + resized = loaded_image.resize( + (1024, 1024), resample=Image.Resampling.LANCZOS + ) + new_img_path = output_path_part / "images" / f"{img_path.stem}.png" + resized.save(new_img_path) + # masks + annotations = [f for f in sorted((input_path_part / "labels").glob("*.xml"))] + for annot_path in annotations: + binary_mask = np.transpose(np.zeros((1000, 1000))) + + # extract xml file + tree = ET.parse(annot_path) + root = tree.getroot() + child = root[0] + + for x in child: + r = x.tag + if r == "Regions": + element_idx = 1 + for y in x: + y_tag = y.tag + + if y_tag == "Region": + regions = [] + vertices = y[1] + coords = np.zeros((len(vertices), 2)) + for i, vertex in enumerate(vertices): + coords[i][0] = vertex.attrib["X"] + coords[i][1] = vertex.attrib["Y"] + regions.append(coords) + vertex_row_coords = regions[0][:, 0] + vertex_col_coords = regions[0][:, 1] + fill_row_coords, fill_col_coords = draw.polygon( + vertex_col_coords, vertex_row_coords, binary_mask.shape + ) + binary_mask[fill_row_coords, fill_col_coords] = element_idx + + element_idx = element_idx + 1 + inst_image = Image.fromarray(binary_mask) + resized_mask = np.array( + inst_image.resize((1024, 1024), resample=Image.Resampling.NEAREST) + ) + new_mask_path = output_path_part / "labels" / f"{annot_path.stem}.npy" + np.save(new_mask_path, resized_mask) + print("Finished") + + +parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Convert the MoNuSeg dataset", +) +parser.add_argument( + "--input_path", + type=str, + help="Input path of the original MoNuSeg dataset", + required=True, +) +parser.add_argument( + "--output_path", + type=str, + help="Output path to store the processed MoNuSeg dataset", + required=True, +) + +if __name__ == "__main__": + opt = parser.parse_args() + configuration = vars(opt) + + input_path = Path(configuration["input_path"]) + output_path = Path(configuration["output_path"]) + + convert_monuseg(input_path=input_path, output_path=output_path) diff --git a/cell_segmentation/datasets/prepare_pannuke_origin.py b/cell_segmentation/datasets/prepare_pannuke_origin.py new file mode 100644 index 0000000000000000000000000000000000000000..2102a32a4e38f96c2e042a2955bfa2cf8daeb93e --- /dev/null +++ b/cell_segmentation/datasets/prepare_pannuke_origin.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Prepare MoNuSeg Dataset By converting and resorting files +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import inspect +import os +import sys + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) +parentdir = os.path.dirname(parentdir) +sys.path.insert(0, parentdir) + +import numpy as np +from pathlib import Path +from PIL import Image +from tqdm import tqdm +import argparse +from cell_segmentation.utils.metrics import remap_label + + +def process_fold(fold, input_path, output_path) -> None: + fold_path = 
Path(input_path) / f"fold{fold}" + output_fold_path = Path(output_path) / f"fold{fold}" + output_fold_path.mkdir(exist_ok=True, parents=True) + (output_fold_path / "images").mkdir(exist_ok=True, parents=True) + (output_fold_path / "labels").mkdir(exist_ok=True, parents=True) + + print(f"Fold: {fold}") + print("Loading large numpy files, this may take a while") + images = np.load(fold_path / "images.npy") + masks = np.load(fold_path / "masks.npy") + + print("Process images") + for i in tqdm(range(len(images)), total=len(images)): + outname = f"{fold}_{i}.png" + out_img = images[i] + im = Image.fromarray(out_img.astype(np.uint8)) + im.save(output_fold_path / "images" / outname) + + print("Process masks") + for i in tqdm(range(len(images)), total=len(images)): + outname = f"{fold}_{i}.npy" + + # need to create instance map and type map with shape 256x256 + mask = masks[i] + inst_map = np.zeros((256, 256)) + num_nuc = 0 + for j in range(5): + # copy value from new array if value is not equal 0 + layer_res = remap_label(mask[:, :, j]) + # inst_map = np.where(mask[:,:,j] != 0, mask[:,:,j], inst_map) + inst_map = np.where(layer_res != 0, layer_res + num_nuc, inst_map) + num_nuc = num_nuc + np.max(layer_res) + inst_map = remap_label(inst_map) + + type_map = np.zeros((256, 256)).astype(np.int32) + for j in range(5): + layer_res = ((j + 1) * np.clip(mask[:, :, j], 0, 1)).astype(np.int32) + type_map = np.where(layer_res != 0, layer_res, type_map) + + outdict = {"inst_map": inst_map, "type_map": type_map} + np.save(output_fold_path / "labels" / outname, outdict) + + +parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Perform CellViT inference for given run-directory with model checkpoints and logs", +) +parser.add_argument( + "--input_path", + type=str, + help="Input path of the original PanNuke dataset", + required=True, +) +parser.add_argument( + "--output_path", + type=str, + help="Output path to store the processed PanNuke dataset", + required=True, +) + +if __name__ == "__main__": + opt = parser.parse_args() + configuration = vars(opt) + + input_path = Path(configuration["input_path"]) + output_path = Path(configuration["output_path"]) + + for fold in [0, 1, 2]: + process_fold(fold, input_path, output_path) diff --git a/cell_segmentation/experiments/__init__.py b/cell_segmentation/experiments/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1136a0466a98ae89430f0c641640c9e5f02c90ec --- /dev/null +++ b/cell_segmentation/experiments/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Experiment related methods for each network type +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen diff --git a/cell_segmentation/experiments/experiment_cellvit_conic.py b/cell_segmentation/experiments/experiment_cellvit_conic.py new file mode 100644 index 0000000000000000000000000000000000000000..e4862497d1ed0af1e6c7b95e2e8e8ed431348930 --- /dev/null +++ b/cell_segmentation/experiments/experiment_cellvit_conic.py @@ -0,0 +1,808 @@ +# -*- coding: utf-8 -*- +# CellVit Experiment Class +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import copy +import datetime +import inspect +import os +import shutil +import sys + +import yaml + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) 
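+# Note: the dirname calls above resolve the package directory that contains this
+# experiments folder; it is prepended to sys.path below so the project's modules
+# can be imported when this file is executed directly as a script.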
+sys.path.insert(0, parentdir) + +import uuid +from pathlib import Path +from typing import Callable, Tuple, Union + +import albumentations as A +import torch +import torch.nn as nn +import wandb +from torch.optim import Optimizer +from torch.optim.lr_scheduler import ( + ConstantLR, + CosineAnnealingLR, + ExponentialLR, + SequentialLR, + _LRScheduler, +) +from torch.utils.data import ( + DataLoader, + Dataset, + RandomSampler, + Sampler, + Subset, + WeightedRandomSampler, +) +from torchinfo import summary +from wandb.sdk.lib.runid import generate_id + +from base_ml.base_early_stopping import EarlyStopping +from base_ml.base_experiment import BaseExperiment +from base_ml.base_loss import retrieve_loss_fn +from cell_segmentation.datasets.base_cell import CellDataset +from cell_segmentation.datasets.dataset_coordinator import select_dataset +from cell_segmentation.trainer.trainer_cellvit import CellViTTrainer +from models.segmentation.cell_segmentation.cellvit import CellViT +from utils.tools import close_logger + + +class ExperimentCellViTCoNic(BaseExperiment): + def __init__(self, default_conf: dict, checkpoint=None) -> None: + super().__init__(default_conf, checkpoint) + self.load_dataset_setup(dataset_path=self.default_conf["data"]["dataset_path"]) + + def run_experiment(self) -> Tuple[Path, dict, nn.Module, dict]: + """Main Experiment Code""" + ### Setup + # close loggers + self.close_remaining_logger() + + # get the config for the current run + self.run_conf = copy.deepcopy(self.default_conf) + self.run_conf["dataset_config"] = self.dataset_config + self.run_name = f"{datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')}_{self.run_conf['logging']['log_comment']}" + + wandb_run_id = generate_id() + resume = None + if self.checkpoint is not None: + wandb_run_id = self.checkpoint["wandb_id"] + resume = "must" + self.run_name = self.checkpoint["run_name"] + + # initialize wandb + run = wandb.init( + project=self.run_conf["logging"]["project"], + tags=self.run_conf["logging"].get("tags", []), + name=self.run_name, + notes=self.run_conf["logging"]["notes"], + dir=self.run_conf["logging"]["wandb_dir"], + mode=self.run_conf["logging"]["mode"].lower(), + group=self.run_conf["logging"].get("group", str(uuid.uuid4())), + allow_val_change=True, + id=wandb_run_id, + resume=resume, + settings=wandb.Settings(start_method="fork"), + ) + + # get ids + self.run_conf["logging"]["run_id"] = run.id + self.run_conf["logging"]["wandb_file"] = run.id + + # overwrite configuration with sweep values are leave them as they are + if self.run_conf["run_sweep"] is True: + self.run_conf["logging"]["sweep_id"] = run.sweep_id + self.run_conf["logging"]["log_dir"] = str( + Path(self.default_conf["logging"]["log_dir"]) + / f"sweep_{run.sweep_id}" + / f"{self.run_name}_{self.run_conf['logging']['run_id']}" + ) + self.overwrite_sweep_values(self.run_conf, run.config) + else: + self.run_conf["logging"]["log_dir"] = str( + Path(self.default_conf["logging"]["log_dir"]) / self.run_name + ) + + # update wandb + wandb.config.update( + self.run_conf, allow_val_change=True + ) # this may lead to the problem + + # create output folder, instantiate logger and store config + self.create_output_dir(self.run_conf["logging"]["log_dir"]) + self.logger = self.instantiate_logger() + self.logger.info("Instantiated Logger. 
WandB init and config update finished.") + self.logger.info(f"Run ist stored here: {self.run_conf['logging']['log_dir']}") + self.store_config() + + self.logger.info( + f"Cuda devices: {[torch.cuda.device(i) for i in range(torch.cuda.device_count())]}" + ) + ### Machine Learning + device = f"cuda:{self.run_conf['gpu']}" + self.logger.info(f"Using GPU: {device}") + self.logger.info(f"Using device: {device}") + + # loss functions + loss_fn_dict = self.get_loss_fn(self.run_conf.get("loss", {})) + self.logger.info("Loss functions:") + self.logger.info(loss_fn_dict) + + # model + model = self.get_train_model( + pretrained_encoder=self.run_conf["model"].get("pretrained_encoder", None), + pretrained_model=self.run_conf["model"].get("pretrained", None), + backbone_type=self.run_conf["model"].get("backbone", "default"), + shared_decoders=self.run_conf["model"].get("shared_decoders", False), + regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + model.to(device) + + # optimizer + optimizer = self.get_optimizer( + model, + self.run_conf["training"]["optimizer"], + self.run_conf["training"]["optimizer_hyperparameter"], + ) + + # scheduler + scheduler = self.get_scheduler( + optimizer=optimizer, + scheduler_type=self.run_conf["training"]["scheduler"]["scheduler_type"], + ) + + # early stopping (no early stopping for basic setup) + early_stopping = None + if "early_stopping_patience" in self.run_conf["training"]: + if self.run_conf["training"]["early_stopping_patience"] is not None: + early_stopping = EarlyStopping( + patience=self.run_conf["training"]["early_stopping_patience"], + strategy="maximize", + ) + + ### Data handling + train_transforms, val_transforms = self.get_transforms( + self.run_conf["transformations"], + input_shape=self.run_conf["data"].get("input_shape", 256), + ) + + train_dataset, val_dataset = self.get_datasets( + train_transforms=train_transforms, + val_transforms=val_transforms, + ) + + # load sampler + training_sampler = self.get_sampler( + train_dataset=train_dataset, + strategy=self.run_conf["training"].get("sampling_strategy", "random"), + gamma=self.run_conf["training"].get("sampling_gamma", 1), + ) + + # define dataloaders + train_dataloader = DataLoader( + train_dataset, + batch_size=self.run_conf["training"]["batch_size"], + sampler=training_sampler, + num_workers=16, + pin_memory=False, + worker_init_fn=self.seed_worker, + ) + + val_dataloader = DataLoader( + val_dataset, + batch_size=128, + num_workers=16, + pin_memory=True, + worker_init_fn=self.seed_worker, + ) + + # start Training + self.logger.info("Instantiate Trainer") + trainer = CellViTTrainer( + model=model, + loss_fn_dict=loss_fn_dict, + optimizer=optimizer, + scheduler=scheduler, + device=device, + logger=self.logger, + logdir=self.run_conf["logging"]["log_dir"], + num_classes=self.run_conf["data"]["num_nuclei_classes"], + dataset_config=self.dataset_config, + early_stopping=early_stopping, + experiment_config=self.run_conf, + log_images=self.run_conf["logging"].get("log_images", False), + magnification=self.run_conf["data"].get("magnification", 40), + mixed_precision=self.run_conf["training"].get("mixed_precision", False), + ) + + # Load checkpoint if provided + if self.checkpoint is not None: + self.logger.info("Checkpoint was provided. 
Restore ...") + trainer.resume_checkpoint(self.checkpoint) + + # Call fit method + self.logger.info("Calling Trainer Fit") + trainer.fit( + epochs=self.run_conf["training"]["epochs"], + train_dataloader=train_dataloader, + val_dataloader=val_dataloader, + metric_init=self.get_wandb_init_dict(), + unfreeze_epoch=self.run_conf["training"]["unfreeze_epoch"], + eval_every=self.run_conf["training"].get("eval_every", 1), + ) + + # Select best model if not provided by early stopping + checkpoint_dir = Path(self.run_conf["logging"]["log_dir"]) / "checkpoints" + if not (checkpoint_dir / "model_best.pth").is_file(): + shutil.copy( + checkpoint_dir / "latest_checkpoint.pth", + checkpoint_dir / "model_best.pth", + ) + + # At the end close logger + self.logger.info(f"Finished run {run.id}") + close_logger(self.logger) + + return self.run_conf["logging"]["log_dir"] + + def load_dataset_setup(self, dataset_path: Union[Path, str]) -> None: + """Load the configuration of the cell segmentation dataset. + + The dataset must have a dataset_config.yaml file in their dataset path with the following entries: + * nuclei_types: describing the present nuclei types with corresponding integer + + Args: + dataset_path (Union[Path, str]): Path to dataset folder + """ + dataset_config_path = Path(dataset_path) / "dataset_config.yaml" + with open(dataset_config_path, "r") as dataset_config_file: + yaml_config = yaml.safe_load(dataset_config_file) + self.dataset_config = dict(yaml_config) + + def get_loss_fn(self, loss_fn_settings: dict) -> dict: + """Create a dictionary with loss functions for all branches + + Branches: "nuclei_binary_map", "hv_map", "nuclei_type_map" + + Args: + loss_fn_settings (dict): Dictionary with the loss function settings. Structure + branch_name(str): + loss_name(str): + loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss) + weight(float): Weighting factor as float value + (optional) args: Optional parameters for initializing the loss function + arg_name: value + + If a branch is not provided, the defaults settings (described below) are used. + + For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml + under the section "loss" + + Example: + nuclei_binary_map: + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + + Returns: + dict: Dictionary with loss functions for each branch. Structure: + branch_name(str): + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + branch_name(str) + ... 
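+
+ Illustrative combination of the returned dictionary (a sketch only, not the exact
+ trainer code; `predictions` and `targets` are placeholder names):
+ total_loss = sum(
+ entry["weight"] * entry["loss_fn"](predictions[branch], targets[branch])
+ for branch, branch_losses in loss_fn_dict.items()
+ for entry in branch_losses.values()
+ )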
+ + Default loss dictionary: + nuclei_binary_map: + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + hv_map: + mse: + loss_fn: mse_loss_maps + weight: 1 + msge: + loss_fn: msge_loss_maps + weight: 1 + nuclei_type_map + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + """ + loss_fn_dict = {} + if "nuclei_binary_map" in loss_fn_settings.keys(): + loss_fn_dict["nuclei_binary_map"] = {} + for loss_name, loss_sett in loss_fn_settings["nuclei_binary_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["nuclei_binary_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["nuclei_binary_map"] = { + "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1}, + "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1}, + } + if "hv_map" in loss_fn_settings.keys(): + loss_fn_dict["hv_map"] = {} + for loss_name, loss_sett in loss_fn_settings["hv_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["hv_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["hv_map"] = { + "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1}, + "msge": {"loss_fn": retrieve_loss_fn("msge_loss_maps"), "weight": 1}, + } + if "nuclei_type_map" in loss_fn_settings.keys(): + loss_fn_dict["nuclei_type_map"] = {} + for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["nuclei_type_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["nuclei_type_map"] = { + "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1}, + "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1}, + } + if "regression_loss" in loss_fn_settings.keys(): + loss_fn_dict["regression_map"] = {} + for loss_name, loss_sett in loss_fn_settings["regression_loss"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["regression_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + elif "regression_loss" in self.run_conf["model"].keys(): + loss_fn_dict["regression_map"] = { + "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1}, + } + return loss_fn_dict + + def get_scheduler(self, scheduler_type: str, optimizer: Optimizer) -> _LRScheduler: + """Get the learning rate scheduler for CellViT + + The configuration of the scheduler is given in the "training" -> "scheduler" section. + Currenlty, "constant", "exponential" and "cosine" schedulers are implemented. + + Required parameters for implemented schedulers: + - "constant": None + - "exponential": gamma (optional, defaults to 0.95) + - "cosine": eta_min (optional, defaults to 1-e5) + + Args: + scheduler_type (str): Type of scheduler as a string. 
Currently implemented:
+ - "constant" (lowering by a factor of ten after 25 epochs, increasing after 50, decreasing again after 75)
+ - "exponential" (ExponentialLR with given gamma, gamma defaults to 0.95)
+ - "cosine" (CosineAnnealingLR, eta_min as parameter, defaults to 1e-5)
+ optimizer (Optimizer): Optimizer
+
+ Returns:
+ _LRScheduler: PyTorch Scheduler
+ """
+ implemented_schedulers = ["constant", "exponential", "cosine"]
+ if scheduler_type.lower() not in implemented_schedulers:
+ self.logger.warning(
+ f"Unknown Scheduler - No scheduler from the list {implemented_schedulers} selected. Using default scheduling."
+ )
+ if scheduler_type.lower() == "constant":
+ scheduler = SequentialLR(
+ optimizer=optimizer,
+ schedulers=[
+ ConstantLR(optimizer, factor=1, total_iters=25),
+ ConstantLR(optimizer, factor=0.1, total_iters=25),
+ ConstantLR(optimizer, factor=1, total_iters=25),
+ ConstantLR(optimizer, factor=0.1, total_iters=1000),
+ ],
+ milestones=[24, 49, 74],
+ )
+ elif scheduler_type.lower() == "exponential":
+ scheduler = ExponentialLR(
+ optimizer,
+ gamma=self.run_conf["training"]["scheduler"].get("gamma", 0.95),
+ )
+ elif scheduler_type.lower() == "cosine":
+ scheduler = CosineAnnealingLR(
+ optimizer,
+ T_max=self.run_conf["training"]["epochs"],
+ eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5),
+ )
+ else:
+ scheduler = super().get_scheduler(optimizer)
+ return scheduler
+
+ def get_datasets(
+ self,
+ train_transforms: Callable = None,
+ val_transforms: Callable = None,
+ ) -> Tuple[Dataset, Dataset]:
+ """Retrieve training dataset and validation dataset
+
+ Args:
+ train_transforms (Callable, optional): PyTorch transformations for train set. Defaults to None.
+ val_transforms (Callable, optional): PyTorch transformations for validation set. Defaults to None.
+
+ Returns:
+ Tuple[Dataset, Dataset]: Training dataset and validation dataset
+ """
+ if (
+ "val_split" in self.run_conf["data"]
+ and "val_folds" in self.run_conf["data"]
+ ):
+ raise RuntimeError(
+ "Provide either val_split or val_folds in configuration file, not both."
+ )
+ if (
+ "val_split" not in self.run_conf["data"]
+ and "val_folds" not in self.run_conf["data"]
+ ):
+ raise RuntimeError(
+ "Provide either val_split or val_folds in configuration file, one is necessary."
+ )
+ if (
+ "val_split" not in self.run_conf["data"]
+ and "val_folds" not in self.run_conf["data"]
+ ):
+ raise RuntimeError(
+ "Provide either val_split or val_fold in configuration file, one is necessary."
+ ) + if "regression_loss" in self.run_conf["model"].keys(): + self.run_conf["data"]["regression_loss"] = True + + full_dataset = select_dataset( + dataset_name="conic", + split="train", + dataset_config=self.run_conf["data"], + transforms=train_transforms, + ) + if "val_split" in self.run_conf["data"]: + generator_split = torch.Generator().manual_seed( + self.default_conf["random_seed"] + ) + val_splits = float(self.run_conf["data"]["val_split"]) + train_dataset, val_dataset = torch.utils.data.random_split( + full_dataset, + lengths=[1 - val_splits, val_splits], + generator=generator_split, + ) + val_dataset.dataset = copy.deepcopy(full_dataset) + val_dataset.dataset.set_transforms(val_transforms) + else: + train_dataset = full_dataset + val_dataset = select_dataset( + dataset_name="conic", + split="validation", + dataset_config=self.run_conf["data"], + transforms=val_transforms, + ) + + return train_dataset, val_dataset + + def get_train_model( + self, + pretrained_encoder: Union[Path, str] = None, + pretrained_model: Union[Path, str] = None, + backbone_type: str = "default", + shared_decoders: bool = False, + regression_loss: bool = False, + **kwargs, + ) -> CellViT: + """Return the CellViT training model + + Args: + pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None. + pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None. + backbone_type (str, optional): Backbone Type. Currently supported are default (None, ViT256, SAM-B, SAM-L, SAM-H). Defaults to None + shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False. + regression_loss (bool, optional): If regression loss is used. Defaults to False + + Returns: + CellViT: CellViT training model with given setup + """ + # reseed needed, due to subprocess seeding compatibility + self.seed_run(self.default_conf["random_seed"]) + + # check for backbones + implemented_backbones = ["default", "vit256", "sam-b", "sam-l", "sam-h"] + if backbone_type.lower() not in implemented_backbones: + raise NotImplementedError( + f"Unknown Backbone Type - Currently supported are: {implemented_backbones}" + ) + if backbone_type.lower() == "default": + if shared_decoders: + model_class = CellViTShared + else: + model_class = CellViT + model = model_class( + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=1, + embed_dim=self.run_conf["model"]["embed_dim"], + input_channels=self.run_conf["model"].get("input_channels", 3), + depth=self.run_conf["model"]["depth"], + num_heads=self.run_conf["model"]["num_heads"], + extract_layers=self.run_conf["model"]["extract_layers"], + drop_rate=self.run_conf["training"].get("drop_rate", 0), + attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0), + regression_loss=regression_loss, + ) + + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model) + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + self.logger.info("Loaded CellViT model") + + if backbone_type.lower() == "vit256": + if shared_decoders: + model_class = CellViT256Shared + else: + model_class = CellViT256 + model = model_class( + model256_path=pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=1, + drop_rate=self.run_conf["training"].get("drop_rate", 
0), + attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0), + regression_loss=regression_loss, + ) + model.load_pretrained_encoder(model.model256_path) + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model, map_location="cpu") + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + model.freeze_encoder() + self.logger.info("Loaded CellVit256 model") + if backbone_type.lower() in ["sam-b", "sam-l", "sam-h"]: + if shared_decoders: + model_class = CellViTSAMShared + else: + model_class = CellViTSAM + model = model_class( + model_path=pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=1, + vit_structure=backbone_type, + drop_rate=self.run_conf["training"].get("drop_rate", 0), + regression_loss=regression_loss, + ) + model.load_pretrained_encoder(model.model_path) + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model, map_location="cpu") + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + model.freeze_encoder() + self.logger.info(f"Loaded CellViT-SAM model with backbone: {backbone_type}") + + self.logger.info(f"\nModel: {model}") + model = model.to("cpu") + self.logger.info( + f"\n{summary(model, input_size=(1, 3, 256, 256), device='cpu')}" + ) + + return model + + def get_wandb_init_dict(self) -> dict: + pass + + def get_transforms( + self, transform_settings: dict, input_shape: int = 256 + ) -> Tuple[Callable, Callable]: + """Get Transformations (Albumentation Transformations). Return both training and validation transformations. + + The transformation settings are given in the following format: + key: dict with parameters + Example: + colorjitter: + p: 0.1 + scale_setting: 0.5 + scale_color: 0.1 + + For further information on how to setup the dictionary and default (recommended) values is given here: + configs/examples/cell_segmentation/train_cellvit.yaml + + Training Transformations: + Implemented are: + - A.RandomRotate90: Key in transform_settings: randomrotate90, parameters: p + - A.HorizontalFlip: Key in transform_settings: horizontalflip, parameters: p + - A.VerticalFlip: Key in transform_settings: verticalflip, parameters: p + - A.Downscale: Key in transform_settings: downscale, parameters: p, scale + - A.Blur: Key in transform_settings: blur, parameters: p, blur_limit + - A.GaussNoise: Key in transform_settings: gaussnoise, parameters: p, var_limit + - A.ColorJitter: Key in transform_settings: colorjitter, parameters: p, scale_setting, scale_color + - A.Superpixels: Key in transform_settings: superpixels, parameters: p + - A.ZoomBlur: Key in transform_settings: zoomblur, parameters: p + - A.RandomSizedCrop: Key in transform_settings: randomsizedcrop, parameters: p + - A.ElasticTransform: Key in transform_settings: elastictransform, parameters: p + Always implemented at the end of the pipeline: + - A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5)) + + Validation Transformations: + A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5)) + + Args: + transform_settings (dict): dictionay with the transformation settings. + input_shape (int, optional): Input shape of the images to used. 
Defaults to 256. + + Returns: + Tuple[Callable, Callable]: Train Transformations, Validation Transformations + + """ + transform_list = [] + transform_settings = {k.lower(): v for k, v in transform_settings.items()} + if "RandomRotate90".lower() in transform_settings: + p = transform_settings["randomrotate90"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.RandomRotate90(p=p)) + if "HorizontalFlip".lower() in transform_settings.keys(): + p = transform_settings["horizontalflip"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.HorizontalFlip(p=p)) + if "VerticalFlip".lower() in transform_settings: + p = transform_settings["verticalflip"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.VerticalFlip(p=p)) + if "Downscale".lower() in transform_settings: + p = transform_settings["downscale"]["p"] + scale = transform_settings["downscale"]["scale"] + if p > 0 and p <= 1: + transform_list.append( + A.Downscale(p=p, scale_max=scale, scale_min=scale) + ) + if "Blur".lower() in transform_settings: + p = transform_settings["blur"]["p"] + blur_limit = transform_settings["blur"]["blur_limit"] + if p > 0 and p <= 1: + transform_list.append(A.Blur(p=p, blur_limit=blur_limit)) + if "GaussNoise".lower() in transform_settings: + p = transform_settings["gaussnoise"]["p"] + var_limit = transform_settings["gaussnoise"]["var_limit"] + if p > 0 and p <= 1: + transform_list.append(A.GaussNoise(p=p, var_limit=var_limit)) + if "ColorJitter".lower() in transform_settings: + p = transform_settings["colorjitter"]["p"] + scale_setting = transform_settings["colorjitter"]["scale_setting"] + scale_color = transform_settings["colorjitter"]["scale_color"] + if p > 0 and p <= 1: + transform_list.append( + A.ColorJitter( + p=p, + brightness=scale_setting, + contrast=scale_setting, + saturation=scale_color, + hue=scale_color / 2, + ) + ) + if "Superpixels".lower() in transform_settings: + p = transform_settings["superpixels"]["p"] + if p > 0 and p <= 1: + transform_list.append( + A.Superpixels( + p=p, + p_replace=0.1, + n_segments=200, + max_size=int(input_shape / 2), + ) + ) + if "ZoomBlur".lower() in transform_settings: + p = transform_settings["zoomblur"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.ZoomBlur(p=p, max_factor=1.05)) + if "RandomSizedCrop".lower() in transform_settings: + p = transform_settings["randomsizedcrop"]["p"] + if p > 0 and p <= 1: + transform_list.append( + A.RandomSizedCrop( + min_max_height=(input_shape / 2, input_shape), + height=input_shape, + width=input_shape, + p=p, + ) + ) + if "ElasticTransform".lower() in transform_settings: + p = transform_settings["elastictransform"]["p"] + if p > 0 and p <= 1: + transform_list.append( + A.ElasticTransform(p=p, sigma=25, alpha=0.5, alpha_affine=15) + ) + + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + transform_list.append(A.Normalize(mean=mean, std=std)) + + train_transforms = A.Compose(transform_list) + val_transforms = A.Compose([A.Normalize(mean=mean, std=std)]) + + return train_transforms, val_transforms + + def get_sampler( + self, train_dataset: CellDataset, strategy: str = "random", gamma: float = 1 + ) -> Sampler: + """Return the sampler (either RandomSampler or WeightedRandomSampler) + + Args: + train_dataset (CellDataset): Dataset for training + strategy (str, optional): Sampling strategy. Defaults to "random" (random sampling). 
+ Implemented are "random" and "cell" + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Raises: + NotImplementedError: Not implemented sampler is selected + + Returns: + Sampler: Sampler for training + """ + if strategy.lower() == "random": + sampling_generator = torch.Generator().manual_seed( + self.default_conf["random_seed"] + ) + sampler = RandomSampler(train_dataset, generator=sampling_generator) + self.logger.info("Using RandomSampler") + else: + # this solution is not accurate when a subset is used since the weights are calculated on the whole training dataset + if isinstance(train_dataset, Subset): + ds = train_dataset.dataset + else: + ds = train_dataset + ds.load_cell_count() + if strategy.lower() == "cell": + weights = ds.get_sampling_weights_cell(gamma) + else: + raise NotImplementedError( + "Unknown sampling strategy - Implemented is cell" + ) + + if isinstance(train_dataset, Subset): + weights = torch.Tensor([weights[i] for i in train_dataset.indices]) + + sampling_generator = torch.Generator().manual_seed( + self.default_conf["random_seed"] + ) + sampler = WeightedRandomSampler( + weights=weights, + num_samples=len(train_dataset), + replacement=True, + generator=sampling_generator, + ) + + self.logger.info(f"Using Weighted Sampling with strategy: {strategy}") + self.logger.info(f"Unique-Weights: {torch.unique(weights)}") + + return sampler diff --git a/cell_segmentation/experiments/experiment_cellvit_pannuke.py b/cell_segmentation/experiments/experiment_cellvit_pannuke.py new file mode 100644 index 0000000000000000000000000000000000000000..dede2e7eea852e1be7ae2fe64b4f06d458e81949 --- /dev/null +++ b/cell_segmentation/experiments/experiment_cellvit_pannuke.py @@ -0,0 +1,861 @@ +# -*- coding: utf-8 -*- +# CellVit Experiment Class +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen +import argparse +import copy +import datetime +import inspect +import os +import shutil +import sys + +import yaml +import numpy as np +import math + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) + +import uuid +from pathlib import Path +from typing import Callable, Tuple, Union +import torch +from torchsummary import summary +from torchstat import stat +import albumentations as A +import torch +import torch.nn as nn +import wandb +from torch.optim import Optimizer +from torch.optim.lr_scheduler import ( + ConstantLR, + CosineAnnealingLR, + ExponentialLR, + SequentialLR, + _LRScheduler, + CosineAnnealingWarmRestarts, +) +from torch.utils.data import ( + DataLoader, + Dataset, + RandomSampler, + Sampler, + Subset, + WeightedRandomSampler, +) +from torchinfo import summary +from wandb.sdk.lib.runid import generate_id + +from base_ml.base_early_stopping import EarlyStopping +from base_ml.base_experiment import BaseExperiment +from base_ml.base_loss import retrieve_loss_fn +from base_ml.base_trainer import BaseTrainer +from cell_segmentation.datasets.base_cell import CellDataset +from cell_segmentation.datasets.dataset_coordinator import select_dataset +from cell_segmentation.trainer.trainer_cellvit import CellViTTrainer +from models.segmentation.cell_segmentation.cellvit import CellViT + +from utils.tools import close_logger + + +class WarmupCosineAnnealingLR(CosineAnnealingLR): + def __init__(self, optimizer, T_max, eta_min=0, 
warmup_epochs=0, warmup_factor=0): + super().__init__(optimizer, T_max=T_max, eta_min=eta_min) + self.warmup_epochs = warmup_epochs + self.warmup_factor = warmup_factor + self.initial_lr = [group['lr'] for group in optimizer.param_groups] #初始化的学习率 + + def get_lr(self): + if self.last_epoch < self.warmup_epochs: + warmup_factor = self.warmup_factor + (1.0 - self.warmup_factor) * (self.last_epoch / self.warmup_epochs) + return [base_lr * warmup_factor for base_lr in self.initial_lr] + else: + return [base_lr * self.get_lr_ratio() for base_lr in self.initial_lr] + + def get_lr_ratio(self): + T_cur = min(self.last_epoch - self.warmup_epochs, self.T_max - self.warmup_epochs) + return 0.5 * (1 + math.cos(math.pi * T_cur / (self.T_max - self.warmup_epochs))) + + + +class ExperimentCellVitPanNuke(BaseExperiment): + def __init__(self, default_conf: dict, checkpoint=None) -> None: + super().__init__(default_conf, checkpoint) + self.load_dataset_setup(dataset_path=self.default_conf["data"]["dataset_path"]) + + def run_experiment(self) -> Tuple[Path, dict, nn.Module, dict]: + """Main Experiment Code""" + ### Setup + # close loggers + self.close_remaining_logger() + + # Initialize distributed training environment + + + # get the config for the current run + self.run_conf = copy.deepcopy(self.default_conf) + self.run_conf["dataset_config"] = self.dataset_config + self.run_name = f"{datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')}_{self.run_conf['logging']['log_comment']}" + + wandb_run_id = generate_id() + resume = None + if self.checkpoint is not None: + wandb_run_id = self.checkpoint["wandb_id"] + resume = "must" + self.run_name = self.checkpoint["run_name"] + + # initialize wandb + run = wandb.init( + project=self.run_conf["logging"]["project"], + tags=self.run_conf["logging"].get("tags", []), + name=self.run_name, + notes=self.run_conf["logging"]["notes"], + dir=self.run_conf["logging"]["wandb_dir"], + mode=self.run_conf["logging"]["mode"].lower(), + group=self.run_conf["logging"].get("group", str(uuid.uuid4())), + allow_val_change=True, + id=wandb_run_id, + resume=resume, + settings=wandb.Settings(start_method="fork"), + ) + + # get ids + self.run_conf["logging"]["run_id"] = run.id + self.run_conf["logging"]["wandb_file"] = run.id + + # overwrite configuration with sweep values are leave them as they are + if self.run_conf["run_sweep"] is True: + self.run_conf["logging"]["sweep_id"] = run.sweep_id + self.run_conf["logging"]["log_dir"] = str( + Path(self.default_conf["logging"]["log_dir"]) + / f"sweep_{run.sweep_id}" + / f"{self.run_name}_{self.run_conf['logging']['run_id']}" + ) + self.overwrite_sweep_values(self.run_conf, run.config) + else: + self.run_conf["logging"]["log_dir"] = str( + Path(self.default_conf["logging"]["log_dir"]) / self.run_name + ) + + # update wandb + wandb.config.update( + self.run_conf, allow_val_change=True + ) # this may lead to the problem + + # create output folder, instantiate logger and store config + self.create_output_dir(self.run_conf["logging"]["log_dir"]) + self.logger = self.instantiate_logger() + self.logger.info("Instantiated Logger. 
WandB init and config update finished.") + self.logger.info(f"Run ist stored here: {self.run_conf['logging']['log_dir']}") + self.store_config() + + self.logger.info( + f"Cuda devices: {[torch.cuda.device(i) for i in range(torch.cuda.device_count())]}" + ) + ### Machine Learning + #device = f"cuda:{2}" + #device = torch.device("cuda:2") + + device = f"cuda:{self.run_conf['gpu']}" + self.logger.info(f"Using GPU: {device}") + self.logger.info(f"Using device: {device}") + + # loss functions + loss_fn_dict = self.get_loss_fn(self.run_conf.get("loss", {})) + self.logger.info("Loss functions:") + self.logger.info(loss_fn_dict) + + # model + model = self.get_train_model( + pretrained_encoder=self.run_conf["model"].get("pretrained_encoder", None), + pretrained_model=self.run_conf["model"].get("pretrained", None), + backbone_type=self.run_conf["model"].get("backbone", "default"), + shared_decoders=self.run_conf["model"].get("shared_decoders", False), + regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + model.to(device) + + # optimizer + optimizer = self.get_optimizer( + model, + self.run_conf["training"]["optimizer"].lower(), + self.run_conf["training"]["optimizer_hyperparameter"], + #self.run_conf["training"]["optimizer"], + self.run_conf["training"]["layer_decay"], + + ) + + # scheduler + scheduler = self.get_scheduler( + optimizer=optimizer, + scheduler_type=self.run_conf["training"]["scheduler"]["scheduler_type"], + ) + + # early stopping (no early stopping for basic setup) + early_stopping = None + if "early_stopping_patience" in self.run_conf["training"]: + if self.run_conf["training"]["early_stopping_patience"] is not None: + early_stopping = EarlyStopping( + patience=self.run_conf["training"]["early_stopping_patience"], + strategy="maximize", + ) + + ### Data handling + train_transforms, val_transforms = self.get_transforms( + self.run_conf["transformations"], + input_shape=self.run_conf["data"].get("input_shape", 256), + ) + + train_dataset, val_dataset = self.get_datasets( + train_transforms=train_transforms, + val_transforms=val_transforms, + ) + + # load sampler + training_sampler = self.get_sampler( + train_dataset=train_dataset, + strategy=self.run_conf["training"].get("sampling_strategy", "random"), + gamma=self.run_conf["training"].get("sampling_gamma", 1), + ) + + # define dataloaders + train_dataloader = DataLoader( + train_dataset, + batch_size=self.run_conf["training"]["batch_size"], + sampler=training_sampler, + num_workers=16, + pin_memory=False, + worker_init_fn=self.seed_worker, + ) + + val_dataloader = DataLoader( + val_dataset, + batch_size=64, + num_workers=8, + pin_memory=True, + worker_init_fn=self.seed_worker, + ) + + # start Training + self.logger.info("Instantiate Trainer") + trainer_fn = self.get_trainer() + trainer = trainer_fn( + model=model, + loss_fn_dict=loss_fn_dict, + optimizer=optimizer, + scheduler=scheduler, + device=device, + logger=self.logger, + logdir=self.run_conf["logging"]["log_dir"], + num_classes=self.run_conf["data"]["num_nuclei_classes"], + dataset_config=self.dataset_config, + early_stopping=early_stopping, + experiment_config=self.run_conf, + log_images=self.run_conf["logging"].get("log_images", False), + magnification=self.run_conf["data"].get("magnification", 40), + mixed_precision=self.run_conf["training"].get("mixed_precision", False), + ) + + # Load checkpoint if provided + if self.checkpoint is not None: + self.logger.info("Checkpoint was provided. 
Restore ...") + trainer.resume_checkpoint(self.checkpoint) + + # Call fit method + self.logger.info("Calling Trainer Fit") + trainer.fit( + epochs=self.run_conf["training"]["epochs"], + train_dataloader=train_dataloader, + val_dataloader=val_dataloader, + metric_init=self.get_wandb_init_dict(), + unfreeze_epoch=self.run_conf["training"]["unfreeze_epoch"], + eval_every=self.run_conf["training"].get("eval_every", 1), + ) + + # Select best model if not provided by early stopping + checkpoint_dir = Path(self.run_conf["logging"]["log_dir"]) / "checkpoints" + if not (checkpoint_dir / "model_best.pth").is_file(): + shutil.copy( + checkpoint_dir / "latest_checkpoint.pth", + checkpoint_dir / "model_best.pth", + ) + + # At the end close logger + self.logger.info(f"Finished run {run.id}") + close_logger(self.logger) + + return self.run_conf["logging"]["log_dir"] + + def load_dataset_setup(self, dataset_path: Union[Path, str]) -> None: + """Load the configuration of the cell segmentation dataset. + + The dataset must have a dataset_config.yaml file in their dataset path with the following entries: + * tissue_types: describing the present tissue types with corresponding integer + * nuclei_types: describing the present nuclei types with corresponding integer + + Args: + dataset_path (Union[Path, str]): Path to dataset folder + """ + dataset_config_path = Path(dataset_path) / "dataset_config.yaml" + with open(dataset_config_path, "r") as dataset_config_file: + yaml_config = yaml.safe_load(dataset_config_file) + self.dataset_config = dict(yaml_config) + + def get_loss_fn(self, loss_fn_settings: dict) -> dict: + """Create a dictionary with loss functions for all branches + + Branches: "nuclei_binary_map", "hv_map", "nuclei_type_map", "tissue_types" + + Args: + loss_fn_settings (dict): Dictionary with the loss function settings. Structure + branch_name(str): + loss_name(str): + loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss) + weight(float): Weighting factor as float value + (optional) args: Optional parameters for initializing the loss function + arg_name: value + + If a branch is not provided, the defaults settings (described below) are used. + + For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml + under the section "loss" + + Example: + nuclei_binary_map: + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + + Returns: + dict: Dictionary with loss functions for each branch. Structure: + branch_name(str): + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + branch_name(str) + ... 
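+
+            Illustrative return value (a minimal sketch, assuming only the default binary-map losses are configured):
+                {
+                    "nuclei_binary_map": {
+                        "bce": {"loss_fn": <xentropy_loss callable>, "weight": 1},
+                        "dice": {"loss_fn": <dice_loss callable>, "weight": 1},
+                    },
+                    ...
+                }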
+ + Default loss dictionary: + nuclei_binary_map: + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + hv_map: + mse: + loss_fn: mse_loss_maps + weight: 1 + msge: + loss_fn: msge_loss_maps + weight: 1 + nuclei_type_map + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + tissue_types + ce: + loss_fn: nn.CrossEntropyLoss() + weight: 1 + """ + loss_fn_dict = {} + if "nuclei_binary_map" in loss_fn_settings.keys(): + loss_fn_dict["nuclei_binary_map"] = {} + for loss_name, loss_sett in loss_fn_settings["nuclei_binary_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["nuclei_binary_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["nuclei_binary_map"] = { + "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1}, + "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1}, + } + if "hv_map" in loss_fn_settings.keys(): + loss_fn_dict["hv_map"] = {} + for loss_name, loss_sett in loss_fn_settings["hv_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["hv_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["hv_map"] = { + "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1}, + "msge": {"loss_fn": retrieve_loss_fn("msge_loss_maps"), "weight": 1}, + } + if "nuclei_type_map" in loss_fn_settings.keys(): + loss_fn_dict["nuclei_type_map"] = {} + for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["nuclei_type_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["nuclei_type_map"] = { + "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1}, + "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1}, + } + if "tissue_types" in loss_fn_settings.keys(): + loss_fn_dict["tissue_types"] = {} + for loss_name, loss_sett in loss_fn_settings["tissue_types"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["tissue_types"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["tissue_types"] = { + "ce": {"loss_fn": nn.CrossEntropyLoss(), "weight": 1}, + } + if "regression_loss" in loss_fn_settings.keys(): + loss_fn_dict["regression_map"] = {} + for loss_name, loss_sett in loss_fn_settings["regression_loss"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["regression_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + elif "regression_loss" in self.run_conf["model"].keys(): + loss_fn_dict["regression_map"] = { + "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1}, + } + return loss_fn_dict + + + + def get_scheduler(self, scheduler_type: str, optimizer: Optimizer) -> _LRScheduler: + """Get the learning rate scheduler for CellViT + + The configuration of the scheduler is given in the "training" -> "scheduler" section. + Currenlty, "constant", "exponential" and "cosine" schedulers are implemented. 
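+
+        Illustrative "training" -> "scheduler" section of the run configuration (a minimal sketch, values are placeholders, not recommendations):
+            training:
+                scheduler:
+                    scheduler_type: cosine
+                    eta_min: 0.00001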
+ + Required parameters for implemented schedulers: + - "constant": None + - "exponential": gamma (optional, defaults to 0.95) + - "cosine": eta_min (optional, defaults to 1-e5) + + Args: + scheduler_type (str): Type of scheduler as a string. Currently implemented: + - "constant" (lowering by a factor of ten after 25 epochs, increasing after 50, decreasimg again after 75) + - "exponential" (ExponentialLR with given gamma, gamma defaults to 0.95) + - "cosine" (CosineAnnealingLR, eta_min as parameter, defaults to 1-e5) + optimizer (Optimizer): Optimizer + + Returns: + _LRScheduler: PyTorch Scheduler + """ + implemented_schedulers = ["constant", "exponential", "cosine", "default"] + if scheduler_type.lower() not in implemented_schedulers: + self.logger.warning( + f"Unknown Scheduler - No scheduler from the list {implemented_schedulers} select. Using default scheduling." + ) + if scheduler_type.lower() == "constant": + scheduler = SequentialLR( + optimizer=optimizer, + schedulers=[ + ConstantLR(optimizer, factor=1, total_iters=25), + ConstantLR(optimizer, factor=0.1, total_iters=25), + ConstantLR(optimizer, factor=1, total_iters=25), + ConstantLR(optimizer, factor=0.1, total_iters=1000), + ], + milestones=[24, 49, 74], + ) + elif scheduler_type.lower() == "exponential": + scheduler = ExponentialLR( + optimizer, + gamma=self.run_conf["training"]["scheduler"].get("gamma", 0.95), + ) + elif scheduler_type.lower() == "cosine": + scheduler = CosineAnnealingLR( + optimizer, + T_max=self.run_conf["training"]["epochs"], + eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5), + ) + # elif scheduler_type.lower == "cosinewarmrestarts": + # scheduler = CosineAnnealingWarmRestarts( + # optimizer, + # T_0=self.run_conf["training"]["scheduler"]["T_0"], + # T_mult=self.run_conf["training"]["scheduler"]["T_mult"], + # eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5) + # ) + elif scheduler_type.lower() == "default": + scheduler = super().get_scheduler(optimizer) + return scheduler + + def get_datasets( + self, + train_transforms: Callable = None, + val_transforms: Callable = None, + ) -> Tuple[Dataset, Dataset]: + """Retrieve training dataset and validation dataset + + Args: + train_transforms (Callable, optional): PyTorch transformations for train set. Defaults to None. + val_transforms (Callable, optional): PyTorch transformations for validation set. Defaults to None. + + Returns: + Tuple[Dataset, Dataset]: Training dataset and validation dataset + """ + if ( + "val_split" in self.run_conf["data"] + and "val_folds" in self.run_conf["data"] + ): + raise RuntimeError( + "Provide either val_splits or val_folds in configuration file, not both." + ) + if ( + "val_split" not in self.run_conf["data"] + and "val_folds" not in self.run_conf["data"] + ): + raise RuntimeError( + "Provide either val_split or val_folds in configuration file, one is necessary." + ) + if ( + "val_split" not in self.run_conf["data"] + and "val_folds" not in self.run_conf["data"] + ): + raise RuntimeError( + "Provide either val_split or val_fold in configuration file, one is necessary." 
+ ) + if "regression_loss" in self.run_conf["model"].keys(): + self.run_conf["data"]["regression_loss"] = True + + full_dataset = select_dataset( + dataset_name="pannuke", + split="train", + dataset_config=self.run_conf["data"], + transforms=train_transforms, + ) + if "val_split" in self.run_conf["data"]: + generator_split = torch.Generator().manual_seed( + self.default_conf["random_seed"] + ) + val_splits = float(self.run_conf["data"]["val_split"]) + train_dataset, val_dataset = torch.utils.data.random_split( + full_dataset, + lengths=[1 - val_splits, val_splits], + generator=generator_split, + ) + val_dataset.dataset = copy.deepcopy(full_dataset) + val_dataset.dataset.set_transforms(val_transforms) + else: + train_dataset = full_dataset + val_dataset = select_dataset( + dataset_name="pannuke", + split="validation", + dataset_config=self.run_conf["data"], + transforms=val_transforms, + ) + + return train_dataset, val_dataset + + def get_train_model( + self, + pretrained_encoder: Union[Path, str] = None, + pretrained_model: Union[Path, str] = None, + backbone_type: str = "default", + shared_decoders: bool = False, + regression_loss: bool = False, + **kwargs, + ) -> CellViT: + """Return the CellViT training model + + Args: + pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None. + pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None. + backbone_type (str, optional): Backbone Type. Currently supported are default (None, ViT256, SAM-B, SAM-L, SAM-H). Defaults to None + shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False. + regression_loss (bool, optional): If regression loss is used. Defaults to False + + Returns: + CellViT: CellViT training model with given setup + """ + # reseed needed, due to subprocess seeding compatibility + self.seed_run(self.default_conf["random_seed"]) + + # check for backbones + implemented_backbones = ["default", "UniRepLKNet", "vit256", "sam-b", "sam-l", "sam-h"] + if backbone_type.lower() not in implemented_backbones: + raise NotImplementedError( + f"Unknown Backbone Type - Currently supported are: {implemented_backbones}" + ) + if backbone_type.lower() == "default": + model_class = CellViT + model = model_class( + model256_path = pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + #embed_dim=self.run_conf["model"]["embed_dim"], + in_channels=self.run_conf["model"].get("input_chanels", 3), + #depth=self.run_conf["model"]["depth"], + #change + #depth=(3, 3, 27, 3), + #num_heads=self.run_conf["model"]["num_heads"], + # extract_layers=self.run_conf["model"]["extract_layers"], + + dropout=self.run_conf["training"].get("drop_rate", 0), + #attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0.1), + #regression_loss=regression_loss, + ) + model.load_pretrained_encoder(model.model256_path) + #model.load_state_dict(checkpoint["model"]) + + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model) + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + self.logger.info("Loaded CellViT model") + + self.logger.info(f"\nModel: {model}") + print(f"\nModel: {model}") + model = model.to("cuda") + self.logger.info( + f"\n{summary(model, input_size=(1, 3, 
256, 256), device='cuda')}"
+        )
+        # from thop import profile
+        # input_size=torch.randn(1, 3, 256, 256)
+        # self.logger.info(
+        #     f"\n{profile(model, inputs=(input_size,))}"
+        # )
+        # self.logger.info(f"\n{stat(model, (3, 256, 256))}")
+        total_params = 0
+        trainable_params = 0
+        non_trainable_params = 0
+        for param in model.parameters():
+            param_count = np.prod(param.size())
+            total_params += param_count
+            if param.requires_grad:
+                trainable_params += param_count  # trainable parameters
+            else:
+                non_trainable_params += param_count  # non-trainable parameters
+
+        print(f'Total params: {total_params}')
+        print(f'Trainable params: {trainable_params}')
+        print(f'Non-trainable params: {non_trainable_params}')
+
+        return model
+
+    def get_wandb_init_dict(self) -> dict:
+        pass
+
+    def get_transforms(
+        self, transform_settings: dict, input_shape: int = 256
+    ) -> Tuple[Callable, Callable]:
+        """Get Transformations (Albumentation Transformations). Return both training and validation transformations.
+
+        The transformation settings are given in the following format:
+            key: dict with parameters
+        Example:
+            colorjitter:
+                p: 0.1
+                scale_setting: 0.5
+                scale_color: 0.1
+
+        Further information on how to set up the dictionary, as well as default (recommended) values, is given here:
+            configs/examples/cell_segmentation/train_cellvit.yaml
+
+        Training Transformations:
+            Implemented are:
+                - A.RandomRotate90: Key in transform_settings: randomrotate90, parameters: p
+                - A.HorizontalFlip: Key in transform_settings: horizontalflip, parameters: p
+                - A.VerticalFlip: Key in transform_settings: verticalflip, parameters: p
+                - A.Downscale: Key in transform_settings: downscale, parameters: p, scale
+                - A.Blur: Key in transform_settings: blur, parameters: p, blur_limit
+                - A.GaussNoise: Key in transform_settings: gaussnoise, parameters: p, var_limit
+                - A.ColorJitter: Key in transform_settings: colorjitter, parameters: p, scale_setting, scale_color
+                - A.Superpixels: Key in transform_settings: superpixels, parameters: p
+                - A.ZoomBlur: Key in transform_settings: zoomblur, parameters: p
+                - A.RandomSizedCrop: Key in transform_settings: randomsizedcrop, parameters: p
+                - A.ElasticTransform: Key in transform_settings: elastictransform, parameters: p
+            Always implemented at the end of the pipeline:
+                - A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5))
+
+        Validation Transformations:
+            A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5))
+
+        Args:
+            transform_settings (dict): Dictionary with the transformation settings.
+            input_shape (int, optional): Input shape of the images to be used. Defaults to 256.
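+
+            Illustrative transform_settings dictionary (a minimal sketch; keys follow the list above, probabilities and values are placeholders):
+                randomrotate90:
+                    p: 0.5
+                horizontalflip:
+                    p: 0.5
+                normalize:
+                    mean: [0.5, 0.5, 0.5]
+                    std: [0.5, 0.5, 0.5]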
+ + Returns: + Tuple[Callable, Callable]: Train Transformations, Validation Transformations + + """ + transform_list = [] + transform_settings = {k.lower(): v for k, v in transform_settings.items()} + if "RandomRotate90".lower() in transform_settings: + p = transform_settings["randomrotate90"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.RandomRotate90(p=p)) + if "HorizontalFlip".lower() in transform_settings.keys(): + p = transform_settings["horizontalflip"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.HorizontalFlip(p=p)) + if "VerticalFlip".lower() in transform_settings: + p = transform_settings["verticalflip"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.VerticalFlip(p=p)) + if "Downscale".lower() in transform_settings: + p = transform_settings["downscale"]["p"] + scale = transform_settings["downscale"]["scale"] + if p > 0 and p <= 1: + transform_list.append( + A.Downscale(p=p, scale_max=scale, scale_min=scale) + ) + if "Blur".lower() in transform_settings: + p = transform_settings["blur"]["p"] + blur_limit = transform_settings["blur"]["blur_limit"] + if p > 0 and p <= 1: + transform_list.append(A.Blur(p=p, blur_limit=blur_limit)) + if "GaussNoise".lower() in transform_settings: + p = transform_settings["gaussnoise"]["p"] + var_limit = transform_settings["gaussnoise"]["var_limit"] + if p > 0 and p <= 1: + transform_list.append(A.GaussNoise(p=p, var_limit=var_limit)) + if "ColorJitter".lower() in transform_settings: + p = transform_settings["colorjitter"]["p"] + scale_setting = transform_settings["colorjitter"]["scale_setting"] + scale_color = transform_settings["colorjitter"]["scale_color"] + if p > 0 and p <= 1: + transform_list.append( + A.ColorJitter( + p=p, + brightness=scale_setting, + contrast=scale_setting, + saturation=scale_color, + hue=scale_color / 2, + ) + ) + if "Superpixels".lower() in transform_settings: + p = transform_settings["superpixels"]["p"] + if p > 0 and p <= 1: + transform_list.append( + A.Superpixels( + p=p, + p_replace=0.1, + n_segments=200, + max_size=int(input_shape / 2), + ) + ) + if "ZoomBlur".lower() in transform_settings: + p = transform_settings["zoomblur"]["p"] + if p > 0 and p <= 1: + transform_list.append(A.ZoomBlur(p=p, max_factor=1.05)) + if "RandomSizedCrop".lower() in transform_settings: + p = transform_settings["randomsizedcrop"]["p"] + if p > 0 and p <= 1: + transform_list.append( + A.RandomSizedCrop( + min_max_height=(input_shape / 2, input_shape), + height=input_shape, + width=input_shape, + p=p, + ) + ) + if "ElasticTransform".lower() in transform_settings: + p = transform_settings["elastictransform"]["p"] + if p > 0 and p <= 1: + transform_list.append( + A.ElasticTransform(p=p, sigma=25, alpha=0.5, alpha_affine=15) + ) + + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + transform_list.append(A.Normalize(mean=mean, std=std)) + + train_transforms = A.Compose(transform_list) + val_transforms = A.Compose([A.Normalize(mean=mean, std=std)]) + + return train_transforms, val_transforms + + def get_sampler( + self, train_dataset: CellDataset, strategy: str = "random", gamma: float = 1 + ) -> Sampler: + """Return the sampler (either RandomSampler or WeightedRandomSampler) + + Args: + train_dataset (CellDataset): Dataset for training + strategy (str, optional): Sampling strategy. Defaults to "random" (random sampling). 
+ Implemented are "random", "cell", "tissue", "cell+tissue". + gamma (float, optional): Gamma scaling factor, between 0 and 1. + 1 means total balancing, 0 means original weights. Defaults to 1. + + Raises: + NotImplementedError: Not implemented sampler is selected + + Returns: + Sampler: Sampler for training + """ + if strategy.lower() == "random": + sampling_generator = torch.Generator().manual_seed( + self.default_conf["random_seed"] + ) + sampler = RandomSampler(train_dataset, generator=sampling_generator) + self.logger.info("Using RandomSampler") + else: + # this solution is not accurate when a subset is used since the weights are calculated on the whole training dataset + if isinstance(train_dataset, Subset): + ds = train_dataset.dataset + else: + ds = train_dataset + ds.load_cell_count() + if strategy.lower() == "cell": + weights = ds.get_sampling_weights_cell(gamma) + elif strategy.lower() == "tissue": + weights = ds.get_sampling_weights_tissue(gamma) + elif strategy.lower() == "cell+tissue": + weights = ds.get_sampling_weights_cell_tissue(gamma) + else: + raise NotImplementedError( + "Unknown sampling strategy - Implemented are cell, tissue and cell+tissue" + ) + + if isinstance(train_dataset, Subset): + weights = torch.Tensor([weights[i] for i in train_dataset.indices]) + + sampling_generator = torch.Generator().manual_seed( + self.default_conf["random_seed"] + ) + sampler = WeightedRandomSampler( + weights=weights, + num_samples=len(train_dataset), + replacement=True, + generator=sampling_generator, + ) + + self.logger.info(f"Using Weighted Sampling with strategy: {strategy}") + self.logger.info(f"Unique-Weights: {torch.unique(weights)}") + + return sampler + + def get_trainer(self) -> BaseTrainer: + """Return Trainer matching to this network + + Returns: + BaseTrainer: Trainer + """ + return CellViTTrainer diff --git a/cell_segmentation/experiments/experiment_cpp_net_pannuke.py b/cell_segmentation/experiments/experiment_cpp_net_pannuke.py new file mode 100644 index 0000000000000000000000000000000000000000..1d9bc41101b9b60eaed7493507bf658b66435547 --- /dev/null +++ b/cell_segmentation/experiments/experiment_cpp_net_pannuke.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +# CPP-Net Experiment Class +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import inspect +import os +import sys + + +from base_ml.base_trainer import BaseTrainer + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) + +from pathlib import Path +from typing import Union + +import torch +import torch.nn as nn +from torchinfo import summary + +from base_ml.base_loss import retrieve_loss_fn +from cell_segmentation.experiments.experiment_stardist_pannuke import ( + ExperimentCellViTStarDist, +) +from cell_segmentation.trainer.trainer_cpp_net import CellViTCPPTrainer +from models.segmentation.cell_segmentation.cellvit_cpp_net import ( + CellViT256CPP, + CellViTCPP, + CellViTSAMCPP, +) + + +class ExperimentCellViTCPP(ExperimentCellViTStarDist): + def get_loss_fn(self, loss_fn_settings: dict) -> dict: + """Create a dictionary with loss functions for all branches + + Branches: "dist_map", "stardist_map", "stardist_map_refined", "nuclei_type_map", "tissue_types" + + Args: + loss_fn_settings (dict): Dictionary with the loss function settings. 
Structure + branch_name(str): + loss_name(str): + loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss) + weight(float): Weighting factor as float value + (optional) args: Optional parameters for initializing the loss function + arg_name: value + + If a branch is not provided, the defaults settings (described below) are used. + + For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml + under the section "loss" + + Example: + nuclei_type_map: + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + + Returns: + dict: Dictionary with loss functions for each branch. Structure: + branch_name(str): + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + branch_name(str) + ... + + Default loss dictionary: + dist_map: + bceweighted: + loss_fn: BCEWithLogitsLoss + weight: 1 + stardist_map: + L1LossWeighted: + loss_fn: L1LossWeighted + weight: 1 + nuclei_type_map + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + tissue_type has no default loss and might be skipped + """ + loss_fn_dict = {} + if "dist_map" in loss_fn_settings.keys(): + loss_fn_dict["dist_map"] = {} + for loss_name, loss_sett in loss_fn_settings["dist_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["dist_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["dist_map"] = { + "bceweighted": { + "loss_fn": retrieve_loss_fn("BCEWithLogitsLoss"), + "weight": 1, + }, + } + if "stardist_map" in loss_fn_settings.keys(): + loss_fn_dict["stardist_map"] = {} + for loss_name, loss_sett in loss_fn_settings["stardist_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["stardist_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["stardist_map"] = { + "L1LossWeighted": { + "loss_fn": retrieve_loss_fn("L1LossWeighted"), + "weight": 1, + }, + } + if "stardist_map_refined" in loss_fn_settings.keys(): + loss_fn_dict["stardist_map_refined"] = {} + for loss_name, loss_sett in loss_fn_settings[ + "stardist_map_refined" + ].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["stardist_map_refined"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["stardist_map_refined"] = { + "L1LossWeighted": { + "loss_fn": retrieve_loss_fn("L1LossWeighted"), + "weight": 1, + }, + } + if "nuclei_type_map" in loss_fn_settings.keys(): + loss_fn_dict["nuclei_type_map"] = {} + for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["nuclei_type_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["nuclei_type_map"] = { + "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1}, + "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1}, + } + if "tissue_types" in loss_fn_settings.keys(): + loss_fn_dict["tissue_types"] = {} + 
for loss_name, loss_sett in loss_fn_settings["tissue_types"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["tissue_types"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + # skip default tissue loss! + return loss_fn_dict + + def get_train_model( + self, + pretrained_encoder: Union[Path, str] = None, + pretrained_model: Union[Path, str] = None, + backbone_type: str = "default", + shared_decoders: bool = False, + **kwargs, + ) -> nn.Module: + """Return the CellViTStarDist training model + + Args: + pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None. + pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None. + backbone_type (str, optional): Backbone Type. Currently supported are default (None, ViT256, SAM-B, SAM-L, SAM-H). Defaults to None + shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False. + + Returns: + nn.Module: StarDist training model with given setup + """ + # reseed needed, due to subprocess seeding compatibility + self.seed_run(self.default_conf["random_seed"]) + + # check for backbones + implemented_backbones = [ + "default", + "vit256", + "sam-b", + "sam-l", + "sam-h", + ] + if backbone_type.lower() not in implemented_backbones: + raise NotImplementedError( + f"Unknown Backbone Type - Currently supported are: {implemented_backbones}" + ) + if backbone_type.lower() == "default": + if shared_decoders: + raise NotImplementedError( + "Shared decoders are not implemented for StarDist" + ) + else: + model_class = CellViTCPP + model = model_class( + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + embed_dim=self.run_conf["model"]["embed_dim"], + input_channels=self.run_conf["model"].get("input_channels", 3), + depth=self.run_conf["model"]["depth"], + num_heads=self.run_conf["model"]["num_heads"], + extract_layers=self.run_conf["model"]["extract_layers"], + drop_rate=self.run_conf["training"].get("drop_rate", 0), + attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0), + nrays=self.run_conf["model"].get("nrays", 32), + ) + + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model) + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + self.logger.info("Loaded CellViT model") + + if backbone_type.lower() == "vit256": + if shared_decoders: + raise NotImplementedError( + "Shared decoders are not implemented for StarDist" + ) + else: + model_class = CellViT256CPP + model = model_class( + model256_path=pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + drop_rate=self.run_conf["training"].get("drop_rate", 0), + attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0), + nrays=self.run_conf["model"].get("nrays", 32), + ) + model.load_pretrained_encoder(model.model256_path) + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model, map_location="cpu") + 
self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + model.freeze_encoder() + self.logger.info("Loaded CellVit256 model") + if backbone_type.lower() in ["sam-b", "sam-l", "sam-h"]: + if shared_decoders: + raise NotImplementedError( + "Shared decoders are not implemented for StarDist" + ) + else: + model_class = CellViTSAMCPP + model = model_class( + model_path=pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + vit_structure=backbone_type, + drop_rate=self.run_conf["training"].get("drop_rate", 0), + nrays=self.run_conf["model"].get("nrays", 32), + ) + model.load_pretrained_encoder(model.model_path) + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model, map_location="cpu") + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + model.freeze_encoder() + self.logger.info(f"Loaded CellViT-SAM model with backbone: {backbone_type}") + + self.logger.info(f"\nModel: {model}") + model = model.to("cpu") + self.logger.info( + f"\n{summary(model, input_size=(1, 3, 256, 256), device='cpu')}" + ) + + return model + + def get_trainer(self) -> BaseTrainer: + """Return Trainer matching to this network + + Returns: + BaseTrainer: Trainer + """ + return CellViTCPPTrainer diff --git a/cell_segmentation/experiments/experiment_stardist_pannuke.py b/cell_segmentation/experiments/experiment_stardist_pannuke.py new file mode 100644 index 0000000000000000000000000000000000000000..8c0e344af0e1f6e4cfb51e1390df91360a617d17 --- /dev/null +++ b/cell_segmentation/experiments/experiment_stardist_pannuke.py @@ -0,0 +1,392 @@ +# -*- coding: utf-8 -*- +# StarDist Experiment Class +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import inspect +import os +import sys + +import yaml + +from base_ml.base_trainer import BaseTrainer + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) + +from pathlib import Path +from typing import Callable, Tuple, Union + +import torch +import torch.nn as nn +from torch.optim import Optimizer +from torch.optim.lr_scheduler import ( + ConstantLR, + CosineAnnealingLR, + ExponentialLR, + ReduceLROnPlateau, + SequentialLR, + _LRScheduler, +) +from torch.utils.data import Dataset +from torchinfo import summary + +from base_ml.base_loss import retrieve_loss_fn +from cell_unireplknet.cell_segmentation.experiments.experiment_cellvit_pannuke_origin import ( + ExperimentCellVitPanNuke, +) +from cell_segmentation.trainer.trainer_stardist import CellViTStarDistTrainer +from models.segmentation.cell_segmentation.cellvit_stardist import ( + CellViTStarDist, + CellViT256StarDist, + CellViTSAMStarDist, +) +from models.segmentation.cell_segmentation.cellvit_stardist_shared import ( + CellViTStarDistShared, + CellViT256StarDistShared, + CellViTSAMStarDistShared, +) +from models.segmentation.cell_segmentation.cpp_net_stardist_rn50 import StarDistRN50 + + +class ExperimentCellViTStarDist(ExperimentCellVitPanNuke): + def load_dataset_setup(self, dataset_path: Union[Path, str]) -> None: + """Load the configuration of the PanNuke cell segmentation dataset. 
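+
+        An illustrative dataset_config.yaml (a minimal sketch; the required entries are described below, names and integer ids are placeholders):
+
+            tissue_types:
+                Breast: 0
+                Colon: 1
+            nuclei_types:
+                Background: 0
+                Neoplastic: 1
+                Inflammatory: 2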
+ + The dataset must have a dataset_config.yaml file in their dataset path with the following entries: + * tissue_types: describing the present tissue types with corresponding integer + * nuclei_types: describing the present nuclei types with corresponding integer + + Args: + dataset_path (Union[Path, str]): Path to dataset folder + """ + dataset_config_path = Path(dataset_path) / "dataset_config.yaml" + with open(dataset_config_path, "r") as dataset_config_file: + yaml_config = yaml.safe_load(dataset_config_file) + self.dataset_config = dict(yaml_config) + + def get_loss_fn(self, loss_fn_settings: dict) -> dict: + """Create a dictionary with loss functions for all branches + + Branches: "dist_map", "stardist_map", "nuclei_type_map", "tissue_types" + + Args: + loss_fn_settings (dict): Dictionary with the loss function settings. Structure + branch_name(str): + loss_name(str): + loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss) + weight(float): Weighting factor as float value + (optional) args: Optional parameters for initializing the loss function + arg_name: value + + If a branch is not provided, the defaults settings (described below) are used. + + For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml + under the section "loss" + + Example: + nuclei_type_map: + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + + Returns: + dict: Dictionary with loss functions for each branch. Structure: + branch_name(str): + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + loss_name(str): + "loss_fn": Callable loss function + "weight": weight of the loss since in the end all losses of all branches are added together for backward pass + branch_name(str) + ... 
+ + Default loss dictionary: + dist_map: + bceweighted: + loss_fn: BCEWithLogitsLoss + weight: 1 + stardist_map: + L1LossWeighted: + loss_fn: L1LossWeighted + weight: 1 + nuclei_type_map + bce: + loss_fn: xentropy_loss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + tissue_type has no default loss and might be skipped + """ + loss_fn_dict = {} + if "dist_map" in loss_fn_settings.keys(): + loss_fn_dict["dist_map"] = {} + for loss_name, loss_sett in loss_fn_settings["dist_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["dist_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["dist_map"] = { + "bceweighted": { + "loss_fn": retrieve_loss_fn("BCEWithLogitsLoss"), + "weight": 1, + }, + } + if "stardist_map" in loss_fn_settings.keys(): + loss_fn_dict["stardist_map"] = {} + for loss_name, loss_sett in loss_fn_settings["stardist_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["stardist_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["stardist_map"] = { + "L1LossWeighted": { + "loss_fn": retrieve_loss_fn("L1LossWeighted"), + "weight": 1, + }, + } + if "nuclei_type_map" in loss_fn_settings.keys(): + loss_fn_dict["nuclei_type_map"] = {} + for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["nuclei_type_map"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + else: + loss_fn_dict["nuclei_type_map"] = { + "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1}, + "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1}, + } + if "tissue_types" in loss_fn_settings.keys(): + loss_fn_dict["tissue_types"] = {} + for loss_name, loss_sett in loss_fn_settings["tissue_types"].items(): + parameters = loss_sett.get("args", {}) + loss_fn_dict["tissue_types"][loss_name] = { + "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters), + "weight": loss_sett["weight"], + } + # skip default tissue loss! + return loss_fn_dict + + def get_scheduler(self, scheduler_type: str, optimizer: Optimizer) -> _LRScheduler: + """Get the learning rate scheduler for CellViT + + The configuration of the scheduler is given in the "training" -> "scheduler" section. + Currenlty, "constant", "exponential" and "cosine" schedulers are implemented. + + Required parameters for implemented schedulers: + - "constant": None + - "exponential": gamma (optional, defaults to 0.95) + - "cosine": eta_min (optional, defaults to 1-e5) + - "reducelronplateau": everything hardcoded right now, uses vall los for checking + Args: + scheduler_type (str): Type of scheduler as a string. Currently implemented: + - "constant" (lowering by a factor of ten after 25 epochs, increasing after 50, decreasimg again after 75) + - "exponential" (ExponentialLR with given gamma, gamma defaults to 0.95) + - "cosine" (CosineAnnealingLR, eta_min as parameter, defaults to 1-e5) + optimizer (Optimizer): Optimizer + + Returns: + _LRScheduler: PyTorch Scheduler + """ + implemented_schedulers = [ + "constant", + "exponential", + "cosine", + "reducelronplateau", + ] + if scheduler_type.lower() not in implemented_schedulers: + self.logger.warning( + f"Unknown Scheduler - No scheduler from the list {implemented_schedulers} select. Using default scheduling." 
+ ) + if scheduler_type.lower() == "constant": + scheduler = SequentialLR( + optimizer=optimizer, + schedulers=[ + ConstantLR(optimizer, factor=1, total_iters=25), + ConstantLR(optimizer, factor=0.1, total_iters=25), + ConstantLR(optimizer, factor=1, total_iters=25), + ConstantLR(optimizer, factor=0.1, total_iters=1000), + ], + milestones=[24, 49, 74], + ) + elif scheduler_type.lower() == "exponential": + scheduler = ExponentialLR( + optimizer, + gamma=self.run_conf["training"]["scheduler"].get("gamma", 0.95), + ) + elif scheduler_type.lower() == "cosine": + scheduler = CosineAnnealingLR( + optimizer, + T_max=self.run_conf["training"]["epochs"], + eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5), + ) + elif scheduler_type.lower() == "reducelronplateau": + scheduler = ReduceLROnPlateau( + optimizer, + mode="min", + factor=0.5, + min_lr=0.0000001, + patience=10, + threshold=1e-20, + ) + else: + scheduler = super().get_scheduler(optimizer) + return scheduler + + def get_datasets( + self, + train_transforms: Callable = None, + val_transforms: Callable = None, + ) -> Tuple[Dataset, Dataset]: + """Retrieve training dataset and validation dataset + + Args: + dataset_name (str): Name of dataset to use + train_transforms (Callable, optional): PyTorch transformations for train set. Defaults to None. + val_transforms (Callable, optional): PyTorch transformations for validation set. Defaults to None. + + Returns: + Tuple[Dataset, Dataset]: Training dataset and validation dataset + """ + self.run_conf["data"]["stardist"] = True + train_dataset, val_dataset = super().get_datasets( + train_transforms=train_transforms, + val_transforms=val_transforms, + ) + return train_dataset, val_dataset + + def get_train_model( + self, + pretrained_encoder: Union[Path, str] = None, + pretrained_model: Union[Path, str] = None, + backbone_type: str = "default", + shared_decoders: bool = False, + **kwargs, + ) -> nn.Module: + """Return the CellViTStarDist training model + + Args: + pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None. + pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None. + backbone_type (str, optional): Backbone Type. Currently supported are default (None, ViT256, SAM-B, SAM-L, SAM-H, RN50). Defaults to None + shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False. 
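+
+            Illustrative "model" section of the run configuration consumed here (a minimal sketch, values are placeholders):
+                model:
+                    backbone: default
+                    embed_dim: 768
+                    input_channels: 3
+                    depth: 12
+                    num_heads: 12
+                    extract_layers: [3, 6, 9, 12]
+                    nrays: 32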
+ + Returns: + nn.Module: StarDist training model with given setup + """ + # reseed needed, due to subprocess seeding compatibility + self.seed_run(self.default_conf["random_seed"]) + + # check for backbones + implemented_backbones = ["default", "vit256", "sam-b", "sam-l", "sam-h", "rn50"] + if backbone_type.lower() not in implemented_backbones: + raise NotImplementedError( + f"Unknown Backbone Type - Currently supported are: {implemented_backbones}" + ) + if backbone_type.lower() == "default": + if shared_decoders: + model_class = CellViTStarDistShared + else: + model_class = CellViTStarDist + model = model_class( + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + embed_dim=self.run_conf["model"]["embed_dim"], + input_channels=self.run_conf["model"].get("input_channels", 3), + depth=self.run_conf["model"]["depth"], + num_heads=self.run_conf["model"]["num_heads"], + extract_layers=self.run_conf["model"]["extract_layers"], + drop_rate=self.run_conf["training"].get("drop_rate", 0), + attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0), + nrays=self.run_conf["model"].get("nrays", 32), + ) + + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model) + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + self.logger.info("Loaded CellViT model") + + if backbone_type.lower() == "vit256": + if shared_decoders: + model_class = CellViT256StarDistShared + else: + model_class = CellViT256StarDist + model = model_class( + model256_path=pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + drop_rate=self.run_conf["training"].get("drop_rate", 0), + attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0), + drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0), + nrays=self.run_conf["model"].get("nrays", 32), + ) + model.load_pretrained_encoder(model.model256_path) + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model, map_location="cpu") + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + model.freeze_encoder() + self.logger.info("Loaded CellVit256 model") + if backbone_type.lower() in ["sam-b", "sam-l", "sam-h"]: + if shared_decoders: + model_class = CellViTSAMStarDistShared + else: + model_class = CellViTSAMStarDist + model = model_class( + model_path=pretrained_encoder, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + vit_structure=backbone_type, + drop_rate=self.run_conf["training"].get("drop_rate", 0), + nrays=self.run_conf["model"].get("nrays", 32), + ) + model.load_pretrained_encoder(model.model_path) + if pretrained_model is not None: + self.logger.info( + f"Loading pretrained CellViT model from path: {pretrained_model}" + ) + cellvit_pretrained = torch.load(pretrained_model, map_location="cpu") + self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True)) + model.freeze_encoder() + self.logger.info(f"Loaded CellViT-SAM model with backbone: {backbone_type}") + if backbone_type.lower() == "rn50": + model = StarDistRN50( + 
n_rays=self.run_conf["model"].get("nrays", 32), + n_seg_cls=self.run_conf["data"]["num_nuclei_classes"], + ) + + self.logger.info(f"\nModel: {model}") + model = model.to("cpu") + self.logger.info( + f"\n{summary(model, input_size=(1, 3, 256, 256), device='cpu')}" + ) + + return model + + def get_trainer(self) -> BaseTrainer: + """Return Trainer matching to this network + + Returns: + BaseTrainer: Trainer + """ + return CellViTStarDistTrainer diff --git a/cell_segmentation/inference/__init__.py b/cell_segmentation/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db8c6f77217b6de88ce13d16ba92cfdda4f56bfe --- /dev/null +++ b/cell_segmentation/inference/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Inference related methods for each network type +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen diff --git a/cell_segmentation/inference/cell_detection.py b/cell_segmentation/inference/cell_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..388b473017d276f3cd3751155ff223b850d36649 --- /dev/null +++ b/cell_segmentation/inference/cell_detection.py @@ -0,0 +1,1077 @@ +# -*- coding: utf-8 -*- +# CellViT Inference Method for Patch-Wise Inference on a patches test set/Whole WSI +# +# Detect Cells with our Networks +# Patches dataset needs to have the follwoing requirements: +# Patch-Size must be 1024, with overlap of 64 +# +# We provide preprocessing code here: ./preprocessing/patch_extraction/main_extraction.py +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import inspect +import os +import sys + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) +parentdir = os.path.dirname(parentdir) +sys.path.insert(0, parentdir) + +import argparse +import logging +import uuid +import warnings +from collections import deque +from pathlib import Path +from typing import List, Tuple, Union + +import numpy as np +import pandas as pd +import torch +import torch.nn.functional as F +import tqdm +import ujson +from einops import rearrange +from pandarallel import pandarallel + +# from PIL import Image +from shapely import strtree +from shapely.errors import ShapelyDeprecationWarning +from shapely.geometry import Polygon, MultiPolygon + +# from skimage.color import rgba2rgb +from torch.utils.data import DataLoader +from torchvision import transforms as T + +from cell_segmentation.datasets.cell_graph_datamodel import CellGraphDataWSI +from cell_segmentation.utils.template_geojson import ( + get_template_point, + get_template_segmentation, +) +from datamodel.wsi_datamodel import WSI +from models.segmentation.cell_segmentation.cellvit import ( + CellViT, +) + +from preprocessing.encoding.datasets.patched_wsi_inference import PatchedWSIInference +from utils.file_handling import load_wsi_files_from_csv +from utils.logger import Logger +from utils.tools import unflatten_dict, get_size_of_dict + +warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) +pandarallel.initialize(progress_bar=False, nb_workers=12) + + +# color setup +COLOR_DICT = { + 1: [255, 0, 0], + 2: [34, 221, 77], + 3: [35, 92, 236], + 4: [254, 255, 0], + 5: [255, 159, 68], +} + +TYPE_NUCLEI_DICT = { + 1: "Neoplastic", + 2: "Inflammatory", + 3: "Connective", + 4: "Dead", + 5: "Epithelial", +} + 
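+
+# Illustrative helper (a sketch, not used by the inference pipeline itself): shows how the
+# COLOR_DICT / TYPE_NUCLEI_DICT lookups above can be applied to a per-pixel map of nuclei
+# type ids to obtain an RGB overlay. Assumes `type_map` contains the integer ids defined
+# in TYPE_NUCLEI_DICT, with 0 treated as background (left black).
+def colorize_type_map(type_map: np.ndarray) -> np.ndarray:
+    """Map integer nuclei type ids (H, W) to an RGB image (H, W, 3)."""
+    overlay = np.zeros((*type_map.shape, 3), dtype=np.uint8)
+    for type_id, color in COLOR_DICT.items():
+        overlay[type_map == type_id] = color
+    return overlay
+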
+class CellSegmentationInference: + def __init__( + self, + model_path: Union[Path, str], + gpu: int, + enforce_mixed_precision: bool = False, + ) -> None: + """Cell Segmentation Inference class. + + After setup, a WSI can be processed by calling process_wsi method + + Args: + model_path (Union[Path, str]): Path to model checkpoint + gpu (int): CUDA GPU id to use + enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks. + Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used. + Defaults to False. + """ + self.model_path = Path(model_path) + self.device = f"cuda:{gpu}" + self.__instantiate_logger() + self.__load_model() + self.__load_inference_transforms() + self.__setup_amp(enforce_mixed_precision=enforce_mixed_precision) + + def __instantiate_logger(self) -> None: + """Instantiate logger + + Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log + """ + logger = Logger( + level="INFO", + ) + self.logger = logger.create_logger() + + def __load_model(self) -> None: + """Load model and checkpoint and load the state_dict""" + self.logger.info(f"Loading model: {self.model_path}") + + model_checkpoint = torch.load(self.model_path, map_location="cpu") + + # unpack checkpoint + self.run_conf = unflatten_dict(model_checkpoint["config"], ".") + self.model = self.__get_model(model_type=model_checkpoint["arch"]) + self.logger.info( + self.model.load_state_dict(model_checkpoint["model_state_dict"]) + ) + self.model.eval() + self.model.to(self.device) + + def __get_model( + self, model_type: str + ) -> Union[ + CellViT]: + """Return the trained model for inference + + Args: + model_type (str): Name of the model. Must either be one of: + CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared + + Returns: + Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]: Model + """ + implemented_models = [ + "CellViT", + ] + if model_type not in implemented_models: + raise NotImplementedError( + f"Unknown model type. Please select one of {implemented_models}" + ) + if model_type in ["CellViT", "CellViTShared"]: + if model_type == "CellViT": + model_class = CellViT + model = model_class( + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + embed_dim=self.run_conf["model"]["embed_dim"], + input_channels=self.run_conf["model"].get("input_channels", 3), + depth=self.run_conf["model"]["depth"], + num_heads=self.run_conf["model"]["num_heads"], + extract_layers=self.run_conf["model"]["extract_layers"], + regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + return model + + def __load_inference_transforms(self): + """Load the inference transformations from the run_configuration""" + self.logger.info("Loading inference transformations") + + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + self.inference_transforms = T.Compose( + [T.ToTensor(), T.Normalize(mean=mean, std=std)] + ) + + def __setup_amp(self, enforce_mixed_precision: bool = False) -> None: + """Setup automated mixed precision (amp) for inference. 
+ + Args: + enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks. + Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used. + Defaults to False. + """ + if enforce_mixed_precision: + self.mixed_precision = enforce_mixed_precision + else: + self.mixed_precision = self.run_conf["training"].get( + "mixed_precision", False + ) + + def process_wsi( + self, + wsi: WSI, + subdir_name: str = None, + patch_size: int = 1024, + overlap: int = 64, + batch_size: int = 8, + geojson: bool = False, + ) -> None: + """Process WSI file + + Args: + wsi (WSI): WSI object + subdir_name (str, optional): If provided, a subdir with the given name is created in the cell_detection folder. + Helpful if you need to store different cell detection results next to each other. Defaults to None (no subdir). + patch_size (int, optional): Patch-Size. Default to 1024. + overlap (int, optional): Overlap between patches. Defaults to 64. + batch_size (int, optional): Batch-size for inference. Defaults to 8. + geosjon (bool, optional): If a geojson export should be performed. Defaults to False. + """ + self.logger.info(f"Processing WSI: {wsi.name}") + + wsi_inference_dataset = PatchedWSIInference( + wsi, transform=self.inference_transforms + ) + + num_workers = int(3 / 4 * os.cpu_count()) + if num_workers is None: + num_workers = 16 + num_workers = int(np.clip(num_workers, 1, 2 * batch_size)) + + wsi_inference_dataloader = DataLoader( + dataset=wsi_inference_dataset, + batch_size=batch_size, + num_workers=num_workers, + shuffle=False, + collate_fn=wsi_inference_dataset.collate_batch, + pin_memory=False, + ) + dataset_config = self.run_conf["dataset_config"] + nuclei_types = dataset_config["nuclei_types"] + + if subdir_name is not None: + outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name + else: + outdir = Path(wsi.patched_slide_path) / "cell_detection" + outdir.mkdir(exist_ok=True, parents=True) + + cell_dict_wsi = [] # for storing all cell information + cell_dict_detection = [] # for storing only the centroids + + graph_data = { + "cell_tokens": [], + "positions": [], + "contours": [], + "metadata": {"wsi_metadata": wsi.metadata, "nuclei_types": nuclei_types}, + } + processed_patches = [] + + memory_usage = 0 + cell_count = 0 + + with torch.no_grad(): + + pbar = tqdm.tqdm(wsi_inference_dataloader, total=len(wsi_inference_dataset)) + + for batch in wsi_inference_dataloader: + patches = batch[0].to(self.device) + + metadata = batch[1] + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions = self.model.forward(patches, retrieve_tokens=True) + else: + predictions = self.model.forward(patches, retrieve_tokens=True) + # reshape, apply softmax to segmentation maps + # predictions = self.model.reshape_model_output(predictions_, self.device) + instance_types, tokens = self.get_cell_predictions_with_tokens( + predictions, magnification=wsi.metadata["magnification"] + ) + print(f"Token-Shape: {tokens.shape}") + # unpack each patch from batch + for idx, (patch_instance_types, patch_metadata) in enumerate( + zip(instance_types, metadata) + ): + pbar.update(1) + # add global patch metadata + patch_cell_detection = {} + patch_cell_detection["patch_metadata"] = patch_metadata + patch_cell_detection["type_map"] = dataset_config["nuclei_types"] + + processed_patches.append( + f"{patch_metadata['row']}_{patch_metadata['col']}" + 
) + + # calculate coordinate on highest magnifications + # wsi_scaling_factor = patch_metadata["wsi_metadata"]["downsampling"] + # patch_size = patch_metadata["wsi_metadata"]["patch_size"] + wsi_scaling_factor = wsi.metadata["downsampling"] + patch_size = wsi.metadata["patch_size"] + x_global = int( + patch_metadata["row"] * patch_size * wsi_scaling_factor + - (patch_metadata["row"] + 0.5) * overlap + ) + y_global = int( + patch_metadata["col"] * patch_size * wsi_scaling_factor + - (patch_metadata["col"] + 0.5) * overlap + ) + + # extract cell information + for cell in patch_instance_types.values(): + if cell["type"] == nuclei_types["Background"]: + continue + offset_global = np.array([x_global, y_global]) + centroid_global = cell["centroid"] + np.flip(offset_global) + contour_global = cell["contour"] + np.flip(offset_global) + bbox_global = cell["bbox"] + offset_global + cell_dict = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "contour": contour_global.tolist(), + "type_prob": cell["type_prob"], + "type": cell["type"], + "patch_coordinates": [ + patch_metadata["row"], + patch_metadata["col"], + ], + "cell_status": get_cell_position_marging( + cell["bbox"], 1024, 64 + ), + "offset_global": offset_global.tolist() + } + cell_detection = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "type": cell["type"], + } + if np.max(cell["bbox"]) == 1024 or np.min(cell["bbox"]) == 0: + position = get_cell_position(cell["bbox"], 1024) + cell_dict["edge_position"] = True + cell_dict["edge_information"] = {} + cell_dict["edge_information"]["position"] = position + cell_dict["edge_information"][ + "edge_patches" + ] = get_edge_patch( + position, patch_metadata["row"], patch_metadata["col"] + ) + else: + cell_dict["edge_position"] = False + + cell_dict_wsi.append(cell_dict) + cell_dict_detection.append(cell_detection) + + # get the cell token + bb_index = cell["bbox"] / self.model.patch_size + bb_index[0, :] = np.floor(bb_index[0, :]) + bb_index[1, :] = np.ceil(bb_index[1, :]) + bb_index = bb_index.astype(np.uint8) + print(f"Token-Shape-Patch: {idx.shape}") + cell_token = tokens[ + idx, + :, + bb_index[0, 1] : bb_index[1, 1], + bb_index[0, 0] : bb_index[1, 0], + ] + cell_token = torch.mean( + rearrange(cell_token, "D H W -> (H W) D"), dim=0 + ) + + graph_data["cell_tokens"].append(cell_token) + graph_data["positions"].append(torch.Tensor(centroid_global)) + graph_data["contours"].append(torch.Tensor(contour_global)) + + cell_count = cell_count + 1 + # dict sizes + memory_usage = memory_usage + get_size_of_dict(cell_dict)/(1024*1024) + get_size_of_dict(cell_detection)/(1024*1024) # + sys.getsizeof(cell_token)/(1024*1024) + # pytorch + memory_usage = memory_usage + (cell_token.nelement() * cell_token.element_size())/(1024*1024) + centroid_global.nbytes/(1024*1024) + contour_global.nbytes/(1024*1024) + + pbar.set_postfix(Cells=cell_count, Memory=f"{memory_usage:.2f} MB") + + # post processing + self.logger.info(f"Detected cells before cleaning: {len(cell_dict_wsi)}") + keep_idx = self.post_process_edge_cells(cell_list=cell_dict_wsi) + cell_dict_wsi = [cell_dict_wsi[idx_c] for idx_c in keep_idx] + cell_dict_detection = [cell_dict_detection[idx_c] for idx_c in keep_idx] + graph_data["cell_tokens"] = [ + graph_data["cell_tokens"][idx_c] for idx_c in keep_idx + ] + graph_data["positions"] = [graph_data["positions"][idx_c] for idx_c in keep_idx] + graph_data["contours"] = [graph_data["contours"][idx_c] for idx_c in keep_idx] + self.logger.info(f"Detected 
cells after cleaning: {len(keep_idx)}") + + self.logger.info( + f"Processed all patches. Storing final results: {str(outdir / f'cells.json')} and cell_detection.json" + ) + cell_dict_wsi = { + "wsi_metadata": wsi.metadata, + "processed_patches": processed_patches, + "type_map": dataset_config["nuclei_types"], + "cells": cell_dict_wsi, + } + with open(str(outdir / "cells.json"), "w") as outfile: + ujson.dump(cell_dict_wsi, outfile, indent=2) + if geojson: + self.logger.info("Converting segmentation to geojson") + geojson_list = self.convert_geojson(cell_dict_wsi["cells"], True) + with open(str(str(outdir / "cells.geojson")), "w") as outfile: + ujson.dump(geojson_list, outfile, indent=2) + + cell_dict_detection = { + "wsi_metadata": wsi.metadata, + "processed_patches": processed_patches, + "type_map": dataset_config["nuclei_types"], + "cells": cell_dict_detection, + } + with open(str(outdir / "cell_detection.json"), "w") as outfile: + ujson.dump(cell_dict_detection, outfile, indent=2) + if geojson: + self.logger.info("Converting detection to geojson") + geojson_list = self.convert_geojson(cell_dict_wsi["cells"], False) + with open(str(str(outdir / "cell_detection.geojson")), "w") as outfile: + ujson.dump(geojson_list, outfile, indent=2) + + self.logger.info( + f"Create cell graph with embeddings and save it under: {str(outdir / 'cells.pt')}" + ) + graph = CellGraphDataWSI( + x=torch.stack(graph_data["cell_tokens"]), + positions=torch.stack(graph_data["positions"]), + contours=graph_data["contours"], + metadata=graph_data["metadata"], + ) + torch.save(graph, outdir / "cells.pt") + + cell_stats_df = pd.DataFrame(cell_dict_wsi["cells"]) + cell_stats = dict(cell_stats_df.value_counts("type")) + nuclei_types_inverse = {v: k for k, v in nuclei_types.items()} + verbose_stats = {nuclei_types_inverse[k]: v for k, v in cell_stats.items()} + self.logger.info(f"Finished with cell detection for WSI {wsi.name}") + self.logger.info("Stats:") + self.logger.info(f"{verbose_stats}") + + def get_cell_predictions_with_tokens( + self, predictions: dict, magnification: int = 40 + ) -> Tuple[List[dict], torch.Tensor]: + """Take the raw predictions, apply softmax and calculate type instances + + Args: + predictions (dict): Network predictions with tokens. Keys: + magnification (int, optional): WSI magnification. Defaults to 40. + + Returns: + Tuple[List[dict], torch.Tensor]: + * List[dict]: List with a dictionary for each batch element with cell seg results + Contains bbox, contour, 2D-position, type and type_prob for each cell + * List[dict]: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim) + """ + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=1 + ) # shape: (batch_size, 2, H, W) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=1 + ) # shape: (batch_size, num_nuclei_classes, H, W) + # get the instance types + ( + _, + instance_types, + ) = self.model.calculate_instance_map(predictions, magnification=magnification) + + tokens = predictions["tokens"].to("cpu") + + return instance_types, tokens + + def post_process_edge_cells(self, cell_list: List[dict]) -> List[int]: + """Use the CellPostProcessor to remove multiple cells and merge due to overlap + + Args: + cell_list (List[dict]): List with cell-dictionaries. 
Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + + Returns: + List[int]: List with integers of cells that should be kept + """ + cell_processor = CellPostProcessor(cell_list, self.logger) + cleaned_cells = cell_processor.post_process_cells() + + return list(cleaned_cells.index.values) + + def convert_geojson( + self, cell_list: list[dict], polygons: bool = False + ) -> List[dict]: + """Convert a list of cells to a geojson object + + Either a segmentation object (polygon) or detection points are converted + + Args: + cell_list (list[dict]): Cell list with dict entry for each cell. + Required keys for detection: + * type + * centroid + Required keys for segmentation: + * type + * contour + polygons (bool, optional): If polygon segmentations (True) or detection points (False). Defaults to False. + + Returns: + List[dict]: Geojson like list + """ + if polygons: + cell_segmentation_df = pd.DataFrame(cell_list) + detected_types = sorted(cell_segmentation_df.type.unique()) + geojson_placeholder = [] + for cell_type in detected_types: + cells = cell_segmentation_df[cell_segmentation_df["type"] == cell_type] + contours = cells["contour"].to_list() + final_c = [] + for c in contours: + c.append(c[0]) + final_c.append([c]) + + cell_geojson_object = get_template_segmentation() + cell_geojson_object["id"] = str(uuid.uuid4()) + cell_geojson_object["geometry"]["coordinates"] = final_c + cell_geojson_object["properties"]["classification"][ + "name" + ] = TYPE_NUCLEI_DICT[cell_type] + cell_geojson_object["properties"]["classification"][ + "color" + ] = COLOR_DICT[cell_type] + geojson_placeholder.append(cell_geojson_object) + else: + cell_detection_df = pd.DataFrame(cell_list) + detected_types = sorted(cell_detection_df.type.unique()) + geojson_placeholder = [] + for cell_type in detected_types: + cells = cell_detection_df[cell_detection_df["type"] == cell_type] + centroids = cells["centroid"].to_list() + cell_geojson_object = get_template_point() + cell_geojson_object["id"] = str(uuid.uuid4()) + cell_geojson_object["geometry"]["coordinates"] = centroids + cell_geojson_object["properties"]["classification"][ + "name" + ] = TYPE_NUCLEI_DICT[cell_type] + cell_geojson_object["properties"]["classification"][ + "color" + ] = COLOR_DICT[cell_type] + geojson_placeholder.append(cell_geojson_object) + return geojson_placeholder + + +class CellPostProcessor: + def __init__(self, cell_list: List[dict], logger: logging.Logger) -> None: + """POst-Processing a list of cells from one WSI + + Args: + cell_list (List[dict]): List with cell-dictionaries. 
Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + logger (logging.Logger): Logger + """ + self.logger = logger + self.logger.info("Initializing Cell-Postprocessor") + self.cell_df = pd.DataFrame(cell_list) + self.cell_df = self.cell_df.parallel_apply(convert_coordinates, axis=1) + + self.mid_cells = self.cell_df[ + self.cell_df["cell_status"] == 0 + ] # cells in the mid + self.cell_df_margin = self.cell_df[ + self.cell_df["cell_status"] != 0 + ] # cells either torching the border or margin + + def post_process_cells(self) -> pd.DataFrame: + """Main Post-Processing coordinator, entry point + + Returns: + pd.DataFrame: DataFrame with post-processed and cleaned cells + """ + self.logger.info("Finding edge-cells for merging") + cleaned_edge_cells = self._clean_edge_cells() + self.logger.info("Removal of cells detected multiple times") + cleaned_edge_cells = self._remove_overlap(cleaned_edge_cells) + + # merge with mid cells + postprocessed_cells = pd.concat( + [self.mid_cells, cleaned_edge_cells] + ).sort_index() + return postprocessed_cells + + def _clean_edge_cells(self) -> pd.DataFrame: + """Create a DataFrame that just contains all margin cells (cells inside the margin, not touching the border) + and border/edge cells (touching border) with no overlapping equivalent (e.g, if patch has no neighbour) + + Returns: + pd.DataFrame: Cleaned DataFrame + """ + + margin_cells = self.cell_df_margin[ + self.cell_df_margin["edge_position"] == 0 + ] # cells at the margin, but not touching the border + edge_cells = self.cell_df_margin[ + self.cell_df_margin["edge_position"] == 1 + ] # cells touching the border + existing_patches = list(set(self.cell_df_margin["patch_coordinates"].to_list())) + + edge_cells_unique = pd.DataFrame( + columns=self.cell_df_margin.columns + ) # cells torching the border without having an overlap from other patches + + for idx, cell_info in edge_cells.iterrows(): + edge_information = dict(cell_info["edge_information"]) + edge_patch = edge_information["edge_patches"][0] + edge_patch = f"{edge_patch[0]}_{edge_patch[1]}" + if edge_patch not in existing_patches: + edge_cells_unique.loc[idx, :] = cell_info + + cleaned_edge_cells = pd.concat([margin_cells, edge_cells_unique]) + + return cleaned_edge_cells.sort_index() + + def _remove_overlap(self, cleaned_edge_cells: pd.DataFrame) -> pd.DataFrame: + """Remove overlapping cells from provided DataFrame + + Args: + cleaned_edge_cells (pd.DataFrame): DataFrame that should be cleaned + + Returns: + pd.DataFrame: Cleaned DataFrame + """ + merged_cells = cleaned_edge_cells + + for iteration in range(20): + poly_list = [] + for idx, cell_info in merged_cells.iterrows(): + poly = Polygon(cell_info["contour"]) + if not poly.is_valid: + self.logger.debug("Found invalid polygon - Fixing with buffer 0") + multi = poly.buffer(0) + if isinstance(multi, MultiPolygon): + if len(multi) > 1: + poly_idx = np.argmax([p.area for p in multi]) + poly = multi[poly_idx] + poly = Polygon(poly) + else: + poly = multi[0] + poly = Polygon(poly) + else: + poly = Polygon(multi) + poly.uid = idx + poly_list.append(poly) + + # use an strtree for fast querying + tree = strtree.STRtree(poly_list) + + merged_idx = deque() + iterated_cells = set() + overlaps = 0 + + for query_poly in poly_list: + if query_poly.uid not in iterated_cells: + intersected_polygons = tree.query( + query_poly + ) # this also contains a self-intersection + if ( + len(intersected_polygons) > 1 + ): # we have more at 
least one intersection with another cell + submergers = [] # all cells that overlap with query + for inter_poly in intersected_polygons: + if ( + inter_poly.uid != query_poly.uid + and inter_poly.uid not in iterated_cells + ): + if ( + query_poly.intersection(inter_poly).area + / query_poly.area + > 0.01 + or query_poly.intersection(inter_poly).area + / inter_poly.area + > 0.01 + ): + overlaps = overlaps + 1 + submergers.append(inter_poly) + iterated_cells.add(inter_poly.uid) + # catch block: empty list -> some cells are touching, but not overlapping strongly enough + if len(submergers) == 0: + merged_idx.append(query_poly.uid) + else: # merging strategy: take the biggest cell, other merging strategies needs to get implemented + selected_poly_index = np.argmax( + np.array([p.area for p in submergers]) + ) + selected_poly_uid = submergers[selected_poly_index].uid + merged_idx.append(selected_poly_uid) + else: + # no intersection, just add + merged_idx.append(query_poly.uid) + iterated_cells.add(query_poly.uid) + + self.logger.info( + f"Iteration {iteration}: Found overlap of # cells: {overlaps}" + ) + if overlaps == 0: + self.logger.info("Found all overlapping cells") + break + elif iteration == 20: + self.logger.info( + f"Not all doubled cells removed, still {overlaps} to remove. For perfomance issues, we stop iterations now. Please raise an issue in git or increase number of iterations." + ) + merged_cells = cleaned_edge_cells.loc[ + cleaned_edge_cells.index.isin(merged_idx) + ].sort_index() + + return merged_cells.sort_index() + + +def convert_coordinates(row: pd.Series) -> pd.Series: + """Convert a row from x,y type to one string representation of the patch position for fast querying + Repr: x_y + + Args: + row (pd.Series): Row to be processed + + Returns: + pd.Series: Processed Row + """ + x, y = row["patch_coordinates"] + row["patch_row"] = x + row["patch_col"] = y + row["patch_coordinates"] = f"{x}_{y}" + return row + + +def get_cell_position(bbox: np.ndarray, patch_size: int = 1024) -> List[int]: + """Get cell position as a list + + Entry is 1, if cell touches the border: [top, right, down, left] + + Args: + bbox (np.ndarray): Bounding-Box of cell + patch_size (int, optional): Patch-size. Defaults to 1024. + + Returns: + List[int]: List with 4 integers for each position + """ + # bbox = 2x2 array in h, w style + # bbox[0,0] = upper position (height) + # bbox[1,0] = lower dimension (height) + # boox[0,1] = left position (width) + # bbox[1,1] = right position (width) + # bbox[:,0] -> x dimensions + top, left, down, right = False, False, False, False + if bbox[0, 0] == 0: + top = True + if bbox[0, 1] == 0: + left = True + if bbox[1, 0] == patch_size: + down = True + if bbox[1, 1] == patch_size: + right = True + position = [top, right, down, left] + position = [int(pos) for pos in position] + + return position + + +def get_cell_position_marging( + bbox: np.ndarray, patch_size: int = 1024, margin: int = 64 +) -> int: + """Get the status of the cell, describing the cell position + + A cell is either in the mid (0) or at one of the borders (1-8) + + # Numbers are assigned clockwise, starting from top left + # i.e., top left = 1, top = 2, top right = 3, right = 4, bottom right = 5 bottom = 6, bottom left = 7, left = 8 + # Mid status is denoted by 0 + + Args: + bbox (np.ndarray): Bounding Box of cell + patch_size (int, optional): Patch-Size. Defaults to 1024. + margin (int, optional): Margin-Size. Defaults to 64. 
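+
+        Example (illustrative values, not from the original code): with the
+        defaults patch_size=1024 and margin=64, a bbox of
+        [[10, 500], [40, 560]] (rows 10-40, columns 500-560) lies entirely
+        inside the top margin band, so the returned cell status is 2 (top).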
+ + Returns: + int: Cell Status + """ + cell_status = None + if np.max(bbox) > patch_size - margin or np.min(bbox) < margin: + if bbox[0, 0] < margin: + # top left, top or top right + if bbox[0, 1] < margin: + # top left + cell_status = 1 + elif bbox[1, 1] > patch_size - margin: + # top right + cell_status = 3 + else: + # top + cell_status = 2 + elif bbox[1, 1] > patch_size - margin: + # top right, right or bottom right + if bbox[1, 0] > patch_size - margin: + # bottom right + cell_status = 5 + else: + # right + cell_status = 4 + elif bbox[1, 0] > patch_size - margin: + # bottom right, bottom, bottom left + if bbox[0, 1] < margin: + # bottom left + cell_status = 7 + else: + # bottom + cell_status = 6 + elif bbox[0, 1] < margin: + # bottom left, left, top left, but only left is left + cell_status = 8 + else: + cell_status = 0 + + return cell_status + + +def get_edge_patch(position, row, col): + # row starting on bottom or on top? + if position == [1, 0, 0, 0]: + # top + return [[row - 1, col]] + if position == [1, 1, 0, 0]: + # top and right + return [[row - 1, col], [row - 1, col + 1], [row, col + 1]] + if position == [0, 1, 0, 0]: + # right + return [[row, col + 1]] + if position == [0, 1, 1, 0]: + # right and down + return [[row, col + 1], [row + 1, col + 1], [row + 1, col]] + if position == [0, 0, 1, 0]: + # down + return [[row + 1, col]] + if position == [0, 0, 1, 1]: + # down and left + return [[row + 1, col], [row + 1, col - 1], [row, col - 1]] + if position == [0, 0, 0, 1]: + # left + return [[row, col - 1]] + if position == [1, 0, 0, 1]: + # left and top + return [[row, col - 1], [row - 1, col - 1], [row - 1, col]] + + +# CLI +class InferenceWSIParser: + """Parser""" + + def __init__(self) -> None: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Perform CellViT inference for given run-directory with model checkpoints and logs. Just for CellViT, not for StarDist models", + ) + requiredNamed = parser.add_argument_group("required named arguments") + requiredNamed.add_argument( + "--model", + type=str, + help="Model checkpoint file that is used for inference", + required=True, + ) + parser.add_argument( + "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0 + ) + parser.add_argument( + "--magnification", + type=float, + help="Network magnification. Is used for checking patch magnification such that we use the correct resolution for network. Default: 40", + default=40, + ) + parser.add_argument( + "--enforce_amp", + action="store_true", + help="Whether to use mixed precision for inference (enforced). Otherwise network default training settings are used." + " Default: False", + ) + parser.add_argument( + "--batch_size", + type=int, + help="Inference batch-size. Default: 8", + default=8, + ) + parser.add_argument( + "--outdir_subdir", + type=str, + help="If provided, a subdir with the given name is created in the cell_detection folder where the results are stored. 
Default: None", + default=None, + ) + parser.add_argument( + "--geojson", + action="store_true", + help="Set this flag to export results as additional geojson files for loading them into Software like QuPath.", + ) + + # subparsers for either loading a WSI or a WSI folder + + # WSI + subparsers = parser.add_subparsers( + dest="command", + description="Main run command for either performing inference on single WSI-file or on whole dataset", + ) + subparser_wsi = subparsers.add_parser( + "process_wsi", description="Process a single WSI file" + ) + subparser_wsi.add_argument( + "--wsi_path", + type=str, + help="Path to WSI file", + ) + subparser_wsi.add_argument( + "--patched_slide_path", + type=str, + help="Path to patched WSI file (specific WSI file, not parent path of patched slide dataset)", + ) + + # Dataset + subparser_dataset = subparsers.add_parser( + "process_dataset", + description="Process a whole dataset", + ) + subparser_dataset.add_argument( + "--wsi_paths", type=str, help="Path to the folder where all WSI are stored" + ) + subparser_dataset.add_argument( + "--patch_dataset_path", + type=str, + help="Path to the folder where the patch dataset is stored", + ) + subparser_dataset.add_argument( + "--filelist", + type=str, + help="Filelist with WSI to process. Must be a .csv file with one row denoting the filenames (named 'Filename')." + "If not provided, all WSI files with given ending in the filelist are processed.", + default=None, + ) + subparser_dataset.add_argument( + "--wsi_extension", + type=str, + help="The extension types used for the WSI files, see configs.python.config (WSI_EXT)", + default="svs", + ) + + self.parser = parser + + def parse_arguments(self) -> dict: + opt = self.parser.parse_args() + return vars(opt) + + +def check_wsi(wsi: WSI, magnification: float = 40.0): + """Check if provided patched WSI is having the right settings + + Args: + wsi (WSI): WSI to check + magnification (float, optional): Check magnification. Defaults to 40.0. + + Raises: + RuntimeError: The magnification is not matching to the network input magnification. + RuntimeError: The patch-size is not devisible by 256. + RunTimeError: The patch-size is not 1024 + RunTimeError: The overlap is not 64px sized + """ + if wsi.metadata["magnification"] is not None: + patch_magnification = float(wsi.metadata["magnification"]) + else: + patch_magnification = float( + float(wsi.metadata["base_magnification"]) / wsi.metadata["downsampling"] + ) + patch_size = int(wsi.metadata["patch_size"]) + + if patch_magnification != magnification: + raise RuntimeError( + "The magnification is not matching to the network input magnification." 
+ ) + if (patch_size % 256) != 0: + raise RuntimeError("The patch-size must be devisible by 256.") + if wsi.metadata["patch_size"] != 1024: + raise RuntimeError("The patch-size must be 1024.") + if wsi.metadata["patch_overlap"] != 64: + raise RuntimeError("The patch-overlap must be 64") + + +if __name__ == "__main__": + configuration_parser = InferenceWSIParser() + configuration = configuration_parser.parse_arguments() + command = configuration["command"] + + cell_segmentation = CellSegmentationInference( + model_path=configuration["model"], + gpu=configuration["gpu"], + enforce_mixed_precision=configuration["enforce_amp"], + ) + + if command.lower() == "process_wsi": + cell_segmentation.logger.info("Processing single WSI file") + wsi_path = Path(configuration["wsi_path"]) + wsi_name = wsi_path.stem + wsi_file = WSI( + name=wsi_name, + patient=wsi_name, + slide_path=wsi_path, + patched_slide_path=configuration["patched_slide_path"], + ) + check_wsi(wsi=wsi_file, magnification=configuration["magnification"]) + cell_segmentation.process_wsi( + wsi_file, + subdir_name=configuration["outdir_subdir"], + geojson=configuration["geojson"], + batch_size=configuration["batch_size"], + ) + + elif command.lower() == "process_dataset": + cell_segmentation.logger.info("Processing whole dataset") + if configuration["filelist"] is not None: + if Path(configuration["filelist"]).suffix != ".csv": + raise ValueError("Filelist must be a .csv file!") + cell_segmentation.logger.info( + f"Loading files from filelist {configuration['filelist']}" + ) + wsi_filelist = load_wsi_files_from_csv( + csv_path=configuration["filelist"], + wsi_extension=configuration["wsi_extension"], + ) + wsi_filelist = [ + Path(configuration["wsi_paths"]) / f + if configuration["wsi_paths"] not in f + else Path(f) + for f in wsi_filelist + ] + else: + cell_segmentation.logger.info( + f"Loading all files from folder {configuration['wsi_paths']}. No filelist provided." 
+ ) + wsi_filelist = [ + f + for f in sorted( + Path(configuration["wsi_paths"]).glob( + f"**/*.{configuration['wsi_extension']}" + ) + ) + ] + for i, wsi_path in enumerate(wsi_filelist): + wsi_path = Path(wsi_path) + wsi_name = wsi_path.stem + patched_slide_path = Path(configuration["patch_dataset_path"]) / wsi_name + cell_segmentation.logger.info(f"File {i+1}/{len(wsi_filelist)}: {wsi_name}") + wsi_file = WSI( + name=wsi_name, + patient=wsi_name, + slide_path=wsi_path, + patched_slide_path=patched_slide_path, + ) + check_wsi(wsi=wsi_file, magnification=configuration["magnification"]) + cell_segmentation.process_wsi( + wsi_file, + subdir_name=configuration["outdir_subdir"], + geojson=configuration["geojson"], + batch_size=configuration["batch_size"], + ) diff --git a/cell_segmentation/inference/cell_detection_256.py b/cell_segmentation/inference/cell_detection_256.py new file mode 100644 index 0000000000000000000000000000000000000000..6c8eed2e43cfe501edceec520e83b54d7f9ec0e9 --- /dev/null +++ b/cell_segmentation/inference/cell_detection_256.py @@ -0,0 +1,1111 @@ +# -*- coding: utf-8 -*- +# CellViT Inference Method for Patch-Wise Inference on a patches test set/Whole WSI +# +# Detect Cells with our Networks +# Patches dataset needs to have the follwoing requirements: +# Patch-Size must be 256, with overlap of 64 +# +# We provide preprocessing code here: ./preprocessing/patch_extraction/main_extraction.py +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import inspect +import os +import sys + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) +parentdir = os.path.dirname(parentdir) +sys.path.insert(0, parentdir) + +import argparse +import logging +import uuid +import warnings +from collections import deque +from pathlib import Path +from typing import List, Tuple, Union + +import numpy as np +import pandas as pd +import torch +import torch.nn.functional as F +import tqdm +import ujson +from einops import rearrange +from pandarallel import pandarallel + +# from PIL import Image +from shapely import strtree +from shapely.errors import ShapelyDeprecationWarning +from shapely.geometry import Polygon, MultiPolygon + +# from skimage.color import rgba2rgb +from torch.utils.data import DataLoader +from torchvision import transforms as T + +from cell_segmentation.datasets.cell_graph_datamodel import CellGraphDataWSI +from cell_segmentation.utils.template_geojson import ( + get_template_point, + get_template_segmentation, +) +from datamodel.wsi_datamodel import WSI +from models.segmentation.cell_segmentation.cellvit import ( + CellViT, + CellViT256, + CellViTSAM, +) +from models.segmentation.cell_segmentation.cellvit_shared import ( + CellViT256Shared, + CellViTSAMShared, + CellViTShared, +) +from preprocessing.encoding.datasets.patched_wsi_inference import PatchedWSIInference +from utils.file_handling import load_wsi_files_from_csv +from utils.logger import Logger +from utils.tools import unflatten_dict + +warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) +pandarallel.initialize(progress_bar=False, nb_workers=12) + + +# color setup +COLOR_DICT = { + 1: [255, 0, 0], + 2: [34, 221, 77], + 3: [35, 92, 236], + 4: [254, 255, 0], + 5: [255, 159, 68], +} + +TYPE_NUCLEI_DICT = { + 1: "Neoplastic", + 2: "Inflammatory", + 3: "Connective", + 4: "Dead", + 5: "Epithelial", +} + + +class 
CellSegmentationInference: + def __init__( + self, + model_path: Union[Path, str], + gpu: int, + enforce_mixed_precision: bool = False, + ) -> None: + """Cell Segmentation Inference class. + + After setup, a WSI can be processed by calling process_wsi method + + Args: + model_path (Union[Path, str]): Path to model checkpoint + gpu (int): CUDA GPU id to use + enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks. + Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used. + Defaults to False. + """ + self.model_path = Path(model_path) + self.device = f"cuda:{gpu}" + self.__instantiate_logger() + self.__load_model() + self.__load_inference_transforms() + self.__setup_amp(enforce_mixed_precision=enforce_mixed_precision) + + def __instantiate_logger(self) -> None: + """Instantiate logger + + Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log + """ + logger = Logger( + level="INFO", + ) + self.logger = logger.create_logger() + + def __load_model(self) -> None: + """Load model and checkpoint and load the state_dict""" + self.logger.info(f"Loading model: {self.model_path}") + + model_checkpoint = torch.load(self.model_path, map_location="cpu") + + # unpack checkpoint + self.run_conf = unflatten_dict(model_checkpoint["config"], ".") + self.model = self.__get_model(model_type=model_checkpoint["arch"]) + self.logger.info( + self.model.load_state_dict(model_checkpoint["model_state_dict"]) + ) + self.model.eval() + self.model.to(self.device) + + def __get_model( + self, model_type: str + ) -> Union[ + CellViT, + CellViTShared, + CellViT256, + CellViT256Shared, + CellViTSAM, + CellViTSAMShared, + ]: + """Return the trained model for inference + + Args: + model_type (str): Name of the model. Must either be one of: + CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared + + Returns: + Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]: Model + """ + implemented_models = [ + "CellViT", + "CellViTShared", + "CellViT256", + "CellViT256Shared", + "CellViTSAM", + "CellViTSAMShared", + ] + if model_type not in implemented_models: + raise NotImplementedError( + f"Unknown model type. 
Please select one of {implemented_models}" + ) + if model_type in ["CellViT", "CellViTShared"]: + if model_type == "CellViT": + model_class = CellViT + elif model_type == "CellViTShared": + model_class = CellViTShared + model = model_class( + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + embed_dim=self.run_conf["model"]["embed_dim"], + input_channels=self.run_conf["model"].get("input_channels", 3), + depth=self.run_conf["model"]["depth"], + num_heads=self.run_conf["model"]["num_heads"], + extract_layers=self.run_conf["model"]["extract_layers"], + regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + + elif model_type in ["CellViT256", "CellViT256Shared"]: + if model_type == "CellViT256": + model_class = CellViT256 + elif model_type == "CellViTVIT256Shared": + model_class = CellViT256Shared + model = model_class( + model256_path=None, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + elif model_type in ["CellViTSAM", "CellViTSAMShared"]: + if model_type == "CellViTSAM": + model_class = CellViTSAM + elif model_type == "CellViTSAMShared": + model_class = CellViTSAMShared + model = model_class( + model_path=None, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + vit_structure=self.run_conf["model"]["backbone"], + regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + return model + + def __load_inference_transforms(self): + """Load the inference transformations from the run_configuration""" + self.logger.info("Loading inference transformations") + + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + self.inference_transforms = T.Compose( + [T.ToTensor(), T.Normalize(mean=mean, std=std)] + ) + + def __setup_amp(self, enforce_mixed_precision: bool = False) -> None: + """Setup automated mixed precision (amp) for inference. + + Args: + enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks. + Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used. + Defaults to False. + """ + if enforce_mixed_precision: + self.mixed_precision = enforce_mixed_precision + else: + self.mixed_precision = self.run_conf["training"].get( + "mixed_precision", False + ) + + def process_wsi( + self, + wsi: WSI, + subdir_name: str = None, + patch_size: int = 256, + overlap: int = 64, + batch_size: int = 8, + geojson: bool = False, + ) -> None: + """Process WSI file + + Args: + wsi (WSI): WSI object + subdir_name (str, optional): If provided, a subdir with the given name is created in the cell_detection folder. + Helpful if you need to store different cell detection results next to each other. Defaults to None (no subdir). + patch_size (int, optional): Patch-Size. Default to 256. + overlap (int, optional): Overlap between patches. Defaults to 64. + batch_size (int, optional): Batch-size for inference. Defaults to 8. 
+ geosjon (bool, optional): If a geojson export should be performed. Defaults to False. + """ + self.logger.info(f"Processing WSI: {wsi.name}") + + wsi_inference_dataset = PatchedWSIInference( + wsi, transform=self.inference_transforms + ) + + num_workers = int(3 / 4 * os.cpu_count()) + if num_workers is None: + num_workers = 16 + num_workers = int(np.clip(num_workers, 1, 2 * batch_size)) + + wsi_inference_dataloader = DataLoader( + dataset=wsi_inference_dataset, + batch_size=batch_size, + num_workers=num_workers, + shuffle=False, + collate_fn=wsi_inference_dataset.collate_batch, + pin_memory=False, + ) + dataset_config = self.run_conf["dataset_config"] + nuclei_types = dataset_config["nuclei_types"] + + if subdir_name is not None: + outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name + else: + outdir = Path(wsi.patched_slide_path) / "cell_detection" + outdir.mkdir(exist_ok=True, parents=True) + + cell_dict_wsi = [] # for storing all cell information + cell_dict_detection = [] # for storing only the centroids + + graph_data = { + "cell_tokens": [], + "positions": [], + "contours": [], + "metadata": {"wsi_metadata": wsi.metadata, "nuclei_types": nuclei_types}, + } + processed_patches = [] + + with torch.no_grad(): + for batch in tqdm.tqdm( + wsi_inference_dataloader, total=len(wsi_inference_dataloader) + ): + patches = batch[0].to(self.device) + + metadata = batch[1] + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions = self.model.forward(patches, retrieve_tokens=True) + else: + predictions = self.model.forward(patches, retrieve_tokens=True) + # reshape, apply softmax to segmentation maps + # predictions = self.model.reshape_model_output(predictions_, self.device) + instance_types, tokens = self.get_cell_predictions_with_tokens( + predictions, magnification=wsi.metadata["magnification"] + ) + + # unpack each patch from batch + for idx, (patch_instance_types, patch_metadata) in enumerate( + zip(instance_types, metadata) + ): + # add global patch metadata + patch_cell_detection = {} + patch_cell_detection["patch_metadata"] = patch_metadata + patch_cell_detection["type_map"] = dataset_config["nuclei_types"] + + processed_patches.append( + f"{patch_metadata['row']}_{patch_metadata['col']}" + ) + + # calculate coordinate on highest magnifications + # wsi_scaling_factor = patch_metadata["wsi_metadata"]["downsampling"] + # patch_size = patch_metadata["wsi_metadata"]["patch_size"] + wsi_scaling_factor = wsi.metadata["downsampling"] + patch_size = wsi.metadata["patch_size"] + x_global = int( + patch_metadata["row"] * patch_size * wsi_scaling_factor + - (patch_metadata["row"] + 0.5) * overlap + ) + y_global = int( + patch_metadata["col"] * patch_size * wsi_scaling_factor + - (patch_metadata["col"] + 0.5) * overlap + ) + + # extract cell information + for cell in patch_instance_types.values(): + if cell["type"] == nuclei_types["Background"]: + continue + offset_global = np.array([x_global, y_global]) + centroid_global = cell["centroid"] + np.flip(offset_global) + contour_global = cell["contour"] + np.flip(offset_global) + bbox_global = cell["bbox"] + offset_global + cell_dict = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "contour": contour_global.tolist(), + "type_prob": cell["type_prob"], + "type": cell["type"], + "patch_coordinates": [ + patch_metadata["row"], + patch_metadata["col"], + ], + "cell_status": get_cell_position_marging( + cell["bbox"], 256, 64 + ), + "offset_global": 
offset_global.tolist() + # optional: Local positional information + # "bbox_local": cell["bbox"].tolist(), + # "centroid_local": cell["centroid"].tolist(), + # "contour_local": cell["contour"].tolist(), + } + cell_detection = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "type": cell["type"], + } + if np.max(cell["bbox"]) == 256 or np.min(cell["bbox"]) == 0: + position = get_cell_position(cell["bbox"], 256) + cell_dict["edge_position"] = True + cell_dict["edge_information"] = {} + cell_dict["edge_information"]["position"] = position + cell_dict["edge_information"][ + "edge_patches" + ] = get_edge_patch( + position, patch_metadata["row"], patch_metadata["col"] + ) + else: + cell_dict["edge_position"] = False + + cell_dict_wsi.append(cell_dict) + cell_dict_detection.append(cell_detection) + + # get the cell token + bb_index = cell["bbox"] / self.model.patch_size + bb_index[0, :] = np.floor(bb_index[0, :]) + bb_index[1, :] = np.ceil(bb_index[1, :]) + bb_index = bb_index.astype(np.uint8) + cell_token = tokens[ + idx, + bb_index[0, 1] : bb_index[1, 1], + bb_index[0, 0] : bb_index[1, 0], + :, + ] + cell_token = torch.mean( + rearrange(cell_token, "H W D -> (H W) D"), dim=0 + ) + + graph_data["cell_tokens"].append(cell_token) + graph_data["positions"].append(torch.Tensor(centroid_global)) + graph_data["contours"].append(torch.Tensor(contour_global)) + + # post processing + self.logger.info(f"Detected cells before cleaning: {len(cell_dict_wsi)}") + keep_idx = self.post_process_edge_cells(cell_list=cell_dict_wsi) + cell_dict_wsi = [cell_dict_wsi[idx_c] for idx_c in keep_idx] + cell_dict_detection = [cell_dict_detection[idx_c] for idx_c in keep_idx] + graph_data["cell_tokens"] = [ + graph_data["cell_tokens"][idx_c] for idx_c in keep_idx + ] + graph_data["positions"] = [graph_data["positions"][idx_c] for idx_c in keep_idx] + graph_data["contours"] = [graph_data["contours"][idx_c] for idx_c in keep_idx] + self.logger.info(f"Detected cells after cleaning: {len(keep_idx)}") + + self.logger.info( + f"Processed all patches. 
Storing final results: {str(outdir / f'cells.json')} and cell_detection.json" + ) + cell_dict_wsi = { + "wsi_metadata": wsi.metadata, + "processed_patches": processed_patches, + "type_map": dataset_config["nuclei_types"], + "cells": cell_dict_wsi, + } + with open(str(outdir / "cells.json"), "w") as outfile: + ujson.dump(cell_dict_wsi, outfile, indent=2) + if geojson: + self.logger.info("Converting segmentation to geojson") + geojson_list = self.convert_geojson(cell_dict_wsi["cells"], True) + with open(str(str(outdir / "cells.geojson")), "w") as outfile: + ujson.dump(geojson_list, outfile, indent=2) + + cell_dict_detection = { + "wsi_metadata": wsi.metadata, + "processed_patches": processed_patches, + "type_map": dataset_config["nuclei_types"], + "cells": cell_dict_detection, + } + with open(str(outdir / "cell_detection.json"), "w") as outfile: + ujson.dump(cell_dict_detection, outfile, indent=2) + if geojson: + self.logger.info("Converting detection to geojson") + geojson_list = self.convert_geojson(cell_dict_wsi["cells"], False) + with open(str(str(outdir / "cell_detection.geojson")), "w") as outfile: + ujson.dump(geojson_list, outfile, indent=2) + + self.logger.info( + f"Create cell graph with embeddings and save it under: {str(outdir / 'cells.pt')}" + ) + graph = CellGraphDataWSI( + x=torch.stack(graph_data["cell_tokens"]), + positions=torch.stack(graph_data["positions"]), + contours=graph_data["contours"], + metadata=graph_data["metadata"], + ) + torch.save(graph, outdir / "cells.pt") + + cell_stats_df = pd.DataFrame(cell_dict_wsi["cells"]) + cell_stats = dict(cell_stats_df.value_counts("type")) + nuclei_types_inverse = {v: k for k, v in nuclei_types.items()} + verbose_stats = {nuclei_types_inverse[k]: v for k, v in cell_stats.items()} + self.logger.info(f"Finished with cell detection for WSI {wsi.name}") + self.logger.info("Stats:") + self.logger.info(f"{verbose_stats}") + + def get_cell_predictions_with_tokens( + self, predictions: dict, magnification: int = 40 + ) -> Tuple[List[dict], torch.Tensor]: + """Take the raw predictions, apply softmax and calculate type instances + + Args: + predictions (dict): Network predictions with tokens. Keys: + magnification (int, optional): WSI magnification. Defaults to 40. + + Returns: + Tuple[List[dict], torch.Tensor]: + * List[dict]: List with a dictionary for each batch element with cell seg results + Contains bbox, contour, 2D-position, type and type_prob for each cell + * List[dict]: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim) + """ + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=1 + ) # shape: (batch_size, 2, H, W) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=1 + ) # shape: (batch_size, num_nuclei_classes, H, W) + # get the instance types + ( + _, + instance_types, + ) = self.model.calculate_instance_map(predictions, magnification=magnification) + + tokens = predictions["tokens"].to("cpu") + + return instance_types, tokens + + def post_process_edge_cells(self, cell_list: List[dict]) -> List[int]: + """Use the CellPostProcessor to remove multiple cells and merge due to overlap + + Args: + cell_list (List[dict]): List with cell-dictionaries. 
Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + + Returns: + List[int]: List with integers of cells that should be kept + """ + cell_processor = CellPostProcessor(cell_list, self.logger) + cleaned_cells = cell_processor.post_process_cells() + + return list(cleaned_cells.index.values) + + def convert_geojson( + self, cell_list: list[dict], polygons: bool = False + ) -> List[dict]: + """Convert a list of cells to a geojson object + + Either a segmentation object (polygon) or detection points are converted + + Args: + cell_list (list[dict]): Cell list with dict entry for each cell. + Required keys for detection: + * type + * centroid + Required keys for segmentation: + * type + * contour + polygons (bool, optional): If polygon segmentations (True) or detection points (False). Defaults to False. + + Returns: + List[dict]: Geojson like list + """ + if polygons: + cell_segmentation_df = pd.DataFrame(cell_list) + detected_types = sorted(cell_segmentation_df.type.unique()) + geojson_placeholder = [] + for cell_type in detected_types: + cells = cell_segmentation_df[cell_segmentation_df["type"] == cell_type] + contours = cells["contour"].to_list() + final_c = [] + for c in contours: + c.append(c[0]) + final_c.append([c]) + + cell_geojson_object = get_template_segmentation() + cell_geojson_object["id"] = str(uuid.uuid4()) + cell_geojson_object["geometry"]["coordinates"] = final_c + cell_geojson_object["properties"]["classification"][ + "name" + ] = TYPE_NUCLEI_DICT[cell_type] + cell_geojson_object["properties"]["classification"][ + "color" + ] = COLOR_DICT[cell_type] + geojson_placeholder.append(cell_geojson_object) + else: + cell_detection_df = pd.DataFrame(cell_list) + detected_types = sorted(cell_detection_df.type.unique()) + geojson_placeholder = [] + for cell_type in detected_types: + cells = cell_detection_df[cell_detection_df["type"] == cell_type] + centroids = cells["centroid"].to_list() + cell_geojson_object = get_template_point() + cell_geojson_object["id"] = str(uuid.uuid4()) + cell_geojson_object["geometry"]["coordinates"] = centroids + cell_geojson_object["properties"]["classification"][ + "name" + ] = TYPE_NUCLEI_DICT[cell_type] + cell_geojson_object["properties"]["classification"][ + "color" + ] = COLOR_DICT[cell_type] + geojson_placeholder.append(cell_geojson_object) + return geojson_placeholder + + +class CellPostProcessor: + def __init__(self, cell_list: List[dict], logger: logging.Logger) -> None: + """POst-Processing a list of cells from one WSI + + Args: + cell_list (List[dict]): List with cell-dictionaries. 
Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + logger (logging.Logger): Logger + """ + self.logger = logger + self.logger.info("Initializing Cell-Postprocessor") + self.cell_df = pd.DataFrame(cell_list) + self.cell_df = self.cell_df.parallel_apply(convert_coordinates, axis=1) + + self.mid_cells = self.cell_df[ + self.cell_df["cell_status"] == 0 + ] # cells in the mid + self.cell_df_margin = self.cell_df[ + self.cell_df["cell_status"] != 0 + ] # cells either torching the border or margin + + def post_process_cells(self) -> pd.DataFrame: + """Main Post-Processing coordinator, entry point + + Returns: + pd.DataFrame: DataFrame with post-processed and cleaned cells + """ + self.logger.info("Finding edge-cells for merging") + cleaned_edge_cells = self._clean_edge_cells() + self.logger.info("Removal of cells detected multiple times") + cleaned_edge_cells = self._remove_overlap(cleaned_edge_cells) + + # merge with mid cells + postprocessed_cells = pd.concat( + [self.mid_cells, cleaned_edge_cells] + ).sort_index() + return postprocessed_cells + + def _clean_edge_cells(self) -> pd.DataFrame: + """Create a DataFrame that just contains all margin cells (cells inside the margin, not touching the border) + and border/edge cells (touching border) with no overlapping equivalent (e.g, if patch has no neighbour) + + Returns: + pd.DataFrame: Cleaned DataFrame + """ + + margin_cells = self.cell_df_margin[ + self.cell_df_margin["edge_position"] == 0 + ] # cells at the margin, but not touching the border + edge_cells = self.cell_df_margin[ + self.cell_df_margin["edge_position"] == 1 + ] # cells touching the border + existing_patches = list(set(self.cell_df_margin["patch_coordinates"].to_list())) + + edge_cells_unique = pd.DataFrame( + columns=self.cell_df_margin.columns + ) # cells torching the border without having an overlap from other patches + + for idx, cell_info in edge_cells.iterrows(): + edge_information = dict(cell_info["edge_information"]) + edge_patch = edge_information["edge_patches"][0] + edge_patch = f"{edge_patch[0]}_{edge_patch[1]}" + if edge_patch not in existing_patches: + edge_cells_unique.loc[idx, :] = cell_info + + cleaned_edge_cells = pd.concat([margin_cells, edge_cells_unique]) + + return cleaned_edge_cells.sort_index() + + def _remove_overlap(self, cleaned_edge_cells: pd.DataFrame) -> pd.DataFrame: + """Remove overlapping cells from provided DataFrame + + Args: + cleaned_edge_cells (pd.DataFrame): DataFrame that should be cleaned + + Returns: + pd.DataFrame: Cleaned DataFrame + """ + merged_cells = cleaned_edge_cells + + for iteration in range(20): + poly_list = [] + for idx, cell_info in merged_cells.iterrows(): + poly = Polygon(cell_info["contour"]) + if not poly.is_valid: + self.logger.debug("Found invalid polygon - Fixing with buffer 0") + multi = poly.buffer(0) + if isinstance(multi, MultiPolygon): + if len(multi) > 1: + poly_idx = np.argmax([p.area for p in multi]) + poly = multi[poly_idx] + poly = Polygon(poly) + else: + poly = multi[0] + poly = Polygon(poly) + else: + poly = Polygon(multi) + poly.uid = idx + poly_list.append(poly) + + # use an strtree for fast querying + tree = strtree.STRtree(poly_list) + + merged_idx = deque() + iterated_cells = set() + overlaps = 0 + + for query_poly in poly_list: + if query_poly.uid not in iterated_cells: + intersected_polygons = tree.query( + query_poly + ) # this also contains a self-intersection + if ( + len(intersected_polygons) > 1 + ): # we have more at 
least one intersection with another cell + submergers = [] # all cells that overlap with query + for inter_poly in intersected_polygons: + if ( + inter_poly.uid != query_poly.uid + and inter_poly.uid not in iterated_cells + ): + if ( + query_poly.intersection(inter_poly).area + / query_poly.area + > 0.01 + or query_poly.intersection(inter_poly).area + / inter_poly.area + > 0.01 + ): + overlaps = overlaps + 1 + submergers.append(inter_poly) + iterated_cells.add(inter_poly.uid) + # catch block: empty list -> some cells are touching, but not overlapping strongly enough + if len(submergers) == 0: + merged_idx.append(query_poly.uid) + else: # merging strategy: take the biggest cell, other merging strategies needs to get implemented + selected_poly_index = np.argmax( + np.array([p.area for p in submergers]) + ) + selected_poly_uid = submergers[selected_poly_index].uid + merged_idx.append(selected_poly_uid) + else: + # no intersection, just add + merged_idx.append(query_poly.uid) + iterated_cells.add(query_poly.uid) + + self.logger.info( + f"Iteration {iteration}: Found overlap of # cells: {overlaps}" + ) + if overlaps == 0: + self.logger.info("Found all overlapping cells") + break + elif iteration == 20: + self.logger.info( + f"Not all doubled cells removed, still {overlaps} to remove. For perfomance issues, we stop iterations now. Please raise an issue in git or increase number of iterations." + ) + merged_cells = cleaned_edge_cells.loc[ + cleaned_edge_cells.index.isin(merged_idx) + ].sort_index() + + return merged_cells.sort_index() + + +def convert_coordinates(row: pd.Series) -> pd.Series: + """Convert a row from x,y type to one string representation of the patch position for fast querying + Repr: x_y + + Args: + row (pd.Series): Row to be processed + + Returns: + pd.Series: Processed Row + """ + x, y = row["patch_coordinates"] + row["patch_row"] = x + row["patch_col"] = y + row["patch_coordinates"] = f"{x}_{y}" + return row + + +def get_cell_position(bbox: np.ndarray, patch_size: int = 1024) -> List[int]: + """Get cell position as a list + + Entry is 1, if cell touches the border: [top, right, down, left] + + Args: + bbox (np.ndarray): Bounding-Box of cell + patch_size (int, optional): Patch-size. Defaults to 1024. + + Returns: + List[int]: List with 4 integers for each position + """ + # bbox = 2x2 array in h, w style + # bbox[0,0] = upper position (height) + # bbox[1,0] = lower dimension (height) + # boox[0,1] = left position (width) + # bbox[1,1] = right position (width) + # bbox[:,0] -> x dimensions + top, left, down, right = False, False, False, False + if bbox[0, 0] == 0: + top = True + if bbox[0, 1] == 0: + left = True + if bbox[1, 0] == patch_size: + down = True + if bbox[1, 1] == patch_size: + right = True + position = [top, right, down, left] + position = [int(pos) for pos in position] + + return position + + +def get_cell_position_marging( + bbox: np.ndarray, patch_size: int = 1024, margin: int = 64 +) -> int: + """Get the status of the cell, describing the cell position + + A cell is either in the mid (0) or at one of the borders (1-8) + + # Numbers are assigned clockwise, starting from top left + # i.e., top left = 1, top = 2, top right = 3, right = 4, bottom right = 5 bottom = 6, bottom left = 7, left = 8 + # Mid status is denoted by 0 + + Args: + bbox (np.ndarray): Bounding Box of cell + patch_size (int, optional): Patch-Size. Defaults to 1024. + margin (int, optional): Margin-Size. Defaults to 64. 
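+
+        Example (illustrative values, not from the original code): in this file
+        process_wsi calls this helper with patch_size=256 and margin=64; a bbox
+        of [[200, 210], [250, 252]] reaches into both the bottom and right
+        margin bands, so the returned cell status is 5 (bottom right).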
+ + Returns: + int: Cell Status + """ + cell_status = None + if np.max(bbox) > patch_size - margin or np.min(bbox) < margin: + if bbox[0, 0] < margin: + # top left, top or top right + if bbox[0, 1] < margin: + # top left + cell_status = 1 + elif bbox[1, 1] > patch_size - margin: + # top right + cell_status = 3 + else: + # top + cell_status = 2 + elif bbox[1, 1] > patch_size - margin: + # top right, right or bottom right + if bbox[1, 0] > patch_size - margin: + # bottom right + cell_status = 5 + else: + # right + cell_status = 4 + elif bbox[1, 0] > patch_size - margin: + # bottom right, bottom, bottom left + if bbox[0, 1] < margin: + # bottom left + cell_status = 7 + else: + # bottom + cell_status = 6 + elif bbox[0, 1] < margin: + # bottom left, left, top left, but only left is left + cell_status = 8 + else: + cell_status = 0 + + return cell_status + + +def get_edge_patch(position, row, col): + # row starting on bottom or on top? + if position == [1, 0, 0, 0]: + # top + return [[row - 1, col]] + if position == [1, 1, 0, 0]: + # top and right + return [[row - 1, col], [row - 1, col + 1], [row, col + 1]] + if position == [0, 1, 0, 0]: + # right + return [[row, col + 1]] + if position == [0, 1, 1, 0]: + # right and down + return [[row, col + 1], [row + 1, col + 1], [row + 1, col]] + if position == [0, 0, 1, 0]: + # down + return [[row + 1, col]] + if position == [0, 0, 1, 1]: + # down and left + return [[row + 1, col], [row + 1, col - 1], [row, col - 1]] + if position == [0, 0, 0, 1]: + # left + return [[row, col - 1]] + if position == [1, 0, 0, 1]: + # left and top + return [[row, col - 1], [row - 1, col - 1], [row - 1, col]] + + +# CLI +class InferenceWSIParser: + """Parser""" + + def __init__(self) -> None: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Perform CellViT inference for given run-directory with model checkpoints and logs. Just for CellViT, not for StarDist models", + ) + requiredNamed = parser.add_argument_group("required named arguments") + requiredNamed.add_argument( + "--model", + type=str, + help="Model checkpoint file that is used for inference", + required=True, + ) + parser.add_argument( + "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0 + ) + parser.add_argument( + "--magnification", + type=float, + help="Network magnification. Is used for checking patch magnification such that we use the correct resolution for network. Default: 40", + default=40, + ) + parser.add_argument( + "--enforce_amp", + action="store_true", + help="Whether to use mixed precision for inference (enforced). Otherwise network default training settings are used." + " Default: False", + ) + parser.add_argument( + "--batch_size", + type=int, + help="Inference batch-size. Default: 8", + default=8, + ) + parser.add_argument( + "--outdir_subdir", + type=str, + help="If provided, a subdir with the given name is created in the cell_detection folder where the results are stored. 
Default: None", + default=None, + ) + parser.add_argument( + "--geojson", + action="store_true", + help="Set this flag to export results as additional geojson files for loading them into Software like QuPath.", + ) + + # subparsers for either loading a WSI or a WSI folder + + # WSI + subparsers = parser.add_subparsers( + dest="command", + description="Main run command for either performing inference on single WSI-file or on whole dataset", + ) + subparser_wsi = subparsers.add_parser( + "process_wsi", description="Process a single WSI file" + ) + subparser_wsi.add_argument( + "--wsi_path", + type=str, + help="Path to WSI file", + ) + subparser_wsi.add_argument( + "--patched_slide_path", + type=str, + help="Path to patched WSI file (specific WSI file, not parent path of patched slide dataset)", + ) + + # Dataset + subparser_dataset = subparsers.add_parser( + "process_dataset", + description="Process a whole dataset", + ) + subparser_dataset.add_argument( + "--wsi_paths", type=str, help="Path to the folder where all WSI are stored" + ) + subparser_dataset.add_argument( + "--patch_dataset_path", + type=str, + help="Path to the folder where the patch dataset is stored", + ) + subparser_dataset.add_argument( + "--filelist", + type=str, + help="Filelist with WSI to process. Must be a .csv file with one row denoting the filenames (named 'Filename')." + "If not provided, all WSI files with given ending in the filelist are processed.", + default=None, + ) + subparser_dataset.add_argument( + "--wsi_extension", + type=str, + help="The extension types used for the WSI files, see configs.python.config (WSI_EXT)", + default="svs", + ) + + self.parser = parser + + def parse_arguments(self) -> dict: + opt = self.parser.parse_args() + return vars(opt) + + +def check_wsi(wsi: WSI, magnification: float = 40.0): + """Check if provided patched WSI is having the right settings + + Args: + wsi (WSI): WSI to check + magnification (float, optional): Check magnification. Defaults to 40.0. + + Raises: + RuntimeError: The magnification is not matching to the network input magnification. + RuntimeError: The patch-size is not devisible by 256. + RunTimeError: The patch-size is not 256 + RunTimeError: The overlap is not 64px sized + """ + if wsi.metadata["magnification"] is not None: + patch_magnification = float(wsi.metadata["magnification"]) + else: + patch_magnification = float( + float(wsi.metadata["base_magnification"]) / wsi.metadata["downsampling"] + ) + patch_size = int(wsi.metadata["patch_size"]) + + if patch_magnification != magnification: + raise RuntimeError( + "The magnification is not matching to the network input magnification." 
+ ) + if (patch_size % 256) != 0: + raise RuntimeError("The patch-size must be devisible by 256.") + if wsi.metadata["patch_size"] != 256: + raise RuntimeError("The patch-size must be 256.") + if wsi.metadata["patch_overlap"] != 64: + raise RuntimeError("The patch-overlap must be 64") + + +if __name__ == "__main__": + configuration_parser = InferenceWSIParser() + configuration = configuration_parser.parse_arguments() + command = configuration["command"] + + cell_segmentation = CellSegmentationInference( + model_path=configuration["model"], + gpu=configuration["gpu"], + enforce_mixed_precision=configuration["enforce_amp"], + ) + + if command.lower() == "process_wsi": + cell_segmentation.logger.info("Processing single WSI file") + wsi_path = Path(configuration["wsi_path"]) + wsi_name = wsi_path.stem + wsi_file = WSI( + name=wsi_name, + patient=wsi_name, + slide_path=wsi_path, + patched_slide_path=configuration["patched_slide_path"], + ) + check_wsi(wsi=wsi_file, magnification=configuration["magnification"]) + cell_segmentation.process_wsi( + wsi_file, + subdir_name=configuration["outdir_subdir"], + geojson=configuration["geojson"], + batch_size=configuration["batch_size"], + ) + + elif command.lower() == "process_dataset": + cell_segmentation.logger.info("Processing whole dataset") + if configuration["filelist"] is not None: + if Path(configuration["filelist"]).suffix != ".csv": + raise ValueError("Filelist must be a .csv file!") + cell_segmentation.logger.info( + f"Loading files from filelist {configuration['filelist']}" + ) + wsi_filelist = load_wsi_files_from_csv( + csv_path=configuration["filelist"], + wsi_extension=configuration["wsi_extension"], + ) + wsi_filelist = [ + Path(configuration["wsi_paths"]) / f + if configuration["wsi_paths"] not in f + else Path(f) + for f in wsi_filelist + ] + else: + cell_segmentation.logger.info( + f"Loading all files from folder {configuration['wsi_paths']}. No filelist provided." 
+ ) + wsi_filelist = [ + f + for f in sorted( + Path(configuration["wsi_paths"]).glob( + f"**/*.{configuration['wsi_extension']}" + ) + ) + ] + for i, wsi_path in enumerate(wsi_filelist): + wsi_path = Path(wsi_path) + wsi_name = wsi_path.stem + patched_slide_path = Path(configuration["patch_dataset_path"]) / wsi_name + cell_segmentation.logger.info(f"File {i+1}/{len(wsi_filelist)}: {wsi_name}") + wsi_file = WSI( + name=wsi_name, + patient=wsi_name, + slide_path=wsi_path, + patched_slide_path=patched_slide_path, + ) + check_wsi(wsi=wsi_file, magnification=configuration["magnification"]) + cell_segmentation.process_wsi( + wsi_file, + subdir_name=configuration["outdir_subdir"], + geojson=configuration["geojson"], + batch_size=configuration["batch_size"], + ) diff --git a/cell_segmentation/inference/cell_detection_mp.py b/cell_segmentation/inference/cell_detection_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..12259b6e2bbc5a0ec7dfd3243ab217502a76b566 --- /dev/null +++ b/cell_segmentation/inference/cell_detection_mp.py @@ -0,0 +1,1527 @@ +# -*- coding: utf-8 -*- +# CellViT Inference Method for Patch-Wise Inference on a patches test set/Whole WSI +# +# Detect Cells with our Networks +# Patches dataset needs to have the follwoing requirements: +# Patch-Size must be 1024, with overlap of 64 +# +# We provide preprocessing code here: ./preprocessing/patch_extraction/main_extraction.py +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen +# @ Erik Ylipää, erik.ylipaa@gmail.com +# Linköping University +# Luleå, Sweden + + +from dataclasses import dataclass +from functools import partial +import inspect +from io import BytesIO +import os +import queue +import sys +import multiprocessing +from multiprocessing.pool import ThreadPool +import zipfile +from time import sleep + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) +parentdir = os.path.dirname(parentdir) +sys.path.insert(0, parentdir) + +from cellvit.cell_segmentation.utils.post_proc import DetectionCellPostProcessor + + +import argparse +import logging +import uuid +import warnings +from collections import defaultdict, deque +from pathlib import Path +from typing import Dict, List, Literal, OrderedDict, Tuple, Union, Callable + +import numpy as np +import pandas as pd +import torch +import torch.nn.functional as F +import tqdm +import ujson +from einops import rearrange + +# from PIL import Image +from shapely import strtree +from shapely.errors import ShapelyDeprecationWarning +from shapely.geometry import Polygon, MultiPolygon + + +# from skimage.color import rgba2rgb +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms as T +#from torch.profiler import profile, record_function, ProfilerActivity + +from cellvit.cell_segmentation.datasets.cell_graph_datamodel import CellGraphDataWSI +from cellvit.cell_segmentation.utils.template_geojson import ( + get_template_point, + get_template_segmentation, +) +from cellvit.datamodel.wsi_datamodel import WSI +from cellvit.models.segmentation.cell_segmentation.cellvit import ( + CellViT, + CellViT256, + CellViT256Unshared, + CellViTSAM, + CellViTSAMUnshared, + CellViTUnshared, +) +from cellvit.preprocessing.encoding.datasets.patched_wsi_inference import PatchedWSIInference +from cellvit.utils.file_handling import load_wsi_files_from_csv +from cellvit.utils.logger 
import Logger +from cellvit.utils.tools import unflatten_dict + +warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) +#pandarallel.initialize(progress_bar=False, nb_workers=12) + + + +# color setup +COLOR_DICT = { + 1: [255, 0, 0], + 2: [34, 221, 77], + 3: [35, 92, 236], + 4: [254, 255, 0], + 5: [255, 159, 68], +} + +TYPE_NUCLEI_DICT = { + 1: "Neoplastic", + 2: "Inflammatory", + 3: "Connective", + 4: "Dead", + 5: "Epithelial", +} + +# This file will be used to indicate that a image has been processed +FLAG_FILE_NAME = ".cell_detection_done" + +def load_wsi(wsi_path, overwrite=False): + try: + wsi_name = wsi_path.stem + patched_slide_path = Path(configuration["patch_dataset_path"]) / wsi_name + flag_file_path = patched_slide_path / "cell_detection" / FLAG_FILE_NAME + if not overwrite and flag_file_path.exists(): + return + wsi_file = WSI( + name=wsi_name, + patient=wsi_name, + slide_path=wsi_path, + patched_slide_path=patched_slide_path, + ) + check_wsi(wsi=wsi_file, magnification=configuration["magnification"]) + return wsi_file + except BaseException as e: + e.wsi_file = wsi_path + return e + + +class InferenceWSIDataset(Dataset): + def __init__(self, wsi_filelist, n_workers: int = 0, overwrite=False, transform: Callable = None): + self.wsi_files = [] + + # This index will contain a repeat of all the wsi objects the number of + # patches they have. This means that it will be as long as the total number + # of patches in all WSI files. One can simply get the desired patch by + # subscripting into this list to get the correct WSI file object and + # pertinent metadata + self.wsi_index = [] + self.transform = transform + + pb = tqdm.trange(len(wsi_filelist), desc='Loading WSI file list') + already_processed_files = [] + if n_workers > 0: + #Since this is mostly and IO-bound task, we use a thread pool + #with multiprocessing.Pool(n_workers) as pool: + with ThreadPool(n_workers) as pool: + load_wsi_partial = partial(load_wsi, overwrite=overwrite) + for wsi_file in pool.imap(load_wsi_partial, wsi_filelist): + if isinstance(wsi_file, BaseException): + logging.warn(f"Could not load file {wsi_file.wsi_file}, caught exception {str(wsi_file)}") + elif wsi_file is None: + already_processed_files.append(wsi_file) + else: + self.wsi_files.append(wsi_file) + n_patches = wsi_file.get_number_patches() + indexing_info = [(wsi_file, i) for i in range(n_patches)] + self.wsi_index.extend(indexing_info) + pb.update() + else: + for wsi_file_path in wsi_filelist: + wsi_file = load_wsi(wsi_file_path, overwrite) + if isinstance(wsi_file, BaseException): + logging.warn(f"Could not load file {wsi_file.wsi_file}, caught exception {str(wsi_file)}") + elif wsi_file is None: + already_processed_files.append(wsi_file) + else: + self.wsi_files.append(wsi_file) + n_patches = wsi_file.get_number_patches() + indexing_info = [(wsi_file, i) for i in range(n_patches)] + self.wsi_index.extend(indexing_info) + pb.update() + + + def __len__(self): + return len(self.wsi_index) + + def __getitem__(self, item): + wsi_file, local_idx = self.wsi_index[item] + patch, metadata = wsi_file.get_patch(local_idx, self.transform) + return patch, local_idx, wsi_file, metadata + + def get_n_files(self): + return len(self.wsi_files) + + +def wsi_patch_collator(batch): + patches, local_idx, wsi_file, metadata = zip(*batch) # Transpose the batch + patches = torch.stack(patches) + return patches, local_idx, wsi_file, metadata + + +def f_post_processing_worker(wsi_file, wsi_work_list, postprocess_arguments): + local_idxs, 
predictions_records, metadata = zip(*wsi_work_list) + # Merge the prediction records into a single dictionary again. + predictions = defaultdict(list) + for record in predictions_records: + for k,v in record.items(): + predictions[k].append(v) + predictions_stacked = {k: torch.stack(v).to(torch.float32) for k,v in predictions.items()} + postprocess_predictions(predictions_stacked, metadata, wsi_file, postprocess_arguments) + + +@dataclass +class PostprocessArguments: + n_images: int + num_nuclei_classes: int + dataset_config: Dict + overlap: int + patch_size: int + geojson: bool + subdir_name: str + logger: Logger + n_workers: int = 0 + wait_time: float = 2. + + +def postprocess_predictions(predictions, metadata, wsi, postprocessing_args: PostprocessArguments): + # logger = postprocessing_args.logger + logger = logging.getLogger() + num_nuclei_classes = postprocessing_args.num_nuclei_classes + dataset_config = postprocessing_args.dataset_config + overlap = postprocessing_args.overlap + patch_size = postprocessing_args.patch_size + geojson = postprocessing_args.geojson + subdir_name = postprocessing_args.subdir_name + + if subdir_name is not None: + outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name + else: + outdir = Path(wsi.patched_slide_path) / "cell_detection" + outdir.mkdir(exist_ok=True, parents=True) + + outfile = outdir / "cell_detection.zip" + + instance_types, tokens = get_cell_predictions_with_tokens(num_nuclei_classes, + predictions, magnification=wsi.metadata["magnification"] + ) + + processed_patches = [] + # unpack each patch from batch + cell_dict_wsi = [] # for storing all cell information + cell_dict_detection = [] # for storing only the centroids + nuclei_types = dataset_config["nuclei_types"] + + graph_data = { + "cell_tokens": [], + "positions": [], + "contours": [], + "metadata": {"wsi_metadata": wsi.metadata, "nuclei_types": nuclei_types}, + } + + for idx, (patch_instance_types, patch_metadata) in enumerate( + zip(instance_types, metadata) + ): + # add global patch metadata + patch_cell_detection = {} + patch_cell_detection["patch_metadata"] = patch_metadata + patch_cell_detection["type_map"] = dataset_config["nuclei_types"] + + processed_patches.append( + f"{patch_metadata['row']}_{patch_metadata['col']}" + ) + + # calculate coordinate on highest magnifications + # wsi_scaling_factor = patch_metadata["wsi_metadata"]["downsampling"] + # patch_size = patch_metadata["wsi_metadata"]["patch_size"] + wsi_scaling_factor = wsi.metadata["downsampling"] + patch_size = wsi.metadata["patch_size"] + x_global = int( + patch_metadata["row"] * patch_size * wsi_scaling_factor + - (patch_metadata["row"] + 0.5) * overlap + ) + y_global = int( + patch_metadata["col"] * patch_size * wsi_scaling_factor + - (patch_metadata["col"] + 0.5) * overlap + ) + + # extract cell information + for cell in patch_instance_types.values(): + if cell["type"] == nuclei_types["Background"]: + continue + offset_global = np.array([x_global, y_global]) + centroid_global = cell["centroid"] + np.flip(offset_global) + contour_global = cell["contour"] + np.flip(offset_global) + bbox_global = cell["bbox"] + offset_global + cell_dict = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "contour": contour_global.tolist(), + "type_prob": cell["type_prob"], + "type": cell["type"], + "patch_coordinates": [ + patch_metadata["row"], + patch_metadata["col"], + ], + "cell_status": get_cell_position_marging( + cell["bbox"], 1024, 64 + ), + "offset_global": offset_global.tolist() 
+ # optional: Local positional information + # "bbox_local": cell["bbox"].tolist(), + # "centroid_local": cell["centroid"].tolist(), + # "contour_local": cell["contour"].tolist(), + } + cell_detection = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "type": cell["type"], + } + if np.max(cell["bbox"]) == 1024 or np.min(cell["bbox"]) == 0: + position = get_cell_position(cell["bbox"], 1024) + cell_dict["edge_position"] = True + cell_dict["edge_information"] = {} + cell_dict["edge_information"]["position"] = position + cell_dict["edge_information"][ + "edge_patches" + ] = get_edge_patch( + position, patch_metadata["row"], patch_metadata["col"] + ) + else: + cell_dict["edge_position"] = False + + cell_dict_wsi.append(cell_dict) + cell_dict_detection.append(cell_detection) + + # get the cell token + bb_index = cell["bbox"] / patch_size + bb_index[0, :] = np.floor(bb_index[0, :]) + bb_index[1, :] = np.ceil(bb_index[1, :]) + bb_index = bb_index.astype(np.uint8) + cell_token = tokens[ + idx, + bb_index[0, 1] : bb_index[1, 1], + bb_index[0, 0] : bb_index[1, 0], + :, + ] + cell_token = torch.mean( + rearrange(cell_token, "H W D -> (H W) D"), dim=0 + ) + + graph_data["cell_tokens"].append(cell_token) + graph_data["positions"].append(torch.Tensor(centroid_global)) + graph_data["contours"].append(torch.Tensor(contour_global)) + + # post processing + logger.info(f"Detected cells before cleaning: {len(cell_dict_wsi)}") + keep_idx = post_process_edge_cells(cell_list=cell_dict_wsi, logger=logger) + cell_dict_wsi = [cell_dict_wsi[idx_c] for idx_c in keep_idx] + cell_dict_detection = [cell_dict_detection[idx_c] for idx_c in keep_idx] + graph_data["cell_tokens"] = [ + graph_data["cell_tokens"][idx_c] for idx_c in keep_idx + ] + graph_data["positions"] = [graph_data["positions"][idx_c] for idx_c in keep_idx] + graph_data["contours"] = [graph_data["contours"][idx_c] for idx_c in keep_idx] + logger.info(f"Detected cells after cleaning: {len(keep_idx)}") + + logger.info( + f"Processed all patches. 
Storing final results: {str(outdir / f'cells.json')} and cell_detection.json" + ) + cell_dict_wsi = { + "wsi_metadata": wsi.metadata, + "processed_patches": processed_patches, + "type_map": dataset_config["nuclei_types"], + "cells": cell_dict_wsi, + } + + with zipfile.ZipFile(outfile, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=9) as zf: + zf.writestr("cells.json", ujson.dumps(cell_dict_wsi, outfile, indent=2)) + + if geojson: + logger.info("Converting segmentation to geojson") + + geojson_list = convert_geojson(cell_dict_wsi["cells"], True) + zf.writestr("cells.geojson", ujson.dumps(geojson_list, outfile, indent=2)) + + cell_dict_detection = { + "wsi_metadata": wsi.metadata, + "processed_patches": processed_patches, + "type_map": dataset_config["nuclei_types"], + "cells": cell_dict_detection, + } + zf.writestr("cell_detection.json", ujson.dumps(cell_dict_detection, outfile, indent=2)) + if geojson: + logger.info("Converting detection to geojson") + geojson_list = convert_geojson(cell_dict_wsi["cells"], False) + zf.writestr("cell_detection.geojson", ujson.dumps(geojson_list, outfile, indent=2)) + + logger.info( + f"Create cell graph with embeddings and save it under: {str(outdir / 'cells.pt')}" + ) + graph = CellGraphDataWSI( + x=torch.stack(graph_data["cell_tokens"]), + positions=torch.stack(graph_data["positions"]), + contours=graph_data["contours"], + metadata=graph_data["metadata"], + ) + torch_bytes_io = BytesIO() + #torch.save(graph, outdir / "cells.pt") + torch.save(graph, torch_bytes_io) + zf.writestr("cells.pt", torch_bytes_io.getvalue()) + + flag_file = outdir / FLAG_FILE_NAME + flag_file.touch() + + cell_stats_df = pd.DataFrame(cell_dict_wsi["cells"]) + cell_stats = dict(cell_stats_df.value_counts("type")) + nuclei_types_inverse = {v: k for k, v in nuclei_types.items()} + verbose_stats = {nuclei_types_inverse[k]: v for k, v in cell_stats.items()} + logger.info(f"Finished with cell detection for WSI {wsi.name}") + logger.info("Stats:") + logger.info(f"{verbose_stats}") + + +def post_process_edge_cells(cell_list: List[dict], logger) -> List[int]: + """Use the CellPostProcessor to remove multiple cells and merge due to overlap + + Args: + cell_list (List[dict]): List with cell-dictionaries. Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + + Returns: + List[int]: List with integers of cells that should be kept + """ + cell_processor = CellPostProcessor(cell_list, logger) + cleaned_cells_idx = cell_processor.post_process_cells() + + return sorted(cell_record["index"] for cell_record in cleaned_cells_idx) + + +def convert_geojson(cell_list: list[dict], polygons: bool = False) -> List[dict]: + """Convert a list of cells to a geojson object + + Either a segmentation object (polygon) or detection points are converted + + Args: + cell_list (list[dict]): Cell list with dict entry for each cell. + Required keys for detection: + * type + * centroid + Required keys for segmentation: + * type + * contour + polygons (bool, optional): If polygon segmentations (True) or detection points (False). Defaults to False. 
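+
+    Example (illustrative sketch; the single cell below uses made-up values):
+        >>> cells = [
+        ...     {"type": 1, "centroid": [120.5, 88.0],
+        ...      "contour": [[100, 80], [140, 80], [140, 96], [100, 96]]},
+        ... ]
+        >>> detection_features = convert_geojson(cells, polygons=False)
+        >>> segmentation_features = convert_geojson(cells, polygons=True)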
+ + Returns: + List[dict]: Geojson like list + """ + if polygons: + cell_segmentation_df = pd.DataFrame(cell_list) + detected_types = sorted(cell_segmentation_df.type.unique()) + geojson_placeholder = [] + for cell_type in detected_types: + cells = cell_segmentation_df[cell_segmentation_df["type"] == cell_type] + contours = cells["contour"].to_list() + final_c = [] + for c in contours: + c.append(c[0]) + final_c.append([c]) + + cell_geojson_object = get_template_segmentation() + cell_geojson_object["id"] = str(uuid.uuid4()) + cell_geojson_object["geometry"]["coordinates"] = final_c + cell_geojson_object["properties"]["classification"][ + "name" + ] = TYPE_NUCLEI_DICT[cell_type] + cell_geojson_object["properties"]["classification"][ + "color" + ] = COLOR_DICT[cell_type] + geojson_placeholder.append(cell_geojson_object) + else: + cell_detection_df = pd.DataFrame(cell_list) + detected_types = sorted(cell_detection_df.type.unique()) + geojson_placeholder = [] + for cell_type in detected_types: + cells = cell_detection_df[cell_detection_df["type"] == cell_type] + centroids = cells["centroid"].to_list() + cell_geojson_object = get_template_point() + cell_geojson_object["id"] = str(uuid.uuid4()) + cell_geojson_object["geometry"]["coordinates"] = centroids + cell_geojson_object["properties"]["classification"][ + "name" + ] = TYPE_NUCLEI_DICT[cell_type] + cell_geojson_object["properties"]["classification"][ + "color" + ] = COLOR_DICT[cell_type] + geojson_placeholder.append(cell_geojson_object) + return geojson_placeholder + + +def calculate_instance_map(num_nuclei_classes: int, predictions: OrderedDict, magnification: Literal[20, 40] = 40 + ) -> Tuple[torch.Tensor, List[dict]]: + """Calculate Instance Map from network predictions (after Softmax output) + + Args: + predictions (dict): Dictionary with the following required keys: + * nuclei_binary_map: Binary Nucleus Predictions. Shape: (batch_size, H, W, 2) + * nuclei_type_map: Type prediction of nuclei. Shape: (batch_size, H, W, 6) + * hv_map: Horizontal-Vertical nuclei mapping. Shape: (batch_size, H, W, 2) + magnification (Literal[20, 40], optional): Which magnification the data has. Defaults to 40. + + Returns: + Tuple[torch.Tensor, List[dict]]: + * torch.Tensor: Instance map. Each Instance has own integer. Shape: (batch_size, H, W) + * List of dictionaries. Each List entry is one image. Each dict contains another dict for each detected nucleus. 
+ For each nucleus, the following information are returned: "bbox", "centroid", "contour", "type_prob", "type" + """ + cell_post_processor = DetectionCellPostProcessor(nr_types=num_nuclei_classes, magnification=magnification, gt=False) + instance_preds = [] + type_preds = [] + max_nuclei_type_predictions = predictions["nuclei_type_map"].argmax(dim=-1, keepdims=True).detach() + max_nuclei_type_predictions = max_nuclei_type_predictions.cpu() # This is a costly operation because this map is rather large + max_nuclei_location_predictions = predictions["nuclei_binary_map"].argmax(dim=-1, keepdims=True).detach().cpu() + + for i in range(predictions["nuclei_binary_map"].shape[0]): + # Broke this out to profile better + pred_map = np.concatenate( + [ + max_nuclei_type_predictions[i], + max_nuclei_location_predictions[i], + predictions["hv_map"][i].detach().cpu(), + ], + axis=-1, + ) + instance_pred = cell_post_processor.post_process_cell_segmentation(pred_map) + instance_preds.append(instance_pred[0]) + type_preds.append(instance_pred[1]) + + return torch.Tensor(np.stack(instance_preds)), type_preds + + +def get_cell_predictions_with_tokens(num_nuclei_classes: int, + predictions: dict, magnification: int = 40 + ) -> Tuple[List[dict], torch.Tensor]: + """Take the raw predictions, apply softmax and calculate type instances + + Args: + predictions (dict): Network predictions with tokens. Keys: + magnification (int, optional): WSI magnification. Defaults to 40. + + Returns: + Tuple[List[dict], torch.Tensor]: + * List[dict]: List with a dictionary for each batch element with cell seg results + Contains bbox, contour, 2D-position, type and type_prob for each cell + * List[dict]: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim) + """ + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=-1 + ) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=-1 + ) + + # get the instance types + ( + _, + instance_types, + ) = calculate_instance_map(num_nuclei_classes, predictions, magnification=magnification) + # get the tokens + tokens = predictions["tokens"] + + return instance_types, tokens + + +class CellSegmentationInference: + def __init__( + self, + model_path: Union[Path, str], + gpu: int, + enforce_mixed_precision: bool = False, + ) -> None: + """Cell Segmentation Inference class. + + After setup, a WSI can be processed by calling process_wsi method + + Args: + model_path (Union[Path, str]): Path to model checkpoint + gpu (int): CUDA GPU id to use + enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks. + Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used. + Defaults to False. + """ + self.model_path = Path(model_path) + if gpu >= 0: + self.device = f"cuda:{gpu}" + else: + self.device = "cpu" + self.__instantiate_logger() + self.__load_model() + self.__load_inference_transforms() + self.__setup_amp(enforce_mixed_precision=enforce_mixed_precision) + + def __instantiate_logger(self) -> None: + """Instantiate logger + + Logger is using no formatters. 
Logs are stored in the run directory under the filename: inference.log + """ + logger = Logger( + level="INFO", + ) + self.logger = logger.create_logger() + + def __load_model(self) -> None: + """Load model and checkpoint and load the state_dict""" + self.logger.info(f"Loading model: {self.model_path}") + + model_checkpoint = torch.load(self.model_path, map_location="cpu") + + # unpack checkpoint + self.run_conf = unflatten_dict(model_checkpoint["config"], ".") + self.model = self.__get_model(model_type=model_checkpoint["arch"]) + self.logger.info( + self.model.load_state_dict(model_checkpoint["model_state_dict"]) + ) + + self.model.eval() + self.model.to(self.device) + + + def __get_model( + self, model_type: str + ) -> Union[ + CellViT, + CellViTUnshared, + CellViT256, + CellViTUnshared, + CellViTSAM, + CellViTSAMUnshared, + ]: + """Return the trained model for inference + + Args: + model_type (str): Name of the model. Must either be one of: + CellViT, CellViTUnshared, CellViT256, CellViT256Unshared, CellViTSAM, CellViTSAMUnshared + + Returns: + Union[CellViT, CellViTUnshared, CellViT256, CellViT256Unshared, CellViTSAM, CellViTSAMUnshared]: Model + """ + implemented_models = [ + "CellViT", + "CellViTUnshared", + "CellViT256", + "CellViT256Unshared", + "CellViTSAM", + "CellViTSAMUnshared", + ] + if model_type not in implemented_models: + raise NotImplementedError( + f"Unknown model type. Please select one of {implemented_models}" + ) + if model_type in ["CellViT", "CellViTUnshared"]: + if model_type == "CellViT": + model_class = CellViT + elif model_type == "CellViTUnshared": + model_class = CellViTUnshared + model = model_class( + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + embed_dim=self.run_conf["model"]["embed_dim"], + input_channels=self.run_conf["model"].get("input_channels", 3), + depth=self.run_conf["model"]["depth"], + num_heads=self.run_conf["model"]["num_heads"], + extract_layers=self.run_conf["model"]["extract_layers"], + ) + + elif model_type in ["CellViT256", "CellViT256Unshared"]: + if model_type == "CellViT256": + model_class = CellViT256 + elif model_type == "CellViTVIT256Unshared": + model_class = CellViT256Unshared + model = model_class( + model256_path=None, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + ) + elif model_type in ["CellViTSAM", "CellViTSAMUnshared"]: + if model_type == "CellViTSAM": + model_class = CellViTSAM + elif model_type == "CellViTSAMUnshared": + model_class = CellViTSAMUnshared + model = model_class( + model_path=None, + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + vit_structure=self.run_conf["model"]["backbone"], + ) + return model + + def __load_inference_transforms(self): + """Load the inference transformations from the run_configuration""" + self.logger.info("Loading inference transformations") + + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + self.inference_transforms = T.Compose( + [T.ToTensor(), T.Normalize(mean=mean, std=std)] + ) + + def __setup_amp(self, enforce_mixed_precision: bool = False) -> None: + """Setup automated mixed precision 
(amp) for inference. + + Args: + enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks. + Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used. + Defaults to False. + """ + if enforce_mixed_precision: + self.mixed_precision = enforce_mixed_precision + else: + self.mixed_precision = self.run_conf["training"].get( + "mixed_precision", False + ) + + def process_wsi( + self, + wsi: WSI, + subdir_name: str = None, + patch_size: int = 1024, + overlap: int = 64, + batch_size: int = 8, + geojson: bool = False, + ) -> None: + """Process WSI file + + Args: + wsi (WSI): WSI object + subdir_name (str, optional): If provided, a subdir with the given name is created in the cell_detection folder. + Helpful if you need to store different cell detection results next to each other. Defaults to None (no subdir). + patch_size (int, optional): Patch-Size. Default to 1024. + overlap (int, optional): Overlap between patches. Defaults to 64. + batch_size (int, optional): Batch-size for inference. Defaults to 8. + geosjon (bool, optional): If a geojson export should be performed. Defaults to False. + """ + self.logger.info(f"Processing WSI: {wsi.name}") + + wsi_inference_dataset = PatchedWSIInference( + wsi, transform=self.inference_transforms + ) + + num_workers = int(3 / 4 * os.cpu_count()) + if num_workers is None: + num_workers = 16 + num_workers = int(np.clip(num_workers, 1, 2 * batch_size)) + + wsi_inference_dataloader = DataLoader( + dataset=wsi_inference_dataset, + batch_size=batch_size, + num_workers=num_workers, + shuffle=False, + collate_fn=wsi_inference_dataset.collate_batch, + pin_memory=False, + ) + dataset_config = self.run_conf["dataset_config"] + nuclei_types = dataset_config["nuclei_types"] + + if subdir_name is not None: + outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name + else: + outdir = Path(wsi.patched_slide_path) / "cell_detection" + outdir.mkdir(exist_ok=True, parents=True) + + predicted_batches = [] + with torch.no_grad(): + for batch in tqdm.tqdm( + wsi_inference_dataloader, total=len(wsi_inference_dataloader) + ): + patches = batch[0].to(self.device) + + metadata = batch[1] + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions_ = self.model(patches, retrieve_tokens=True) + else: + predictions_ = self.model(patches, retrieve_tokens=True) + # reshape, apply softmax to segmentation maps + #predictions = self.model.reshape_model_output(predictions_, self.device) + predictions = self.model.reshape_model_output(predictions_, 'cpu') + predicted_batches.append((predictions, metadata)) + + postprocess_predictions(predicted_batches, self.model.num_nuclei_classes, wsi, self.logger, dataset_config, overlap, patch_size, geojson, outdir) + + def process_wsi_filelist(self, + wsi_filelist, + subdir_name: str = None, + patch_size: int = 1024, + overlap: int = 64, + batch_size: int = 8, + torch_compile: bool = False, + geojson: bool = False, + n_postprocess_workers: int = 0, + n_dataloader_workers: int = 4, + overwrite: bool = False): + if torch_compile: + self.logger.info("Model will be compiled using torch.compile. 
First batch will take a lot more time to compute.") + self.model = torch.compile(self.model) + + dataset = InferenceWSIDataset(wsi_filelist, transform=self.inference_transforms, overwrite=overwrite, n_workers=n_postprocess_workers) + self.logger.info(f"Loaded dataset with {dataset.get_n_files()} images") + + dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=wsi_patch_collator, num_workers=n_dataloader_workers) + #with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof: + post_process_arguments = PostprocessArguments(n_images=dataset.get_n_files(), + num_nuclei_classes=self.model.num_nuclei_classes, + dataset_config=self.run_conf['dataset_config'], + overlap=overlap, + patch_size=patch_size, + geojson=geojson, + subdir_name=subdir_name, + n_workers=n_postprocess_workers, + logger=self.logger) + if n_postprocess_workers > 0: + self._process_wsi_filelist_multiprocessing(dataloader, + post_process_arguments) + else: + self._process_wsi_filelist_singleprocessing(dataloader, + post_process_arguments) + + + #print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) + + def _process_wsi_filelist_singleprocessing(self, + dataloader, + post_process_arguments): + wsi_work_map = {} + + with torch.no_grad(): + try: + for batch in tqdm.tqdm(dataloader, desc="Processing patches"): + patches, local_idxs, wsi_files, metadatas = batch + patches = patches.to(self.device) + + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions_ = self.model(patches, retrieve_tokens=True) + else: + predictions_ = self.model(patches, retrieve_tokens=True) + # reshape, apply softmax to segmentation maps + #predictions = self.model.reshape_model_output(predictions_, self.device) + predictions = self.model.reshape_model_output(predictions_, 'cpu') + # We break out the predictions into records (one dict per patch instead of all patches in one dict) + prediction_records = [{k: v[i] for k,v in predictions.items()} for i in range(len(local_idxs))] + + for i, wsi_file in enumerate(wsi_files): + wsi_name = wsi_file.name + if wsi_name not in wsi_work_map: + wsi_work_map[wsi_name] = [] + (wsi_work_list) = wsi_work_map[wsi_name] + work_package = (local_idxs[i], prediction_records[i], metadatas[i]) + (wsi_work_list).append(work_package) + if len((wsi_work_list)) == wsi_file.get_number_patches(): + local_idxs, predictions_records, metadata = zip(*wsi_work_list) + # Merge the prediction records into a single dictionary again. 
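+                            # Schematic view of the merge below (tensor names and values are placeholders):
+                            #   [{"hv_map": t0, ...}, {"hv_map": t1, ...}]          one record per patch
+                            #   -> {"hv_map": [t0, t1], ...}                        defaultdict of lists
+                            #   -> {"hv_map": torch.stack([t0, t1]).float(), ...}   stacked per-WSI tensors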
+ predictions = defaultdict(list) + for record in predictions_records: + for k,v in record.items(): + predictions[k].append(v) + predictions_stacked = {k: torch.stack(v).to(torch.float32) for k,v in predictions.items()} + postprocess_predictions(predictions_stacked, metadata, wsi_file, post_process_arguments) + del wsi_work_map[wsi_name] + + except KeyboardInterrupt: + pass + + def _process_wsi_filelist_multiprocessing(self, + dataloader, + post_process_arguments: PostprocessArguments): + + pbar_batches = tqdm.trange(len(dataloader), desc="Processing patch-batches") + pbar_postprocessing = tqdm.trange(post_process_arguments.n_images, desc="Postprocessed images") + + wsi_work_map = {} + + with torch.no_grad(): + with multiprocessing.Pool(post_process_arguments.n_workers) as pool: + try: + results = [] + + for batch in dataloader: + patches, local_idxs, wsi_files, metadatas = batch + patches = patches.to(self.device) + + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions_ = self.model(patches, retrieve_tokens=True) + else: + predictions_ = self.model(patches, retrieve_tokens=True) + # reshape, apply softmax to segmentation maps + #predictions = self.model.reshape_model_output(predictions_, self.device) + predictions = self.model.reshape_model_output(predictions_, 'cpu') + pbar_batches.update() + + # We break out the predictions into records (one dict per patch instead of all patches in one dict) + prediction_records = [{k: v[i] for k,v in predictions.items()} for i in range(len(local_idxs))] + + for i, wsi_file in enumerate(wsi_files): + wsi_name = wsi_file.name + if wsi_name not in wsi_work_map: + wsi_work_map[wsi_name] = [] + wsi_work_list = wsi_work_map[wsi_name] + work_package = (local_idxs[i], prediction_records[i], metadatas[i]) + wsi_work_list.append(work_package) + if len((wsi_work_list)) == wsi_file.get_number_patches(): + while len(results) >= post_process_arguments.n_workers: + n_working = len(results) + results = [result for result in results if not result.ready()] + n_done = n_working - len(results) + pbar_postprocessing.update(n_done) + pbar_batches.set_description(f"Processing patch-batches (waiting on postprocessing workers)") + sleep(post_process_arguments.wait_time) + result = pool.apply_async(f_post_processing_worker, (wsi_file, wsi_work_list, post_process_arguments)) + pbar_batches.set_description(f"Processing patch-batches") + results.append(result) + del wsi_work_map[wsi_name] + self.logger.info("Model predictions done, waiting for postprocessing to finish.") + pool.close() + pool.join() + except KeyboardInterrupt: + pool.terminate() + pool.join() + + def get_cell_predictions_with_tokens( + self, predictions: dict, magnification: int = 40 + ) -> Tuple[List[dict], torch.Tensor]: + """Take the raw predictions, apply softmax and calculate type instances + + Args: + predictions (dict): Network predictions with tokens. Keys: + magnification (int, optional): WSI magnification. Defaults to 40. 
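+
+            Note (sketch of the expected input): ``predictions`` should contain at least the
+            keys "nuclei_binary_map", "nuclei_type_map", "hv_map" and "tokens"; the first two
+            maps are passed through a softmax before the instance maps are derived.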
+ + Returns: + Tuple[List[dict], torch.Tensor]: + * List[dict]: List with a dictionary for each batch element with cell seg results + Contains bbox, contour, 2D-position, type and type_prob for each cell + * List[dict]: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim) + """ + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=-1 + ) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=-1 + ) + + # get the instance types + ( + _, + instance_types, + ) = calculate_instance_map(self.model.num_nuclei_classes, predictions, magnification=magnification) + # get the tokens + tokens = predictions["tokens"].to("cpu") + + return instance_types, tokens + + def post_process_edge_cells(self, cell_list: List[dict]) -> List[int]: + """Use the CellPostProcessor to remove multiple cells and merge due to overlap + + Args: + cell_list (List[dict]): List with cell-dictionaries. Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + + Returns: + List[int]: List with integers of cells that should be kept + """ + cell_processor = CellPostProcessor(cell_list, self.logger) + cleaned_cells = cell_processor.post_process_cells() + + return list(cleaned_cells.index.values) + + +class CellPostProcessor: + def __init__(self, cell_list: List[dict], logger: logging.Logger) -> None: + """POst-Processing a list of cells from one WSI + + Args: + cell_list (List[dict]): List with cell-dictionaries. Required keys: + * bbox + * centroid + * contour + * type_prob + * type + * patch_coordinates + * cell_status + * offset_global + logger (logging.Logger): Logger + """ + self.logger = logger + self.logger.info("Initializing Cell-Postprocessor") + + for index, cell_dict in enumerate(cell_list): + # TODO: Shouldn't it be the other way around? 
Column = x, Row = Y + x,y = cell_dict["patch_coordinates"] + cell_dict["patch_row"] = x + cell_dict["patch_col"] = y + cell_dict["patch_coordinates"] = f"{x}_{y}" + cell_dict["index"] = index + + #self.cell_df = pd.DataFrame(cell_list) + self.cell_records = cell_list + + #xs, ys = zip(*self.cell_df["patch_coordinates"]) + + #self.cell_df["patch_row"] = xs + #self.cell_df["patch_col"] = ys + #self.cell_df["patch_coordinates"] = [f"{x}_{y}" for x,y in zip(xs, ys)] + # The call to DataFrame.apply below was exceedingly slow, the list comprehension above is _much_ faster + #self.cell_df = self.cell_df.apply(convert_coordinates, axis=1) + self.mid_cells = [cell_record for cell_record in self.cell_records if cell_record["cell_status"] == 0] + self.margin_cells = [cell_record for cell_record in self.cell_records if cell_record["cell_status"] != 0] + + def post_process_cells(self) -> List[Dict]: + """Main Post-Processing coordinator, entry point + + Returns: + List[Dict]: List of records (dictionaries) with post-processed and cleaned cells + """ + self.logger.info("Finding edge-cells for merging") + cleaned_edge_cells = self._clean_edge_cells() + self.logger.info("Removal of cells detected multiple times") + cleaned_edge_cells = self._remove_overlap(cleaned_edge_cells) + + # merge with mid cells + postprocessed_cells = self.mid_cells + cleaned_edge_cells + + return postprocessed_cells + + def _clean_edge_cells(self) -> List[Dict]: + """Create a record list that just contains all margin cells (cells inside the margin, not touching the border) + and border/edge cells (touching border) with no overlapping equivalent (e.g, if patch has no neighbour) + + Returns: + List[Dict]: Cleaned record list + """ + + margin_cells = [record for record in self.cell_records if record["edge_position"] == 0] + edge_cells = [record for record in self.cell_records if record["edge_position"] == 1] + + existing_patches = list(set(record["patch_coordinates"] for record in self.margin_cells)) + + edge_cells_unique = [] + + for record in edge_cells: + edge_information = record["edge_information"] + edge_patch = edge_information["edge_patches"][0] + edge_patch = f"{edge_patch[0]}_{edge_patch[1]}" + if edge_patch not in existing_patches: + edge_cells_unique.append(record) + + cleaned_edge_cells = margin_cells + edge_cells_unique + + return cleaned_edge_cells + + def _remove_overlap(self, cleaned_edge_cells: List[Dict]) -> List[Dict]: + """Remove overlapping cells from provided cell record list + + Args: + cleaned_edge_cells (List[Dict]): List[Dict] that should be cleaned + + Returns: + List[Dict]: Cleaned cell records + """ + merged_cells = cleaned_edge_cells + + for iteration in range(20): + poly_list = [] + for i, cell_info in enumerate(merged_cells): + poly = Polygon(cell_info["contour"]) + if not poly.is_valid: + self.logger.debug("Found invalid polygon - Fixing with buffer 0") + multi = poly.buffer(0) + if isinstance(multi, MultiPolygon): + if len(multi) > 1: + poly_idx = np.argmax([p.area for p in multi]) + poly = multi[poly_idx] + poly = Polygon(poly) + else: + poly = multi[0] + poly = Polygon(poly) + else: + poly = Polygon(multi) + poly.uid = i + poly_list.append(poly) + + # use an strtree for fast querying + tree = strtree.STRtree(poly_list) + + merged_idx = deque() + iterated_cells = set() + overlaps = 0 + + for query_poly in poly_list: + if query_poly.uid not in iterated_cells: + intersected_polygons = tree.query( + query_poly + ) # this also contains a self-intersection + if ( + len(intersected_polygons) > 1 + ): # 
we have more at least one intersection with another cell + submergers = [] # all cells that overlap with query + for inter_poly in intersected_polygons: + if ( + inter_poly.uid != query_poly.uid + and inter_poly.uid not in iterated_cells + ): + if ( + query_poly.intersection(inter_poly).area + / query_poly.area + > 0.01 + or query_poly.intersection(inter_poly).area + / inter_poly.area + > 0.01 + ): + overlaps = overlaps + 1 + submergers.append(inter_poly) + iterated_cells.add(inter_poly.uid) + # catch block: empty list -> some cells are touching, but not overlapping strongly enough + if len(submergers) == 0: + merged_idx.append(query_poly.uid) + else: # merging strategy: take the biggest cell, other merging strategies needs to get implemented + selected_poly_index = np.argmax( + np.array([p.area for p in submergers]) + ) + selected_poly_uid = submergers[selected_poly_index].uid + merged_idx.append(selected_poly_uid) + else: + # no intersection, just add + merged_idx.append(query_poly.uid) + iterated_cells.add(query_poly.uid) + + self.logger.info( + f"Iteration {iteration}: Found overlap of # cells: {overlaps}" + ) + if overlaps == 0: + self.logger.info("Found all overlapping cells") + break + elif iteration == 20: + self.logger.info( + f"Not all doubled cells removed, still {overlaps} to remove. For perfomance issues, we stop iterations now. Please raise an issue in git or increase number of iterations." + ) + + merged_cells = [cleaned_edge_cells[i] for i in merged_idx] + return merged_cells + + +def convert_coordinates(row: pd.Series) -> pd.Series: + """Convert a row from x,y type to one string representation of the patch position for fast querying + Repr: x_y + + Args: + row (pd.Series): Row to be processed + + Returns: + pd.Series: Processed Row + """ + x, y = row["patch_coordinates"] + row["patch_row"] = x + row["patch_col"] = y + row["patch_coordinates"] = f"{x}_{y}" + return row + + +def get_cell_position(bbox: np.ndarray, patch_size: int = 1024) -> List[int]: + """Get cell position as a list + + Entry is 1, if cell touches the border: [top, right, down, left] + + Args: + bbox (np.ndarray): Bounding-Box of cell + patch_size (int, optional): Patch-size. Defaults to 1024. + + Returns: + List[int]: List with 4 integers for each position + """ + # bbox = 2x2 array in h, w style + # bbox[0,0] = upper position (height) + # bbox[1,0] = lower dimension (height) + # boox[0,1] = left position (width) + # bbox[1,1] = right position (width) + # bbox[:,0] -> x dimensions + top, left, down, right = False, False, False, False + if bbox[0, 0] == 0: + top = True + if bbox[0, 1] == 0: + left = True + if bbox[1, 0] == patch_size: + down = True + if bbox[1, 1] == patch_size: + right = True + position = [top, right, down, left] + position = [int(pos) for pos in position] + + return position + + +def get_cell_position_marging( + bbox: np.ndarray, patch_size: int = 1024, margin: int = 64 +) -> int: + """Get the status of the cell, describing the cell position + + A cell is either in the mid (0) or at one of the borders (1-8) + + # Numbers are assigned clockwise, starting from top left + # i.e., top left = 1, top = 2, top right = 3, right = 4, bottom right = 5 bottom = 6, bottom left = 7, left = 8 + # Mid status is denoted by 0 + + Args: + bbox (np.ndarray): Bounding Box of cell + patch_size (int, optional): Patch-Size. Defaults to 1024. + margin (int, optional): Margin-Size. Defaults to 64. 
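+
+    Example (illustrative):
+        >>> bbox = np.array([[10, 20], [60, 80]])  # upper and left edges fall inside the 64 px margin
+        >>> get_cell_position_marging(bbox, patch_size=1024, margin=64)
+        1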
+ + Returns: + int: Cell Status + """ + cell_status = None + if np.max(bbox) > patch_size - margin or np.min(bbox) < margin: + if bbox[0, 0] < margin: + # top left, top or top right + if bbox[0, 1] < margin: + # top left + cell_status = 1 + elif bbox[1, 1] > patch_size - margin: + # top right + cell_status = 3 + else: + # top + cell_status = 2 + elif bbox[1, 1] > patch_size - margin: + # top right, right or bottom right + if bbox[1, 0] > patch_size - margin: + # bottom right + cell_status = 5 + else: + # right + cell_status = 4 + elif bbox[1, 0] > patch_size - margin: + # bottom right, bottom, bottom left + if bbox[0, 1] < margin: + # bottom left + cell_status = 7 + else: + # bottom + cell_status = 6 + elif bbox[0, 1] < margin: + # bottom left, left, top left, but only left is left + cell_status = 8 + else: + cell_status = 0 + + return cell_status + + +def get_edge_patch(position, row, col): + # row starting on bottom or on top? + if position == [1, 0, 0, 0]: + # top + return [[row - 1, col]] + if position == [1, 1, 0, 0]: + # top and right + return [[row - 1, col], [row - 1, col + 1], [row, col + 1]] + if position == [0, 1, 0, 0]: + # right + return [[row, col + 1]] + if position == [0, 1, 1, 0]: + # right and down + return [[row, col + 1], [row + 1, col + 1], [row + 1, col]] + if position == [0, 0, 1, 0]: + # down + return [[row + 1, col]] + if position == [0, 0, 1, 1]: + # down and left + return [[row + 1, col], [row + 1, col - 1], [row, col - 1]] + if position == [0, 0, 0, 1]: + # left + return [[row, col - 1]] + if position == [1, 0, 0, 1]: + # left and top + return [[row, col - 1], [row - 1, col - 1], [row - 1, col]] + + +# CLI +class InferenceWSIParser: + """Parser""" + + def __init__(self) -> None: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Perform CellViT inference for given run-directory with model checkpoints and logs", + ) + requiredNamed = parser.add_argument_group("required named arguments") + requiredNamed.add_argument( + "--model", + type=str, + help="Model checkpoint file that is used for inference", + required=True, + ) + parser.add_argument( + "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0 + ) + parser.add_argument( + "--magnification", + type=float, + help="Network magnification. Is used for checking patch magnification such that we use the correct resolution for network. Default: 40", + default=40, + ) + parser.add_argument( + "--enforce_amp", + action="store_true", + help="Whether to use mixed precision for inference (enforced). Otherwise network default training settings are used." + " Default: False", + ) + parser.add_argument( + "--torch_compile", + action="store_true", + help="Whether to use torch.compile to compile the model before inference. Has an large overhead for single predictions but leads to a significant speedup when predicting on multiple images." + " Default: False", + ) + + parser.add_argument( + "--batch_size", + type=int, + help="Inference batch-size. Default: 8", + default=8, + ) + + parser.add_argument( + "--n_postprocess_workers", + type=int, + help="Number of processes to dedicate to post processing. Set to 0 to disable multiprocessing for post processing. Default: 8", + default=8, + ) + + parser.add_argument( + "--n_dataloader_workers", + type=int, + help="Number of workers to use for the pytorch patch dataloader. 
Default: 4", + default=4, + ) + + parser.add_argument( + "--outdir_subdir", + type=str, + help="If provided, a subdir with the given name is created in the cell_detection folder where the results are stored. Default: None", + default=None, + ) + parser.add_argument( + "--geojson", + action="store_true", + help="Set this flag to export results as additional geojson files for loading them into Software like QuPath.", + ) + + parser.add_argument( + "--overwrite", + action="store_true", + help=f"If set, include all found pre-processed files even if they include a \"{FLAG_FILE_NAME}\" file.", + ) + + # subparsers for either loading a WSI or a WSI folder + + # WSI + subparsers = parser.add_subparsers( + dest="command", + description="Main run command for either performing inference on single WSI-file or on whole dataset", + ) + subparser_wsi = subparsers.add_parser( + "process_wsi", description="Process a single WSI file" + ) + subparser_wsi.add_argument( + "--wsi_path", + type=str, + help="Path to WSI file", + ) + subparser_wsi.add_argument( + "--patched_slide_path", + type=str, + help="Path to patched WSI file (specific WSI file, not parent path of patched slide dataset)", + ) + + # Dataset + subparser_dataset = subparsers.add_parser( + "process_dataset", + description="Process a whole dataset", + ) + subparser_dataset.add_argument( + "--wsi_paths", type=str, help="Path to the folder where all WSI are stored" + ) + subparser_dataset.add_argument( + "--patch_dataset_path", + type=str, + help="Path to the folder where the patch dataset is stored", + ) + subparser_dataset.add_argument( + "--filelist", + type=str, + help="Filelist with WSI to process. Must be a .csv file with one row denoting the filenames (named 'Filename')." + "If not provided, all WSI files with given ending in the filelist are processed.", + default=None, + ) + subparser_dataset.add_argument( + "--wsi_extension", + type=str, + help="The extension types used for the WSI files, see configs.python.config (WSI_EXT)", + default="svs", + ) + + self.parser = parser + + def parse_arguments(self) -> dict: + opt = self.parser.parse_args() + return vars(opt) + + +def check_wsi(wsi: WSI, magnification: float = 40.0): + """Check if provided patched WSI is having the right settings + + Args: + wsi (WSI): WSI to check + magnification (float, optional): Check magnification. Defaults to 40.0. + + Raises: + RuntimeError: The magnification is not matching to the network input magnification. + RuntimeError: The patch-size is not devisible by 256. + RunTimeError: The patch-size is not 1024 + RunTimeError: The overlap is not 64px sized + """ + if wsi.metadata["magnification"] is not None: + patch_magnification = float(wsi.metadata["magnification"]) + else: + patch_magnification = float( + float(wsi.metadata["base_magnification"]) / wsi.metadata["downsampling"] + ) + patch_size = int(wsi.metadata["patch_size"]) + + if patch_magnification != magnification: + raise RuntimeError( + "The magnification is not matching to the network input magnification." 
+ ) + if (patch_size % 256) != 0: + raise RuntimeError("The patch-size must be devisible by 256.") + if wsi.metadata["patch_size"] != 1024: + raise RuntimeError("The patch-size must be 1024.") + if wsi.metadata["patch_overlap"] != 64: + raise RuntimeError("The patch-overlap must be 64") + + +if __name__ == "__main__": + configuration_parser = InferenceWSIParser() + configuration = configuration_parser.parse_arguments() + command = configuration["command"] + + cell_segmentation = CellSegmentationInference( + model_path=configuration["model"], + gpu=configuration["gpu"], + enforce_mixed_precision=configuration["enforce_amp"], + ) + + if command.lower() == "process_wsi": + cell_segmentation.logger.info("Processing single WSI file") + wsi_path = Path(configuration["wsi_path"]) + wsi_name = wsi_path.stem + wsi_file = WSI( + name=wsi_name, + patient=wsi_name, + slide_path=wsi_path, + patched_slide_path=configuration["patched_slide_path"], + ) + check_wsi(wsi=wsi_file, magnification=configuration["magnification"]) + cell_segmentation.process_wsi( + wsi_file, + subdir_name=configuration["outdir_subdir"], + geojson=configuration["geojson"], + batch_size=configuration["batch_size"], + ) + + elif command.lower() == "process_dataset": + cell_segmentation.logger.info("Processing whole dataset") + if configuration["filelist"] is not None: + if Path(configuration["filelist"]).suffix != ".csv": + raise ValueError("Filelist must be a .csv file!") + cell_segmentation.logger.info( + f"Loading files from filelist {configuration['filelist']}" + ) + wsi_filelist = load_wsi_files_from_csv( + csv_path=configuration["filelist"], + wsi_extension=configuration["wsi_extension"], + ) + else: + cell_segmentation.logger.info( + f"Loading all files from folder {configuration['wsi_paths']}. No filelist provided." 
+ ) + wsi_filelist = [ + f + for f in sorted( + Path(configuration["wsi_paths"]).glob( + f"**/*.{configuration['wsi_extension']}" + ) + ) + ] + #if not configuration["overwrite"]: + # wsi_filelist = filter_processed_file(wsi_filelist) + + cell_segmentation.process_wsi_filelist( + wsi_filelist, + subdir_name=configuration["outdir_subdir"], + geojson=configuration["geojson"], + batch_size=configuration["batch_size"], + torch_compile=configuration["torch_compile"], + n_postprocess_workers=configuration["n_postprocess_workers"], + n_dataloader_workers=configuration["n_dataloader_workers"], + overwrite=configuration["overwrite"] + ) \ No newline at end of file diff --git a/cell_segmentation/inference/inference_cellvit_experiment_monuseg.py b/cell_segmentation/inference/inference_cellvit_experiment_monuseg.py new file mode 100644 index 0000000000000000000000000000000000000000..4344c0da041b1b86d3afe5d526e8c0b5d1fa5ef8 --- /dev/null +++ b/cell_segmentation/inference/inference_cellvit_experiment_monuseg.py @@ -0,0 +1,1002 @@ +# -*- coding: utf-8 -*- +# CellViT Inference Method for Patch-Wise Inference on MoNuSeg dataset +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import argparse +import inspect +import os +import sys + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) +parentdir = os.path.dirname(parentdir) +sys.path.insert(0, parentdir) + +from base_ml.base_experiment import BaseExperiment + +BaseExperiment.seed_run(1232) + +from pathlib import Path +from typing import List, Union, Tuple + +import albumentations as A +import cv2 as cv2 +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import tqdm +from einops import rearrange +from matplotlib import pyplot as plt +from PIL import Image, ImageDraw +from skimage.color import rgba2rgb +from torch.utils.data import DataLoader +from torchmetrics.functional import dice +from torchmetrics.functional.classification import binary_jaccard_index +from torchvision import transforms + +from cell_segmentation.datasets.monuseg import MoNuSegDataset +from cell_segmentation.inference.cell_detection import ( + CellPostProcessor, + get_cell_position, + get_cell_position_marging, + get_edge_patch, +) +from cell_segmentation.utils.metrics import ( + cell_detection_scores, + get_fast_pq, + remap_label, +) +from cell_segmentation.utils.post_proc_cellvit import calculate_instances +from cell_segmentation.utils.tools import pair_coordinates +from models.segmentation.cell_segmentation.cellvit import CellViT + +from utils.logger import Logger +from utils.tools import unflatten_dict + + +class MoNuSegInference: + def __init__( + self, + model_path: Union[Path, str], + dataset_path: Union[Path, str], + outdir: Union[Path, str], + gpu: int, + patching: bool = False, + overlap: int = 0, + magnification: int = 40, + ) -> None: + """Cell Segmentation Inference class for MoNuSeg dataset + + Args: + model_path (Union[Path, str]): Path to model checkpoint + dataset_path (Union[Path, str]): Path to dataset + outdir (Union[Path, str]): Output directory + gpu (int): CUDA GPU id to use + patching (bool, optional): If dataset should be pacthed to 256px. Defaults to False. + overlap (int, optional): If overlap should be used. Recommed (next to no overlap) is 64 px. Overlap in px. + If overlap is used, patching must be True. Defaults to 0. 
+ magnification (int, optional): Dataset magnification. Defaults to 40. + """ + self.model_path = Path(model_path) + self.device = "cpu" + self.magnification = magnification + self.overlap = overlap + self.patching = patching + if overlap > 0: + assert patching, "Patching must be activated" + + # self.__instantiate_logger() + self.__load_model() + self.__load_inference_transforms() + self.__setup_amp() + self.inference_dataset = MoNuSegDataset( + dataset_path=dataset_path, + transforms=self.inference_transforms, + patching=patching, + overlap=overlap, + ) + self.inference_dataloader = DataLoader( + self.inference_dataset, + batch_size=1, + num_workers=8, + pin_memory=False, + shuffle=False, + ) + + def __instantiate_logger(self) -> None: + """Instantiate logger + + Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log + """ + logger = Logger( + level="INFO", + log_dir=self.outdir, + comment="inference_monuseg", + use_timestamp=False, + formatter="%(message)s", + ) + self.logger = logger.create_logger() + + def __load_model(self) -> None: + """Load model and checkpoint and load the state_dict""" + self.logger.info(f"Loading model: {self.model_path}") + + model_checkpoint = torch.load(self.model_path, map_location="cpu") + + # unpack checkpoint + self.run_conf = unflatten_dict(model_checkpoint["config"], ".") + self.model = self.__get_model(model_type=model_checkpoint["arch"]) + self.logger.info( + self.model.load_state_dict(model_checkpoint["model_state_dict"]) + ) + self.model.eval() + self.model.to(self.device) + + def __get_model( + self, model_type: str + ) -> Union[ + CellViT, + ]: + """Return the trained model for inference + + Args: + model_type (str): Name of the model. Must either be one of: + CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared + + Returns: + Union[CellViT, CellViTShared, CellViT256, CellViTShared, CellViTSAM, CellViTSAMShared]: Model + """ + implemented_models = [ + "CellViT", + ] + if model_type not in implemented_models: + raise NotImplementedError( + f"Unknown model type. 
Please select one of {implemented_models}" + ) + + if model_type in ["CellViT", "CellViTShared"]: + if model_type == "CellViT": + model_class = CellViT + model = model_class( + model256_path=self.run_conf["model"].get("pretrained_encoder"), + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + #embed_dim=self.run_conf["model"]["embed_dim"], + in_channels=self.run_conf["model"].get("input_channels", 3), + #depth=self.run_conf["model"]["depth"], + #num_heads=self.run_conf["model"]["num_heads"], + #extract_layers=self.run_conf["model"]["extract_layers"], + #regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + + return model + + def __load_inference_transforms(self) -> None: + """Load the inference transformations from the run_configuration""" + self.logger.info("Loading inference transformations") + + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + self.inference_transforms = A.Compose([A.Normalize(mean=mean, std=std)]) + + def __setup_amp(self) -> None: + """Setup automated mixed precision (amp) for inference.""" + self.mixed_precision = self.run_conf["training"].get("mixed_precision", False) + + def run_inference(self, generate_plots: bool = False) -> None: + """Run inference + + Args: + generate_plots (bool, optional): If plots should be generated. Defaults to False. + """ + self.model.eval() + + # setup score tracker + image_names = [] # image names as str + binary_dice_scores = [] # binary dice scores per image + binary_jaccard_scores = [] # binary jaccard scores per image + pq_scores = [] # pq-scores per image + dq_scores = [] # dq-scores per image + sq_scores = [] # sq-scores per image + f1_ds = [] # f1-scores per image + prec_ds = [] # precision per image + rec_ds = [] # recall per image + + inference_loop = tqdm.tqdm( + enumerate(self.inference_dataloader), total=len(self.inference_dataloader) + ) + + with torch.no_grad(): + for image_idx, batch in inference_loop: + image_metrics = self.inference_step( + model=self.model, batch=batch, generate_plots=generate_plots + ) + image_names.append(image_metrics["image_name"]) + binary_dice_scores.append(image_metrics["binary_dice_score"]) + binary_jaccard_scores.append(image_metrics["binary_jaccard_score"]) + pq_scores.append(image_metrics["pq_score"]) + dq_scores.append(image_metrics["dq_score"]) + sq_scores.append(image_metrics["sq_score"]) + f1_ds.append(image_metrics["f1_d"]) + prec_ds.append(image_metrics["prec_d"]) + rec_ds.append(image_metrics["rec_d"]) + + # average metrics for dataset + binary_dice_scores = np.array(binary_dice_scores) + binary_jaccard_scores = np.array(binary_jaccard_scores) + pq_scores = np.array(pq_scores) + dq_scores = np.array(dq_scores) + sq_scores = np.array(sq_scores) + f1_ds = np.array(f1_ds) + prec_ds = np.array(prec_ds) + rec_ds = np.array(rec_ds) + + dataset_metrics = { + "Binary-Cell-Dice-Mean": float(np.nanmean(binary_dice_scores)), + "Binary-Cell-Jacard-Mean": float(np.nanmean(binary_jaccard_scores)), + "bPQ": float(np.nanmean(pq_scores)), + "bDQ": float(np.nanmean(dq_scores)), + "bSQ": float(np.nanmean(sq_scores)), + "f1_detection": float(np.nanmean(f1_ds)), + "precision_detection": float(np.nanmean(prec_ds)), + "recall_detection": float(np.nanmean(rec_ds)), + } 
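+        # dataset_metrics now holds one scalar per dataset-level metric, e.g. (schematic values):
+        #   {"Binary-Cell-Dice-Mean": 0.83, "bPQ": 0.60, "f1_detection": 0.87, ...}
+        # np.nanmean is used above so that images with NaN scores do not skew the averages.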
+ self.logger.info(f"{20*'*'} Binary Dataset metrics {20*'*'}") + [self.logger.info(f"{f'{k}:': <25} {v}") for k, v in dataset_metrics.items()] + + def inference_step( + self, model: nn.Module, batch: object, generate_plots: bool = False + ) -> dict: + """Inference step + + Args: + model (nn.Module): Training model, must return "nuclei_binary_map", "nuclei_type_map", "tissue_type" and "hv_map" + batch (object): Training batch, consisting of images ([0]), masks ([1]), tissue_types ([2]) and figure filenames ([3]) + generate_plots (bool, optional): If plots should be generated. Defaults to False. + + Returns: + Dict: Image_metrics with keys: + + """ + img = batch[0].to(self.device) + if len(img.shape) > 4: + img = img[0] + img = rearrange(img, "c i j w h -> (i j) c w h") + mask = batch[1] + image_name = list(batch[2]) + mask["instance_types"] = calculate_instances( + torch.unsqueeze(mask["nuclei_binary_map"], dim=0), mask["instance_map"] + ) + + model.zero_grad() + + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions_ = model.forward(img) + else: + predictions_ = model.forward(img) + + if self.overlap == 0: + if self.patching: + predictions_ = self.post_process_patching(predictions_) + predictions = self.get_cell_predictions(predictions_) + image_metrics = self.calculate_step_metric( + predictions=predictions, gt=mask, image_name=image_name + ) + + elif self.patching and self.overlap != 0: + cell_list = self.post_process_patching_overlap( + predictions_, overlap=self.overlap + ) + image_metrics, predictions = self.calculate_step_metric_overlap( + cell_list=cell_list, gt=mask, image_name=image_name + ) + + scores = [ + float(image_metrics["binary_dice_score"].detach().cpu()), + float(image_metrics["binary_jaccard_score"].detach().cpu()), + image_metrics["pq_score"], + ] + if generate_plots: + if self.overlap == 0 and self.patching: + batch_size = img.shape[0] + num_elems = int(np.sqrt(batch_size)) + img = torch.permute(img, (0, 2, 3, 1)) + img = rearrange( + img, "(i j) h w c -> (i h) (j w) c", i=num_elems, j=num_elems + ) + img = torch.unsqueeze(img, dim=0) + img = torch.permute(img, (0, 3, 1, 2)) + elif self.overlap != 0 and self.patching: + h, w = mask["nuclei_binary_map"].shape[1:] + total_img = torch.zeros((3, h, w)) + decomposed_patch_num = int(np.sqrt(img.shape[0])) + for i in range(decomposed_patch_num): + for j in range(decomposed_patch_num): + x_global = i * 256 - i * self.overlap + y_global = j * 256 - j * self.overlap + total_img[ + :, x_global : x_global + 256, y_global : y_global + 256 + ] = img[i * decomposed_patch_num + j] + img = total_img + img = img[None, :, :, :] + self.plot_results( + img=img, + predictions=predictions, + ground_truth=mask, + img_name=image_name[0], + scores=scores, + ) + + return image_metrics + + def run_single_image_inference(self, model: nn.Module, image: np.ndarray, generate_plots: bool = True, + ) -> dict: + """Inference step + + Args: + model (nn.Module): Training model, must return "nuclei_binary_map", "nuclei_type_map", "tissue_type" and "hv_map" + batch (object): Training batch, consisting of images ([0]), masks ([1]), tissue_types ([2]) and figure filenames ([3]) + generate_plots (bool, optional): If plots should be generated. Defaults to False. 
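+
+        Example (illustrative; ``inference`` and the file name are placeholders):
+            >>> img = cv2.cvtColor(cv2.imread("example_tile.png"), cv2.COLOR_BGR2RGB)
+            >>> output = inference.run_single_image_inference(inference.model, img)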
+ + Returns: + Dict: Image_metrics with keys: + + """ + # set image transforms + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + transforms = A.Compose([A.Normalize(mean=mean, std=std)]) + + transformed_img = transforms(image=image)["image"] + image = torch.from_numpy(transformed_img).permute(2, 0, 1).unsqueeze(0).float() + img = image.to(self.device) + + + model.zero_grad() + predictions_ = model.forward(img) + + if self.overlap == 0: + if self.patching: + predictions_ = self.post_process_patching(predictions_) + predictions = self.get_cell_predictions(predictions_) + + + + image_output = self.plot_results( + img=img, + predictions=predictions + ) + + return image_output + + + def calculate_step_metric( + self, predictions: dict, gt: dict, image_name: List[str] + ) -> dict: + """Calculate step metric for one MoNuSeg image. + + Args: + predictions (dict): Necssary keys: + * instance_map: Pixel-wise nuclear instance segmentation. + Each instance has its own integer, starting from 1. Shape: (1, H, W) + * nuclei_binary_map: Softmax output for binary nuclei branch. Shape: (1, 2, H, W) + * instance_types: Instance type prediction list. + Each list entry stands for one image. Each list entry is a dictionary with the following structure: + Main Key is the nuclei instance number (int), with a dict as value. + For each instance, the dictionary contains the keys: bbox (bounding box), centroid (centroid coordinates), + contour, type_prob (probability), type (nuclei type). Actually just one list entry, as we expecting batch-size=1 (one image) + gt (dict): Necessary keys: + * instance_map + * nuclei_binary_map + * instance_types + image_name (List[str]): Name of the image, list with [str]. List is necessary for backward compatibility + + Returns: + dict: Image metrics for one MoNuSeg image. 
Keys are: + * image_name + * binary_dice_score + * binary_jaccard_score + * pq_score + * dq_score + * sq_score + * f1_d + * prec_d + * rec_d + """ + predictions["instance_map"] = predictions["instance_map"].detach().cpu() + instance_maps_gt = gt["instance_map"].detach().cpu() + + pred_binary_map = torch.argmax(predictions["nuclei_binary_map"], dim=1) + target_binary_map = gt["nuclei_binary_map"].to(self.device) + + cell_dice = ( + dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0) + .detach() + .cpu() + ) + cell_jaccard = ( + binary_jaccard_index( + preds=pred_binary_map, + target=target_binary_map, + ) + .detach() + .cpu() + ) + remapped_instance_pred = remap_label(predictions["instance_map"]) + remapped_gt = remap_label(instance_maps_gt) + [dq, sq, pq], _ = get_fast_pq(true=remapped_gt, pred=remapped_instance_pred) + + # detection scores + true_centroids = np.array( + [v["centroid"] for k, v in gt["instance_types"][0].items()] + ) + pred_centroids = np.array( + [v["centroid"] for k, v in predictions["instance_types"].items()] + ) + if true_centroids.shape[0] == 0: + true_centroids = np.array([[0, 0]]) + if pred_centroids.shape[0] == 0: + pred_centroids = np.array([[0, 0]]) + + if self.magnification == 40: + pairing_radius = 12 + else: + pairing_radius = 6 + paired, unpaired_true, unpaired_pred = pair_coordinates( + true_centroids, pred_centroids, pairing_radius + ) + f1_d, prec_d, rec_d = cell_detection_scores( + paired_true=paired[:, 0], + paired_pred=paired[:, 1], + unpaired_true=unpaired_true, + unpaired_pred=unpaired_pred, + ) + + image_metrics = { + "image_name": image_name, + "binary_dice_score": cell_dice, + "binary_jaccard_score": cell_jaccard, + "pq_score": pq, + "dq_score": dq, + "sq_score": sq, + "f1_d": f1_d, + "prec_d": prec_d, + "rec_d": rec_d, + } + + return image_metrics + + def convert_binary_type(self, instance_types: dict) -> dict: + """Clean nuclei detection from type prediction to binary prediction + + Args: + instance_types (dict): Dictionary with cells + + Returns: + dict: Cleaned with just one class + """ + cleaned_instance_types = {} + for key, elem in instance_types.items(): + if elem["type"] == 0: + continue + else: + elem["type"] = 0 + cleaned_instance_types[key] = elem + + return cleaned_instance_types + + def get_cell_predictions(self, predictions: dict) -> dict: + """Reshaping predictions and calculating instance maps and instance types + + Args: + predictions (dict): Dictionary with the following keys: + * tissue_types: Logit tissue prediction output. Shape: (B, num_tissue_classes) + * nuclei_binary_map: Logit output for binary nuclei prediction branch. Shape: (B, H, W, 2) + * hv_map: Logit output for hv-prediction. Shape: (B, 2, H, W) + * nuclei_type_map: Logit output for nuclei instance-prediction. Shape: (B, num_nuclei_classes, H, W) + + Returns: + dict: + * nuclei_binary_map: Softmax binary prediction. Shape: (B, 2, H, W + * nuclei_type_map: Softmax nuclei type map. Shape: (B, num_nuclei_classes, H, W) + * hv_map: Logit output for hv-prediction. Shape: (B, 2, H, W) + * tissue_types: Logit tissue prediction output. Shape: (B, num_tissue_classes) + * instance_map: Instance map, each instance has one integer. Shape: (B, H, W) + * instance_types: Instance type dict, cleaned. 
Keys: + 'bbox', 'centroid', 'contour', 'type_prob', 'type' + """ + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=1 + ) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=1 + ) + ( + predictions["instance_map"], + predictions["instance_types"], + ) = self.model.calculate_instance_map( + predictions, magnification=self.magnification + ) + predictions["instance_types"] = self.convert_binary_type( + predictions["instance_types"][0] + ) + + return predictions + + def post_process_patching(self, predictions: dict) -> dict: + """Post-process patching by reassamble (without overlap) stitched predictions to one big image prediction + + Args: + predictions (dict): Necessary keys: + * nuclei_binary_map: Logit binary prediction. Shape: (B, 2, 256, 256) + * hv_map: Logit output for hv-prediction. Shape: (B, 2, H, W) + * nuclei_type_map: Logit output for nuclei instance-prediction. Shape: (B, num_nuclei_classes, 256, 256) + Returns: + dict: Return elements that have been changed: + * nuclei_binary_map: Shape: (1, 2, H, W) + * hv_map: Shape: (1, 2, H, W) + * nuclei_type_map: (1, num_nuclei_classes, H, W) + """ + batch_size = predictions["nuclei_binary_map"].shape[0] + num_elems = int(np.sqrt(batch_size)) + predictions["nuclei_binary_map"] = rearrange( + predictions["nuclei_binary_map"], + "(i j) d w h ->d (i w) (j h)", + i=num_elems, + j=num_elems, + ) + predictions["hv_map"] = rearrange( + predictions["hv_map"], + "(i j) d w h -> d (i w) (j h)", + i=num_elems, + j=num_elems, + ) + predictions["nuclei_type_map"] = rearrange( + predictions["nuclei_type_map"], + "(i j) d w h -> d (i w) (j h)", + i=num_elems, + j=num_elems, + ) + + predictions["nuclei_binary_map"] = torch.unsqueeze( + predictions["nuclei_binary_map"], dim=0 + ) + predictions["hv_map"] = torch.unsqueeze(predictions["hv_map"], dim=0) + predictions["nuclei_type_map"] = torch.unsqueeze( + predictions["nuclei_type_map"], dim=0 + ) + + return predictions + + def post_process_patching_overlap(self, predictions: dict, overlap: int) -> List: + """Post processing overlapping cells by merging overlap. Use same merging strategy as for our + + Args: + predictions (dict): Predictions with necessary keys: + * nuclei_binary_map: Binary nuclei prediction, Shape: (B, 2, H, W) + * nuclei_type_map: Nuclei type prediction, Shape: (B, num_nuclei_classes, H, W) + * hv_map: Binary HV Map predictions. Shape: (B, 2, H, W) + overlap (int): Used overlap as integer + + Returns: + List: Cleaned (merged) cell list with each entry beeing one detected cell with dictionary as entries. + """ + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=1 + ) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=1 + ) + ( + predictions["instance_map"], + predictions["instance_types"], + ) = self.model.calculate_instance_map( + predictions, magnification=self.magnification + ) + predictions = self.merge_predictions(predictions, overlap) + + return predictions + + def merge_predictions(self, predictions: dict, overlap: int) -> list: + """Merge overlapping cell predictions + + Args: + predictions (dict): Predictions with necessary keys: + * nuclei_binary_map: Binary nuclei prediction, Shape: (B, 2, H, W) + * instance_types: Instance type dictionary with cell entries + overlap (int): Used overlap as integer + + Returns: + list: Cleaned (merged) cell list with each entry beeing one detected cell with dictionary as entries. 
+ """ + cell_list = [] + decomposed_patch_num = int(np.sqrt(predictions["nuclei_binary_map"].shape[0])) + + for i in range(decomposed_patch_num): + for j in range(decomposed_patch_num): + x_global = i * 256 - i * overlap + y_global = j * 256 - j * overlap + patch_instance_types = predictions["instance_types"][ + i * decomposed_patch_num + j + ] + for cell in patch_instance_types.values(): + if cell["type"] == 0: + continue + offset_global = np.array([x_global, y_global]) + centroid_global = cell["centroid"] + np.flip(offset_global) + contour_global = cell["contour"] + np.flip(offset_global) + bbox_global = cell["bbox"] + offset_global + cell_dict = { + "bbox": bbox_global.tolist(), + "centroid": centroid_global.tolist(), + "contour": contour_global.tolist(), + "type_prob": cell["type_prob"], + "type": cell["type"], + "patch_coordinates": [ + i, # row + j, # col + ], + "cell_status": get_cell_position_marging(cell["bbox"], 256, 64), + "offset_global": offset_global.tolist(), + } + if np.max(cell["bbox"]) == 256 or np.min(cell["bbox"]) == 0: + position = get_cell_position(cell["bbox"], 256) + cell_dict["edge_position"] = True + cell_dict["edge_information"] = {} + cell_dict["edge_information"]["position"] = position + cell_dict["edge_information"]["edge_patches"] = get_edge_patch( + position, i, j # row, col + ) + else: + cell_dict["edge_position"] = False + cell_list.append(cell_dict) + self.logger.info(f"Detected cells before cleaning: {len(cell_list)}") + cell_processor = CellPostProcessor(cell_list, self.logger) + cleaned_cells = cell_processor.post_process_cells() + cell_list = [cell_list[idx_c] for idx_c in cleaned_cells.index.values] + self.logger.info(f"Detected cells after cleaning: {len(cell_list)}") + + return cell_list + + def calculate_step_metric_overlap( + self, cell_list: List[dict], gt: dict, image_name: List[str] + ) -> Tuple[dict, dict]: + """Calculate step metric and return merged predictions for plotting + + Args: + cell_list (List[dict]): List with cell dicts + gt (dict): Ground-Truth dictionary + image_name (List[str]): Image Name as list with just one entry + + Returns: + Tuple[dict, dict]: + dict: Image metrics for one MoNuSeg image. 
Keys are:
+                * image_name
+                * binary_dice_score
+                * binary_jaccard_score
+                * pq_score
+                * dq_score
+                * sq_score
+                * f1_d
+                * prec_d
+                * rec_d
+            dict: Predictions, reshaped for one image and for plotting
+                * nuclei_binary_map: Shape (1, 2, 1024, 1024) or (1, 2, 512, 512)
+                * instance_map: Shape (1, 1024, 1024) or (1, 512, 512)
+                * instance_types: Dict for each nucleus
+        """
+        predictions = {}
+        h, w = gt["nuclei_binary_map"].shape[1:]
+        instance_type_map = np.zeros((h, w), dtype=np.int32)
+
+        for instance, cell in enumerate(cell_list):
+            contour = np.array(cell["contour"])[None, :, :]
+            # label instances starting from 1, since 0 is reserved for background
+            cv2.fillPoly(instance_type_map, contour, instance + 1)
+
+        predictions["instance_map"] = torch.Tensor(instance_type_map)
+        instance_maps_gt = gt["instance_map"].detach().cpu()
+
+        pred_arr = np.clip(instance_type_map, 0, 1)
+        target_binary_map = gt["nuclei_binary_map"].to(self.device).squeeze()
+        predictions["nuclei_binary_map"] = pred_arr
+
+        predictions["instance_types"] = cell_list
+
+        cell_dice = (
+            dice(
+                preds=torch.Tensor(pred_arr).to(self.device),
+                target=target_binary_map,
+                ignore_index=0,
+            )
+            .detach()
+            .cpu()
+        )
+        cell_jaccard = (
+            binary_jaccard_index(
+                preds=torch.Tensor(pred_arr).to(self.device),
+                target=target_binary_map,
+            )
+            .detach()
+            .cpu()
+        )
+        remapped_instance_pred = remap_label(predictions["instance_map"])[None, :, :]
+        remapped_gt = remap_label(instance_maps_gt)
+        [dq, sq, pq], _ = get_fast_pq(true=remapped_gt, pred=remapped_instance_pred)
+
+        # detection scores
+        true_centroids = np.array(
+            [v["centroid"] for k, v in gt["instance_types"][0].items()]
+        )
+        pred_centroids = np.array([v["centroid"] for v in cell_list])
+        if true_centroids.shape[0] == 0:
+            true_centroids = np.array([[0, 0]])
+        if pred_centroids.shape[0] == 0:
+            pred_centroids = np.array([[0, 0]])
+
+        if self.magnification == 40:
+            pairing_radius = 12
+        else:
+            pairing_radius = 6
+        paired, unpaired_true, unpaired_pred = pair_coordinates(
+            true_centroids, pred_centroids, pairing_radius
+        )
+        f1_d, prec_d, rec_d = cell_detection_scores(
+            paired_true=paired[:, 0],
+            paired_pred=paired[:, 1],
+            unpaired_true=unpaired_true,
+            unpaired_pred=unpaired_pred,
+        )
+
+        image_metrics = {
+            "image_name": image_name,
+            "binary_dice_score": cell_dice,
+            "binary_jaccard_score": cell_jaccard,
+            "pq_score": pq,
+            "dq_score": dq,
+            "sq_score": sq,
+            "f1_d": f1_d,
+            "prec_d": prec_d,
+            "rec_d": rec_d,
+        }
+
+        # align to common shapes
+        cleaned_instance_types = {
+            k + 1: v for k, v in enumerate(predictions["instance_types"])
+        }
+        for cell, results in cleaned_instance_types.items():
+            results["contour"] = np.array(results["contour"])
+            cleaned_instance_types[cell] = results
+        predictions["instance_types"] = cleaned_instance_types
+        predictions["instance_map"] = predictions["instance_map"][None, :, :]
+        predictions["nuclei_binary_map"] = F.one_hot(
+            torch.Tensor(predictions["nuclei_binary_map"]).type(torch.int64),
+            num_classes=2,
+        ).permute(2, 0, 1)[None, :, :, :]
+
+        return image_metrics, predictions
+
+    def plot_results(
+        self,
+        img: torch.Tensor,
+        predictions: dict,
+    ) -> None:
+        """Plot MoNuSeg results
+
+        Args:
+            img (torch.Tensor): Image as torch.Tensor, with Shape (1, 3, 1024, 1024) or (1, 3, 512, 512)
+            predictions (dict): Prediction dictionary. Necessary keys:
+                * nuclei_binary_map: Shape (1, 2, 1024, 1024) or (1, 2, 512, 512)
+                * instance_map: Shape (1, 1024, 1024) or (1, 512, 512)
+                * instance_types: List[dict], but just one entry in list
+            ground_truth (dict): Ground-Truth dictionary.
Necessary keys: + * nuclei_binary_map: (1, 1024, 1024) or or (1, 512, 512) + * instance_map: (1, 1024, 1024) or or (1, 512, 512) + * instance_types: List[dict], but just one entry in list + img_name (str): Image name as string + outdir (Path): Output directory for storing + scores (List[float]): Scores as list [Dice, Jaccard, bPQ] + """ + + predictions["nuclei_binary_map"] = predictions["nuclei_binary_map"].permute( + 0, 2, 3, 1 + ) + + h = predictions["instance_map"].shape[1] + w = predictions["instance_map"].shape[2] + + # process image and other maps + sample_image = img.permute(0, 2, 3, 1).contiguous().cpu().numpy() + + pred_sample_binary_map = ( + predictions["nuclei_binary_map"][:, :, :, 1].detach().cpu().numpy() + )[0] + pred_sample_instance_maps = ( + predictions["instance_map"].detach().cpu().numpy()[0] + ) + + + binary_cmap = plt.get_cmap("Greys_r") + instance_map = plt.get_cmap("viridis") + + # invert the normalization of the sample images + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + inv_normalize = transforms.Normalize( + mean=[-0.5 / mean[0], -0.5 / mean[1], -0.5 / mean[2]], + std=[1 / std[0], 1 / std[1], 1 / std[2]], + ) + inv_samples = inv_normalize(torch.tensor(sample_image).permute(0, 3, 1, 2)) + sample_image = inv_samples.permute(0, 2, 3, 1).detach().cpu().numpy()[0] + + # start overlaying on image + placeholder = np.zeros(( h, 4 * w, 3)) + # orig image + placeholder[:h, :w, :3] = sample_image + # binary prediction + placeholder[:h, w : 2 * w, :3] = rgba2rgb( + binary_cmap(pred_sample_binary_map * 255) + ) + # instance_predictions + placeholder[:h, 2 * w : 3 * w, :3] = rgba2rgb( + instance_map( + (pred_sample_instance_maps - np.min(pred_sample_instance_maps)) + / ( + np.max(pred_sample_instance_maps) + - np.min(pred_sample_instance_maps + 1e-10) + ) + ) + ) + # pred + pred_contours_polygon = [ + v["contour"] for v in predictions["instance_types"].values() + ] + pred_contours_polygon = [ + list(zip(poly[:, 0], poly[:, 1])) for poly in pred_contours_polygon + ] + pred_contour_colors_polygon = [ + "#70c6ff" for i in range(len(pred_contours_polygon)) + ] + pred_cell_image = Image.fromarray( + (sample_image * 255).astype(np.uint8) + ).convert("RGB") + pred_drawing = ImageDraw.Draw(pred_cell_image) + add_patch = lambda poly, color: pred_drawing.polygon( + poly, outline=color, width=2 + ) + [ + add_patch(poly, c) + for poly, c in zip(pred_contours_polygon, pred_contour_colors_polygon) + ] + placeholder[: h, 3 * w : 4 * w, :3] = np.asarray(pred_cell_image) / 255 + + # plotting + test_image = Image.fromarray((placeholder * 255).astype(np.uint8)) + fig, axs = plt.subplots(figsize=(3, 2), dpi=1200) + axs.imshow(placeholder) + axs.set_xticks(np.arange(w / 2, 4 * w, w)) + axs.set_xticklabels( + [ + "Image", + "Binary-Cells", + "Instances", + "Countours", + ], + fontsize=6, + ) + axs.xaxis.tick_top() + + axs.set_yticks([h / 2]) + axs.set_yticklabels([ "Pred."], fontsize=6) + axs.tick_params(axis="both", which="both", length=0) + grid_x = np.arange(w, 3 * w, w) + grid_y = np.arange(h, 2 * h, h) + + for x_seg in grid_x: + axs.axvline(x_seg, color="black") + for y_seg in grid_y: + axs.axhline(y_seg, color="black") + + fig.suptitle(f"Patch Predictions for input image", fontsize=6) + fig.tight_layout() + fig.savefig("pred_img.png") + plt.close() + 
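+
+# Illustrative usage sketch for single-image inference (the paths and the image name
+# below are placeholders, and it is assumed that the checkpoint loaded in __init__ is
+# exposed as `inf.model`):
+#
+#   import cv2
+#   inf = MoNuSegInference(
+#       model_path="./model_best.pth", dataset_path="./monuseg/testing/",
+#       outdir="./inference_out/", gpu=0, patching=False, magnification=20, overlap=0,
+#   )
+#   img = cv2.cvtColor(cv2.imread("example.png"), cv2.COLOR_BGR2RGB)
+#   inf.run_single_image_inference(inf.model, img)  # writes "pred_img.png"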
+
+# CLI
+class InferenceCellViTMoNuSegParser:
+    def __init__(self) -> None:
+        parser = argparse.ArgumentParser(
+            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+            description="Perform CellViT inference for MoNuSeg dataset",
+        )
+
+        parser.add_argument(
+            "--model",
+            type=str,
+            help="Model checkpoint file that is used for inference",
+            default="./model_best.pth",
+        )
+        parser.add_argument(
+            "--dataset",
+            type=str,
+            help="Path to MoNuSeg dataset.",
+            default="/data/lunbinzeng/datasets/monuseg/testing/",
+        )
+        parser.add_argument(
+            "--outdir",
+            type=str,
+            help="Path to output directory to store results.",
+            default="/data/lunbinzeng/results/lkcell/small/2024-04-22T232903_CellViT-unireplknet-fold1-final/monuseg/inference/",
+        )
+        parser.add_argument(
+            "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0
+        )
+        parser.add_argument(
+            "--magnification",
+            type=int,
+            help="Dataset Magnification. Either 20 or 40. Default: 20",
+            choices=[20, 40],
+            default=20,
+        )
+        # NOTE: argparse's type=bool converts any non-empty string (including "False")
+        # to True; leave these flags unset to keep their defaults.
+        parser.add_argument(
+            "--patching",
+            type=bool,
+            help="Patch to 256px images. Default: False",
+            default=False,
+        )
+        parser.add_argument(
+            "--overlap",
+            type=int,
+            help="Patch overlap, just valid for patching",
+            default=0,
+        )
+        parser.add_argument(
+            "--plots",
+            type=bool,
+            help="Generate result plots. Default: True",
+            default=True,
+        )
+
+        self.parser = parser
+
+    def parse_arguments(self) -> dict:
+        opt = self.parser.parse_args()
+        return vars(opt)
+
+
+if __name__ == "__main__":
+    configuration_parser = InferenceCellViTMoNuSegParser()
+    configuration = configuration_parser.parse_arguments()
+    print(configuration)
+
+    inf = MoNuSegInference(
+        model_path=configuration["model"],
+        dataset_path=configuration["dataset"],
+        outdir=configuration["outdir"],
+        gpu=configuration["gpu"],
+        patching=configuration["patching"],
+        magnification=configuration["magnification"],
+        overlap=configuration["overlap"],
+    )
+    inf.run_inference(generate_plots=configuration["plots"])
diff --git a/cell_segmentation/inference/inference_cellvit_experiment_pannuke.py b/cell_segmentation/inference/inference_cellvit_experiment_pannuke.py
new file mode 100644
index 0000000000000000000000000000000000000000..7248093304f569a3c21c13f9c0a6817adc79d476
--- /dev/null
+++ b/cell_segmentation/inference/inference_cellvit_experiment_pannuke.py
@@ -0,0 +1,1157 @@
+# -*- coding: utf-8 -*-
+# CellViT Inference Method for Patch-Wise Inference on a test set
+# Without merging WSI
+#
+# Aim is to calculate metrics as defined for the PanNuke dataset
+#
+# @ Fabian Hörst, fabian.hoerst@uk-essen.de
+# Institute for Artificial Intelligence in Medicine,
+# University Medicine Essen
+
+import argparse
+import inspect
+import os
+import sys
+
+currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+parentdir = os.path.dirname(currentdir)
+sys.path.insert(0, parentdir)
+parentdir = os.path.dirname(parentdir)
+sys.path.insert(0, parentdir)
+
+from base_ml.base_experiment import BaseExperiment
+
+BaseExperiment.seed_run(1232)
+
+import json
+from pathlib import Path
+from typing import List, Tuple, Union
+
+import albumentations as A
+import numpy as np
+import torch
+import torch.nn.functional as F
+import tqdm
+import yaml
+from matplotlib import pyplot as plt
+from PIL import Image, ImageDraw
+from skimage.color import rgba2rgb
+from sklearn.metrics import accuracy_score
+from tabulate import tabulate
+from torch.utils.data import DataLoader
+from torchmetrics.functional import dice
+from
torchmetrics.functional.classification import binary_jaccard_index +from torchvision import transforms + +from cell_segmentation.datasets.dataset_coordinator import select_dataset +from models.segmentation.cell_segmentation.cellvit import DataclassHVStorage +from cell_segmentation.utils.metrics import ( + cell_detection_scores, + cell_type_detection_scores, + get_fast_pq, + remap_label, + binarize, +) +from cell_segmentation.utils.post_proc_cellvit import calculate_instances +from cell_segmentation.utils.tools import cropping_center, pair_coordinates +from models.segmentation.cell_segmentation.cellvit import CellViT +from utils.logger import Logger + + +class InferenceCellViT: + def __init__( + self, + run_dir: Union[Path, str], + gpu: int, + magnification: int = 40, + checkpoint_name: str = "model_best.pth", + ) -> None: + """Inference for HoverNet + + Args: + run_dir (Union[Path, str]): logging directory with checkpoints and configs + gpu (int): CUDA GPU device to use for inference + magnification (int, optional): Dataset magnification. Defaults to 40. + checkpoint_name (str, optional): Select name of the model to load. Defaults to model_best.pth + """ + self.run_dir = Path(run_dir) + self.device = "cpu" + self.run_conf: dict = None + self.logger: Logger = None + self.magnification = magnification + self.checkpoint_name = checkpoint_name + + self.__load_run_conf() + # self.__instantiate_logger() + self.__setup_amp() + + self.num_classes = self.run_conf["data"]["num_nuclei_classes"] + + def __load_run_conf(self) -> None: + """Load the config.yaml file with the run setup + + Be careful with loading and usage, since original None values in the run configuration are not stored when dumped to yaml file. + If you want to check if a key is not defined, first check if the key does exists in the dict. + """ + with open((self.run_dir / "config.yaml").resolve(), "r") as run_config_file: + yaml_config = yaml.safe_load(run_config_file) + self.run_conf = dict(yaml_config) + + def __load_dataset_setup(self, dataset_path: Union[Path, str]) -> None: + """Load the configuration of the cell segmentation dataset. + + The dataset must have a dataset_config.yaml file in their dataset path with the following entries: + * tissue_types: describing the present tissue types with corresponding integer + * nuclei_types: describing the present nuclei types with corresponding integer + + Args: + dataset_path (Union[Path, str]): Path to dataset folder + """ + dataset_config_path = Path(dataset_path) / "dataset_config.yaml" + with open(dataset_config_path, "r") as dataset_config_file: + yaml_config = yaml.safe_load(dataset_config_file) + self.dataset_config = dict(yaml_config) + + def __instantiate_logger(self) -> None: + """Instantiate logger + + Logger is using no formatters. 
Logs are stored in the run directory under the filename: inference.log + """ + logger = Logger( + level=self.run_conf["logging"]["level"].upper(), + log_dir=Path(self.run_dir).resolve(), + comment="inference", + use_timestamp=False, + formatter="%(message)s", + ) + self.logger = logger.create_logger() + + def __check_eval_model(self) -> None: + """Check if there is a best model pytorch file""" + assert (self.run_dir / "checkpoints" / self.checkpoint_name).is_file() + + def __setup_amp(self) -> None: + """Setup automated mixed precision (amp) for inference.""" + self.mixed_precision = self.run_conf["training"].get("mixed_precision", False) + + def get_model( + self, model_type: str + ) -> CellViT: + """Return the trained model for inference + + Args: + model_type (str): Name of the model. Must either be one of: + CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared + + Returns: + Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]: Model + """ + implemented_models = [ + "CellViT", + ] + if model_type not in implemented_models: + raise NotImplementedError( + f"Unknown model type. Please select one of {implemented_models}" + ) + if model_type in ["CellViT", "CellViTShared"]: + if model_type == "CellViT": + model_class = CellViT + + model = model_class( + model256_path=self.run_conf["model"].get("pretrained_encoder"), + num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"], + num_tissue_classes=self.run_conf["data"]["num_tissue_classes"], + #embed_dim=self.run_conf["model"]["embed_dim"], + in_channels=self.run_conf["model"].get("input_chanels", 3), + #embed_dim=self.run_conf["model"]["embed_dim"], + #input_channels=self.run_conf["model"].get("input_channels", 3), + #depth=self.run_conf["model"]["depth"], + #num_heads=self.run_conf["model"]["num_heads"], + #extract_layers=self.run_conf["model"]["extract_layers"], + #regression_loss=self.run_conf["model"].get("regression_loss", False), + ) + + + return model + + def setup_patch_inference( + self, test_folds: List[int] = None + ) -> Tuple[ + CellViT, + DataLoader, + dict, + ]: + """Setup patch inference by defining a patch-wise datalaoder and loading the model checkpoint + + Args: + test_folds (List[int], optional): Test fold to use. Otherwise defined folds from config.yaml (in run_dir) are loaded. Defaults to None. + + Returns: + tuple[Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared], DataLoader, dict]: + Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]: Best model loaded form checkpoint + DataLoader: Inference DataLoader + dict: Dataset configuration. Keys are: + * "tissue_types": describing the present tissue types with corresponding integer + * "nuclei_types": describing the present nuclei types with corresponding integer + + """ + + # get dataset + if test_folds is None: + if "test_folds" in self.run_conf["data"]: + if self.run_conf["data"]["test_folds"] is None: + self.logger.info( + "There was no test set provided. We now use the validation dataset for testing" + ) + self.run_conf["data"]["test_folds"] = self.run_conf["data"][ + "val_folds" + ] + else: + self.logger.info( + "There was no test set provided. 
We now use the validation dataset for testing" + ) + self.run_conf["data"]["test_folds"] = self.run_conf["data"]["val_folds"] + else: + self.run_conf["data"]["test_folds"] = self.run_conf["data"]["val_folds"] + self.logger.info( + f"Performing Inference on test set: {self.run_conf['data']['test_folds']}" + ) + + + inference_dataset = select_dataset( + dataset_name=self.run_conf["data"]["dataset"], + split="test", + dataset_config=self.run_conf["data"], + transforms=transforms, + ) + + inference_dataloader = DataLoader( + inference_dataset, + batch_size=1, + num_workers=12, + pin_memory=False, + shuffle=False, + ) + + return inference_dataloader, self.dataset_config + + def run_patch_inference( + self, + model: CellViT, + inference_dataloader: DataLoader, + dataset_config: dict, + generate_plots: bool = False, + ) -> None: + """Run Patch inference with given setup + + Args: + model (Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]): Model to use for inference + inference_dataloader (DataLoader): Inference Dataloader. Must return a batch with the following structure: + * Images (torch.Tensor) + * Masks (dict) + * Tissue types as str + * Image name as str + dataset_config (dict): Dataset configuration. Required keys are: + * "tissue_types": describing the present tissue types with corresponding integer + * "nuclei_types": describing the present nuclei types with corresponding integer + generate_plots (bool, optional): If inference plots should be generated. Defaults to False. + """ + # put model in eval mode + model.to(device=self.device) + model.eval() + + # setup score tracker + image_names = [] # image names as str + binary_dice_scores = [] # binary dice scores per image + binary_jaccard_scores = [] # binary jaccard scores per image + pq_scores = [] # pq-scores per image + dq_scores = [] # dq-scores per image + sq_scores = [] # sq-scores per image + cell_type_pq_scores = [] # pq-scores per cell type and image + cell_type_dq_scores = [] # dq-scores per cell type and image + cell_type_sq_scores = [] # sq-scores per cell type and image + tissue_pred = [] # tissue predictions for each image + tissue_gt = [] # ground truth tissue image class + tissue_types_inf = [] # string repr of ground truth tissue image class + + paired_all_global = [] # unique matched index pair + unpaired_true_all_global = ( + [] + ) # the index must exist in `true_inst_type_all` and unique + unpaired_pred_all_global = ( + [] + ) # the index must exist in `pred_inst_type_all` and unique + true_inst_type_all_global = [] # each index is 1 independent data point + pred_inst_type_all_global = [] # each index is 1 independent data point + + # for detections scores + true_idx_offset = 0 + pred_idx_offset = 0 + + inference_loop = tqdm.tqdm( + enumerate(inference_dataloader), total=len(inference_dataloader) + ) + + with torch.no_grad(): + for batch_idx, batch in inference_loop: + batch_metrics = self.inference_step( + model, batch, generate_plots=generate_plots + ) + # unpack batch_metrics + image_names = image_names + batch_metrics["image_names"] + + # dice scores + binary_dice_scores = ( + binary_dice_scores + batch_metrics["binary_dice_scores"] + ) + binary_jaccard_scores = ( + binary_jaccard_scores + batch_metrics["binary_jaccard_scores"] + ) + + # pq scores + pq_scores = pq_scores + batch_metrics["pq_scores"] + dq_scores = dq_scores + batch_metrics["dq_scores"] + sq_scores = sq_scores + batch_metrics["sq_scores"] + tissue_types_inf = tissue_types_inf + batch_metrics["tissue_types"] + 
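+                # Detection results are accumulated in flat, dataset-wide lists; the
+                # offsets below shift each batch's pairing indices by the number of
+                # GT/predicted instances seen so far, so the index pairs remain valid
+                # after the per-batch arrays are concatenated.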
cell_type_pq_scores = ( + cell_type_pq_scores + batch_metrics["cell_type_pq_scores"] + ) + cell_type_dq_scores = ( + cell_type_dq_scores + batch_metrics["cell_type_dq_scores"] + ) + cell_type_sq_scores = ( + cell_type_sq_scores + batch_metrics["cell_type_sq_scores"] + ) + tissue_pred.append(batch_metrics["tissue_pred"]) + tissue_gt.append(batch_metrics["tissue_gt"]) + + # detection scores + true_idx_offset = ( + true_idx_offset + true_inst_type_all_global[-1].shape[0] + if batch_idx != 0 + else 0 + ) + pred_idx_offset = ( + pred_idx_offset + pred_inst_type_all_global[-1].shape[0] + if batch_idx != 0 + else 0 + ) + true_inst_type_all_global.append(batch_metrics["true_inst_type_all"]) + pred_inst_type_all_global.append(batch_metrics["pred_inst_type_all"]) + # increment the pairing index statistic + batch_metrics["paired_all"][:, 0] += true_idx_offset + batch_metrics["paired_all"][:, 1] += pred_idx_offset + paired_all_global.append(batch_metrics["paired_all"]) + + batch_metrics["unpaired_true_all"] += true_idx_offset + batch_metrics["unpaired_pred_all"] += pred_idx_offset + unpaired_true_all_global.append(batch_metrics["unpaired_true_all"]) + unpaired_pred_all_global.append(batch_metrics["unpaired_pred_all"]) + + # assemble batches to datasets (global) + tissue_types_inf = [t.lower() for t in tissue_types_inf] + + paired_all = np.concatenate(paired_all_global, axis=0) + unpaired_true_all = np.concatenate(unpaired_true_all_global, axis=0) + unpaired_pred_all = np.concatenate(unpaired_pred_all_global, axis=0) + true_inst_type_all = np.concatenate(true_inst_type_all_global, axis=0) + pred_inst_type_all = np.concatenate(pred_inst_type_all_global, axis=0) + paired_true_type = true_inst_type_all[paired_all[:, 0]] + paired_pred_type = pred_inst_type_all[paired_all[:, 1]] + unpaired_true_type = true_inst_type_all[unpaired_true_all] + unpaired_pred_type = pred_inst_type_all[unpaired_pred_all] + + binary_dice_scores = np.array(binary_dice_scores) + binary_jaccard_scores = np.array(binary_jaccard_scores) + pq_scores = np.array(pq_scores) + dq_scores = np.array(dq_scores) + sq_scores = np.array(sq_scores) + + tissue_detection_accuracy = accuracy_score( + y_true=np.concatenate(tissue_gt), y_pred=np.concatenate(tissue_pred) + ) + f1_d, prec_d, rec_d = cell_detection_scores( + paired_true=paired_true_type, + paired_pred=paired_pred_type, + unpaired_true=unpaired_true_type, + unpaired_pred=unpaired_pred_type, + ) + dataset_metrics = { + "Binary-Cell-Dice-Mean": float(np.nanmean(binary_dice_scores)), + "Binary-Cell-Jacard-Mean": float(np.nanmean(binary_jaccard_scores)), + "Tissue-Multiclass-Accuracy": tissue_detection_accuracy, + "bPQ": float(np.nanmean(pq_scores)), + "bDQ": float(np.nanmean(dq_scores)), + "bSQ": float(np.nanmean(sq_scores)), + "mPQ": float(np.nanmean([np.nanmean(pq) for pq in cell_type_pq_scores])), + "mDQ": float(np.nanmean([np.nanmean(dq) for dq in cell_type_dq_scores])), + "mSQ": float(np.nanmean([np.nanmean(sq) for sq in cell_type_sq_scores])), + "f1_detection": float(f1_d), + "precision_detection": float(prec_d), + "recall_detection": float(rec_d), + } + + # calculate tissue metrics + tissue_types = dataset_config["tissue_types"] + tissue_metrics = {} + for tissue in tissue_types.keys(): + tissue = tissue.lower() + tissue_ids = np.where(np.asarray(tissue_types_inf) == tissue) + tissue_metrics[f"{tissue}"] = {} + tissue_metrics[f"{tissue}"]["Dice"] = float( + np.nanmean(binary_dice_scores[tissue_ids]) + ) + tissue_metrics[f"{tissue}"]["Jaccard"] = float( + 
np.nanmean(binary_jaccard_scores[tissue_ids]) + ) + tissue_metrics[f"{tissue}"]["mPQ"] = float( + np.nanmean( + [np.nanmean(pq) for pq in np.array(cell_type_pq_scores)[tissue_ids]] + ) + ) + tissue_metrics[f"{tissue}"]["bPQ"] = float( + np.nanmean(pq_scores[tissue_ids]) + ) + + # calculate nuclei metrics + nuclei_types = dataset_config["nuclei_types"] + nuclei_metrics_d = {} + nuclei_metrics_pq = {} + nuclei_metrics_dq = {} + nuclei_metrics_sq = {} + for nuc_name, nuc_type in nuclei_types.items(): + if nuc_name.lower() == "background": + continue + nuclei_metrics_pq[nuc_name] = np.nanmean( + [pq[nuc_type] for pq in cell_type_pq_scores] + ) + nuclei_metrics_dq[nuc_name] = np.nanmean( + [dq[nuc_type] for dq in cell_type_dq_scores] + ) + nuclei_metrics_sq[nuc_name] = np.nanmean( + [sq[nuc_type] for sq in cell_type_sq_scores] + ) + f1_cell, prec_cell, rec_cell = cell_type_detection_scores( + paired_true_type, + paired_pred_type, + unpaired_true_type, + unpaired_pred_type, + nuc_type, + ) + nuclei_metrics_d[nuc_name] = { + "f1_cell": f1_cell, + "prec_cell": prec_cell, + "rec_cell": rec_cell, + } + + # print final results + # binary + self.logger.info(f"{20*'*'} Binary Dataset metrics {20*'*'}") + [self.logger.info(f"{f'{k}:': <25} {v}") for k, v in dataset_metrics.items()] + # tissue -> the PQ values are bPQ values -> what about mBQ? + self.logger.info(f"{20*'*'} Tissue metrics {20*'*'}") + flattened_tissue = [] + for key in tissue_metrics: + flattened_tissue.append( + [ + key, + tissue_metrics[key]["Dice"], + tissue_metrics[key]["Jaccard"], + tissue_metrics[key]["mPQ"], + tissue_metrics[key]["bPQ"], + ] + ) + self.logger.info( + tabulate( + flattened_tissue, headers=["Tissue", "Dice", "Jaccard", "mPQ", "bPQ"] + ) + ) + # nuclei types + self.logger.info(f"{20*'*'} Nuclei Type Metrics {20*'*'}") + flattened_nuclei_type = [] + for key in nuclei_metrics_pq: + flattened_nuclei_type.append( + [ + key, + nuclei_metrics_dq[key], + nuclei_metrics_sq[key], + nuclei_metrics_pq[key], + ] + ) + self.logger.info( + tabulate(flattened_nuclei_type, headers=["Nuclei Type", "DQ", "SQ", "PQ"]) + ) + # nuclei detection metrics + self.logger.info(f"{20*'*'} Nuclei Detection Metrics {20*'*'}") + flattened_detection = [] + for key in nuclei_metrics_d: + flattened_detection.append( + [ + key, + nuclei_metrics_d[key]["prec_cell"], + nuclei_metrics_d[key]["rec_cell"], + nuclei_metrics_d[key]["f1_cell"], + ] + ) + self.logger.info( + tabulate( + flattened_detection, + headers=["Nuclei Type", "Precision", "Recall", "F1"], + ) + ) + + # save all folds + image_metrics = {} + for idx, image_name in enumerate(image_names): + image_metrics[image_name] = { + "Dice": float(binary_dice_scores[idx]), + "Jaccard": float(binary_jaccard_scores[idx]), + "bPQ": float(pq_scores[idx]), + } + all_metrics = { + "dataset": dataset_metrics, + "tissue_metrics": tissue_metrics, + "image_metrics": image_metrics, + "nuclei_metrics_pq": nuclei_metrics_pq, + "nuclei_metrics_d": nuclei_metrics_d, + } + + # saving + with open(str(self.run_dir / "inference_results.json"), "w") as outfile: + json.dump(all_metrics, outfile, indent=2) + + def inference_step( + self, + model: CellViT, + batch: tuple, + generate_plots: bool = False, + ) -> None: + """Inference step for a patch-wise batch + + Args: + model (CellViT): Model to use for inference + batch (tuple): Batch with the following structure: + * Images (torch.Tensor) + * Masks (dict) + * Tissue types as str + * Image name as str + generate_plots (bool, optional): If inference plots should be 
generated. Defaults to False. + """ + # unpack batch, for shape compare train_step method + imgs = batch[0].to(self.device) + masks = batch[1] + tissue_types = list(batch[2]) + image_names = list(batch[3]) + + model.zero_grad() + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + predictions = model.forward(imgs) + else: + predictions = model.forward(imgs) + predictions = self.unpack_predictions(predictions=predictions, model=model) + gt = self.unpack_masks(masks=masks, tissue_types=tissue_types, model=model) + + # scores + batch_metrics, scores = self.calculate_step_metric(predictions, gt, image_names) + batch_metrics["tissue_types"] = tissue_types + if generate_plots: + self.plot_results( + imgs=imgs, + predictions=predictions, + ground_truth=gt, + img_names=image_names, + num_nuclei_classes=self.num_classes, + outdir=Path(self.run_dir / "inference_predictions"), + scores=scores, + ) + + return batch_metrics + + def run_single_image_inference( self, model: CellViT, image: np.ndarray, generate_plots: bool = True, ) -> None: + + + + # set image transforms + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + transforms = A.Compose([A.Normalize(mean=mean, std=std)]) + + transformed_img = transforms(image=image)["image"] + image = torch.from_numpy(transformed_img).permute(2, 0, 1).unsqueeze(0).float() + imgs = image.to(self.device) + + model.zero_grad() + predictions = model.forward(imgs) + predictions = self.unpack_predictions(predictions=predictions, model=model) + + + + image_output = self.plot_results( + imgs=imgs, + predictions=predictions, + num_nuclei_classes=self.num_classes, + outdir=Path(self.run_dir), + ) + + return image_output + + + + + def unpack_predictions( + self, predictions: dict, model: CellViT + ) -> DataclassHVStorage: + """Unpack the given predictions. Main focus lays on reshaping and postprocessing predictions, e.g. separating instances + + Args: + predictions (dict): Dictionary with the following keys: + * tissue_types: Logit tissue prediction output. Shape: (batch_size, num_tissue_classes) + * nuclei_binary_map: Logit output for binary nuclei prediction branch. Shape: (batch_size, H, W, 2) + * hv_map: Logit output for hv-prediction. Shape: (batch_size, H, W, 2) + * nuclei_type_map: Logit output for nuclei instance-prediction. 
Shape: (batch_size, num_nuclei_classes, H, W) + model (CellViT): Current model + + Returns: + DataclassHVStorage: Processed network output + + """ + predictions["tissue_types"] = predictions["tissue_types"].to(self.device) + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=1 + ) # shape: (batch_size, 2, H, W) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=1 + ) # shape: (batch_size, num_nuclei_classes, H, W) + ( + predictions["instance_map"], + predictions["instance_types"], + ) = model.calculate_instance_map( + predictions, magnification=self.magnification + ) # shape: (batch_size, H', W') + predictions["instance_types_nuclei"] = model.generate_instance_nuclei_map( + predictions["instance_map"], predictions["instance_types"] + ).permute(0, 3, 1, 2).to( + self.device + ) # shape: (batch_size, num_nuclei_classes, H, W) change + predictions = DataclassHVStorage( + nuclei_binary_map=predictions["nuclei_binary_map"], #[64, 2, 256, 256] + hv_map=predictions["hv_map"], #[64, 2, 256, 256] + nuclei_type_map=predictions["nuclei_type_map"], #[64, 6, 256, 256] + tissue_types=predictions["tissue_types"], #[64,19] + instance_map=predictions["instance_map"], #[64, 256, 256] + instance_types=predictions["instance_types"], #list of 64 tensors, each tensor is [256,256] + instance_types_nuclei=predictions["instance_types_nuclei"], #[64,256,256,6] + batch_size=predictions["tissue_types"].shape[0],#64 + ) + + return predictions + + def unpack_masks( + self, masks: dict, tissue_types: list, model: CellViT + ) -> DataclassHVStorage: + # get ground truth values, perform one hot encoding for segmentation maps + gt_nuclei_binary_map_onehot = ( + F.one_hot(masks["nuclei_binary_map"], num_classes=2) + ).type( + torch.float32 + ) # background, nuclei #[64, 256,256,2] + nuclei_type_maps = torch.squeeze(masks["nuclei_type_map"]).type(torch.int64) #[64,256,256] + gt_nuclei_type_maps_onehot = F.one_hot( + nuclei_type_maps, num_classes=self.num_classes + ).type( + torch.float32 + ) # background + nuclei types [64, 256, 256, 6] + + # assemble ground truth dictionary + gt = { + "nuclei_type_map": gt_nuclei_type_maps_onehot.permute(0, 3, 1, 2).to( + self.device + ), # shape: (batch_size, H, W, num_nuclei_classes) #[64,256,256,6] ->[64,6,256,256] + "nuclei_binary_map": gt_nuclei_binary_map_onehot.permute(0, 3, 1, 2).to( + self.device + ), # shape: (batch_size, H, W, 2) #[64,256,256,2] ->[64,2,256,256] + "hv_map": masks["hv_map"].to(self.device), # shape: (batch_size, H, W, 2)原来的是错的 [64, 2, 256, 256] + "instance_map": masks["instance_map"].to( + self.device + ), # shape: (batch_size, H, W) -> each instance has one integer (64,256,256) + "instance_types_nuclei": ( + gt_nuclei_type_maps_onehot * masks["instance_map"][..., None] + ) + .permute(0, 3, 1, 2) + .to( + self.device + ), # shape: (batch_size, num_nuclei_classes, H, W) -> instance has one integer, for each nuclei class (64,256,256,6) + "tissue_types": torch.Tensor( + [self.dataset_config["tissue_types"][t] for t in tissue_types] + ) + .type(torch.LongTensor) + .to(self.device), # shape: batch_size 64 + } + gt["instance_types"] = calculate_instances( + gt["nuclei_type_map"], gt["instance_map"] + ) + gt = DataclassHVStorage(**gt, batch_size=gt["tissue_types"].shape[0]) + return gt + + def calculate_step_metric( + self, + predictions: DataclassHVStorage, + gt: DataclassHVStorage, + image_names: List[str], + ) -> Tuple[dict, list]: + """Calculate the metrics for the validation step + + Args: + 
predictions (DataclassHVStorage): Processed network output + gt (DataclassHVStorage): Ground truth values + image_names (list(str)): List with image names + + Returns: + Tuple[dict, list]: + * dict: Dictionary with metrics. Structure not fixed yet + * list with cell_dice, cell_jaccard and pq for each image + """ + predictions = predictions.get_dict() + gt = gt.get_dict() + + # preparation and device movement + predictions["tissue_types_classes"] = F.softmax( + predictions["tissue_types"], dim=-1 + ) + pred_tissue = ( + torch.argmax(predictions["tissue_types_classes"], dim=-1) + .detach() + .cpu() + .numpy() + .astype(np.uint8) + ) + predictions["instance_map"] = predictions["instance_map"].detach().cpu() + predictions["instance_types_nuclei"] = ( + predictions["instance_types_nuclei"].detach().cpu().numpy().astype("int32") + ) # shape: (batch_size, num_nuclei_classes, H, W) [64,256,256,6] + instance_maps_gt = gt["instance_map"].detach().cpu() #[64,256,256] + gt["tissue_types"] = gt["tissue_types"].detach().cpu().numpy().astype(np.uint8) + gt["nuclei_binary_map"] = torch.argmax(gt["nuclei_binary_map"], dim=1).type( + torch.uint8 + ) + gt["instance_types_nuclei"] = ( + gt["instance_types_nuclei"].detach().cpu().numpy().astype("int32") + ) # shape: (batch_size, num_nuclei_classes, H, W) [64,6,256,256] ################################与前面的predictions的形状不同 + + # segmentation scores + binary_dice_scores = [] # binary dice scores per image + binary_jaccard_scores = [] # binary jaccard scores per image + pq_scores = [] # pq-scores per image + dq_scores = [] # dq-scores per image + sq_scores = [] # sq_scores per image + cell_type_pq_scores = [] # pq-scores per cell type and image + cell_type_dq_scores = [] # dq-scores per cell type and image + cell_type_sq_scores = [] # sq-scores per cell type and image + scores = [] # all scores in one list + + # detection scores + paired_all = [] # unique matched index pair + unpaired_true_all = ( + [] + ) # the index must exist in `true_inst_type_all` and unique + unpaired_pred_all = ( + [] + ) # the index must exist in `pred_inst_type_all` and unique + true_inst_type_all = [] # each index is 1 independent data point + pred_inst_type_all = [] # each index is 1 independent data point + + # for detections scores + true_idx_offset = 0 + pred_idx_offset = 0 + + for i in range(len(pred_tissue)): + # binary dice score: Score for cell detection per image, without background + pred_binary_map = torch.argmax(predictions["nuclei_binary_map"][i], dim=0) + target_binary_map = gt["nuclei_binary_map"][i] + cell_dice = ( + dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0) + .detach() + .cpu() + ) + binary_dice_scores.append(float(cell_dice)) + + # binary aji + cell_jaccard = ( + binary_jaccard_index( + preds=pred_binary_map, + target=target_binary_map, + ) + .detach() + .cpu() + ) + binary_jaccard_scores.append(float(cell_jaccard)) + + # pq values + if len(np.unique(instance_maps_gt[i])) == 1: + dq, sq, pq = np.nan, np.nan, np.nan + else: + remapped_instance_pred = binarize( + predictions["instance_types_nuclei"][i][1:].transpose(1, 2, 0) + ) #(256,6) + remapped_gt = remap_label(instance_maps_gt[i]) #(256,256) + # remapped_instance_pred = binarize(predictions["instance_types_nuclei"][i].transpose(2,1,0)[1:]) #[64,256,256,6] + + [dq, sq, pq], _ = get_fast_pq( + true=remapped_gt, pred=remapped_instance_pred + ) #(256,256) (256,256) true是instance map,在这里true的形状应该是真实的实例图,pred是预测的实例图,形状应该相等,都为(256,256) + pq_scores.append(pq) + dq_scores.append(dq) + 
sq_scores.append(sq) + scores.append( + [ + cell_dice.detach().cpu().numpy(), + cell_jaccard.detach().cpu().numpy(), + pq, + ] + ) + + # pq values per class (with class 0 beeing background -> should be skipped in the future) + nuclei_type_pq = [] + nuclei_type_dq = [] + nuclei_type_sq = [] + for j in range(0, self.num_classes): + pred_nuclei_instance_class = remap_label( + predictions["instance_types_nuclei"][i][j, ...] + ) + target_nuclei_instance_class = remap_label( + gt["instance_types_nuclei"][i][j, ...] + ) + + # if ground truth is empty, skip from calculation + if len(np.unique(target_nuclei_instance_class)) == 1: + pq_tmp = np.nan + dq_tmp = np.nan + sq_tmp = np.nan + else: + [dq_tmp, sq_tmp, pq_tmp], _ = get_fast_pq( + pred_nuclei_instance_class, + target_nuclei_instance_class, + match_iou=0.5, + ) + nuclei_type_pq.append(pq_tmp) + nuclei_type_dq.append(dq_tmp) + nuclei_type_sq.append(sq_tmp) + + # detection scores + true_centroids = np.array( + [v["centroid"] for k, v in gt["instance_types"][i].items()] + ) + true_instance_type = np.array( + [v["type"] for k, v in gt["instance_types"][i].items()] + ) + pred_centroids = np.array( + [v["centroid"] for k, v in predictions["instance_types"][i].items()] + ) + pred_instance_type = np.array( + [v["type"] for k, v in predictions["instance_types"][i].items()] + ) + + if true_centroids.shape[0] == 0: + true_centroids = np.array([[0, 0]]) + true_instance_type = np.array([0]) + if pred_centroids.shape[0] == 0: + pred_centroids = np.array([[0, 0]]) + pred_instance_type = np.array([0]) + if self.magnification == 40: + pairing_radius = 12 + else: + pairing_radius = 6 + paired, unpaired_true, unpaired_pred = pair_coordinates( + true_centroids, pred_centroids, pairing_radius + ) + true_idx_offset = ( + true_idx_offset + true_inst_type_all[-1].shape[0] if i != 0 else 0 + ) + pred_idx_offset = ( + pred_idx_offset + pred_inst_type_all[-1].shape[0] if i != 0 else 0 + ) + true_inst_type_all.append(true_instance_type) + pred_inst_type_all.append(pred_instance_type) + + # increment the pairing index statistic + if paired.shape[0] != 0: # ! 
sanity + paired[:, 0] += true_idx_offset + paired[:, 1] += pred_idx_offset + paired_all.append(paired) + + unpaired_true += true_idx_offset + unpaired_pred += pred_idx_offset + unpaired_true_all.append(unpaired_true) + unpaired_pred_all.append(unpaired_pred) + + cell_type_pq_scores.append(nuclei_type_pq) + cell_type_dq_scores.append(nuclei_type_dq) + cell_type_sq_scores.append(nuclei_type_sq) + + paired_all = np.concatenate(paired_all, axis=0) + unpaired_true_all = np.concatenate(unpaired_true_all, axis=0) + unpaired_pred_all = np.concatenate(unpaired_pred_all, axis=0) + true_inst_type_all = np.concatenate(true_inst_type_all, axis=0) + pred_inst_type_all = np.concatenate(pred_inst_type_all, axis=0) + + batch_metrics = { + "image_names": image_names, + "binary_dice_scores": binary_dice_scores, + "binary_jaccard_scores": binary_jaccard_scores, + "pq_scores": pq_scores, + "dq_scores": dq_scores, + "sq_scores": sq_scores, + "cell_type_pq_scores": cell_type_pq_scores, + "cell_type_dq_scores": cell_type_dq_scores, + "cell_type_sq_scores": cell_type_sq_scores, + "tissue_pred": pred_tissue, + "tissue_gt": gt["tissue_types"], + "paired_all": paired_all, + "unpaired_true_all": unpaired_true_all, + "unpaired_pred_all": unpaired_pred_all, + "true_inst_type_all": true_inst_type_all, + "pred_inst_type_all": pred_inst_type_all, + } + + return batch_metrics, scores + + def plot_results( + self, + imgs: Union[torch.Tensor, np.ndarray], + predictions: dict, + num_nuclei_classes: int, + outdir: Union[Path, str], + ) -> None: + # TODO: Adapt Docstring and function, currently not working with our shape + """Generate example plot with image, binary_pred, hv-map and instance map from prediction and ground-truth + + Args: + imgs (Union[torch.Tensor, np.ndarray]): Images to process, a random number (num_images) is selected from this stack + Shape: (batch_size, 3, H', W') + predictions (dict): Predictions of models. Keys: + "nuclei_type_map": Shape: (batch_size, H', W', num_nuclei) + "nuclei_binary_map": Shape: (batch_size, H', W', 2) + "hv_map": Shape: (batch_size, H', W', 2) + "instance_map": Shape: (batch_size, H', W') + ground_truth (dict): Ground truth values. Keys: + "nuclei_type_map": Shape: (batch_size, H', W', num_nuclei) + "nuclei_binary_map": Shape: (batch_size, H', W', 2) + "hv_map": Shape: (batch_size, H', W', 2) + "instance_map": Shape: (batch_size, H', W') + img_names (List): Names of images as list + num_nuclei_classes (int): Number of total nuclei classes including background + outdir (Union[Path, str]): Output directory where images should be stored + scores (List[List[float]], optional): List with scores for each image. + Each list entry is a list with 3 scores: Dice, Jaccard and bPQ for the image. + Defaults to None. 
+ """ + outdir = Path(outdir) + outdir.mkdir(exist_ok=True, parents=True) + + # permute for gt and predictions + predictions.hv_map = predictions.hv_map.permute(0, 2, 3, 1) + predictions.nuclei_binary_map = predictions.nuclei_binary_map.permute(0, 2, 3, 1) + predictions.nuclei_type_map = predictions.nuclei_type_map.permute(0, 2, 3, 1) + + h = predictions.hv_map.shape[1] + w = predictions.hv_map.shape[2] + + # convert to rgb and crop to selection + sample_images = ( + imgs.permute(0, 2, 3, 1).contiguous().cpu().numpy() + ) # convert to rgb + sample_images = cropping_center(sample_images, (h, w), True) + + pred_sample_binary_map = ( + predictions.nuclei_binary_map[:, :, :, 1].detach().cpu().numpy() + ) + pred_sample_hv_map = predictions.hv_map.detach().cpu().numpy() + pred_sample_instance_maps = predictions.instance_map.detach().cpu().numpy() + pred_sample_type_maps = ( + torch.argmax(predictions.nuclei_type_map, dim=-1).detach().cpu().numpy() + ) + + # create colormaps + hv_cmap = plt.get_cmap("jet") + binary_cmap = plt.get_cmap("jet") + instance_map = plt.get_cmap("viridis") + cell_colors = ["#ffffff", "#ff0000", "#00ff00", "#1e00ff", "#feff00", "#ffbf00"] + + # invert the normalization of the sample images + transform_settings = self.run_conf["transformations"] + if "normalize" in transform_settings: + mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5)) + std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5)) + else: + mean = (0.5, 0.5, 0.5) + std = (0.5, 0.5, 0.5) + inv_normalize = transforms.Normalize( + mean=[-0.5 / mean[0], -0.5 / mean[1], -0.5 / mean[2]], + std=[1 / std[0], 1 / std[1], 1 / std[2]], + ) + inv_samples = inv_normalize(torch.tensor(sample_images).permute(0, 3, 1, 2)) + sample_images = inv_samples.permute(0, 2, 3, 1).detach().cpu().numpy() + + for i in range(len(imgs)): + fig, axs = plt.subplots(figsize=(6, 2), dpi=300) + placeholder = np.zeros((h, 7 * w, 3)) + # orig image + placeholder[:h, :w, :3] = sample_images[i] + # binary prediction + placeholder[: h, w : 2 * w, :3] = rgba2rgb( + binary_cmap(pred_sample_binary_map[i]) + ) # *255? 
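+            # The HV branch regresses horizontal/vertical distance maps in [-1, 1];
+            # (value + 1) / 2 rescales them to [0, 1] before the colormap is applied.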
+ # hv maps + placeholder[: h, 2 * w : 3 * w, :3] = rgba2rgb( + hv_cmap((pred_sample_hv_map[i, :, :, 0] + 1) / 2) + ) + placeholder[: h, 3 * w : 4 * w, :3] = rgba2rgb( + hv_cmap((pred_sample_hv_map[i, :, :, 1] + 1) / 2) + ) + # instance_predictions + placeholder[: h, 4 * w : 5 * w, :3] = rgba2rgb( + instance_map( + ( + pred_sample_instance_maps[i] + - np.min(pred_sample_instance_maps[i]) + ) + / ( + np.max(pred_sample_instance_maps[i]) + - np.min(pred_sample_instance_maps[i] + 1e-10) + ) + ) + ) + # type_predictions + placeholder[: h, 5 * w : 6 * w, :3] = rgba2rgb( + binary_cmap(pred_sample_type_maps[i] / num_nuclei_classes) + ) + + # contours + # pred + pred_contours_polygon = [ + v["contour"] for v in predictions.instance_types[i].values() + ] + pred_contours_polygon = [ + list(zip(poly[:, 0], poly[:, 1])) for poly in pred_contours_polygon + ] + pred_contour_colors_polygon = [ + cell_colors[v["type"]] + for v in predictions.instance_types[i].values() + ] + pred_cell_image = Image.fromarray( + (sample_images[i] * 255).astype(np.uint8) + ).convert("RGB") + pred_drawing = ImageDraw.Draw(pred_cell_image) + add_patch = lambda poly, color: pred_drawing.polygon( + poly, outline=color, width=2 + ) + [ + add_patch(poly, c) + for poly, c in zip(pred_contours_polygon, pred_contour_colors_polygon) + ] + pred_cell_image.save("raw_pred.png") + placeholder[: h, 6 * w : 7 * w, :3] = ( + np.asarray(pred_cell_image) / 255 + ) + + # plotting + axs.imshow(placeholder) + axs.set_xticks(np.arange(w / 2, 7 * w, w)) + axs.set_xticklabels( + [ + "Image", + "Binary-Cells", + "HV-Map-0", + "HV-Map-1", + "Instances", + "Nuclei-Pred", + "Countours", + ], + fontsize=6, + ) + axs.xaxis.tick_top() + + axs.set_yticks([ h /2 ]) + axs.set_yticklabels(["Pred."], fontsize=6) + axs.tick_params(axis="both", which="both", length=0) + grid_x = np.arange(w, 6 * w, w) + grid_y = np.arange(h, 2 * h, h) + + for x_seg in grid_x: + axs.axvline(x_seg, color="black") + for y_seg in grid_y: + axs.axhline(y_seg, color="black") + + fig.suptitle(f"Predictions for input image") + fig.tight_layout() + fig.savefig("pred_img.png") + plt.close() + + +# CLI +class InferenceCellViTParser: + def __init__(self) -> None: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Perform CellViT inference for given run-directory with model checkpoints and logs", + ) + + parser.add_argument( + "--run_dir", + type=str, + help="Logging directory of a training run.", + default="./", + ) + parser.add_argument( + "--checkpoint_name", + type=str, + help="Name of the checkpoint. Either select 'best_checkpoint.pth'," + "'latest_checkpoint.pth' or one of the intermediate checkpoint names," + "e.g., 'checkpoint_100.pth'", + default="model_best.pth", + ) + parser.add_argument( + "--gpu", type=int, help="Cuda-GPU ID for inference", default=0 + ) + parser.add_argument( + "--magnification", + type=int, + help="Dataset Magnification. Either 20 or 40. 
Default: 40", + choices=[20, 40], + default=40, + ) + parser.add_argument( + "--plots", + action="store_true", + help="Generate inference plots in run_dir", + default=True, + ) + + self.parser = parser + + def parse_arguments(self) -> dict: + opt = self.parser.parse_args() + return vars(opt) + + +if __name__ == "__main__": + configuration_parser = InferenceCellViTParser() + configuration = configuration_parser.parse_arguments() + print(configuration) + inf = InferenceCellViT( + run_dir=configuration["run_dir"], + checkpoint_name=configuration["checkpoint_name"], + gpu=configuration["gpu"], + magnification=configuration["magnification"], + ) + model, dataloader, conf = inf.setup_patch_inference() + + inf.run_patch_inference( + model, dataloader, conf, generate_plots=configuration["plots"] + ) diff --git a/cell_segmentation/run_cellvit.py b/cell_segmentation/run_cellvit.py new file mode 100644 index 0000000000000000000000000000000000000000..6fcb2e08a98f39236360b3421fb31dcd6eb437e1 --- /dev/null +++ b/cell_segmentation/run_cellvit.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Running an Experiment Using CellViT cell segmentation network +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import inspect +import os +import sys + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) + +import wandb + +from base_ml.base_cli import ExperimentBaseParser +from cell_segmentation.experiments.experiment_cellvit_pannuke import ( + ExperimentCellVitPanNuke, +) +from cell_segmentation.experiments.experiment_cellvit_conic import ( + ExperimentCellViTCoNic, +) + +from cell_segmentation.inference.inference_cellvit_experiment_pannuke import ( + InferenceCellViT, +) + +if __name__ == "__main__": + # Parse arguments + configuration_parser = ExperimentBaseParser() + configuration = configuration_parser.parse_arguments() + + if configuration["data"]["dataset"].lower() == "pannuke": + experiment_class = ExperimentCellVitPanNuke + elif configuration["data"]["dataset"].lower() == "conic": + experiment_class = ExperimentCellViTCoNic + # Setup experiment + if "checkpoint" in configuration: + # continue checkpoint + experiment = experiment_class( + default_conf=configuration, checkpoint=configuration["checkpoint"] + ) + outdir = experiment.run_experiment() + inference = InferenceCellViT( + run_dir=outdir, + gpu=configuration["gpu"], + checkpoint_name=configuration["eval_checkpoint"], + magnification=configuration["data"].get("magnification", 40), + ) + ( + trained_model, + inference_dataloader, + dataset_config, + ) = inference.setup_patch_inference() + inference.run_patch_inference( + trained_model, inference_dataloader, dataset_config, generate_plots=False + ) + else: + experiment = experiment_class(default_conf=configuration) + if configuration["run_sweep"] is True: + # run new sweep + sweep_configuration = experiment_class.extract_sweep_arguments( + configuration + ) + os.environ["WANDB_DIR"] = os.path.abspath( + configuration["logging"]["wandb_dir"] + ) + sweep_id = wandb.sweep( + sweep=sweep_configuration, project=configuration["logging"]["project"] + ) + wandb.agent(sweep_id=sweep_id, function=experiment.run_experiment) + elif "agent" in configuration and configuration["agent"] is not None: + # add agent to already existing sweep, not run sweep must be set to true + configuration["run_sweep"] = True + os.environ["WANDB_DIR"] 
= os.path.abspath( + configuration["logging"]["wandb_dir"] + ) + wandb.agent( + sweep_id=configuration["agent"], function=experiment.run_experiment + ) + else: + # casual run + outdir = experiment.run_experiment() + inference = InferenceCellViT( + run_dir=outdir, + gpu=configuration["gpu"], + checkpoint_name=configuration["eval_checkpoint"], + magnification=configuration["data"].get("magnification", 40), + ) + ( + trained_model, + inference_dataloader, + dataset_config, + ) = inference.setup_patch_inference() + inference.run_patch_inference( + trained_model, + inference_dataloader, + dataset_config, + generate_plots=False, + ) + wandb.finish() diff --git a/cell_segmentation/trainer/__init__.py b/cell_segmentation/trainer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13abf5423440a2da8cd0433d1d0afa37bc1874fe --- /dev/null +++ b/cell_segmentation/trainer/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Trainer for each network type +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen diff --git a/cell_segmentation/trainer/trainer_cellvit.py b/cell_segmentation/trainer/trainer_cellvit.py new file mode 100644 index 0000000000000000000000000000000000000000..bb9ce06a43211f05ebebd6e5ca330b2fec3e66ab --- /dev/null +++ b/cell_segmentation/trainer/trainer_cellvit.py @@ -0,0 +1,1092 @@ +# -*- coding: utf-8 -*- +# CellViT Trainer Class +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +import logging +from pathlib import Path +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +import tqdm +import math +import csv + +# import wandb +from matplotlib import pyplot as plt +from skimage.color import rgba2rgb +from sklearn.metrics import accuracy_score +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler +from torch.utils.data import DataLoader +from torchmetrics.functional import dice +from torchmetrics.functional.classification import binary_jaccard_index + +from base_ml.base_early_stopping import EarlyStopping +from base_ml.base_trainer import BaseTrainer +from models.segmentation.cell_segmentation.cellvit import DataclassHVStorage +from cell_segmentation.utils.metrics import get_fast_pq, remap_label +from cell_segmentation.utils.tools import cropping_center +from models.segmentation.cell_segmentation.cellvit import CellViT +from utils.tools import AverageMeter +from timm.utils import ModelEma +from torch.cuda.amp import GradScaler, autocast + +class CellViTTrainer(BaseTrainer): + """CellViT trainer class + + Args: + model (CellViT): CellViT model that should be trained + loss_fn_dict (dict): Dictionary with loss functions for each branch with a dictionary of loss functions. 
+ Name of branch as top-level key, followed by a dictionary with loss name, loss fn and weighting factor + Example: + { + "nuclei_binary_map": {"bce": {loss_fn(Callable), weight_factor(float)}, "dice": {loss_fn(Callable), weight_factor(float)}}, + "hv_map": {"bce": {loss_fn(Callable), weight_factor(float)}, "dice": {loss_fn(Callable), weight_factor(float)}}, + "nuclei_type_map": {"bce": {loss_fn(Callable), weight_factor(float)}, "dice": {loss_fn(Callable), weight_factor(float)}} + "tissue_types": {"ce": {loss_fn(Callable), weight_factor(float)}} + } + Required Keys are: + * nuclei_binary_map + * hv_map + * nuclei_type_map + * tissue types + optimizer (Optimizer): Optimizer + scheduler (_LRScheduler): Learning rate scheduler + device (str): Cuda device to use, e.g., cuda:0. + logger (logging.Logger): Logger module + logdir (Union[Path, str]): Logging directory + num_classes (int): Number of nuclei classes + dataset_config (dict): Dataset configuration. Required Keys are: + * "tissue_types": describing the present tissue types with corresponding integer + * "nuclei_types": describing the present nuclei types with corresponding integer + experiment_config (dict): Configuration of this experiment + early_stopping (EarlyStopping, optional): Early Stopping Class. Defaults to None. + log_images (bool, optional): If images should be logged to WandB. Defaults to False. + magnification (int, optional): Image magnification. Please select either 40 or 20. Defaults to 40. + mixed_precision (bool, optional): If mixed-precision should be used. Defaults to False. + """ + + def __init__( + self, + model: CellViT, + loss_fn_dict: dict, + optimizer: Optimizer, + scheduler: _LRScheduler, + device: str, + logger: logging.Logger, + logdir: Union[Path, str], + num_classes: int, + dataset_config: dict, + experiment_config: dict, + early_stopping: EarlyStopping = None, + log_images: bool = False, + magnification: int = 40, + mixed_precision: bool = False, + #model_ema : bool = True, + ): + super().__init__( + model=model, + loss_fn=None, + optimizer=optimizer, + scheduler=scheduler, + device=device, + logger=logger, + logdir=logdir, + experiment_config=experiment_config, + early_stopping=early_stopping, + accum_iter=1, + log_images=log_images, + mixed_precision=mixed_precision, + + ) + self.loss_fn_dict = loss_fn_dict + self.num_classes = num_classes + self.dataset_config = dataset_config + self.tissue_types = dataset_config["tissue_types"] + self.reverse_tissue_types = {v: k for k, v in self.tissue_types.items()} + self.nuclei_types = dataset_config["nuclei_types"] + self.magnification = magnification + #self.model_ema = model_ema + + # setup logging objects + self.loss_avg_tracker = {"Total_Loss": AverageMeter("Total_Loss", ":.4f")} + for branch, loss_fns in self.loss_fn_dict.items(): + for loss_name in loss_fns: + self.loss_avg_tracker[f"{branch}_{loss_name}"] = AverageMeter( + f"{branch}_{loss_name}", ":.4f" + ) + + self.batch_avg_tissue_acc = AverageMeter("Batch_avg_tissue_ACC", ":4.f") + + def train_epoch( + self, epoch: int, train_dataloader: DataLoader, unfreeze_epoch: int = 50 + ) -> Tuple[dict, dict]: + """Training logic for a training epoch + + Args: + epoch (int): Current epoch number + train_dataloader (DataLoader): Train dataloader + unfreeze_epoch (int, optional): Epoch to unfreeze layers + Returns: + Tuple[dict, dict]: wandb logging dictionaries + * Scalar metrics + * Image metrics + """ + self.model.train() + if epoch >= unfreeze_epoch: + self.model.unfreeze_encoder() + + + # if self.model_ema and 
epoch == 0: + # self.model_ema_instance = ModelEma( + # model=self.model, + # decay=0.9999, + # device='cuda', + # resume='' + # ) + + binary_dice_scores = [] + binary_jaccard_scores = [] + tissue_pred = [] + tissue_gt = [] + train_example_img = None + + # reset metrics + self.loss_avg_tracker["Total_Loss"].reset() + for branch, loss_fns in self.loss_fn_dict.items(): + for loss_name in loss_fns: + self.loss_avg_tracker[f"{branch}_{loss_name}"].reset() + self.batch_avg_tissue_acc.reset() + + # randomly select a batch that should be displayed + if self.log_images: + select_example_image = int(torch.randint(0, len(train_dataloader), (1,))) + else: + select_example_image = None + train_loop = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader)) + + for batch_idx, batch in train_loop: + return_example_images = batch_idx == select_example_image + batch_metrics, example_img = self.train_step( + batch, + batch_idx, + len(train_dataloader), + return_example_images=return_example_images, + ) + if example_img is not None: + train_example_img = example_img + binary_dice_scores = ( + binary_dice_scores + batch_metrics["binary_dice_scores"] + ) + binary_jaccard_scores = ( + binary_jaccard_scores + batch_metrics["binary_jaccard_scores"] + ) + tissue_pred.append(batch_metrics["tissue_pred"]) + tissue_gt.append(batch_metrics["tissue_gt"]) + train_loop.set_postfix( + { + "Loss": np.round(self.loss_avg_tracker["Total_Loss"].avg, 3), + "Dice": np.round(np.nanmean(binary_dice_scores), 3), + "Pred-Acc": np.round(self.batch_avg_tissue_acc.avg, 3), + } + ) + + # calculate global metrics + binary_dice_scores = np.array(binary_dice_scores) + binary_jaccard_scores = np.array(binary_jaccard_scores) + tissue_detection_accuracy = accuracy_score( + y_true=np.concatenate(tissue_gt), y_pred=np.concatenate(tissue_pred) + ) + + scalar_metrics = { + "Loss/Train": self.loss_avg_tracker["Total_Loss"].avg, + "Binary-Cell-Dice-Mean/Train": np.nanmean(binary_dice_scores), + "Binary-Cell-Jacard-Mean/Train": np.nanmean(binary_jaccard_scores), + "Tissue-Multiclass-Accuracy/Train": tissue_detection_accuracy, + } + + for branch, loss_fns in self.loss_fn_dict.items(): + for loss_name in loss_fns: + scalar_metrics[f"{branch}_{loss_name}/Train"] = self.loss_avg_tracker[ + f"{branch}_{loss_name}" + ].avg + + + self.logger.info( + f"{'Training epoch stats:' : <25} " + f"Loss: {self.loss_avg_tracker['Total_Loss'].avg:.4f} - " + f"Binary-Cell-Dice: {np.nanmean(binary_dice_scores):.4f} - " + f"Binary-Cell-Jacard: {np.nanmean(binary_jaccard_scores):.4f} - " + f"Tissue-MC-Acc.: {tissue_detection_accuracy:.4f}" + ) + + image_metrics = {"Example-Predictions/Train": train_example_img} + + return scalar_metrics, image_metrics + + def train_step( + self, + batch: object, + batch_idx: int, + num_batches: int, + return_example_images: bool, + ) -> Tuple[dict, Union[plt.Figure, None]]: + """Training step + + Args: + batch (object): Training batch, consisting of images ([0]), masks ([1]), tissue_types ([2]) and figure filenames ([3]) + batch_idx (int): Batch index + num_batches (int): Total number of batches in epoch + return_example_images (bool): If an example preciction image should be returned + + Returns: + Tuple[dict, Union[plt.Figure, None]]: + * Batch-Metrics: dictionary with the following keys: + * Example prediction image + """ + # unpack batch + imgs = batch[0].to(self.device) # imgs shape: (batch_size, 3, H, W) (16,3,256,256) + masks = batch[ + 1 + ] # dict: keys: "instance_map", [16,256,256],"nuclei_map",[16,256,256], 
"nuclei_binary_map",[16,256,256], "hv_map"[16,2,256,256] + tissue_types = batch[2] # list[str] + #change + #scaler = GradScaler(init_scale=2.0) + + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + #with torch.cuda.amp.autocast(False): + # make predictions + predictions_ = self.model.forward(imgs) #img.shape=(16,3,256,256) model.forward(imgs) 'tissue_types'(16,19),'nuclei_binary_map'(16,2,128,128),'hv_map'(16,2,128,128),'nuclei_type_map'(16,6,128,128) + + # reshaping and postprocessing + predictions = self.unpack_predictions(predictions=predictions_) + gt = self.unpack_masks(masks=masks, tissue_types=tissue_types) + + # calculate loss + total_loss = self.calculate_loss(predictions, gt) + # if torch.isnan(total_loss): + # print("nan in loss") + #if math.isnan(total_loss.item()): + #print("nan") + # import pdb; pdb.set_trace() + + # backward pass + self.scaler.scale(total_loss).backward() + # 阈值剪切梯度 + #torch.nn.utils.clip_grad_value_(self.model.parameters(), clip_value=1.0) + # if torch.any(torch.tensor([torch.any(torch.isnan(param.data)) for param in self.model.parameters()])): + # print("nan in model parameters") + if ( + ((batch_idx + 1) % self.accum_iter == 0) + or ((batch_idx + 1) == num_batches) + or (self.accum_iter == 1) + ): + # self.scaler.unscale_(self.optimizer) + # torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0) + self.scaler.step(self.optimizer) + self.scaler.update() + # if self.model_ema: + # self.model_ema_instance.update(self.model) + self.optimizer.zero_grad(set_to_none=True) + self.model.zero_grad() + else: + predictions_ = self.model.forward(imgs) + predictions = self.unpack_predictions(predictions=predictions_) + gt = self.unpack_masks(masks=masks, tissue_types=tissue_types) + + # calculate loss + total_loss = self.calculate_loss(predictions, gt) + + total_loss.backward() + if ( + ((batch_idx + 1) % self.accum_iter == 0) + or ((batch_idx + 1) == num_batches) + or (self.accum_iter == 1) + ): + self.optimizer.step() + # if self.model_ema: + # self.model_ema_instance.update(self.model) + self.optimizer.zero_grad(set_to_none=True) + self.model.zero_grad() + with torch.cuda.device(self.device): + torch.cuda.empty_cache() + + batch_metrics = self.calculate_step_metric_train(predictions, gt) + + if return_example_images: + return_example_images = self.generate_example_image( + imgs, predictions, gt, num_images=4, num_nuclei_classes=self.num_classes + ) + else: + return_example_images = None + + return batch_metrics, return_example_images + + def validation_epoch( + self, epoch: int, val_dataloader: DataLoader + ) -> Tuple[dict, dict, float]: + """Validation logic for a validation epoch + + Args: + epoch (int): Current epoch number + val_dataloader (DataLoader): Validation dataloader + + Returns: + Tuple[dict, dict, float]: wandb logging dictionaries + * Scalar metrics + * Image metrics + * Early stopping metric + """ + self.model.eval() + + binary_dice_scores = [] + binary_jaccard_scores = [] + pq_scores = [] + cell_type_pq_scores = [] + tissue_pred = [] + tissue_gt = [] + val_example_img = None + + + # reset metrics + self.loss_avg_tracker["Total_Loss"].reset() + for branch, loss_fns in self.loss_fn_dict.items(): + for loss_name in loss_fns: + self.loss_avg_tracker[f"{branch}_{loss_name}"].reset() + self.batch_avg_tissue_acc.reset() + + # randomly select a batch that should be displayed + if self.log_images: + select_example_image = int(torch.randint(0, len(val_dataloader), (1,))) + else: + select_example_image = 
None + + val_loop = tqdm.tqdm(enumerate(val_dataloader), total=len(val_dataloader)) + + + with torch.no_grad(): + for batch_idx, batch in val_loop: + return_example_images = batch_idx == select_example_image + batch_metrics, example_img= self.validation_step( + batch, batch_idx, return_example_images + ) + + # 检查总体损失是否为NaN + # if np.isnan(self.loss_avg_tracker["Total_Loss"].avg): + # print("NaN loss for image:", batch_idx) + + + if example_img is not None: + val_example_img = example_img + binary_dice_scores = ( + binary_dice_scores + batch_metrics["binary_dice_scores"] + ) + binary_jaccard_scores = ( + binary_jaccard_scores + batch_metrics["binary_jaccard_scores"] + ) + pq_scores = pq_scores + batch_metrics["pq_scores"] + cell_type_pq_scores = ( + cell_type_pq_scores + batch_metrics["cell_type_pq_scores"] + ) + tissue_pred.append(batch_metrics["tissue_pred"]) + tissue_gt.append(batch_metrics["tissue_gt"]) + val_loop.set_postfix( + { + "Loss": np.round(self.loss_avg_tracker["Total_Loss"].avg, 3), + "Dice": np.round(np.nanmean(binary_dice_scores), 3), + "Pred-Acc": np.round(self.batch_avg_tissue_acc.avg, 3), + } + ) + tissue_types_val = [ + self.reverse_tissue_types[t].lower() for t in np.concatenate(tissue_gt) + ] + + # calculate global metrics + binary_dice_scores = np.array(binary_dice_scores) + binary_jaccard_scores = np.array(binary_jaccard_scores) + pq_scores = np.array(pq_scores) + tissue_detection_accuracy = accuracy_score( + y_true=np.concatenate(tissue_gt), y_pred=np.concatenate(tissue_pred) + ) + + scalar_metrics = { + "Loss/Validation": self.loss_avg_tracker["Total_Loss"].avg, + "Binary-Cell-Dice-Mean/Validation": np.nanmean(binary_dice_scores), + "Binary-Cell-Jacard-Mean/Validation": np.nanmean(binary_jaccard_scores), + "Tissue-Multiclass-Accuracy/Validation": tissue_detection_accuracy, + "bPQ/Validation": np.nanmean(pq_scores), + "mPQ/Validation": np.nanmean( + [np.nanmean(pq) for pq in cell_type_pq_scores] + ), + } + + for branch, loss_fns in self.loss_fn_dict.items(): + for loss_name in loss_fns: + scalar_metrics[ + f"{branch}_{loss_name}/Validation" + ] = self.loss_avg_tracker[f"{branch}_{loss_name}"].avg #这里的loss_avg_tracker是在train_step中定义的 + + # calculate local metrics + # per tissue class + for tissue in self.tissue_types.keys(): + tissue = tissue.lower() + tissue_ids = np.where(np.asarray(tissue_types_val) == tissue) + scalar_metrics[f"{tissue}-Dice/Validation"] = np.nanmean( + binary_dice_scores[tissue_ids] + ) + scalar_metrics[f"{tissue}-Jaccard/Validation"] = np.nanmean( + binary_jaccard_scores[tissue_ids] + ) + scalar_metrics[f"{tissue}-bPQ/Validation"] = np.nanmean( + pq_scores[tissue_ids] + ) + scalar_metrics[f"{tissue}-mPQ/Validation"] = np.nanmean( + [np.nanmean(pq) for pq in np.array(cell_type_pq_scores)[tissue_ids]] + ) + + # calculate nuclei metrics + for nuc_name, nuc_type in self.nuclei_types.items(): + if nuc_name.lower() == "background": + continue + scalar_metrics[f"{nuc_name}-PQ/Validation"] = np.nanmean( + [pq[nuc_type] for pq in cell_type_pq_scores] + ) + + self.logger.info( + f"{'Validation epoch stats:' : <25} " + f"Loss: {self.loss_avg_tracker['Total_Loss'].avg:.4f} - " + f"Binary-Cell-Dice: {np.nanmean(binary_dice_scores):.4f} - " + f"Binary-Cell-Jacard: {np.nanmean(binary_jaccard_scores):.4f} - " + f"bPQ-Score: {np.nanmean(pq_scores):.4f} - " + f"mPQ-Score: {scalar_metrics['mPQ/Validation']:.4f} - " + f"Tissue-MC-Acc.: {tissue_detection_accuracy:.4f}" + ) + + image_metrics = {"Example-Predictions/Validation": val_example_img} + + return 
scalar_metrics, image_metrics, np.nanmean(pq_scores) + + def validation_step( + self, + batch: object, + batch_idx: int, + return_example_images: bool, + ): + """Validation step + + Args: + batch (object): Training batch, consisting of images ([0]), masks ([1]), tissue_types ([2]) and figure filenames ([3]) + batch_idx (int): Batch index + return_example_images (bool): If an example preciction image should be returned + + Returns: + Tuple[dict, Union[plt.Figure, None]]: + * Batch-Metrics: dictionary, structure not fixed yet + * Example prediction image + """ + # unpack batch, for shape compare train_step method + imgs = batch[0].to(self.device) + masks = batch[1] + tissue_types = batch[2] + # nan_loss_images = [] + # csv_file = "/data3/ziweicui/PanNuke/cellvit-png/fold1_nan_loss_images.csv" + + + self.model.zero_grad() + self.optimizer.zero_grad() + # with open(csv_file, 'a') as f: + # csv_write = csv.writer(f) + if self.mixed_precision: + with torch.autocast(device_type="cuda", dtype=torch.float16): + # make predictions + predictions_ = self.model.forward(imgs) + # reshaping and postprocessing + predictions = self.unpack_predictions(predictions=predictions_) + gt = self.unpack_masks(masks=masks, tissue_types=tissue_types) + # calculate loss + _ = self.calculate_loss(predictions, gt) + # 检查损失是否为NaN + #loss_value = _.item() + # if math.isnan(loss_value): + # print("NaN loss for image:", batch[3]) + #nan_loss_images.append(batch[3]) + + + else: + predictions_ = self.model.forward(imgs) + # reshaping and postprocessing + predictions = self.unpack_predictions(predictions=predictions_) + gt = self.unpack_masks(masks=masks, tissue_types=tissue_types) + # calculate loss + _ = self.calculate_loss(predictions, gt) + # 检查损失是否为NaN + loss_value = _.item() + if math.isnan(loss_value): + print("NaN loss for image:", batch[3]) + + + + + # get metrics for this batch + batch_metrics = self.calculate_step_metric_validation(predictions, gt) + + if return_example_images: + try: + return_example_images = self.generate_example_image( + imgs, + predictions, + gt, + num_images=4, + num_nuclei_classes=self.num_classes, + ) + except AssertionError: + self.logger.error( + "AssertionError for Example Image. Please check. Continue without image." + ) + return_example_images = None + else: + return_example_images = None + + return batch_metrics, return_example_images + + def unpack_predictions(self, predictions: dict) -> DataclassHVStorage: + """Unpack the given predictions. Main focus lays on reshaping and postprocessing predictions, e.g. separating instances + + Args: + predictions (dict): Dictionary with the following keys: + * tissue_types: Logit tissue prediction output. Shape: (batch_size, num_tissue_classes) + * nuclei_binary_map: Logit output for binary nuclei prediction branch. Shape: (batch_size, 2, H, W) + * hv_map: Logit output for hv-prediction. Shape: (batch_size, 2, H, W) + * nuclei_type_map: Logit output for nuclei instance-prediction. 
Shape: (batch_size, num_nuclei_classes, H, W) + + Returns: + DataclassHVStorage: Processed network output + """ + predictions["tissue_types"] = predictions["tissue_types"].to(self.device) + predictions["nuclei_binary_map"] = F.softmax( + predictions["nuclei_binary_map"], dim=1 + ) # shape: (batch_size, 2, H, W) + predictions["nuclei_type_map"] = F.softmax( + predictions["nuclei_type_map"], dim=1 + ) # shape: (batch_size, num_nuclei_classes, H, W) + ( + predictions["instance_map"], + predictions["instance_types"], + ) = self.model.calculate_instance_map( + predictions, self.magnification + ) # shape: (batch_size, H, W) + predictions["instance_types_nuclei"] = self.model.generate_instance_nuclei_map( + predictions["instance_map"], predictions["instance_types"] + ).to( + self.device + ) # shape: (batch_size, num_nuclei_classes, H, W) (32, 256, 256, 6) + + if "regression_map" not in predictions.keys(): + predictions["regression_map"] = None + + predictions = DataclassHVStorage( + nuclei_binary_map=predictions["nuclei_binary_map"], + hv_map=predictions["hv_map"], + nuclei_type_map=predictions["nuclei_type_map"], + tissue_types=predictions["tissue_types"], + instance_map=predictions["instance_map"], + instance_types=predictions["instance_types"], + instance_types_nuclei=predictions["instance_types_nuclei"], + batch_size=predictions["tissue_types"].shape[0], + regression_map=predictions["regression_map"], + num_nuclei_classes=self.num_classes, + ) + + return predictions + + def unpack_masks(self, masks: dict, tissue_types: list) -> DataclassHVStorage: + """Unpack the given masks. Main focus lays on reshaping and postprocessing masks to generate one dict + + Args: + masks (dict): Required keys are: + * instance_map: Pixel-wise nuclear instance segmentations. Shape: (batch_size, H, W) + * nuclei_binary_map: Binary nuclei segmentations. Shape: (batch_size, H, W) + * hv_map: HV-Map. Shape: (batch_size, 2, H, W) + * nuclei_type_map: Nuclei instance-prediction and segmentation (not binary, each instance has own integer). 
+ Shape: (batch_size, num_nuclei_classes, H, W) + + tissue_types (list): List of string names of ground-truth tissue types + + Returns: + DataclassHVStorage: GT-Results with matching shapes and output types + """ + # get ground truth values, perform one hot encoding for segmentation maps + gt_nuclei_binary_map_onehot = ( + F.one_hot(masks["nuclei_binary_map"], num_classes=2) + ).type( + torch.float32 + ) # background, nuclei + #nuclei_type_maps = torch.squeeze(masks["nuclei_type_map"]).type(torch.int64) + nuclei_type_maps = masks["nuclei_type_map"].type(torch.int64) + gt_nuclei_type_maps_onehot = F.one_hot( + nuclei_type_maps, num_classes=self.num_classes + ).type( + torch.float32 + ) # background + nuclei types + + # assemble ground truth dictionary + gt = { + "nuclei_type_map": gt_nuclei_type_maps_onehot.permute(0, 3, 1, 2).to( + self.device + ), # shape: (batch_size, H, W, num_nuclei_classes) + "nuclei_binary_map": gt_nuclei_binary_map_onehot.permute(0, 3, 1, 2).to( + self.device + ), # shape: (batch_size, H, W, 2) + "hv_map": masks["hv_map"].to(self.device), # shape: (batch_size,2, H, W) + "instance_map": masks["instance_map"].to( + self.device + ), # shape: (batch_size, H, W) -> each instance has one integer + "instance_types_nuclei": ( + gt_nuclei_type_maps_onehot * masks["instance_map"][..., None] + ) + .permute(0, 3, 1, 2) + .to( + self.device + ), # shape: (batch_size, num_nuclei_classes, H, W) -> instance has one integer, for each nuclei class + "tissue_types": torch.Tensor([self.tissue_types[t] for t in tissue_types]) + .type(torch.LongTensor) + .to(self.device), # shape: batch_size + } + if "regression_map" in masks: + gt["regression_map"] = masks["regression_map"].to(self.device) + + gt = DataclassHVStorage( + **gt, + batch_size=gt["tissue_types"].shape[0], + num_nuclei_classes=self.num_classes, + ) + return gt + + def calculate_loss( + self, predictions: DataclassHVStorage, gt: DataclassHVStorage + ) -> torch.Tensor: + """Calculate the loss + + Args: + predictions (DataclassHVStorage): Predictions + gt (DataclassHVStorage): Ground-Truth values + + Returns: + torch.Tensor: Loss + """ + predictions = predictions.get_dict() + gt = gt.get_dict() + + total_loss = 0 + + for branch, pred in predictions.items(): + if branch in [ + "instance_map", + "instance_types", + "instance_types_nuclei", + ]: + continue + if branch not in self.loss_fn_dict: + continue + branch_loss_fns = self.loss_fn_dict[branch] + for loss_name, loss_setting in branch_loss_fns.items(): + loss_fn = loss_setting["loss_fn"] + weight = loss_setting["weight"] + if loss_name == "msge": + loss_value = loss_fn( + input=pred, + target=gt[branch], + focus=gt["nuclei_binary_map"], + device=self.device, + ) + else: + loss_value = loss_fn(input=pred, target=gt[branch]) + total_loss = total_loss + weight * loss_value + self.loss_avg_tracker[f"{branch}_{loss_name}"].update( + loss_value.detach().cpu().numpy() + ) + self.loss_avg_tracker["Total_Loss"].update(total_loss.detach().cpu().numpy()) + + return total_loss + + def calculate_step_metric_train( + self, predictions: DataclassHVStorage, gt: DataclassHVStorage + ) -> dict: + """Calculate the metrics for the training step + + Args: + predictions (DataclassHVStorage): Processed network output + gt (DataclassHVStorage): Ground truth values + Returns: + dict: Dictionary with metrics. 
Keys:
+                binary_dice_scores, binary_jaccard_scores, tissue_pred, tissue_gt
+        """
+        predictions = predictions.get_dict()
+        gt = gt.get_dict()
+
+        # Tissue-type logits to probabilities, argmax to get the predicted class
+        predictions["tissue_types_classes"] = F.softmax(
+            predictions["tissue_types"], dim=-1
+        )
+        pred_tissue = (
+            torch.argmax(predictions["tissue_types_classes"], dim=-1)
+            .detach()
+            .cpu()
+            .numpy()
+            .astype(np.uint8)
+        )
+        predictions["instance_map"] = predictions["instance_map"].detach().cpu()
+        predictions["instance_types_nuclei"] = (
+            predictions["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
+        )
+        gt["tissue_types"] = gt["tissue_types"].detach().cpu().numpy().astype(np.uint8)
+        gt["nuclei_binary_map"] = torch.argmax(gt["nuclei_binary_map"], dim=1).type(
+            torch.uint8
+        )
+        gt["instance_types_nuclei"] = (
+            gt["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
+        )
+
+        tissue_detection_accuracy = accuracy_score(
+            y_true=gt["tissue_types"], y_pred=pred_tissue
+        )
+        self.batch_avg_tissue_acc.update(tissue_detection_accuracy)
+
+        binary_dice_scores = []
+        binary_jaccard_scores = []
+
+        for i in range(len(pred_tissue)):
+            # binary dice score: score for cell detection per image, without background
+            pred_binary_map = torch.argmax(predictions["nuclei_binary_map"][i], dim=0)
+            target_binary_map = gt["nuclei_binary_map"][i]
+            cell_dice = (
+                dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0)
+                .detach()
+                .cpu()
+            )
+            binary_dice_scores.append(float(cell_dice))
+
+            # binary jaccard index
+            cell_jaccard = (
+                binary_jaccard_index(
+                    preds=pred_binary_map,
+                    target=target_binary_map,
+                )
+                .detach()
+                .cpu()
+            )
+            binary_jaccard_scores.append(float(cell_jaccard))
+
+        batch_metrics = {
+            "binary_dice_scores": binary_dice_scores,
+            "binary_jaccard_scores": binary_jaccard_scores,
+            "tissue_pred": pred_tissue,
+            "tissue_gt": gt["tissue_types"],
+        }
+
+        return batch_metrics
+
+    def calculate_step_metric_validation(self, predictions: dict, gt: dict) -> dict:
+        """Calculate the metrics for the validation step
+
+        Args:
+            predictions (DataclassHVStorage): OrderedDict: Processed network output
+            gt (DataclassHVStorage): Ground truth values
+        Returns:
+            dict: Dictionary with metrics. 
Keys: + binary_dice_scores, binary_jaccard_scores, tissue_pred, tissue_gt + """ + predictions = predictions.get_dict() + gt = gt.get_dict() + + # Tissue Tpyes logits to probs and argmax to get class + predictions["tissue_types_classes"] = F.softmax( + predictions["tissue_types"], dim=-1 + ) + pred_tissue = ( + torch.argmax(predictions["tissue_types_classes"], dim=-1) + .detach() + .cpu() + .numpy() + .astype(np.uint8) + ) + predictions["instance_map"] = predictions["instance_map"].detach().cpu() + predictions["instance_types_nuclei"] = ( + predictions["instance_types_nuclei"].detach().cpu().numpy().astype("int32") + ) + #change + predictions["instance_types_nuclei"] = predictions["instance_types_nuclei"].transpose(0, 3, 1, 2) + instance_maps_gt = gt["instance_map"].detach().cpu() + gt["tissue_types"] = gt["tissue_types"].detach().cpu().numpy().astype(np.uint8) + gt["nuclei_binary_map"] = torch.argmax(gt["nuclei_binary_map"], dim=1).type( + torch.uint8 + ) + gt["instance_types_nuclei"] = ( + gt["instance_types_nuclei"].detach().cpu().numpy().astype("int32") + ) + + tissue_detection_accuracy = accuracy_score( + y_true=gt["tissue_types"], y_pred=pred_tissue + ) + self.batch_avg_tissue_acc.update(tissue_detection_accuracy) + + binary_dice_scores = [] + binary_jaccard_scores = [] + cell_type_pq_scores = [] + pq_scores = [] + + for i in range(len(pred_tissue)): + # binary dice score: Score for cell detection per image, without background + pred_binary_map = torch.argmax(predictions["nuclei_binary_map"][i], dim=0) + target_binary_map = gt["nuclei_binary_map"][i] + cell_dice = ( + dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0) + .detach() + .cpu() + ) + binary_dice_scores.append(float(cell_dice)) + + # binary aji + cell_jaccard = ( + binary_jaccard_index( + preds=pred_binary_map, + target=target_binary_map, + ) + .detach() + .cpu() + ) + binary_jaccard_scores.append(float(cell_jaccard)) + # pq values + remapped_instance_pred = remap_label(predictions["instance_map"][i]) + remapped_gt = remap_label(instance_maps_gt[i]) + [_, _, pq], _ = get_fast_pq(true=remapped_gt, pred=remapped_instance_pred) + pq_scores.append(pq) + + #pq values per class (skip background) + nuclei_type_pq = [] + for j in range(0, self.num_classes): + pred_nuclei_instance_class = remap_label( + predictions["instance_types_nuclei"][i][j, ...] + ) + target_nuclei_instance_class = remap_label( + gt["instance_types_nuclei"][i][j, ...] 
+ ) + + # if ground truth is empty, skip from calculation + if len(np.unique(target_nuclei_instance_class)) == 1: + pq_tmp = np.nan + else: + [_, _, pq_tmp], _ = get_fast_pq( + pred_nuclei_instance_class, + target_nuclei_instance_class, + match_iou=0.5, + ) + nuclei_type_pq.append(pq_tmp) + + cell_type_pq_scores.append(nuclei_type_pq) + + batch_metrics = { + "binary_dice_scores": binary_dice_scores, + "binary_jaccard_scores": binary_jaccard_scores, + "pq_scores": pq_scores, + "cell_type_pq_scores": cell_type_pq_scores, + "tissue_pred": pred_tissue, + "tissue_gt": gt["tissue_types"], + } + + return batch_metrics + + @staticmethod + def generate_example_image( + imgs: Union[torch.Tensor, np.ndarray], + predictions: DataclassHVStorage, + gt: DataclassHVStorage, + num_nuclei_classes: int, + num_images: int = 2, + ) -> plt.Figure: + """Generate example plot with image, binary_pred, hv-map and instance map from prediction and ground-truth + + Args: + imgs (Union[torch.Tensor, np.ndarray]): Images to process, a random number (num_images) is selected from this stack + Shape: (batch_size, 3, H', W') + predictions (DataclassHVStorage): Predictions + gt (DataclassHVStorage): gt + num_nuclei_classes (int): Number of total nuclei classes including background + num_images (int, optional): Number of example patches to display. Defaults to 2. + + Returns: + plt.Figure: Figure with example patches + """ + predictions = predictions.get_dict() + gt = gt.get_dict() + + assert num_images <= imgs.shape[0] + num_images = 4 + + predictions["nuclei_binary_map"] = predictions["nuclei_binary_map"].permute( + 0, 2, 3, 1 + ) + predictions["hv_map"] = predictions["hv_map"].permute(0, 2, 3, 1) + predictions["nuclei_type_map"] = predictions["nuclei_type_map"].permute( + 0, 2, 3, 1 + ) + predictions["instance_types_nuclei"] = predictions[ + "instance_types_nuclei" + ].transpose(0, 2, 3, 1) + + gt["hv_map"] = gt["hv_map"].permute(0, 2, 3, 1) + gt["nuclei_type_map"] = gt["nuclei_type_map"].permute(0, 2, 3, 1) + predictions["instance_types_nuclei"] = predictions[ + "instance_types_nuclei" + ].transpose(0, 2, 3, 1) + + h = gt["hv_map"].shape[1] + w = gt["hv_map"].shape[2] + + sample_indices = torch.randint(0, imgs.shape[0], (num_images,)) + # convert to rgb and crop to selection + sample_images = ( + imgs[sample_indices].permute(0, 2, 3, 1).contiguous().cpu().numpy() + ) # convert to rgb + sample_images = cropping_center(sample_images, (h, w), True) + + # get predictions + pred_sample_binary_map = ( + predictions["nuclei_binary_map"][sample_indices, :, :, 1] + .detach() + .cpu() + .numpy() + ) + pred_sample_hv_map = ( + predictions["hv_map"][sample_indices].detach().cpu().numpy() + ) + pred_sample_instance_maps = ( + predictions["instance_map"][sample_indices].detach().cpu().numpy() + ) + pred_sample_type_maps = ( + torch.argmax(predictions["nuclei_type_map"][sample_indices], dim=-1) + .detach() + .cpu() + .numpy() + ) + + # get ground truth labels + gt_sample_binary_map = ( + gt["nuclei_binary_map"][sample_indices].detach().cpu().numpy() + ) + gt_sample_hv_map = gt["hv_map"][sample_indices].detach().cpu().numpy() + gt_sample_instance_map = ( + gt["instance_map"][sample_indices].detach().cpu().numpy() + ) + gt_sample_type_map = ( + torch.argmax(gt["nuclei_type_map"][sample_indices], dim=-1) + .detach() + .cpu() + .numpy() + ) + + # create colormaps + hv_cmap = plt.get_cmap("jet") + binary_cmap = plt.get_cmap("jet") + instance_map = plt.get_cmap("viridis") + + # setup plot + fig, axs = plt.subplots(num_images, figsize=(6, 2 * 
num_images), dpi=150) + + for i in range(num_images): + placeholder = np.zeros((2 * h, 6 * w, 3)) + # orig image + placeholder[:h, :w, :3] = sample_images[i] + placeholder[h : 2 * h, :w, :3] = sample_images[i] + # binary prediction + placeholder[:h, w : 2 * w, :3] = rgba2rgb( + binary_cmap(gt_sample_binary_map[i] * 255) + ) + placeholder[h : 2 * h, w : 2 * w, :3] = rgba2rgb( + binary_cmap(pred_sample_binary_map[i]) + ) # *255? + # hv maps + placeholder[:h, 2 * w : 3 * w, :3] = rgba2rgb( + hv_cmap((gt_sample_hv_map[i, :, :, 0] + 1) / 2) + ) + placeholder[h : 2 * h, 2 * w : 3 * w, :3] = rgba2rgb( + hv_cmap((pred_sample_hv_map[i, :, :, 0] + 1) / 2) + ) + placeholder[:h, 3 * w : 4 * w, :3] = rgba2rgb( + hv_cmap((gt_sample_hv_map[i, :, :, 1] + 1) / 2) + ) + placeholder[h : 2 * h, 3 * w : 4 * w, :3] = rgba2rgb( + hv_cmap((pred_sample_hv_map[i, :, :, 1] + 1) / 2) + ) + # instance_predictions + placeholder[:h, 4 * w : 5 * w, :3] = rgba2rgb( + instance_map( + (gt_sample_instance_map[i] - np.min(gt_sample_instance_map[i])) + / ( + np.max(gt_sample_instance_map[i]) + - np.min(gt_sample_instance_map[i] + 1e-10) + ) + ) + ) + placeholder[h : 2 * h, 4 * w : 5 * w, :3] = rgba2rgb( + instance_map( + ( + pred_sample_instance_maps[i] + - np.min(pred_sample_instance_maps[i]) + ) + / ( + np.max(pred_sample_instance_maps[i]) + - np.min(pred_sample_instance_maps[i] + 1e-10) + ) + ) + ) + # type_predictions + placeholder[:h, 5 * w : 6 * w, :3] = rgba2rgb( + binary_cmap(gt_sample_type_map[i] / num_nuclei_classes) + ) + placeholder[h : 2 * h, 5 * w : 6 * w, :3] = rgba2rgb( + binary_cmap(pred_sample_type_maps[i] / num_nuclei_classes) + ) + + # plotting + axs[i].imshow(placeholder) + axs[i].set_xticks([], []) + + # plot labels in first row + if i == 0: + axs[i].set_xticks(np.arange(w / 2, 6 * w, w)) + axs[i].set_xticklabels( + [ + "Image", + "Binary-Cells", + "HV-Map-0", + "HV-Map-1", + "Cell Instances", + "Nuclei-Instances", + ], + fontsize=6, + ) + axs[i].xaxis.tick_top() + + axs[i].set_yticks(np.arange(h / 2, 2 * h, h)) + axs[i].set_yticklabels(["GT", "Pred."], fontsize=6) + axs[i].tick_params(axis="both", which="both", length=0) + grid_x = np.arange(w, 6 * w, w) + grid_y = np.arange(h, 2 * h, h) + + for x_seg in grid_x: + axs[i].axvline(x_seg, color="black") + for y_seg in grid_y: + axs[i].axhline(y_seg, color="black") + + fig.suptitle(f"Patch Predictions for {num_images} Examples") + + fig.tight_layout() + + return fig diff --git a/cell_segmentation/utils/__init__.py b/cell_segmentation/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..272a422273f1955e69c32a2192e52ddb9a3bac8c --- /dev/null +++ b/cell_segmentation/utils/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Utils +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen diff --git a/cell_segmentation/utils/metrics.py b/cell_segmentation/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc15d9333d06a0da1400344b310929d45a247bf --- /dev/null +++ b/cell_segmentation/utils/metrics.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- +# Implemented Metrics for Cell detection +# +# This code is based on the following repository: https://github.com/TissueImageAnalytics/PanNuke-metrics +# +# Implemented metrics are: +# +# Instance Segmentation Metrics +# Binary PQ +# Multiclass PQ +# Neoplastic PQ +# Non-Neoplastic PQ +# Inflammatory PQ +# Dead PQ +# Inflammatory PQ +# Dead PQ +# +# Detection and Classification 
Metrics
+# Precision, Recall, F1
+#
+# Other
+# dice1, dice2, aji, aji_plus
+#
+# Binary PQ (bPQ): Assumes all nuclei belong to same class and reports the average PQ across tissue types.
+# Multi-Class PQ (mPQ): Reports the average PQ across the classes and tissue types.
+# Neoplastic PQ: Reports the PQ for the neoplastic class on all tissues.
+# Non-Neoplastic PQ: Reports the PQ for the non-neoplastic class on all tissues.
+# Inflammatory PQ: Reports the PQ for the inflammatory class on all tissues.
+# Connective PQ: Reports the PQ for the connective class on all tissues.
+# Dead PQ: Reports the PQ for the dead class on all tissues.
+#
+# @ Fabian Hörst, fabian.hoerst@uk-essen.de
+# Institute for Artificial Intelligence in Medicine,
+# University Medicine Essen
+
+from typing import List
+import numpy as np
+from scipy.optimize import linear_sum_assignment
+
+
+def get_fast_pq(true, pred, match_iou=0.5):
+    """
+    `match_iou` is the IoU threshold level to determine the pairing between
+    GT instances `p` and prediction instances `g`. `p` and `g` are a pair
+    if IoU > `match_iou`. However, a pair of `p` and `g` must be unique
+    (1 prediction instance to 1 GT instance mapping).
+
+    If `match_iou` < 0.5, Munkres assignment (solving minimum weight matching
+    in bipartite graphs) is calculated to find the maximal amount of unique pairings.
+
+    If `match_iou` >= 0.5, all IoU(p,g) > 0.5 pairings are proven to be unique and
+    the number of pairs is also maximal.
+
+    Fast computation requires instance IDs to be in contiguous ordering,
+    i.e. [1, 2, 3, 4] and not [2, 3, 6, 10]. Please call `remap_label` beforehand;
+    the `by_size` flag has no effect on the result.
+
+    Returns:
+        [dq, sq, pq]: measurement statistic
+
+        [paired_true, paired_pred, unpaired_true, unpaired_pred]:
+                      pairing information to perform measurement
+
+    """
+    assert match_iou >= 0.0, "Can't be negative"
+
+    true = np.copy(true)  # GT instance map, e.g. shape (256, 256)
+    pred = np.copy(pred)  # predicted instance map, e.g. shape (256, 256)
+    true_id_list = list(np.unique(true))
+    pred_id_list = list(np.unique(pred))  # instance ids present in the prediction
+
+    # if there is no background, fixing by adding it
+    if 0 not in pred_id_list:
+        pred_id_list = [0] + pred_id_list
+
+    true_masks = [
+        None,
+    ]
+    for t in true_id_list[1:]:
+        t_mask = np.array(true == t, np.uint8)
+        true_masks.append(t_mask)  # binary masks of the GT instances, each (256, 256)
+
+    pred_masks = [
+        None,
+    ]
+    for p in pred_id_list[1:]:
+        p_mask = np.array(pred == p, np.uint8)
+        pred_masks.append(p_mask)  # binary masks of the predicted instances, each (256, 256)
+
+    # prefill with zeros
+    pairwise_iou = np.zeros(
+        [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
+    )
+
+    # caching pairwise iou for all instances
+    for true_id in true_id_list[1:]:  # 0-th is background
+        t_mask = true_masks[true_id]  # binary mask of the current GT instance
+        # predicted instance ids overlapping the current GT instance
+        # (pred has the same shape as t_mask)
+        pred_true_overlap = pred[t_mask > 0]
+        pred_true_overlap_id = np.unique(pred_true_overlap)
+        pred_true_overlap_id = list(pred_true_overlap_id)
+        for pred_id in pred_true_overlap_id:
+            if pred_id == 0:  # ignore
+                continue  # overlapping background
+            p_mask = pred_masks[pred_id]
+            total = (t_mask + p_mask).sum()
+            inter = (t_mask * p_mask).sum()
+            iou = inter / (total - inter)
+            pairwise_iou[true_id - 1, pred_id - 1] = iou
+
+    if match_iou >= 0.5:
+        paired_iou = pairwise_iou[pairwise_iou > match_iou]
+        pairwise_iou[pairwise_iou <= match_iou] = 0.0
+        paired_true, paired_pred = np.nonzero(pairwise_iou)
+        paired_iou = pairwise_iou[paired_true, paired_pred]
+        paired_true += 1  # index is instance id - 1
+        paired_pred += 1  # hence return back to original
+    else:  # * Exhaustive maximal unique pairing
+        #### Munkres pairing with scipy library
+        # the algorithm returns (row indices, matched column indices)
+        # if there are multiple identical costs in a row, the index of the first
+        # occurrence is returned, thus the unique pairing is ensured
+        # invert the pairwise IoU so that high IoU corresponds to minimum cost
+        paired_true, paired_pred = linear_sum_assignment(-pairwise_iou)
+        ### extract the paired cost and remove invalid pairs
+        paired_iou = pairwise_iou[paired_true, paired_pred]
+
+        # now select those above threshold level
+        # paired with iou = 0.0, i.e. no intersection => FP or FN
+        paired_true = list(paired_true[paired_iou > match_iou] + 1)
+        paired_pred = list(paired_pred[paired_iou > match_iou] + 1)
+        paired_iou = paired_iou[paired_iou > match_iou]
+
+    # get the actual FP and FN
+    unpaired_true = [idx for idx in true_id_list[1:] if idx not in paired_true]
+    unpaired_pred = [idx for idx in pred_id_list[1:] if idx not in paired_pred]
+    # print(paired_iou.shape, paired_true.shape, len(unpaired_true), len(unpaired_pred))
+
+    tp = len(paired_true)
+    fp = len(unpaired_pred)
+    fn = len(unpaired_true)
+    # get the F1-score, i.e. DQ
+    dq = tp / (tp + 0.5 * fp + 0.5 * fn + 1.0e-6)
+    # get the SQ; pairs with IoU 0 were removed above, so they do not affect the sum
+    sq = paired_iou.sum() / (tp + 1.0e-6)
+
+    return [dq, sq, dq * sq], [paired_true, paired_pred, unpaired_true, unpaired_pred]
+
+
+#####
+
+
+def remap_label(pred, by_size=False):
+    """
+    Rename all instance ids so that the ids are contiguous, i.e. [0, 1, 2, 3]
+    and not [0, 2, 4, 6]. The ordering of instances (which one comes first)
+    is preserved unless by_size=True, in which case the instances are reordered
+    so that bigger nuclei get smaller ids.
+
+    Args:
+        pred    : the 2d array containing instances, where each instance is marked
+                  by a non-zero integer
+        by_size : renaming such that larger nuclei get smaller ids (on top)
+    """
+    pred_id = list(np.unique(pred))
+    if 0 in pred_id:
+        pred_id.remove(0)
+    if len(pred_id) == 0:
+        return pred  # no label
+    if by_size:
+        pred_size = []
+        for inst_id in pred_id:
+            size = (pred == inst_id).sum()
+            pred_size.append(size)
+        # sort the ids by size in descending order
+        pair_list = zip(pred_id, pred_size)
+        pair_list = sorted(pair_list, key=lambda x: x[1], reverse=True)
+        pred_id, pred_size = zip(*pair_list)
+
+    new_pred = np.zeros(pred.shape, np.int32)
+    for idx, inst_id in enumerate(pred_id):
+        new_pred[pred == inst_id] = idx + 1
+    return new_pred
+
+
+####
+
+
+def binarize(x):
+    """
+    Convert a multichannel (multiclass) instance segmentation tensor
+    to a binary instance segmentation (background and nuclei), i.e. collapse
+    the per-class instance maps into a single instance map.
+
+    :param x: B*B*C (for PanNuke 256*256*5)
+    :return: Instance segmentation
+    """
+    # x = np.transpose(x, (1, 2, 0))  # [256, 256, 5]
+
+    out = np.zeros([x.shape[0], x.shape[1]])  # start with an empty instance map
+    count = 1
+    for i in range(x.shape[2]):  # iterate over the class channels
+        x_ch = x[:, :, i]  # per-class instance map, shape (256, 256)
+        unique_vals = np.unique(x_ch)  # instance ids present in this channel
+        unique_vals = unique_vals.tolist()
+        unique_vals.remove(0)  # drop the background id
+        for j in unique_vals:  # iterate over the instance ids of this channel
+            x_tmp = x_ch == j  # boolean mask of the current instance
+            x_tmp_c = 1 - x_tmp  # complement of that mask
+            out *= x_tmp_c  # clear the pixels of the current instance
+            out += count * x_tmp  # write the new, contiguous instance id
+            count += 1
+    out = out.astype("int32")
+    return out
+
+
+def get_tissue_idx(tissue_indices, idx):
+    for i in range(len(tissue_indices)):
+        if tissue_indices[i].count(idx) == 1:
+            tiss_idx = i
+    return tiss_idx
+
+
+def cell_detection_scores(
+    paired_true, paired_pred, unpaired_true, unpaired_pred, w: List = [1, 1]
+):
+    tp_d = paired_pred.shape[0]
+    fp_d = unpaired_pred.shape[0]
+    fn_d = unpaired_true.shape[0]
+
+    # tp_tn_dt = (paired_pred == paired_true).sum()
+    # fp_fn_dt = (paired_pred != paired_true).sum()
+    prec_d = tp_d / (tp_d + fp_d)
+    rec_d = tp_d / (tp_d + fn_d)
+
+    f1_d = 2 * tp_d / (2 * tp_d + w[0] * fp_d + w[1] * fn_d)
+
+    return f1_d, prec_d, rec_d
+
+
+def cell_type_detection_scores(
+    paired_true,
+    paired_pred,
+    unpaired_true,
+    unpaired_pred,
+    type_id,
+    w: List = [2, 2, 1, 1],
+    exhaustive: bool = True,
+):
+    type_samples = (paired_true == type_id) | (paired_pred == type_id)
+
+    paired_true = paired_true[type_samples]
+    paired_pred = paired_pred[type_samples]
+
+    tp_dt = ((paired_true == type_id) & (paired_pred == type_id)).sum()
+    tn_dt = ((paired_true != type_id) & (paired_pred != type_id)).sum()
+    fp_dt = ((paired_true != type_id) & (paired_pred == type_id)).sum()
+    fn_dt = ((paired_true == type_id) & (paired_pred != type_id)).sum()
+
+    if not exhaustive:
+        ignore = (paired_true == -1).sum()
+        fp_dt -= ignore
+
+    fp_d = (unpaired_pred == type_id).sum()
+    fn_d = (unpaired_true == type_id).sum()
+
+    prec_type = (tp_dt + tn_dt) / (tp_dt + tn_dt + w[0] * fp_dt + w[2] * fp_d)
+    rec_type = (tp_dt + tn_dt) / (tp_dt + tn_dt + w[1] * fn_dt + w[3] * fn_d)
+
+    f1_type = (2 * (tp_dt + tn_dt)) / (
+        2 * (tp_dt + tn_dt) + w[0] * fp_dt + w[1] * fn_dt + w[2] * fp_d + w[3] * 
fn_d + ) + return f1_type, prec_type, rec_type diff --git a/cell_segmentation/utils/post_proc_cellvit.py b/cell_segmentation/utils/post_proc_cellvit.py new file mode 100644 index 0000000000000000000000000000000000000000..aacdbd413132fd296b47f3b0155d1fc6c0633232 --- /dev/null +++ b/cell_segmentation/utils/post_proc_cellvit.py @@ -0,0 +1,328 @@ +# -*- coding: utf-8 -*- +# PostProcessing Pipeline +# +# Adapted from HoverNet +# HoverNet Network (https://doi.org/10.1016/j.media.2019.101563) +# Code Snippet adapted from HoverNet implementation (https://github.com/vqdang/hover_net) +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +import warnings +from typing import Tuple, Literal,List + +import cv2 +import numpy as np +from scipy.ndimage import measurements +from scipy.ndimage.morphology import binary_fill_holes +from skimage.segmentation import watershed +import torch + +from .tools import get_bounding_box, remove_small_objects + + +def noop(*args, **kargs): + pass + + +warnings.warn = noop + + +class DetectionCellPostProcessor: + def __init__( + self, + nr_types: int = None, + magnification: Literal[20, 40] = 40, + gt: bool = False, + ) -> None: + """DetectionCellPostProcessor for postprocessing prediction maps and get detected cells + + Args: + nr_types (int, optional): Number of cell types, including background (background = 0). Defaults to None. + magnification (Literal[20, 40], optional): Which magnification the data has. Defaults to 40. + gt (bool, optional): If this is gt data (used that we do not suppress tiny cells that may be noise in a prediction map). + Defaults to False. + + Raises: + NotImplementedError: Unknown magnification + """ + self.nr_types = nr_types + self.magnification = magnification + self.gt = gt + + if magnification == 40: + self.object_size = 10 + self.k_size = 21 + elif magnification == 20: + self.object_size = 3 # 3 or 40, we used 5 + self.k_size = 11 # 11 or 41, we used 13 + else: + raise NotImplementedError("Unknown magnification") + if gt: # to not supress something in gt! + self.object_size = 100 + self.k_size = 21 + + def post_process_cell_segmentation( + self, + pred_map: np.ndarray, + ) -> Tuple[np.ndarray, dict]: + """Post processing of one image tile + + Args: + pred_map (np.ndarray): Combined output of tp, np and hv branches, in the same order. Shape: (H, W, 4) + + Returns: + Tuple[np.ndarray, dict]: + np.ndarray: Instance map for one image. Each nuclei has own integer. Shape: (H, W) + dict: Instance dictionary. Main Key is the nuclei instance number (int), with a dict as value. 
+ For each instance, the dictionary contains the keys: bbox (bounding box), centroid (centroid coordinates), + contour, type_prob (probability), type (nuclei type) + """ + if self.nr_types is not None: + pred_type = pred_map[..., :1] + pred_inst = pred_map[..., 1:] + pred_type = pred_type.astype(np.int32) + else: + pred_inst = pred_map + + pred_inst = np.squeeze(pred_inst) + pred_inst = self.__proc_np_hv( + pred_inst, object_size=self.object_size, ksize=self.k_size + ) + + inst_id_list = np.unique(pred_inst)[1:] # exlcude background + inst_info_dict = {} + for inst_id in inst_id_list: + inst_map = pred_inst == inst_id + rmin, rmax, cmin, cmax = get_bounding_box(inst_map) + inst_bbox = np.array([[rmin, cmin], [rmax, cmax]]) + inst_map = inst_map[ + inst_bbox[0][0] : inst_bbox[1][0], inst_bbox[0][1] : inst_bbox[1][1] + ] + inst_map = inst_map.astype(np.uint8) + inst_moment = cv2.moments(inst_map) + inst_contour = cv2.findContours( + inst_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE + ) + # * opencv protocol format may break + inst_contour = np.squeeze(inst_contour[0][0].astype("int32")) + # < 3 points dont make a contour, so skip, likely artifact too + # as the contours obtained via approximation => too small or sthg + if inst_contour.shape[0] < 3: + continue + if len(inst_contour.shape) != 2: + continue # ! check for trickery shape + inst_centroid = [ + (inst_moment["m10"] / inst_moment["m00"]), + (inst_moment["m01"] / inst_moment["m00"]), + ] + inst_centroid = np.array(inst_centroid) + inst_contour[:, 0] += inst_bbox[0][1] # X + inst_contour[:, 1] += inst_bbox[0][0] # Y + inst_centroid[0] += inst_bbox[0][1] # X + inst_centroid[1] += inst_bbox[0][0] # Y + inst_info_dict[inst_id] = { # inst_id should start at 1 + "bbox": inst_bbox, + "centroid": inst_centroid, + "contour": inst_contour, + "type_prob": None, + "type": None, + } + + #### * Get class of each instance id, stored at index id-1 (inst_id = number of deteced nucleus) + for inst_id in list(inst_info_dict.keys()): + rmin, cmin, rmax, cmax = (inst_info_dict[inst_id]["bbox"]).flatten() + inst_map_crop = pred_inst[rmin:rmax, cmin:cmax] + inst_type_crop = pred_type[rmin:rmax, cmin:cmax] + inst_map_crop = inst_map_crop == inst_id + inst_type = inst_type_crop[inst_map_crop] + type_list, type_pixels = np.unique(inst_type, return_counts=True) + type_list = list(zip(type_list, type_pixels)) + type_list = sorted(type_list, key=lambda x: x[1], reverse=True) + inst_type = type_list[0][0] + if inst_type == 0: # ! pick the 2nd most dominant if exist + if len(type_list) > 1: + inst_type = type_list[1][0] + type_dict = {v[0]: v[1] for v in type_list} + type_prob = type_dict[inst_type] / (np.sum(inst_map_crop) + 1.0e-6) + inst_info_dict[inst_id]["type"] = int(inst_type) + inst_info_dict[inst_id]["type_prob"] = float(type_prob) + + return pred_inst, inst_info_dict + + def __proc_np_hv( + self, pred: np.ndarray, object_size: int = 10, ksize: int = 21 + ) -> np.ndarray: + """Process Nuclei Prediction with XY Coordinate Map and generate instance map (each instance has unique integer) + + Separate Instances (also overlapping ones) from binary nuclei map and hv map by using morphological operations and watershed + + Args: + pred (np.ndarray): Prediction output, assuming. Shape: (H, W, 3) + * channel 0 contain probability map of nuclei + * channel 1 containing the regressed X-map + * channel 2 containing the regressed Y-map + object_size (int, optional): Smallest oject size for filtering. Defaults to 10 + k_size (int, optional): Sobel Kernel size. 
Defaults to 21 + Returns: + np.ndarray: Instance map for one image. Each nuclei has own integer. Shape: (H, W) + """ + pred = np.array(pred, dtype=np.float32) + + blb_raw = pred[..., 0] + h_dir_raw = pred[..., 1] + v_dir_raw = pred[..., 2] + + # processing + blb = np.array(blb_raw >= 0.5, dtype=np.int32) + + blb = measurements.label(blb)[0] # ndimage.label(blb)[0] + blb = remove_small_objects(blb, min_size=10) # 10 + blb[blb > 0] = 1 # background is 0 already + + h_dir = cv2.normalize( + h_dir_raw, + None, + alpha=0, + beta=1, + norm_type=cv2.NORM_MINMAX, + dtype=cv2.CV_32F, + ) + v_dir = cv2.normalize( + v_dir_raw, + None, + alpha=0, + beta=1, + norm_type=cv2.NORM_MINMAX, + dtype=cv2.CV_32F, + ) + + # ksize = int((20 * scale_factor) + 1) # 21 vs 41 + # obj_size = math.ceil(10 * (scale_factor**2)) #10 vs 40 + + sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=ksize) + sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=ksize) + + sobelh = 1 - ( + cv2.normalize( + sobelh, + None, + alpha=0, + beta=1, + norm_type=cv2.NORM_MINMAX, + dtype=cv2.CV_32F, + ) + ) + sobelv = 1 - ( + cv2.normalize( + sobelv, + None, + alpha=0, + beta=1, + norm_type=cv2.NORM_MINMAX, + dtype=cv2.CV_32F, + ) + ) + + overall = np.maximum(sobelh, sobelv) + overall = overall - (1 - blb) + overall[overall < 0] = 0 + + dist = (1.0 - overall) * blb + ## nuclei values form mountains so inverse to get basins + dist = -cv2.GaussianBlur(dist, (3, 3), 0) + + overall = np.array(overall >= 0.4, dtype=np.int32) + + marker = blb - overall + marker[marker < 0] = 0 + marker = binary_fill_holes(marker).astype("uint8") + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) + marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel) + marker = measurements.label(marker)[0] + marker = remove_small_objects(marker, min_size=object_size) + + proced_pred = watershed(dist, markers=marker, mask=blb) + + return proced_pred + + +def calculate_instances( + pred_types: torch.Tensor, pred_insts: torch.Tensor +) -> List[dict]: + """Best used for GT + + Args: + pred_types (torch.Tensor): Binary or type map ground-truth. + Shape must be (B, C, H, W) with C=1 for binary or num_nuclei_types for multi-class. + pred_insts (torch.Tensor): Ground-Truth instance map with shape (B, H, W) + + Returns: + list[dict]: Dictionary with nuclei informations, output similar to post_process_cell_segmentation + """ + type_preds = [] + pred_types = pred_types.permute(0, 2, 3, 1) + for i in range(pred_types.shape[0]): + pred_type = torch.argmax(pred_types, dim=-1)[i].detach().cpu().numpy() + pred_inst = pred_insts[i].detach().cpu().numpy() + inst_id_list = np.unique(pred_inst)[1:] # exlcude background + inst_info_dict = {} + for inst_id in inst_id_list: + inst_map = pred_inst == inst_id + rmin, rmax, cmin, cmax = get_bounding_box(inst_map) + inst_bbox = np.array([[rmin, cmin], [rmax, cmax]]) + inst_map = inst_map[ + inst_bbox[0][0] : inst_bbox[1][0], inst_bbox[0][1] : inst_bbox[1][1] + ] + inst_map = inst_map.astype(np.uint8) + inst_moment = cv2.moments(inst_map) + inst_contour = cv2.findContours( + inst_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE + ) + # * opencv protocol format may break + inst_contour = np.squeeze(inst_contour[0][0].astype("int32")) + # < 3 points dont make a contour, so skip, likely artifact too + # as the contours obtained via approximation => too small or sthg + if inst_contour.shape[0] < 3: + continue + if len(inst_contour.shape) != 2: + continue # ! 
check for trickery shape + inst_centroid = [ + (inst_moment["m10"] / inst_moment["m00"]), + (inst_moment["m01"] / inst_moment["m00"]), + ] + inst_centroid = np.array(inst_centroid) + inst_contour[:, 0] += inst_bbox[0][1] # X + inst_contour[:, 1] += inst_bbox[0][0] # Y + inst_centroid[0] += inst_bbox[0][1] # X + inst_centroid[1] += inst_bbox[0][0] # Y + inst_info_dict[inst_id] = { # inst_id should start at 1 + "bbox": inst_bbox, + "centroid": inst_centroid, + "contour": inst_contour, + "type_prob": None, + "type": None, + } + #### * Get class of each instance id, stored at index id-1 (inst_id = number of deteced nucleus) + for inst_id in list(inst_info_dict.keys()): + rmin, cmin, rmax, cmax = (inst_info_dict[inst_id]["bbox"]).flatten() + inst_map_crop = pred_inst[rmin:rmax, cmin:cmax] + inst_type_crop = pred_type[rmin:rmax, cmin:cmax] + inst_map_crop = inst_map_crop == inst_id + inst_type = inst_type_crop[inst_map_crop] + type_list, type_pixels = np.unique(inst_type, return_counts=True) + type_list = list(zip(type_list, type_pixels)) + type_list = sorted(type_list, key=lambda x: x[1], reverse=True) + inst_type = type_list[0][0] + if inst_type == 0: # ! pick the 2nd most dominant if exist + if len(type_list) > 1: + inst_type = type_list[1][0] + type_dict = {v[0]: v[1] for v in type_list} + type_prob = type_dict[inst_type] / (np.sum(inst_map_crop) + 1.0e-6) + inst_info_dict[inst_id]["type"] = int(inst_type) + inst_info_dict[inst_id]["type_prob"] = float(type_prob) + type_preds.append(inst_info_dict) + + return type_preds diff --git a/cell_segmentation/utils/template_geojson.py b/cell_segmentation/utils/template_geojson.py new file mode 100644 index 0000000000000000000000000000000000000000..005748d9caadef82a83616e3ecc203796162625d --- /dev/null +++ b/cell_segmentation/utils/template_geojson.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# GeoJson templates +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +def get_template_point() -> dict: + """Return a template for a Point geojson object + + Returns: + dict: Template + """ + template_point = { + "type": "Feature", + "id": "TODO", + "geometry": { + "type": "MultiPoint", + "coordinates": [ + [], + ], + }, + "properties": { + "objectType": "annotation", + "classification": {"name": "TODO", "color": []}, + }, + } + return template_point + + +def get_template_segmentation() -> dict: + """Return a template for a MultiPolygon geojson object + + Returns: + dict: Template + """ + template_multipolygon = { + "type": "Feature", + "id": "TODO", + "geometry": { + "type": "MultiPolygon", + "coordinates": [ + [], + ], + }, + "properties": { + "objectType": "annotation", + "classification": {"name": "TODO", "color": []}, + }, + } + return template_multipolygon diff --git a/cell_segmentation/utils/tools.py b/cell_segmentation/utils/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..ea503a3907f59c7d375f9b8d2020ca51a84da5ff --- /dev/null +++ b/cell_segmentation/utils/tools.py @@ -0,0 +1,400 @@ +# -*- coding: utf-8 -*- +# Helpful functions Pipeline +# +# Adapted from HoverNet +# HoverNet Network (https://doi.org/10.1016/j.media.2019.101563) +# Code Snippet adapted from HoverNet implementation (https://github.com/vqdang/hover_net) +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +import math +from typing import Tuple + +import numpy as np +import scipy +from numba import 
njit, prange +from scipy import ndimage +from scipy.optimize import linear_sum_assignment +from skimage.draw import polygon + + +def get_bounding_box(img): + """Get bounding box coordinate information.""" + rows = np.any(img, axis=1) + cols = np.any(img, axis=0) + rmin, rmax = np.where(rows)[0][[0, -1]] + cmin, cmax = np.where(cols)[0][[0, -1]] + # due to python indexing, need to add 1 to max + # else accessing will be 1px in the box, not out + rmax += 1 + cmax += 1 + return [rmin, rmax, cmin, cmax] + + +@njit +def cropping_center(x, crop_shape, batch=False): + """Crop an input image at the centre. + + Args: + x: input array + crop_shape: dimensions of cropped array + + Returns: + x: cropped array + + """ + orig_shape = x.shape + if not batch: + h0 = int((orig_shape[0] - crop_shape[0]) * 0.5) + w0 = int((orig_shape[1] - crop_shape[1]) * 0.5) + x = x[h0 : h0 + crop_shape[0], w0 : w0 + crop_shape[1], ...] + else: + h0 = int((orig_shape[1] - crop_shape[0]) * 0.5) + w0 = int((orig_shape[2] - crop_shape[1]) * 0.5) + x = x[:, h0 : h0 + crop_shape[0], w0 : w0 + crop_shape[1], ...] + return x + + +def remove_small_objects(pred, min_size=64, connectivity=1): + """Remove connected components smaller than the specified size. + + This function is taken from skimage.morphology.remove_small_objects, but the warning + is removed when a single label is provided. + + Args: + pred: input labelled array + min_size: minimum size of instance in output array + connectivity: The connectivity defining the neighborhood of a pixel. + + Returns: + out: output array with instances removed under min_size + + """ + out = pred + + if min_size == 0: # shortcut for efficiency + return out + + if out.dtype == bool: + selem = ndimage.generate_binary_structure(pred.ndim, connectivity) + ccs = np.zeros_like(pred, dtype=np.int32) + ndimage.label(pred, selem, output=ccs) + else: + ccs = out + + try: + component_sizes = np.bincount(ccs.ravel()) + except ValueError: + raise ValueError( + "Negative value labels are not supported. Try " + "relabeling the input with `scipy.ndimage.label` or " + "`skimage.morphology.label`." + ) + + too_small = component_sizes < min_size + too_small_mask = too_small[ccs] + out[too_small_mask] = 0 + + return out + + +def pair_coordinates( + setA: np.ndarray, setB: np.ndarray, radius: float +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Use the Munkres or Kuhn-Munkres algorithm to find the most optimal + unique pairing (largest possible match) when pairing points in set B + against points in set A, using distance as cost function. 
+ + Args: + setA (np.ndarray): np.array (float32) of size Nx2 contains the of XY coordinate + of N different points + setB (np.ndarray): np.array (float32) of size Nx2 contains the of XY coordinate + of N different points + radius (float): valid area around a point in setA to consider + a given coordinate in setB a candidate for match + + Returns: + Tuple[np.ndarray, np.ndarray, np.ndarray]: + pairing: pairing is an array of indices + where point at index pairing[0] in set A paired with point + in set B at index pairing[1] + unparedA: remaining point in set A unpaired + unparedB: remaining point in set B unpaired + """ + # * Euclidean distance as the cost matrix + pair_distance = scipy.spatial.distance.cdist(setA, setB, metric="euclidean") + + # * Munkres pairing with scipy library + # the algorithm return (row indices, matched column indices) + # if there is multiple same cost in a row, index of first occurence + # is return, thus the unique pairing is ensured + indicesA, paired_indicesB = linear_sum_assignment(pair_distance) + + # extract the paired cost and remove instances + # outside of designated radius + pair_cost = pair_distance[indicesA, paired_indicesB] + + pairedA = indicesA[pair_cost <= radius] + pairedB = paired_indicesB[pair_cost <= radius] + + pairing = np.concatenate([pairedA[:, None], pairedB[:, None]], axis=-1) + unpairedA = np.delete(np.arange(setA.shape[0]), pairedA) + unpairedB = np.delete(np.arange(setB.shape[0]), pairedB) + + return pairing, unpairedA, unpairedB + + +def fix_duplicates(inst_map: np.ndarray) -> np.ndarray: + """Re-label duplicated instances in an instance labelled mask. + + Parameters + ---------- + inst_map : np.ndarray + Instance labelled mask. Shape (H, W). + + Returns + ------- + np.ndarray: + The instance labelled mask without duplicated indices. + Shape (H, W). + """ + current_max_id = np.amax(inst_map) + inst_list = list(np.unique(inst_map)) + if 0 in inst_list: + inst_list.remove(0) + + for inst_id in inst_list: + inst = np.array(inst_map == inst_id, np.uint8) + remapped_ids = ndimage.label(inst)[0] + remapped_ids[remapped_ids > 1] += current_max_id + inst_map[remapped_ids > 1] = remapped_ids[remapped_ids > 1] + current_max_id = np.amax(inst_map) + + return inst_map + + +def polygons_to_label_coord( + coord: np.ndarray, shape: Tuple[int, int], labels: np.ndarray = None +) -> np.ndarray: + """Render polygons to image given a shape. + + Parameters + ---------- + coord.shape : np.ndarray + Shape: (n_polys, n_rays) + shape : Tuple[int, int] + Shape of the output mask. + labels : np.ndarray, optional + Sorted indices of the centroids. + + Returns + ------- + np.ndarray: + Instance labelled mask. Shape: (H, W). + """ + coord = np.asarray(coord) + if labels is None: + labels = np.arange(len(coord)) + + assert coord.ndim == 3 and coord.shape[1] == 2 and len(coord) == len(labels) + + lbl = np.zeros(shape, np.int32) + + for i, c in zip(labels, coord): + rr, cc = polygon(*c, shape) + lbl[rr, cc] = i + 1 + + return lbl + + +def ray_angles(n_rays: int = 32): + """Get linearly spaced angles for rays.""" + return np.linspace(0, 2 * np.pi, n_rays, endpoint=False) + + +def dist_to_coord( + dist: np.ndarray, points: np.ndarray, scale_dist: Tuple[int, int] = (1, 1) +) -> np.ndarray: + """Convert list of distances and centroids from polar to cartesian coordinates. + + Parameters + ---------- + dist : np.ndarray + The centerpoint pixels of the radial distance map. Shape (n_polys, n_rays). + points : np.ndarray + The centroids of the instances. Shape: (n_polys, 2). 
+ scale_dist : Tuple[int, int], default=(1, 1) + Scaling factor. + + Returns + ------- + np.ndarray: + Cartesian cooridnates of the polygons. Shape (n_polys, 2, n_rays). + """ + dist = np.asarray(dist) + points = np.asarray(points) + assert ( + dist.ndim == 2 + and points.ndim == 2 + and len(dist) == len(points) + and points.shape[1] == 2 + and len(scale_dist) == 2 + ) + n_rays = dist.shape[1] + phis = ray_angles(n_rays) + coord = (dist[:, np.newaxis] * np.array([np.sin(phis), np.cos(phis)])).astype( + np.float32 + ) + coord *= np.asarray(scale_dist).reshape(1, 2, 1) + coord += points[..., np.newaxis] + return coord + + +def polygons_to_label( + dist: np.ndarray, + points: np.ndarray, + shape: Tuple[int, int], + prob: np.ndarray = None, + thresh: float = -np.inf, + scale_dist: Tuple[int, int] = (1, 1), +) -> np.ndarray: + """Convert distances and center points to instance labelled mask. + + Parameters + ---------- + dist : np.ndarray + The centerpoint pixels of the radial distance map. Shape (n_polys, n_rays). + points : np.ndarray + The centroids of the instances. Shape: (n_polys, 2). + shape : Tuple[int, int]: + Shape of the output mask. + prob : np.ndarray, optional + The centerpoint pixels of the regressed distance transform. + Shape: (n_polys, n_rays). + thresh : float, default=-np.inf + Threshold for the regressed distance transform. + scale_dist : Tuple[int, int], default=(1, 1) + Scaling factor. + + Returns + ------- + np.ndarray: + Instance labelled mask. Shape (H, W). + """ + dist = np.asarray(dist) + points = np.asarray(points) + prob = np.inf * np.ones(len(points)) if prob is None else np.asarray(prob) + + assert dist.ndim == 2 and points.ndim == 2 and len(dist) == len(points) + assert len(points) == len(prob) and points.shape[1] == 2 and prob.ndim == 1 + + ind = prob > thresh + points = points[ind] + dist = dist[ind] + prob = prob[ind] + + ind = np.argsort(prob, kind="stable") + points = points[ind] + dist = dist[ind] + + coord = dist_to_coord(dist, points, scale_dist=scale_dist) + + return polygons_to_label_coord(coord, shape=shape, labels=ind) + + +@njit(cache=True, fastmath=True) +def intersection(boxA: np.ndarray, boxB: np.ndarray): + """Compute area of intersection of two boxes. + + Parameters + ---------- + boxA : np.ndarray + First boxes + boxB : np.ndarray + Second box + + Returns + ------- + float64: + Area of intersection + """ + xA = max(boxA[..., 0], boxB[..., 0]) + xB = min(boxA[..., 2], boxB[..., 2]) + dx = xB - xA + if dx <= 0: + return 0.0 + + yA = max(boxA[..., 1], boxB[..., 1]) + yB = min(boxA[..., 3], boxB[..., 3]) + dy = yB - yA + if dy <= 0.0: + return 0.0 + + return dx * dy + + +@njit(parallel=True) +def get_bboxes( + dist: np.ndarray, points: np.ndarray +) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]: + """Get bounding boxes from the non-zero pixels of the radial distance maps. + + This is basically a translation from the stardist repo cpp code to python + + NOTE: jit compiled and parallelized with numba. + + Parameters + ---------- + dist : np.ndarray + The non-zero values of the radial distance maps. Shape: (n_nonzero, n_rays). + points : np.ndarray + The yx-coordinates of the non-zero points. Shape (n_nonzero, 2). + + Returns + ------- + Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]: + Returns the x0, y0, x1, y1 bbox coordinates, bbox areas and the maximum + radial distance in the image. 
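+
+    Example
+    -------
+    A minimal illustrative sketch with toy values (not taken from the repository):
+    two centroids, each with 32 rays of constant length 5.
+
+    >>> import numpy as np
+    >>> dist = np.full((2, 32), 5.0)                      # radial distances per ray
+    >>> points = np.array([[10.0, 10.0], [40.0, 40.0]])   # yx centroid coordinates
+    >>> x1, y1, x2, y2, areas, max_dist = get_bboxes(dist, points)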
+ """ + n_polys = dist.shape[0] + n_rays = dist.shape[1] + + bbox_x1 = np.zeros(n_polys) + bbox_x2 = np.zeros(n_polys) + bbox_y1 = np.zeros(n_polys) + bbox_y2 = np.zeros(n_polys) + + areas = np.zeros(n_polys) + angle_pi = 2 * math.pi / n_rays + max_dist = 0 + + for i in prange(n_polys): + max_radius_outer = 0 + py = points[i, 0] + px = points[i, 1] + + for k in range(n_rays): + d = dist[i, k] + y = py + d * np.sin(angle_pi * k) + x = px + d * np.cos(angle_pi * k) + + if k == 0: + bbox_x1[i] = x + bbox_x2[i] = x + bbox_y1[i] = y + bbox_y2[i] = y + else: + bbox_x1[i] = min(x, bbox_x1[i]) + bbox_x2[i] = max(x, bbox_x2[i]) + bbox_y1[i] = min(y, bbox_y1[i]) + bbox_y2[i] = max(y, bbox_y2[i]) + + max_radius_outer = max(d, max_radius_outer) + + areas[i] = (bbox_x2[i] - bbox_x1[i]) * (bbox_y2[i] - bbox_y1[i]) + max_dist = max(max_dist, max_radius_outer) + + return bbox_x1, bbox_y1, bbox_x2, bbox_y2, areas, max_dist diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9d70c4cd6c6dc9ce9fa6c62a782665f3109e8d7 --- /dev/null +++ b/config.yaml @@ -0,0 +1,158 @@ +CUDA_VISIBLE_DEVICES: 3 +logging: + log_dir: /data5/ziweicui/cellvit256-unireplknet-n + mode: online + project: Cell-Segmentation + notes: CellViT-256 + log_comment: CellViT-256-resnet50-tiny + tags: + - Fold-1 + - ViT256 + wandb_dir: /data5/ziweicui/UniRepLKNet-optimizerconfig-unetdecoder-inputconv/results + level: Debug + group: CellViT256 + run_id: anifw9ux + wandb_file: anifw9ux +random_seed: 19 +gpu: 0 +data: + dataset: PanNuke + dataset_path: /data5/ziweicui/cellvit-png + train_folds: + - 0 + val_folds: + - 1 + test_folds: + - 2 + num_nuclei_classes: 6 + num_tissue_classes: 19 +model: + backbone: default + pretrained_encoder: /data5/ziweicui/semi_supervised_resnet50-08389792.pth + shared_skip_connections: true +loss: + nuclei_binary_map: + focaltverskyloss: + loss_fn: FocalTverskyLoss + weight: 1 + dice: + loss_fn: dice_loss + weight: 1 + hv_map: + mse: + loss_fn: mse_loss_maps + weight: 2.5 + msge: + loss_fn: msge_loss_maps + weight: 8 + nuclei_type_map: + bce: + loss_fn: xentropy_loss + weight: 0.5 + dice: + loss_fn: dice_loss + weight: 0.2 + mcfocaltverskyloss: + loss_fn: MCFocalTverskyLoss + weight: 0.5 + args: + num_classes: 6 + tissue_types: + ce: + loss_fn: CrossEntropyLoss + weight: 0.1 +training: + drop_rate: 0 + attn_drop_rate: 0.1 + drop_path_rate: 0.1 + batch_size: 32 + epochs: 130 + optimizer: AdamW + early_stopping_patience: 130 + scheduler: + scheduler_type: cosine + hyperparameters: + #gamma: 0.85 + eta_min: 1e-5 + optimizer_hyperparameter: + # betas: + # - 0.85 + # - 0.95 + #lr: 0.004 + opt_lower: 'AdamW' + lr: 0.0008 + opt_betas: [0.85,0.95] + weight_decay: 0.05 + opt_eps: 0.00000008 + unfreeze_epoch: 25 + sampling_gamma: 0.85 + sampling_strategy: cell+tissue + mixed_precision: true +transformations: + randomrotate90: + p: 0.5 + horizontalflip: + p: 0.5 + verticalflip: + p: 0.5 + downscale: + p: 0.15 + scale: 0.5 + blur: + p: 0.2 + blur_limit: 10 + gaussnoise: + p: 0.25 + var_limit: 50 + colorjitter: + p: 0.2 + scale_setting: 0.25 + scale_color: 0.1 + superpixels: + p: 0.1 + zoomblur: + p: 0.1 + randomsizedcrop: + p: 0.1 + elastictransform: + p: 0.2 + normalize: + mean: + - 0.5 + - 0.5 + - 0.5 + std: + - 0.5 + - 0.5 + - 0.5 +eval_checkpoint: latest_checkpoint.pth +dataset_config: + tissue_types: + Adrenal_gland: 0 + Bile-duct: 1 + Bladder: 2 + Breast: 3 + Cervix: 4 + Colon: 5 + Esophagus: 6 + HeadNeck: 7 + Kidney: 8 + Liver: 9 + Lung: 10 + Ovarian: 11 + Pancreatic: 12 
+ Prostate: 13 + Skin: 14 + Stomach: 15 + Testis: 16 + Thyroid: 17 + Uterus: 18 + nuclei_types: + Background: 0 + Neoplastic: 1 + Inflammatory: 2 + Connective: 3 + Dead: 4 + Epithelial: 5 +run_sweep: false +agent: null diff --git a/datamodel/__init__.py b/datamodel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2febf884cab62e8f554fb07d1da1034a2897819e --- /dev/null +++ b/datamodel/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Data models +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen diff --git a/datamodel/graph_datamodel.py b/datamodel/graph_datamodel.py new file mode 100644 index 0000000000000000000000000000000000000000..18e82d356b9a20e3596d852204dd7131e73a8a73 --- /dev/null +++ b/datamodel/graph_datamodel.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Graph Data model +# +# For more information, please check out docs/readmes/graphs.md +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + +from dataclasses import dataclass + +import torch + + +@dataclass +class GraphDataWSI: + """Dataclass for Graph Data + + Args: + x (torch.Tensor): Node feature matrix with shape (num_nodes, num_nodes_features) + positions(torch.Tensor): Each of the objects defined in x has a physical position in a Cartesian coordinate system, + be it detected cells or extracted patches. That's why we store the 2D position here, globally for the WSI. + Shape (num_nodes, 2) + metadata (dict, optional): Metadata about the object is stored here. Defaults to None + """ + + x: torch.Tensor + positions: torch.Tensor + metadata: dict diff --git a/datamodel/wsi_datamodel.py b/datamodel/wsi_datamodel.py new file mode 100644 index 0000000000000000000000000000000000000000..97e60862190541179da7a04cd5e2e9f2af152281 --- /dev/null +++ b/datamodel/wsi_datamodel.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +# WSI Model +# +# @ Fabian Hörst, fabian.hoerst@uk-essen.de +# Institute for Artifical Intelligence in Medicine, +# University Medicine Essen + + +import json +from pathlib import Path +from typing import Union, List, Callable, Tuple + +from dataclasses import dataclass, field +import numpy as np +import yaml +import logging +import torch +from PIL import Image + + +@dataclass +class WSI: + """WSI object + + Args: + name (str): WSI name + patient (str): Patient name + slide_path (Union[str, Path]): Full path to the WSI file. + patched_slide_path (Union[str, Path], optional): Full path to preprocessed WSI files (patches). Defaults to None. + embedding_name (Union[str, Path], optional): Defaults to None. + label (Union[str, int, float, np.ndarray], optional): Label of the WSI. Defaults to None. + logger (logging.logger, optional): Logger module for logging information. Defaults to None. + """ + + name: str + patient: str + slide_path: Union[str, Path] + patched_slide_path: Union[str, Path] = None + embedding_name: Union[str, Path] = None + label: Union[str, int, float, np.ndarray] = None + logger: logging.Logger = None + + # unset attributes used in this class + metadata: dict = field(init=False, repr=False) + all_patch_metadata: List[dict] = field(init=False, repr=False) + patches_list: List = field(init=False, repr=False) + patch_transform: Callable = field(init=False, repr=False) + + # name without ending (e.g. 
slide1 instead of slide1.svs) + def __post_init__(self): + """Post-Processing object""" + super().__init__() + # define paramaters that are used, but not defined at startup + + # convert string to path + self.slide_path = Path(self.slide_path).resolve() + if self.patched_slide_path is not None: + self.patched_slide_path = Path(self.patched_slide_path).resolve() + # load metadata + self._get_metadata() + self._get_wsi_patch_metadata() + self.patch_transform = None # hardcode to None (should not be a parameter, but should be defined) + + if self.logger is not None: + self.logger.debug(self.__repr__()) + + def _get_metadata(self) -> None: + """Load metadata yaml file""" + self.metadata_path = self.patched_slide_path / "metadata.yaml" + with open(self.metadata_path.resolve(), "r") as metadata_yaml: + try: + self.metadata = yaml.safe_load(metadata_yaml) + except yaml.YAMLError as exc: + print(exc) + self.metadata["label_map_inverse"] = { + v: k for k, v in self.metadata["label_map"].items() + } + + def _get_wsi_patch_metadata(self) -> None: + """Load patch_metadata json file and convert to dict and lists""" + with open(self.patched_slide_path / "patch_metadata.json", "r") as json_file: + metadata = json.load(json_file) + self.patches_list = [str(list(elem.keys())[0]) for elem in metadata] + self.all_patch_metadata = { + str(list(elem.keys())[0]): elem[str(list(elem.keys())[0])] + for elem in metadata + } + + def load_patch_metadata(self, patch_name: str) -> dict: + """Return the metadata of a patch with given name (including patch suffix, e.g., wsi_1_1.png) + + This function assumes that metadata path is a subpath of the patches dataset path + + Args: + patch_name (str): Name of patch + + Returns: + dict: metadata + """ + patch_metadata_path = self.all_patch_metadata[patch_name]["metadata_path"] + patch_metadata_path = self.patched_slide_path / patch_metadata_path + + # open + with open(patch_metadata_path, "r") as metadata_yaml: + patch_metadata = yaml.safe_load(metadata_yaml) + patch_metadata["name"] = patch_name + + return patch_metadata + + def set_patch_transform(self, transform: Callable) -> None: + """Set the transformation function to process a patch + + Args: + transform (Callable): Transformation function + """ + self.patch_transform = transform + + # patch processing + def process_patch_image( + self, patch_name: str, transform: Callable = None + ) -> Tuple[torch.Tensor, dict]: + """Process one patch: Load from disk, apply transformation if needed. 
ToTensor is applied automatically + + Args: + patch_name (Path): Name of patch to load, including patch suffix, e.g., wsi_1_1.png + transform (Callable, optional): Optional Patch-Transformation + Returns: + Tuple[torch.Tensor, dict]: + + * torch.Tensor: patch as torch.tensor (:,:,3) + * dict: patch metadata as dictionary + """ + patch = Image.open(self.patched_slide_path / "patches" / patch_name) + if transform: + patch = transform(patch) + + metadata = self.load_patch_metadata(patch_name) + return patch, metadata + + def get_number_patches(self) -> int: + """Return the number of patches for this WSI + + Returns: + int: number of patches + """ + return int(len(self.patches_list)) + + def get_patches( + self, transform: Callable = None + ) -> Tuple[torch.Tensor, list, list]: + """Get all patches for one image + + Args: + transform (Callable, optional): Optional Patch-Transformation + + Returns: + Tuple[torch.Tensor, list]: + + * patched image: Shape of torch.Tensor(num_patches, 3, :, :) + * coordinates as list metadata_dictionary + + """ + if self.logger is not None: + self.logger.warning(f"Loading {self.get_number_patches()} patches!") + patches = [] + metadata = [] + for patch in self.patches_list: + transformed_patch, meta = self.process_patch_image(patch, transform) + patches.append(transformed_patch) + metadata.append(meta) + patches = torch.stack(patches) + + return patches, metadata + + def load_embedding(self) -> torch.Tensor: + """Load embedding from subfolder patched_slide_path/embedding/ + + Raises: + FileNotFoundError: If embedding is not given + + Returns: + torch.Tensor: WSI embedding + """ + embedding_path = ( + self.patched_slide_path / "embeddings" / f"{self.embedding_name}.pt" + ) + if embedding_path.is_file(): + embedding = torch.load(embedding_path) + return embedding + else: + raise FileNotFoundError( + f"Embeddings for WSI {self.slide_path} cannot be found in path {embedding_path}" + ) diff --git a/docs/datasets/PanNuke/dataset_config.yaml b/docs/datasets/PanNuke/dataset_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3050dd24fc44522f27ee01c4360be03150c4c907 --- /dev/null +++ b/docs/datasets/PanNuke/dataset_config.yaml @@ -0,0 +1,28 @@ +tissue_types: + "Adrenal_gland": 0 + "Bile-duct": 1 + "Bladder": 2 + "Breast": 3 + "Cervix": 4 + "Colon": 5 + "Esophagus": 6 + "HeadNeck": 7 + "Kidney": 8 + "Liver": 9 + "Lung": 10 + "Ovarian": 11 + "Pancreatic": 12 + "Prostate": 13 + "Skin": 14 + "Stomach": 15 + "Testis": 16 + "Thyroid": 17 + "Uterus": 18 + +nuclei_types: + "Background": 0 + "Neoplastic": 1 + "Inflammatory": 2 + "Connective": 3 + "Dead": 4 + "Epithelial": 5 diff --git a/docs/datasets/PanNuke/fold0/cell_count.csv b/docs/datasets/PanNuke/fold0/cell_count.csv new file mode 100644 index 0000000000000000000000000000000000000000..ba0933ec0dc6dcf0ef913543cdc1b2ec2ff5dc74 --- /dev/null +++ b/docs/datasets/PanNuke/fold0/cell_count.csv @@ -0,0 +1,2657 @@ +Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial +0_0.png,4,2,2,0,0 +0_1.png,8,1,1,0,0 +0_10.png,17,0,1,0,0 +0_100.png,10,0,11,0,0 +0_1000.png,0,0,2,0,0 +0_1001.png,0,0,7,0,0 +0_1002.png,0,0,5,0,0 +0_1003.png,0,0,6,0,0 +0_1004.png,0,0,5,0,0 +0_1005.png,0,0,5,0,0 +0_1006.png,0,0,7,0,0 +0_1007.png,0,0,5,0,0 +0_1008.png,0,0,17,0,0 +0_1009.png,0,0,12,0,0 +0_101.png,10,5,10,0,0 +0_1010.png,0,0,15,0,0 +0_1011.png,0,0,6,0,0 +0_1012.png,0,0,10,0,0 +0_1013.png,0,0,1,0,0 +0_1014.png,0,0,12,0,0 +0_1015.png,0,0,2,0,0 +0_1016.png,0,0,8,0,0 +0_1017.png,0,1,9,0,0 +0_1018.png,17,27,3,0,0 
+0_1019.png,20,35,0,0,0 +0_102.png,11,0,13,0,0 +0_1020.png,17,42,1,0,0 +0_1021.png,26,32,7,0,0 +0_1022.png,22,18,0,0,0 +0_1023.png,20,33,4,0,0 +0_1024.png,29,27,4,0,0 +0_1025.png,10,0,0,28,0 +0_1026.png,35,0,6,34,0 +0_1027.png,28,0,6,34,0 +0_1028.png,15,0,9,25,0 +0_1029.png,35,0,6,50,0 +0_103.png,0,0,8,0,15 +0_1030.png,18,0,17,47,0 +0_1031.png,39,0,5,25,0 +0_1032.png,15,0,11,15,0 +0_1033.png,0,0,0,55,0 +0_1034.png,3,0,4,58,0 +0_1035.png,21,5,11,46,0 +0_1036.png,15,0,19,35,0 +0_1037.png,33,0,23,28,0 +0_1038.png,57,0,3,21,0 +0_1039.png,0,0,1,48,0 +0_104.png,0,0,8,0,17 +0_1040.png,33,0,1,34,0 +0_1041.png,0,0,0,53,0 +0_1042.png,9,0,12,0,0 +0_1043.png,5,0,17,0,0 +0_1044.png,3,0,15,0,0 +0_1045.png,22,0,5,0,0 +0_1046.png,24,0,0,0,0 +0_1047.png,1,1,17,0,0 +0_1048.png,12,0,9,0,0 +0_1049.png,24,0,1,0,0 +0_105.png,0,0,7,0,24 +0_1050.png,28,0,0,0,0 +0_1051.png,5,0,16,0,0 +0_1052.png,7,0,17,0,0 +0_1053.png,0,0,8,0,0 +0_1054.png,21,0,4,0,0 +0_1055.png,2,0,25,0,0 +0_1056.png,18,0,12,0,0 +0_1057.png,30,0,1,0,0 +0_1058.png,9,0,19,0,0 +0_1059.png,13,0,19,0,0 +0_106.png,0,0,4,0,30 +0_1060.png,34,0,0,0,0 +0_1061.png,0,74,104,0,0 +0_1062.png,0,113,60,0,23 +0_1063.png,0,193,75,0,8 +0_1064.png,18,0,0,0,0 +0_1065.png,11,1,4,0,0 +0_1066.png,4,3,4,0,0 +0_1067.png,0,0,6,0,0 +0_1068.png,0,3,9,0,0 +0_1069.png,0,7,16,0,0 +0_107.png,0,2,9,0,19 +0_1070.png,3,9,16,0,0 +0_1071.png,9,2,7,0,0 +0_1072.png,0,12,9,0,0 +0_1073.png,0,15,12,0,0 +0_1074.png,13,0,6,0,0 +0_1075.png,1,5,20,0,0 +0_1076.png,1,7,16,0,0 +0_1077.png,0,6,17,0,0 +0_1078.png,16,1,6,0,0 +0_1079.png,0,6,12,0,0 +0_108.png,0,0,11,0,9 +0_1080.png,7,3,6,0,0 +0_1081.png,17,0,4,0,0 +0_1082.png,11,1,1,0,0 +0_1083.png,0,4,12,0,0 +0_1084.png,0,0,11,0,0 +0_1085.png,14,0,2,0,0 +0_1086.png,0,93,2,0,0 +0_1087.png,0,63,3,0,0 +0_1088.png,0,0,5,0,27 +0_1089.png,0,0,2,0,34 +0_109.png,0,0,13,0,13 +0_1090.png,0,0,3,0,37 +0_1091.png,0,0,0,0,4 +0_1092.png,0,0,0,0,8 +0_1093.png,0,0,0,0,14 +0_1094.png,0,0,0,0,2 +0_1095.png,0,2,13,0,24 +0_1096.png,0,4,36,0,10 +0_1097.png,0,0,5,0,0 +0_1098.png,0,0,5,0,0 +0_1099.png,1,0,25,0,0 +0_11.png,9,0,5,0,0 +0_110.png,0,0,7,0,32 +0_1100.png,0,0,3,0,0 +0_1101.png,0,3,5,0,0 +0_1102.png,3,4,5,0,0 +0_1103.png,14,2,23,0,0 +0_1104.png,0,0,4,0,0 +0_1105.png,0,0,9,0,0 +0_1106.png,0,1,4,0,0 +0_1107.png,7,0,0,0,0 +0_1108.png,20,1,1,0,0 +0_1109.png,0,1,4,0,0 +0_111.png,0,0,5,0,29 +0_1110.png,0,1,2,0,0 +0_1111.png,18,3,9,0,0 +0_1112.png,25,0,7,0,0 +0_1113.png,19,2,2,0,0 +0_1114.png,14,0,9,0,0 +0_1115.png,0,0,9,0,0 +0_1116.png,0,0,9,0,0 +0_1117.png,0,1,7,0,0 +0_1118.png,0,0,12,0,0 +0_1119.png,0,0,13,0,0 +0_112.png,38,0,2,0,0 +0_1120.png,0,0,4,0,0 +0_1121.png,0,0,8,0,0 +0_1122.png,0,0,4,0,0 +0_1123.png,0,0,5,0,0 +0_1124.png,0,0,3,0,0 +0_1125.png,0,0,12,0,0 +0_1126.png,0,0,11,0,0 +0_1127.png,0,0,18,0,0 +0_1128.png,0,0,14,0,0 +0_1129.png,0,0,16,0,0 +0_113.png,28,0,2,0,0 +0_1130.png,0,0,4,0,0 +0_1131.png,0,0,9,0,0 +0_1132.png,0,0,9,0,0 +0_1133.png,21,0,9,0,0 +0_1134.png,37,0,0,0,0 +0_1135.png,26,0,6,0,0 +0_1136.png,34,0,0,0,0 +0_1137.png,32,0,2,0,0 +0_1138.png,35,0,2,0,0 +0_1139.png,33,0,2,0,0 +0_114.png,28,0,3,0,0 +0_1140.png,17,0,15,0,0 +0_1141.png,28,0,2,0,0 +0_1142.png,38,0,0,0,0 +0_1143.png,9,0,32,0,0 +0_1144.png,19,0,25,0,0 +0_1145.png,29,0,10,0,0 +0_1146.png,21,0,13,0,0 +0_1147.png,0,0,0,0,0 +0_1148.png,0,0,0,0,0 +0_1149.png,0,0,0,0,0 +0_115.png,26,0,7,0,0 +0_1150.png,0,3,0,0,0 +0_1151.png,0,5,0,0,0 +0_1152.png,0,0,0,0,0 +0_1153.png,0,4,0,0,0 +0_1154.png,0,1,0,0,0 +0_1155.png,0,0,0,0,0 +0_1156.png,0,5,0,0,0 +0_1157.png,0,1,1,0,0 +0_1158.png,0,0,0,0,0 
+0_1159.png,0,2,0,0,0 +0_116.png,49,1,0,0,0 +0_1160.png,0,0,0,0,0 +0_1161.png,0,0,0,0,0 +0_1162.png,0,3,0,0,0 +0_1163.png,0,7,1,0,0 +0_1164.png,0,0,0,0,0 +0_1165.png,0,1,0,0,0 +0_1166.png,0,0,0,0,0 +0_1167.png,0,1,0,0,0 +0_1168.png,0,2,0,0,0 +0_1169.png,0,5,0,0,0 +0_117.png,0,69,3,0,0 +0_1170.png,0,2,0,0,0 +0_1171.png,26,1,6,0,0 +0_1172.png,19,0,3,0,0 +0_1173.png,19,0,4,0,0 +0_1174.png,22,2,2,0,0 +0_1175.png,26,1,4,0,0 +0_1176.png,24,1,5,0,0 +0_1177.png,19,0,3,0,0 +0_1178.png,15,0,2,0,0 +0_1179.png,26,1,3,0,0 +0_118.png,0,36,14,0,0 +0_1180.png,20,2,4,0,0 +0_1181.png,21,0,7,0,0 +0_1182.png,20,1,4,0,0 +0_1183.png,17,1,5,0,0 +0_1184.png,21,1,3,0,0 +0_1185.png,16,0,4,0,0 +0_1186.png,1,0,8,0,0 +0_1187.png,16,1,8,0,0 +0_1188.png,10,0,6,0,0 +0_1189.png,0,0,1,0,0 +0_119.png,9,48,3,0,0 +0_1190.png,0,0,6,0,0 +0_1191.png,6,0,0,0,0 +0_1192.png,7,0,0,0,0 +0_1193.png,11,0,6,0,0 +0_1194.png,11,0,0,0,0 +0_1195.png,11,0,1,0,0 +0_1196.png,12,0,0,0,0 +0_1197.png,13,0,0,0,0 +0_1198.png,15,0,0,0,0 +0_1199.png,15,0,0,0,0 +0_12.png,7,0,2,0,0 +0_120.png,2,58,3,0,0 +0_1200.png,12,0,2,0,0 +0_1201.png,9,1,3,0,0 +0_1202.png,16,0,2,0,0 +0_1203.png,9,0,4,0,0 +0_1204.png,6,0,2,0,0 +0_1205.png,6,0,0,0,0 +0_1206.png,12,0,0,0,0 +0_1207.png,10,3,0,0,0 +0_1208.png,12,0,2,0,0 +0_1209.png,14,0,0,0,0 +0_121.png,29,2,5,0,0 +0_1210.png,14,0,0,0,0 +0_1211.png,7,0,0,0,0 +0_1212.png,10,0,6,0,0 +0_1213.png,6,0,4,0,0 +0_1214.png,6,0,1,0,0 +0_1215.png,8,0,3,0,0 +0_1216.png,6,0,3,0,0 +0_1217.png,10,2,0,0,0 +0_1218.png,19,0,2,0,0 +0_1219.png,8,0,0,0,0 +0_122.png,28,9,2,0,0 +0_1220.png,10,0,0,0,0 +0_1221.png,9,0,4,0,0 +0_1222.png,14,0,0,0,0 +0_1223.png,15,0,0,0,0 +0_1224.png,13,0,0,0,0 +0_1225.png,10,0,1,0,0 +0_1226.png,11,0,1,0,0 +0_1227.png,15,0,0,0,0 +0_1228.png,10,0,0,0,0 +0_1229.png,11,0,1,0,0 +0_123.png,0,1,11,0,18 +0_1230.png,12,0,0,0,0 +0_1231.png,15,1,1,0,0 +0_1232.png,20,0,0,0,0 +0_1233.png,6,2,8,0,0 +0_1234.png,14,1,0,0,0 +0_1235.png,9,0,3,0,0 +0_1236.png,13,0,4,0,0 +0_1237.png,2,0,11,0,0 +0_1238.png,0,0,1,0,12 +0_1239.png,0,0,4,0,4 +0_124.png,0,0,8,0,29 +0_1240.png,0,4,3,0,10 +0_1241.png,0,0,0,0,5 +0_1242.png,0,0,0,0,8 +0_1243.png,0,4,6,0,7 +0_1244.png,0,1,0,0,6 +0_1245.png,0,6,14,0,12 +0_1246.png,0,0,11,0,0 +0_1247.png,0,0,9,0,0 +0_1248.png,4,0,4,0,0 +0_1249.png,13,0,2,0,0 +0_125.png,0,1,6,0,29 +0_1250.png,15,0,5,0,0 +0_1251.png,0,0,11,0,0 +0_1252.png,21,0,0,0,0 +0_1253.png,11,0,9,0,0 +0_1254.png,2,0,7,0,0 +0_1255.png,25,1,1,0,0 +0_1256.png,0,0,12,0,0 +0_1257.png,2,0,11,0,0 +0_1258.png,21,1,1,0,0 +0_1259.png,18,0,3,0,0 +0_126.png,0,2,12,0,11 +0_1260.png,3,0,5,0,0 +0_1261.png,14,0,3,0,0 +0_1262.png,11,0,7,0,0 +0_1263.png,0,0,6,0,0 +0_1264.png,0,0,0,0,25 +0_1265.png,0,1,2,0,14 +0_1266.png,0,0,1,0,14 +0_1267.png,0,1,3,0,21 +0_1268.png,1,0,2,0,23 +0_1269.png,0,0,3,0,26 +0_127.png,0,0,10,0,22 +0_1270.png,0,0,2,0,19 +0_1271.png,0,0,3,0,21 +0_1272.png,0,0,5,0,34 +0_1273.png,0,4,10,0,17 +0_1274.png,0,2,6,0,23 +0_1275.png,0,2,6,0,23 +0_1276.png,0,1,3,0,37 +0_1277.png,0,1,5,0,27 +0_1278.png,0,4,10,0,5 +0_1279.png,0,10,9,0,33 +0_128.png,0,2,10,0,27 +0_1280.png,0,21,24,0,14 +0_1281.png,0,0,29,0,0 +0_1282.png,0,0,12,0,0 +0_1283.png,0,0,11,0,0 +0_1284.png,0,7,12,0,0 +0_1285.png,0,1,13,0,0 +0_1286.png,0,6,14,0,0 +0_1287.png,0,4,16,0,0 +0_1288.png,0,0,10,0,0 +0_1289.png,0,0,17,0,0 +0_129.png,0,2,12,0,35 +0_1290.png,1,1,7,0,0 +0_1291.png,29,0,5,0,0 +0_1292.png,6,0,4,0,0 +0_1293.png,0,1,8,0,0 +0_1294.png,16,1,3,0,0 +0_1295.png,0,1,18,0,0 +0_1296.png,0,1,26,0,0 +0_1297.png,17,0,1,0,0 +0_1298.png,0,1,24,0,0 +0_1299.png,0,1,14,0,0 
+0_13.png,3,0,6,0,0 +0_130.png,0,1,12,0,6 +0_1300.png,32,1,3,0,0 +0_1301.png,21,1,3,0,0 +0_1302.png,12,2,9,0,0 +0_1303.png,11,1,2,0,0 +0_1304.png,0,0,12,0,0 +0_1305.png,12,7,0,0,0 +0_1306.png,13,1,0,0,0 +0_1307.png,1,1,12,0,0 +0_1308.png,10,0,7,0,0 +0_1309.png,19,1,0,0,0 +0_131.png,0,2,2,0,45 +0_1310.png,9,0,6,0,0 +0_1311.png,5,0,13,0,0 +0_1312.png,9,0,3,0,0 +0_1313.png,13,2,1,0,0 +0_1314.png,16,0,2,0,0 +0_1315.png,13,0,7,0,0 +0_1316.png,13,0,1,0,0 +0_1317.png,17,2,2,0,0 +0_1318.png,13,1,5,0,0 +0_1319.png,14,8,4,0,0 +0_132.png,0,0,17,0,13 +0_1320.png,21,1,3,0,0 +0_1321.png,19,2,0,0,0 +0_1322.png,24,3,0,0,0 +0_1323.png,18,5,2,0,0 +0_1324.png,16,4,0,0,0 +0_1325.png,13,5,0,0,0 +0_1326.png,14,3,3,0,0 +0_1327.png,18,3,0,0,0 +0_1328.png,18,7,0,0,0 +0_1329.png,16,4,2,0,0 +0_133.png,0,1,3,0,1 +0_1330.png,7,9,4,0,0 +0_1331.png,21,9,0,0,0 +0_1332.png,22,4,0,0,0 +0_1333.png,11,7,2,0,0 +0_1334.png,18,5,6,0,0 +0_1335.png,18,6,0,0,0 +0_1336.png,15,3,0,0,0 +0_1337.png,4,13,14,0,0 +0_1338.png,22,5,0,0,0 +0_1339.png,20,10,1,0,0 +0_134.png,0,0,5,0,8 +0_1340.png,8,9,11,0,0 +0_1341.png,18,4,3,0,0 +0_1342.png,22,9,6,0,0 +0_1343.png,19,3,4,0,0 +0_1344.png,32,1,1,1,0 +0_1345.png,52,0,0,0,0 +0_1346.png,0,6,0,0,0 +0_1347.png,17,9,2,0,0 +0_1348.png,61,0,0,0,0 +0_1349.png,39,0,0,7,0 +0_135.png,0,1,5,0,30 +0_1350.png,0,14,3,0,0 +0_1351.png,48,1,0,0,0 +0_1352.png,74,0,0,0,0 +0_1353.png,0,15,5,0,0 +0_1354.png,0,17,3,0,0 +0_1355.png,0,9,3,0,0 +0_1356.png,0,35,4,0,0 +0_1357.png,0,27,0,0,0 +0_1358.png,0,23,4,0,0 +0_1359.png,1,10,3,0,0 +0_136.png,0,3,14,0,15 +0_1360.png,24,8,2,0,0 +0_1361.png,0,13,7,0,0 +0_1362.png,0,16,7,0,0 +0_1363.png,0,10,4,0,0 +0_1364.png,0,13,4,0,0 +0_1365.png,0,10,7,0,0 +0_1366.png,0,9,5,0,0 +0_1367.png,0,19,6,0,0 +0_1368.png,0,19,2,0,0 +0_1369.png,14,14,3,0,0 +0_137.png,0,1,4,0,18 +0_1370.png,5,1,7,0,0 +0_1371.png,0,2,9,0,0 +0_1372.png,15,1,2,0,0 +0_1373.png,3,0,10,0,0 +0_1374.png,22,2,5,0,0 +0_1375.png,8,1,6,0,0 +0_1376.png,8,2,1,0,0 +0_1377.png,15,2,15,0,0 +0_1378.png,5,2,11,0,0 +0_1379.png,11,1,2,0,0 +0_138.png,0,0,7,0,29 +0_1380.png,7,2,4,0,0 +0_1381.png,14,1,0,0,0 +0_1382.png,7,4,7,0,0 +0_1383.png,9,8,8,0,0 +0_1384.png,15,2,4,0,0 +0_1385.png,9,0,11,0,0 +0_1386.png,0,1,12,0,0 +0_1387.png,11,2,7,0,0 +0_1388.png,21,0,0,0,0 +0_1389.png,16,0,0,0,0 +0_139.png,0,0,2,0,38 +0_1390.png,13,0,0,0,0 +0_1391.png,22,0,1,0,0 +0_1392.png,7,0,0,0,0 +0_1393.png,15,0,1,0,0 +0_1394.png,15,0,1,0,0 +0_1395.png,16,0,0,0,0 +0_1396.png,19,0,0,0,0 +0_1397.png,17,0,0,0,0 +0_1398.png,13,0,0,0,0 +0_1399.png,20,0,2,0,0 +0_14.png,5,1,3,0,0 +0_140.png,0,1,9,0,0 +0_1400.png,5,0,0,0,0 +0_1401.png,21,0,0,0,0 +0_1402.png,24,0,0,0,0 +0_1403.png,11,0,0,0,0 +0_1404.png,14,0,0,0,0 +0_1405.png,7,0,0,0,0 +0_1406.png,17,0,0,0,0 +0_1407.png,12,0,1,0,0 +0_1408.png,11,0,1,0,0 +0_1409.png,20,0,0,0,0 +0_141.png,9,0,2,0,0 +0_1410.png,13,0,3,0,0 +0_1411.png,23,0,0,0,0 +0_1412.png,16,2,2,0,0 +0_1413.png,21,4,1,0,0 +0_1414.png,2,3,7,0,0 +0_1415.png,17,9,3,0,0 +0_1416.png,9,7,2,0,0 +0_1417.png,22,0,2,0,0 +0_1418.png,1,12,6,0,0 +0_1419.png,0,2,18,0,0 +0_142.png,18,0,5,0,0 +0_1420.png,9,1,4,0,0 +0_1421.png,0,6,13,0,0 +0_1422.png,0,4,19,0,0 +0_1423.png,5,2,13,0,0 +0_1424.png,0,4,12,0,0 +0_1425.png,0,3,14,0,15 +0_1426.png,0,1,0,0,25 +0_1427.png,0,1,4,0,38 +0_1428.png,0,2,4,0,36 +0_1429.png,0,0,0,0,14 +0_143.png,26,1,3,0,0 +0_1430.png,0,0,0,0,3 +0_1431.png,0,5,7,0,15 +0_1432.png,0,0,0,0,0 +0_1433.png,0,0,0,0,0 +0_1434.png,0,0,1,0,19 +0_1435.png,0,4,18,0,2 +0_1436.png,0,4,12,0,20 +0_1437.png,0,0,0,0,32 +0_1438.png,0,7,6,0,22 +0_1439.png,0,8,16,0,0 
+0_144.png,17,1,2,0,0 +0_1440.png,5,10,12,0,0 +0_1441.png,10,12,4,0,0 +0_1442.png,12,15,9,0,0 +0_1443.png,0,2,16,0,0 +0_1444.png,0,1,25,0,9 +0_1445.png,0,1,18,0,15 +0_1446.png,39,0,2,0,0 +0_1447.png,8,0,5,0,0 +0_1448.png,34,0,2,0,0 +0_1449.png,23,0,7,0,0 +0_145.png,6,0,3,0,0 +0_1450.png,21,0,7,0,0 +0_1451.png,13,0,9,0,0 +0_1452.png,9,0,11,0,0 +0_1453.png,13,0,6,0,0 +0_1454.png,14,0,2,0,0 +0_1455.png,6,1,11,0,0 +0_1456.png,50,0,0,0,0 +0_1457.png,16,0,6,0,0 +0_1458.png,18,0,11,0,0 +0_1459.png,0,1,2,0,0 +0_146.png,14,2,2,0,0 +0_1460.png,0,3,13,0,0 +0_1461.png,0,0,20,0,0 +0_1462.png,18,12,8,0,0 +0_1463.png,0,27,16,0,0 +0_1464.png,0,13,24,0,0 +0_1465.png,46,2,1,0,0 +0_1466.png,0,24,15,0,0 +0_1467.png,23,1,4,0,0 +0_1468.png,30,0,2,0,0 +0_1469.png,30,0,3,0,0 +0_147.png,12,0,1,0,0 +0_1470.png,30,1,0,0,0 +0_1471.png,31,0,0,0,0 +0_1472.png,5,5,17,0,0 +0_1473.png,20,26,4,0,0 +0_1474.png,23,0,2,0,0 +0_1475.png,10,1,5,0,0 +0_1476.png,6,1,5,0,0 +0_1477.png,5,13,13,0,0 +0_1478.png,0,19,14,0,0 +0_1479.png,0,21,14,0,22 +0_148.png,0,0,2,0,15 +0_1480.png,0,3,9,0,34 +0_1481.png,67,0,0,3,0 +0_1482.png,7,0,0,68,0 +0_1483.png,0,14,35,0,20 +0_1484.png,0,5,5,0,14 +0_1485.png,0,31,56,0,0 +0_1486.png,0,97,57,0,0 +0_1487.png,0,104,85,0,0 +0_1488.png,45,1,1,0,0 +0_1489.png,28,1,3,0,0 +0_149.png,0,0,0,0,37 +0_1490.png,59,1,0,0,0 +0_1491.png,54,0,1,0,0 +0_1492.png,27,2,7,0,0 +0_1493.png,33,0,2,0,0 +0_1494.png,0,10,50,0,0 +0_1495.png,0,10,50,0,0 +0_1496.png,0,0,23,0,0 +0_1497.png,0,0,20,0,0 +0_1498.png,0,0,12,0,0 +0_1499.png,0,0,12,0,0 +0_15.png,4,0,8,0,0 +0_150.png,0,0,11,0,27 +0_1500.png,47,0,3,0,0 +0_1501.png,4,1,18,0,0 +0_1502.png,32,0,1,0,0 +0_1503.png,30,0,0,2,0 +0_1504.png,3,7,49,0,0 +0_1505.png,1,14,32,0,0 +0_1506.png,8,26,57,0,0 +0_1507.png,0,5,28,0,0 +0_1508.png,54,5,11,0,0 +0_1509.png,0,5,18,0,0 +0_151.png,0,3,4,0,19 +0_1510.png,0,3,21,0,0 +0_1511.png,0,0,14,0,0 +0_1512.png,0,0,0,0,0 +0_1513.png,79,0,0,0,0 +0_1514.png,59,0,3,0,0 +0_1515.png,32,0,0,0,0 +0_1516.png,59,1,1,0,0 +0_1517.png,51,2,2,0,0 +0_1518.png,0,7,23,0,0 +0_1519.png,0,9,34,0,0 +0_152.png,0,1,0,0,33 +0_1520.png,28,0,2,0,0 +0_1521.png,35,0,6,0,0 +0_1522.png,32,0,8,0,0 +0_1523.png,24,0,1,0,0 +0_1524.png,23,2,0,0,0 +0_1525.png,23,0,0,0,0 +0_1526.png,0,8,15,0,0 +0_1527.png,0,1,0,0,0 +0_1528.png,0,0,4,0,0 +0_1529.png,0,8,6,0,0 +0_153.png,12,4,0,0,0 +0_1530.png,0,1,1,0,0 +0_1531.png,0,2,14,0,0 +0_1532.png,7,0,1,0,0 +0_1533.png,15,0,0,0,0 +0_1534.png,11,0,0,0,0 +0_1535.png,15,0,0,0,0 +0_1536.png,11,1,0,0,0 +0_1537.png,11,0,0,0,0 +0_1538.png,9,0,0,0,0 +0_1539.png,8,0,0,0,0 +0_154.png,1,9,5,0,0 +0_1540.png,23,0,0,0,0 +0_1541.png,17,0,2,0,0 +0_1542.png,0,0,0,0,3 +0_1543.png,0,0,2,0,3 +0_1544.png,0,0,4,0,1 +0_1545.png,0,0,4,0,0 +0_1546.png,0,0,4,0,0 +0_1547.png,0,0,7,0,0 +0_1548.png,0,4,28,0,0 +0_1549.png,6,0,0,0,0 +0_155.png,8,3,1,0,0 +0_1550.png,7,0,0,0,0 +0_1551.png,6,0,0,0,0 +0_1552.png,11,0,0,0,0 +0_1553.png,11,0,2,0,0 +0_1554.png,8,0,0,0,0 +0_1555.png,8,0,4,0,0 +0_1556.png,9,0,0,0,0 +0_1557.png,12,2,4,0,0 +0_1558.png,19,2,1,0,0 +0_1559.png,14,0,4,0,0 +0_156.png,4,7,5,0,0 +0_1560.png,17,3,4,0,0 +0_1561.png,7,2,2,0,0 +0_1562.png,11,1,1,0,0 +0_1563.png,14,0,6,0,0 +0_1564.png,18,1,2,0,0 +0_1565.png,8,0,7,0,0 +0_1566.png,10,0,8,0,0 +0_1567.png,1,1,1,0,0 +0_1568.png,5,0,3,0,0 +0_1569.png,2,1,0,0,0 +0_157.png,6,5,4,0,0 +0_1570.png,23,1,2,0,0 +0_1571.png,15,0,4,0,0 +0_1572.png,2,0,9,0,0 +0_1573.png,18,1,4,0,0 +0_1574.png,0,0,2,0,0 +0_1575.png,0,0,4,0,0 +0_1576.png,0,0,1,0,0 +0_1577.png,0,0,2,0,0 +0_1578.png,0,0,0,0,0 +0_1579.png,0,0,4,0,0 +0_158.png,0,1,5,0,0 
+0_1580.png,0,9,21,0,0 +0_1581.png,0,11,21,0,0 +0_1582.png,0,7,18,0,1 +0_1583.png,0,5,15,0,1 +0_1584.png,0,20,14,0,14 +0_1585.png,2,0,14,0,0 +0_1586.png,2,0,9,0,0 +0_1587.png,1,4,9,0,0 +0_1588.png,5,2,16,0,0 +0_1589.png,2,3,10,0,0 +0_159.png,0,1,9,0,8 +0_1590.png,2,0,7,0,0 +0_1591.png,14,0,2,0,0 +0_1592.png,9,0,6,0,0 +0_1593.png,0,3,15,0,0 +0_1594.png,0,0,10,0,0 +0_1595.png,0,2,22,0,0 +0_1596.png,0,5,5,0,9 +0_1597.png,0,10,8,0,2 +0_1598.png,0,3,4,0,7 +0_1599.png,0,0,7,0,10 +0_16.png,7,1,4,0,0 +0_160.png,0,1,11,0,9 +0_1600.png,0,0,1,0,9 +0_1601.png,0,5,5,0,10 +0_1602.png,0,4,4,0,14 +0_1603.png,0,0,6,0,9 +0_1604.png,0,5,10,0,0 +0_1605.png,0,4,22,0,0 +0_1606.png,0,10,33,0,0 +0_1607.png,21,0,7,0,0 +0_1608.png,21,0,14,0,0 +0_1609.png,21,1,8,0,0 +0_161.png,0,2,1,0,30 +0_1610.png,16,1,16,0,0 +0_1611.png,20,1,14,0,0 +0_1612.png,27,1,9,0,0 +0_1613.png,23,0,11,0,0 +0_1614.png,0,0,0,0,0 +0_1615.png,0,0,0,0,0 +0_1616.png,0,0,0,0,0 +0_1617.png,0,0,0,0,0 +0_1618.png,0,0,0,0,0 +0_1619.png,0,0,0,0,0 +0_162.png,0,2,6,0,24 +0_1620.png,0,0,0,0,0 +0_1621.png,8,11,12,0,0 +0_1622.png,21,8,7,0,0 +0_1623.png,11,14,22,0,0 +0_1624.png,22,6,7,0,0 +0_1625.png,28,4,2,0,0 +0_1626.png,28,6,0,0,0 +0_1627.png,13,17,2,0,0 +0_1628.png,0,2,3,0,0 +0_1629.png,0,0,0,0,0 +0_163.png,0,1,7,0,15 +0_1630.png,0,3,9,0,0 +0_1631.png,0,0,2,0,0 +0_1632.png,0,0,0,0,0 +0_1633.png,6,2,0,0,0 +0_1634.png,4,19,10,0,0 +0_1635.png,13,3,0,0,0 +0_1636.png,9,15,2,0,0 +0_1637.png,1,89,7,0,0 +0_1638.png,0,63,13,0,0 +0_1639.png,18,0,1,0,0 +0_164.png,0,1,4,0,2 +0_1640.png,12,1,3,0,0 +0_1641.png,12,1,1,0,0 +0_1642.png,24,0,0,0,0 +0_1643.png,1,2,12,0,0 +0_1644.png,0,9,17,0,0 +0_1645.png,0,6,8,0,0 +0_1646.png,0,6,11,0,0 +0_1647.png,0,13,17,0,0 +0_1648.png,4,31,17,0,0 +0_1649.png,3,25,11,0,0 +0_165.png,0,1,14,0,5 +0_1650.png,0,9,17,0,0 +0_1651.png,0,29,9,0,0 +0_1652.png,0,5,17,0,0 +0_1653.png,0,3,18,0,0 +0_1654.png,0,12,12,0,0 +0_1655.png,0,1,8,0,0 +0_1656.png,0,1,6,0,10 +0_1657.png,0,0,20,0,1 +0_1658.png,0,1,18,0,0 +0_1659.png,0,4,12,0,0 +0_166.png,0,0,5,0,31 +0_1660.png,23,1,4,0,0 +0_1661.png,18,0,0,0,0 +0_1662.png,30,0,0,0,0 +0_1663.png,0,1,11,0,0 +0_1664.png,0,0,5,0,0 +0_1665.png,0,0,8,0,0 +0_1666.png,0,1,16,0,0 +0_1667.png,0,0,7,0,0 +0_1668.png,0,0,8,0,0 +0_1669.png,0,0,12,0,0 +0_167.png,0,2,7,0,30 +0_1670.png,0,0,13,0,0 +0_1671.png,0,0,8,0,0 +0_1672.png,0,0,8,0,0 +0_1673.png,0,0,8,0,0 +0_1674.png,0,0,7,0,0 +0_1675.png,0,2,2,0,0 +0_1676.png,0,0,9,0,0 +0_1677.png,0,0,8,0,0 +0_1678.png,0,1,3,0,0 +0_1679.png,0,0,2,0,0 +0_168.png,0,4,12,0,15 +0_1680.png,0,0,1,0,0 +0_1681.png,0,3,4,0,0 +0_1682.png,0,0,2,0,0 +0_1683.png,0,0,0,0,20 +0_1684.png,0,0,6,0,28 +0_1685.png,0,2,10,0,2 +0_1686.png,0,2,18,0,0 +0_1687.png,8,5,13,0,0 +0_1688.png,15,0,0,0,0 +0_1689.png,0,3,22,0,0 +0_169.png,0,1,10,0,18 +0_1690.png,0,8,24,0,0 +0_1691.png,2,10,13,0,0 +0_1692.png,3,6,9,0,0 +0_1693.png,10,7,4,0,0 +0_1694.png,5,1,17,0,0 +0_1695.png,4,0,15,0,0 +0_1696.png,18,0,0,0,0 +0_1697.png,11,1,0,6,0 +0_1698.png,27,0,0,0,0 +0_1699.png,24,0,0,0,0 +0_17.png,10,1,6,0,0 +0_170.png,0,1,3,0,27 +0_1700.png,22,1,0,0,0 +0_1701.png,22,0,0,0,0 +0_1702.png,21,0,0,0,0 +0_1703.png,16,0,0,0,0 +0_1704.png,0,0,0,0,0 +0_1705.png,0,0,0,0,0 +0_1706.png,0,1,1,0,0 +0_1707.png,0,0,0,0,0 +0_1708.png,0,0,0,0,0 +0_1709.png,0,0,0,0,0 +0_171.png,0,0,7,0,20 +0_1710.png,15,0,0,0,0 +0_1711.png,12,0,0,0,0 +0_1712.png,13,0,2,0,0 +0_1713.png,19,0,0,0,0 +0_1714.png,21,0,0,0,0 +0_1715.png,19,0,2,0,0 +0_1716.png,19,1,1,0,0 +0_1717.png,19,1,0,0,0 +0_1718.png,0,0,8,0,0 +0_1719.png,0,1,8,0,0 +0_172.png,4,1,1,0,0 
+0_1720.png,0,0,9,0,0 +0_1721.png,0,0,6,0,0 +0_1722.png,0,0,12,0,0 +0_1723.png,0,0,0,0,0 +0_1724.png,0,0,0,0,0 +0_1725.png,0,0,0,0,0 +0_1726.png,79,0,3,0,0 +0_1727.png,83,0,8,0,0 +0_1728.png,77,0,13,0,0 +0_1729.png,91,1,1,0,0 +0_173.png,1,1,2,0,0 +0_1730.png,99,0,4,0,0 +0_1731.png,79,1,5,0,0 +0_1732.png,12,5,19,0,0 +0_1733.png,17,2,9,0,0 +0_1734.png,21,3,16,0,0 +0_1735.png,17,0,11,0,0 +0_1736.png,26,0,16,0,0 +0_1737.png,38,0,1,0,0 +0_1738.png,26,0,11,0,0 +0_1739.png,6,2,12,0,0 +0_174.png,0,1,3,0,2 +0_1740.png,12,1,12,0,0 +0_1741.png,12,1,12,0,0 +0_1742.png,28,2,3,0,0 +0_1743.png,45,0,2,0,0 +0_1744.png,20,0,5,0,0 +0_1745.png,31,0,9,0,0 +0_1746.png,10,0,13,0,0 +0_1747.png,18,1,7,0,0 +0_1748.png,0,0,0,0,0 +0_1749.png,0,0,0,0,0 +0_175.png,0,0,7,0,30 +0_1750.png,0,0,0,0,0 +0_1751.png,0,0,0,0,0 +0_1752.png,0,0,0,0,0 +0_1753.png,0,0,0,0,0 +0_1754.png,47,0,0,0,0 +0_1755.png,38,0,0,7,0 +0_1756.png,4,0,0,19,0 +0_1757.png,0,3,4,16,0 +0_1758.png,41,1,0,0,0 +0_1759.png,35,1,0,0,0 +0_176.png,0,0,2,0,28 +0_1760.png,56,1,1,0,0 +0_1761.png,58,2,0,0,0 +0_1762.png,65,2,0,0,0 +0_1763.png,63,1,4,0,0 +0_1764.png,17,20,9,0,0 +0_1765.png,0,2,36,0,0 +0_1766.png,6,5,8,0,0 +0_1767.png,0,5,50,0,0 +0_1768.png,8,5,17,0,0 +0_1769.png,39,0,0,0,0 +0_177.png,0,2,8,0,18 +0_1770.png,26,2,6,0,0 +0_1771.png,17,2,4,0,0 +0_1772.png,39,0,0,0,0 +0_1773.png,8,0,19,0,0 +0_1774.png,8,1,5,0,0 +0_1775.png,0,3,4,0,0 +0_1776.png,0,0,7,0,0 +0_1777.png,0,0,16,0,0 +0_1778.png,0,0,8,0,0 +0_1779.png,0,3,21,0,0 +0_178.png,0,1,2,1,36 +0_1780.png,0,0,8,0,0 +0_1781.png,0,3,18,0,0 +0_1782.png,0,1,9,0,0 +0_1783.png,0,1,10,0,0 +0_1784.png,0,2,9,0,0 +0_1785.png,0,2,17,0,0 +0_1786.png,0,0,14,0,0 +0_1787.png,41,3,1,0,0 +0_1788.png,47,1,0,0,0 +0_1789.png,9,4,6,0,0 +0_179.png,0,2,2,0,56 +0_1790.png,30,6,8,0,0 +0_1791.png,32,1,0,0,0 +0_1792.png,5,3,8,0,0 +0_1793.png,37,3,5,0,0 +0_1794.png,28,0,0,0,0 +0_1795.png,49,2,0,0,0 +0_1796.png,34,2,0,0,0 +0_1797.png,45,2,0,0,0 +0_1798.png,28,3,1,0,0 +0_1799.png,27,2,2,0,0 +0_18.png,4,1,2,0,0 +0_180.png,2,2,5,0,0 +0_1800.png,28,1,0,0,0 +0_1801.png,24,2,1,0,0 +0_1802.png,35,6,1,0,0 +0_1803.png,25,3,2,0,0 +0_1804.png,0,10,39,0,0 +0_1805.png,0,0,33,0,0 +0_1806.png,4,59,4,0,0 +0_1807.png,0,15,33,0,0 +0_1808.png,0,0,32,0,0 +0_1809.png,17,12,1,0,0 +0_181.png,1,0,8,0,0 +0_1810.png,11,32,4,0,0 +0_1811.png,18,9,2,0,0 +0_1812.png,27,8,0,0,0 +0_1813.png,26,22,3,0,0 +0_1814.png,24,3,0,0,0 +0_1815.png,40,0,0,0,0 +0_1816.png,14,0,0,0,0 +0_1817.png,34,0,0,0,0 +0_1818.png,41,5,0,0,0 +0_1819.png,20,3,0,0,0 +0_182.png,1,0,2,0,0 +0_1820.png,43,6,0,0,0 +0_1821.png,34,0,0,0,0 +0_1822.png,0,2,1,0,6 +0_1823.png,0,3,6,0,17 +0_1824.png,0,11,7,0,0 +0_1825.png,0,11,2,0,0 +0_1826.png,0,1,5,0,19 +0_1827.png,0,2,3,0,23 +0_1828.png,0,2,6,0,18 +0_1829.png,0,3,12,0,8 +0_183.png,0,0,14,0,0 +0_1830.png,0,0,0,0,12 +0_1831.png,0,4,9,0,11 +0_1832.png,0,9,4,0,10 +0_1833.png,0,9,7,0,11 +0_1834.png,0,4,2,0,17 +0_1835.png,0,0,4,0,15 +0_1836.png,0,0,2,0,15 +0_1837.png,0,3,6,0,8 +0_1838.png,0,0,0,0,8 +0_1839.png,0,1,0,0,3 +0_184.png,0,2,17,0,26 +0_1840.png,0,2,5,0,18 +0_1841.png,0,0,0,0,21 +0_1842.png,0,1,2,0,11 +0_1843.png,0,4,7,0,10 +0_1844.png,0,2,4,0,17 +0_1845.png,0,5,2,0,8 +0_1846.png,0,0,1,0,10 +0_1847.png,0,0,0,0,2 +0_1848.png,0,9,3,0,11 +0_1849.png,0,0,0,0,11 +0_185.png,0,6,2,0,34 +0_1850.png,0,13,8,0,0 +0_1851.png,1,3,11,0,4 +0_1852.png,0,15,8,0,0 +0_1853.png,0,18,10,0,0 +0_1854.png,0,18,11,0,7 +0_1855.png,0,0,0,0,9 +0_1856.png,0,3,3,0,12 +0_1857.png,0,7,10,0,8 +0_1858.png,2,14,9,0,0 +0_1859.png,0,0,0,0,0 +0_186.png,0,0,3,0,47 +0_1860.png,0,18,4,0,5 
+0_1861.png,0,3,9,0,17 +0_1862.png,0,2,1,0,12 +0_1863.png,0,6,7,0,16 +0_1864.png,0,0,0,0,0 +0_1865.png,0,3,4,0,4 +0_1866.png,0,3,6,0,4 +0_1867.png,0,5,3,0,1 +0_1868.png,0,4,5,0,0 +0_1869.png,0,3,1,0,0 +0_187.png,0,0,9,0,40 +0_1870.png,0,0,0,0,0 +0_1871.png,0,0,2,0,4 +0_1872.png,0,3,4,0,1 +0_1873.png,0,0,0,0,8 +0_1874.png,0,0,4,0,12 +0_1875.png,0,0,2,0,7 +0_1876.png,0,0,3,0,1 +0_1877.png,0,1,4,0,3 +0_1878.png,0,3,3,0,0 +0_1879.png,0,1,6,0,7 +0_188.png,0,2,2,0,42 +0_1880.png,0,0,0,0,0 +0_1881.png,0,0,8,0,3 +0_1882.png,0,0,3,0,8 +0_1883.png,0,2,3,0,0 +0_1884.png,0,0,2,0,0 +0_1885.png,0,0,0,0,6 +0_1886.png,0,0,13,0,0 +0_1887.png,0,0,5,0,6 +0_1888.png,0,1,5,0,0 +0_1889.png,0,1,1,0,0 +0_189.png,0,1,13,0,36 +0_1890.png,0,0,1,0,0 +0_1891.png,0,1,4,0,0 +0_1892.png,0,0,1,0,5 +0_1893.png,0,0,2,0,11 +0_1894.png,0,0,5,0,1 +0_1895.png,0,0,10,0,0 +0_1896.png,4,0,0,0,0 +0_1897.png,0,0,0,0,7 +0_1898.png,0,0,0,0,6 +0_1899.png,0,1,4,0,11 +0_19.png,2,3,8,0,0 +0_190.png,0,2,4,0,28 +0_1900.png,3,0,0,0,0 +0_1901.png,1,0,0,0,7 +0_1902.png,0,0,2,0,23 +0_1903.png,0,1,4,0,8 +0_1904.png,0,0,0,0,31 +0_1905.png,0,1,6,0,15 +0_1906.png,0,18,8,0,4 +0_1907.png,0,4,2,0,18 +0_1908.png,0,3,7,0,11 +0_1909.png,0,0,0,0,2 +0_191.png,0,1,4,0,18 +0_1910.png,0,4,8,0,17 +0_1911.png,0,0,0,0,8 +0_1912.png,0,0,3,0,12 +0_1913.png,0,0,0,0,8 +0_1914.png,0,3,5,0,7 +0_1915.png,0,2,4,0,20 +0_1916.png,0,0,0,0,12 +0_1917.png,0,0,3,0,19 +0_1918.png,0,0,0,0,5 +0_1919.png,0,4,4,0,7 +0_192.png,0,0,0,0,45 +0_1920.png,0,5,10,0,16 +0_1921.png,0,2,13,0,13 +0_1922.png,0,0,2,0,0 +0_1923.png,0,0,0,0,0 +0_1924.png,0,0,11,0,0 +0_1925.png,0,0,9,0,0 +0_1926.png,0,0,5,0,0 +0_1927.png,0,1,14,0,0 +0_1928.png,0,0,0,0,6 +0_1929.png,0,1,5,0,10 +0_193.png,0,0,0,0,23 +0_1930.png,0,2,20,0,0 +0_1931.png,0,4,28,0,0 +0_1932.png,0,0,13,0,15 +0_1933.png,0,0,0,0,5 +0_1934.png,0,1,2,0,13 +0_1935.png,0,3,26,0,3 +0_1936.png,0,0,0,0,1 +0_1937.png,0,2,19,0,0 +0_1938.png,0,8,11,0,7 +0_1939.png,0,0,0,0,0 +0_194.png,2,0,2,0,0 +0_1940.png,0,0,0,0,0 +0_1941.png,0,0,0,0,4 +0_1942.png,0,0,0,0,13 +0_1943.png,0,7,9,0,14 +0_1944.png,0,13,25,0,0 +0_1945.png,0,0,0,0,0 +0_1946.png,0,0,0,0,0 +0_1947.png,0,4,1,0,25 +0_1948.png,0,0,0,0,4 +0_1949.png,0,0,0,0,7 +0_195.png,11,0,1,0,0 +0_1950.png,0,6,4,0,9 +0_1951.png,0,0,0,0,8 +0_1952.png,0,0,0,0,5 +0_1953.png,0,3,5,0,14 +0_1954.png,0,13,3,0,4 +0_1955.png,0,7,5,0,11 +0_1956.png,0,0,0,0,12 +0_1957.png,0,0,0,0,4 +0_1958.png,0,19,3,0,3 +0_1959.png,0,1,0,0,8 +0_196.png,6,0,6,0,0 +0_1960.png,0,8,3,0,8 +0_1961.png,0,20,2,0,5 +0_1962.png,0,11,3,0,10 +0_1963.png,0,8,2,0,7 +0_1964.png,0,0,0,0,6 +0_1965.png,0,1,0,0,7 +0_1966.png,0,0,0,0,0 +0_1967.png,0,0,0,0,0 +0_1968.png,0,0,0,0,0 +0_1969.png,0,0,0,0,0 +0_197.png,23,0,5,0,0 +0_1970.png,0,0,0,0,0 +0_1971.png,0,0,0,0,0 +0_1972.png,0,0,0,0,0 +0_1973.png,0,0,0,0,0 +0_1974.png,0,0,0,0,0 +0_1975.png,0,0,0,0,0 +0_1976.png,0,0,0,0,0 +0_1977.png,0,0,0,0,0 +0_1978.png,0,0,0,0,0 +0_1979.png,0,0,0,0,0 +0_198.png,8,1,3,0,1 +0_1980.png,0,8,0,0,13 +0_1981.png,0,22,1,0,9 +0_1982.png,4,17,1,0,9 +0_1983.png,0,19,2,0,6 +0_1984.png,0,24,0,0,5 +0_1985.png,0,1,0,0,17 +0_1986.png,0,22,2,0,10 +0_1987.png,0,8,0,0,13 +0_1988.png,0,8,0,0,16 +0_1989.png,0,11,2,0,5 +0_199.png,4,0,7,0,0 +0_1990.png,0,1,0,0,9 +0_1991.png,0,24,1,0,3 +0_1992.png,0,18,0,0,10 +0_1993.png,0,27,3,0,4 +0_1994.png,0,9,1,0,0 +0_1995.png,0,9,0,0,12 +0_1996.png,0,9,0,0,9 +0_1997.png,0,1,0,0,13 +0_1998.png,0,14,1,0,8 +0_1999.png,0,7,6,0,5 +0_2.png,8,0,3,0,0 +0_20.png,10,0,0,0,0 +0_200.png,0,2,3,0,22 +0_2000.png,0,8,8,0,0 +0_2001.png,0,0,1,0,12 
+0_2002.png,0,0,0,0,10 +0_2003.png,0,7,14,0,0 +0_2004.png,0,6,4,0,10 +0_2005.png,0,13,8,0,0 +0_2006.png,0,0,1,0,12 +0_2007.png,0,0,0,0,0 +0_2008.png,0,11,4,0,6 +0_2009.png,0,0,0,0,0 +0_201.png,0,0,5,0,51 +0_2010.png,0,2,3,0,7 +0_2011.png,0,0,1,0,7 +0_2012.png,0,5,9,0,5 +0_2013.png,0,2,4,0,4 +0_2014.png,0,0,0,0,14 +0_2015.png,0,0,0,0,7 +0_2016.png,0,11,7,0,0 +0_2017.png,0,3,6,0,5 +0_2018.png,0,0,0,0,15 +0_2019.png,0,0,0,0,0 +0_202.png,0,3,12,0,12 +0_2020.png,0,0,0,0,0 +0_2021.png,0,1,7,0,0 +0_2022.png,0,0,0,0,0 +0_2023.png,0,2,19,0,0 +0_2024.png,0,0,18,0,0 +0_2025.png,0,0,10,0,0 +0_2026.png,0,2,30,0,0 +0_2027.png,0,0,8,0,0 +0_2028.png,0,0,17,0,0 +0_2029.png,0,0,14,0,0 +0_203.png,0,6,7,0,0 +0_2030.png,0,0,17,0,0 +0_2031.png,0,2,19,0,0 +0_2032.png,0,7,28,0,0 +0_2033.png,0,2,14,0,0 +0_2034.png,0,0,15,0,0 +0_2035.png,0,0,12,0,0 +0_2036.png,0,0,17,0,0 +0_2037.png,0,0,20,0,0 +0_2038.png,0,3,12,0,0 +0_2039.png,0,1,13,0,0 +0_204.png,0,2,8,0,38 +0_2040.png,0,1,9,0,0 +0_2041.png,0,6,20,0,0 +0_2042.png,0,3,18,0,0 +0_2043.png,0,0,15,0,5 +0_2044.png,0,0,7,0,9 +0_2045.png,0,0,1,0,11 +0_2046.png,0,0,5,0,12 +0_2047.png,0,0,9,0,12 +0_2048.png,0,3,0,0,1 +0_2049.png,0,0,6,0,10 +0_205.png,0,0,2,0,42 +0_2050.png,0,0,14,0,12 +0_2051.png,0,0,13,0,4 +0_2052.png,0,0,4,0,18 +0_2053.png,0,1,14,0,6 +0_2054.png,0,0,8,0,11 +0_2055.png,0,0,17,0,10 +0_2056.png,0,0,8,0,23 +0_2057.png,19,0,0,1,0 +0_2058.png,12,0,0,18,0 +0_2059.png,11,1,0,4,0 +0_206.png,0,1,6,0,30 +0_2060.png,14,0,8,0,0 +0_2061.png,28,0,0,0,0 +0_2062.png,20,1,0,0,0 +0_2063.png,33,2,0,0,0 +0_2064.png,48,0,0,0,0 +0_2065.png,39,3,0,0,0 +0_2066.png,0,0,10,0,0 +0_2067.png,39,9,1,0,0 +0_2068.png,28,11,4,0,0 +0_2069.png,16,4,0,0,0 +0_207.png,0,0,2,0,30 +0_2070.png,19,0,4,0,0 +0_2071.png,15,0,4,1,0 +0_2072.png,23,0,1,0,0 +0_2073.png,15,1,1,0,0 +0_2074.png,8,0,15,0,0 +0_2075.png,9,1,7,0,0 +0_2076.png,22,1,3,8,0 +0_2077.png,10,1,10,0,0 +0_2078.png,20,0,1,2,0 +0_2079.png,0,2,17,0,0 +0_208.png,0,1,8,0,21 +0_2080.png,0,0,22,0,0 +0_2081.png,0,3,19,0,0 +0_2082.png,0,3,13,0,0 +0_2083.png,0,0,18,0,0 +0_2084.png,0,4,28,0,0 +0_2085.png,0,4,33,0,0 +0_2086.png,21,0,1,0,0 +0_2087.png,13,0,10,0,0 +0_2088.png,0,8,20,1,0 +0_2089.png,1,2,14,0,0 +0_209.png,0,4,8,0,15 +0_2090.png,0,5,16,0,0 +0_2091.png,0,0,19,0,0 +0_2092.png,0,0,24,0,0 +0_2093.png,0,0,20,0,0 +0_2094.png,0,0,25,0,0 +0_2095.png,0,0,26,0,0 +0_2096.png,0,0,20,0,0 +0_2097.png,0,0,22,0,0 +0_2098.png,0,0,0,0,0 +0_2099.png,13,0,6,0,0 +0_21.png,9,0,2,0,0 +0_210.png,0,2,3,0,36 +0_2100.png,22,1,5,0,0 +0_2101.png,21,1,1,0,0 +0_2102.png,18,0,2,0,0 +0_2103.png,0,0,11,0,0 +0_2104.png,9,0,6,0,0 +0_2105.png,5,2,17,0,0 +0_2106.png,0,10,22,0,0 +0_2107.png,4,4,11,0,0 +0_2108.png,0,0,0,0,0 +0_2109.png,0,0,0,0,0 +0_211.png,0,3,14,0,2 +0_2110.png,0,0,0,0,0 +0_2111.png,0,0,0,0,0 +0_2112.png,15,0,0,0,0 +0_2113.png,18,0,0,0,0 +0_2114.png,2,0,5,0,0 +0_2115.png,0,0,0,0,0 +0_2116.png,3,1,9,0,0 +0_2117.png,9,0,2,0,0 +0_2118.png,0,91,3,0,0 +0_2119.png,0,83,5,0,0 +0_212.png,0,0,5,0,46 +0_2120.png,0,91,3,0,0 +0_2121.png,0,90,2,0,0 +0_2122.png,18,3,4,0,0 +0_2123.png,17,4,3,0,0 +0_2124.png,25,2,0,0,0 +0_2125.png,0,4,7,0,0 +0_2126.png,0,1,8,0,0 +0_2127.png,0,0,0,0,8 +0_2128.png,0,0,0,0,15 +0_2129.png,0,0,0,0,13 +0_213.png,0,3,10,0,32 +0_2130.png,0,0,0,0,16 +0_2131.png,0,0,0,0,0 +0_2132.png,0,0,0,0,0 +0_2133.png,0,0,0,0,0 +0_2134.png,0,0,0,0,0 +0_2135.png,0,0,0,0,0 +0_2136.png,0,0,5,0,0 +0_2137.png,0,0,0,0,0 +0_2138.png,0,0,13,0,0 +0_2139.png,0,0,25,0,0 +0_214.png,0,2,11,0,29 +0_2140.png,0,0,21,0,0 +0_2141.png,0,0,16,0,0 +0_2142.png,0,0,15,0,0 
+0_2143.png,0,0,18,0,0 +0_2144.png,0,0,18,0,0 +0_2145.png,0,0,18,0,0 +0_2146.png,0,0,8,0,3 +0_2147.png,0,0,1,0,4 +0_2148.png,0,0,8,0,4 +0_2149.png,0,0,8,0,2 +0_215.png,0,3,13,0,22 +0_2150.png,0,0,13,0,0 +0_2151.png,0,1,10,0,0 +0_2152.png,0,2,14,0,0 +0_2153.png,49,0,0,0,0 +0_2154.png,38,0,0,0,0 +0_2155.png,0,27,16,0,0 +0_2156.png,1,5,6,0,0 +0_2157.png,0,6,7,0,0 +0_2158.png,0,4,5,0,0 +0_2159.png,19,0,0,0,0 +0_216.png,0,0,5,0,16 +0_2160.png,31,0,2,0,0 +0_2161.png,4,5,0,8,0 +0_2162.png,29,3,0,0,0 +0_2163.png,0,0,0,0,0 +0_2164.png,0,0,0,0,0 +0_2165.png,0,0,0,0,0 +0_2166.png,39,0,2,0,0 +0_2167.png,38,2,0,0,0 +0_2168.png,2,0,6,0,0 +0_2169.png,24,0,0,0,0 +0_217.png,0,0,7,0,23 +0_2170.png,9,0,1,0,0 +0_2171.png,16,0,8,0,0 +0_2172.png,21,0,3,0,0 +0_2173.png,25,0,0,0,0 +0_2174.png,0,0,0,0,0 +0_2175.png,0,1,0,0,0 +0_2176.png,0,0,0,0,0 +0_2177.png,0,0,9,0,0 +0_2178.png,0,1,11,0,0 +0_2179.png,0,0,11,0,0 +0_218.png,0,1,11,0,15 +0_2180.png,0,0,11,0,0 +0_2181.png,0,0,9,0,0 +0_2182.png,22,2,7,0,0 +0_2183.png,19,0,2,0,0 +0_2184.png,15,4,3,0,0 +0_2185.png,0,0,7,0,9 +0_2186.png,0,0,3,0,11 +0_2187.png,0,0,6,0,7 +0_2188.png,0,0,8,0,13 +0_2189.png,22,0,0,0,0 +0_219.png,0,11,5,0,22 +0_2190.png,15,2,0,0,0 +0_2191.png,8,0,0,0,0 +0_2192.png,15,4,6,0,0 +0_2193.png,1,7,28,0,0 +0_2194.png,9,0,0,0,0 +0_2195.png,6,1,1,0,0 +0_2196.png,8,0,4,0,0 +0_2197.png,14,0,1,0,0 +0_2198.png,13,0,3,0,0 +0_2199.png,13,0,1,0,0 +0_22.png,8,0,0,0,0 +0_220.png,0,0,0,0,36 +0_2200.png,0,9,7,0,0 +0_2201.png,0,3,12,0,0 +0_2202.png,0,0,0,0,0 +0_2203.png,17,4,1,0,0 +0_2204.png,8,3,0,0,0 +0_2205.png,16,2,1,0,0 +0_2206.png,14,3,5,0,0 +0_2207.png,0,5,36,0,0 +0_2208.png,16,2,0,0,0 +0_2209.png,0,0,3,0,68 +0_221.png,0,0,7,0,25 +0_2210.png,0,3,2,0,60 +0_2211.png,0,3,1,0,59 +0_2212.png,0,1,3,0,57 +0_2213.png,0,0,0,0,10 +0_2214.png,0,0,2,0,12 +0_2215.png,0,0,8,0,13 +0_2216.png,0,0,3,0,12 +0_2217.png,0,0,1,0,17 +0_2218.png,0,0,1,0,14 +0_2219.png,17,1,1,0,0 +0_222.png,0,0,4,0,32 +0_2220.png,29,1,2,0,0 +0_2221.png,23,0,0,0,0 +0_2222.png,18,2,2,0,0 +0_2223.png,20,0,3,0,0 +0_2224.png,8,3,2,0,0 +0_2225.png,24,0,0,0,0 +0_2226.png,30,0,0,0,0 +0_2227.png,0,2,15,0,0 +0_2228.png,0,0,12,0,0 +0_2229.png,0,1,12,0,0 +0_223.png,0,0,2,0,51 +0_2230.png,0,0,14,0,0 +0_2231.png,23,1,0,0,0 +0_2232.png,19,5,2,0,0 +0_2233.png,25,0,0,0,0 +0_2234.png,17,4,10,0,0 +0_2235.png,18,0,0,0,0 +0_2236.png,13,1,12,0,0 +0_2237.png,0,0,27,0,0 +0_2238.png,11,1,8,0,0 +0_2239.png,3,0,13,0,0 +0_224.png,0,2,1,0,35 +0_2240.png,14,0,2,0,0 +0_2241.png,3,0,13,0,0 +0_2242.png,0,2,0,0,12 +0_2243.png,0,0,0,0,11 +0_2244.png,0,2,0,0,23 +0_2245.png,0,0,15,0,0 +0_2246.png,0,2,17,0,0 +0_2247.png,12,3,9,0,0 +0_2248.png,3,4,13,0,0 +0_2249.png,20,1,1,0,0 +0_225.png,0,1,4,0,20 +0_2250.png,22,0,6,0,0 +0_2251.png,14,2,4,0,0 +0_2252.png,15,1,2,0,0 +0_2253.png,18,0,0,0,0 +0_2254.png,20,2,0,0,0 +0_2255.png,23,2,1,0,0 +0_2256.png,14,0,4,0,0 +0_2257.png,19,2,5,0,0 +0_2258.png,28,0,0,0,0 +0_2259.png,24,1,1,0,0 +0_226.png,0,0,7,0,8 +0_2260.png,28,0,0,0,0 +0_2261.png,18,3,2,0,0 +0_2262.png,13,2,2,0,0 +0_2263.png,0,0,0,0,0 +0_2264.png,0,0,0,0,0 +0_2265.png,0,0,0,0,0 +0_2266.png,3,1,1,0,0 +0_2267.png,0,0,0,0,0 +0_2268.png,0,10,8,0,0 +0_2269.png,0,3,11,0,0 +0_227.png,0,1,5,0,37 +0_2270.png,0,2,16,0,0 +0_2271.png,0,5,14,0,0 +0_2272.png,0,2,6,0,0 +0_2273.png,0,0,42,0,0 +0_2274.png,0,1,34,0,0 +0_2275.png,0,1,39,0,0 +0_2276.png,0,0,42,0,0 +0_2277.png,0,0,37,0,33 +0_2278.png,0,1,5,0,22 +0_2279.png,0,1,4,0,11 +0_228.png,0,2,7,0,31 +0_2280.png,0,0,5,0,13 +0_2281.png,0,4,32,0,1 +0_2282.png,0,1,9,0,35 +0_2283.png,0,1,40,0,4 
+0_2284.png,29,0,0,0,0 +0_2285.png,6,1,0,0,0 +0_2286.png,16,0,0,0,0 +0_2287.png,8,1,6,0,0 +0_2288.png,29,1,0,0,0 +0_2289.png,22,0,4,0,0 +0_229.png,0,2,10,0,26 +0_2290.png,20,1,5,0,0 +0_2291.png,1,1,15,0,0 +0_2292.png,6,3,7,0,0 +0_2293.png,12,4,16,0,0 +0_2294.png,11,4,12,0,0 +0_2295.png,23,5,6,0,0 +0_2296.png,21,11,10,0,0 +0_2297.png,20,20,7,0,0 +0_2298.png,12,6,15,0,0 +0_2299.png,21,5,7,0,0 +0_23.png,7,0,5,0,0 +0_230.png,0,1,8,0,32 +0_2300.png,2,6,8,0,0 +0_2301.png,16,0,0,0,0 +0_2302.png,22,0,2,0,0 +0_2303.png,19,0,0,0,0 +0_2304.png,29,0,0,0,0 +0_2305.png,29,0,0,0,0 +0_2306.png,30,1,0,0,0 +0_2307.png,25,2,0,0,0 +0_2308.png,32,0,0,0,0 +0_2309.png,40,0,0,0,0 +0_231.png,0,1,11,0,11 +0_2310.png,32,0,0,0,0 +0_2311.png,30,0,0,0,0 +0_2312.png,34,0,0,0,0 +0_2313.png,0,4,7,0,8 +0_2314.png,0,3,1,0,0 +0_2315.png,0,0,5,0,12 +0_2316.png,0,4,6,0,10 +0_2317.png,0,0,8,0,9 +0_2318.png,0,2,4,0,19 +0_2319.png,6,0,0,0,0 +0_232.png,0,2,4,0,27 +0_2320.png,2,0,2,0,0 +0_2321.png,9,0,1,0,0 +0_2322.png,3,1,10,0,0 +0_2323.png,0,0,14,0,0 +0_2324.png,0,2,11,0,0 +0_2325.png,8,1,20,0,0 +0_2326.png,10,7,18,0,0 +0_2327.png,23,0,3,0,0 +0_2328.png,30,0,5,0,0 +0_2329.png,9,0,21,0,0 +0_233.png,0,0,0,0,37 +0_2330.png,29,0,0,0,0 +0_2331.png,2,1,21,0,0 +0_2332.png,0,3,22,0,0 +0_2333.png,0,12,22,0,0 +0_2334.png,0,5,35,0,0 +0_2335.png,0,1,31,0,0 +0_2336.png,0,5,20,0,0 +0_2337.png,0,13,18,0,0 +0_2338.png,0,8,14,0,0 +0_2339.png,0,5,5,0,27 +0_234.png,1,0,3,0,0 +0_2340.png,0,10,15,0,31 +0_2341.png,0,1,3,0,0 +0_2342.png,0,3,8,0,0 +0_2343.png,0,1,6,0,0 +0_2344.png,0,0,15,0,0 +0_2345.png,0,2,10,0,0 +0_2346.png,0,1,7,0,0 +0_2347.png,0,0,14,0,0 +0_2348.png,0,0,14,0,0 +0_2349.png,0,0,9,0,0 +0_235.png,9,0,3,0,0 +0_2350.png,0,0,17,0,0 +0_2351.png,0,0,13,0,0 +0_2352.png,0,0,10,0,0 +0_2353.png,0,0,6,0,0 +0_2354.png,0,3,14,0,0 +0_2355.png,0,4,6,0,0 +0_2356.png,0,3,19,0,0 +0_2357.png,6,1,19,0,0 +0_2358.png,17,0,0,0,0 +0_2359.png,16,0,2,0,0 +0_236.png,0,0,4,0,0 +0_2360.png,0,0,5,0,35 +0_2361.png,0,0,22,0,12 +0_2362.png,0,0,9,0,26 +0_2363.png,0,0,3,0,11 +0_2364.png,0,0,14,0,22 +0_2365.png,10,0,1,0,0 +0_2366.png,15,0,1,0,0 +0_2367.png,15,0,3,0,0 +0_2368.png,12,0,2,0,0 +0_2369.png,9,0,3,0,0 +0_237.png,5,0,1,0,0 +0_2370.png,5,0,2,0,0 +0_2371.png,16,0,1,0,0 +0_2372.png,28,1,4,0,4 +0_2373.png,0,0,22,0,0 +0_2374.png,25,0,7,0,7 +0_2375.png,0,0,13,0,7 +0_2376.png,0,0,13,0,0 +0_2377.png,0,0,9,0,0 +0_2378.png,0,0,12,0,0 +0_2379.png,0,3,18,0,0 +0_238.png,16,1,1,0,0 +0_2380.png,0,0,7,0,0 +0_2381.png,0,0,11,0,0 +0_2382.png,0,1,11,0,0 +0_2383.png,1,0,5,0,0 +0_2384.png,48,0,0,0,0 +0_2385.png,11,0,10,0,0 +0_2386.png,18,0,13,0,0 +0_2387.png,41,0,0,0,0 +0_2388.png,44,0,0,0,0 +0_2389.png,45,0,0,0,0 +0_239.png,0,0,4,0,0 +0_2390.png,37,0,0,0,0 +0_2391.png,18,0,6,0,0 +0_2392.png,23,0,3,0,0 +0_2393.png,0,0,3,0,0 +0_2394.png,0,0,2,0,0 +0_2395.png,0,0,5,0,0 +0_2396.png,0,0,5,0,0 +0_2397.png,0,0,4,0,0 +0_2398.png,0,0,4,0,0 +0_2399.png,0,0,10,0,0 +0_24.png,10,0,0,0,0 +0_240.png,0,5,7,0,23 +0_2400.png,16,0,11,0,0 +0_2401.png,7,0,15,0,0 +0_2402.png,15,0,9,0,0 +0_2403.png,11,0,12,0,0 +0_2404.png,5,2,0,0,0 +0_2405.png,6,1,0,0,0 +0_2406.png,0,0,0,0,0 +0_2407.png,0,0,0,0,0 +0_2408.png,1,0,0,0,0 +0_2409.png,13,0,2,0,0 +0_241.png,0,7,11,0,9 +0_2410.png,23,0,0,0,0 +0_2411.png,14,0,5,0,0 +0_2412.png,6,1,13,0,0 +0_2413.png,25,21,0,0,0 +0_2414.png,38,58,0,0,0 +0_2415.png,20,33,0,0,0 +0_2416.png,31,11,0,0,0 +0_2417.png,0,3,14,0,0 +0_2418.png,0,1,18,0,0 +0_2419.png,0,6,4,0,0 +0_242.png,0,10,8,0,20 +0_2420.png,0,3,4,0,0 +0_2421.png,55,0,4,0,0 +0_2422.png,68,1,1,0,0 +0_2423.png,80,2,11,0,0 
+0_2424.png,71,1,1,0,0 +0_2425.png,62,1,5,0,0 +0_2426.png,53,0,5,0,0 +0_2427.png,62,1,1,0,0 +0_2428.png,0,0,6,0,1 +0_2429.png,0,0,1,0,0 +0_243.png,0,11,8,0,13 +0_2430.png,0,3,0,0,28 +0_2431.png,0,1,3,0,3 +0_2432.png,0,2,10,0,0 +0_2433.png,0,1,3,0,0 +0_2434.png,0,25,29,0,0 +0_2435.png,0,92,27,0,0 +0_2436.png,0,89,31,0,0 +0_2437.png,0,85,22,0,0 +0_2438.png,0,59,24,0,0 +0_2439.png,0,91,32,0,0 +0_244.png,0,9,12,0,0 +0_2440.png,0,109,14,0,0 +0_2441.png,16,0,0,0,0 +0_2442.png,19,0,0,0,0 +0_2443.png,14,0,0,0,0 +0_2444.png,19,1,0,0,0 +0_2445.png,5,5,0,0,0 +0_2446.png,36,1,0,0,0 +0_2447.png,28,0,7,0,0 +0_2448.png,22,0,15,0,0 +0_2449.png,36,1,1,0,0 +0_245.png,0,1,6,0,17 +0_2450.png,9,3,0,0,0 +0_2451.png,14,2,5,0,0 +0_2452.png,17,6,0,0,0 +0_2453.png,14,2,0,0,0 +0_2454.png,29,1,3,0,0 +0_2455.png,21,6,2,0,0 +0_2456.png,27,8,3,0,0 +0_2457.png,18,4,7,0,0 +0_2458.png,28,2,2,0,0 +0_2459.png,17,10,1,0,0 +0_246.png,0,7,6,0,21 +0_2460.png,25,14,0,0,0 +0_2461.png,0,0,3,0,0 +0_2462.png,0,0,0,0,0 +0_2463.png,0,0,0,0,0 +0_2464.png,0,0,0,0,0 +0_2465.png,0,0,0,0,0 +0_2466.png,0,0,11,0,0 +0_2467.png,0,0,14,0,0 +0_2468.png,0,1,10,0,0 +0_2469.png,0,0,20,0,0 +0_247.png,0,1,3,0,36 +0_2470.png,0,0,17,0,0 +0_2471.png,32,1,6,3,0 +0_2472.png,25,2,4,36,0 +0_2473.png,40,1,2,5,0 +0_2474.png,11,2,12,5,0 +0_2475.png,27,5,1,0,0 +0_2476.png,38,3,0,1,0 +0_2477.png,17,12,14,1,0 +0_2478.png,36,7,2,1,0 +0_2479.png,26,12,1,1,0 +0_248.png,0,3,5,0,41 +0_2480.png,39,3,5,0,0 +0_2481.png,21,8,5,1,0 +0_2482.png,19,5,11,3,0 +0_2483.png,33,0,0,1,0 +0_2484.png,26,10,6,5,0 +0_2485.png,21,3,9,1,0 +0_2486.png,16,5,6,0,0 +0_2487.png,0,1,18,0,0 +0_2488.png,0,0,7,0,0 +0_2489.png,4,0,3,0,0 +0_249.png,0,0,3,0,46 +0_2490.png,0,0,16,0,0 +0_2491.png,1,4,7,0,0 +0_2492.png,1,0,12,0,0 +0_2493.png,0,1,20,0,0 +0_2494.png,26,0,2,0,0 +0_2495.png,25,7,10,1,0 +0_2496.png,22,6,4,0,0 +0_2497.png,0,0,8,0,0 +0_2498.png,1,0,12,0,0 +0_2499.png,0,0,8,0,0 +0_25.png,13,0,1,0,0 +0_250.png,0,6,20,0,20 +0_2500.png,10,0,2,0,0 +0_2501.png,3,0,10,0,0 +0_2502.png,8,0,9,0,0 +0_2503.png,8,0,5,0,0 +0_2504.png,8,26,6,0,0 +0_2505.png,18,8,5,1,0 +0_2506.png,20,8,0,0,0 +0_2507.png,23,7,3,1,0 +0_2508.png,13,3,1,1,0 +0_2509.png,17,20,8,0,0 +0_251.png,0,11,3,0,27 +0_2510.png,3,9,17,0,0 +0_2511.png,11,13,12,1,0 +0_2512.png,0,2,1,0,0 +0_2513.png,0,2,1,0,0 +0_2514.png,0,2,0,0,0 +0_2515.png,0,0,0,0,0 +0_2516.png,0,1,2,0,0 +0_2517.png,0,1,12,0,0 +0_2518.png,3,0,0,0,0 +0_2519.png,13,1,0,0,0 +0_252.png,0,19,4,0,28 +0_2520.png,0,0,8,0,0 +0_2521.png,17,1,0,0,0 +0_2522.png,18,0,0,0,0 +0_2523.png,22,2,1,0,0 +0_2524.png,13,2,3,0,0 +0_2525.png,9,81,0,0,0 +0_2526.png,9,70,10,0,0 +0_2527.png,5,30,6,0,0 +0_2528.png,14,54,7,0,0 +0_2529.png,12,29,7,0,0 +0_253.png,0,5,10,0,33 +0_2530.png,16,39,4,0,0 +0_2531.png,23,1,0,0,0 +0_2532.png,19,0,1,0,0 +0_2533.png,20,1,0,0,0 +0_2534.png,19,2,5,0,0 +0_2535.png,19,1,0,0,0 +0_2536.png,20,0,0,0,0 +0_2537.png,0,3,4,0,14 +0_2538.png,0,1,2,0,13 +0_2539.png,0,1,5,2,11 +0_254.png,0,10,6,0,27 +0_2540.png,0,1,0,0,5 +0_2541.png,0,2,1,0,14 +0_2542.png,0,0,2,0,13 +0_2543.png,0,1,0,0,11 +0_2544.png,0,0,0,0,24 +0_2545.png,0,0,2,0,24 +0_2546.png,0,0,0,0,21 +0_2547.png,0,0,1,0,16 +0_2548.png,0,0,8,0,4 +0_2549.png,0,0,0,0,8 +0_255.png,0,16,6,0,24 +0_2550.png,0,0,0,0,0 +0_2551.png,0,0,0,0,0 +0_2552.png,0,0,0,0,0 +0_2553.png,0,0,4,0,22 +0_2554.png,0,0,1,0,20 +0_2555.png,0,1,1,0,30 +0_2556.png,0,2,2,0,24 +0_2557.png,0,0,2,0,34 +0_2558.png,0,0,1,0,18 +0_2559.png,0,1,4,0,22 +0_256.png,0,1,6,0,0 +0_2560.png,0,1,5,0,34 +0_2561.png,0,0,4,0,14 +0_2562.png,0,14,2,0,24 +0_2563.png,13,0,1,0,0 
+0_2564.png,43,0,4,0,0 +0_2565.png,36,1,6,0,0 +0_2566.png,6,4,11,0,0 +0_2567.png,8,6,17,0,0 +0_2568.png,3,10,21,0,0 +0_2569.png,9,10,13,0,0 +0_257.png,0,0,15,0,0 +0_2570.png,9,6,12,0,0 +0_2571.png,9,9,20,0,0 +0_2572.png,0,6,14,0,28 +0_2573.png,0,0,1,0,28 +0_2574.png,0,1,3,0,21 +0_2575.png,0,5,9,0,20 +0_2576.png,0,0,2,0,14 +0_2577.png,0,1,10,0,29 +0_2578.png,0,0,17,0,0 +0_2579.png,0,0,23,0,0 +0_258.png,0,0,4,0,27 +0_2580.png,0,1,7,0,0 +0_2581.png,30,0,1,0,0 +0_2582.png,1,1,3,0,0 +0_2583.png,0,0,30,0,0 +0_2584.png,24,2,9,0,0 +0_2585.png,35,0,0,0,0 +0_2586.png,0,0,29,0,0 +0_2587.png,0,1,25,1,0 +0_2588.png,0,0,19,0,0 +0_2589.png,61,1,2,2,0 +0_259.png,0,1,5,0,19 +0_2590.png,41,0,0,0,0 +0_2591.png,22,1,0,0,0 +0_2592.png,0,12,18,0,38 +0_2593.png,0,7,27,0,32 +0_2594.png,0,34,36,0,23 +0_2595.png,0,0,8,0,0 +0_2596.png,0,0,8,0,0 +0_2597.png,0,0,4,0,0 +0_2598.png,0,5,9,0,0 +0_2599.png,56,0,4,0,0 +0_26.png,4,0,7,0,0 +0_260.png,0,0,6,0,22 +0_2600.png,0,1,18,0,0 +0_2601.png,15,2,0,0,0 +0_2602.png,46,2,7,0,0 +0_2603.png,55,0,2,0,0 +0_2604.png,0,0,17,0,0 +0_2605.png,0,4,21,0,0 +0_2606.png,0,0,23,0,0 +0_2607.png,0,0,10,0,0 +0_2608.png,41,4,2,0,0 +0_2609.png,0,138,28,0,0 +0_261.png,19,1,0,0,0 +0_2610.png,0,150,18,0,0 +0_2611.png,0,145,20,0,0 +0_2612.png,0,145,21,0,0 +0_2613.png,53,0,3,16,0 +0_2614.png,37,4,0,0,0 +0_2615.png,0,8,49,0,0 +0_2616.png,0,6,48,0,0 +0_2617.png,59,3,15,0,0 +0_2618.png,28,4,36,0,0 +0_2619.png,0,1,0,0,42 +0_262.png,18,0,0,0,0 +0_2620.png,0,6,8,0,33 +0_2621.png,0,0,1,0,32 +0_2622.png,0,3,3,0,34 +0_2623.png,0,0,23,0,0 +0_2624.png,7,3,50,0,0 +0_2625.png,1,1,51,0,0 +0_2626.png,0,0,0,0,0 +0_2627.png,0,4,6,0,0 +0_2628.png,0,0,1,0,0 +0_2629.png,0,12,10,0,0 +0_263.png,16,0,1,0,0 +0_2630.png,0,0,14,0,0 +0_2631.png,0,0,1,0,0 +0_2632.png,0,2,8,0,0 +0_2633.png,22,1,32,0,0 +0_2634.png,13,10,42,0,0 +0_2635.png,34,2,13,0,0 +0_2636.png,0,0,0,0,0 +0_2637.png,0,0,2,0,0 +0_2638.png,0,0,15,0,0 +0_2639.png,0,0,0,0,0 +0_264.png,15,0,1,0,0 +0_2640.png,0,0,0,0,0 +0_2641.png,0,0,3,0,0 +0_2642.png,0,29,46,0,11 +0_2643.png,0,15,14,0,30 +0_2644.png,52,1,0,0,0 +0_2645.png,36,0,10,0,0 +0_2646.png,47,0,0,10,0 +0_2647.png,41,0,5,0,0 +0_2648.png,40,0,0,14,0 +0_2649.png,19,22,36,0,0 +0_265.png,17,0,3,0,0 +0_2650.png,27,25,20,0,0 +0_2651.png,61,6,11,0,0 +0_2652.png,10,24,56,0,0 +0_2653.png,0,19,15,0,0 +0_2654.png,0,5,22,0,0 +0_2655.png,0,10,30,0,0 +0_266.png,20,0,0,0,0 +0_267.png,19,0,0,0,0 +0_268.png,20,0,0,0,0 +0_269.png,11,9,5,0,0 +0_27.png,4,0,7,0,0 +0_270.png,17,7,1,0,0 +0_271.png,0,14,21,0,0 +0_272.png,2,29,14,0,0 +0_273.png,23,0,1,0,0 +0_274.png,16,0,1,0,0 +0_275.png,6,7,1,0,0 +0_276.png,19,0,0,0,0 +0_277.png,23,0,0,0,0 +0_278.png,16,0,3,0,0 +0_279.png,0,4,6,0,4 +0_28.png,6,0,10,0,0 +0_280.png,0,1,13,0,13 +0_281.png,0,2,12,0,25 +0_282.png,0,5,13,0,7 +0_283.png,12,0,7,0,0 +0_284.png,11,2,7,0,0 +0_285.png,20,5,3,0,0 +0_286.png,5,13,16,0,0 +0_287.png,5,12,11,0,0 +0_288.png,0,22,8,0,0 +0_289.png,0,23,5,0,0 +0_29.png,7,0,6,0,0 +0_290.png,2,28,14,0,0 +0_291.png,0,30,8,0,0 +0_292.png,1,15,7,0,0 +0_293.png,0,7,9,0,0 +0_294.png,0,12,9,0,0 +0_295.png,0,45,6,0,0 +0_296.png,26,4,2,0,0 +0_297.png,0,1,14,0,0 +0_298.png,11,2,9,0,0 +0_299.png,1,1,10,0,0 +0_3.png,10,0,0,0,0 +0_30.png,12,0,0,0,0 +0_300.png,3,1,8,0,0 +0_301.png,9,2,6,0,0 +0_302.png,10,1,1,0,0 +0_303.png,3,0,10,0,0 +0_304.png,0,2,9,0,0 +0_305.png,0,1,10,0,0 +0_306.png,6,0,7,0,0 +0_307.png,10,0,4,0,0 +0_308.png,11,0,8,0,0 +0_309.png,22,0,4,0,0 +0_31.png,0,7,16,0,0 +0_310.png,1,5,8,0,0 +0_311.png,0,1,7,0,15 +0_312.png,0,1,14,0,10 +0_313.png,0,1,14,0,9 
+0_314.png,0,0,11,0,8 +0_315.png,0,2,10,0,18 +0_316.png,0,0,5,0,34 +0_317.png,0,0,15,0,14 +0_318.png,0,1,1,0,28 +0_319.png,0,1,3,0,20 +0_32.png,1,3,8,0,0 +0_320.png,0,5,0,0,31 +0_321.png,0,6,2,0,17 +0_322.png,16,0,0,0,0 +0_323.png,7,3,1,0,0 +0_324.png,19,3,4,0,0 +0_325.png,18,4,1,0,0 +0_326.png,15,11,2,0,0 +0_327.png,27,3,1,0,0 +0_328.png,24,0,0,0,0 +0_329.png,38,0,0,0,0 +0_33.png,11,0,1,0,0 +0_330.png,30,0,0,0,0 +0_331.png,26,2,3,0,0 +0_332.png,24,3,1,0,0 +0_333.png,28,0,5,0,0 +0_334.png,28,1,2,0,0 +0_335.png,0,24,17,0,0 +0_336.png,12,12,10,0,0 +0_337.png,4,0,1,0,0 +0_338.png,2,0,0,0,0 +0_339.png,9,0,1,0,0 +0_34.png,2,8,9,0,0 +0_340.png,7,0,0,0,0 +0_341.png,8,0,0,0,0 +0_342.png,0,2,11,0,0 +0_343.png,0,6,10,0,0 +0_344.png,0,6,5,0,0 +0_345.png,0,4,12,0,0 +0_346.png,2,0,5,0,0 +0_347.png,7,1,3,0,0 +0_348.png,5,2,2,0,0 +0_349.png,7,0,2,0,0 +0_35.png,6,2,3,0,0 +0_350.png,23,0,0,0,0 +0_351.png,22,0,0,0,0 +0_352.png,33,0,0,0,0 +0_353.png,33,0,0,0,0 +0_354.png,33,0,0,0,0 +0_355.png,26,0,0,0,0 +0_356.png,3,4,7,0,0 +0_357.png,1,0,8,0,0 +0_358.png,18,0,2,0,0 +0_359.png,1,0,3,0,0 +0_36.png,8,0,5,0,0 +0_360.png,12,2,4,0,0 +0_361.png,8,0,0,0,0 +0_362.png,6,1,5,0,0 +0_363.png,12,0,1,0,0 +0_364.png,12,0,1,0,0 +0_365.png,5,0,9,0,0 +0_366.png,11,0,4,0,0 +0_367.png,19,0,2,0,0 +0_368.png,17,0,4,0,0 +0_369.png,18,0,1,0,0 +0_37.png,2,1,8,0,0 +0_370.png,12,0,2,0,0 +0_371.png,12,0,0,0,0 +0_372.png,7,0,5,0,0 +0_373.png,14,0,0,0,0 +0_374.png,24,0,0,0,0 +0_375.png,11,0,1,0,0 +0_376.png,9,1,3,0,0 +0_377.png,14,0,2,0,0 +0_378.png,11,0,0,0,0 +0_379.png,9,0,2,0,0 +0_38.png,0,1,6,0,0 +0_380.png,11,0,1,0,0 +0_381.png,11,0,2,0,0 +0_382.png,12,0,0,0,0 +0_383.png,12,1,5,0,0 +0_384.png,21,1,7,0,0 +0_385.png,18,10,6,0,0 +0_386.png,18,7,15,0,0 +0_387.png,9,1,2,0,0 +0_388.png,27,1,0,0,0 +0_389.png,5,10,7,0,0 +0_39.png,10,0,4,0,0 +0_390.png,18,3,0,0,0 +0_391.png,9,3,3,0,0 +0_392.png,5,10,0,0,0 +0_393.png,2,7,9,0,0 +0_394.png,9,1,3,0,0 +0_395.png,0,6,10,0,0 +0_396.png,0,4,4,0,0 +0_397.png,0,8,11,0,0 +0_398.png,10,2,2,0,0 +0_399.png,5,14,2,0,0 +0_4.png,3,2,2,0,0 +0_40.png,3,1,8,0,0 +0_400.png,15,2,1,0,0 +0_401.png,7,6,5,0,0 +0_402.png,2,3,7,0,0 +0_403.png,0,12,9,0,0 +0_404.png,9,0,0,0,0 +0_405.png,11,0,2,0,0 +0_406.png,13,1,1,0,0 +0_407.png,4,2,2,0,0 +0_408.png,7,2,9,0,0 +0_409.png,8,0,7,0,0 +0_41.png,0,1,8,0,0 +0_410.png,14,1,2,0,0 +0_411.png,9,0,12,0,0 +0_412.png,0,1,7,0,0 +0_413.png,2,1,12,0,0 +0_414.png,24,0,2,0,0 +0_415.png,15,0,2,0,0 +0_416.png,17,1,3,0,0 +0_417.png,11,1,1,0,0 +0_418.png,0,0,3,0,0 +0_419.png,10,3,1,0,1 +0_42.png,9,0,5,0,0 +0_420.png,16,1,4,0,0 +0_421.png,19,0,2,0,0 +0_422.png,20,0,1,0,0 +0_423.png,6,0,5,0,0 +0_424.png,24,0,0,0,0 +0_425.png,19,2,0,0,0 +0_426.png,17,0,1,0,0 +0_427.png,25,2,1,0,0 +0_428.png,14,7,8,0,0 +0_429.png,16,0,9,0,0 +0_43.png,2,0,9,0,0 +0_430.png,27,3,4,0,0 +0_431.png,10,2,13,0,0 +0_432.png,28,0,4,0,0 +0_433.png,20,0,0,0,0 +0_434.png,26,0,0,0,0 +0_435.png,24,0,0,0,0 +0_436.png,16,0,1,0,0 +0_437.png,21,0,2,0,0 +0_438.png,24,0,4,0,0 +0_439.png,22,0,1,0,0 +0_44.png,2,0,7,0,0 +0_440.png,13,0,1,0,0 +0_441.png,0,2,9,0,0 +0_442.png,4,4,7,0,0 +0_443.png,3,1,6,0,0 +0_444.png,3,0,18,0,0 +0_445.png,0,0,6,0,0 +0_446.png,4,3,13,0,0 +0_447.png,2,3,6,0,0 +0_448.png,6,2,4,0,0 +0_449.png,10,2,3,0,0 +0_45.png,0,1,2,0,19 +0_450.png,7,1,9,0,0 +0_451.png,5,2,8,0,0 +0_452.png,6,5,8,0,0 +0_453.png,5,1,12,0,0 +0_454.png,7,5,8,0,0 +0_455.png,6,3,9,0,0 +0_456.png,0,1,12,0,0 +0_457.png,3,3,4,0,0 +0_458.png,5,3,11,0,0 +0_459.png,16,1,0,0,0 +0_46.png,0,1,7,0,21 +0_460.png,7,2,3,0,0 +0_461.png,3,2,6,0,0 
+0_462.png,0,2,10,0,0 +0_463.png,5,1,2,0,0 +0_464.png,6,2,1,0,0 +0_465.png,0,0,2,0,0 +0_466.png,1,1,5,0,0 +0_467.png,5,4,8,0,0 +0_468.png,0,0,5,0,0 +0_469.png,1,0,6,0,0 +0_47.png,0,1,8,0,14 +0_470.png,15,3,5,0,0 +0_471.png,1,5,8,0,0 +0_472.png,0,5,2,0,0 +0_473.png,12,3,2,0,0 +0_474.png,0,0,15,0,0 +0_475.png,2,12,8,0,0 +0_476.png,3,2,3,0,0 +0_477.png,7,1,7,0,0 +0_478.png,10,1,2,0,0 +0_479.png,11,1,2,0,0 +0_48.png,0,1,4,0,1 +0_480.png,11,2,6,0,0 +0_481.png,8,1,3,0,0 +0_482.png,0,4,10,0,0 +0_483.png,9,1,8,0,0 +0_484.png,7,3,6,0,0 +0_485.png,3,2,10,0,0 +0_486.png,7,3,9,0,0 +0_487.png,8,3,8,0,0 +0_488.png,11,0,3,0,0 +0_489.png,11,1,2,0,0 +0_49.png,0,0,1,0,28 +0_490.png,19,0,3,0,0 +0_491.png,21,0,0,0,0 +0_492.png,5,5,4,0,0 +0_493.png,3,1,8,0,0 +0_494.png,6,0,6,0,0 +0_495.png,8,0,2,0,0 +0_496.png,13,0,2,0,0 +0_497.png,20,1,1,0,0 +0_498.png,9,0,3,0,0 +0_499.png,15,0,1,0,0 +0_5.png,2,1,4,0,0 +0_50.png,0,0,2,0,13 +0_500.png,10,1,5,0,0 +0_501.png,0,4,8,0,0 +0_502.png,13,1,5,0,0 +0_503.png,0,2,10,0,0 +0_504.png,8,0,1,0,0 +0_505.png,8,0,5,0,0 +0_506.png,3,2,9,0,0 +0_507.png,26,2,0,0,0 +0_508.png,2,5,20,0,0 +0_509.png,35,1,1,0,0 +0_51.png,4,0,9,0,0 +0_510.png,27,0,2,0,0 +0_511.png,26,0,0,0,0 +0_512.png,6,0,9,0,0 +0_513.png,26,0,1,0,0 +0_514.png,22,4,4,0,0 +0_515.png,8,3,3,0,0 +0_516.png,16,1,1,0,0 +0_517.png,15,0,4,0,0 +0_518.png,11,0,3,0,0 +0_519.png,10,1,6,0,0 +0_52.png,3,0,1,0,0 +0_520.png,5,5,10,0,0 +0_521.png,19,0,2,0,0 +0_522.png,21,0,0,0,0 +0_523.png,18,1,2,0,0 +0_524.png,19,6,2,0,0 +0_525.png,17,4,1,0,0 +0_526.png,21,1,0,0,0 +0_527.png,30,0,1,0,0 +0_528.png,42,1,0,0,0 +0_529.png,25,0,1,0,0 +0_53.png,29,0,0,0,0 +0_530.png,26,0,0,0,0 +0_531.png,29,0,0,0,0 +0_532.png,26,3,0,0,0 +0_533.png,2,6,17,0,0 +0_534.png,13,1,10,0,0 +0_535.png,13,1,3,0,0 +0_536.png,13,4,5,0,0 +0_537.png,0,16,10,0,0 +0_538.png,0,13,9,0,0 +0_539.png,0,17,7,0,0 +0_54.png,4,1,4,0,0 +0_540.png,23,0,0,0,0 +0_541.png,0,13,7,0,0 +0_542.png,23,1,3,0,0 +0_543.png,19,8,0,0,0 +0_544.png,19,4,5,0,0 +0_545.png,25,1,5,0,0 +0_546.png,26,2,0,0,0 +0_547.png,25,2,1,0,0 +0_548.png,16,1,7,0,0 +0_549.png,11,5,5,0,0 +0_55.png,15,0,5,0,0 +0_550.png,16,2,0,0,0 +0_551.png,16,0,0,0,0 +0_552.png,4,1,16,0,0 +0_553.png,12,10,4,0,0 +0_554.png,24,0,0,0,0 +0_555.png,25,0,1,0,0 +0_556.png,10,4,8,0,0 +0_557.png,11,3,11,0,0 +0_558.png,18,0,0,0,0 +0_559.png,12,2,5,0,0 +0_56.png,4,0,12,0,0 +0_560.png,2,21,12,0,0 +0_561.png,14,3,6,0,0 +0_562.png,14,1,7,0,0 +0_563.png,20,0,2,0,0 +0_564.png,2,0,7,0,0 +0_565.png,0,1,2,0,28 +0_566.png,0,0,8,0,0 +0_567.png,0,2,1,0,9 +0_568.png,0,0,1,0,17 +0_569.png,0,0,4,0,11 +0_57.png,0,2,13,0,0 +0_570.png,0,2,5,0,9 +0_571.png,0,2,2,0,25 +0_572.png,0,4,9,0,12 +0_573.png,0,5,1,0,22 +0_574.png,0,1,1,0,32 +0_575.png,19,0,0,0,0 +0_576.png,20,0,0,0,0 +0_577.png,15,1,1,0,0 +0_578.png,20,0,2,0,0 +0_579.png,15,0,0,0,0 +0_58.png,0,0,1,0,30 +0_580.png,20,0,1,0,0 +0_581.png,11,0,0,0,0 +0_582.png,0,0,5,0,0 +0_583.png,0,1,2,0,0 +0_584.png,0,0,0,0,0 +0_585.png,19,0,0,0,0 +0_586.png,0,0,0,0,0 +0_587.png,10,0,6,0,0 +0_588.png,7,0,9,0,0 +0_589.png,10,0,1,0,0 +0_59.png,0,2,2,0,23 +0_590.png,11,0,2,0,0 +0_591.png,42,0,1,0,0 +0_592.png,1,0,3,0,0 +0_593.png,21,1,0,0,0 +0_594.png,11,0,3,0,0 +0_595.png,31,0,0,0,0 +0_596.png,32,0,0,0,0 +0_597.png,19,0,0,0,0 +0_598.png,45,1,0,0,0 +0_599.png,3,2,1,0,0 +0_6.png,5,2,2,0,0 +0_60.png,0,0,6,0,18 +0_600.png,42,0,0,0,0 +0_601.png,15,3,0,0,0 +0_602.png,34,0,0,0,0 +0_603.png,52,2,0,0,0 +0_604.png,0,0,0,0,0 +0_605.png,43,0,0,0,0 +0_606.png,33,0,0,0,0 +0_607.png,11,1,1,0,0 +0_608.png,12,1,0,0,0 +0_609.png,2,0,2,0,0 
+0_61.png,0,0,0,0,40 +0_610.png,46,2,0,0,0 +0_611.png,35,2,0,0,0 +0_612.png,12,1,1,0,0 +0_613.png,58,0,0,0,0 +0_614.png,35,2,1,0,0 +0_615.png,54,2,2,0,0 +0_616.png,17,1,1,0,0 +0_617.png,8,1,1,0,0 +0_618.png,16,0,1,0,0 +0_619.png,9,2,6,0,0 +0_62.png,0,1,8,0,9 +0_620.png,0,0,4,0,37 +0_621.png,0,1,1,0,46 +0_622.png,0,0,2,0,48 +0_623.png,0,2,2,0,34 +0_624.png,0,4,2,0,34 +0_625.png,0,1,6,0,32 +0_626.png,0,6,13,0,0 +0_627.png,2,2,3,0,0 +0_628.png,2,5,6,0,0 +0_629.png,1,3,8,0,0 +0_63.png,0,0,9,0,11 +0_630.png,1,1,5,0,0 +0_631.png,3,1,6,0,0 +0_632.png,0,5,2,0,0 +0_633.png,0,0,3,0,0 +0_634.png,0,8,5,0,0 +0_635.png,0,4,2,0,0 +0_636.png,7,0,5,0,0 +0_637.png,2,0,3,0,0 +0_638.png,11,1,6,0,0 +0_639.png,0,0,4,0,0 +0_64.png,0,0,0,0,41 +0_640.png,0,12,6,0,0 +0_641.png,2,28,3,0,0 +0_642.png,1,18,8,0,0 +0_643.png,2,3,6,0,0 +0_644.png,0,0,1,0,0 +0_645.png,2,15,2,0,0 +0_646.png,3,6,6,0,0 +0_647.png,0,36,2,0,0 +0_648.png,0,29,5,0,0 +0_649.png,4,19,7,0,0 +0_65.png,0,0,14,0,7 +0_650.png,14,3,0,0,0 +0_651.png,0,30,2,0,0 +0_652.png,3,10,12,0,0 +0_653.png,15,0,0,0,0 +0_654.png,17,0,0,0,0 +0_655.png,0,34,4,0,0 +0_656.png,0,19,3,0,0 +0_657.png,0,12,7,0,0 +0_658.png,0,6,3,0,0 +0_659.png,0,2,5,0,0 +0_66.png,0,0,2,0,39 +0_660.png,0,6,4,0,0 +0_661.png,19,1,1,0,0 +0_662.png,3,8,6,0,0 +0_663.png,1,22,13,0,0 +0_664.png,7,10,3,0,0 +0_665.png,6,12,6,0,0 +0_666.png,8,1,3,0,0 +0_667.png,11,3,6,0,0 +0_668.png,14,1,3,0,0 +0_669.png,11,4,1,0,0 +0_67.png,0,2,2,0,33 +0_670.png,4,5,13,0,0 +0_671.png,24,1,0,0,0 +0_672.png,25,0,2,0,0 +0_673.png,33,0,0,0,0 +0_674.png,32,0,0,0,0 +0_675.png,12,1,6,0,0 +0_676.png,0,1,2,0,48 +0_677.png,0,2,3,0,42 +0_678.png,0,0,7,0,22 +0_679.png,0,1,5,0,42 +0_68.png,0,0,5,0,20 +0_680.png,20,1,1,0,0 +0_681.png,21,0,0,0,0 +0_682.png,23,4,0,0,0 +0_683.png,12,1,0,0,0 +0_684.png,0,32,7,0,0 +0_685.png,0,15,12,0,0 +0_686.png,7,23,8,0,0 +0_687.png,8,7,1,0,0 +0_688.png,1,28,7,0,0 +0_689.png,7,2,1,0,0 +0_69.png,0,1,4,0,13 +0_690.png,0,35,5,0,0 +0_691.png,3,4,9,0,0 +0_692.png,1,12,5,0,0 +0_693.png,1,13,15,0,0 +0_694.png,10,0,2,0,0 +0_695.png,9,0,2,0,0 +0_696.png,13,1,3,0,0 +0_697.png,7,2,5,0,0 +0_698.png,13,0,1,0,0 +0_699.png,8,2,6,0,0 +0_7.png,8,0,2,0,0 +0_70.png,0,0,8,0,15 +0_700.png,6,2,5,0,0 +0_701.png,13,0,0,0,0 +0_702.png,0,0,9,0,0 +0_703.png,8,0,1,0,0 +0_704.png,12,0,1,0,0 +0_705.png,13,0,2,0,0 +0_706.png,6,1,2,0,0 +0_707.png,12,0,7,0,0 +0_708.png,10,0,0,0,0 +0_709.png,9,0,1,0,0 +0_71.png,0,1,13,0,22 +0_710.png,9,0,2,0,0 +0_711.png,0,1,5,0,0 +0_712.png,14,1,1,0,0 +0_713.png,14,0,1,0,0 +0_714.png,15,0,3,0,0 +0_715.png,6,1,0,0,0 +0_716.png,11,0,1,0,0 +0_717.png,23,0,0,0,0 +0_718.png,9,2,5,0,0 +0_719.png,11,0,3,0,0 +0_72.png,0,0,3,0,47 +0_720.png,13,1,1,0,0 +0_721.png,4,0,4,0,0 +0_722.png,17,0,0,0,0 +0_723.png,17,0,2,0,0 +0_724.png,8,3,2,0,0 +0_725.png,13,0,1,0,0 +0_726.png,14,0,1,0,0 +0_727.png,4,1,3,0,0 +0_728.png,12,0,2,0,0 +0_729.png,9,0,2,0,0 +0_73.png,2,0,4,0,0 +0_730.png,11,0,1,0,0 +0_731.png,14,0,2,0,0 +0_732.png,6,1,2,0,0 +0_733.png,8,0,5,0,0 +0_734.png,10,4,8,0,0 +0_735.png,9,0,11,0,0 +0_736.png,7,1,10,0,0 +0_737.png,5,0,9,0,0 +0_738.png,8,16,20,0,0 +0_739.png,16,8,7,0,0 +0_74.png,1,0,4,0,0 +0_740.png,9,19,12,0,0 +0_741.png,35,0,1,0,0 +0_742.png,27,1,2,0,0 +0_743.png,32,0,2,0,0 +0_744.png,41,2,3,0,0 +0_745.png,0,9,1,0,0 +0_746.png,0,4,7,0,0 +0_747.png,0,4,6,0,0 +0_748.png,0,0,0,0,0 +0_749.png,0,24,2,0,0 +0_75.png,10,0,2,0,0 +0_750.png,0,0,0,0,0 +0_751.png,0,15,10,0,0 +0_752.png,0,1,1,0,0 +0_753.png,0,17,6,0,0 +0_754.png,27,1,1,0,0 +0_755.png,28,0,0,0,0 +0_756.png,24,0,1,0,0 +0_757.png,4,0,4,0,0 
+0_758.png,27,0,0,0,0 +0_759.png,36,1,0,0,0 +0_76.png,7,1,3,0,0 +0_760.png,4,11,14,0,0 +0_761.png,0,4,9,0,0 +0_762.png,0,1,16,0,0 +0_763.png,0,8,11,0,0 +0_764.png,0,10,5,0,0 +0_765.png,0,9,9,0,0 +0_766.png,35,2,0,0,0 +0_767.png,31,1,0,0,0 +0_768.png,45,2,0,0,0 +0_769.png,35,9,0,0,0 +0_77.png,0,3,6,0,0 +0_770.png,20,0,4,0,0 +0_771.png,17,2,5,0,0 +0_772.png,39,1,2,0,0 +0_773.png,31,2,5,0,0 +0_774.png,0,8,2,0,0 +0_775.png,3,7,1,0,0 +0_776.png,0,6,1,7,0 +0_777.png,0,0,3,0,0 +0_778.png,0,1,2,0,0 +0_779.png,0,0,1,0,0 +0_78.png,7,0,2,0,0 +0_780.png,0,0,0,0,0 +0_781.png,25,1,0,0,0 +0_782.png,11,11,3,8,0 +0_783.png,20,1,2,0,0 +0_784.png,20,11,8,10,0 +0_785.png,35,0,2,0,0 +0_786.png,50,5,6,0,0 +0_787.png,15,0,0,0,0 +0_788.png,18,0,1,0,0 +0_789.png,19,0,2,0,0 +0_79.png,0,3,9,0,0 +0_790.png,16,0,0,1,0 +0_791.png,30,0,1,0,0 +0_792.png,12,0,0,0,0 +0_793.png,20,0,1,0,0 +0_794.png,8,1,1,0,0 +0_795.png,24,0,0,0,0 +0_796.png,26,0,0,0,0 +0_797.png,0,0,18,0,0 +0_798.png,0,3,18,0,0 +0_799.png,0,0,23,0,0 +0_8.png,2,1,8,0,0 +0_80.png,0,2,2,0,0 +0_800.png,0,0,13,0,0 +0_801.png,0,1,25,0,0 +0_802.png,0,3,11,0,0 +0_803.png,0,4,11,0,0 +0_804.png,0,1,4,0,0 +0_805.png,0,6,7,0,0 +0_806.png,0,7,16,0,0 +0_807.png,0,2,14,0,0 +0_808.png,0,1,1,0,0 +0_809.png,0,0,1,0,0 +0_81.png,2,0,8,0,0 +0_810.png,0,1,1,0,0 +0_811.png,0,0,0,0,0 +0_812.png,0,0,0,0,0 +0_813.png,0,0,0,0,0 +0_814.png,0,1,1,0,0 +0_815.png,0,5,4,0,0 +0_816.png,0,2,0,0,0 +0_817.png,0,106,2,0,0 +0_818.png,0,75,17,0,0 +0_819.png,0,114,4,0,0 +0_82.png,4,0,7,0,0 +0_820.png,0,0,2,0,0 +0_821.png,0,1,1,0,0 +0_822.png,2,1,7,0,0 +0_823.png,20,0,0,0,0 +0_824.png,17,1,0,0,0 +0_825.png,0,105,0,0,0 +0_826.png,0,76,9,0,0 +0_827.png,0,82,11,0,0 +0_828.png,0,0,0,0,0 +0_829.png,0,0,1,0,0 +0_83.png,0,0,2,0,23 +0_830.png,0,0,0,0,0 +0_831.png,0,0,1,0,0 +0_832.png,0,0,0,0,0 +0_833.png,0,0,0,0,0 +0_834.png,15,0,19,0,0 +0_835.png,34,0,14,0,0 +0_836.png,4,0,43,0,0 +0_837.png,0,4,2,0,0 +0_838.png,0,3,9,0,0 +0_839.png,0,4,6,0,0 +0_84.png,0,0,12,0,37 +0_840.png,4,9,7,0,0 +0_841.png,0,6,12,0,0 +0_842.png,0,23,7,0,0 +0_843.png,0,14,22,0,0 +0_844.png,0,78,2,0,0 +0_845.png,0,81,5,0,0 +0_846.png,0,10,17,0,0 +0_847.png,0,3,21,0,0 +0_848.png,0,10,21,0,0 +0_849.png,0,3,19,0,0 +0_85.png,0,0,11,0,31 +0_850.png,0,2,22,0,0 +0_851.png,19,2,0,0,0 +0_852.png,20,0,0,9,0 +0_853.png,32,0,1,0,0 +0_854.png,0,3,18,0,0 +0_855.png,3,2,15,0,0 +0_856.png,11,2,6,0,0 +0_857.png,0,4,18,0,0 +0_858.png,20,1,10,0,0 +0_859.png,12,6,21,0,0 +0_86.png,0,1,0,0,65 +0_860.png,24,1,3,0,0 +0_861.png,34,0,3,0,0 +0_862.png,19,0,2,0,0 +0_863.png,13,1,11,0,0 +0_864.png,0,6,31,0,0 +0_865.png,15,3,2,0,0 +0_866.png,34,6,9,0,0 +0_867.png,19,0,15,0,0 +0_868.png,11,0,14,0,0 +0_869.png,28,0,0,0,0 +0_87.png,0,0,9,0,19 +0_870.png,29,0,6,0,0 +0_871.png,35,3,0,0,0 +0_872.png,26,1,7,0,0 +0_873.png,18,0,0,0,0 +0_874.png,27,1,10,0,0 +0_875.png,37,0,0,0,0 +0_876.png,30,0,0,0,0 +0_877.png,1,10,17,0,0 +0_878.png,10,3,15,0,0 +0_879.png,36,0,2,0,0 +0_88.png,0,0,3,0,37 +0_880.png,27,6,3,0,0 +0_881.png,29,0,5,0,0 +0_882.png,18,5,10,0,0 +0_883.png,21,0,5,0,0 +0_884.png,23,0,0,0,0 +0_885.png,13,0,11,0,0 +0_886.png,22,9,8,0,0 +0_887.png,20,0,1,0,0 +0_888.png,14,0,4,0,0 +0_889.png,37,3,1,0,0 +0_89.png,0,0,11,0,12 +0_890.png,32,4,6,0,0 +0_891.png,29,2,5,0,0 +0_892.png,10,24,17,0,0 +0_893.png,16,1,8,0,0 +0_894.png,34,0,2,0,0 +0_895.png,14,2,15,0,0 +0_896.png,30,1,8,0,0 +0_897.png,0,14,13,0,0 +0_898.png,9,0,5,0,0 +0_899.png,13,4,1,0,0 +0_9.png,5,1,7,0,0 +0_90.png,0,0,0,0,52 +0_900.png,0,6,9,0,18 +0_901.png,1,2,21,0,3 +0_902.png,0,5,9,0,31 +0_903.png,28,1,4,0,0 
+0_904.png,20,6,11,0,0 +0_905.png,44,14,5,0,0 +0_906.png,43,3,5,0,0 +0_907.png,27,0,0,0,0 +0_908.png,37,0,2,0,0 +0_909.png,24,0,3,0,0 +0_91.png,0,0,1,0,30 +0_910.png,25,1,7,0,0 +0_911.png,41,0,0,0,0 +0_912.png,22,0,1,0,0 +0_913.png,14,0,9,0,0 +0_914.png,10,1,6,0,0 +0_915.png,18,1,2,0,0 +0_916.png,18,0,5,0,0 +0_917.png,13,0,1,0,0 +0_918.png,22,1,9,0,0 +0_919.png,33,0,4,0,0 +0_92.png,0,0,1,0,37 +0_920.png,19,0,3,0,0 +0_921.png,24,0,0,0,0 +0_922.png,22,0,5,0,0 +0_923.png,19,1,0,0,0 +0_924.png,1,14,10,0,0 +0_925.png,8,3,3,0,0 +0_926.png,15,0,3,0,0 +0_927.png,16,0,0,0,0 +0_928.png,14,4,8,0,0 +0_929.png,7,1,5,0,0 +0_93.png,4,1,7,0,0 +0_930.png,3,2,15,0,23 +0_931.png,1,0,6,0,40 +0_932.png,20,1,2,0,4 +0_933.png,33,0,2,0,0 +0_934.png,29,0,6,0,0 +0_935.png,26,0,2,0,0 +0_936.png,33,0,1,0,0 +0_937.png,9,0,10,0,0 +0_938.png,18,0,7,0,0 +0_939.png,17,0,6,0,0 +0_94.png,5,0,9,0,0 +0_940.png,15,0,10,0,0 +0_941.png,30,2,1,0,0 +0_942.png,39,0,0,0,0 +0_943.png,30,1,3,0,0 +0_944.png,31,5,2,0,0 +0_945.png,29,1,5,0,0 +0_946.png,20,2,9,0,0 +0_947.png,39,0,4,0,0 +0_948.png,10,0,7,0,0 +0_949.png,20,0,6,0,0 +0_95.png,24,1,0,0,0 +0_950.png,20,0,6,0,0 +0_951.png,19,0,8,0,0 +0_952.png,38,0,2,0,0 +0_953.png,65,3,1,0,0 +0_954.png,54,17,2,0,0 +0_955.png,45,12,0,0,0 +0_956.png,27,34,4,0,0 +0_957.png,90,0,0,0,0 +0_958.png,100,0,0,0,0 +0_959.png,119,1,0,0,0 +0_96.png,14,0,2,0,0 +0_960.png,91,1,0,0,0 +0_961.png,50,29,36,0,0 +0_962.png,0,112,1,0,0 +0_963.png,26,0,0,0,0 +0_964.png,30,1,1,0,0 +0_965.png,10,0,0,9,0 +0_966.png,0,26,23,0,0 +0_967.png,0,72,6,0,0 +0_968.png,0,91,3,0,0 +0_969.png,4,0,7,0,0 +0_97.png,13,1,11,0,0 +0_970.png,0,0,14,0,0 +0_971.png,14,0,0,0,0 +0_972.png,3,0,11,0,0 +0_973.png,9,0,4,0,0 +0_974.png,2,0,7,0,0 +0_975.png,13,6,14,0,0 +0_976.png,14,1,6,0,0 +0_977.png,28,1,2,0,0 +0_978.png,34,0,0,0,0 +0_979.png,39,0,0,0,0 +0_98.png,10,2,4,0,0 +0_980.png,56,0,0,0,0 +0_981.png,56,0,1,0,0 +0_982.png,38,0,0,0,0 +0_983.png,45,0,2,0,0 +0_984.png,53,1,0,0,0 +0_985.png,48,1,0,0,0 +0_986.png,43,0,0,0,0 +0_987.png,49,0,1,0,0 +0_988.png,46,0,0,0,0 +0_989.png,48,0,0,0,0 +0_99.png,2,5,8,0,0 +0_990.png,46,0,0,0,0 +0_991.png,53,0,1,0,0 +0_992.png,41,0,0,0,0 +0_993.png,37,1,0,0,0 +0_994.png,42,0,0,0,0 +0_995.png,0,0,9,0,0 +0_996.png,0,0,0,0,0 +0_997.png,0,0,1,0,0 +0_998.png,0,0,0,0,0 +0_999.png,0,0,3,0,0 diff --git a/docs/datasets/PanNuke/fold0/types.csv b/docs/datasets/PanNuke/fold0/types.csv new file mode 100644 index 0000000000000000000000000000000000000000..e2bdbd5e80f8666816544d057b8ad8f53eb9082f --- /dev/null +++ b/docs/datasets/PanNuke/fold0/types.csv @@ -0,0 +1,2657 @@ +img,type +0_0.png,Breast +0_1.png,Breast +0_2.png,Breast +0_3.png,Breast +0_4.png,Breast +0_5.png,Breast +0_6.png,Breast +0_7.png,Breast +0_8.png,Breast +0_9.png,Breast +0_10.png,Breast +0_11.png,Breast +0_12.png,Breast +0_13.png,Breast +0_14.png,Breast +0_15.png,Breast +0_16.png,Breast +0_17.png,Breast +0_18.png,Breast +0_19.png,Breast +0_20.png,Breast +0_21.png,Breast +0_22.png,Breast +0_23.png,Breast +0_24.png,Breast +0_25.png,Breast +0_26.png,Breast +0_27.png,Breast +0_28.png,Breast +0_29.png,Breast +0_30.png,Breast +0_31.png,Breast +0_32.png,Breast +0_33.png,Breast +0_34.png,Breast +0_35.png,Breast +0_36.png,Breast +0_37.png,Breast +0_38.png,Breast +0_39.png,Breast +0_40.png,Breast +0_41.png,Breast +0_42.png,Breast +0_43.png,Breast +0_44.png,Breast +0_45.png,Breast +0_46.png,Breast +0_47.png,Breast +0_48.png,Breast +0_49.png,Breast +0_50.png,Breast +0_51.png,Breast +0_52.png,Breast +0_53.png,Breast +0_54.png,Breast +0_55.png,Breast +0_56.png,Breast 
+0_57.png,Breast +0_58.png,Breast +0_59.png,Breast +0_60.png,Breast +0_61.png,Breast +0_62.png,Breast +0_63.png,Breast +0_64.png,Breast +0_65.png,Breast +0_66.png,Breast +0_67.png,Breast +0_68.png,Breast +0_69.png,Breast +0_70.png,Breast +0_71.png,Breast +0_72.png,Breast +0_73.png,Breast +0_74.png,Breast +0_75.png,Breast +0_76.png,Breast +0_77.png,Breast +0_78.png,Breast +0_79.png,Breast +0_80.png,Breast +0_81.png,Breast +0_82.png,Breast +0_83.png,Breast +0_84.png,Breast +0_85.png,Breast +0_86.png,Breast +0_87.png,Breast +0_88.png,Breast +0_89.png,Breast +0_90.png,Breast +0_91.png,Breast +0_92.png,Breast +0_93.png,Breast +0_94.png,Breast +0_95.png,Breast +0_96.png,Breast +0_97.png,Breast +0_98.png,Breast +0_99.png,Breast +0_100.png,Breast +0_101.png,Breast +0_102.png,Breast +0_103.png,Breast +0_104.png,Breast +0_105.png,Breast +0_106.png,Breast +0_107.png,Breast +0_108.png,Breast +0_109.png,Breast +0_110.png,Breast +0_111.png,Breast +0_112.png,Breast +0_113.png,Breast +0_114.png,Breast +0_115.png,Breast +0_116.png,Breast +0_117.png,Breast +0_118.png,Breast +0_119.png,Breast +0_120.png,Breast +0_121.png,Breast +0_122.png,Breast +0_123.png,Breast +0_124.png,Breast +0_125.png,Breast +0_126.png,Breast +0_127.png,Breast +0_128.png,Breast +0_129.png,Breast +0_130.png,Breast +0_131.png,Breast +0_132.png,Breast +0_133.png,Breast +0_134.png,Breast +0_135.png,Breast +0_136.png,Breast +0_137.png,Breast +0_138.png,Breast +0_139.png,Breast +0_140.png,Breast +0_141.png,Breast +0_142.png,Breast +0_143.png,Breast +0_144.png,Breast +0_145.png,Breast +0_146.png,Breast +0_147.png,Breast +0_148.png,Breast +0_149.png,Breast +0_150.png,Breast +0_151.png,Breast +0_152.png,Breast +0_153.png,Breast +0_154.png,Breast +0_155.png,Breast +0_156.png,Breast +0_157.png,Breast +0_158.png,Breast +0_159.png,Breast +0_160.png,Breast +0_161.png,Breast +0_162.png,Breast +0_163.png,Breast +0_164.png,Breast +0_165.png,Breast +0_166.png,Breast +0_167.png,Breast +0_168.png,Breast +0_169.png,Breast +0_170.png,Breast +0_171.png,Breast +0_172.png,Breast +0_173.png,Breast +0_174.png,Breast +0_175.png,Breast +0_176.png,Breast +0_177.png,Breast +0_178.png,Breast +0_179.png,Breast +0_180.png,Breast +0_181.png,Breast +0_182.png,Breast +0_183.png,Breast +0_184.png,Breast +0_185.png,Breast +0_186.png,Breast +0_187.png,Breast +0_188.png,Breast +0_189.png,Breast +0_190.png,Breast +0_191.png,Breast +0_192.png,Breast +0_193.png,Breast +0_194.png,Breast +0_195.png,Breast +0_196.png,Breast +0_197.png,Breast +0_198.png,Breast +0_199.png,Breast +0_200.png,Breast +0_201.png,Breast +0_202.png,Breast +0_203.png,Breast +0_204.png,Breast +0_205.png,Breast +0_206.png,Breast +0_207.png,Breast +0_208.png,Breast +0_209.png,Breast +0_210.png,Breast +0_211.png,Breast +0_212.png,Breast +0_213.png,Breast +0_214.png,Breast +0_215.png,Breast +0_216.png,Breast +0_217.png,Breast +0_218.png,Breast +0_219.png,Breast +0_220.png,Breast +0_221.png,Breast +0_222.png,Breast +0_223.png,Breast +0_224.png,Breast +0_225.png,Breast +0_226.png,Breast +0_227.png,Breast +0_228.png,Breast +0_229.png,Breast +0_230.png,Breast +0_231.png,Breast +0_232.png,Breast +0_233.png,Breast +0_234.png,Breast +0_235.png,Breast +0_236.png,Breast +0_237.png,Breast +0_238.png,Breast +0_239.png,Breast +0_240.png,Breast +0_241.png,Breast +0_242.png,Breast +0_243.png,Breast +0_244.png,Breast +0_245.png,Breast +0_246.png,Breast +0_247.png,Breast +0_248.png,Breast +0_249.png,Breast +0_250.png,Breast +0_251.png,Breast +0_252.png,Breast +0_253.png,Breast +0_254.png,Breast +0_255.png,Breast 
+0_256.png,Breast +0_257.png,Breast +0_258.png,Breast +0_259.png,Breast +0_260.png,Breast +0_261.png,Breast +0_262.png,Breast +0_263.png,Breast +0_264.png,Breast +0_265.png,Breast +0_266.png,Breast +0_267.png,Breast +0_268.png,Breast +0_269.png,Breast +0_270.png,Breast +0_271.png,Breast +0_272.png,Breast +0_273.png,Breast +0_274.png,Breast +0_275.png,Breast +0_276.png,Breast +0_277.png,Breast +0_278.png,Breast +0_279.png,Breast +0_280.png,Breast +0_281.png,Breast +0_282.png,Breast +0_283.png,Breast +0_284.png,Breast +0_285.png,Breast +0_286.png,Breast +0_287.png,Breast +0_288.png,Breast +0_289.png,Breast +0_290.png,Breast +0_291.png,Breast +0_292.png,Breast +0_293.png,Breast +0_294.png,Breast +0_295.png,Breast +0_296.png,Breast +0_297.png,Breast +0_298.png,Breast +0_299.png,Breast +0_300.png,Breast +0_301.png,Breast +0_302.png,Breast +0_303.png,Breast +0_304.png,Breast +0_305.png,Breast +0_306.png,Breast +0_307.png,Breast +0_308.png,Breast +0_309.png,Breast +0_310.png,Breast +0_311.png,Breast +0_312.png,Breast +0_313.png,Breast +0_314.png,Breast +0_315.png,Breast +0_316.png,Breast +0_317.png,Breast +0_318.png,Breast +0_319.png,Breast +0_320.png,Breast +0_321.png,Breast +0_322.png,Breast +0_323.png,Breast +0_324.png,Breast +0_325.png,Breast +0_326.png,Breast +0_327.png,Breast +0_328.png,Breast +0_329.png,Breast +0_330.png,Breast +0_331.png,Breast +0_332.png,Breast +0_333.png,Breast +0_334.png,Breast +0_335.png,Breast +0_336.png,Breast +0_337.png,Breast +0_338.png,Breast +0_339.png,Breast +0_340.png,Breast +0_341.png,Breast +0_342.png,Breast +0_343.png,Breast +0_344.png,Breast +0_345.png,Breast +0_346.png,Breast +0_347.png,Breast +0_348.png,Breast +0_349.png,Breast +0_350.png,Breast +0_351.png,Breast +0_352.png,Breast +0_353.png,Breast +0_354.png,Breast +0_355.png,Breast +0_356.png,Breast +0_357.png,Breast +0_358.png,Breast +0_359.png,Breast +0_360.png,Breast +0_361.png,Breast +0_362.png,Breast +0_363.png,Breast +0_364.png,Breast +0_365.png,Breast +0_366.png,Breast +0_367.png,Breast +0_368.png,Breast +0_369.png,Breast +0_370.png,Breast +0_371.png,Breast +0_372.png,Breast +0_373.png,Breast +0_374.png,Breast +0_375.png,Breast +0_376.png,Breast +0_377.png,Breast +0_378.png,Breast +0_379.png,Breast +0_380.png,Breast +0_381.png,Breast +0_382.png,Breast +0_383.png,Breast +0_384.png,Breast +0_385.png,Breast +0_386.png,Breast +0_387.png,Breast +0_388.png,Breast +0_389.png,Breast +0_390.png,Breast +0_391.png,Breast +0_392.png,Breast +0_393.png,Breast +0_394.png,Breast +0_395.png,Breast +0_396.png,Breast +0_397.png,Breast +0_398.png,Breast +0_399.png,Breast +0_400.png,Breast +0_401.png,Breast +0_402.png,Breast +0_403.png,Breast +0_404.png,Breast +0_405.png,Breast +0_406.png,Breast +0_407.png,Breast +0_408.png,Breast +0_409.png,Breast +0_410.png,Breast +0_411.png,Breast +0_412.png,Breast +0_413.png,Breast +0_414.png,Breast +0_415.png,Breast +0_416.png,Breast +0_417.png,Breast +0_418.png,Breast +0_419.png,Breast +0_420.png,Breast +0_421.png,Breast +0_422.png,Breast +0_423.png,Breast +0_424.png,Breast +0_425.png,Breast +0_426.png,Breast +0_427.png,Breast +0_428.png,Breast +0_429.png,Breast +0_430.png,Breast +0_431.png,Breast +0_432.png,Breast +0_433.png,Breast +0_434.png,Breast +0_435.png,Breast +0_436.png,Breast +0_437.png,Breast +0_438.png,Breast +0_439.png,Breast +0_440.png,Breast +0_441.png,Breast +0_442.png,Breast +0_443.png,Breast +0_444.png,Breast +0_445.png,Breast +0_446.png,Breast +0_447.png,Breast +0_448.png,Breast +0_449.png,Breast +0_450.png,Breast +0_451.png,Breast +0_452.png,Breast 
+0_453.png,Breast +0_454.png,Breast +0_455.png,Breast +0_456.png,Breast +0_457.png,Breast +0_458.png,Breast +0_459.png,Breast +0_460.png,Breast +0_461.png,Breast +0_462.png,Breast +0_463.png,Breast +0_464.png,Breast +0_465.png,Breast +0_466.png,Breast +0_467.png,Breast +0_468.png,Breast +0_469.png,Breast +0_470.png,Breast +0_471.png,Breast +0_472.png,Breast +0_473.png,Breast +0_474.png,Breast +0_475.png,Breast +0_476.png,Breast +0_477.png,Breast +0_478.png,Breast +0_479.png,Breast +0_480.png,Breast +0_481.png,Breast +0_482.png,Breast +0_483.png,Breast +0_484.png,Breast +0_485.png,Breast +0_486.png,Breast +0_487.png,Breast +0_488.png,Breast +0_489.png,Breast +0_490.png,Breast +0_491.png,Breast +0_492.png,Breast +0_493.png,Breast +0_494.png,Breast +0_495.png,Breast +0_496.png,Breast +0_497.png,Breast +0_498.png,Breast +0_499.png,Breast +0_500.png,Breast +0_501.png,Breast +0_502.png,Breast +0_503.png,Breast +0_504.png,Breast +0_505.png,Breast +0_506.png,Breast +0_507.png,Breast +0_508.png,Breast +0_509.png,Breast +0_510.png,Breast +0_511.png,Breast +0_512.png,Breast +0_513.png,Breast +0_514.png,Breast +0_515.png,Breast +0_516.png,Breast +0_517.png,Breast +0_518.png,Breast +0_519.png,Breast +0_520.png,Breast +0_521.png,Breast +0_522.png,Breast +0_523.png,Breast +0_524.png,Breast +0_525.png,Breast +0_526.png,Breast +0_527.png,Breast +0_528.png,Breast +0_529.png,Breast +0_530.png,Breast +0_531.png,Breast +0_532.png,Breast +0_533.png,Breast +0_534.png,Breast +0_535.png,Breast +0_536.png,Breast +0_537.png,Breast +0_538.png,Breast +0_539.png,Breast +0_540.png,Breast +0_541.png,Breast +0_542.png,Breast +0_543.png,Breast +0_544.png,Breast +0_545.png,Breast +0_546.png,Breast +0_547.png,Breast +0_548.png,Breast +0_549.png,Breast +0_550.png,Breast +0_551.png,Breast +0_552.png,Breast +0_553.png,Breast +0_554.png,Breast +0_555.png,Breast +0_556.png,Breast +0_557.png,Breast +0_558.png,Breast +0_559.png,Breast +0_560.png,Breast +0_561.png,Breast +0_562.png,Breast +0_563.png,Breast +0_564.png,Breast +0_565.png,Breast +0_566.png,Breast +0_567.png,Breast +0_568.png,Breast +0_569.png,Breast +0_570.png,Breast +0_571.png,Breast +0_572.png,Breast +0_573.png,Breast +0_574.png,Breast +0_575.png,Breast +0_576.png,Breast +0_577.png,Breast +0_578.png,Breast +0_579.png,Breast +0_580.png,Breast +0_581.png,Breast +0_582.png,Breast +0_583.png,Breast +0_584.png,Breast +0_585.png,Breast +0_586.png,Breast +0_587.png,Breast +0_588.png,Breast +0_589.png,Breast +0_590.png,Breast +0_591.png,Breast +0_592.png,Breast +0_593.png,Breast +0_594.png,Breast +0_595.png,Breast +0_596.png,Breast +0_597.png,Breast +0_598.png,Breast +0_599.png,Breast +0_600.png,Breast +0_601.png,Breast +0_602.png,Breast +0_603.png,Breast +0_604.png,Breast +0_605.png,Breast +0_606.png,Breast +0_607.png,Breast +0_608.png,Breast +0_609.png,Breast +0_610.png,Breast +0_611.png,Breast +0_612.png,Breast +0_613.png,Breast +0_614.png,Breast +0_615.png,Breast +0_616.png,Breast +0_617.png,Breast +0_618.png,Breast +0_619.png,Breast +0_620.png,Breast +0_621.png,Breast +0_622.png,Breast +0_623.png,Breast +0_624.png,Breast +0_625.png,Breast +0_626.png,Breast +0_627.png,Breast +0_628.png,Breast +0_629.png,Breast +0_630.png,Breast +0_631.png,Breast +0_632.png,Breast +0_633.png,Breast +0_634.png,Breast +0_635.png,Breast +0_636.png,Breast +0_637.png,Breast +0_638.png,Breast +0_639.png,Breast +0_640.png,Breast +0_641.png,Breast +0_642.png,Breast +0_643.png,Breast +0_644.png,Breast +0_645.png,Breast +0_646.png,Breast +0_647.png,Breast +0_648.png,Breast +0_649.png,Breast 
+0_650.png,Breast +0_651.png,Breast +0_652.png,Breast +0_653.png,Breast +0_654.png,Breast +0_655.png,Breast +0_656.png,Breast +0_657.png,Breast +0_658.png,Breast +0_659.png,Breast +0_660.png,Breast +0_661.png,Breast +0_662.png,Breast +0_663.png,Breast +0_664.png,Breast +0_665.png,Breast +0_666.png,Breast +0_667.png,Breast +0_668.png,Breast +0_669.png,Breast +0_670.png,Breast +0_671.png,Breast +0_672.png,Breast +0_673.png,Breast +0_674.png,Breast +0_675.png,Breast +0_676.png,Breast +0_677.png,Breast +0_678.png,Breast +0_679.png,Breast +0_680.png,Breast +0_681.png,Breast +0_682.png,Breast +0_683.png,Breast +0_684.png,Breast +0_685.png,Breast +0_686.png,Breast +0_687.png,Breast +0_688.png,Breast +0_689.png,Breast +0_690.png,Breast +0_691.png,Breast +0_692.png,Breast +0_693.png,Breast +0_694.png,Breast +0_695.png,Breast +0_696.png,Breast +0_697.png,Breast +0_698.png,Breast +0_699.png,Breast +0_700.png,Breast +0_701.png,Breast +0_702.png,Breast +0_703.png,Breast +0_704.png,Breast +0_705.png,Breast +0_706.png,Breast +0_707.png,Breast +0_708.png,Breast +0_709.png,Breast +0_710.png,Breast +0_711.png,Breast +0_712.png,Breast +0_713.png,Breast +0_714.png,Breast +0_715.png,Breast +0_716.png,Breast +0_717.png,Breast +0_718.png,Breast +0_719.png,Breast +0_720.png,Breast +0_721.png,Breast +0_722.png,Breast +0_723.png,Breast +0_724.png,Breast +0_725.png,Breast +0_726.png,Breast +0_727.png,Breast +0_728.png,Breast +0_729.png,Breast +0_730.png,Breast +0_731.png,Breast +0_732.png,Breast +0_733.png,Breast +0_734.png,Breast +0_735.png,Breast +0_736.png,Breast +0_737.png,Breast +0_738.png,Colon +0_739.png,Colon +0_740.png,Colon +0_741.png,Colon +0_742.png,Colon +0_743.png,Colon +0_744.png,Colon +0_745.png,Colon +0_746.png,Colon +0_747.png,Colon +0_748.png,Colon +0_749.png,Colon +0_750.png,Colon +0_751.png,Colon +0_752.png,Colon +0_753.png,Colon +0_754.png,Colon +0_755.png,Colon +0_756.png,Colon +0_757.png,Colon +0_758.png,Colon +0_759.png,Colon +0_760.png,Colon +0_761.png,Colon +0_762.png,Colon +0_763.png,Colon +0_764.png,Colon +0_765.png,Colon +0_766.png,Colon +0_767.png,Colon +0_768.png,Colon +0_769.png,Colon +0_770.png,Colon +0_771.png,Colon +0_772.png,Colon +0_773.png,Colon +0_774.png,Colon +0_775.png,Colon +0_776.png,Colon +0_777.png,Colon +0_778.png,Colon +0_779.png,Colon +0_780.png,Colon +0_781.png,Colon +0_782.png,Colon +0_783.png,Colon +0_784.png,Colon +0_785.png,Colon +0_786.png,Colon +0_787.png,Colon +0_788.png,Colon +0_789.png,Colon +0_790.png,Colon +0_791.png,Colon +0_792.png,Colon +0_793.png,Colon +0_794.png,Colon +0_795.png,Colon +0_796.png,Colon +0_797.png,Colon +0_798.png,Colon +0_799.png,Colon +0_800.png,Colon +0_801.png,Colon +0_802.png,Colon +0_803.png,Colon +0_804.png,Colon +0_805.png,Colon +0_806.png,Colon +0_807.png,Colon +0_808.png,Colon +0_809.png,Colon +0_810.png,Colon +0_811.png,Colon +0_812.png,Colon +0_813.png,Colon +0_814.png,Colon +0_815.png,Colon +0_816.png,Colon +0_817.png,Colon +0_818.png,Colon +0_819.png,Colon +0_820.png,Colon +0_821.png,Colon +0_822.png,Colon +0_823.png,Colon +0_824.png,Colon +0_825.png,Colon +0_826.png,Colon +0_827.png,Colon +0_828.png,Colon +0_829.png,Colon +0_830.png,Colon +0_831.png,Colon +0_832.png,Colon +0_833.png,Colon +0_834.png,Colon +0_835.png,Colon +0_836.png,Colon +0_837.png,Colon +0_838.png,Colon +0_839.png,Colon +0_840.png,Colon +0_841.png,Colon +0_842.png,Colon +0_843.png,Colon +0_844.png,Colon +0_845.png,Colon +0_846.png,Colon +0_847.png,Colon +0_848.png,Colon +0_849.png,Colon +0_850.png,Colon +0_851.png,Colon +0_852.png,Colon 
+0_853.png,Colon +0_854.png,Colon +0_855.png,Colon +0_856.png,Colon +0_857.png,Colon +0_858.png,Colon +0_859.png,Colon +0_860.png,Colon +0_861.png,Colon +0_862.png,Colon +0_863.png,Colon +0_864.png,Colon +0_865.png,Colon +0_866.png,Colon +0_867.png,Lung +0_868.png,Lung +0_869.png,Lung +0_870.png,Lung +0_871.png,Lung +0_872.png,Lung +0_873.png,Lung +0_874.png,Lung +0_875.png,Lung +0_876.png,Lung +0_877.png,Lung +0_878.png,Lung +0_879.png,Lung +0_880.png,Lung +0_881.png,Lung +0_882.png,Lung +0_883.png,Lung +0_884.png,Lung +0_885.png,Lung +0_886.png,Lung +0_887.png,Lung +0_888.png,Lung +0_889.png,Lung +0_890.png,Lung +0_891.png,Lung +0_892.png,Lung +0_893.png,Lung +0_894.png,Lung +0_895.png,Lung +0_896.png,Lung +0_897.png,Breast +0_898.png,Breast +0_899.png,Breast +0_900.png,Breast +0_901.png,Breast +0_902.png,Breast +0_903.png,Breast +0_904.png,Breast +0_905.png,Breast +0_906.png,Breast +0_907.png,Breast +0_908.png,Breast +0_909.png,Colon +0_910.png,Colon +0_911.png,Colon +0_912.png,Kidney +0_913.png,Kidney +0_914.png,Kidney +0_915.png,Kidney +0_916.png,Kidney +0_917.png,Kidney +0_918.png,Kidney +0_919.png,Prostate +0_920.png,Prostate +0_921.png,Prostate +0_922.png,Prostate +0_923.png,Bladder +0_924.png,Breast +0_925.png,Breast +0_926.png,Breast +0_927.png,Breast +0_928.png,Breast +0_929.png,Breast +0_930.png,Breast +0_931.png,Breast +0_932.png,Bladder +0_933.png,Prostate +0_934.png,Prostate +0_935.png,Prostate +0_936.png,Prostate +0_937.png,Prostate +0_938.png,Prostate +0_939.png,Prostate +0_940.png,Prostate +0_941.png,Prostate +0_942.png,Prostate +0_943.png,Prostate +0_944.png,Prostate +0_945.png,Prostate +0_946.png,Prostate +0_947.png,Prostate +0_948.png,Prostate +0_949.png,Prostate +0_950.png,Prostate +0_951.png,Prostate +0_952.png,Prostate +0_953.png,Kidney +0_954.png,Kidney +0_955.png,Kidney +0_956.png,Kidney +0_957.png,Kidney +0_958.png,Kidney +0_959.png,Kidney +0_960.png,Kidney +0_961.png,Kidney +0_962.png,Stomach +0_963.png,Colon +0_964.png,Colon +0_965.png,Colon +0_966.png,Stomach +0_967.png,Stomach +0_968.png,Stomach +0_969.png,Ovarian +0_970.png,Ovarian +0_971.png,Ovarian +0_972.png,Ovarian +0_973.png,Ovarian +0_974.png,Ovarian +0_975.png,Ovarian +0_976.png,Ovarian +0_977.png,Ovarian +0_978.png,Esophagus +0_979.png,Esophagus +0_980.png,Esophagus +0_981.png,Esophagus +0_982.png,Esophagus +0_983.png,Esophagus +0_984.png,Esophagus +0_985.png,Esophagus +0_986.png,Esophagus +0_987.png,Esophagus +0_988.png,Esophagus +0_989.png,Esophagus +0_990.png,Esophagus +0_991.png,Esophagus +0_992.png,Esophagus +0_993.png,Esophagus +0_994.png,Esophagus +0_995.png,Esophagus +0_996.png,Esophagus +0_997.png,Esophagus +0_998.png,Esophagus +0_999.png,Esophagus +0_1000.png,Esophagus +0_1001.png,Esophagus +0_1002.png,Esophagus +0_1003.png,Esophagus +0_1004.png,Esophagus +0_1005.png,Esophagus +0_1006.png,Esophagus +0_1007.png,Esophagus +0_1008.png,Esophagus +0_1009.png,Esophagus +0_1010.png,Esophagus +0_1011.png,Esophagus +0_1012.png,Esophagus +0_1013.png,Esophagus +0_1014.png,Esophagus +0_1015.png,Esophagus +0_1016.png,Esophagus +0_1017.png,Esophagus +0_1018.png,Pancreatic +0_1019.png,Pancreatic +0_1020.png,Pancreatic +0_1021.png,Pancreatic +0_1022.png,Pancreatic +0_1023.png,Pancreatic +0_1024.png,Pancreatic +0_1025.png,Lung +0_1026.png,Lung +0_1027.png,Lung +0_1028.png,Lung +0_1029.png,Lung +0_1030.png,Lung +0_1031.png,Lung +0_1032.png,Lung +0_1033.png,Lung +0_1034.png,Lung +0_1035.png,Lung +0_1036.png,Lung +0_1037.png,Lung +0_1038.png,Lung +0_1039.png,Lung +0_1040.png,Lung +0_1041.png,Lung 
+0_1042.png,Lung +0_1043.png,Lung +0_1044.png,Lung +0_1045.png,Lung +0_1046.png,Lung +0_1047.png,Lung +0_1048.png,Lung +0_1049.png,Lung +0_1050.png,Lung +0_1051.png,Lung +0_1052.png,Lung +0_1053.png,Lung +0_1054.png,Lung +0_1055.png,Lung +0_1056.png,Lung +0_1057.png,Lung +0_1058.png,Lung +0_1059.png,Lung +0_1060.png,Lung +0_1061.png,Uterus +0_1062.png,Uterus +0_1063.png,Uterus +0_1064.png,Thyroid +0_1065.png,Thyroid +0_1066.png,Thyroid +0_1067.png,Thyroid +0_1068.png,Thyroid +0_1069.png,Thyroid +0_1070.png,Thyroid +0_1071.png,Thyroid +0_1072.png,Thyroid +0_1073.png,Thyroid +0_1074.png,Thyroid +0_1075.png,Thyroid +0_1076.png,Thyroid +0_1077.png,Thyroid +0_1078.png,Thyroid +0_1079.png,Thyroid +0_1080.png,Thyroid +0_1081.png,Thyroid +0_1082.png,Thyroid +0_1083.png,Thyroid +0_1084.png,Thyroid +0_1085.png,Skin +0_1086.png,Skin +0_1087.png,Skin +0_1088.png,Skin +0_1089.png,Skin +0_1090.png,Skin +0_1091.png,Cervix +0_1092.png,Cervix +0_1093.png,Cervix +0_1094.png,Cervix +0_1095.png,Cervix +0_1096.png,Cervix +0_1097.png,Thyroid +0_1098.png,Thyroid +0_1099.png,Thyroid +0_1100.png,Thyroid +0_1101.png,Thyroid +0_1102.png,Thyroid +0_1103.png,Thyroid +0_1104.png,Thyroid +0_1105.png,Thyroid +0_1106.png,Thyroid +0_1107.png,Thyroid +0_1108.png,Thyroid +0_1109.png,Thyroid +0_1110.png,Thyroid +0_1111.png,Thyroid +0_1112.png,Thyroid +0_1113.png,Thyroid +0_1114.png,Thyroid +0_1115.png,Esophagus +0_1116.png,Esophagus +0_1117.png,Esophagus +0_1118.png,Esophagus +0_1119.png,Esophagus +0_1120.png,Esophagus +0_1121.png,Esophagus +0_1122.png,Esophagus +0_1123.png,Esophagus +0_1124.png,Esophagus +0_1125.png,Esophagus +0_1126.png,Esophagus +0_1127.png,Esophagus +0_1128.png,Esophagus +0_1129.png,Esophagus +0_1130.png,Esophagus +0_1131.png,Esophagus +0_1132.png,Esophagus +0_1133.png,Esophagus +0_1134.png,Esophagus +0_1135.png,Esophagus +0_1136.png,Esophagus +0_1137.png,Esophagus +0_1138.png,Esophagus +0_1139.png,Esophagus +0_1140.png,Esophagus +0_1141.png,Esophagus +0_1142.png,Esophagus +0_1143.png,Esophagus +0_1144.png,Esophagus +0_1145.png,Esophagus +0_1146.png,Esophagus +0_1147.png,Cervix +0_1148.png,Cervix +0_1149.png,Cervix +0_1150.png,Cervix +0_1151.png,Cervix +0_1152.png,Cervix +0_1153.png,Cervix +0_1154.png,Cervix +0_1155.png,Cervix +0_1156.png,Cervix +0_1157.png,Cervix +0_1158.png,Cervix +0_1159.png,Cervix +0_1160.png,Cervix +0_1161.png,Cervix +0_1162.png,Cervix +0_1163.png,Cervix +0_1164.png,Cervix +0_1165.png,Cervix +0_1166.png,Cervix +0_1167.png,Cervix +0_1168.png,Cervix +0_1169.png,Cervix +0_1170.png,Cervix +0_1171.png,Adrenal_gland +0_1172.png,Adrenal_gland +0_1173.png,Adrenal_gland +0_1174.png,Adrenal_gland +0_1175.png,Adrenal_gland +0_1176.png,Adrenal_gland +0_1177.png,Adrenal_gland +0_1178.png,Adrenal_gland +0_1179.png,Adrenal_gland +0_1180.png,Adrenal_gland +0_1181.png,Adrenal_gland +0_1182.png,Adrenal_gland +0_1183.png,Adrenal_gland +0_1184.png,Adrenal_gland +0_1185.png,Adrenal_gland +0_1186.png,Adrenal_gland +0_1187.png,Adrenal_gland +0_1188.png,Adrenal_gland +0_1189.png,Adrenal_gland +0_1190.png,Adrenal_gland +0_1191.png,Adrenal_gland +0_1192.png,Adrenal_gland +0_1193.png,Adrenal_gland +0_1194.png,Adrenal_gland +0_1195.png,Adrenal_gland +0_1196.png,Adrenal_gland +0_1197.png,Adrenal_gland +0_1198.png,Adrenal_gland +0_1199.png,Adrenal_gland +0_1200.png,Adrenal_gland +0_1201.png,Adrenal_gland +0_1202.png,Adrenal_gland +0_1203.png,Adrenal_gland +0_1204.png,Adrenal_gland +0_1205.png,Adrenal_gland +0_1206.png,Adrenal_gland +0_1207.png,Adrenal_gland +0_1208.png,Adrenal_gland +0_1209.png,Adrenal_gland 
+0_1210.png,Adrenal_gland +0_1211.png,Adrenal_gland +0_1212.png,Adrenal_gland +0_1213.png,Adrenal_gland +0_1214.png,Adrenal_gland +0_1215.png,Adrenal_gland +0_1216.png,Adrenal_gland +0_1217.png,Adrenal_gland +0_1218.png,Adrenal_gland +0_1219.png,Esophagus +0_1220.png,Esophagus +0_1221.png,Esophagus +0_1222.png,Esophagus +0_1223.png,Esophagus +0_1224.png,Esophagus +0_1225.png,Esophagus +0_1226.png,Esophagus +0_1227.png,Esophagus +0_1228.png,Esophagus +0_1229.png,Esophagus +0_1230.png,Esophagus +0_1231.png,Esophagus +0_1232.png,Esophagus +0_1233.png,Esophagus +0_1234.png,Esophagus +0_1235.png,Esophagus +0_1236.png,Esophagus +0_1237.png,Esophagus +0_1238.png,Esophagus +0_1239.png,Esophagus +0_1240.png,Esophagus +0_1241.png,Esophagus +0_1242.png,Esophagus +0_1243.png,Esophagus +0_1244.png,Esophagus +0_1245.png,Esophagus +0_1246.png,Esophagus +0_1247.png,Esophagus +0_1248.png,Esophagus +0_1249.png,Esophagus +0_1250.png,Esophagus +0_1251.png,Esophagus +0_1252.png,Esophagus +0_1253.png,Esophagus +0_1254.png,Esophagus +0_1255.png,Esophagus +0_1256.png,Esophagus +0_1257.png,Esophagus +0_1258.png,Esophagus +0_1259.png,Esophagus +0_1260.png,Esophagus +0_1261.png,Esophagus +0_1262.png,Esophagus +0_1263.png,Esophagus +0_1264.png,Adrenal_gland +0_1265.png,Adrenal_gland +0_1266.png,Adrenal_gland +0_1267.png,Adrenal_gland +0_1268.png,Adrenal_gland +0_1269.png,Adrenal_gland +0_1270.png,Adrenal_gland +0_1271.png,Adrenal_gland +0_1272.png,Adrenal_gland +0_1273.png,Adrenal_gland +0_1274.png,Adrenal_gland +0_1275.png,Adrenal_gland +0_1276.png,Adrenal_gland +0_1277.png,Adrenal_gland +0_1278.png,Adrenal_gland +0_1279.png,Pancreatic +0_1280.png,Pancreatic +0_1281.png,Pancreatic +0_1282.png,Pancreatic +0_1283.png,Pancreatic +0_1284.png,Pancreatic +0_1285.png,Pancreatic +0_1286.png,Pancreatic +0_1287.png,Pancreatic +0_1288.png,Pancreatic +0_1289.png,Pancreatic +0_1290.png,Pancreatic +0_1291.png,Pancreatic +0_1292.png,Pancreatic +0_1293.png,Pancreatic +0_1294.png,Pancreatic +0_1295.png,Pancreatic +0_1296.png,Pancreatic +0_1297.png,Pancreatic +0_1298.png,Pancreatic +0_1299.png,Pancreatic +0_1300.png,Pancreatic +0_1301.png,Pancreatic +0_1302.png,Pancreatic +0_1303.png,Adrenal_gland +0_1304.png,Adrenal_gland +0_1305.png,Adrenal_gland +0_1306.png,Adrenal_gland +0_1307.png,Adrenal_gland +0_1308.png,Adrenal_gland +0_1309.png,Adrenal_gland +0_1310.png,Adrenal_gland +0_1311.png,Adrenal_gland +0_1312.png,Adrenal_gland +0_1313.png,Adrenal_gland +0_1314.png,Adrenal_gland +0_1315.png,Adrenal_gland +0_1316.png,Adrenal_gland +0_1317.png,Adrenal_gland +0_1318.png,Adrenal_gland +0_1319.png,Cervix +0_1320.png,Cervix +0_1321.png,Cervix +0_1322.png,Cervix +0_1323.png,Cervix +0_1324.png,Cervix +0_1325.png,Cervix +0_1326.png,Cervix +0_1327.png,Cervix +0_1328.png,Cervix +0_1329.png,Cervix +0_1330.png,Cervix +0_1331.png,Cervix +0_1332.png,Cervix +0_1333.png,Cervix +0_1334.png,Cervix +0_1335.png,Cervix +0_1336.png,Cervix +0_1337.png,Cervix +0_1338.png,Cervix +0_1339.png,Cervix +0_1340.png,Cervix +0_1341.png,Cervix +0_1342.png,Cervix +0_1343.png,Cervix +0_1344.png,Cervix +0_1345.png,Cervix +0_1346.png,Cervix +0_1347.png,Cervix +0_1348.png,Cervix +0_1349.png,Cervix +0_1350.png,Cervix +0_1351.png,Cervix +0_1352.png,Cervix +0_1353.png,Cervix +0_1354.png,Cervix +0_1355.png,Cervix +0_1356.png,Cervix +0_1357.png,Cervix +0_1358.png,Cervix +0_1359.png,Cervix +0_1360.png,Cervix +0_1361.png,Cervix +0_1362.png,Cervix +0_1363.png,Cervix +0_1364.png,Cervix +0_1365.png,Cervix +0_1366.png,Cervix +0_1367.png,Cervix +0_1368.png,Cervix +0_1369.png,Cervix 
+0_1370.png,Bile-duct +0_1371.png,Bile-duct +0_1372.png,Bile-duct +0_1373.png,Bile-duct +0_1374.png,Bile-duct +0_1375.png,Bile-duct +0_1376.png,Bile-duct +0_1377.png,Bile-duct +0_1378.png,Bile-duct +0_1379.png,Bile-duct +0_1380.png,Bile-duct +0_1381.png,Bile-duct +0_1382.png,Bile-duct +0_1383.png,Bile-duct +0_1384.png,Bile-duct +0_1385.png,Bile-duct +0_1386.png,Bile-duct +0_1387.png,Bile-duct +0_1388.png,Bile-duct +0_1389.png,Bile-duct +0_1390.png,Bile-duct +0_1391.png,Bile-duct +0_1392.png,Bile-duct +0_1393.png,Bile-duct +0_1394.png,Bile-duct +0_1395.png,Bile-duct +0_1396.png,Bile-duct +0_1397.png,Bile-duct +0_1398.png,Bile-duct +0_1399.png,Bile-duct +0_1400.png,Bile-duct +0_1401.png,Bile-duct +0_1402.png,Bile-duct +0_1403.png,Bile-duct +0_1404.png,Bile-duct +0_1405.png,Bile-duct +0_1406.png,Bile-duct +0_1407.png,Bile-duct +0_1408.png,Bile-duct +0_1409.png,Bile-duct +0_1410.png,Bile-duct +0_1411.png,Testis +0_1412.png,Testis +0_1413.png,Testis +0_1414.png,Testis +0_1415.png,Testis +0_1416.png,Testis +0_1417.png,Testis +0_1418.png,Testis +0_1419.png,Testis +0_1420.png,Testis +0_1421.png,Testis +0_1422.png,Testis +0_1423.png,Testis +0_1424.png,Testis +0_1425.png,Testis +0_1426.png,Testis +0_1427.png,Testis +0_1428.png,Testis +0_1429.png,Testis +0_1430.png,Testis +0_1431.png,Testis +0_1432.png,Testis +0_1433.png,Testis +0_1434.png,Testis +0_1435.png,Testis +0_1436.png,Testis +0_1437.png,Testis +0_1438.png,Testis +0_1439.png,Testis +0_1440.png,Testis +0_1441.png,Testis +0_1442.png,Testis +0_1443.png,Testis +0_1444.png,Testis +0_1445.png,Testis +0_1446.png,Bile-duct +0_1447.png,Bile-duct +0_1448.png,Bile-duct +0_1449.png,Bile-duct +0_1450.png,Bile-duct +0_1451.png,Bile-duct +0_1452.png,Bile-duct +0_1453.png,Bile-duct +0_1454.png,Bile-duct +0_1455.png,Bile-duct +0_1456.png,Bile-duct +0_1457.png,Bile-duct +0_1458.png,Bile-duct +0_1459.png,Bile-duct +0_1460.png,Bile-duct +0_1461.png,Bile-duct +0_1462.png,Bile-duct +0_1463.png,Bile-duct +0_1464.png,Bile-duct +0_1465.png,Bile-duct +0_1466.png,Bile-duct +0_1467.png,Bile-duct +0_1468.png,Bile-duct +0_1469.png,Bile-duct +0_1470.png,Bile-duct +0_1471.png,Bile-duct +0_1472.png,Bile-duct +0_1473.png,Bile-duct +0_1474.png,Bile-duct +0_1475.png,Bile-duct +0_1476.png,Bile-duct +0_1477.png,Bile-duct +0_1478.png,Bile-duct +0_1479.png,Colon +0_1480.png,Colon +0_1481.png,Colon +0_1482.png,Colon +0_1483.png,Colon +0_1484.png,Colon +0_1485.png,Colon +0_1486.png,Colon +0_1487.png,Colon +0_1488.png,Colon +0_1489.png,Colon +0_1490.png,Colon +0_1491.png,Colon +0_1492.png,Colon +0_1493.png,Colon +0_1494.png,Colon +0_1495.png,Colon +0_1496.png,Colon +0_1497.png,Colon +0_1498.png,Colon +0_1499.png,Colon +0_1500.png,Colon +0_1501.png,Colon +0_1502.png,Colon +0_1503.png,Colon +0_1504.png,Colon +0_1505.png,Colon +0_1506.png,Colon +0_1507.png,Colon +0_1508.png,Colon +0_1509.png,Colon +0_1510.png,Colon +0_1511.png,Colon +0_1512.png,Colon +0_1513.png,Colon +0_1514.png,Colon +0_1515.png,Colon +0_1516.png,Colon +0_1517.png,Colon +0_1518.png,Colon +0_1519.png,Colon +0_1520.png,Adrenal_gland +0_1521.png,Adrenal_gland +0_1522.png,Adrenal_gland +0_1523.png,Adrenal_gland +0_1524.png,Adrenal_gland +0_1525.png,Adrenal_gland +0_1526.png,Adrenal_gland +0_1527.png,Adrenal_gland +0_1528.png,Adrenal_gland +0_1529.png,Adrenal_gland +0_1530.png,Adrenal_gland +0_1531.png,Adrenal_gland +0_1532.png,Adrenal_gland +0_1533.png,Adrenal_gland +0_1534.png,Adrenal_gland +0_1535.png,Adrenal_gland +0_1536.png,Adrenal_gland +0_1537.png,Adrenal_gland +0_1538.png,Adrenal_gland +0_1539.png,Adrenal_gland 
+0_1540.png,Adrenal_gland +0_1541.png,Adrenal_gland +0_1542.png,Adrenal_gland +0_1543.png,Adrenal_gland +0_1544.png,Adrenal_gland +0_1545.png,Adrenal_gland +0_1546.png,Adrenal_gland +0_1547.png,Adrenal_gland +0_1548.png,Adrenal_gland +0_1549.png,Adrenal_gland +0_1550.png,Adrenal_gland +0_1551.png,Adrenal_gland +0_1552.png,Adrenal_gland +0_1553.png,Adrenal_gland +0_1554.png,Adrenal_gland +0_1555.png,Adrenal_gland +0_1556.png,Adrenal_gland +0_1557.png,Adrenal_gland +0_1558.png,Adrenal_gland +0_1559.png,Adrenal_gland +0_1560.png,Adrenal_gland +0_1561.png,Adrenal_gland +0_1562.png,Adrenal_gland +0_1563.png,Adrenal_gland +0_1564.png,Adrenal_gland +0_1565.png,Adrenal_gland +0_1566.png,Adrenal_gland +0_1567.png,Adrenal_gland +0_1568.png,Adrenal_gland +0_1569.png,Adrenal_gland +0_1570.png,Adrenal_gland +0_1571.png,Adrenal_gland +0_1572.png,Adrenal_gland +0_1573.png,Adrenal_gland +0_1574.png,Adrenal_gland +0_1575.png,Bile-duct +0_1576.png,Bile-duct +0_1577.png,Bile-duct +0_1578.png,Bile-duct +0_1579.png,Bile-duct +0_1580.png,Bile-duct +0_1581.png,Bile-duct +0_1582.png,Bile-duct +0_1583.png,Bile-duct +0_1584.png,Bile-duct +0_1585.png,Bile-duct +0_1586.png,Bile-duct +0_1587.png,Bile-duct +0_1588.png,Bile-duct +0_1589.png,Bile-duct +0_1590.png,Bile-duct +0_1591.png,Bile-duct +0_1592.png,Bile-duct +0_1593.png,Bile-duct +0_1594.png,Bile-duct +0_1595.png,Bile-duct +0_1596.png,Bile-duct +0_1597.png,Bile-duct +0_1598.png,Bile-duct +0_1599.png,Bile-duct +0_1600.png,Bile-duct +0_1601.png,Bile-duct +0_1602.png,Bile-duct +0_1603.png,Bile-duct +0_1604.png,Bile-duct +0_1605.png,Bile-duct +0_1606.png,Bile-duct +0_1607.png,Bile-duct +0_1608.png,Bile-duct +0_1609.png,Bile-duct +0_1610.png,Bile-duct +0_1611.png,Bile-duct +0_1612.png,Bile-duct +0_1613.png,Bile-duct +0_1614.png,Bile-duct +0_1615.png,Bile-duct +0_1616.png,Bile-duct +0_1617.png,Bile-duct +0_1618.png,Bile-duct +0_1619.png,Bile-duct +0_1620.png,Bile-duct +0_1621.png,Bile-duct +0_1622.png,Bile-duct +0_1623.png,Bile-duct +0_1624.png,Bile-duct +0_1625.png,Bile-duct +0_1626.png,Bile-duct +0_1627.png,Bile-duct +0_1628.png,Bile-duct +0_1629.png,Bile-duct +0_1630.png,Bile-duct +0_1631.png,Bile-duct +0_1632.png,Bile-duct +0_1633.png,Bile-duct +0_1634.png,Bile-duct +0_1635.png,Bile-duct +0_1636.png,Bile-duct +0_1637.png,Bile-duct +0_1638.png,Bile-duct +0_1639.png,Bile-duct +0_1640.png,Bile-duct +0_1641.png,Bile-duct +0_1642.png,Bile-duct +0_1643.png,Bile-duct +0_1644.png,Bile-duct +0_1645.png,Bile-duct +0_1646.png,Bile-duct +0_1647.png,Bile-duct +0_1648.png,Bile-duct +0_1649.png,Bile-duct +0_1650.png,Bile-duct +0_1651.png,Bile-duct +0_1652.png,Bile-duct +0_1653.png,Bile-duct +0_1654.png,Bile-duct +0_1655.png,Bile-duct +0_1656.png,Bile-duct +0_1657.png,Bile-duct +0_1658.png,Bile-duct +0_1659.png,Bile-duct +0_1660.png,Bladder +0_1661.png,Bladder +0_1662.png,Bladder +0_1663.png,Bladder +0_1664.png,Bladder +0_1665.png,Bladder +0_1666.png,Bladder +0_1667.png,Bladder +0_1668.png,Bladder +0_1669.png,Bladder +0_1670.png,Bladder +0_1671.png,Bladder +0_1672.png,Bladder +0_1673.png,Bladder +0_1674.png,Bladder +0_1675.png,Bladder +0_1676.png,Bladder +0_1677.png,Bladder +0_1678.png,Bladder +0_1679.png,Breast +0_1680.png,Breast +0_1681.png,Breast +0_1682.png,Breast +0_1683.png,Breast +0_1684.png,Breast +0_1685.png,Breast +0_1686.png,Breast +0_1687.png,Breast +0_1688.png,Breast +0_1689.png,Breast +0_1690.png,Breast +0_1691.png,Breast +0_1692.png,Breast +0_1693.png,Breast +0_1694.png,Breast +0_1695.png,Breast +0_1696.png,Breast +0_1697.png,Breast +0_1698.png,Breast 
+0_1699.png,Breast +0_1700.png,Breast +0_1701.png,Breast +0_1702.png,Breast +0_1703.png,Breast +0_1704.png,Breast +0_1705.png,Breast +0_1706.png,Breast +0_1707.png,Breast +0_1708.png,Breast +0_1709.png,Breast +0_1710.png,Breast +0_1711.png,Breast +0_1712.png,Breast +0_1713.png,Breast +0_1714.png,Breast +0_1715.png,Breast +0_1716.png,Breast +0_1717.png,Breast +0_1718.png,Breast +0_1719.png,Breast +0_1720.png,Breast +0_1721.png,Breast +0_1722.png,Breast +0_1723.png,Breast +0_1724.png,Breast +0_1725.png,Breast +0_1726.png,Breast +0_1727.png,Breast +0_1728.png,Breast +0_1729.png,Breast +0_1730.png,Breast +0_1731.png,Breast +0_1732.png,Breast +0_1733.png,Breast +0_1734.png,Breast +0_1735.png,Breast +0_1736.png,Breast +0_1737.png,Breast +0_1738.png,Breast +0_1739.png,Breast +0_1740.png,Breast +0_1741.png,Breast +0_1742.png,Breast +0_1743.png,Breast +0_1744.png,Breast +0_1745.png,Breast +0_1746.png,Breast +0_1747.png,Breast +0_1748.png,Cervix +0_1749.png,Cervix +0_1750.png,Cervix +0_1751.png,Cervix +0_1752.png,Cervix +0_1753.png,Cervix +0_1754.png,Cervix +0_1755.png,Cervix +0_1756.png,Cervix +0_1757.png,Cervix +0_1758.png,Cervix +0_1759.png,Cervix +0_1760.png,Cervix +0_1761.png,Cervix +0_1762.png,Cervix +0_1763.png,Cervix +0_1764.png,Cervix +0_1765.png,Cervix +0_1766.png,Cervix +0_1767.png,Cervix +0_1768.png,Cervix +0_1769.png,Cervix +0_1770.png,Cervix +0_1771.png,Cervix +0_1772.png,Cervix +0_1773.png,Cervix +0_1774.png,Cervix +0_1775.png,Cervix +0_1776.png,Cervix +0_1777.png,Cervix +0_1778.png,Cervix +0_1779.png,Cervix +0_1780.png,Cervix +0_1781.png,Cervix +0_1782.png,Cervix +0_1783.png,Cervix +0_1784.png,Cervix +0_1785.png,Cervix +0_1786.png,Cervix +0_1787.png,Cervix +0_1788.png,Cervix +0_1789.png,Cervix +0_1790.png,Cervix +0_1791.png,Cervix +0_1792.png,Cervix +0_1793.png,Cervix +0_1794.png,Cervix +0_1795.png,Cervix +0_1796.png,Cervix +0_1797.png,Cervix +0_1798.png,Cervix +0_1799.png,Cervix +0_1800.png,Cervix +0_1801.png,Cervix +0_1802.png,Cervix +0_1803.png,Cervix +0_1804.png,Cervix +0_1805.png,Cervix +0_1806.png,Cervix +0_1807.png,Cervix +0_1808.png,Cervix +0_1809.png,Cervix +0_1810.png,Cervix +0_1811.png,Cervix +0_1812.png,Cervix +0_1813.png,Cervix +0_1814.png,Cervix +0_1815.png,Cervix +0_1816.png,Cervix +0_1817.png,Cervix +0_1818.png,Cervix +0_1819.png,Cervix +0_1820.png,Cervix +0_1821.png,Cervix +0_1822.png,Colon +0_1823.png,Colon +0_1824.png,Colon +0_1825.png,Colon +0_1826.png,Colon +0_1827.png,Colon +0_1828.png,Colon +0_1829.png,Colon +0_1830.png,Colon +0_1831.png,Colon +0_1832.png,Colon +0_1833.png,Colon +0_1834.png,Colon +0_1835.png,Colon +0_1836.png,Colon +0_1837.png,Colon +0_1838.png,Colon +0_1839.png,Colon +0_1840.png,Colon +0_1841.png,Colon +0_1842.png,Colon +0_1843.png,Colon +0_1844.png,Colon +0_1845.png,Colon +0_1846.png,Colon +0_1847.png,Colon +0_1848.png,Colon +0_1849.png,Colon +0_1850.png,Colon +0_1851.png,Colon +0_1852.png,Colon +0_1853.png,Colon +0_1854.png,Colon +0_1855.png,Colon +0_1856.png,Colon +0_1857.png,Colon +0_1858.png,Colon +0_1859.png,Colon +0_1860.png,Colon +0_1861.png,Colon +0_1862.png,Colon +0_1863.png,Colon +0_1864.png,Colon +0_1865.png,Colon +0_1866.png,Colon +0_1867.png,Colon +0_1868.png,Colon +0_1869.png,Colon +0_1870.png,Colon +0_1871.png,Colon +0_1872.png,Colon +0_1873.png,Colon +0_1874.png,Colon +0_1875.png,Colon +0_1876.png,Colon +0_1877.png,Colon +0_1878.png,Colon +0_1879.png,Colon +0_1880.png,Colon +0_1881.png,Colon +0_1882.png,Colon +0_1883.png,Colon +0_1884.png,Colon +0_1885.png,Colon +0_1886.png,Colon +0_1887.png,Colon +0_1888.png,Colon 
+0_1889.png,Colon +0_1890.png,Colon +0_1891.png,Colon +0_1892.png,Colon +0_1893.png,Colon +0_1894.png,Colon +0_1895.png,Colon +0_1896.png,Colon +0_1897.png,Colon +0_1898.png,Colon +0_1899.png,Colon +0_1900.png,Colon +0_1901.png,Colon +0_1902.png,Colon +0_1903.png,Colon +0_1904.png,Colon +0_1905.png,Colon +0_1906.png,Colon +0_1907.png,Colon +0_1908.png,Colon +0_1909.png,Colon +0_1910.png,Colon +0_1911.png,Colon +0_1912.png,Colon +0_1913.png,Colon +0_1914.png,Colon +0_1915.png,Colon +0_1916.png,Colon +0_1917.png,Colon +0_1918.png,Colon +0_1919.png,Colon +0_1920.png,Colon +0_1921.png,Colon +0_1922.png,Colon +0_1923.png,Colon +0_1924.png,Colon +0_1925.png,Colon +0_1926.png,Colon +0_1927.png,Colon +0_1928.png,Colon +0_1929.png,Colon +0_1930.png,Colon +0_1931.png,Colon +0_1932.png,Colon +0_1933.png,Colon +0_1934.png,Colon +0_1935.png,Colon +0_1936.png,Colon +0_1937.png,Colon +0_1938.png,Colon +0_1939.png,Colon +0_1940.png,Colon +0_1941.png,Colon +0_1942.png,Colon +0_1943.png,Colon +0_1944.png,Colon +0_1945.png,Colon +0_1946.png,Colon +0_1947.png,Colon +0_1948.png,Colon +0_1949.png,Colon +0_1950.png,Colon +0_1951.png,Colon +0_1952.png,Colon +0_1953.png,Colon +0_1954.png,Colon +0_1955.png,Colon +0_1956.png,Colon +0_1957.png,Colon +0_1958.png,Colon +0_1959.png,Colon +0_1960.png,Colon +0_1961.png,Colon +0_1962.png,Colon +0_1963.png,Colon +0_1964.png,Colon +0_1965.png,Colon +0_1966.png,Colon +0_1967.png,Colon +0_1968.png,Colon +0_1969.png,Colon +0_1970.png,Colon +0_1971.png,Colon +0_1972.png,Colon +0_1973.png,Colon +0_1974.png,Colon +0_1975.png,Colon +0_1976.png,Colon +0_1977.png,Colon +0_1978.png,Colon +0_1979.png,Colon +0_1980.png,Colon +0_1981.png,Colon +0_1982.png,Colon +0_1983.png,Colon +0_1984.png,Colon +0_1985.png,Colon +0_1986.png,Colon +0_1987.png,Colon +0_1988.png,Colon +0_1989.png,Colon +0_1990.png,Colon +0_1991.png,Colon +0_1992.png,Colon +0_1993.png,Colon +0_1994.png,Colon +0_1995.png,Colon +0_1996.png,Colon +0_1997.png,Colon +0_1998.png,Colon +0_1999.png,Colon +0_2000.png,Colon +0_2001.png,Colon +0_2002.png,Colon +0_2003.png,Colon +0_2004.png,Colon +0_2005.png,Colon +0_2006.png,Colon +0_2007.png,Colon +0_2008.png,Colon +0_2009.png,Colon +0_2010.png,Colon +0_2011.png,Colon +0_2012.png,Colon +0_2013.png,Colon +0_2014.png,Colon +0_2015.png,Colon +0_2016.png,Colon +0_2017.png,Colon +0_2018.png,Colon +0_2019.png,Colon +0_2020.png,Colon +0_2021.png,Colon +0_2022.png,Colon +0_2023.png,Colon +0_2024.png,Colon +0_2025.png,Colon +0_2026.png,Colon +0_2027.png,Colon +0_2028.png,Colon +0_2029.png,Colon +0_2030.png,Colon +0_2031.png,Colon +0_2032.png,Colon +0_2033.png,Colon +0_2034.png,Colon +0_2035.png,Colon +0_2036.png,Colon +0_2037.png,Colon +0_2038.png,Colon +0_2039.png,Colon +0_2040.png,Colon +0_2041.png,Colon +0_2042.png,Colon +0_2043.png,Colon +0_2044.png,Colon +0_2045.png,Colon +0_2046.png,Colon +0_2047.png,Colon +0_2048.png,Colon +0_2049.png,Colon +0_2050.png,Colon +0_2051.png,Colon +0_2052.png,Colon +0_2053.png,Colon +0_2054.png,Colon +0_2055.png,Colon +0_2056.png,Colon +0_2057.png,Colon +0_2058.png,Colon +0_2059.png,Colon +0_2060.png,Esophagus +0_2061.png,Esophagus +0_2062.png,Esophagus +0_2063.png,Esophagus +0_2064.png,Esophagus +0_2065.png,Esophagus +0_2066.png,Esophagus +0_2067.png,Esophagus +0_2068.png,Esophagus +0_2069.png,Esophagus +0_2070.png,Esophagus +0_2071.png,Esophagus +0_2072.png,Esophagus +0_2073.png,Esophagus +0_2074.png,Esophagus +0_2075.png,Esophagus +0_2076.png,Esophagus +0_2077.png,Esophagus +0_2078.png,Esophagus +0_2079.png,Esophagus +0_2080.png,Esophagus 
+0_2081.png,Esophagus +0_2082.png,Esophagus +0_2083.png,Esophagus +0_2084.png,Esophagus +0_2085.png,Esophagus +0_2086.png,Esophagus +0_2087.png,Esophagus +0_2088.png,Esophagus +0_2089.png,Esophagus +0_2090.png,Esophagus +0_2091.png,Esophagus +0_2092.png,Esophagus +0_2093.png,Esophagus +0_2094.png,Esophagus +0_2095.png,Esophagus +0_2096.png,Esophagus +0_2097.png,Esophagus +0_2098.png,HeadNeck +0_2099.png,HeadNeck +0_2100.png,HeadNeck +0_2101.png,HeadNeck +0_2102.png,HeadNeck +0_2103.png,HeadNeck +0_2104.png,HeadNeck +0_2105.png,HeadNeck +0_2106.png,HeadNeck +0_2107.png,HeadNeck +0_2108.png,HeadNeck +0_2109.png,HeadNeck +0_2110.png,HeadNeck +0_2111.png,HeadNeck +0_2112.png,HeadNeck +0_2113.png,HeadNeck +0_2114.png,HeadNeck +0_2115.png,HeadNeck +0_2116.png,HeadNeck +0_2117.png,HeadNeck +0_2118.png,HeadNeck +0_2119.png,HeadNeck +0_2120.png,HeadNeck +0_2121.png,HeadNeck +0_2122.png,HeadNeck +0_2123.png,HeadNeck +0_2124.png,HeadNeck +0_2125.png,HeadNeck +0_2126.png,HeadNeck +0_2127.png,HeadNeck +0_2128.png,HeadNeck +0_2129.png,HeadNeck +0_2130.png,HeadNeck +0_2131.png,HeadNeck +0_2132.png,HeadNeck +0_2133.png,HeadNeck +0_2134.png,HeadNeck +0_2135.png,HeadNeck +0_2136.png,HeadNeck +0_2137.png,HeadNeck +0_2138.png,HeadNeck +0_2139.png,HeadNeck +0_2140.png,HeadNeck +0_2141.png,HeadNeck +0_2142.png,HeadNeck +0_2143.png,HeadNeck +0_2144.png,HeadNeck +0_2145.png,HeadNeck +0_2146.png,HeadNeck +0_2147.png,HeadNeck +0_2148.png,HeadNeck +0_2149.png,HeadNeck +0_2150.png,HeadNeck +0_2151.png,HeadNeck +0_2152.png,HeadNeck +0_2153.png,HeadNeck +0_2154.png,HeadNeck +0_2155.png,HeadNeck +0_2156.png,HeadNeck +0_2157.png,HeadNeck +0_2158.png,HeadNeck +0_2159.png,HeadNeck +0_2160.png,HeadNeck +0_2161.png,HeadNeck +0_2162.png,HeadNeck +0_2163.png,HeadNeck +0_2164.png,HeadNeck +0_2165.png,HeadNeck +0_2166.png,HeadNeck +0_2167.png,HeadNeck +0_2168.png,Kidney +0_2169.png,Kidney +0_2170.png,Kidney +0_2171.png,Kidney +0_2172.png,Kidney +0_2173.png,Kidney +0_2174.png,Kidney +0_2175.png,Kidney +0_2176.png,Kidney +0_2177.png,Kidney +0_2178.png,Kidney +0_2179.png,Kidney +0_2180.png,Kidney +0_2181.png,Kidney +0_2182.png,Kidney +0_2183.png,Kidney +0_2184.png,Kidney +0_2185.png,Kidney +0_2186.png,Kidney +0_2187.png,Kidney +0_2188.png,Kidney +0_2189.png,Liver +0_2190.png,Liver +0_2191.png,Liver +0_2192.png,Liver +0_2193.png,Liver +0_2194.png,Liver +0_2195.png,Liver +0_2196.png,Liver +0_2197.png,Liver +0_2198.png,Liver +0_2199.png,Liver +0_2200.png,Liver +0_2201.png,Liver +0_2202.png,Liver +0_2203.png,Liver +0_2204.png,Liver +0_2205.png,Liver +0_2206.png,Liver +0_2207.png,Liver +0_2208.png,Liver +0_2209.png,Liver +0_2210.png,Liver +0_2211.png,Liver +0_2212.png,Liver +0_2213.png,Liver +0_2214.png,Liver +0_2215.png,Liver +0_2216.png,Liver +0_2217.png,Liver +0_2218.png,Liver +0_2219.png,Liver +0_2220.png,Liver +0_2221.png,Liver +0_2222.png,Liver +0_2223.png,Liver +0_2224.png,Liver +0_2225.png,Liver +0_2226.png,Liver +0_2227.png,Liver +0_2228.png,Liver +0_2229.png,Liver +0_2230.png,Liver +0_2231.png,Liver +0_2232.png,Liver +0_2233.png,Liver +0_2234.png,Liver +0_2235.png,Liver +0_2236.png,Liver +0_2237.png,Liver +0_2238.png,Liver +0_2239.png,Liver +0_2240.png,Liver +0_2241.png,Liver +0_2242.png,Liver +0_2243.png,Liver +0_2244.png,Liver +0_2245.png,Liver +0_2246.png,Liver +0_2247.png,Liver +0_2248.png,Liver +0_2249.png,Liver +0_2250.png,Liver +0_2251.png,Liver +0_2252.png,Liver +0_2253.png,Liver +0_2254.png,Lung +0_2255.png,Lung +0_2256.png,Lung +0_2257.png,Lung +0_2258.png,Lung +0_2259.png,Lung +0_2260.png,Lung +0_2261.png,Lung 
+0_2262.png,Lung +0_2263.png,Lung +0_2264.png,Lung +0_2265.png,Lung +0_2266.png,Lung +0_2267.png,Lung +0_2268.png,Ovarian +0_2269.png,Ovarian +0_2270.png,Ovarian +0_2271.png,Ovarian +0_2272.png,Ovarian +0_2273.png,Ovarian +0_2274.png,Ovarian +0_2275.png,Ovarian +0_2276.png,Ovarian +0_2277.png,Ovarian +0_2278.png,Ovarian +0_2279.png,Ovarian +0_2280.png,Ovarian +0_2281.png,Ovarian +0_2282.png,Ovarian +0_2283.png,Ovarian +0_2284.png,Ovarian +0_2285.png,Ovarian +0_2286.png,Ovarian +0_2287.png,Ovarian +0_2288.png,Ovarian +0_2289.png,Ovarian +0_2290.png,Ovarian +0_2291.png,Ovarian +0_2292.png,Ovarian +0_2293.png,Ovarian +0_2294.png,Ovarian +0_2295.png,Ovarian +0_2296.png,Ovarian +0_2297.png,Ovarian +0_2298.png,Ovarian +0_2299.png,Ovarian +0_2300.png,Ovarian +0_2301.png,Ovarian +0_2302.png,Ovarian +0_2303.png,Ovarian +0_2304.png,Ovarian +0_2305.png,Ovarian +0_2306.png,Ovarian +0_2307.png,Ovarian +0_2308.png,Ovarian +0_2309.png,Ovarian +0_2310.png,Ovarian +0_2311.png,Ovarian +0_2312.png,Ovarian +0_2313.png,Pancreatic +0_2314.png,Pancreatic +0_2315.png,Pancreatic +0_2316.png,Pancreatic +0_2317.png,Pancreatic +0_2318.png,Pancreatic +0_2319.png,Pancreatic +0_2320.png,Pancreatic +0_2321.png,Pancreatic +0_2322.png,Pancreatic +0_2323.png,Pancreatic +0_2324.png,Pancreatic +0_2325.png,Pancreatic +0_2326.png,Pancreatic +0_2327.png,Pancreatic +0_2328.png,Pancreatic +0_2329.png,Pancreatic +0_2330.png,Pancreatic +0_2331.png,Pancreatic +0_2332.png,Pancreatic +0_2333.png,Pancreatic +0_2334.png,Pancreatic +0_2335.png,Pancreatic +0_2336.png,Pancreatic +0_2337.png,Pancreatic +0_2338.png,Pancreatic +0_2339.png,Pancreatic +0_2340.png,Pancreatic +0_2341.png,Pancreatic +0_2342.png,Pancreatic +0_2343.png,Pancreatic +0_2344.png,Pancreatic +0_2345.png,Pancreatic +0_2346.png,Pancreatic +0_2347.png,Pancreatic +0_2348.png,Pancreatic +0_2349.png,Pancreatic +0_2350.png,Pancreatic +0_2351.png,Pancreatic +0_2352.png,Pancreatic +0_2353.png,Pancreatic +0_2354.png,Pancreatic +0_2355.png,Pancreatic +0_2356.png,Pancreatic +0_2357.png,Pancreatic +0_2358.png,Pancreatic +0_2359.png,Pancreatic +0_2360.png,Prostate +0_2361.png,Prostate +0_2362.png,Prostate +0_2363.png,Prostate +0_2364.png,Prostate +0_2365.png,Prostate +0_2366.png,Prostate +0_2367.png,Prostate +0_2368.png,Prostate +0_2369.png,Prostate +0_2370.png,Prostate +0_2371.png,Prostate +0_2372.png,Prostate +0_2373.png,Prostate +0_2374.png,Prostate +0_2375.png,Prostate +0_2376.png,Prostate +0_2377.png,Prostate +0_2378.png,Prostate +0_2379.png,Prostate +0_2380.png,Prostate +0_2381.png,Prostate +0_2382.png,Prostate +0_2383.png,Prostate +0_2384.png,Prostate +0_2385.png,Prostate +0_2386.png,Prostate +0_2387.png,Prostate +0_2388.png,Prostate +0_2389.png,Prostate +0_2390.png,Prostate +0_2391.png,Prostate +0_2392.png,Prostate +0_2393.png,Prostate +0_2394.png,Prostate +0_2395.png,Prostate +0_2396.png,Prostate +0_2397.png,Prostate +0_2398.png,Prostate +0_2399.png,Prostate +0_2400.png,Prostate +0_2401.png,Prostate +0_2402.png,Prostate +0_2403.png,Prostate +0_2404.png,Prostate +0_2405.png,Prostate +0_2406.png,Prostate +0_2407.png,Prostate +0_2408.png,Prostate +0_2409.png,Prostate +0_2410.png,Prostate +0_2411.png,Prostate +0_2412.png,Prostate +0_2413.png,Skin +0_2414.png,Skin +0_2415.png,Skin +0_2416.png,Skin +0_2417.png,Skin +0_2418.png,Skin +0_2419.png,Skin +0_2420.png,Skin +0_2421.png,Skin +0_2422.png,Skin +0_2423.png,Skin +0_2424.png,Skin +0_2425.png,Skin +0_2426.png,Skin +0_2427.png,Skin +0_2428.png,Skin +0_2429.png,Skin +0_2430.png,Skin +0_2431.png,Skin +0_2432.png,Skin +0_2433.png,Skin 
+0_2434.png,Skin +0_2435.png,Skin +0_2436.png,Skin +0_2437.png,Skin +0_2438.png,Skin +0_2439.png,Skin +0_2440.png,Skin +0_2441.png,Skin +0_2442.png,Skin +0_2443.png,Skin +0_2444.png,Skin +0_2445.png,Skin +0_2446.png,Skin +0_2447.png,Skin +0_2448.png,Skin +0_2449.png,Skin +0_2450.png,Skin +0_2451.png,Skin +0_2452.png,Skin +0_2453.png,Skin +0_2454.png,Skin +0_2455.png,Skin +0_2456.png,Skin +0_2457.png,Skin +0_2458.png,Skin +0_2459.png,Skin +0_2460.png,Skin +0_2461.png,Skin +0_2462.png,Skin +0_2463.png,Skin +0_2464.png,Skin +0_2465.png,Skin +0_2466.png,Stomach +0_2467.png,Stomach +0_2468.png,Stomach +0_2469.png,Stomach +0_2470.png,Stomach +0_2471.png,Stomach +0_2472.png,Stomach +0_2473.png,Stomach +0_2474.png,Stomach +0_2475.png,Stomach +0_2476.png,Stomach +0_2477.png,Stomach +0_2478.png,Stomach +0_2479.png,Stomach +0_2480.png,Stomach +0_2481.png,Stomach +0_2482.png,Stomach +0_2483.png,Stomach +0_2484.png,Stomach +0_2485.png,Stomach +0_2486.png,Stomach +0_2487.png,Stomach +0_2488.png,Stomach +0_2489.png,Stomach +0_2490.png,Stomach +0_2491.png,Stomach +0_2492.png,Stomach +0_2493.png,Stomach +0_2494.png,Stomach +0_2495.png,Stomach +0_2496.png,Stomach +0_2497.png,Stomach +0_2498.png,Stomach +0_2499.png,Stomach +0_2500.png,Stomach +0_2501.png,Stomach +0_2502.png,Stomach +0_2503.png,Stomach +0_2504.png,Stomach +0_2505.png,Stomach +0_2506.png,Stomach +0_2507.png,Stomach +0_2508.png,Stomach +0_2509.png,Stomach +0_2510.png,Stomach +0_2511.png,Stomach +0_2512.png,Testis +0_2513.png,Testis +0_2514.png,Testis +0_2515.png,Testis +0_2516.png,Testis +0_2517.png,Testis +0_2518.png,Testis +0_2519.png,Testis +0_2520.png,Testis +0_2521.png,Testis +0_2522.png,Testis +0_2523.png,Testis +0_2524.png,Testis +0_2525.png,Testis +0_2526.png,Testis +0_2527.png,Testis +0_2528.png,Testis +0_2529.png,Testis +0_2530.png,Testis +0_2531.png,Testis +0_2532.png,Testis +0_2533.png,Testis +0_2534.png,Testis +0_2535.png,Testis +0_2536.png,Testis +0_2537.png,Thyroid +0_2538.png,Thyroid +0_2539.png,Thyroid +0_2540.png,Thyroid +0_2541.png,Thyroid +0_2542.png,Thyroid +0_2543.png,Thyroid +0_2544.png,Thyroid +0_2545.png,Thyroid +0_2546.png,Thyroid +0_2547.png,Thyroid +0_2548.png,Thyroid +0_2549.png,Thyroid +0_2550.png,Thyroid +0_2551.png,Thyroid +0_2552.png,Thyroid +0_2553.png,Thyroid +0_2554.png,Thyroid +0_2555.png,Thyroid +0_2556.png,Thyroid +0_2557.png,Thyroid +0_2558.png,Thyroid +0_2559.png,Thyroid +0_2560.png,Thyroid +0_2561.png,Thyroid +0_2562.png,Thyroid +0_2563.png,Thyroid +0_2564.png,Thyroid +0_2565.png,Thyroid +0_2566.png,Thyroid +0_2567.png,Thyroid +0_2568.png,Thyroid +0_2569.png,Thyroid +0_2570.png,Thyroid +0_2571.png,Thyroid +0_2572.png,Thyroid +0_2573.png,Thyroid +0_2574.png,Thyroid +0_2575.png,Thyroid +0_2576.png,Thyroid +0_2577.png,Thyroid +0_2578.png,Uterus +0_2579.png,Uterus +0_2580.png,Uterus +0_2581.png,Uterus +0_2582.png,Uterus +0_2583.png,Uterus +0_2584.png,Uterus +0_2585.png,Uterus +0_2586.png,Uterus +0_2587.png,Uterus +0_2588.png,Uterus +0_2589.png,Uterus +0_2590.png,Uterus +0_2591.png,Uterus +0_2592.png,Colon +0_2593.png,Colon +0_2594.png,Colon +0_2595.png,Colon +0_2596.png,Colon +0_2597.png,Colon +0_2598.png,Colon +0_2599.png,Colon +0_2600.png,Colon +0_2601.png,Colon +0_2602.png,Colon +0_2603.png,Colon +0_2604.png,Colon +0_2605.png,Colon +0_2606.png,Colon +0_2607.png,Colon +0_2608.png,Colon +0_2609.png,Colon +0_2610.png,Colon +0_2611.png,Colon +0_2612.png,Colon +0_2613.png,Colon +0_2614.png,Colon +0_2615.png,Colon +0_2616.png,Colon +0_2617.png,Colon +0_2618.png,Colon +0_2619.png,Colon +0_2620.png,Colon 
+0_2621.png,Colon +0_2622.png,Colon +0_2623.png,Colon +0_2624.png,Colon +0_2625.png,Colon +0_2626.png,Colon +0_2627.png,Colon +0_2628.png,Colon +0_2629.png,Colon +0_2630.png,Colon +0_2631.png,Colon +0_2632.png,Colon +0_2633.png,Colon +0_2634.png,Colon +0_2635.png,Colon +0_2636.png,Colon +0_2637.png,Colon +0_2638.png,Colon +0_2639.png,Colon +0_2640.png,Colon +0_2641.png,Colon +0_2642.png,Colon +0_2643.png,Colon +0_2644.png,Colon +0_2645.png,Colon +0_2646.png,Colon +0_2647.png,Colon +0_2648.png,Colon +0_2649.png,Colon +0_2650.png,Colon +0_2651.png,Colon +0_2652.png,Colon +0_2653.png,Colon +0_2654.png,Colon +0_2655.png,Colon
diff --git a/docs/datasets/PanNuke/fold1/cell_count.csv b/docs/datasets/PanNuke/fold1/cell_count.csv
new file mode 100644
index 0000000000000000000000000000000000000000..422167a6a33cef4855392b3588834f95304a2c6b
--- /dev/null
+++ b/docs/datasets/PanNuke/fold1/cell_count.csv
@@ -0,0 +1,2524 @@
+Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial +1_0.png,6,0,3,0,0 +1_1.png,1,0,9,0,0 +1_10.png,7,1,3,0,0 +1_100.png,0,1,5,0,23 +1_1000.png,0,0,23,0,0 +1_1001.png,0,0,13,0,0 +1_1002.png,0,0,7,0,0 +1_1003.png,0,0,10,0,0 +1_1004.png,0,0,8,0,0 +1_1005.png,0,0,10,0,0 +1_1006.png,0,0,13,0,0 +1_1007.png,34,0,8,0,0 +1_1008.png,36,0,8,0,0 +1_1009.png,32,0,2,2,0 +1_101.png,1,0,6,0,17 +1_1010.png,34,0,1,4,0 +1_1011.png,33,0,2,0,0 +1_1012.png,33,0,1,0,0 +1_1013.png,35,0,5,0,0 +1_1014.png,22,0,8,0,0 +1_1015.png,10,0,27,0,0 +1_1016.png,23,1,10,0,0 +1_1017.png,4,1,42,0,0 +1_1018.png,4,1,24,0,0 +1_1019.png,29,0,1,0,0 +1_102.png,2,1,6,0,0 +1_1020.png,0,0,35,0,0 +1_1021.png,0,0,28,0,0 +1_1022.png,6,1,30,0,0 +1_1023.png,0,5,0,0,0 +1_1024.png,0,2,0,0,0 +1_1025.png,0,3,2,0,0 +1_1026.png,0,5,0,0,0 +1_1027.png,0,0,1,0,0 +1_1028.png,0,0,0,0,0 +1_1029.png,0,0,0,0,0 +1_103.png,0,1,3,0,41 +1_1030.png,0,7,1,0,0 +1_1031.png,17,0,1,0,0 +1_1032.png,18,0,6,0,0 +1_1033.png,20,0,4,0,0 +1_1034.png,12,0,5,0,0 +1_1035.png,26,1,2,0,0 +1_1036.png,20,0,6,0,0 +1_1037.png,20,1,2,0,0 +1_1038.png,19,1,9,0,0 +1_1039.png,13,0,4,0,0 +1_104.png,0,0,6,0,32 +1_1040.png,13,0,3,0,0 +1_1041.png,17,0,2,0,0 +1_1042.png,17,0,0,0,0 +1_1043.png,20,0,1,0,0 +1_1044.png,22,0,0,0,0 +1_1045.png,8,0,5,0,0 +1_1046.png,0,0,4,0,0 +1_1047.png,6,1,4,0,0 +1_1048.png,11,0,0,0,0 +1_1049.png,11,0,1,0,0 +1_105.png,31,0,1,0,0 +1_1050.png,8,1,7,0,0 +1_1051.png,14,0,4,0,0 +1_1052.png,14,0,4,0,0 +1_1053.png,15,0,0,0,0 +1_1054.png,10,0,3,0,0 +1_1055.png,9,0,1,0,0 +1_1056.png,6,1,1,0,0 +1_1057.png,14,0,1,0,0 +1_1058.png,9,0,3,0,0 +1_1059.png,12,0,1,0,0 +1_106.png,24,0,9,0,0 +1_1060.png,6,1,2,0,0 +1_1061.png,13,0,1,0,0 +1_1062.png,18,0,0,0,0 +1_1063.png,10,0,3,0,0 +1_1064.png,10,0,3,0,0 +1_1065.png,8,0,2,0,0 +1_1066.png,9,0,2,0,0 +1_1067.png,5,1,1,0,0 +1_1068.png,10,1,2,0,0 +1_1069.png,13,0,0,0,0 +1_107.png,43,0,1,0,0 +1_1070.png,10,0,1,0,0 +1_1071.png,17,0,1,0,0 +1_1072.png,8,0,1,0,0 +1_1073.png,8,0,0,0,0 +1_1074.png,15,0,0,0,0 +1_1075.png,11,0,0,0,0 +1_1076.png,11,0,1,0,0 +1_1077.png,12,0,0,0,0 +1_1078.png,13,0,0,0,0 +1_1079.png,10,0,0,0,0 +1_108.png,38,1,0,0,0 +1_1080.png,8,0,2,0,0 +1_1081.png,14,0,0,0,0 +1_1082.png,11,0,0,0,0 +1_1083.png,10,0,4,0,0 +1_1084.png,10,1,4,0,0 +1_1085.png,8,1,8,0,0 +1_1086.png,2,1,15,0,0 +1_1087.png,7,0,6,0,0 +1_1088.png,0,5,5,0,12 +1_1089.png,0,0,0,0,4 +1_109.png,0,67,2,0,0 +1_1090.png,0,3,8,0,13 +1_1091.png,0,8,4,0,14 +1_1092.png,0,0,3,0,18 +1_1093.png,24,0,0,0,0 +1_1094.png,23,0,1,0,0 +1_1095.png,5,2,5,0,0 +1_1096.png,6,0,7,0,0 +1_1097.png,12,0,5,0,0 +1_1098.png,13,0,8,0,0 +1_1099.png,20,0,5,0,0 +1_11.png,6,1,4,0,0
+1_110.png,3,35,14,0,0 +1_1100.png,15,1,3,0,0 +1_1101.png,6,0,7,0,0 +1_1102.png,2,0,11,0,0 +1_1103.png,4,0,10,0,0 +1_1104.png,1,0,5,0,0 +1_1105.png,31,0,0,0,0 +1_1106.png,0,0,9,0,0 +1_1107.png,6,0,5,0,0 +1_1108.png,0,1,3,0,8 +1_1109.png,0,0,1,0,16 +1_111.png,9,25,10,0,0 +1_1110.png,0,2,3,0,23 +1_1111.png,0,0,3,0,20 +1_1112.png,0,0,4,0,19 +1_1113.png,0,1,3,0,16 +1_1114.png,0,1,2,0,18 +1_1115.png,0,1,4,0,11 +1_1116.png,0,0,1,0,35 +1_1117.png,0,1,8,0,16 +1_1118.png,0,1,2,0,21 +1_1119.png,0,2,11,0,20 +1_112.png,3,37,10,0,0 +1_1120.png,0,2,4,0,36 +1_1121.png,0,1,14,0,12 +1_1122.png,0,3,4,0,22 +1_1123.png,0,3,0,0,29 +1_1124.png,0,5,5,0,29 +1_1125.png,0,7,32,0,0 +1_1126.png,0,12,14,0,32 +1_1127.png,0,0,13,0,0 +1_1128.png,0,0,7,0,0 +1_1129.png,0,0,19,0,0 +1_113.png,0,1,11,0,27 +1_1130.png,0,0,14,0,0 +1_1131.png,0,2,20,0,0 +1_1132.png,0,0,11,0,0 +1_1133.png,0,2,15,0,0 +1_1134.png,4,0,4,0,0 +1_1135.png,0,0,14,0,0 +1_1136.png,7,0,6,0,0 +1_1137.png,0,0,15,0,0 +1_1138.png,3,1,12,0,0 +1_1139.png,17,0,7,0,0 +1_114.png,0,0,0,0,21 +1_1140.png,9,0,8,0,0 +1_1141.png,1,0,21,0,0 +1_1142.png,0,1,19,0,0 +1_1143.png,25,0,2,0,0 +1_1144.png,1,0,15,0,0 +1_1145.png,0,0,19,0,0 +1_1146.png,22,0,6,0,0 +1_1147.png,2,2,8,0,0 +1_1148.png,0,1,11,0,0 +1_1149.png,0,3,14,0,0 +1_115.png,0,0,6,0,25 +1_1150.png,12,0,2,0,0 +1_1151.png,3,1,9,0,0 +1_1152.png,1,1,14,0,0 +1_1153.png,1,0,16,0,0 +1_1154.png,7,4,0,0,0 +1_1155.png,6,1,7,0,0 +1_1156.png,10,1,9,0,0 +1_1157.png,3,1,3,0,0 +1_1158.png,17,0,0,0,0 +1_1159.png,0,1,15,0,0 +1_116.png,0,0,5,0,34 +1_1160.png,8,0,5,0,0 +1_1161.png,10,1,8,0,0 +1_1162.png,16,0,0,0,0 +1_1163.png,5,1,5,0,0 +1_1164.png,18,0,0,0,0 +1_1165.png,15,1,2,0,0 +1_1166.png,14,1,8,0,0 +1_1167.png,18,1,2,0,0 +1_1168.png,6,2,9,0,0 +1_1169.png,19,0,4,0,0 +1_117.png,0,1,1,0,35 +1_1170.png,16,0,2,0,0 +1_1171.png,18,2,2,0,0 +1_1172.png,15,3,0,0,0 +1_1173.png,27,1,0,0,0 +1_1174.png,16,5,2,0,0 +1_1175.png,13,4,0,0,0 +1_1176.png,17,13,0,0,0 +1_1177.png,17,13,0,0,0 +1_1178.png,21,2,0,0,0 +1_1179.png,15,6,5,0,0 +1_118.png,0,0,0,0,36 +1_1180.png,12,8,3,0,0 +1_1181.png,16,4,1,0,0 +1_1182.png,22,1,0,0,0 +1_1183.png,16,3,2,1,0 +1_1184.png,49,2,1,0,0 +1_1185.png,55,0,1,0,0 +1_1186.png,27,0,0,5,0 +1_1187.png,48,0,0,0,0 +1_1188.png,41,13,3,0,0 +1_1189.png,45,0,0,0,0 +1_119.png,0,2,8,0,19 +1_1190.png,0,12,4,0,0 +1_1191.png,0,10,2,0,0 +1_1192.png,0,17,6,0,0 +1_1193.png,0,12,7,0,0 +1_1194.png,5,0,11,0,0 +1_1195.png,9,0,9,0,0 +1_1196.png,16,4,0,0,0 +1_1197.png,9,1,4,0,0 +1_1198.png,12,4,4,0,0 +1_1199.png,10,1,4,0,0 +1_12.png,5,2,3,0,0 +1_120.png,0,0,3,0,28 +1_1200.png,10,0,11,0,0 +1_1201.png,30,5,4,0,0 +1_1202.png,9,0,0,0,0 +1_1203.png,11,0,0,0,0 +1_1204.png,15,0,0,0,0 +1_1205.png,14,0,0,0,0 +1_1206.png,23,0,1,0,0 +1_1207.png,20,0,0,0,0 +1_1208.png,20,0,0,0,0 +1_1209.png,18,0,0,0,0 +1_121.png,0,2,14,0,20 +1_1210.png,14,0,2,0,0 +1_1211.png,20,0,3,0,0 +1_1212.png,20,0,1,0,0 +1_1213.png,11,1,3,0,0 +1_1214.png,19,0,0,0,0 +1_1215.png,19,0,0,0,0 +1_1216.png,27,0,0,0,0 +1_1217.png,22,1,0,0,0 +1_1218.png,2,2,13,0,0 +1_1219.png,17,0,0,0,0 +1_122.png,0,2,3,0,37 +1_1220.png,13,0,0,0,0 +1_1221.png,20,2,5,0,0 +1_1222.png,21,0,1,0,0 +1_1223.png,19,3,5,0,0 +1_1224.png,11,9,3,0,0 +1_1225.png,17,3,5,0,0 +1_1226.png,15,0,9,0,0 +1_1227.png,7,0,17,0,0 +1_1228.png,5,2,13,0,0 +1_1229.png,14,3,6,0,0 +1_123.png,0,2,3,0,23 +1_1230.png,16,5,11,0,0 +1_1231.png,0,0,12,0,0 +1_1232.png,3,4,13,0,0 +1_1233.png,0,11,12,0,0 +1_1234.png,0,3,15,0,0 +1_1235.png,1,10,10,0,0 +1_1236.png,0,13,12,0,0 +1_1237.png,0,1,6,0,25 +1_1238.png,0,0,4,0,27 +1_1239.png,0,3,13,0,19 
+1_124.png,1,1,6,0,0 +1_1240.png,0,4,7,0,17 +1_1241.png,0,0,0,0,0 +1_1242.png,0,0,8,0,20 +1_1243.png,0,0,0,0,3 +1_1244.png,0,0,0,0,9 +1_1245.png,0,0,2,0,1 +1_1246.png,0,1,2,0,23 +1_1247.png,0,1,7,0,24 +1_1248.png,0,0,0,0,0 +1_1249.png,0,0,0,0,0 +1_125.png,22,0,2,0,0 +1_1250.png,0,0,0,0,1 +1_1251.png,0,4,23,0,13 +1_1252.png,0,9,14,0,14 +1_1253.png,0,10,15,0,0 +1_1254.png,0,0,11,0,12 +1_1255.png,0,0,0,0,2 +1_1256.png,0,0,5,0,5 +1_1257.png,0,1,9,0,24 +1_1258.png,0,17,24,0,0 +1_1259.png,1,24,13,0,0 +1_126.png,21,0,1,0,0 +1_1260.png,0,16,28,0,0 +1_1261.png,9,18,18,0,0 +1_1262.png,0,4,10,0,0 +1_1263.png,0,0,22,0,16 +1_1264.png,0,0,20,0,3 +1_1265.png,24,0,1,4,0 +1_1266.png,19,0,5,2,0 +1_1267.png,0,0,6,0,0 +1_1268.png,14,0,9,0,0 +1_1269.png,29,0,7,0,0 +1_127.png,8,0,4,0,0 +1_1270.png,8,0,4,0,0 +1_1271.png,2,0,14,0,0 +1_1272.png,0,0,10,0,0 +1_1273.png,1,0,8,0,0 +1_1274.png,1,0,13,0,0 +1_1275.png,0,0,10,0,0 +1_1276.png,12,0,8,0,0 +1_1277.png,0,0,6,0,0 +1_1278.png,0,0,5,0,0 +1_1279.png,0,0,2,0,0 +1_128.png,3,0,2,0,0 +1_1280.png,0,0,27,0,0 +1_1281.png,0,8,18,0,0 +1_1282.png,11,7,15,0,0 +1_1283.png,15,4,12,0,0 +1_1284.png,7,18,13,0,0 +1_1285.png,0,30,13,0,0 +1_1286.png,42,1,0,0,0 +1_1287.png,3,29,9,0,0 +1_1288.png,6,2,18,0,0 +1_1289.png,15,0,3,0,0 +1_129.png,0,1,7,0,19 +1_1290.png,41,2,4,0,0 +1_1291.png,19,22,5,0,0 +1_1292.png,0,42,14,0,0 +1_1293.png,0,24,14,0,0 +1_1294.png,8,20,3,0,0 +1_1295.png,0,7,11,0,29 +1_1296.png,0,22,16,0,33 +1_1297.png,81,1,0,0,0 +1_1298.png,44,0,0,20,0 +1_1299.png,46,4,12,1,0 +1_13.png,12,3,1,0,0 +1_130.png,0,1,2,0,36 +1_1300.png,0,0,0,0,10 +1_1301.png,0,3,9,0,27 +1_1302.png,0,49,61,0,0 +1_1303.png,0,1,6,0,0 +1_1304.png,0,62,70,0,0 +1_1305.png,0,6,26,0,0 +1_1306.png,78,1,0,0,0 +1_1307.png,56,1,20,0,0 +1_1308.png,46,4,28,0,0 +1_1309.png,0,2,53,0,0 +1_131.png,0,1,5,0,36 +1_1310.png,0,4,38,0,0 +1_1311.png,0,7,24,0,0 +1_1312.png,24,0,0,1,0 +1_1313.png,30,2,7,0,0 +1_1314.png,44,0,0,0,0 +1_1315.png,43,0,0,0,0 +1_1316.png,26,0,0,1,0 +1_1317.png,43,1,0,0,0 +1_1318.png,18,1,3,0,0 +1_1319.png,12,11,35,0,0 +1_132.png,0,1,1,0,45 +1_1320.png,0,7,18,0,0 +1_1321.png,0,6,14,0,0 +1_1322.png,0,10,18,0,0 +1_1323.png,0,8,35,0,0 +1_1324.png,0,1,6,0,0 +1_1325.png,0,0,12,0,0 +1_1326.png,103,0,0,0,0 +1_1327.png,47,0,0,0,0 +1_1328.png,12,0,0,0,0 +1_1329.png,0,8,11,0,0 +1_133.png,8,3,3,0,0 +1_1330.png,0,3,25,0,0 +1_1331.png,0,2,34,0,0 +1_1332.png,26,0,4,0,0 +1_1333.png,32,0,1,0,0 +1_1334.png,25,1,0,0,0 +1_1335.png,37,0,0,0,0 +1_1336.png,25,0,4,0,0 +1_1337.png,0,3,16,0,0 +1_1338.png,0,1,9,0,0 +1_1339.png,0,0,14,0,0 +1_134.png,12,6,2,0,0 +1_1340.png,0,1,0,0,0 +1_1341.png,0,0,2,0,0 +1_1342.png,8,0,0,0,0 +1_1343.png,25,0,0,0,0 +1_1344.png,6,0,3,0,0 +1_1345.png,12,0,2,0,0 +1_1346.png,20,0,0,0,0 +1_1347.png,31,0,0,0,0 +1_1348.png,33,0,0,0,0 +1_1349.png,17,0,0,0,0 +1_135.png,0,0,11,0,17 +1_1350.png,23,0,0,0,0 +1_1351.png,15,0,3,0,0 +1_1352.png,0,0,0,0,8 +1_1353.png,0,0,0,0,6 +1_1354.png,0,0,2,0,8 +1_1355.png,0,0,1,0,1 +1_1356.png,0,0,5,0,0 +1_1357.png,0,0,6,0,0 +1_1358.png,0,0,3,0,0 +1_1359.png,0,0,15,0,0 +1_136.png,0,2,5,0,1 +1_1360.png,0,0,5,0,0 +1_1361.png,0,0,4,0,0 +1_1362.png,0,0,7,0,0 +1_1363.png,0,0,7,0,0 +1_1364.png,11,0,0,0,0 +1_1365.png,6,0,1,0,0 +1_1366.png,4,0,10,0,0 +1_1367.png,10,0,2,0,0 +1_1368.png,9,0,0,0,0 +1_1369.png,0,0,2,0,0 +1_137.png,0,0,4,0,6 +1_1370.png,19,0,5,0,0 +1_1371.png,15,2,0,0,0 +1_1372.png,17,3,1,0,0 +1_1373.png,21,0,5,0,0 +1_1374.png,13,0,0,0,0 +1_1375.png,10,1,1,0,0 +1_1376.png,16,0,2,0,0 +1_1377.png,10,0,0,0,0 +1_1378.png,9,3,3,0,0 +1_1379.png,10,1,1,0,0 
+1_138.png,0,1,9,0,1 +1_1380.png,11,0,1,0,0 +1_1381.png,11,0,6,0,0 +1_1382.png,14,2,1,0,0 +1_1383.png,14,0,4,0,0 +1_1384.png,11,0,7,0,0 +1_1385.png,9,0,10,0,0 +1_1386.png,14,2,9,0,0 +1_1387.png,16,1,10,0,0 +1_1388.png,6,0,1,0,0 +1_1389.png,0,4,1,0,0 +1_139.png,0,2,11,0,12 +1_1390.png,0,2,0,0,0 +1_1391.png,0,5,9,0,0 +1_1392.png,0,5,4,0,0 +1_1393.png,4,0,10,0,0 +1_1394.png,5,1,10,0,0 +1_1395.png,4,0,6,0,0 +1_1396.png,0,0,1,0,0 +1_1397.png,8,0,0,0,0 +1_1398.png,5,0,5,0,0 +1_1399.png,0,0,2,0,0 +1_14.png,10,0,0,0,0 +1_140.png,0,0,4,0,0 +1_1400.png,0,0,5,0,0 +1_1401.png,0,0,8,0,0 +1_1402.png,0,0,1,0,0 +1_1403.png,0,0,0,0,0 +1_1404.png,0,0,0,0,0 +1_1405.png,0,7,18,0,0 +1_1406.png,0,6,21,0,11 +1_1407.png,0,12,10,0,11 +1_1408.png,0,1,16,0,12 +1_1409.png,0,18,16,0,11 +1_141.png,2,0,5,0,0 +1_1410.png,0,7,13,0,0 +1_1411.png,0,9,18,0,0 +1_1412.png,0,4,17,0,0 +1_1413.png,1,2,12,0,0 +1_1414.png,1,1,7,0,0 +1_1415.png,2,5,10,0,0 +1_1416.png,7,2,6,0,0 +1_1417.png,3,0,16,0,0 +1_1418.png,0,1,23,0,0 +1_1419.png,0,0,6,0,0 +1_142.png,4,0,3,0,0 +1_1420.png,0,0,10,0,0 +1_1421.png,0,3,4,0,8 +1_1422.png,0,3,1,0,13 +1_1423.png,0,1,5,0,11 +1_1424.png,0,0,11,0,0 +1_1425.png,0,5,5,0,0 +1_1426.png,0,2,17,0,0 +1_1427.png,0,15,15,0,0 +1_1428.png,0,5,22,0,0 +1_1429.png,0,5,18,0,0 +1_143.png,2,1,1,0,0 +1_1430.png,15,0,9,0,0 +1_1431.png,16,2,14,0,0 +1_1432.png,21,1,15,0,0 +1_1433.png,22,1,8,0,0 +1_1434.png,0,0,0,0,0 +1_1435.png,0,0,0,0,0 +1_1436.png,0,0,0,0,0 +1_1437.png,18,9,15,0,0 +1_1438.png,20,6,9,0,0 +1_1439.png,26,1,0,0,0 +1_144.png,2,0,1,0,0 +1_1440.png,0,0,0,0,0 +1_1441.png,0,0,3,0,0 +1_1442.png,0,0,2,0,0 +1_1443.png,0,2,4,0,0 +1_1444.png,0,0,2,0,0 +1_1445.png,7,9,5,0,0 +1_1446.png,7,30,13,0,0 +1_1447.png,5,1,11,0,0 +1_1448.png,1,36,15,0,0 +1_1449.png,0,2,16,0,0 +1_145.png,2,2,3,0,0 +1_1450.png,0,2,7,0,11 +1_1451.png,0,6,10,0,0 +1_1452.png,0,2,8,0,12 +1_1453.png,8,4,10,0,0 +1_1454.png,19,1,4,0,0 +1_1455.png,15,0,10,0,0 +1_1456.png,22,2,2,0,0 +1_1457.png,20,0,3,0,0 +1_1458.png,7,1,23,0,0 +1_1459.png,20,1,2,0,0 +1_146.png,2,1,0,0,0 +1_1460.png,0,0,19,0,0 +1_1461.png,0,4,31,0,0 +1_1462.png,9,5,23,0,0 +1_1463.png,0,3,28,0,0 +1_1464.png,0,3,23,0,0 +1_1465.png,0,8,22,0,0 +1_1466.png,0,3,6,0,0 +1_1467.png,0,1,17,0,0 +1_1468.png,0,8,19,0,0 +1_1469.png,0,4,12,0,0 +1_147.png,0,0,7,0,35 +1_1470.png,0,0,0,0,0 +1_1471.png,0,0,0,0,0 +1_1472.png,0,0,0,0,0 +1_1473.png,0,0,0,0,0 +1_1474.png,0,0,0,0,0 +1_1475.png,0,0,0,0,0 +1_1476.png,0,0,0,0,0 +1_1477.png,0,0,0,0,0 +1_1478.png,0,0,0,0,0 +1_1479.png,0,2,12,0,0 +1_148.png,0,0,12,0,2 +1_1480.png,0,0,23,0,0 +1_1481.png,0,0,22,0,0 +1_1482.png,0,1,21,0,0 +1_1483.png,0,1,20,0,0 +1_1484.png,0,0,15,0,0 +1_1485.png,0,2,14,0,0 +1_1486.png,0,1,27,0,0 +1_1487.png,0,2,20,0,0 +1_1488.png,23,0,0,0,0 +1_1489.png,7,2,14,0,0 +1_149.png,0,0,9,0,4 +1_1490.png,1,2,29,0,0 +1_1491.png,0,6,29,0,0 +1_1492.png,0,4,38,0,0 +1_1493.png,30,0,0,0,0 +1_1494.png,10,3,29,0,0 +1_1495.png,34,1,0,0,0 +1_1496.png,30,0,0,0,0 +1_1497.png,26,0,2,0,0 +1_1498.png,17,0,10,0,0 +1_1499.png,30,0,0,0,0 +1_15.png,11,0,1,0,0 +1_150.png,0,4,7,0,18 +1_1500.png,23,0,8,0,0 +1_1501.png,16,5,17,0,0 +1_1502.png,27,1,4,0,0 +1_1503.png,28,0,0,0,0 +1_1504.png,0,0,3,0,0 +1_1505.png,0,0,2,0,0 +1_1506.png,0,0,7,0,0 +1_1507.png,0,9,18,0,0 +1_1508.png,11,4,4,0,0 +1_1509.png,14,2,1,0,0 +1_151.png,0,0,3,0,34 +1_1510.png,14,3,3,0,0 +1_1511.png,10,1,11,0,0 +1_1512.png,23,2,4,0,0 +1_1513.png,13,3,4,0,0 +1_1514.png,1,0,8,0,0 +1_1515.png,36,0,6,0,0 +1_1516.png,44,1,1,0,0 +1_1517.png,8,7,8,0,0 +1_1518.png,25,0,0,0,0 +1_1519.png,0,1,2,0,0 
+1_152.png,0,0,2,0,50 +1_1520.png,0,1,0,0,0 +1_1521.png,0,0,1,0,0 +1_1522.png,0,1,0,0,0 +1_1523.png,0,2,3,0,0 +1_1524.png,0,0,0,0,0 +1_1525.png,0,1,3,0,0 +1_1526.png,0,0,0,0,0 +1_1527.png,15,1,0,0,0 +1_1528.png,14,0,1,0,0 +1_1529.png,21,0,0,0,0 +1_153.png,0,2,1,0,66 +1_1530.png,14,0,0,0,0 +1_1531.png,21,0,0,0,0 +1_1532.png,0,2,8,0,0 +1_1533.png,0,0,6,0,0 +1_1534.png,0,0,10,0,0 +1_1535.png,0,0,5,0,0 +1_1536.png,0,1,6,0,0 +1_1537.png,0,0,7,0,0 +1_1538.png,0,0,0,0,0 +1_1539.png,0,0,0,0,0 +1_154.png,0,3,3,0,0 +1_1540.png,0,0,0,0,0 +1_1541.png,0,0,0,0,0 +1_1542.png,0,0,0,0,0 +1_1543.png,0,0,0,0,0 +1_1544.png,0,0,0,0,0 +1_1545.png,0,0,0,0,0 +1_1546.png,0,0,0,0,0 +1_1547.png,82,0,1,0,0 +1_1548.png,80,0,4,0,0 +1_1549.png,88,3,3,0,0 +1_155.png,0,3,6,6,0 +1_1550.png,84,2,6,0,0 +1_1551.png,52,0,12,0,0 +1_1552.png,89,0,5,0,0 +1_1553.png,85,2,7,0,0 +1_1554.png,26,0,4,0,0 +1_1555.png,13,0,11,0,0 +1_1556.png,12,0,11,0,0 +1_1557.png,24,0,2,0,0 +1_1558.png,0,0,0,0,0 +1_1559.png,0,0,0,0,0 +1_156.png,1,1,3,0,0 +1_1560.png,0,0,0,0,0 +1_1561.png,24,4,0,0,0 +1_1562.png,18,0,0,0,0 +1_1563.png,32,0,0,0,0 +1_1564.png,0,0,6,0,0 +1_1565.png,0,2,12,0,0 +1_1566.png,2,3,10,0,0 +1_1567.png,9,4,14,0,0 +1_1568.png,25,1,1,0,0 +1_1569.png,25,9,0,0,0 +1_157.png,1,1,1,0,0 +1_1570.png,0,33,35,0,0 +1_1571.png,6,29,11,0,0 +1_1572.png,35,1,0,0,0 +1_1573.png,36,3,0,0,0 +1_1574.png,33,0,0,0,0 +1_1575.png,0,0,0,0,9 +1_1576.png,0,0,3,0,19 +1_1577.png,0,2,7,0,14 +1_1578.png,0,6,12,0,9 +1_1579.png,0,0,0,0,13 +1_158.png,0,0,8,0,0 +1_1580.png,0,6,7,0,14 +1_1581.png,0,3,4,0,8 +1_1582.png,0,0,0,0,6 +1_1583.png,0,3,7,0,6 +1_1584.png,0,2,5,0,10 +1_1585.png,0,6,6,0,9 +1_1586.png,0,5,10,0,10 +1_1587.png,0,10,10,0,10 +1_1588.png,0,6,9,0,11 +1_1589.png,1,1,3,0,20 +1_159.png,0,1,15,0,14 +1_1590.png,17,14,5,0,0 +1_1591.png,0,12,4,0,6 +1_1592.png,0,1,0,0,6 +1_1593.png,0,4,4,0,12 +1_1594.png,0,9,5,0,11 +1_1595.png,0,0,1,0,8 +1_1596.png,0,0,0,0,1 +1_1597.png,5,8,4,0,3 +1_1598.png,0,3,1,0,13 +1_1599.png,0,1,5,0,15 +1_16.png,3,3,4,0,0 +1_160.png,0,0,4,0,56 +1_1600.png,0,0,0,0,0 +1_1601.png,0,0,0,0,0 +1_1602.png,0,11,13,0,1 +1_1603.png,0,2,0,0,7 +1_1604.png,0,3,5,2,13 +1_1605.png,0,5,2,1,0 +1_1606.png,0,1,0,1,4 +1_1607.png,0,0,0,0,0 +1_1608.png,3,2,4,1,0 +1_1609.png,0,1,7,0,8 +1_161.png,0,5,17,0,23 +1_1610.png,0,1,6,0,8 +1_1611.png,0,0,10,0,4 +1_1612.png,0,0,5,0,1 +1_1613.png,0,0,4,0,7 +1_1614.png,0,0,0,0,8 +1_1615.png,0,1,6,0,7 +1_1616.png,0,0,3,0,10 +1_1617.png,0,0,1,0,7 +1_1618.png,0,0,9,0,4 +1_1619.png,0,0,10,0,5 +1_162.png,0,1,17,0,5 +1_1620.png,0,0,2,0,0 +1_1621.png,0,0,3,0,0 +1_1622.png,2,0,0,0,0 +1_1623.png,9,0,8,0,0 +1_1624.png,0,0,0,0,1 +1_1625.png,0,6,1,0,6 +1_1626.png,0,11,5,0,1 +1_1627.png,0,2,3,0,13 +1_1628.png,0,8,6,0,2 +1_1629.png,0,3,11,0,6 +1_163.png,0,0,16,0,18 +1_1630.png,0,4,10,0,10 +1_1631.png,0,0,0,0,6 +1_1632.png,0,9,9,0,3 +1_1633.png,0,1,2,0,15 +1_1634.png,0,0,1,0,14 +1_1635.png,0,0,1,0,27 +1_1636.png,0,1,2,0,16 +1_1637.png,0,7,16,0,4 +1_1638.png,0,1,0,0,13 +1_1639.png,0,0,1,0,21 +1_164.png,0,1,9,0,13 +1_1640.png,0,2,6,0,5 +1_1641.png,0,1,5,0,7 +1_1642.png,0,0,9,0,0 +1_1643.png,0,0,8,0,0 +1_1644.png,0,0,0,0,33 +1_1645.png,0,6,19,0,17 +1_1646.png,0,2,3,0,14 +1_1647.png,0,1,8,0,12 +1_1648.png,0,6,22,0,1 +1_1649.png,0,0,0,0,13 +1_165.png,0,3,10,0,12 +1_1650.png,0,0,0,0,4 +1_1651.png,0,0,0,0,0 +1_1652.png,0,0,0,0,13 +1_1653.png,0,0,0,0,0 +1_1654.png,0,1,4,0,17 +1_1655.png,0,3,24,0,3 +1_1656.png,0,10,25,0,0 +1_1657.png,0,0,0,0,0 +1_1658.png,0,0,0,0,3 +1_1659.png,1,2,6,0,21 +1_166.png,0,0,0,0,52 +1_1660.png,0,0,0,0,0 
+1_1661.png,0,0,0,0,0 +1_1662.png,0,0,0,0,0 +1_1663.png,0,1,3,0,11 +1_1664.png,0,7,4,0,9 +1_1665.png,0,0,0,0,0 +1_1666.png,0,12,3,0,4 +1_1667.png,0,8,0,0,17 +1_1668.png,0,6,3,0,11 +1_1669.png,0,0,0,0,16 +1_167.png,0,0,0,0,53 +1_1670.png,0,6,2,0,11 +1_1671.png,0,9,4,0,17 +1_1672.png,0,0,0,0,7 +1_1673.png,0,16,5,0,6 +1_1674.png,0,0,0,0,14 +1_1675.png,0,7,5,0,11 +1_1676.png,0,10,1,0,9 +1_1677.png,0,6,0,0,11 +1_1678.png,0,0,0,0,2 +1_1679.png,0,0,0,0,9 +1_168.png,0,0,3,0,40 +1_1680.png,0,8,2,0,9 +1_1681.png,0,7,6,0,3 +1_1682.png,0,16,3,0,13 +1_1683.png,0,4,6,0,4 +1_1684.png,0,0,0,0,0 +1_1685.png,0,0,0,0,0 +1_1686.png,0,0,0,0,0 +1_1687.png,0,0,0,0,0 +1_1688.png,0,0,0,0,0 +1_1689.png,0,0,0,0,0 +1_169.png,0,2,10,0,20 +1_1690.png,0,0,0,0,0 +1_1691.png,0,0,0,0,0 +1_1692.png,0,0,0,0,0 +1_1693.png,0,0,0,0,0 +1_1694.png,0,0,0,0,0 +1_1695.png,0,0,0,0,0 +1_1696.png,0,0,0,0,0 +1_1697.png,0,0,0,0,0 +1_1698.png,0,0,0,0,0 +1_1699.png,0,0,0,0,0 +1_17.png,13,2,0,0,0 +1_170.png,0,0,7,0,12 +1_1700.png,0,0,0,0,0 +1_1701.png,0,0,0,0,0 +1_1702.png,0,0,0,0,0 +1_1703.png,0,20,1,0,9 +1_1704.png,0,30,0,0,2 +1_1705.png,0,29,0,0,1 +1_1706.png,0,10,1,0,11 +1_1707.png,0,1,0,0,7 +1_1708.png,0,1,0,0,19 +1_1709.png,0,0,0,0,10 +1_171.png,0,0,5,0,0 +1_1710.png,0,1,0,0,23 +1_1711.png,0,25,0,0,16 +1_1712.png,0,3,0,0,25 +1_1713.png,0,9,1,0,14 +1_1714.png,0,18,0,0,12 +1_1715.png,0,1,0,0,2 +1_1716.png,0,13,1,0,10 +1_1717.png,0,20,0,0,9 +1_1718.png,0,9,2,0,14 +1_1719.png,0,7,7,0,4 +1_172.png,0,1,1,0,0 +1_1720.png,0,6,0,0,11 +1_1721.png,0,11,0,0,14 +1_1722.png,0,5,6,0,3 +1_1723.png,0,5,1,0,22 +1_1724.png,0,19,2,0,9 +1_1725.png,0,6,6,0,2 +1_1726.png,0,1,0,0,15 +1_1727.png,0,8,3,0,16 +1_1728.png,0,19,0,0,1 +1_1729.png,0,5,1,0,20 +1_173.png,33,0,1,0,0 +1_1730.png,0,0,0,0,10 +1_1731.png,0,14,10,0,0 +1_1732.png,0,0,2,0,17 +1_1733.png,0,2,9,0,7 +1_1734.png,0,0,0,0,1 +1_1735.png,0,15,6,0,0 +1_1736.png,0,4,11,0,5 +1_1737.png,0,3,10,0,1 +1_1738.png,0,0,0,0,0 +1_1739.png,0,8,4,0,8 +1_174.png,11,1,0,0,0 +1_1740.png,0,0,1,0,14 +1_1741.png,0,0,0,0,0 +1_1742.png,0,0,0,0,0 +1_1743.png,0,0,0,0,0 +1_1744.png,0,10,5,0,2 +1_1745.png,0,15,8,0,0 +1_1746.png,0,0,0,0,0 +1_1747.png,0,13,6,0,0 +1_1748.png,0,2,5,0,4 +1_1749.png,0,0,0,0,0 +1_175.png,0,3,5,0,20 +1_1750.png,0,0,0,0,11 +1_1751.png,0,0,2,0,13 +1_1752.png,0,0,0,0,4 +1_1753.png,0,0,10,0,0 +1_1754.png,0,2,20,0,0 +1_1755.png,0,3,32,0,0 +1_1756.png,0,0,19,0,0 +1_1757.png,0,0,13,0,0 +1_1758.png,0,0,18,0,0 +1_1759.png,0,0,13,0,0 +1_176.png,0,0,6,0,0 +1_1760.png,0,0,21,0,0 +1_1761.png,0,0,17,0,0 +1_1762.png,0,1,19,0,0 +1_1763.png,0,4,15,0,0 +1_1764.png,0,0,6,0,0 +1_1765.png,0,0,10,0,0 +1_1766.png,0,0,9,0,0 +1_1767.png,0,0,20,0,0 +1_1768.png,0,0,10,0,0 +1_1769.png,0,0,11,0,0 +1_177.png,0,4,3,0,30 +1_1770.png,0,0,11,0,0 +1_1771.png,0,0,6,0,0 +1_1772.png,0,0,10,0,0 +1_1773.png,0,0,11,0,0 +1_1774.png,0,0,19,0,0 +1_1775.png,0,1,7,0,0 +1_1776.png,0,1,11,0,0 +1_1777.png,0,0,2,0,7 +1_1778.png,0,0,6,0,6 +1_1779.png,0,1,11,0,11 +1_178.png,0,4,19,0,36 +1_1780.png,0,3,0,0,1 +1_1781.png,0,0,6,0,10 +1_1782.png,0,0,21,0,10 +1_1783.png,0,1,9,0,11 +1_1784.png,0,0,1,0,14 +1_1785.png,0,1,10,0,7 +1_1786.png,0,0,21,0,8 +1_1787.png,0,0,4,0,9 +1_1788.png,0,0,0,0,17 +1_1789.png,0,0,7,0,11 +1_179.png,0,0,2,0,39 +1_1790.png,0,0,6,0,11 +1_1791.png,0,0,5,0,15 +1_1792.png,0,0,23,0,4 +1_1793.png,0,1,7,0,15 +1_1794.png,0,0,0,0,9 +1_1795.png,0,0,10,0,21 +1_1796.png,0,0,2,0,7 +1_1797.png,0,0,10,0,10 +1_1798.png,0,0,10,0,7 +1_1799.png,0,0,5,0,24 +1_18.png,11,0,2,0,0 +1_180.png,0,0,6,0,5 +1_1800.png,0,0,6,0,0 +1_1801.png,0,0,10,0,0 
+1_1802.png,0,0,6,0,1 +1_1803.png,0,0,9,0,5 +1_1804.png,0,1,11,0,4 +1_1805.png,0,1,9,0,2 +1_1806.png,27,0,0,0,0 +1_1807.png,22,0,0,0,0 +1_1808.png,11,0,0,6,0 +1_1809.png,23,1,0,0,0 +1_181.png,0,1,16,0,17 +1_1810.png,28,0,0,0,0 +1_1811.png,2,4,11,0,0 +1_1812.png,13,4,4,0,0 +1_1813.png,28,2,2,0,0 +1_1814.png,27,0,0,0,0 +1_1815.png,42,0,1,0,0 +1_1816.png,6,0,20,0,0 +1_1817.png,0,1,0,18,0 +1_1818.png,20,1,2,0,0 +1_1819.png,13,0,3,0,0 +1_182.png,0,2,16,0,12 +1_1820.png,17,1,3,0,0 +1_1821.png,11,0,0,23,0 +1_1822.png,30,1,0,0,0 +1_1823.png,36,2,5,0,0 +1_1824.png,0,7,12,0,0 +1_1825.png,6,5,7,0,0 +1_1826.png,12,2,6,0,0 +1_1827.png,16,3,12,0,0 +1_1828.png,24,1,14,0,0 +1_1829.png,3,1,15,0,0 +1_183.png,0,3,15,0,16 +1_1830.png,0,2,12,0,0 +1_1831.png,0,2,18,0,0 +1_1832.png,0,2,21,0,0 +1_1833.png,0,1,11,0,0 +1_1834.png,0,2,20,0,0 +1_1835.png,19,2,4,0,0 +1_1836.png,0,6,18,0,0 +1_1837.png,5,6,11,0,0 +1_1838.png,5,4,11,0,0 +1_1839.png,0,4,20,1,0 +1_184.png,0,0,2,0,7 +1_1840.png,9,8,12,0,0 +1_1841.png,0,4,17,0,0 +1_1842.png,0,0,18,0,0 +1_1843.png,0,0,21,0,0 +1_1844.png,0,0,10,0,0 +1_1845.png,0,0,0,0,0 +1_1846.png,0,0,0,0,0 +1_1847.png,0,0,0,0,0 +1_1848.png,0,0,0,0,0 +1_1849.png,0,0,0,0,0 +1_185.png,0,0,11,0,9 +1_1850.png,0,0,0,0,0 +1_1851.png,0,0,0,0,0 +1_1852.png,0,0,0,0,0 +1_1853.png,0,0,0,0,0 +1_1854.png,14,0,6,0,0 +1_1855.png,15,1,1,0,0 +1_1856.png,9,1,4,0,0 +1_1857.png,15,0,1,0,0 +1_1858.png,0,0,19,0,0 +1_1859.png,0,0,11,0,0 +1_186.png,0,0,1,0,23 +1_1860.png,0,0,13,0,0 +1_1861.png,0,0,13,0,0 +1_1862.png,2,0,26,0,0 +1_1863.png,0,1,32,0,0 +1_1864.png,19,1,0,0,0 +1_1865.png,0,3,11,0,0 +1_1866.png,13,1,3,0,0 +1_1867.png,0,6,19,0,0 +1_1868.png,0,12,27,0,0 +1_1869.png,0,13,20,0,0 +1_187.png,0,0,2,0,26 +1_1870.png,14,4,16,0,0 +1_1871.png,0,19,21,0,0 +1_1872.png,0,17,20,0,0 +1_1873.png,9,8,9,0,0 +1_1874.png,12,1,9,0,0 +1_1875.png,0,17,10,0,0 +1_1876.png,0,0,0,0,0 +1_1877.png,0,38,13,0,0 +1_1878.png,2,37,5,0,0 +1_1879.png,3,13,2,0,0 +1_188.png,0,1,13,0,11 +1_1880.png,2,3,0,0,0 +1_1881.png,21,32,2,0,0 +1_1882.png,14,27,1,0,0 +1_1883.png,19,0,0,0,0 +1_1884.png,15,0,0,0,0 +1_1885.png,14,0,0,0,0 +1_1886.png,20,0,1,0,0 +1_1887.png,15,0,2,0,0 +1_1888.png,8,0,2,0,0 +1_1889.png,0,0,0,0,0 +1_189.png,0,0,3,0,17 +1_1890.png,8,0,6,0,0 +1_1891.png,2,0,4,0,0 +1_1892.png,9,0,2,0,0 +1_1893.png,10,0,0,0,0 +1_1894.png,0,85,0,0,0 +1_1895.png,0,88,11,0,0 +1_1896.png,6,57,3,0,0 +1_1897.png,10,1,8,0,0 +1_1898.png,18,1,2,0,0 +1_1899.png,13,0,5,0,0 +1_19.png,9,1,1,0,0 +1_190.png,0,0,9,0,21 +1_1900.png,17,6,0,0,0 +1_1901.png,0,2,4,0,0 +1_1902.png,0,4,11,0,0 +1_1903.png,0,1,7,0,0 +1_1904.png,0,1,13,0,0 +1_1905.png,0,3,5,0,0 +1_1906.png,0,1,5,0,0 +1_1907.png,0,3,12,0,0 +1_1908.png,16,2,11,0,0 +1_1909.png,1,1,11,0,0 +1_191.png,0,2,5,0,19 +1_1910.png,30,0,5,0,0 +1_1911.png,0,1,11,0,0 +1_1912.png,0,2,9,0,0 +1_1913.png,0,4,9,0,0 +1_1914.png,0,7,15,0,0 +1_1915.png,0,1,14,0,0 +1_1916.png,0,6,24,0,0 +1_1917.png,0,7,0,0,0 +1_1918.png,0,4,8,0,0 +1_1919.png,0,1,4,0,0 +1_192.png,0,0,1,0,39 +1_1920.png,0,0,0,0,8 +1_1921.png,0,0,0,0,0 +1_1922.png,0,0,0,0,12 +1_1923.png,0,0,0,0,1 +1_1924.png,0,0,0,0,2 +1_1925.png,0,0,0,0,1 +1_1926.png,0,0,0,0,7 +1_1927.png,0,0,0,0,11 +1_1928.png,0,0,0,0,0 +1_1929.png,0,0,0,0,0 +1_193.png,0,3,4,0,33 +1_1930.png,0,0,0,0,0 +1_1931.png,0,0,0,0,0 +1_1932.png,0,0,0,0,0 +1_1933.png,0,0,0,0,0 +1_1934.png,0,0,5,0,0 +1_1935.png,0,0,4,0,0 +1_1936.png,0,0,0,0,0 +1_1937.png,0,0,4,0,0 +1_1938.png,0,0,3,0,0 +1_1939.png,0,0,1,0,0 +1_194.png,0,1,14,0,3 +1_1940.png,0,0,1,0,0 +1_1941.png,0,0,20,0,0 +1_1942.png,0,0,17,0,0 
+1_1943.png,0,0,11,0,0 +1_1944.png,0,1,21,0,0 +1_1945.png,0,0,20,0,0 +1_1946.png,0,0,15,0,0 +1_1947.png,0,0,29,0,0 +1_1948.png,0,0,35,0,0 +1_1949.png,0,0,21,0,0 +1_195.png,0,2,12,0,24 +1_1950.png,0,0,18,0,0 +1_1951.png,0,0,30,0,0 +1_1952.png,0,0,32,0,0 +1_1953.png,0,0,1,0,3 +1_1954.png,0,1,2,0,0 +1_1955.png,0,0,0,0,3 +1_1956.png,0,0,2,0,0 +1_1957.png,0,1,5,0,0 +1_1958.png,0,1,5,0,2 +1_1959.png,0,0,4,0,2 +1_196.png,0,1,1,0,12 +1_1960.png,0,0,3,0,0 +1_1961.png,0,0,7,0,3 +1_1962.png,30,1,5,0,0 +1_1963.png,25,1,13,0,0 +1_1964.png,20,1,13,0,0 +1_1965.png,0,0,6,0,0 +1_1966.png,0,1,12,0,0 +1_1967.png,0,1,11,0,0 +1_1968.png,0,1,11,0,0 +1_1969.png,45,0,0,0,0 +1_197.png,0,1,9,0,20 +1_1970.png,27,0,0,0,0 +1_1971.png,33,0,3,0,0 +1_1972.png,9,0,13,0,0 +1_1973.png,7,0,15,0,0 +1_1974.png,20,0,3,0,0 +1_1975.png,23,0,2,0,0 +1_1976.png,22,0,0,0,0 +1_1977.png,20,7,3,0,0 +1_1978.png,0,23,10,0,0 +1_1979.png,5,37,11,0,0 +1_198.png,0,1,1,0,41 +1_1980.png,5,20,8,0,0 +1_1981.png,1,16,10,0,0 +1_1982.png,18,0,0,0,0 +1_1983.png,21,2,0,0,0 +1_1984.png,40,3,2,0,0 +1_1985.png,17,0,11,0,0 +1_1986.png,33,2,0,0,0 +1_1987.png,23,3,0,0,0 +1_1988.png,19,1,6,0,0 +1_1989.png,15,1,9,0,0 +1_199.png,0,0,6,0,17 +1_1990.png,17,0,6,0,0 +1_1991.png,52,2,0,0,0 +1_1992.png,4,10,0,5,0 +1_1993.png,13,10,0,6,0 +1_1994.png,1,25,0,9,0 +1_1995.png,33,3,0,3,0 +1_1996.png,0,8,0,8,0 +1_1997.png,5,12,0,3,0 +1_1998.png,0,0,0,0,0 +1_1999.png,0,0,0,0,0 +1_2.png,6,0,3,0,0 +1_20.png,2,0,3,0,0 +1_200.png,0,1,7,0,47 +1_2000.png,0,0,0,0,0 +1_2001.png,0,0,1,0,0 +1_2002.png,0,2,1,0,0 +1_2003.png,0,0,0,0,0 +1_2004.png,0,0,0,0,0 +1_2005.png,0,0,0,0,0 +1_2006.png,0,0,1,0,0 +1_2007.png,27,0,8,0,0 +1_2008.png,25,2,10,0,0 +1_2009.png,2,16,28,0,0 +1_201.png,0,0,7,0,22 +1_2010.png,8,4,17,0,0 +1_2011.png,14,7,23,0,0 +1_2012.png,20,1,12,0,0 +1_2013.png,38,1,0,0,0 +1_2014.png,15,0,7,0,0 +1_2015.png,9,0,4,0,0 +1_2016.png,18,0,2,0,0 +1_2017.png,20,0,1,0,0 +1_2018.png,18,0,4,0,0 +1_2019.png,0,0,0,0,0 +1_202.png,0,0,1,0,23 +1_2020.png,0,1,0,0,0 +1_2021.png,0,0,0,0,0 +1_2022.png,0,5,0,0,0 +1_2023.png,0,1,0,0,0 +1_2024.png,0,3,0,0,0 +1_2025.png,0,0,13,0,0 +1_2026.png,0,0,4,0,0 +1_2027.png,0,0,7,0,0 +1_2028.png,0,0,11,0,0 +1_2029.png,0,0,13,0,0 +1_203.png,0,0,4,0,7 +1_2030.png,19,7,8,0,0 +1_2031.png,30,3,4,0,0 +1_2032.png,18,2,7,0,0 +1_2033.png,24,2,6,0,0 +1_2034.png,23,0,6,0,0 +1_2035.png,24,2,2,0,0 +1_2036.png,0,0,2,0,2 +1_2037.png,0,0,2,0,5 +1_2038.png,0,0,9,0,13 +1_2039.png,0,0,6,0,5 +1_204.png,0,0,3,0,33 +1_2040.png,0,0,2,0,7 +1_2041.png,0,2,1,0,10 +1_2042.png,10,1,17,0,0 +1_2043.png,9,2,4,0,0 +1_2044.png,10,2,1,0,0 +1_2045.png,6,0,3,0,0 +1_2046.png,11,0,2,0,0 +1_2047.png,11,0,2,0,0 +1_2048.png,9,0,1,0,0 +1_2049.png,9,0,1,0,0 +1_205.png,0,2,16,0,17 +1_2050.png,9,0,0,0,0 +1_2051.png,0,1,8,0,0 +1_2052.png,0,3,5,0,0 +1_2053.png,0,3,10,0,0 +1_2054.png,0,3,13,0,0 +1_2055.png,0,1,8,0,0 +1_2056.png,0,3,5,0,0 +1_2057.png,0,1,6,0,0 +1_2058.png,0,2,9,0,0 +1_2059.png,0,2,11,0,0 +1_206.png,0,0,1,0,68 +1_2060.png,1,6,20,0,0 +1_2061.png,14,0,0,0,0 +1_2062.png,0,1,23,0,0 +1_2063.png,6,8,10,0,0 +1_2064.png,0,2,2,0,61 +1_2065.png,0,3,17,0,50 +1_2066.png,0,5,9,0,39 +1_2067.png,3,2,8,0,28 +1_2068.png,0,1,14,0,23 +1_2069.png,0,1,14,0,20 +1_207.png,0,1,5,0,17 +1_2070.png,0,1,23,0,2 +1_2071.png,0,1,0,0,12 +1_2072.png,0,0,2,0,10 +1_2073.png,33,0,4,0,0 +1_2074.png,30,1,6,0,0 +1_2075.png,18,1,3,0,0 +1_2076.png,4,7,15,0,0 +1_2077.png,0,9,18,0,0 +1_2078.png,31,1,0,0,0 +1_2079.png,26,3,0,0,0 +1_208.png,0,0,4,0,25 +1_2080.png,0,0,12,0,0 +1_2081.png,0,0,10,0,0 +1_2082.png,0,1,14,0,0 
+1_2083.png,17,0,0,0,0 +1_2084.png,11,9,4,0,1 +1_2085.png,25,3,0,0,0 +1_2086.png,0,0,12,0,0 +1_2087.png,13,0,7,0,0 +1_2088.png,0,2,15,0,0 +1_2089.png,5,0,8,0,0 +1_209.png,0,0,6,0,34 +1_2090.png,0,0,0,0,7 +1_2091.png,0,3,1,0,18 +1_2092.png,0,0,1,0,17 +1_2093.png,0,1,0,0,12 +1_2094.png,0,1,2,0,12 +1_2095.png,0,2,0,0,19 +1_2096.png,0,1,1,0,14 +1_2097.png,0,0,1,0,13 +1_2098.png,0,2,1,0,15 +1_2099.png,0,1,11,0,0 +1_21.png,10,0,1,0,0 +1_210.png,0,2,6,0,15 +1_2100.png,11,0,4,0,0 +1_2101.png,5,0,13,0,0 +1_2102.png,18,1,3,0,0 +1_2103.png,27,1,6,0,0 +1_2104.png,29,0,0,0,0 +1_2105.png,9,3,10,0,0 +1_2106.png,18,0,0,0,0 +1_2107.png,17,1,1,0,0 +1_2108.png,7,0,0,0,0 +1_2109.png,16,0,1,0,0 +1_211.png,0,0,9,0,31 +1_2110.png,21,1,1,0,0 +1_2111.png,23,3,1,0,0 +1_2112.png,20,1,1,0,0 +1_2113.png,4,2,6,0,0 +1_2114.png,18,2,1,0,0 +1_2115.png,20,3,0,0,0 +1_2116.png,1,3,0,0,0 +1_2117.png,0,2,6,0,0 +1_2118.png,0,2,7,0,0 +1_2119.png,0,5,12,0,0 +1_212.png,0,0,8,0,0 +1_2120.png,0,0,11,0,0 +1_2121.png,0,1,9,0,0 +1_2122.png,0,3,40,0,0 +1_2123.png,0,3,38,0,0 +1_2124.png,0,1,46,0,0 +1_2125.png,0,1,15,0,27 +1_2126.png,0,0,0,0,26 +1_2127.png,0,2,24,0,19 +1_2128.png,0,1,11,0,18 +1_2129.png,0,1,5,0,25 +1_213.png,0,3,8,0,12 +1_2130.png,0,3,33,0,0 +1_2131.png,24,0,3,0,0 +1_2132.png,17,0,1,0,0 +1_2133.png,21,0,0,0,0 +1_2134.png,4,0,1,0,0 +1_2135.png,23,0,1,0,0 +1_2136.png,18,0,1,0,0 +1_2137.png,1,3,9,0,0 +1_2138.png,8,2,10,0,0 +1_2139.png,0,1,5,0,0 +1_214.png,0,5,4,0,14 +1_2140.png,5,2,10,0,0 +1_2141.png,12,5,15,0,0 +1_2142.png,25,2,5,0,0 +1_2143.png,14,0,0,0,0 +1_2144.png,21,0,0,0,0 +1_2145.png,21,0,0,0,0 +1_2146.png,19,1,0,0,0 +1_2147.png,19,0,0,0,0 +1_2148.png,24,0,0,0,0 +1_2149.png,34,3,1,0,0 +1_215.png,0,0,0,0,32 +1_2150.png,34,0,0,0,0 +1_2151.png,38,0,0,0,0 +1_2152.png,0,0,4,0,8 +1_2153.png,0,4,13,0,6 +1_2154.png,0,1,6,0,10 +1_2155.png,0,1,4,0,10 +1_2156.png,0,0,13,0,11 +1_2157.png,0,2,10,0,7 +1_2158.png,0,0,10,0,10 +1_2159.png,0,0,8,0,8 +1_216.png,0,0,10,0,4 +1_2160.png,0,2,10,0,4 +1_2161.png,1,0,2,0,0 +1_2162.png,7,0,0,0,0 +1_2163.png,1,2,15,0,0 +1_2164.png,0,3,24,0,0 +1_2165.png,0,3,28,0,0 +1_2166.png,15,5,15,0,0 +1_2167.png,5,3,12,0,0 +1_2168.png,10,1,16,0,0 +1_2169.png,19,4,11,0,0 +1_217.png,0,0,6,0,32 +1_2170.png,14,2,21,0,0 +1_2171.png,2,2,19,0,0 +1_2172.png,28,0,6,0,0 +1_2173.png,0,1,9,0,0 +1_2174.png,18,0,11,0,0 +1_2175.png,0,0,4,0,0 +1_2176.png,0,3,25,0,0 +1_2177.png,0,4,16,0,0 +1_2178.png,0,1,24,0,0 +1_2179.png,0,0,13,0,0 +1_218.png,0,3,1,0,47 +1_2180.png,0,0,15,0,0 +1_2181.png,0,0,26,0,0 +1_2182.png,0,10,30,0,0 +1_2183.png,0,2,17,0,0 +1_2184.png,0,6,19,0,0 +1_2185.png,0,0,17,0,0 +1_2186.png,0,7,13,0,0 +1_2187.png,0,7,21,0,0 +1_2188.png,0,18,15,0,0 +1_2189.png,0,19,18,0,3 +1_219.png,0,4,2,0,31 +1_2190.png,0,0,0,0,30 +1_2191.png,0,0,7,0,0 +1_2192.png,0,0,8,0,0 +1_2193.png,0,0,6,0,0 +1_2194.png,0,3,14,0,0 +1_2195.png,0,1,10,0,0 +1_2196.png,0,4,15,0,0 +1_2197.png,0,2,13,0,0 +1_2198.png,0,0,10,0,0 +1_2199.png,0,3,10,0,0 +1_22.png,0,3,10,0,0 +1_220.png,0,2,12,0,19 +1_2200.png,0,0,11,0,0 +1_2201.png,0,0,10,0,0 +1_2202.png,0,2,9,0,0 +1_2203.png,0,0,9,0,0 +1_2204.png,0,1,13,0,0 +1_2205.png,0,1,14,0,0 +1_2206.png,0,8,7,0,0 +1_2207.png,0,7,12,0,0 +1_2208.png,0,8,11,0,0 +1_2209.png,0,8,15,0,0 +1_221.png,0,3,1,0,42 +1_2210.png,7,4,19,0,0 +1_2211.png,9,1,26,0,0 +1_2212.png,24,1,1,0,0 +1_2213.png,0,0,6,0,19 +1_2214.png,0,0,3,0,28 +1_2215.png,0,0,0,0,18 +1_2216.png,0,0,14,0,16 +1_2217.png,0,0,12,0,26 +1_2218.png,0,0,0,0,4 +1_2219.png,8,0,7,0,0 +1_222.png,0,0,13,0,19 +1_2220.png,0,0,25,0,0 +1_2221.png,1,0,30,0,0 
+1_2222.png,0,0,22,0,0 +1_2223.png,0,3,25,0,0 +1_2224.png,0,0,8,0,0 +1_2225.png,0,0,12,0,0 +1_2226.png,0,1,8,0,0 +1_2227.png,0,0,14,0,0 +1_2228.png,0,3,5,0,0 +1_2229.png,38,0,0,0,0 +1_223.png,17,0,3,0,0 +1_2230.png,36,0,5,0,0 +1_2231.png,36,0,1,0,0 +1_2232.png,16,0,9,0,0 +1_2233.png,0,0,7,0,0 +1_2234.png,0,0,5,0,0 +1_2235.png,0,0,4,0,0 +1_2236.png,0,0,2,0,0 +1_2237.png,26,0,2,0,0 +1_2238.png,9,0,15,0,0 +1_2239.png,17,0,9,0,0 +1_224.png,0,2,0,0,0 +1_2240.png,25,0,7,0,0 +1_2241.png,1,0,25,0,0 +1_2242.png,16,0,5,0,0 +1_2243.png,12,0,0,0,0 +1_2244.png,22,0,0,0,0 +1_2245.png,24,63,0,0,0 +1_2246.png,48,30,0,0,0 +1_2247.png,9,22,0,0,0 +1_2248.png,17,23,0,0,0 +1_2249.png,30,36,0,0,0 +1_225.png,8,0,7,0,0 +1_2250.png,19,36,0,0,0 +1_2251.png,0,2,15,0,0 +1_2252.png,0,4,15,0,0 +1_2253.png,0,10,15,0,0 +1_2254.png,0,22,36,0,0 +1_2255.png,0,4,8,0,0 +1_2256.png,0,6,9,0,0 +1_2257.png,0,1,9,0,0 +1_2258.png,52,2,4,0,0 +1_2259.png,79,2,4,0,0 +1_226.png,0,6,8,0,28 +1_2260.png,64,0,2,0,0 +1_2261.png,66,2,1,0,0 +1_2262.png,65,3,4,0,0 +1_2263.png,52,0,2,0,0 +1_2264.png,60,3,10,0,0 +1_2265.png,59,0,5,0,0 +1_2266.png,0,1,6,0,3 +1_2267.png,0,0,2,0,0 +1_2268.png,0,0,0,0,0 +1_2269.png,0,0,0,0,0 +1_227.png,0,3,3,0,30 +1_2270.png,0,0,4,0,0 +1_2271.png,0,0,2,0,33 +1_2272.png,0,67,27,0,0 +1_2273.png,0,79,34,0,0 +1_2274.png,0,50,26,0,0 +1_2275.png,0,49,34,0,0 +1_2276.png,0,82,27,0,0 +1_2277.png,0,56,28,0,0 +1_2278.png,8,0,0,0,0 +1_2279.png,19,1,0,0,0 +1_228.png,0,7,13,0,24 +1_2280.png,21,1,0,0,0 +1_2281.png,16,1,0,0,0 +1_2282.png,11,1,0,0,0 +1_2283.png,19,1,0,0,0 +1_2284.png,10,3,0,0,0 +1_2285.png,5,2,0,0,0 +1_2286.png,34,2,0,0,0 +1_2287.png,34,0,0,0,0 +1_2288.png,30,0,2,0,0 +1_2289.png,24,0,2,0,0 +1_229.png,0,10,10,0,10 +1_2290.png,20,0,13,0,0 +1_2291.png,27,0,4,0,0 +1_2292.png,28,0,5,0,0 +1_2293.png,29,0,2,0,0 +1_2294.png,40,0,0,0,0 +1_2295.png,12,4,0,0,0 +1_2296.png,8,5,7,0,0 +1_2297.png,12,2,0,0,0 +1_2298.png,12,3,6,0,0 +1_2299.png,17,2,3,0,0 +1_23.png,1,5,8,0,0 +1_230.png,0,3,13,0,24 +1_2300.png,9,4,10,0,0 +1_2301.png,12,0,0,0,0 +1_2302.png,12,0,5,0,0 +1_2303.png,30,4,2,0,0 +1_2304.png,20,7,0,0,0 +1_2305.png,32,4,1,0,0 +1_2306.png,19,4,1,0,0 +1_2307.png,20,8,2,0,0 +1_2308.png,0,0,8,0,0 +1_2309.png,0,0,6,0,0 +1_231.png,0,5,15,0,0 +1_2310.png,0,0,0,0,0 +1_2311.png,0,0,1,0,0 +1_2312.png,0,0,1,0,0 +1_2313.png,0,0,4,0,0 +1_2314.png,0,0,16,0,0 +1_2315.png,0,0,0,0,0 +1_2316.png,0,0,16,0,0 +1_2317.png,0,0,16,0,0 +1_2318.png,0,0,18,0,0 +1_2319.png,0,0,28,0,0 +1_232.png,0,2,1,0,26 +1_2320.png,19,3,8,2,0 +1_2321.png,24,1,9,13,0 +1_2322.png,15,6,16,0,0 +1_2323.png,13,2,8,0,0 +1_2324.png,14,11,7,1,0 +1_2325.png,21,11,6,1,0 +1_2326.png,17,11,1,2,0 +1_2327.png,24,5,2,1,0 +1_2328.png,27,0,0,3,0 +1_2329.png,34,1,0,0,0 +1_233.png,0,3,5,0,41 +1_2330.png,24,4,10,1,0 +1_2331.png,29,7,2,0,0 +1_2332.png,28,8,2,0,0 +1_2333.png,19,19,13,0,0 +1_2334.png,12,6,6,4,0 +1_2335.png,30,2,3,0,0 +1_2336.png,29,9,3,1,0 +1_2337.png,15,1,9,0,0 +1_2338.png,7,0,13,0,0 +1_2339.png,36,1,1,0,0 +1_234.png,0,2,11,0,18 +1_2340.png,34,1,1,5,0 +1_2341.png,27,0,2,1,0 +1_2342.png,12,3,11,0,0 +1_2343.png,33,1,2,0,0 +1_2344.png,25,1,1,0,0 +1_2345.png,19,1,4,0,0 +1_2346.png,16,5,4,0,0 +1_2347.png,0,0,11,0,0 +1_2348.png,1,0,9,0,0 +1_2349.png,0,0,13,0,0 +1_235.png,0,0,9,0,0 +1_2350.png,0,0,18,0,0 +1_2351.png,0,0,11,0,0 +1_2352.png,3,24,24,1,0 +1_2353.png,11,9,15,1,0 +1_2354.png,5,57,6,4,0 +1_2355.png,0,1,3,0,0 +1_2356.png,0,3,1,0,0 +1_2357.png,0,0,11,0,0 +1_2358.png,3,0,3,0,0 +1_2359.png,0,1,10,0,0 +1_236.png,0,3,2,0,44 +1_2360.png,7,2,11,0,0 +1_2361.png,10,0,1,0,0 
+1_2362.png,2,2,15,0,0 +1_2363.png,6,5,4,0,0 +1_2364.png,10,0,3,0,0 +1_2365.png,0,4,5,0,0 +1_2366.png,1,1,18,0,0 +1_2367.png,15,3,0,0,0 +1_2368.png,17,0,0,0,0 +1_2369.png,26,0,0,0,0 +1_237.png,0,0,7,0,34 +1_2370.png,20,3,4,0,0 +1_2371.png,20,3,6,0,0 +1_2372.png,27,0,2,0,0 +1_2373.png,7,54,12,0,0 +1_2374.png,6,58,8,0,0 +1_2375.png,15,40,3,0,0 +1_2376.png,11,69,9,0,0 +1_2377.png,9,27,3,0,0 +1_2378.png,24,6,0,0,0 +1_2379.png,19,4,1,0,0 +1_238.png,0,5,6,0,33 +1_2380.png,22,2,3,0,0 +1_2381.png,16,4,10,0,0 +1_2382.png,0,0,0,0,11 +1_2383.png,0,0,3,0,17 +1_2384.png,0,3,2,0,18 +1_2385.png,0,0,0,0,8 +1_2386.png,0,6,3,0,13 +1_2387.png,0,5,2,0,15 +1_2388.png,0,0,1,0,16 +1_2389.png,0,1,3,0,23 +1_239.png,0,17,2,0,21 +1_2390.png,0,1,1,0,24 +1_2391.png,0,0,3,0,17 +1_2392.png,0,0,0,0,3 +1_2393.png,0,1,0,0,18 +1_2394.png,0,0,1,0,7 +1_2395.png,0,0,1,0,0 +1_2396.png,0,0,0,0,4 +1_2397.png,0,0,0,0,0 +1_2398.png,0,0,4,0,18 +1_2399.png,0,0,11,0,12 +1_24.png,5,0,3,0,0 +1_240.png,0,3,14,0,31 +1_2400.png,0,1,5,0,29 +1_2401.png,0,1,5,0,27 +1_2402.png,0,0,1,0,22 +1_2403.png,0,0,1,0,35 +1_2404.png,0,0,4,0,32 +1_2405.png,0,2,8,0,18 +1_2406.png,0,6,9,0,18 +1_2407.png,0,2,6,0,30 +1_2408.png,0,22,11,0,7 +1_2409.png,30,0,4,0,0 +1_241.png,0,11,6,0,27 +1_2410.png,37,0,2,0,0 +1_2411.png,5,0,3,0,10 +1_2412.png,16,1,10,0,1 +1_2413.png,24,0,5,0,0 +1_2414.png,21,0,6,0,2 +1_2415.png,10,8,11,0,0 +1_2416.png,4,12,9,0,0 +1_2417.png,14,9,8,0,0 +1_2418.png,3,19,11,0,0 +1_2419.png,4,13,11,0,0 +1_242.png,0,6,5,0,34 +1_2420.png,8,12,8,0,0 +1_2421.png,19,0,7,0,0 +1_2422.png,12,4,11,0,0 +1_2423.png,0,3,9,0,20 +1_2424.png,0,1,4,0,15 +1_2425.png,0,3,2,0,36 +1_2426.png,0,2,6,0,19 +1_2427.png,0,1,6,0,15 +1_2428.png,0,0,6,0,22 +1_2429.png,48,3,2,0,0 +1_243.png,0,4,4,0,39 +1_2430.png,27,1,0,0,0 +1_2431.png,30,0,0,0,0 +1_2432.png,0,22,31,0,32 +1_2433.png,0,25,24,0,41 +1_2434.png,0,9,16,0,14 +1_2435.png,0,1,2,0,0 +1_2436.png,0,0,2,0,0 +1_2437.png,0,0,16,0,0 +1_2438.png,0,0,1,0,0 +1_2439.png,0,1,5,0,0 +1_244.png,0,1,3,0,24 +1_2440.png,0,2,7,0,0 +1_2441.png,65,0,12,0,0 +1_2442.png,66,1,1,0,0 +1_2443.png,86,0,1,0,0 +1_2444.png,36,0,0,0,0 +1_2445.png,31,0,0,0,0 +1_2446.png,54,1,1,0,0 +1_2447.png,5,3,21,0,0 +1_2448.png,32,1,34,0,0 +1_2449.png,0,2,10,0,0 +1_245.png,0,0,6,0,21 +1_2450.png,0,0,0,0,0 +1_2451.png,0,8,18,0,0 +1_2452.png,0,3,6,0,0 +1_2453.png,0,6,34,0,0 +1_2454.png,0,1,11,0,0 +1_2455.png,58,7,1,0,0 +1_2456.png,101,3,5,0,0 +1_2457.png,47,0,0,0,0 +1_2458.png,55,2,2,0,0 +1_2459.png,0,120,34,0,0 +1_246.png,0,0,11,0,22 +1_2460.png,0,154,10,0,0 +1_2461.png,42,0,17,0,0 +1_2462.png,16,0,41,0,0 +1_2463.png,0,0,0,1,0 +1_2464.png,39,0,1,14,0 +1_2465.png,49,0,48,0,0 +1_2466.png,2,0,0,3,0 +1_2467.png,47,0,4,10,0 +1_2468.png,37,2,0,0,0 +1_2469.png,80,8,32,0,0 +1_247.png,10,0,6,0,0 +1_2470.png,40,2,8,0,0 +1_2471.png,0,0,0,0,28 +1_2472.png,0,0,2,0,25 +1_2473.png,0,0,3,0,31 +1_2474.png,0,0,22,0,0 +1_2475.png,0,0,24,0,0 +1_2476.png,0,1,13,0,0 +1_2477.png,0,0,22,0,0 +1_2478.png,0,0,13,0,0 +1_2479.png,0,21,43,0,0 +1_248.png,21,0,0,0,0 +1_2480.png,40,1,13,0,0 +1_2481.png,0,5,15,0,0 +1_2482.png,0,11,51,0,0 +1_2483.png,0,14,33,0,0 +1_2484.png,0,17,59,0,0 +1_2485.png,0,15,76,0,0 +1_2486.png,0,43,74,0,0 +1_2487.png,0,78,89,0,0 +1_2488.png,0,0,66,0,0 +1_2489.png,53,2,7,0,0 +1_249.png,18,0,1,0,0 +1_2490.png,45,5,4,0,0 +1_2491.png,26,12,3,0,0 +1_2492.png,0,9,2,0,0 +1_2493.png,0,5,5,0,0 +1_2494.png,0,0,0,0,0 +1_2495.png,0,0,1,0,0 +1_2496.png,0,0,2,0,0 +1_2497.png,0,0,8,0,0 +1_2498.png,0,0,2,0,0 +1_2499.png,0,0,23,0,0 +1_25.png,10,0,0,0,0 +1_250.png,13,0,0,0,0 
+1_2500.png,0,0,12,0,0 +1_2501.png,0,16,0,0,0 +1_2502.png,0,1,0,0,0 +1_2503.png,0,0,4,0,0 +1_2504.png,0,3,25,0,0 +1_2505.png,0,1,19,0,0 +1_2506.png,0,4,30,0,0 +1_2507.png,58,1,2,0,0 +1_2508.png,0,0,0,0,0 +1_2509.png,0,0,14,0,0 +1_251.png,23,0,0,0,0 +1_2510.png,0,0,2,0,0 +1_2511.png,0,0,0,0,0 +1_2512.png,0,0,1,0,0 +1_2513.png,0,12,14,0,24 +1_2514.png,0,32,39,0,23 +1_2515.png,0,15,22,0,27 +1_2516.png,21,0,24,0,0 +1_2517.png,42,9,0,0,0 +1_2518.png,62,0,0,23,0 +1_2519.png,23,46,35,0,0 +1_252.png,23,0,2,0,0 +1_2520.png,0,8,22,0,0 +1_2521.png,0,16,40,0,0 +1_2522.png,0,16,33,0,0 +1_253.png,8,4,6,0,0 +1_254.png,0,27,11,0,0 +1_255.png,14,0,0,0,0 +1_256.png,24,0,0,0,0 +1_257.png,25,0,0,0,0 +1_258.png,22,0,1,0,0 +1_259.png,19,0,1,0,0 +1_26.png,7,0,6,0,0 +1_260.png,11,0,6,0,0 +1_261.png,22,0,5,0,0 +1_262.png,0,0,14,0,10 +1_263.png,0,1,11,0,25 +1_264.png,0,2,8,0,26 +1_265.png,0,11,15,0,7 +1_266.png,19,1,4,0,0 +1_267.png,25,0,2,0,0 +1_268.png,21,0,4,0,0 +1_269.png,20,1,1,0,0 +1_27.png,0,0,7,0,0 +1_270.png,1,32,10,0,0 +1_271.png,0,18,18,0,0 +1_272.png,6,9,10,0,0 +1_273.png,6,16,12,0,0 +1_274.png,8,15,7,0,0 +1_275.png,9,3,8,0,0 +1_276.png,0,27,23,0,0 +1_277.png,0,39,6,0,0 +1_278.png,14,25,10,0,0 +1_279.png,12,2,14,0,0 +1_28.png,2,0,6,0,0 +1_280.png,23,2,4,0,0 +1_281.png,0,5,11,0,0 +1_282.png,0,4,7,0,0 +1_283.png,0,1,14,0,0 +1_284.png,7,0,6,0,0 +1_285.png,1,0,11,0,0 +1_286.png,0,2,5,0,24 +1_287.png,0,0,3,0,20 +1_288.png,0,0,7,0,30 +1_289.png,0,0,5,0,32 +1_29.png,0,0,7,0,0 +1_290.png,0,2,10,0,23 +1_291.png,0,3,2,0,15 +1_292.png,0,4,0,0,5 +1_293.png,0,1,2,0,13 +1_294.png,0,3,1,0,30 +1_295.png,8,1,3,0,0 +1_296.png,21,1,0,0,0 +1_297.png,14,2,5,0,0 +1_298.png,9,1,2,0,1 +1_299.png,4,5,5,0,0 +1_3.png,8,1,1,0,0 +1_30.png,11,0,2,0,0 +1_300.png,2,0,4,0,0 +1_301.png,18,0,2,0,0 +1_302.png,21,3,2,0,0 +1_303.png,18,0,2,0,0 +1_304.png,22,6,0,0,0 +1_305.png,6,6,10,0,0 +1_306.png,23,0,1,0,0 +1_307.png,22,3,5,0,0 +1_308.png,22,0,1,0,0 +1_309.png,15,0,2,0,0 +1_31.png,0,1,8,0,12 +1_310.png,37,0,1,0,0 +1_311.png,22,0,0,0,0 +1_312.png,23,0,1,0,0 +1_313.png,25,1,1,0,0 +1_314.png,20,3,3,0,0 +1_315.png,8,8,10,0,0 +1_316.png,8,12,20,0,0 +1_317.png,8,0,0,0,0 +1_318.png,5,0,0,0,0 +1_319.png,5,0,0,0,0 +1_32.png,0,3,7,0,7 +1_320.png,15,0,0,0,0 +1_321.png,12,0,1,0,0 +1_322.png,4,0,0,0,0 +1_323.png,9,0,0,0,0 +1_324.png,0,5,8,0,0 +1_325.png,0,6,8,0,0 +1_326.png,0,4,7,0,0 +1_327.png,0,3,11,0,0 +1_328.png,0,5,12,0,0 +1_329.png,7,3,2,0,0 +1_33.png,0,1,4,0,19 +1_330.png,6,1,3,0,0 +1_331.png,6,1,5,0,0 +1_332.png,4,1,2,0,0 +1_333.png,28,0,0,0,0 +1_334.png,31,0,0,0,0 +1_335.png,23,0,0,0,0 +1_336.png,28,0,0,0,0 +1_337.png,36,0,0,0,0 +1_338.png,33,0,0,0,0 +1_339.png,0,1,1,0,0 +1_34.png,0,0,5,0,31 +1_340.png,24,1,2,0,0 +1_341.png,2,1,3,0,0 +1_342.png,7,0,2,0,0 +1_343.png,3,0,6,0,0 +1_344.png,4,5,8,0,0 +1_345.png,11,0,2,0,0 +1_346.png,1,3,6,0,0 +1_347.png,1,1,10,0,0 +1_348.png,10,0,2,0,0 +1_349.png,5,1,7,0,0 +1_35.png,0,2,6,0,27 +1_350.png,5,2,8,0,0 +1_351.png,13,0,2,0,0 +1_352.png,12,0,6,0,0 +1_353.png,9,0,8,0,0 +1_354.png,5,1,5,0,0 +1_355.png,14,0,1,0,0 +1_356.png,3,0,8,0,0 +1_357.png,15,0,1,0,0 +1_358.png,9,3,2,0,0 +1_359.png,8,0,2,0,0 +1_36.png,0,0,1,0,8 +1_360.png,20,0,0,0,0 +1_361.png,13,0,2,0,0 +1_362.png,9,1,5,0,0 +1_363.png,8,0,6,0,0 +1_364.png,7,7,7,0,0 +1_365.png,4,2,4,0,0 +1_366.png,12,0,1,0,0 +1_367.png,16,0,1,0,0 +1_368.png,19,1,5,0,0 +1_369.png,3,17,24,0,0 +1_37.png,0,0,2,0,11 +1_370.png,21,1,9,0,0 +1_371.png,7,17,21,0,0 +1_372.png,31,1,6,0,0 +1_373.png,4,2,4,0,0 +1_374.png,0,16,7,0,0 +1_375.png,6,10,5,0,0 +1_376.png,2,22,8,0,0 
+1_377.png,13,2,1,0,0 +1_378.png,9,0,2,0,0 +1_379.png,27,2,0,0,0 +1_38.png,0,3,2,0,18 +1_380.png,8,0,1,0,0 +1_381.png,17,1,0,0,0 +1_382.png,0,3,12,0,0 +1_383.png,0,0,5,0,0 +1_384.png,0,11,8,0,0 +1_385.png,14,0,1,0,0 +1_386.png,0,12,6,0,0 +1_387.png,14,0,0,0,0 +1_388.png,7,6,3,0,0 +1_389.png,6,0,3,0,0 +1_39.png,0,1,1,0,10 +1_390.png,0,2,2,0,0 +1_391.png,12,1,1,0,0 +1_392.png,2,1,2,0,0 +1_393.png,11,0,3,0,0 +1_394.png,14,1,2,0,0 +1_395.png,11,1,3,0,0 +1_396.png,3,6,2,0,0 +1_397.png,10,5,4,0,0 +1_398.png,5,1,5,0,0 +1_399.png,14,0,2,0,0 +1_4.png,5,0,2,0,0 +1_40.png,0,0,2,0,18 +1_400.png,9,2,16,0,0 +1_401.png,21,4,2,0,0 +1_402.png,17,0,6,0,0 +1_403.png,20,2,4,0,0 +1_404.png,16,0,4,0,0 +1_405.png,8,1,6,0,0 +1_406.png,12,1,4,0,0 +1_407.png,12,0,1,0,0 +1_408.png,20,1,4,0,0 +1_409.png,7,7,7,0,0 +1_41.png,1,6,14,0,0 +1_410.png,14,3,5,0,0 +1_411.png,7,1,2,0,0 +1_412.png,13,0,3,0,0 +1_413.png,18,3,4,0,0 +1_414.png,21,2,1,0,0 +1_415.png,0,1,2,0,0 +1_416.png,31,1,2,0,0 +1_417.png,18,0,1,0,0 +1_418.png,0,0,8,0,0 +1_419.png,2,4,9,0,0 +1_42.png,29,0,0,0,0 +1_420.png,2,2,7,0,0 +1_421.png,3,2,14,0,0 +1_422.png,0,1,10,0,0 +1_423.png,4,3,8,0,0 +1_424.png,0,0,6,0,0 +1_425.png,6,1,13,0,0 +1_426.png,4,0,12,0,0 +1_427.png,1,1,11,0,0 +1_428.png,15,0,2,0,0 +1_429.png,1,3,11,0,0 +1_43.png,0,1,7,0,9 +1_430.png,3,0,11,0,0 +1_431.png,1,7,16,0,0 +1_432.png,3,4,4,0,0 +1_433.png,3,0,6,0,0 +1_434.png,6,1,4,0,0 +1_435.png,0,4,5,0,0 +1_436.png,0,2,5,0,0 +1_437.png,0,2,5,0,0 +1_438.png,2,2,4,0,0 +1_439.png,0,1,7,0,0 +1_44.png,0,0,2,0,35 +1_440.png,11,3,3,0,0 +1_441.png,11,4,1,0,0 +1_442.png,6,3,3,0,0 +1_443.png,6,0,3,0,0 +1_444.png,5,1,4,0,0 +1_445.png,0,2,7,0,0 +1_446.png,2,2,4,0,0 +1_447.png,3,20,6,0,0 +1_448.png,6,2,2,0,0 +1_449.png,11,2,5,0,0 +1_45.png,0,0,5,0,30 +1_450.png,2,1,5,0,0 +1_451.png,2,0,7,0,0 +1_452.png,0,2,13,0,0 +1_453.png,10,0,8,0,0 +1_454.png,18,2,0,0,0 +1_455.png,4,2,5,0,0 +1_456.png,4,2,9,0,0 +1_457.png,2,3,7,0,0 +1_458.png,14,0,7,0,0 +1_459.png,14,0,2,0,0 +1_46.png,0,1,4,0,17 +1_460.png,10,2,8,0,0 +1_461.png,24,1,1,0,0 +1_462.png,7,2,6,0,0 +1_463.png,9,1,4,0,0 +1_464.png,7,4,5,0,0 +1_465.png,16,0,2,0,0 +1_466.png,5,1,5,0,0 +1_467.png,4,2,8,0,0 +1_468.png,0,10,7,0,0 +1_469.png,3,3,4,0,0 +1_47.png,0,1,6,0,15 +1_470.png,10,0,1,0,0 +1_471.png,1,4,3,0,0 +1_472.png,3,2,0,0,0 +1_473.png,3,0,4,0,0 +1_474.png,5,0,0,0,0 +1_475.png,7,0,0,0,0 +1_476.png,36,0,0,0,0 +1_477.png,18,3,6,0,0 +1_478.png,11,1,8,0,0 +1_479.png,13,0,10,0,0 +1_48.png,0,1,4,0,29 +1_480.png,16,1,8,0,0 +1_481.png,15,0,10,0,0 +1_482.png,5,0,10,0,0 +1_483.png,22,0,2,0,0 +1_484.png,22,3,3,0,0 +1_485.png,19,0,1,0,0 +1_486.png,0,11,8,0,0 +1_487.png,6,3,8,0,0 +1_488.png,8,5,3,0,0 +1_489.png,23,5,0,0,0 +1_49.png,0,1,6,0,24 +1_490.png,28,2,0,0,0 +1_491.png,29,0,0,0,0 +1_492.png,31,0,0,0,0 +1_493.png,29,0,0,0,0 +1_494.png,0,4,18,0,0 +1_495.png,14,5,1,0,0 +1_496.png,28,0,2,0,0 +1_497.png,0,11,9,0,0 +1_498.png,0,24,12,0,0 +1_499.png,0,24,6,0,0 +1_5.png,4,0,2,0,0 +1_50.png,0,2,8,0,10 +1_500.png,11,4,10,0,0 +1_501.png,21,3,4,0,0 +1_502.png,30,2,1,0,0 +1_503.png,28,3,4,0,0 +1_504.png,24,1,7,0,0 +1_505.png,11,5,8,0,0 +1_506.png,19,2,4,0,0 +1_507.png,21,6,8,0,0 +1_508.png,1,12,8,0,0 +1_509.png,14,2,5,0,0 +1_51.png,0,0,13,0,4 +1_510.png,16,5,4,0,0 +1_511.png,11,2,13,0,0 +1_512.png,19,0,0,0,0 +1_513.png,11,11,6,0,0 +1_514.png,16,6,3,0,0 +1_515.png,2,2,6,0,0 +1_516.png,0,7,9,0,0 +1_517.png,1,2,5,0,0 +1_518.png,2,2,8,0,0 +1_519.png,2,2,8,0,0 +1_52.png,0,2,2,0,30 +1_520.png,1,2,4,0,0 +1_521.png,0,13,6,0,0 +1_522.png,0,2,8,0,0 +1_523.png,0,2,4,0,14 
+1_524.png,0,0,0,0,25 +1_525.png,0,2,0,0,37 +1_526.png,0,2,7,0,5 +1_527.png,0,0,4,0,8 +1_528.png,0,1,1,0,24 +1_529.png,0,3,5,0,2 +1_53.png,0,0,4,0,22 +1_530.png,0,4,6,0,2 +1_531.png,0,0,0,0,26 +1_532.png,0,2,2,0,24 +1_533.png,14,0,0,0,0 +1_534.png,22,0,0,0,0 +1_535.png,17,0,2,0,0 +1_536.png,19,1,0,0,0 +1_537.png,14,1,0,0,0 +1_538.png,0,2,2,0,0 +1_539.png,10,0,0,0,0 +1_54.png,0,1,0,0,29 +1_540.png,20,0,3,0,0 +1_541.png,33,1,0,0,0 +1_542.png,0,0,2,0,0 +1_543.png,44,0,0,0,0 +1_544.png,0,0,0,0,0 +1_545.png,0,0,2,0,0 +1_546.png,11,0,0,0,0 +1_547.png,29,0,0,0,0 +1_548.png,0,0,1,0,0 +1_549.png,13,0,0,0,0 +1_55.png,0,0,2,0,22 +1_550.png,1,3,4,0,0 +1_551.png,89,0,0,0,0 +1_552.png,25,1,11,0,0 +1_553.png,26,0,4,0,0 +1_554.png,38,0,3,0,0 +1_555.png,47,0,4,0,0 +1_556.png,2,0,3,0,0 +1_557.png,31,0,0,0,0 +1_558.png,25,0,0,0,0 +1_559.png,69,1,0,0,0 +1_56.png,0,0,0,0,23 +1_560.png,12,0,3,0,0 +1_561.png,9,0,6,0,0 +1_562.png,0,0,6,0,0 +1_563.png,14,0,3,0,0 +1_564.png,12,0,1,0,0 +1_565.png,10,3,2,0,0 +1_566.png,0,6,8,0,0 +1_567.png,1,10,9,0,0 +1_568.png,5,10,4,0,0 +1_569.png,0,0,6,0,36 +1_57.png,0,0,10,0,1 +1_570.png,0,3,0,0,41 +1_571.png,0,3,2,0,56 +1_572.png,0,3,3,0,34 +1_573.png,0,1,9,0,6 +1_574.png,0,1,7,0,47 +1_575.png,3,7,5,0,0 +1_576.png,2,4,6,0,0 +1_577.png,2,2,2,0,0 +1_578.png,3,2,6,0,0 +1_579.png,0,1,3,0,0 +1_58.png,0,0,3,0,31 +1_580.png,1,1,0,0,0 +1_581.png,11,6,2,0,0 +1_582.png,1,1,4,0,0 +1_583.png,20,4,0,0,0 +1_584.png,21,5,0,0,0 +1_585.png,2,2,7,0,0 +1_586.png,14,3,0,0,0 +1_587.png,0,2,6,0,0 +1_588.png,2,15,8,0,0 +1_589.png,0,12,1,0,0 +1_59.png,0,0,7,0,43 +1_590.png,0,31,1,0,0 +1_591.png,0,30,9,0,0 +1_592.png,12,8,4,0,0 +1_593.png,0,19,12,0,0 +1_594.png,11,1,4,0,0 +1_595.png,0,29,4,0,0 +1_596.png,0,14,6,0,0 +1_597.png,0,4,5,0,0 +1_598.png,2,4,5,0,0 +1_599.png,3,5,8,0,0 +1_6.png,8,0,1,0,0 +1_60.png,5,0,5,0,0 +1_600.png,3,2,4,0,0 +1_601.png,18,1,1,0,0 +1_602.png,0,7,4,0,0 +1_603.png,7,4,8,0,0 +1_604.png,0,12,2,0,0 +1_605.png,10,2,4,0,0 +1_606.png,10,4,4,0,0 +1_607.png,8,3,7,0,0 +1_608.png,15,4,2,0,0 +1_609.png,6,3,9,0,0 +1_61.png,0,0,2,0,0 +1_610.png,1,17,9,0,0 +1_611.png,0,9,12,0,0 +1_612.png,20,0,2,0,0 +1_613.png,32,0,0,0,0 +1_614.png,25,0,0,0,0 +1_615.png,27,0,0,0,0 +1_616.png,19,0,4,0,0 +1_617.png,27,0,0,0,0 +1_618.png,0,0,2,0,35 +1_619.png,0,1,4,0,28 +1_62.png,5,0,3,0,0 +1_620.png,0,3,5,0,28 +1_621.png,0,2,1,0,47 +1_622.png,0,0,5,0,32 +1_623.png,0,0,14,0,8 +1_624.png,0,2,5,0,36 +1_625.png,0,0,4,0,55 +1_626.png,16,0,0,0,0 +1_627.png,17,0,0,0,0 +1_628.png,13,3,1,0,0 +1_629.png,13,5,0,0,0 +1_63.png,2,0,1,0,0 +1_630.png,15,7,2,0,0 +1_631.png,0,41,6,0,0 +1_632.png,5,14,9,0,0 +1_633.png,2,30,2,0,0 +1_634.png,2,9,6,0,0 +1_635.png,0,30,6,0,0 +1_636.png,0,14,13,0,0 +1_637.png,1,18,5,0,0 +1_638.png,0,30,10,0,0 +1_639.png,3,6,10,0,0 +1_64.png,10,0,1,0,0 +1_640.png,0,19,10,0,0 +1_641.png,1,13,15,0,0 +1_642.png,2,7,11,0,0 +1_643.png,0,11,10,0,0 +1_644.png,4,0,4,0,0 +1_645.png,4,0,1,0,0 +1_646.png,6,0,0,0,0 +1_647.png,5,0,3,0,0 +1_648.png,2,0,4,0,0 +1_649.png,7,0,3,0,0 +1_65.png,3,0,9,0,0 +1_650.png,7,1,1,0,0 +1_651.png,9,0,1,0,0 +1_652.png,7,0,2,0,0 +1_653.png,14,0,0,0,0 +1_654.png,17,1,0,0,0 +1_655.png,9,1,0,0,0 +1_656.png,14,0,2,0,0 +1_657.png,10,0,2,0,0 +1_658.png,1,0,5,0,0 +1_659.png,0,0,3,0,0 +1_66.png,0,0,8,0,0 +1_660.png,0,2,1,0,0 +1_661.png,21,0,0,0,0 +1_662.png,20,0,0,0,0 +1_663.png,8,0,1,0,0 +1_664.png,19,0,0,0,0 +1_665.png,24,0,0,0,0 +1_666.png,0,0,7,0,0 +1_667.png,12,0,2,0,0 +1_668.png,11,3,4,0,0 +1_669.png,0,3,10,0,0 +1_67.png,0,0,6,0,0 +1_670.png,11,0,0,0,0 +1_671.png,14,1,1,0,0 
+1_672.png,8,2,4,0,0 +1_673.png,5,0,3,0,0 +1_674.png,7,1,4,0,0 +1_675.png,11,0,0,0,0 +1_676.png,9,0,3,0,0 +1_677.png,22,1,1,0,0 +1_678.png,0,9,4,0,0 +1_679.png,0,0,0,0,0 +1_68.png,5,1,3,0,0 +1_680.png,0,0,0,0,0 +1_681.png,0,15,3,0,0 +1_682.png,0,20,6,0,0 +1_683.png,0,2,0,0,0 +1_684.png,35,1,0,0,0 +1_685.png,0,0,6,0,0 +1_686.png,0,4,10,0,0 +1_687.png,0,10,10,0,0 +1_688.png,0,2,4,0,0 +1_689.png,0,1,5,0,0 +1_69.png,2,0,8,0,0 +1_690.png,20,2,0,0,0 +1_691.png,30,2,0,0,0 +1_692.png,30,1,0,0,0 +1_693.png,18,6,0,8,0 +1_694.png,25,1,5,0,0 +1_695.png,31,0,0,0,0 +1_696.png,22,0,1,0,0 +1_697.png,40,0,1,0,0 +1_698.png,0,0,2,0,0 +1_699.png,2,2,2,0,0 +1_7.png,11,1,4,0,0 +1_70.png,1,1,6,0,9 +1_700.png,4,4,4,0,0 +1_701.png,0,4,4,1,0 +1_702.png,0,1,3,0,0 +1_703.png,33,0,2,0,0 +1_704.png,30,0,3,0,0 +1_705.png,9,0,0,10,0 +1_706.png,14,1,3,0,0 +1_707.png,16,1,0,1,0 +1_708.png,0,3,31,0,0 +1_709.png,0,0,18,0,0 +1_71.png,2,2,6,0,0 +1_710.png,0,3,18,0,0 +1_711.png,0,5,22,0,0 +1_712.png,0,5,21,0,0 +1_713.png,0,4,20,0,0 +1_714.png,0,2,8,0,0 +1_715.png,0,2,5,0,0 +1_716.png,0,41,17,0,0 +1_717.png,0,7,19,0,0 +1_718.png,0,3,19,0,0 +1_719.png,0,1,7,0,0 +1_72.png,1,2,14,0,6 +1_720.png,0,1,6,0,0 +1_721.png,0,0,1,0,0 +1_722.png,0,2,4,0,0 +1_723.png,0,0,2,0,0 +1_724.png,0,0,0,0,0 +1_725.png,0,110,3,0,0 +1_726.png,0,117,1,0,0 +1_727.png,0,94,9,0,0 +1_728.png,0,112,4,0,0 +1_729.png,0,0,1,0,0 +1_73.png,2,1,12,0,9 +1_730.png,0,0,5,0,0 +1_731.png,0,5,9,0,0 +1_732.png,0,4,5,0,0 +1_733.png,0,1,8,0,0 +1_734.png,13,0,0,0,0 +1_735.png,39,0,0,0,0 +1_736.png,6,1,5,0,0 +1_737.png,7,2,5,0,0 +1_738.png,16,0,0,0,0 +1_739.png,23,0,0,0,0 +1_74.png,0,1,22,0,12 +1_740.png,20,0,0,4,0 +1_741.png,18,0,0,0,0 +1_742.png,21,0,4,0,0 +1_743.png,22,0,1,0,0 +1_744.png,2,3,2,0,0 +1_745.png,0,72,10,0,0 +1_746.png,0,83,10,0,0 +1_747.png,0,103,3,0,0 +1_748.png,0,75,2,0,0 +1_749.png,0,0,0,0,0 +1_75.png,0,0,13,0,36 +1_750.png,0,0,0,0,0 +1_751.png,0,0,1,0,0 +1_752.png,0,0,0,0,0 +1_753.png,0,0,0,0,0 +1_754.png,0,8,4,0,0 +1_755.png,0,4,12,0,0 +1_756.png,0,4,3,0,0 +1_757.png,0,11,9,0,0 +1_758.png,0,18,7,0,0 +1_759.png,0,2,8,0,0 +1_76.png,0,1,9,0,28 +1_760.png,0,4,21,0,0 +1_761.png,0,5,7,0,0 +1_762.png,2,13,15,0,0 +1_763.png,0,100,1,0,0 +1_764.png,0,87,3,0,0 +1_765.png,0,9,25,0,0 +1_766.png,0,4,7,0,0 +1_767.png,25,0,0,0,0 +1_768.png,15,2,1,0,0 +1_769.png,12,0,1,0,0 +1_77.png,0,0,6,0,28 +1_770.png,13,8,19,0,0 +1_771.png,22,0,0,0,0 +1_772.png,32,2,4,0,0 +1_773.png,19,0,0,4,0 +1_774.png,26,0,6,0,0 +1_775.png,21,0,0,0,0 +1_776.png,22,7,4,0,0 +1_777.png,19,7,5,0,0 +1_778.png,24,0,6,0,0 +1_779.png,15,1,15,0,0 +1_78.png,0,0,11,0,38 +1_780.png,0,15,16,0,0 +1_781.png,0,19,12,0,0 +1_782.png,9,9,3,0,0 +1_783.png,21,0,2,0,0 +1_784.png,9,2,3,0,0 +1_785.png,8,1,12,0,2 +1_786.png,2,3,19,0,0 +1_787.png,1,1,6,0,26 +1_788.png,22,7,9,0,0 +1_789.png,24,2,5,0,0 +1_79.png,0,0,4,0,37 +1_790.png,7,0,11,0,0 +1_791.png,17,0,3,0,0 +1_792.png,24,0,3,0,0 +1_793.png,13,2,6,0,0 +1_794.png,34,0,0,0,0 +1_795.png,25,0,9,0,0 +1_796.png,25,1,4,0,0 +1_797.png,28,0,0,0,0 +1_798.png,19,0,5,0,0 +1_799.png,23,0,2,0,0 +1_8.png,13,0,3,0,0 +1_80.png,0,0,3,0,42 +1_800.png,18,0,1,0,0 +1_801.png,27,0,1,0,0 +1_802.png,24,0,2,0,0 +1_803.png,25,0,0,0,0 +1_804.png,24,0,2,0,0 +1_805.png,19,0,0,0,0 +1_806.png,20,0,3,0,0 +1_807.png,23,0,4,0,0 +1_808.png,26,0,5,0,0 +1_809.png,15,0,6,0,0 +1_81.png,0,0,6,0,1 +1_810.png,16,0,7,0,0 +1_811.png,13,1,3,0,0 +1_812.png,22,0,10,0,0 +1_813.png,17,0,4,0,0 +1_814.png,23,0,5,0,0 +1_815.png,19,0,0,0,0 +1_816.png,22,0,1,0,0 +1_817.png,21,0,0,0,0 +1_818.png,22,0,0,0,0 
+1_819.png,17,0,2,0,0 +1_82.png,0,0,3,0,31 +1_820.png,16,1,2,0,0 +1_821.png,2,0,12,0,2 +1_822.png,5,0,5,0,22 +1_823.png,1,0,8,0,5 +1_824.png,0,11,2,0,0 +1_825.png,0,8,7,0,0 +1_826.png,3,29,7,0,1 +1_827.png,16,0,13,0,5 +1_828.png,19,0,0,0,0 +1_829.png,17,1,7,0,0 +1_83.png,0,0,2,0,32 +1_830.png,31,0,7,0,0 +1_831.png,17,0,10,0,0 +1_832.png,63,21,1,0,0 +1_833.png,52,13,3,0,0 +1_834.png,44,36,7,0,0 +1_835.png,48,17,2,0,0 +1_836.png,103,0,0,0,0 +1_837.png,97,0,0,0,0 +1_838.png,116,0,0,0,0 +1_839.png,85,0,0,0,0 +1_84.png,0,0,6,0,14 +1_840.png,125,0,0,0,0 +1_841.png,85,0,0,0,0 +1_842.png,100,0,1,0,0 +1_843.png,106,4,0,0,0 +1_844.png,11,63,74,0,0 +1_845.png,0,113,0,0,0 +1_846.png,0,105,5,0,0 +1_847.png,0,96,4,0,0 +1_848.png,0,85,7,0,0 +1_849.png,28,1,1,2,0 +1_85.png,2,1,8,0,0 +1_850.png,24,1,7,0,0 +1_851.png,27,1,2,0,0 +1_852.png,17,0,6,0,0 +1_853.png,26,0,1,1,0 +1_854.png,22,2,5,0,0 +1_855.png,0,44,19,0,0 +1_856.png,0,76,2,0,0 +1_857.png,0,38,14,0,0 +1_858.png,0,84,5,0,0 +1_859.png,0,94,4,0,0 +1_86.png,4,1,9,0,0 +1_860.png,0,0,14,0,0 +1_861.png,17,0,0,0,0 +1_862.png,19,0,1,0,0 +1_863.png,34,1,0,0,0 +1_864.png,10,1,13,0,0 +1_865.png,52,0,0,0,0 +1_866.png,48,0,1,0,0 +1_867.png,57,1,1,0,0 +1_868.png,51,0,0,0,0 +1_869.png,39,0,0,0,0 +1_87.png,10,1,0,0,0 +1_870.png,46,1,0,0,0 +1_871.png,46,0,0,0,0 +1_872.png,49,0,0,0,0 +1_873.png,48,0,0,0,0 +1_874.png,35,1,0,0,0 +1_875.png,41,1,0,0,0 +1_876.png,38,0,0,0,0 +1_877.png,40,0,0,0,0 +1_878.png,0,0,4,0,0 +1_879.png,0,0,5,0,0 +1_88.png,13,1,1,0,0 +1_880.png,0,0,2,0,0 +1_881.png,0,0,4,0,0 +1_882.png,0,0,6,0,0 +1_883.png,0,0,4,0,0 +1_884.png,0,0,9,0,0 +1_885.png,0,2,3,0,0 +1_886.png,0,0,7,0,0 +1_887.png,0,0,5,0,0 +1_888.png,0,0,5,0,0 +1_889.png,0,0,12,0,0 +1_89.png,15,2,1,0,0 +1_890.png,0,0,3,0,0 +1_891.png,0,0,4,0,0 +1_892.png,0,0,14,0,0 +1_893.png,9,46,2,0,0 +1_894.png,25,36,3,0,0 +1_895.png,31,16,12,22,0 +1_896.png,9,0,28,36,0 +1_897.png,17,0,7,32,0 +1_898.png,26,0,8,25,0 +1_899.png,18,0,27,24,0 +1_9.png,12,0,2,0,0 +1_90.png,14,0,4,0,0 +1_900.png,41,0,5,20,0 +1_901.png,25,0,4,52,0 +1_902.png,45,0,5,39,0 +1_903.png,9,0,5,75,0 +1_904.png,5,0,2,59,0 +1_905.png,18,0,3,40,0 +1_906.png,33,4,18,35,0 +1_907.png,2,0,2,66,0 +1_908.png,26,8,22,22,0 +1_909.png,44,0,7,36,0 +1_91.png,6,0,0,0,0 +1_910.png,14,21,16,32,0 +1_911.png,63,4,17,12,0 +1_912.png,9,0,10,0,0 +1_913.png,28,0,0,0,0 +1_914.png,26,0,0,0,0 +1_915.png,3,0,22,0,0 +1_916.png,31,0,0,0,0 +1_917.png,3,0,10,0,0 +1_918.png,19,0,6,0,0 +1_919.png,11,2,8,0,0 +1_92.png,0,1,0,0,0 +1_920.png,17,0,6,0,0 +1_921.png,19,1,13,0,0 +1_922.png,1,0,23,0,0 +1_923.png,1,2,21,0,0 +1_924.png,3,2,23,0,0 +1_925.png,27,0,2,0,0 +1_926.png,17,0,23,0,0 +1_927.png,19,1,23,0,0 +1_928.png,19,1,17,0,0 +1_929.png,20,1,7,0,0 +1_93.png,17,0,2,0,0 +1_930.png,32,6,17,0,0 +1_931.png,65,29,21,0,9 +1_932.png,0,225,57,0,7 +1_933.png,0,13,16,0,0 +1_934.png,18,1,3,0,0 +1_935.png,7,3,8,0,0 +1_936.png,0,9,20,0,0 +1_937.png,0,1,31,0,0 +1_938.png,18,1,2,0,0 +1_939.png,1,7,15,0,0 +1_94.png,8,0,4,0,0 +1_940.png,0,9,22,0,0 +1_941.png,0,12,14,0,0 +1_942.png,1,6,19,0,0 +1_943.png,4,3,21,0,0 +1_944.png,16,4,9,0,0 +1_945.png,0,1,14,0,0 +1_946.png,1,12,11,0,0 +1_947.png,9,0,1,0,0 +1_948.png,0,0,11,0,0 +1_949.png,16,0,4,0,0 +1_95.png,23,0,0,0,0 +1_950.png,11,0,0,0,0 +1_951.png,8,0,0,0,0 +1_952.png,16,1,0,0,0 +1_953.png,23,0,4,0,0 +1_954.png,22,0,3,0,0 +1_955.png,23,1,4,0,0 +1_956.png,19,4,3,0,0 +1_957.png,19,1,3,0,0 +1_958.png,19,5,1,0,0 +1_959.png,5,68,2,0,0 +1_96.png,17,3,4,0,0 +1_960.png,0,83,3,0,0 +1_961.png,1,73,2,0,0 +1_962.png,0,92,3,0,0 +1_963.png,0,71,2,0,0 
+1_964.png,0,0,4,0,25 +1_965.png,0,0,0,0,39 +1_966.png,0,1,9,0,12 +1_967.png,0,1,2,0,33 +1_968.png,0,0,0,0,5 +1_969.png,0,5,2,0,26 +1_97.png,5,2,11,0,0 +1_970.png,0,9,39,0,5 +1_971.png,0,3,15,0,24 +1_972.png,0,2,10,0,0 +1_973.png,0,1,9,0,0 +1_974.png,0,5,7,0,0 +1_975.png,0,2,3,0,0 +1_976.png,0,0,1,0,0 +1_977.png,0,0,4,0,0 +1_978.png,0,0,7,0,0 +1_979.png,0,0,4,0,0 +1_98.png,7,2,13,0,0 +1_980.png,0,0,4,0,0 +1_981.png,2,5,11,0,0 +1_982.png,21,4,14,0,0 +1_983.png,21,5,6,0,0 +1_984.png,14,2,4,0,0 +1_985.png,23,0,8,0,0 +1_986.png,13,0,3,0,0 +1_987.png,12,4,13,0,0 +1_988.png,26,0,1,0,0 +1_989.png,28,0,5,0,0 +1_99.png,0,0,10,0,13 +1_990.png,0,0,3,0,0 +1_991.png,0,0,8,0,0 +1_992.png,0,0,10,0,0 +1_993.png,0,0,12,0,0 +1_994.png,0,0,12,0,0 +1_995.png,0,0,15,0,0 +1_996.png,0,0,26,0,0 +1_997.png,0,0,10,0,0 +1_998.png,0,0,14,0,0 +1_999.png,0,0,17,0,0 diff --git a/docs/datasets/PanNuke/fold1/types.csv b/docs/datasets/PanNuke/fold1/types.csv new file mode 100644 index 0000000000000000000000000000000000000000..6a27ee39e842ca91daf334da759333be89a47f4e --- /dev/null +++ b/docs/datasets/PanNuke/fold1/types.csv @@ -0,0 +1,2524 @@ +img,type +1_0.png,Breast +1_1.png,Breast +1_2.png,Breast +1_3.png,Breast +1_4.png,Breast +1_5.png,Breast +1_6.png,Breast +1_7.png,Breast +1_8.png,Breast +1_9.png,Breast +1_10.png,Breast +1_11.png,Breast +1_12.png,Breast +1_13.png,Breast +1_14.png,Breast +1_15.png,Breast +1_16.png,Breast +1_17.png,Breast +1_18.png,Breast +1_19.png,Breast +1_20.png,Breast +1_21.png,Breast +1_22.png,Breast +1_23.png,Breast +1_24.png,Breast +1_25.png,Breast +1_26.png,Breast +1_27.png,Breast +1_28.png,Breast +1_29.png,Breast +1_30.png,Breast +1_31.png,Breast +1_32.png,Breast +1_33.png,Breast +1_34.png,Breast +1_35.png,Breast +1_36.png,Breast +1_37.png,Breast +1_38.png,Breast +1_39.png,Breast +1_40.png,Breast +1_41.png,Breast +1_42.png,Breast +1_43.png,Breast +1_44.png,Breast +1_45.png,Breast +1_46.png,Breast +1_47.png,Breast +1_48.png,Breast +1_49.png,Breast +1_50.png,Breast +1_51.png,Breast +1_52.png,Breast +1_53.png,Breast +1_54.png,Breast +1_55.png,Breast +1_56.png,Breast +1_57.png,Breast +1_58.png,Breast +1_59.png,Breast +1_60.png,Breast +1_61.png,Breast +1_62.png,Breast +1_63.png,Breast +1_64.png,Breast +1_65.png,Breast +1_66.png,Breast +1_67.png,Breast +1_68.png,Breast +1_69.png,Breast +1_70.png,Breast +1_71.png,Breast +1_72.png,Breast +1_73.png,Breast +1_74.png,Breast +1_75.png,Breast +1_76.png,Breast +1_77.png,Breast +1_78.png,Breast +1_79.png,Breast +1_80.png,Breast +1_81.png,Breast +1_82.png,Breast +1_83.png,Breast +1_84.png,Breast +1_85.png,Breast +1_86.png,Breast +1_87.png,Breast +1_88.png,Breast +1_89.png,Breast +1_90.png,Breast +1_91.png,Breast +1_92.png,Breast +1_93.png,Breast +1_94.png,Breast +1_95.png,Breast +1_96.png,Breast +1_97.png,Breast +1_98.png,Breast +1_99.png,Breast +1_100.png,Breast +1_101.png,Breast +1_102.png,Breast +1_103.png,Breast +1_104.png,Breast +1_105.png,Breast +1_106.png,Breast +1_107.png,Breast +1_108.png,Breast +1_109.png,Breast +1_110.png,Breast +1_111.png,Breast +1_112.png,Breast +1_113.png,Breast +1_114.png,Breast +1_115.png,Breast +1_116.png,Breast +1_117.png,Breast +1_118.png,Breast +1_119.png,Breast +1_120.png,Breast +1_121.png,Breast +1_122.png,Breast +1_123.png,Breast +1_124.png,Breast +1_125.png,Breast +1_126.png,Breast +1_127.png,Breast +1_128.png,Breast +1_129.png,Breast +1_130.png,Breast +1_131.png,Breast +1_132.png,Breast +1_133.png,Breast +1_134.png,Breast +1_135.png,Breast +1_136.png,Breast +1_137.png,Breast +1_138.png,Breast +1_139.png,Breast 
+1_140.png,Breast +1_141.png,Breast +1_142.png,Breast +1_143.png,Breast +1_144.png,Breast +1_145.png,Breast +1_146.png,Breast +1_147.png,Breast +1_148.png,Breast +1_149.png,Breast +1_150.png,Breast +1_151.png,Breast +1_152.png,Breast +1_153.png,Breast +1_154.png,Breast +1_155.png,Breast +1_156.png,Breast +1_157.png,Breast +1_158.png,Breast +1_159.png,Breast +1_160.png,Breast +1_161.png,Breast +1_162.png,Breast +1_163.png,Breast +1_164.png,Breast +1_165.png,Breast +1_166.png,Breast +1_167.png,Breast +1_168.png,Breast +1_169.png,Breast +1_170.png,Breast +1_171.png,Breast +1_172.png,Breast +1_173.png,Breast +1_174.png,Breast +1_175.png,Breast +1_176.png,Breast +1_177.png,Breast +1_178.png,Breast +1_179.png,Breast +1_180.png,Breast +1_181.png,Breast +1_182.png,Breast +1_183.png,Breast +1_184.png,Breast +1_185.png,Breast +1_186.png,Breast +1_187.png,Breast +1_188.png,Breast +1_189.png,Breast +1_190.png,Breast +1_191.png,Breast +1_192.png,Breast +1_193.png,Breast +1_194.png,Breast +1_195.png,Breast +1_196.png,Breast +1_197.png,Breast +1_198.png,Breast +1_199.png,Breast +1_200.png,Breast +1_201.png,Breast +1_202.png,Breast +1_203.png,Breast +1_204.png,Breast +1_205.png,Breast +1_206.png,Breast +1_207.png,Breast +1_208.png,Breast +1_209.png,Breast +1_210.png,Breast +1_211.png,Breast +1_212.png,Breast +1_213.png,Breast +1_214.png,Breast +1_215.png,Breast +1_216.png,Breast +1_217.png,Breast +1_218.png,Breast +1_219.png,Breast +1_220.png,Breast +1_221.png,Breast +1_222.png,Breast +1_223.png,Breast +1_224.png,Breast +1_225.png,Breast +1_226.png,Breast +1_227.png,Breast +1_228.png,Breast +1_229.png,Breast +1_230.png,Breast +1_231.png,Breast +1_232.png,Breast +1_233.png,Breast +1_234.png,Breast +1_235.png,Breast +1_236.png,Breast +1_237.png,Breast +1_238.png,Breast +1_239.png,Breast +1_240.png,Breast +1_241.png,Breast +1_242.png,Breast +1_243.png,Breast +1_244.png,Breast +1_245.png,Breast +1_246.png,Breast +1_247.png,Breast +1_248.png,Breast +1_249.png,Breast +1_250.png,Breast +1_251.png,Breast +1_252.png,Breast +1_253.png,Breast +1_254.png,Breast +1_255.png,Breast +1_256.png,Breast +1_257.png,Breast +1_258.png,Breast +1_259.png,Breast +1_260.png,Breast +1_261.png,Breast +1_262.png,Breast +1_263.png,Breast +1_264.png,Breast +1_265.png,Breast +1_266.png,Breast +1_267.png,Breast +1_268.png,Breast +1_269.png,Breast +1_270.png,Breast +1_271.png,Breast +1_272.png,Breast +1_273.png,Breast +1_274.png,Breast +1_275.png,Breast +1_276.png,Breast +1_277.png,Breast +1_278.png,Breast +1_279.png,Breast +1_280.png,Breast +1_281.png,Breast +1_282.png,Breast +1_283.png,Breast +1_284.png,Breast +1_285.png,Breast +1_286.png,Breast +1_287.png,Breast +1_288.png,Breast +1_289.png,Breast +1_290.png,Breast +1_291.png,Breast +1_292.png,Breast +1_293.png,Breast +1_294.png,Breast +1_295.png,Breast +1_296.png,Breast +1_297.png,Breast +1_298.png,Breast +1_299.png,Breast +1_300.png,Breast +1_301.png,Breast +1_302.png,Breast +1_303.png,Breast +1_304.png,Breast +1_305.png,Breast +1_306.png,Breast +1_307.png,Breast +1_308.png,Breast +1_309.png,Breast +1_310.png,Breast +1_311.png,Breast +1_312.png,Breast +1_313.png,Breast +1_314.png,Breast +1_315.png,Breast +1_316.png,Breast +1_317.png,Breast +1_318.png,Breast +1_319.png,Breast +1_320.png,Breast +1_321.png,Breast +1_322.png,Breast +1_323.png,Breast +1_324.png,Breast +1_325.png,Breast +1_326.png,Breast +1_327.png,Breast +1_328.png,Breast +1_329.png,Breast +1_330.png,Breast +1_331.png,Breast +1_332.png,Breast +1_333.png,Breast +1_334.png,Breast +1_335.png,Breast +1_336.png,Breast 
+1_337.png,Breast +1_338.png,Breast +1_339.png,Breast +1_340.png,Breast +1_341.png,Breast +1_342.png,Breast +1_343.png,Breast +1_344.png,Breast +1_345.png,Breast +1_346.png,Breast +1_347.png,Breast +1_348.png,Breast +1_349.png,Breast +1_350.png,Breast +1_351.png,Breast +1_352.png,Breast +1_353.png,Breast +1_354.png,Breast +1_355.png,Breast +1_356.png,Breast +1_357.png,Breast +1_358.png,Breast +1_359.png,Breast +1_360.png,Breast +1_361.png,Breast +1_362.png,Breast +1_363.png,Breast +1_364.png,Breast +1_365.png,Breast +1_366.png,Breast +1_367.png,Breast +1_368.png,Breast +1_369.png,Breast +1_370.png,Breast +1_371.png,Breast +1_372.png,Breast +1_373.png,Breast +1_374.png,Breast +1_375.png,Breast +1_376.png,Breast +1_377.png,Breast +1_378.png,Breast +1_379.png,Breast +1_380.png,Breast +1_381.png,Breast +1_382.png,Breast +1_383.png,Breast +1_384.png,Breast +1_385.png,Breast +1_386.png,Breast +1_387.png,Breast +1_388.png,Breast +1_389.png,Breast +1_390.png,Breast +1_391.png,Breast +1_392.png,Breast +1_393.png,Breast +1_394.png,Breast +1_395.png,Breast +1_396.png,Breast +1_397.png,Breast +1_398.png,Breast +1_399.png,Breast +1_400.png,Breast +1_401.png,Breast +1_402.png,Breast +1_403.png,Breast +1_404.png,Breast +1_405.png,Breast +1_406.png,Breast +1_407.png,Breast +1_408.png,Breast +1_409.png,Breast +1_410.png,Breast +1_411.png,Breast +1_412.png,Breast +1_413.png,Breast +1_414.png,Breast +1_415.png,Breast +1_416.png,Breast +1_417.png,Breast +1_418.png,Breast +1_419.png,Breast +1_420.png,Breast +1_421.png,Breast +1_422.png,Breast +1_423.png,Breast +1_424.png,Breast +1_425.png,Breast +1_426.png,Breast +1_427.png,Breast +1_428.png,Breast +1_429.png,Breast +1_430.png,Breast +1_431.png,Breast +1_432.png,Breast +1_433.png,Breast +1_434.png,Breast +1_435.png,Breast +1_436.png,Breast +1_437.png,Breast +1_438.png,Breast +1_439.png,Breast +1_440.png,Breast +1_441.png,Breast +1_442.png,Breast +1_443.png,Breast +1_444.png,Breast +1_445.png,Breast +1_446.png,Breast +1_447.png,Breast +1_448.png,Breast +1_449.png,Breast +1_450.png,Breast +1_451.png,Breast +1_452.png,Breast +1_453.png,Breast +1_454.png,Breast +1_455.png,Breast +1_456.png,Breast +1_457.png,Breast +1_458.png,Breast +1_459.png,Breast +1_460.png,Breast +1_461.png,Breast +1_462.png,Breast +1_463.png,Breast +1_464.png,Breast +1_465.png,Breast +1_466.png,Breast +1_467.png,Breast +1_468.png,Breast +1_469.png,Breast +1_470.png,Breast +1_471.png,Breast +1_472.png,Breast +1_473.png,Breast +1_474.png,Breast +1_475.png,Breast +1_476.png,Breast +1_477.png,Breast +1_478.png,Breast +1_479.png,Breast +1_480.png,Breast +1_481.png,Breast +1_482.png,Breast +1_483.png,Breast +1_484.png,Breast +1_485.png,Breast +1_486.png,Breast +1_487.png,Breast +1_488.png,Breast +1_489.png,Breast +1_490.png,Breast +1_491.png,Breast +1_492.png,Breast +1_493.png,Breast +1_494.png,Breast +1_495.png,Breast +1_496.png,Breast +1_497.png,Breast +1_498.png,Breast +1_499.png,Breast +1_500.png,Breast +1_501.png,Breast +1_502.png,Breast +1_503.png,Breast +1_504.png,Breast +1_505.png,Breast +1_506.png,Breast +1_507.png,Breast +1_508.png,Breast +1_509.png,Breast +1_510.png,Breast +1_511.png,Breast +1_512.png,Breast +1_513.png,Breast +1_514.png,Breast +1_515.png,Breast +1_516.png,Breast +1_517.png,Breast +1_518.png,Breast +1_519.png,Breast +1_520.png,Breast +1_521.png,Breast +1_522.png,Breast +1_523.png,Breast +1_524.png,Breast +1_525.png,Breast +1_526.png,Breast +1_527.png,Breast +1_528.png,Breast +1_529.png,Breast +1_530.png,Breast +1_531.png,Breast +1_532.png,Breast +1_533.png,Breast 
+1_534.png,Breast +1_535.png,Breast +1_536.png,Breast +1_537.png,Breast +1_538.png,Breast +1_539.png,Breast +1_540.png,Breast +1_541.png,Breast +1_542.png,Breast +1_543.png,Breast +1_544.png,Breast +1_545.png,Breast +1_546.png,Breast +1_547.png,Breast +1_548.png,Breast +1_549.png,Breast +1_550.png,Breast +1_551.png,Breast +1_552.png,Breast +1_553.png,Breast +1_554.png,Breast +1_555.png,Breast +1_556.png,Breast +1_557.png,Breast +1_558.png,Breast +1_559.png,Breast +1_560.png,Breast +1_561.png,Breast +1_562.png,Breast +1_563.png,Breast +1_564.png,Breast +1_565.png,Breast +1_566.png,Breast +1_567.png,Breast +1_568.png,Breast +1_569.png,Breast +1_570.png,Breast +1_571.png,Breast +1_572.png,Breast +1_573.png,Breast +1_574.png,Breast +1_575.png,Breast +1_576.png,Breast +1_577.png,Breast +1_578.png,Breast +1_579.png,Breast +1_580.png,Breast +1_581.png,Breast +1_582.png,Breast +1_583.png,Breast +1_584.png,Breast +1_585.png,Breast +1_586.png,Breast +1_587.png,Breast +1_588.png,Breast +1_589.png,Breast +1_590.png,Breast +1_591.png,Breast +1_592.png,Breast +1_593.png,Breast +1_594.png,Breast +1_595.png,Breast +1_596.png,Breast +1_597.png,Breast +1_598.png,Breast +1_599.png,Breast +1_600.png,Breast +1_601.png,Breast +1_602.png,Breast +1_603.png,Breast +1_604.png,Breast +1_605.png,Breast +1_606.png,Breast +1_607.png,Breast +1_608.png,Breast +1_609.png,Breast +1_610.png,Breast +1_611.png,Breast +1_612.png,Breast +1_613.png,Breast +1_614.png,Breast +1_615.png,Breast +1_616.png,Breast +1_617.png,Breast +1_618.png,Breast +1_619.png,Breast +1_620.png,Breast +1_621.png,Breast +1_622.png,Breast +1_623.png,Breast +1_624.png,Breast +1_625.png,Breast +1_626.png,Breast +1_627.png,Breast +1_628.png,Breast +1_629.png,Breast +1_630.png,Breast +1_631.png,Breast +1_632.png,Breast +1_633.png,Breast +1_634.png,Breast +1_635.png,Breast +1_636.png,Breast +1_637.png,Breast +1_638.png,Breast +1_639.png,Breast +1_640.png,Breast +1_641.png,Breast +1_642.png,Breast +1_643.png,Breast +1_644.png,Breast +1_645.png,Breast +1_646.png,Breast +1_647.png,Breast +1_648.png,Breast +1_649.png,Breast +1_650.png,Breast +1_651.png,Breast +1_652.png,Breast +1_653.png,Breast +1_654.png,Breast +1_655.png,Breast +1_656.png,Breast +1_657.png,Breast +1_658.png,Breast +1_659.png,Breast +1_660.png,Breast +1_661.png,Breast +1_662.png,Breast +1_663.png,Breast +1_664.png,Breast +1_665.png,Breast +1_666.png,Breast +1_667.png,Breast +1_668.png,Breast +1_669.png,Breast +1_670.png,Breast +1_671.png,Breast +1_672.png,Breast +1_673.png,Breast +1_674.png,Breast +1_675.png,Breast +1_676.png,Breast +1_677.png,Colon +1_678.png,Colon +1_679.png,Colon +1_680.png,Colon +1_681.png,Colon +1_682.png,Colon +1_683.png,Colon +1_684.png,Colon +1_685.png,Colon +1_686.png,Colon +1_687.png,Colon +1_688.png,Colon +1_689.png,Colon +1_690.png,Colon +1_691.png,Colon +1_692.png,Colon +1_693.png,Colon +1_694.png,Colon +1_695.png,Colon +1_696.png,Colon +1_697.png,Colon +1_698.png,Colon +1_699.png,Colon +1_700.png,Colon +1_701.png,Colon +1_702.png,Colon +1_703.png,Colon +1_704.png,Colon +1_705.png,Colon +1_706.png,Colon +1_707.png,Colon +1_708.png,Colon +1_709.png,Colon +1_710.png,Colon +1_711.png,Colon +1_712.png,Colon +1_713.png,Colon +1_714.png,Colon +1_715.png,Colon +1_716.png,Colon +1_717.png,Colon +1_718.png,Colon +1_719.png,Colon +1_720.png,Colon +1_721.png,Colon +1_722.png,Colon +1_723.png,Colon +1_724.png,Colon +1_725.png,Colon +1_726.png,Colon +1_727.png,Colon +1_728.png,Colon +1_729.png,Colon +1_730.png,Colon +1_731.png,Colon +1_732.png,Colon +1_733.png,Colon 
+1_734.png,Colon +1_735.png,Colon +1_736.png,Colon +1_737.png,Colon +1_738.png,Colon +1_739.png,Colon +1_740.png,Colon +1_741.png,Colon +1_742.png,Colon +1_743.png,Colon +1_744.png,Colon +1_745.png,Colon +1_746.png,Colon +1_747.png,Colon +1_748.png,Colon +1_749.png,Colon +1_750.png,Colon +1_751.png,Colon +1_752.png,Colon +1_753.png,Colon +1_754.png,Colon +1_755.png,Colon +1_756.png,Colon +1_757.png,Colon +1_758.png,Colon +1_759.png,Colon +1_760.png,Colon +1_761.png,Colon +1_762.png,Colon +1_763.png,Colon +1_764.png,Colon +1_765.png,Colon +1_766.png,Colon +1_767.png,Colon +1_768.png,Colon +1_769.png,Colon +1_770.png,Colon +1_771.png,Lung +1_772.png,Lung +1_773.png,Lung +1_774.png,Lung +1_775.png,Lung +1_776.png,Lung +1_777.png,Lung +1_778.png,Lung +1_779.png,Lung +1_780.png,Breast +1_781.png,Breast +1_782.png,Breast +1_783.png,Breast +1_784.png,Breast +1_785.png,Breast +1_786.png,Breast +1_787.png,Breast +1_788.png,Breast +1_789.png,Breast +1_790.png,Breast +1_791.png,Breast +1_792.png,Breast +1_793.png,Breast +1_794.png,Colon +1_795.png,Colon +1_796.png,Colon +1_797.png,Colon +1_798.png,Kidney +1_799.png,Kidney +1_800.png,Kidney +1_801.png,Kidney +1_802.png,Kidney +1_803.png,Kidney +1_804.png,Kidney +1_805.png,Kidney +1_806.png,Kidney +1_807.png,Kidney +1_808.png,Kidney +1_809.png,Kidney +1_810.png,Kidney +1_811.png,Kidney +1_812.png,Kidney +1_813.png,Prostate +1_814.png,Prostate +1_815.png,Bladder +1_816.png,Bladder +1_817.png,Bladder +1_818.png,Bladder +1_819.png,Bladder +1_820.png,Breast +1_821.png,Breast +1_822.png,Breast +1_823.png,Breast +1_824.png,Bladder +1_825.png,Bladder +1_826.png,Bladder +1_827.png,Bladder +1_828.png,Bladder +1_829.png,Prostate +1_830.png,Prostate +1_831.png,Prostate +1_832.png,Kidney +1_833.png,Kidney +1_834.png,Kidney +1_835.png,Kidney +1_836.png,Kidney +1_837.png,Kidney +1_838.png,Kidney +1_839.png,Kidney +1_840.png,Kidney +1_841.png,Kidney +1_842.png,Kidney +1_843.png,Kidney +1_844.png,Kidney +1_845.png,Stomach +1_846.png,Stomach +1_847.png,Stomach +1_848.png,Stomach +1_849.png,Colon +1_850.png,Colon +1_851.png,Colon +1_852.png,Colon +1_853.png,Colon +1_854.png,Colon +1_855.png,Stomach +1_856.png,Stomach +1_857.png,Stomach +1_858.png,Stomach +1_859.png,Stomach +1_860.png,Ovarian +1_861.png,Ovarian +1_862.png,Ovarian +1_863.png,Ovarian +1_864.png,Ovarian +1_865.png,Esophagus +1_866.png,Esophagus +1_867.png,Esophagus +1_868.png,Esophagus +1_869.png,Esophagus +1_870.png,Esophagus +1_871.png,Esophagus +1_872.png,Esophagus +1_873.png,Esophagus +1_874.png,Esophagus +1_875.png,Esophagus +1_876.png,Esophagus +1_877.png,Esophagus +1_878.png,Esophagus +1_879.png,Esophagus +1_880.png,Esophagus +1_881.png,Esophagus +1_882.png,Esophagus +1_883.png,Esophagus +1_884.png,Esophagus +1_885.png,Esophagus +1_886.png,Esophagus +1_887.png,Esophagus +1_888.png,Esophagus +1_889.png,Esophagus +1_890.png,Esophagus +1_891.png,Esophagus +1_892.png,Esophagus +1_893.png,Pancreatic +1_894.png,Pancreatic +1_895.png,Lung +1_896.png,Lung +1_897.png,Lung +1_898.png,Lung +1_899.png,Lung +1_900.png,Lung +1_901.png,Lung +1_902.png,Lung +1_903.png,Lung +1_904.png,Lung +1_905.png,Lung +1_906.png,Lung +1_907.png,Lung +1_908.png,Lung +1_909.png,Lung +1_910.png,Lung +1_911.png,Lung +1_912.png,Lung +1_913.png,Lung +1_914.png,Lung +1_915.png,Lung +1_916.png,Lung +1_917.png,Lung +1_918.png,Lung +1_919.png,Lung +1_920.png,Lung +1_921.png,Lung +1_922.png,Lung +1_923.png,Lung +1_924.png,Lung +1_925.png,Lung +1_926.png,Lung +1_927.png,Lung +1_928.png,Lung +1_929.png,Lung +1_930.png,Uterus +1_931.png,Uterus 
+1_932.png,Uterus +1_933.png,Thyroid +1_934.png,Thyroid +1_935.png,Thyroid +1_936.png,Thyroid +1_937.png,Thyroid +1_938.png,Thyroid +1_939.png,Thyroid +1_940.png,Thyroid +1_941.png,Thyroid +1_942.png,Thyroid +1_943.png,Thyroid +1_944.png,Thyroid +1_945.png,Thyroid +1_946.png,Thyroid +1_947.png,Thyroid +1_948.png,Thyroid +1_949.png,Thyroid +1_950.png,Thyroid +1_951.png,Thyroid +1_952.png,Skin +1_953.png,Skin +1_954.png,Skin +1_955.png,Skin +1_956.png,Skin +1_957.png,Skin +1_958.png,Skin +1_959.png,Skin +1_960.png,Skin +1_961.png,Skin +1_962.png,Skin +1_963.png,Skin +1_964.png,Skin +1_965.png,Skin +1_966.png,Skin +1_967.png,Skin +1_968.png,Cervix +1_969.png,Cervix +1_970.png,Cervix +1_971.png,Cervix +1_972.png,Thyroid +1_973.png,Thyroid +1_974.png,Thyroid +1_975.png,Thyroid +1_976.png,Thyroid +1_977.png,Thyroid +1_978.png,Thyroid +1_979.png,Thyroid +1_980.png,Thyroid +1_981.png,Thyroid +1_982.png,Thyroid +1_983.png,Thyroid +1_984.png,Thyroid +1_985.png,Thyroid +1_986.png,Thyroid +1_987.png,Thyroid +1_988.png,Thyroid +1_989.png,Thyroid +1_990.png,Esophagus +1_991.png,Esophagus +1_992.png,Esophagus +1_993.png,Esophagus +1_994.png,Esophagus +1_995.png,Esophagus +1_996.png,Esophagus +1_997.png,Esophagus +1_998.png,Esophagus +1_999.png,Esophagus +1_1000.png,Esophagus +1_1001.png,Esophagus +1_1002.png,Esophagus +1_1003.png,Esophagus +1_1004.png,Esophagus +1_1005.png,Esophagus +1_1006.png,Esophagus +1_1007.png,Esophagus +1_1008.png,Esophagus +1_1009.png,Esophagus +1_1010.png,Esophagus +1_1011.png,Esophagus +1_1012.png,Esophagus +1_1013.png,Esophagus +1_1014.png,Esophagus +1_1015.png,Esophagus +1_1016.png,Esophagus +1_1017.png,Esophagus +1_1018.png,Esophagus +1_1019.png,Esophagus +1_1020.png,Esophagus +1_1021.png,Esophagus +1_1022.png,Esophagus +1_1023.png,Cervix +1_1024.png,Cervix +1_1025.png,Cervix +1_1026.png,Cervix +1_1027.png,Cervix +1_1028.png,Cervix +1_1029.png,Cervix +1_1030.png,Cervix +1_1031.png,Adrenal_gland +1_1032.png,Adrenal_gland +1_1033.png,Adrenal_gland +1_1034.png,Adrenal_gland +1_1035.png,Adrenal_gland +1_1036.png,Adrenal_gland +1_1037.png,Adrenal_gland +1_1038.png,Adrenal_gland +1_1039.png,Adrenal_gland +1_1040.png,Adrenal_gland +1_1041.png,Adrenal_gland +1_1042.png,Adrenal_gland +1_1043.png,Adrenal_gland +1_1044.png,Adrenal_gland +1_1045.png,Adrenal_gland +1_1046.png,Adrenal_gland +1_1047.png,Adrenal_gland +1_1048.png,Adrenal_gland +1_1049.png,Adrenal_gland +1_1050.png,Adrenal_gland +1_1051.png,Adrenal_gland +1_1052.png,Adrenal_gland +1_1053.png,Adrenal_gland +1_1054.png,Adrenal_gland +1_1055.png,Adrenal_gland +1_1056.png,Adrenal_gland +1_1057.png,Adrenal_gland +1_1058.png,Adrenal_gland +1_1059.png,Adrenal_gland +1_1060.png,Adrenal_gland +1_1061.png,Adrenal_gland +1_1062.png,Adrenal_gland +1_1063.png,Adrenal_gland +1_1064.png,Adrenal_gland +1_1065.png,Adrenal_gland +1_1066.png,Adrenal_gland +1_1067.png,Adrenal_gland +1_1068.png,Adrenal_gland +1_1069.png,Adrenal_gland +1_1070.png,Adrenal_gland +1_1071.png,Adrenal_gland +1_1072.png,Adrenal_gland +1_1073.png,Adrenal_gland +1_1074.png,Adrenal_gland +1_1075.png,Esophagus +1_1076.png,Esophagus +1_1077.png,Esophagus +1_1078.png,Esophagus +1_1079.png,Esophagus +1_1080.png,Esophagus +1_1081.png,Esophagus +1_1082.png,Esophagus +1_1083.png,Esophagus +1_1084.png,Esophagus +1_1085.png,Esophagus +1_1086.png,Esophagus +1_1087.png,Esophagus +1_1088.png,Esophagus +1_1089.png,Esophagus +1_1090.png,Esophagus +1_1091.png,Esophagus +1_1092.png,Esophagus +1_1093.png,Esophagus +1_1094.png,Esophagus +1_1095.png,Esophagus +1_1096.png,Esophagus 
+1_1097.png,Esophagus +1_1098.png,Esophagus +1_1099.png,Esophagus +1_1100.png,Esophagus +1_1101.png,Esophagus +1_1102.png,Esophagus +1_1103.png,Esophagus +1_1104.png,Esophagus +1_1105.png,Esophagus +1_1106.png,Esophagus +1_1107.png,Esophagus +1_1108.png,Adrenal_gland +1_1109.png,Adrenal_gland +1_1110.png,Adrenal_gland +1_1111.png,Adrenal_gland +1_1112.png,Adrenal_gland +1_1113.png,Adrenal_gland +1_1114.png,Adrenal_gland +1_1115.png,Adrenal_gland +1_1116.png,Adrenal_gland +1_1117.png,Adrenal_gland +1_1118.png,Adrenal_gland +1_1119.png,Adrenal_gland +1_1120.png,Adrenal_gland +1_1121.png,Adrenal_gland +1_1122.png,Adrenal_gland +1_1123.png,Adrenal_gland +1_1124.png,Pancreatic +1_1125.png,Pancreatic +1_1126.png,Pancreatic +1_1127.png,Pancreatic +1_1128.png,Pancreatic +1_1129.png,Pancreatic +1_1130.png,Pancreatic +1_1131.png,Pancreatic +1_1132.png,Pancreatic +1_1133.png,Pancreatic +1_1134.png,Pancreatic +1_1135.png,Pancreatic +1_1136.png,Pancreatic +1_1137.png,Pancreatic +1_1138.png,Pancreatic +1_1139.png,Pancreatic +1_1140.png,Pancreatic +1_1141.png,Pancreatic +1_1142.png,Pancreatic +1_1143.png,Pancreatic +1_1144.png,Pancreatic +1_1145.png,Pancreatic +1_1146.png,Pancreatic +1_1147.png,Pancreatic +1_1148.png,Pancreatic +1_1149.png,Pancreatic +1_1150.png,Adrenal_gland +1_1151.png,Adrenal_gland +1_1152.png,Adrenal_gland +1_1153.png,Adrenal_gland +1_1154.png,Adrenal_gland +1_1155.png,Adrenal_gland +1_1156.png,Adrenal_gland +1_1157.png,Adrenal_gland +1_1158.png,Adrenal_gland +1_1159.png,Adrenal_gland +1_1160.png,Adrenal_gland +1_1161.png,Adrenal_gland +1_1162.png,Adrenal_gland +1_1163.png,Adrenal_gland +1_1164.png,Adrenal_gland +1_1165.png,Adrenal_gland +1_1166.png,Adrenal_gland +1_1167.png,Adrenal_gland +1_1168.png,Adrenal_gland +1_1169.png,Adrenal_gland +1_1170.png,Adrenal_gland +1_1171.png,Cervix +1_1172.png,Cervix +1_1173.png,Cervix +1_1174.png,Cervix +1_1175.png,Cervix +1_1176.png,Cervix +1_1177.png,Cervix +1_1178.png,Cervix +1_1179.png,Cervix +1_1180.png,Cervix +1_1181.png,Cervix +1_1182.png,Cervix +1_1183.png,Cervix +1_1184.png,Cervix +1_1185.png,Cervix +1_1186.png,Cervix +1_1187.png,Cervix +1_1188.png,Cervix +1_1189.png,Cervix +1_1190.png,Cervix +1_1191.png,Cervix +1_1192.png,Cervix +1_1193.png,Cervix +1_1194.png,Bile-duct +1_1195.png,Bile-duct +1_1196.png,Bile-duct +1_1197.png,Bile-duct +1_1198.png,Bile-duct +1_1199.png,Bile-duct +1_1200.png,Bile-duct +1_1201.png,Bile-duct +1_1202.png,Bile-duct +1_1203.png,Bile-duct +1_1204.png,Bile-duct +1_1205.png,Bile-duct +1_1206.png,Bile-duct +1_1207.png,Bile-duct +1_1208.png,Bile-duct +1_1209.png,Bile-duct +1_1210.png,Bile-duct +1_1211.png,Bile-duct +1_1212.png,Bile-duct +1_1213.png,Testis +1_1214.png,Testis +1_1215.png,Testis +1_1216.png,Testis +1_1217.png,Testis +1_1218.png,Testis +1_1219.png,Testis +1_1220.png,Testis +1_1221.png,Testis +1_1222.png,Testis +1_1223.png,Testis +1_1224.png,Testis +1_1225.png,Testis +1_1226.png,Testis +1_1227.png,Testis +1_1228.png,Testis +1_1229.png,Testis +1_1230.png,Testis +1_1231.png,Testis +1_1232.png,Testis +1_1233.png,Testis +1_1234.png,Testis +1_1235.png,Testis +1_1236.png,Testis +1_1237.png,Testis +1_1238.png,Testis +1_1239.png,Testis +1_1240.png,Testis +1_1241.png,Testis +1_1242.png,Testis +1_1243.png,Testis +1_1244.png,Testis +1_1245.png,Testis +1_1246.png,Testis +1_1247.png,Testis +1_1248.png,Testis +1_1249.png,Testis +1_1250.png,Testis +1_1251.png,Testis +1_1252.png,Testis +1_1253.png,Testis +1_1254.png,Testis +1_1255.png,Testis +1_1256.png,Testis +1_1257.png,Testis +1_1258.png,Testis +1_1259.png,Testis 
+1_1260.png,Testis +1_1261.png,Testis +1_1262.png,Testis +1_1263.png,Testis +1_1264.png,Testis +1_1265.png,Bile-duct +1_1266.png,Bile-duct +1_1267.png,Bile-duct +1_1268.png,Bile-duct +1_1269.png,Bile-duct +1_1270.png,Bile-duct +1_1271.png,Bile-duct +1_1272.png,Bile-duct +1_1273.png,Bile-duct +1_1274.png,Bile-duct +1_1275.png,Bile-duct +1_1276.png,Bile-duct +1_1277.png,Bile-duct +1_1278.png,Bile-duct +1_1279.png,Bile-duct +1_1280.png,Bile-duct +1_1281.png,Bile-duct +1_1282.png,Bile-duct +1_1283.png,Bile-duct +1_1284.png,Bile-duct +1_1285.png,Bile-duct +1_1286.png,Bile-duct +1_1287.png,Bile-duct +1_1288.png,Bile-duct +1_1289.png,Bile-duct +1_1290.png,Bile-duct +1_1291.png,Bile-duct +1_1292.png,Bile-duct +1_1293.png,Bile-duct +1_1294.png,Bile-duct +1_1295.png,Colon +1_1296.png,Colon +1_1297.png,Colon +1_1298.png,Colon +1_1299.png,Colon +1_1300.png,Colon +1_1301.png,Colon +1_1302.png,Colon +1_1303.png,Colon +1_1304.png,Colon +1_1305.png,Colon +1_1306.png,Colon +1_1307.png,Colon +1_1308.png,Colon +1_1309.png,Colon +1_1310.png,Colon +1_1311.png,Colon +1_1312.png,Colon +1_1313.png,Colon +1_1314.png,Colon +1_1315.png,Colon +1_1316.png,Colon +1_1317.png,Colon +1_1318.png,Colon +1_1319.png,Colon +1_1320.png,Colon +1_1321.png,Colon +1_1322.png,Colon +1_1323.png,Colon +1_1324.png,Colon +1_1325.png,Colon +1_1326.png,Colon +1_1327.png,Colon +1_1328.png,Colon +1_1329.png,Colon +1_1330.png,Colon +1_1331.png,Colon +1_1332.png,Adrenal_gland +1_1333.png,Adrenal_gland +1_1334.png,Adrenal_gland +1_1335.png,Adrenal_gland +1_1336.png,Adrenal_gland +1_1337.png,Adrenal_gland +1_1338.png,Adrenal_gland +1_1339.png,Adrenal_gland +1_1340.png,Adrenal_gland +1_1341.png,Adrenal_gland +1_1342.png,Adrenal_gland +1_1343.png,Adrenal_gland +1_1344.png,Adrenal_gland +1_1345.png,Adrenal_gland +1_1346.png,Adrenal_gland +1_1347.png,Adrenal_gland +1_1348.png,Adrenal_gland +1_1349.png,Adrenal_gland +1_1350.png,Adrenal_gland +1_1351.png,Adrenal_gland +1_1352.png,Adrenal_gland +1_1353.png,Adrenal_gland +1_1354.png,Adrenal_gland +1_1355.png,Adrenal_gland +1_1356.png,Adrenal_gland +1_1357.png,Adrenal_gland +1_1358.png,Adrenal_gland +1_1359.png,Adrenal_gland +1_1360.png,Adrenal_gland +1_1361.png,Adrenal_gland +1_1362.png,Adrenal_gland +1_1363.png,Adrenal_gland +1_1364.png,Adrenal_gland +1_1365.png,Adrenal_gland +1_1366.png,Adrenal_gland +1_1367.png,Adrenal_gland +1_1368.png,Adrenal_gland +1_1369.png,Adrenal_gland +1_1370.png,Adrenal_gland +1_1371.png,Adrenal_gland +1_1372.png,Adrenal_gland +1_1373.png,Adrenal_gland +1_1374.png,Adrenal_gland +1_1375.png,Adrenal_gland +1_1376.png,Adrenal_gland +1_1377.png,Adrenal_gland +1_1378.png,Adrenal_gland +1_1379.png,Adrenal_gland +1_1380.png,Adrenal_gland +1_1381.png,Adrenal_gland +1_1382.png,Adrenal_gland +1_1383.png,Adrenal_gland +1_1384.png,Adrenal_gland +1_1385.png,Adrenal_gland +1_1386.png,Adrenal_gland +1_1387.png,Adrenal_gland +1_1388.png,Adrenal_gland +1_1389.png,Adrenal_gland +1_1390.png,Adrenal_gland +1_1391.png,Adrenal_gland +1_1392.png,Adrenal_gland +1_1393.png,Adrenal_gland +1_1394.png,Adrenal_gland +1_1395.png,Adrenal_gland +1_1396.png,Adrenal_gland +1_1397.png,Adrenal_gland +1_1398.png,Adrenal_gland +1_1399.png,Bile-duct +1_1400.png,Bile-duct +1_1401.png,Bile-duct +1_1402.png,Bile-duct +1_1403.png,Bile-duct +1_1404.png,Bile-duct +1_1405.png,Bile-duct +1_1406.png,Bile-duct +1_1407.png,Bile-duct +1_1408.png,Bile-duct +1_1409.png,Bile-duct +1_1410.png,Bile-duct +1_1411.png,Bile-duct +1_1412.png,Bile-duct +1_1413.png,Bile-duct +1_1414.png,Bile-duct +1_1415.png,Bile-duct 
+1_1416.png,Bile-duct +1_1417.png,Bile-duct +1_1418.png,Bile-duct +1_1419.png,Bile-duct +1_1420.png,Bile-duct +1_1421.png,Bile-duct +1_1422.png,Bile-duct +1_1423.png,Bile-duct +1_1424.png,Bile-duct +1_1425.png,Bile-duct +1_1426.png,Bile-duct +1_1427.png,Bile-duct +1_1428.png,Bile-duct +1_1429.png,Bile-duct +1_1430.png,Bile-duct +1_1431.png,Bile-duct +1_1432.png,Bile-duct +1_1433.png,Bile-duct +1_1434.png,Bile-duct +1_1435.png,Bile-duct +1_1436.png,Bile-duct +1_1437.png,Bile-duct +1_1438.png,Bile-duct +1_1439.png,Bile-duct +1_1440.png,Bile-duct +1_1441.png,Bile-duct +1_1442.png,Bile-duct +1_1443.png,Bile-duct +1_1444.png,Bile-duct +1_1445.png,Bile-duct +1_1446.png,Bile-duct +1_1447.png,Bile-duct +1_1448.png,Bile-duct +1_1449.png,Bile-duct +1_1450.png,Bile-duct +1_1451.png,Bile-duct +1_1452.png,Bile-duct +1_1453.png,Bladder +1_1454.png,Bladder +1_1455.png,Bladder +1_1456.png,Bladder +1_1457.png,Bladder +1_1458.png,Bladder +1_1459.png,Bladder +1_1460.png,Bladder +1_1461.png,Bladder +1_1462.png,Bladder +1_1463.png,Bladder +1_1464.png,Bladder +1_1465.png,Bladder +1_1466.png,Bladder +1_1467.png,Bladder +1_1468.png,Bladder +1_1469.png,Bladder +1_1470.png,Bladder +1_1471.png,Bladder +1_1472.png,Bladder +1_1473.png,Bladder +1_1474.png,Bladder +1_1475.png,Bladder +1_1476.png,Bladder +1_1477.png,Bladder +1_1478.png,Bladder +1_1479.png,Bladder +1_1480.png,Bladder +1_1481.png,Bladder +1_1482.png,Bladder +1_1483.png,Bladder +1_1484.png,Bladder +1_1485.png,Bladder +1_1486.png,Bladder +1_1487.png,Bladder +1_1488.png,Bladder +1_1489.png,Bladder +1_1490.png,Bladder +1_1491.png,Bladder +1_1492.png,Bladder +1_1493.png,Bladder +1_1494.png,Bladder +1_1495.png,Bladder +1_1496.png,Bladder +1_1497.png,Bladder +1_1498.png,Bladder +1_1499.png,Bladder +1_1500.png,Bladder +1_1501.png,Bladder +1_1502.png,Bladder +1_1503.png,Bladder +1_1504.png,Breast +1_1505.png,Breast +1_1506.png,Breast +1_1507.png,Breast +1_1508.png,Breast +1_1509.png,Breast +1_1510.png,Breast +1_1511.png,Breast +1_1512.png,Breast +1_1513.png,Breast +1_1514.png,Breast +1_1515.png,Breast +1_1516.png,Breast +1_1517.png,Breast +1_1518.png,Breast +1_1519.png,Breast +1_1520.png,Breast +1_1521.png,Breast +1_1522.png,Breast +1_1523.png,Breast +1_1524.png,Breast +1_1525.png,Breast +1_1526.png,Breast +1_1527.png,Breast +1_1528.png,Breast +1_1529.png,Breast +1_1530.png,Breast +1_1531.png,Breast +1_1532.png,Breast +1_1533.png,Breast +1_1534.png,Breast +1_1535.png,Breast +1_1536.png,Breast +1_1537.png,Breast +1_1538.png,Breast +1_1539.png,Breast +1_1540.png,Breast +1_1541.png,Breast +1_1542.png,Breast +1_1543.png,Breast +1_1544.png,Breast +1_1545.png,Breast +1_1546.png,Breast +1_1547.png,Breast +1_1548.png,Breast +1_1549.png,Breast +1_1550.png,Breast +1_1551.png,Breast +1_1552.png,Breast +1_1553.png,Breast +1_1554.png,Breast +1_1555.png,Breast +1_1556.png,Breast +1_1557.png,Breast +1_1558.png,Cervix +1_1559.png,Cervix +1_1560.png,Cervix +1_1561.png,Cervix +1_1562.png,Cervix +1_1563.png,Cervix +1_1564.png,Cervix +1_1565.png,Cervix +1_1566.png,Cervix +1_1567.png,Cervix +1_1568.png,Cervix +1_1569.png,Cervix +1_1570.png,Cervix +1_1571.png,Cervix +1_1572.png,Cervix +1_1573.png,Cervix +1_1574.png,Cervix +1_1575.png,Colon +1_1576.png,Colon +1_1577.png,Colon +1_1578.png,Colon +1_1579.png,Colon +1_1580.png,Colon +1_1581.png,Colon +1_1582.png,Colon +1_1583.png,Colon +1_1584.png,Colon +1_1585.png,Colon +1_1586.png,Colon +1_1587.png,Colon +1_1588.png,Colon +1_1589.png,Colon +1_1590.png,Colon +1_1591.png,Colon +1_1592.png,Colon +1_1593.png,Colon +1_1594.png,Colon 
+1_1595.png,Colon +1_1596.png,Colon +1_1597.png,Colon +1_1598.png,Colon +1_1599.png,Colon +1_1600.png,Colon +1_1601.png,Colon +1_1602.png,Colon +1_1603.png,Colon +1_1604.png,Colon +1_1605.png,Colon +1_1606.png,Colon +1_1607.png,Colon +1_1608.png,Colon +1_1609.png,Colon +1_1610.png,Colon +1_1611.png,Colon +1_1612.png,Colon +1_1613.png,Colon +1_1614.png,Colon +1_1615.png,Colon +1_1616.png,Colon +1_1617.png,Colon +1_1618.png,Colon +1_1619.png,Colon +1_1620.png,Colon +1_1621.png,Colon +1_1622.png,Colon +1_1623.png,Colon +1_1624.png,Colon +1_1625.png,Colon +1_1626.png,Colon +1_1627.png,Colon +1_1628.png,Colon +1_1629.png,Colon +1_1630.png,Colon +1_1631.png,Colon +1_1632.png,Colon +1_1633.png,Colon +1_1634.png,Colon +1_1635.png,Colon +1_1636.png,Colon +1_1637.png,Colon +1_1638.png,Colon +1_1639.png,Colon +1_1640.png,Colon +1_1641.png,Colon +1_1642.png,Colon +1_1643.png,Colon +1_1644.png,Colon +1_1645.png,Colon +1_1646.png,Colon +1_1647.png,Colon +1_1648.png,Colon +1_1649.png,Colon +1_1650.png,Colon +1_1651.png,Colon +1_1652.png,Colon +1_1653.png,Colon +1_1654.png,Colon +1_1655.png,Colon +1_1656.png,Colon +1_1657.png,Colon +1_1658.png,Colon +1_1659.png,Colon +1_1660.png,Colon +1_1661.png,Colon +1_1662.png,Colon +1_1663.png,Colon +1_1664.png,Colon +1_1665.png,Colon +1_1666.png,Colon +1_1667.png,Colon +1_1668.png,Colon +1_1669.png,Colon +1_1670.png,Colon +1_1671.png,Colon +1_1672.png,Colon +1_1673.png,Colon +1_1674.png,Colon +1_1675.png,Colon +1_1676.png,Colon +1_1677.png,Colon +1_1678.png,Colon +1_1679.png,Colon +1_1680.png,Colon +1_1681.png,Colon +1_1682.png,Colon +1_1683.png,Colon +1_1684.png,Colon +1_1685.png,Colon +1_1686.png,Colon +1_1687.png,Colon +1_1688.png,Colon +1_1689.png,Colon +1_1690.png,Colon +1_1691.png,Colon +1_1692.png,Colon +1_1693.png,Colon +1_1694.png,Colon +1_1695.png,Colon +1_1696.png,Colon +1_1697.png,Colon +1_1698.png,Colon +1_1699.png,Colon +1_1700.png,Colon +1_1701.png,Colon +1_1702.png,Colon +1_1703.png,Colon +1_1704.png,Colon +1_1705.png,Colon +1_1706.png,Colon +1_1707.png,Colon +1_1708.png,Colon +1_1709.png,Colon +1_1710.png,Colon +1_1711.png,Colon +1_1712.png,Colon +1_1713.png,Colon +1_1714.png,Colon +1_1715.png,Colon +1_1716.png,Colon +1_1717.png,Colon +1_1718.png,Colon +1_1719.png,Colon +1_1720.png,Colon +1_1721.png,Colon +1_1722.png,Colon +1_1723.png,Colon +1_1724.png,Colon +1_1725.png,Colon +1_1726.png,Colon +1_1727.png,Colon +1_1728.png,Colon +1_1729.png,Colon +1_1730.png,Colon +1_1731.png,Colon +1_1732.png,Colon +1_1733.png,Colon +1_1734.png,Colon +1_1735.png,Colon +1_1736.png,Colon +1_1737.png,Colon +1_1738.png,Colon +1_1739.png,Colon +1_1740.png,Colon +1_1741.png,Colon +1_1742.png,Colon +1_1743.png,Colon +1_1744.png,Colon +1_1745.png,Colon +1_1746.png,Colon +1_1747.png,Colon +1_1748.png,Colon +1_1749.png,Colon +1_1750.png,Colon +1_1751.png,Colon +1_1752.png,Colon +1_1753.png,Colon +1_1754.png,Colon +1_1755.png,Colon +1_1756.png,Colon +1_1757.png,Colon +1_1758.png,Colon +1_1759.png,Colon +1_1760.png,Colon +1_1761.png,Colon +1_1762.png,Colon +1_1763.png,Colon +1_1764.png,Colon +1_1765.png,Colon +1_1766.png,Colon +1_1767.png,Colon +1_1768.png,Colon +1_1769.png,Colon +1_1770.png,Colon +1_1771.png,Colon +1_1772.png,Colon +1_1773.png,Colon +1_1774.png,Colon +1_1775.png,Colon +1_1776.png,Colon +1_1777.png,Colon +1_1778.png,Colon +1_1779.png,Colon +1_1780.png,Colon +1_1781.png,Colon +1_1782.png,Colon +1_1783.png,Colon +1_1784.png,Colon +1_1785.png,Colon +1_1786.png,Colon +1_1787.png,Colon +1_1788.png,Colon +1_1789.png,Colon +1_1790.png,Colon +1_1791.png,Colon 
+1_1792.png,Colon +1_1793.png,Colon +1_1794.png,Colon +1_1795.png,Colon +1_1796.png,Colon +1_1797.png,Colon +1_1798.png,Colon +1_1799.png,Colon +1_1800.png,Colon +1_1801.png,Colon +1_1802.png,Colon +1_1803.png,Colon +1_1804.png,Colon +1_1805.png,Colon +1_1806.png,Colon +1_1807.png,Colon +1_1808.png,Colon +1_1809.png,Colon +1_1810.png,Colon +1_1811.png,Esophagus +1_1812.png,Esophagus +1_1813.png,Esophagus +1_1814.png,Esophagus +1_1815.png,Esophagus +1_1816.png,Esophagus +1_1817.png,Esophagus +1_1818.png,Esophagus +1_1819.png,Esophagus +1_1820.png,Esophagus +1_1821.png,Esophagus +1_1822.png,Esophagus +1_1823.png,Esophagus +1_1824.png,Esophagus +1_1825.png,Esophagus +1_1826.png,Esophagus +1_1827.png,Esophagus +1_1828.png,Esophagus +1_1829.png,Esophagus +1_1830.png,Esophagus +1_1831.png,Esophagus +1_1832.png,Esophagus +1_1833.png,Esophagus +1_1834.png,Esophagus +1_1835.png,Esophagus +1_1836.png,Esophagus +1_1837.png,Esophagus +1_1838.png,Esophagus +1_1839.png,Esophagus +1_1840.png,Esophagus +1_1841.png,Esophagus +1_1842.png,Esophagus +1_1843.png,Esophagus +1_1844.png,Esophagus +1_1845.png,HeadNeck +1_1846.png,HeadNeck +1_1847.png,HeadNeck +1_1848.png,HeadNeck +1_1849.png,HeadNeck +1_1850.png,HeadNeck +1_1851.png,HeadNeck +1_1852.png,HeadNeck +1_1853.png,HeadNeck +1_1854.png,HeadNeck +1_1855.png,HeadNeck +1_1856.png,HeadNeck +1_1857.png,HeadNeck +1_1858.png,HeadNeck +1_1859.png,HeadNeck +1_1860.png,HeadNeck +1_1861.png,HeadNeck +1_1862.png,HeadNeck +1_1863.png,HeadNeck +1_1864.png,HeadNeck +1_1865.png,HeadNeck +1_1866.png,HeadNeck +1_1867.png,HeadNeck +1_1868.png,HeadNeck +1_1869.png,HeadNeck +1_1870.png,HeadNeck +1_1871.png,HeadNeck +1_1872.png,HeadNeck +1_1873.png,HeadNeck +1_1874.png,HeadNeck +1_1875.png,HeadNeck +1_1876.png,HeadNeck +1_1877.png,HeadNeck +1_1878.png,HeadNeck +1_1879.png,HeadNeck +1_1880.png,HeadNeck +1_1881.png,HeadNeck +1_1882.png,HeadNeck +1_1883.png,HeadNeck +1_1884.png,HeadNeck +1_1885.png,HeadNeck +1_1886.png,HeadNeck +1_1887.png,HeadNeck +1_1888.png,HeadNeck +1_1889.png,HeadNeck +1_1890.png,HeadNeck +1_1891.png,HeadNeck +1_1892.png,HeadNeck +1_1893.png,HeadNeck +1_1894.png,HeadNeck +1_1895.png,HeadNeck +1_1896.png,HeadNeck +1_1897.png,HeadNeck +1_1898.png,HeadNeck +1_1899.png,HeadNeck +1_1900.png,HeadNeck +1_1901.png,HeadNeck +1_1902.png,HeadNeck +1_1903.png,HeadNeck +1_1904.png,HeadNeck +1_1905.png,HeadNeck +1_1906.png,HeadNeck +1_1907.png,HeadNeck +1_1908.png,HeadNeck +1_1909.png,HeadNeck +1_1910.png,HeadNeck +1_1911.png,HeadNeck +1_1912.png,HeadNeck +1_1913.png,HeadNeck +1_1914.png,HeadNeck +1_1915.png,HeadNeck +1_1916.png,HeadNeck +1_1917.png,HeadNeck +1_1918.png,HeadNeck +1_1919.png,HeadNeck +1_1920.png,HeadNeck +1_1921.png,HeadNeck +1_1922.png,HeadNeck +1_1923.png,HeadNeck +1_1924.png,HeadNeck +1_1925.png,HeadNeck +1_1926.png,HeadNeck +1_1927.png,HeadNeck +1_1928.png,HeadNeck +1_1929.png,HeadNeck +1_1930.png,HeadNeck +1_1931.png,HeadNeck +1_1932.png,HeadNeck +1_1933.png,HeadNeck +1_1934.png,HeadNeck +1_1935.png,HeadNeck +1_1936.png,HeadNeck +1_1937.png,HeadNeck +1_1938.png,HeadNeck +1_1939.png,HeadNeck +1_1940.png,HeadNeck +1_1941.png,HeadNeck +1_1942.png,HeadNeck +1_1943.png,HeadNeck +1_1944.png,HeadNeck +1_1945.png,HeadNeck +1_1946.png,HeadNeck +1_1947.png,HeadNeck +1_1948.png,HeadNeck +1_1949.png,HeadNeck +1_1950.png,HeadNeck +1_1951.png,HeadNeck +1_1952.png,HeadNeck +1_1953.png,HeadNeck +1_1954.png,HeadNeck +1_1955.png,HeadNeck +1_1956.png,HeadNeck +1_1957.png,HeadNeck +1_1958.png,HeadNeck +1_1959.png,HeadNeck +1_1960.png,HeadNeck +1_1961.png,HeadNeck 
+1_1962.png,HeadNeck +1_1963.png,HeadNeck +1_1964.png,HeadNeck +1_1965.png,HeadNeck +1_1966.png,HeadNeck +1_1967.png,HeadNeck +1_1968.png,HeadNeck +1_1969.png,HeadNeck +1_1970.png,HeadNeck +1_1971.png,HeadNeck +1_1972.png,HeadNeck +1_1973.png,HeadNeck +1_1974.png,HeadNeck +1_1975.png,HeadNeck +1_1976.png,HeadNeck +1_1977.png,HeadNeck +1_1978.png,HeadNeck +1_1979.png,HeadNeck +1_1980.png,HeadNeck +1_1981.png,HeadNeck +1_1982.png,HeadNeck +1_1983.png,HeadNeck +1_1984.png,HeadNeck +1_1985.png,HeadNeck +1_1986.png,HeadNeck +1_1987.png,HeadNeck +1_1988.png,HeadNeck +1_1989.png,HeadNeck +1_1990.png,HeadNeck +1_1991.png,HeadNeck +1_1992.png,HeadNeck +1_1993.png,HeadNeck +1_1994.png,HeadNeck +1_1995.png,HeadNeck +1_1996.png,HeadNeck +1_1997.png,HeadNeck +1_1998.png,HeadNeck +1_1999.png,HeadNeck +1_2000.png,HeadNeck +1_2001.png,HeadNeck +1_2002.png,HeadNeck +1_2003.png,HeadNeck +1_2004.png,HeadNeck +1_2005.png,HeadNeck +1_2006.png,HeadNeck +1_2007.png,HeadNeck +1_2008.png,HeadNeck +1_2009.png,HeadNeck +1_2010.png,HeadNeck +1_2011.png,HeadNeck +1_2012.png,HeadNeck +1_2013.png,HeadNeck +1_2014.png,Kidney +1_2015.png,Kidney +1_2016.png,Kidney +1_2017.png,Kidney +1_2018.png,Kidney +1_2019.png,Kidney +1_2020.png,Kidney +1_2021.png,Kidney +1_2022.png,Kidney +1_2023.png,Kidney +1_2024.png,Kidney +1_2025.png,Kidney +1_2026.png,Kidney +1_2027.png,Kidney +1_2028.png,Kidney +1_2029.png,Kidney +1_2030.png,Kidney +1_2031.png,Kidney +1_2032.png,Kidney +1_2033.png,Kidney +1_2034.png,Kidney +1_2035.png,Kidney +1_2036.png,Kidney +1_2037.png,Kidney +1_2038.png,Kidney +1_2039.png,Kidney +1_2040.png,Kidney +1_2041.png,Kidney +1_2042.png,Liver +1_2043.png,Liver +1_2044.png,Liver +1_2045.png,Liver +1_2046.png,Liver +1_2047.png,Liver +1_2048.png,Liver +1_2049.png,Liver +1_2050.png,Liver +1_2051.png,Liver +1_2052.png,Liver +1_2053.png,Liver +1_2054.png,Liver +1_2055.png,Liver +1_2056.png,Liver +1_2057.png,Liver +1_2058.png,Liver +1_2059.png,Liver +1_2060.png,Liver +1_2061.png,Liver +1_2062.png,Liver +1_2063.png,Liver +1_2064.png,Liver +1_2065.png,Liver +1_2066.png,Liver +1_2067.png,Liver +1_2068.png,Liver +1_2069.png,Liver +1_2070.png,Liver +1_2071.png,Liver +1_2072.png,Liver +1_2073.png,Liver +1_2074.png,Liver +1_2075.png,Liver +1_2076.png,Liver +1_2077.png,Liver +1_2078.png,Liver +1_2079.png,Liver +1_2080.png,Liver +1_2081.png,Liver +1_2082.png,Liver +1_2083.png,Liver +1_2084.png,Liver +1_2085.png,Liver +1_2086.png,Liver +1_2087.png,Liver +1_2088.png,Liver +1_2089.png,Liver +1_2090.png,Liver +1_2091.png,Liver +1_2092.png,Liver +1_2093.png,Liver +1_2094.png,Liver +1_2095.png,Liver +1_2096.png,Liver +1_2097.png,Liver +1_2098.png,Liver +1_2099.png,Liver +1_2100.png,Liver +1_2101.png,Liver +1_2102.png,Liver +1_2103.png,Liver +1_2104.png,Liver +1_2105.png,Liver +1_2106.png,Liver +1_2107.png,Liver +1_2108.png,Lung +1_2109.png,Lung +1_2110.png,Lung +1_2111.png,Lung +1_2112.png,Lung +1_2113.png,Lung +1_2114.png,Lung +1_2115.png,Lung +1_2116.png,Lung +1_2117.png,Ovarian +1_2118.png,Ovarian +1_2119.png,Ovarian +1_2120.png,Ovarian +1_2121.png,Ovarian +1_2122.png,Ovarian +1_2123.png,Ovarian +1_2124.png,Ovarian +1_2125.png,Ovarian +1_2126.png,Ovarian +1_2127.png,Ovarian +1_2128.png,Ovarian +1_2129.png,Ovarian +1_2130.png,Ovarian +1_2131.png,Ovarian +1_2132.png,Ovarian +1_2133.png,Ovarian +1_2134.png,Ovarian +1_2135.png,Ovarian +1_2136.png,Ovarian +1_2137.png,Ovarian +1_2138.png,Ovarian +1_2139.png,Ovarian +1_2140.png,Ovarian +1_2141.png,Ovarian +1_2142.png,Ovarian +1_2143.png,Ovarian +1_2144.png,Ovarian +1_2145.png,Ovarian 
+1_2146.png,Ovarian +1_2147.png,Ovarian +1_2148.png,Ovarian +1_2149.png,Ovarian +1_2150.png,Ovarian +1_2151.png,Ovarian +1_2152.png,Pancreatic +1_2153.png,Pancreatic +1_2154.png,Pancreatic +1_2155.png,Pancreatic +1_2156.png,Pancreatic +1_2157.png,Pancreatic +1_2158.png,Pancreatic +1_2159.png,Pancreatic +1_2160.png,Pancreatic +1_2161.png,Pancreatic +1_2162.png,Pancreatic +1_2163.png,Pancreatic +1_2164.png,Pancreatic +1_2165.png,Pancreatic +1_2166.png,Pancreatic +1_2167.png,Pancreatic +1_2168.png,Pancreatic +1_2169.png,Pancreatic +1_2170.png,Pancreatic +1_2171.png,Pancreatic +1_2172.png,Pancreatic +1_2173.png,Pancreatic +1_2174.png,Pancreatic +1_2175.png,Pancreatic +1_2176.png,Pancreatic +1_2177.png,Pancreatic +1_2178.png,Pancreatic +1_2179.png,Pancreatic +1_2180.png,Pancreatic +1_2181.png,Pancreatic +1_2182.png,Pancreatic +1_2183.png,Pancreatic +1_2184.png,Pancreatic +1_2185.png,Pancreatic +1_2186.png,Pancreatic +1_2187.png,Pancreatic +1_2188.png,Pancreatic +1_2189.png,Pancreatic +1_2190.png,Pancreatic +1_2191.png,Pancreatic +1_2192.png,Pancreatic +1_2193.png,Pancreatic +1_2194.png,Pancreatic +1_2195.png,Pancreatic +1_2196.png,Pancreatic +1_2197.png,Pancreatic +1_2198.png,Pancreatic +1_2199.png,Pancreatic +1_2200.png,Pancreatic +1_2201.png,Pancreatic +1_2202.png,Pancreatic +1_2203.png,Pancreatic +1_2204.png,Pancreatic +1_2205.png,Pancreatic +1_2206.png,Pancreatic +1_2207.png,Pancreatic +1_2208.png,Pancreatic +1_2209.png,Pancreatic +1_2210.png,Pancreatic +1_2211.png,Pancreatic +1_2212.png,Pancreatic +1_2213.png,Prostate +1_2214.png,Prostate +1_2215.png,Prostate +1_2216.png,Prostate +1_2217.png,Prostate +1_2218.png,Prostate +1_2219.png,Prostate +1_2220.png,Prostate +1_2221.png,Prostate +1_2222.png,Prostate +1_2223.png,Prostate +1_2224.png,Prostate +1_2225.png,Prostate +1_2226.png,Prostate +1_2227.png,Prostate +1_2228.png,Prostate +1_2229.png,Prostate +1_2230.png,Prostate +1_2231.png,Prostate +1_2232.png,Prostate +1_2233.png,Prostate +1_2234.png,Prostate +1_2235.png,Prostate +1_2236.png,Prostate +1_2237.png,Prostate +1_2238.png,Prostate +1_2239.png,Prostate +1_2240.png,Prostate +1_2241.png,Prostate +1_2242.png,Prostate +1_2243.png,Prostate +1_2244.png,Prostate +1_2245.png,Skin +1_2246.png,Skin +1_2247.png,Skin +1_2248.png,Skin +1_2249.png,Skin +1_2250.png,Skin +1_2251.png,Skin +1_2252.png,Skin +1_2253.png,Skin +1_2254.png,Skin +1_2255.png,Skin +1_2256.png,Skin +1_2257.png,Skin +1_2258.png,Skin +1_2259.png,Skin +1_2260.png,Skin +1_2261.png,Skin +1_2262.png,Skin +1_2263.png,Skin +1_2264.png,Skin +1_2265.png,Skin +1_2266.png,Skin +1_2267.png,Skin +1_2268.png,Skin +1_2269.png,Skin +1_2270.png,Skin +1_2271.png,Skin +1_2272.png,Skin +1_2273.png,Skin +1_2274.png,Skin +1_2275.png,Skin +1_2276.png,Skin +1_2277.png,Skin +1_2278.png,Skin +1_2279.png,Skin +1_2280.png,Skin +1_2281.png,Skin +1_2282.png,Skin +1_2283.png,Skin +1_2284.png,Skin +1_2285.png,Skin +1_2286.png,Skin +1_2287.png,Skin +1_2288.png,Skin +1_2289.png,Skin +1_2290.png,Skin +1_2291.png,Skin +1_2292.png,Skin +1_2293.png,Skin +1_2294.png,Skin +1_2295.png,Skin +1_2296.png,Skin +1_2297.png,Skin +1_2298.png,Skin +1_2299.png,Skin +1_2300.png,Skin +1_2301.png,Skin +1_2302.png,Skin +1_2303.png,Skin +1_2304.png,Skin +1_2305.png,Skin +1_2306.png,Skin +1_2307.png,Skin +1_2308.png,Skin +1_2309.png,Skin +1_2310.png,Skin +1_2311.png,Skin +1_2312.png,Skin +1_2313.png,Skin +1_2314.png,Skin +1_2315.png,Skin +1_2316.png,Stomach +1_2317.png,Stomach +1_2318.png,Stomach +1_2319.png,Stomach +1_2320.png,Stomach +1_2321.png,Stomach +1_2322.png,Stomach 
+1_2323.png,Stomach +1_2324.png,Stomach +1_2325.png,Stomach +1_2326.png,Stomach +1_2327.png,Stomach +1_2328.png,Stomach +1_2329.png,Stomach +1_2330.png,Stomach +1_2331.png,Stomach +1_2332.png,Stomach +1_2333.png,Stomach +1_2334.png,Stomach +1_2335.png,Stomach +1_2336.png,Stomach +1_2337.png,Stomach +1_2338.png,Stomach +1_2339.png,Stomach +1_2340.png,Stomach +1_2341.png,Stomach +1_2342.png,Stomach +1_2343.png,Stomach +1_2344.png,Stomach +1_2345.png,Stomach +1_2346.png,Stomach +1_2347.png,Stomach +1_2348.png,Stomach +1_2349.png,Stomach +1_2350.png,Stomach +1_2351.png,Stomach +1_2352.png,Stomach +1_2353.png,Stomach +1_2354.png,Stomach +1_2355.png,Testis +1_2356.png,Testis +1_2357.png,Testis +1_2358.png,Testis +1_2359.png,Testis +1_2360.png,Testis +1_2361.png,Testis +1_2362.png,Testis +1_2363.png,Testis +1_2364.png,Testis +1_2365.png,Testis +1_2366.png,Testis +1_2367.png,Testis +1_2368.png,Testis +1_2369.png,Testis +1_2370.png,Testis +1_2371.png,Testis +1_2372.png,Testis +1_2373.png,Testis +1_2374.png,Testis +1_2375.png,Testis +1_2376.png,Testis +1_2377.png,Testis +1_2378.png,Testis +1_2379.png,Testis +1_2380.png,Testis +1_2381.png,Testis +1_2382.png,Thyroid +1_2383.png,Thyroid +1_2384.png,Thyroid +1_2385.png,Thyroid +1_2386.png,Thyroid +1_2387.png,Thyroid +1_2388.png,Thyroid +1_2389.png,Thyroid +1_2390.png,Thyroid +1_2391.png,Thyroid +1_2392.png,Thyroid +1_2393.png,Thyroid +1_2394.png,Thyroid +1_2395.png,Thyroid +1_2396.png,Thyroid +1_2397.png,Thyroid +1_2398.png,Thyroid +1_2399.png,Thyroid +1_2400.png,Thyroid +1_2401.png,Thyroid +1_2402.png,Thyroid +1_2403.png,Thyroid +1_2404.png,Thyroid +1_2405.png,Thyroid +1_2406.png,Thyroid +1_2407.png,Thyroid +1_2408.png,Thyroid +1_2409.png,Thyroid +1_2410.png,Thyroid +1_2411.png,Thyroid +1_2412.png,Thyroid +1_2413.png,Thyroid +1_2414.png,Thyroid +1_2415.png,Thyroid +1_2416.png,Thyroid +1_2417.png,Thyroid +1_2418.png,Thyroid +1_2419.png,Thyroid +1_2420.png,Thyroid +1_2421.png,Thyroid +1_2422.png,Thyroid +1_2423.png,Thyroid +1_2424.png,Thyroid +1_2425.png,Thyroid +1_2426.png,Thyroid +1_2427.png,Thyroid +1_2428.png,Thyroid +1_2429.png,Uterus +1_2430.png,Uterus +1_2431.png,Uterus +1_2432.png,Colon +1_2433.png,Colon +1_2434.png,Colon +1_2435.png,Colon +1_2436.png,Colon +1_2437.png,Colon +1_2438.png,Colon +1_2439.png,Colon +1_2440.png,Colon +1_2441.png,Colon +1_2442.png,Colon +1_2443.png,Colon +1_2444.png,Colon +1_2445.png,Colon +1_2446.png,Colon +1_2447.png,Colon +1_2448.png,Colon +1_2449.png,Colon +1_2450.png,Colon +1_2451.png,Colon +1_2452.png,Colon +1_2453.png,Colon +1_2454.png,Colon +1_2455.png,Colon +1_2456.png,Colon +1_2457.png,Colon +1_2458.png,Colon +1_2459.png,Colon +1_2460.png,Colon +1_2461.png,Colon +1_2462.png,Colon +1_2463.png,Colon +1_2464.png,Colon +1_2465.png,Colon +1_2466.png,Colon +1_2467.png,Colon +1_2468.png,Colon +1_2469.png,Colon +1_2470.png,Colon +1_2471.png,Colon +1_2472.png,Colon +1_2473.png,Colon +1_2474.png,Colon +1_2475.png,Colon +1_2476.png,Colon +1_2477.png,Colon +1_2478.png,Colon +1_2479.png,Colon +1_2480.png,Colon +1_2481.png,Colon +1_2482.png,Colon +1_2483.png,Colon +1_2484.png,Colon +1_2485.png,Colon +1_2486.png,Colon +1_2487.png,Colon +1_2488.png,Colon +1_2489.png,Colon +1_2490.png,Colon +1_2491.png,Colon +1_2492.png,Colon +1_2493.png,Colon +1_2494.png,Colon +1_2495.png,Colon +1_2496.png,Colon +1_2497.png,Colon +1_2498.png,Colon +1_2499.png,Colon +1_2500.png,Colon +1_2501.png,Colon +1_2502.png,Colon +1_2503.png,Colon +1_2504.png,Colon +1_2505.png,Colon +1_2506.png,Colon +1_2507.png,Colon +1_2508.png,Colon +1_2509.png,Colon 
+1_2510.png,Colon +1_2511.png,Colon +1_2512.png,Colon +1_2513.png,Colon +1_2514.png,Colon +1_2515.png,Colon +1_2516.png,Colon +1_2517.png,Colon +1_2518.png,Colon +1_2519.png,Colon +1_2520.png,Colon +1_2521.png,Colon +1_2522.png,Colon diff --git a/docs/datasets/PanNuke/fold2/cell_count.csv b/docs/datasets/PanNuke/fold2/cell_count.csv new file mode 100644 index 0000000000000000000000000000000000000000..2815e03bca5c3babd4308aebad0cc69dd48d645e --- /dev/null +++ b/docs/datasets/PanNuke/fold2/cell_count.csv @@ -0,0 +1,2723 @@ +Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial +2_0.png,10,1,2,0,0 +2_1.png,4,0,4,0,0 +2_10.png,9,1,1,0,0 +2_100.png,17,1,2,0,0 +2_1000.png,24,0,0,15,0 +2_1001.png,35,0,0,2,0 +2_1002.png,43,2,0,0,0 +2_1003.png,32,1,0,1,0 +2_1004.png,112,19,9,0,0 +2_1005.png,12,136,66,0,0 +2_1006.png,45,6,44,0,0 +2_1007.png,0,71,106,0,9 +2_1008.png,1,28,8,0,0 +2_1009.png,20,0,3,0,0 +2_101.png,49,0,0,0,0 +2_1010.png,1,3,16,0,0 +2_1011.png,2,11,13,0,0 +2_1012.png,0,6,7,0,0 +2_1013.png,0,9,14,0,0 +2_1014.png,1,8,14,0,0 +2_1015.png,0,3,20,0,0 +2_1016.png,0,0,0,0,0 +2_1017.png,21,0,2,0,0 +2_1018.png,0,76,3,0,0 +2_1019.png,0,59,6,0,0 +2_102.png,6,0,8,0,0 +2_1020.png,0,0,11,0,3 +2_1021.png,0,0,0,0,39 +2_1022.png,0,0,0,0,21 +2_1023.png,0,0,0,0,1 +2_1024.png,0,0,0,0,4 +2_1025.png,0,0,0,0,7 +2_1026.png,0,1,11,0,27 +2_1027.png,0,5,37,0,0 +2_1028.png,0,7,35,0,11 +2_1029.png,0,3,42,0,0 +2_103.png,0,65,7,0,0 +2_1030.png,0,2,10,0,0 +2_1031.png,0,2,4,0,0 +2_1032.png,0,2,8,0,0 +2_1033.png,0,0,7,0,0 +2_1034.png,0,0,9,0,0 +2_1035.png,0,0,6,0,0 +2_1036.png,0,2,5,0,0 +2_1037.png,0,0,3,0,0 +2_1038.png,26,0,10,0,0 +2_1039.png,0,2,5,0,0 +2_104.png,4,13,12,0,0 +2_1040.png,27,0,2,0,0 +2_1041.png,0,0,3,0,0 +2_1042.png,19,3,6,0,0 +2_1043.png,0,0,5,0,0 +2_1044.png,0,0,14,0,0 +2_1045.png,0,0,10,0,0 +2_1046.png,0,0,17,0,0 +2_1047.png,0,0,12,0,0 +2_1048.png,0,0,11,0,0 +2_1049.png,0,0,12,0,0 +2_105.png,0,0,6,0,12 +2_1050.png,0,0,14,0,0 +2_1051.png,0,0,5,0,0 +2_1052.png,0,2,16,0,0 +2_1053.png,0,0,16,0,0 +2_1054.png,0,0,12,0,0 +2_1055.png,0,0,14,0,0 +2_1056.png,0,0,11,0,0 +2_1057.png,21,0,4,0,0 +2_1058.png,30,0,3,0,0 +2_1059.png,33,0,0,0,0 +2_106.png,0,4,6,0,25 +2_1060.png,30,0,2,0,0 +2_1061.png,31,0,1,0,0 +2_1062.png,23,2,7,11,0 +2_1063.png,36,1,6,0,0 +2_1064.png,27,0,2,0,0 +2_1065.png,29,0,0,0,0 +2_1066.png,36,0,2,0,0 +2_1067.png,11,0,28,0,0 +2_1068.png,25,0,8,0,0 +2_1069.png,26,0,2,0,0 +2_107.png,0,1,9,0,22 +2_1070.png,20,0,8,0,0 +2_1071.png,14,0,25,0,0 +2_1072.png,16,2,12,4,0 +2_1073.png,30,0,18,0,0 +2_1074.png,13,0,4,9,0 +2_1075.png,19,1,7,0,0 +2_1076.png,0,0,0,0,0 +2_1077.png,0,1,0,0,0 +2_1078.png,0,0,0,0,0 +2_1079.png,0,0,0,0,0 +2_108.png,0,4,2,0,33 +2_1080.png,0,3,0,0,0 +2_1081.png,0,1,0,0,0 +2_1082.png,0,1,0,0,0 +2_1083.png,0,0,0,0,0 +2_1084.png,0,1,0,0,0 +2_1085.png,0,0,0,0,0 +2_1086.png,0,3,0,0,0 +2_1087.png,0,6,1,0,0 +2_1088.png,0,0,0,0,0 +2_1089.png,0,1,0,0,0 +2_109.png,0,4,7,0,35 +2_1090.png,0,1,0,0,0 +2_1091.png,0,1,1,0,0 +2_1092.png,0,1,0,0,0 +2_1093.png,20,0,3,0,0 +2_1094.png,24,1,4,0,0 +2_1095.png,25,4,6,0,0 +2_1096.png,29,0,1,0,0 +2_1097.png,23,0,2,0,0 +2_1098.png,17,1,6,0,0 +2_1099.png,15,1,3,0,0 +2_11.png,11,0,3,0,0 +2_110.png,0,3,4,0,39 +2_1100.png,12,0,3,0,0 +2_1101.png,25,0,3,0,0 +2_1102.png,2,0,2,0,0 +2_1103.png,26,1,3,0,0 +2_1104.png,22,0,1,0,0 +2_1105.png,5,0,1,0,0 +2_1106.png,8,1,0,0,0 +2_1107.png,12,1,3,0,0 +2_1108.png,11,0,3,0,0 +2_1109.png,14,0,1,0,0 +2_111.png,0,0,3,0,18 +2_1110.png,10,0,2,0,0 +2_1111.png,11,0,3,0,0 +2_1112.png,10,1,0,0,0 +2_1113.png,12,0,1,0,0 
+2_1114.png,16,0,1,0,0 +2_1115.png,10,0,3,0,0 +2_1116.png,6,0,4,0,0 +2_1117.png,8,0,2,0,0 +2_1118.png,10,0,4,0,0 +2_1119.png,9,0,4,0,0 +2_112.png,0,1,2,0,15 +2_1120.png,10,2,2,0,0 +2_1121.png,11,0,1,0,0 +2_1122.png,8,0,1,0,0 +2_1123.png,6,0,5,0,0 +2_1124.png,11,0,0,0,0 +2_1125.png,13,0,2,0,0 +2_1126.png,17,0,0,0,0 +2_1127.png,15,0,0,0,0 +2_1128.png,11,0,1,0,0 +2_1129.png,19,0,0,0,0 +2_113.png,0,0,4,0,19 +2_1130.png,19,0,0,0,0 +2_1131.png,15,0,1,0,0 +2_1132.png,12,0,0,0,0 +2_1133.png,14,0,1,0,0 +2_1134.png,18,0,2,0,0 +2_1135.png,6,0,1,0,0 +2_1136.png,10,0,1,0,0 +2_1137.png,10,0,1,0,0 +2_1138.png,11,0,0,0,0 +2_1139.png,7,0,3,0,0 +2_114.png,0,1,3,0,26 +2_1140.png,7,0,1,0,0 +2_1141.png,17,0,0,0,0 +2_1142.png,16,0,2,0,0 +2_1143.png,15,1,0,0,0 +2_1144.png,15,0,0,0,0 +2_1145.png,13,0,2,0,0 +2_1146.png,14,0,0,0,0 +2_1147.png,12,0,0,0,0 +2_1148.png,11,0,0,0,0 +2_1149.png,18,0,2,0,0 +2_115.png,0,2,5,0,16 +2_1150.png,12,1,0,0,0 +2_1151.png,16,0,1,0,0 +2_1152.png,8,0,0,0,0 +2_1153.png,12,0,1,0,0 +2_1154.png,8,1,3,0,0 +2_1155.png,13,0,0,0,0 +2_1156.png,8,0,7,0,0 +2_1157.png,16,0,0,0,0 +2_1158.png,10,1,0,0,0 +2_1159.png,11,0,2,0,0 +2_116.png,0,0,3,0,29 +2_1160.png,16,0,5,0,0 +2_1161.png,11,1,0,0,0 +2_1162.png,14,0,0,0,0 +2_1163.png,0,0,9,0,0 +2_1164.png,0,0,9,0,0 +2_1165.png,0,3,5,0,9 +2_1166.png,0,13,12,0,16 +2_1167.png,0,21,6,0,16 +2_1168.png,0,1,2,0,13 +2_1169.png,0,0,2,0,14 +2_117.png,0,1,7,0,20 +2_1170.png,0,0,6,0,0 +2_1171.png,25,0,1,0,0 +2_1172.png,7,0,8,0,0 +2_1173.png,13,0,0,0,0 +2_1174.png,26,0,0,0,0 +2_1175.png,26,0,0,0,0 +2_1176.png,14,0,6,0,0 +2_1177.png,23,0,0,0,0 +2_1178.png,4,0,6,0,0 +2_1179.png,13,1,7,0,0 +2_118.png,0,0,3,0,43 +2_1180.png,5,0,8,0,0 +2_1181.png,2,0,14,0,0 +2_1182.png,23,0,5,0,0 +2_1183.png,15,1,1,0,0 +2_1184.png,11,0,2,0,0 +2_1185.png,32,0,0,0,0 +2_1186.png,0,0,7,0,7 +2_1187.png,0,1,5,0,8 +2_1188.png,0,0,3,0,11 +2_1189.png,0,1,4,0,16 +2_119.png,0,0,3,0,42 +2_1190.png,0,0,3,0,12 +2_1191.png,0,0,4,0,19 +2_1192.png,0,1,4,0,18 +2_1193.png,0,0,1,0,25 +2_1194.png,0,0,3,0,15 +2_1195.png,0,1,3,0,21 +2_1196.png,0,0,3,0,11 +2_1197.png,0,0,3,0,28 +2_1198.png,0,0,5,0,22 +2_1199.png,0,0,6,0,11 +2_12.png,16,1,6,0,0 +2_120.png,0,2,2,0,50 +2_1200.png,0,1,3,0,30 +2_1201.png,0,2,13,0,16 +2_1202.png,0,1,13,0,20 +2_1203.png,0,2,5,0,16 +2_1204.png,0,12,6,0,26 +2_1205.png,0,26,15,0,10 +2_1206.png,0,10,30,0,5 +2_1207.png,0,2,12,0,0 +2_1208.png,0,1,19,0,0 +2_1209.png,0,0,16,0,0 +2_121.png,0,0,6,0,30 +2_1210.png,0,2,16,0,0 +2_1211.png,11,1,9,0,0 +2_1212.png,10,2,1,0,0 +2_1213.png,18,1,2,0,0 +2_1214.png,10,0,0,0,0 +2_1215.png,15,2,0,0,0 +2_1216.png,15,2,7,0,0 +2_1217.png,13,2,0,0,0 +2_1218.png,9,0,1,0,0 +2_1219.png,16,0,1,0,0 +2_122.png,0,0,6,0,9 +2_1220.png,11,0,0,0,0 +2_1221.png,13,1,3,0,0 +2_1222.png,21,0,0,0,0 +2_1223.png,17,1,0,0,0 +2_1224.png,17,2,0,0,0 +2_1225.png,13,6,0,0,0 +2_1226.png,17,7,0,0,0 +2_1227.png,15,3,1,0,0 +2_1228.png,17,5,11,0,0 +2_1229.png,18,8,1,0,0 +2_123.png,0,7,15,0,37 +2_1230.png,21,8,0,0,0 +2_1231.png,6,14,6,0,0 +2_1232.png,16,10,1,0,0 +2_1233.png,23,3,1,0,0 +2_1234.png,10,14,11,0,0 +2_1235.png,11,14,3,0,0 +2_1236.png,36,0,0,0,0 +2_1237.png,65,0,0,0,0 +2_1238.png,52,0,3,1,0 +2_1239.png,0,17,1,0,0 +2_124.png,0,0,8,0,28 +2_1240.png,11,10,3,0,0 +2_1241.png,69,0,0,5,0 +2_1242.png,48,0,1,1,0 +2_1243.png,2,8,6,0,0 +2_1244.png,56,0,2,0,0 +2_1245.png,0,20,6,0,0 +2_1246.png,15,12,1,0,0 +2_1247.png,1,20,4,0,0 +2_1248.png,8,1,8,0,0 +2_1249.png,4,3,10,0,0 +2_125.png,0,2,6,0,34 +2_1250.png,12,1,2,0,0 +2_1251.png,7,0,1,0,0 +2_1252.png,8,4,1,0,0 +2_1253.png,13,0,2,0,0 
+2_1254.png,10,0,4,0,0 +2_1255.png,4,3,6,0,0 +2_1256.png,9,1,2,0,0 +2_1257.png,13,4,4,0,0 +2_1258.png,10,1,8,0,0 +2_1259.png,15,3,3,0,0 +2_126.png,0,0,7,0,24 +2_1260.png,10,1,3,0,0 +2_1261.png,8,2,4,0,0 +2_1262.png,9,3,7,0,0 +2_1263.png,3,2,6,0,0 +2_1264.png,6,1,7,0,0 +2_1265.png,5,2,4,0,0 +2_1266.png,0,0,2,0,0 +2_1267.png,1,3,4,0,0 +2_1268.png,3,3,10,0,0 +2_1269.png,17,5,5,0,0 +2_127.png,0,1,6,0,30 +2_1270.png,18,0,0,0,0 +2_1271.png,14,0,1,0,0 +2_1272.png,17,0,0,0,0 +2_1273.png,13,0,0,0,0 +2_1274.png,10,0,1,0,0 +2_1275.png,19,0,1,0,0 +2_1276.png,8,0,1,0,0 +2_1277.png,20,0,3,0,0 +2_1278.png,21,0,0,0,0 +2_1279.png,16,0,0,0,0 +2_128.png,0,0,11,0,5 +2_1280.png,9,0,1,0,0 +2_1281.png,20,0,0,0,0 +2_1282.png,13,0,0,0,0 +2_1283.png,18,0,1,0,0 +2_1284.png,17,0,2,0,0 +2_1285.png,14,0,1,0,0 +2_1286.png,16,0,0,0,0 +2_1287.png,17,2,8,0,0 +2_1288.png,18,2,0,0,0 +2_1289.png,0,8,6,0,0 +2_129.png,0,0,4,0,46 +2_1290.png,26,3,1,0,0 +2_1291.png,0,0,12,0,0 +2_1292.png,0,1,8,0,0 +2_1293.png,0,5,17,0,0 +2_1294.png,5,6,14,0,0 +2_1295.png,0,7,7,0,0 +2_1296.png,0,3,16,0,0 +2_1297.png,0,1,5,0,18 +2_1298.png,0,0,3,0,22 +2_1299.png,0,1,7,0,52 +2_13.png,4,0,7,0,0 +2_130.png,7,1,4,0,0 +2_1300.png,0,0,1,0,5 +2_1301.png,0,1,8,0,14 +2_1302.png,0,4,25,0,7 +2_1303.png,0,3,0,0,6 +2_1304.png,0,4,16,0,16 +2_1305.png,0,6,19,0,0 +2_1306.png,0,1,8,0,15 +2_1307.png,0,0,0,0,0 +2_1308.png,0,1,5,0,27 +2_1309.png,0,0,11,0,24 +2_131.png,12,0,2,0,0 +2_1310.png,0,4,13,0,16 +2_1311.png,0,9,24,0,0 +2_1312.png,0,0,0,0,15 +2_1313.png,0,2,5,0,0 +2_1314.png,0,2,20,0,15 +2_1315.png,9,1,10,0,0 +2_1316.png,2,0,13,0,0 +2_1317.png,17,2,10,0,0 +2_1318.png,3,3,10,0,0 +2_1319.png,3,3,15,0,0 +2_132.png,0,2,1,0,26 +2_1320.png,4,0,8,0,0 +2_1321.png,0,0,6,0,0 +2_1322.png,12,0,11,0,0 +2_1323.png,9,0,14,0,0 +2_1324.png,47,0,0,0,0 +2_1325.png,47,0,3,0,0 +2_1326.png,38,0,2,0,0 +2_1327.png,49,0,2,0,0 +2_1328.png,38,0,8,0,0 +2_1329.png,5,1,8,0,0 +2_133.png,0,0,0,0,47 +2_1330.png,10,0,13,0,0 +2_1331.png,8,0,11,0,0 +2_1332.png,3,9,15,0,0 +2_1333.png,21,9,5,0,0 +2_1334.png,0,3,22,0,0 +2_1335.png,39,0,2,0,0 +2_1336.png,35,6,2,0,0 +2_1337.png,0,27,17,0,0 +2_1338.png,24,6,5,0,0 +2_1339.png,39,2,0,0,0 +2_134.png,0,0,5,0,22 +2_1340.png,19,24,6,0,0 +2_1341.png,0,28,21,0,0 +2_1342.png,0,3,30,0,0 +2_1343.png,36,0,0,0,0 +2_1344.png,0,49,9,0,0 +2_1345.png,7,2,17,0,0 +2_1346.png,6,7,14,0,0 +2_1347.png,17,4,9,0,0 +2_1348.png,34,1,2,0,0 +2_1349.png,6,5,9,0,0 +2_135.png,0,2,8,0,14 +2_1350.png,0,20,21,0,36 +2_1351.png,0,6,16,0,38 +2_1352.png,0,52,30,0,12 +2_1353.png,0,20,17,0,29 +2_1354.png,0,1,4,0,8 +2_1355.png,106,0,0,0,0 +2_1356.png,41,3,4,0,0 +2_1357.png,38,1,0,75,0 +2_1358.png,30,0,0,57,0 +2_1359.png,0,0,0,0,5 +2_136.png,0,0,0,0,25 +2_1360.png,0,28,28,0,19 +2_1361.png,0,0,0,0,3 +2_1362.png,0,8,20,0,13 +2_1363.png,0,11,13,0,12 +2_1364.png,0,2,2,0,0 +2_1365.png,0,61,86,0,0 +2_1366.png,37,0,5,0,0 +2_1367.png,43,1,1,0,0 +2_1368.png,51,1,31,0,0 +2_1369.png,27,10,39,0,0 +2_137.png,0,0,1,0,35 +2_1370.png,59,11,28,0,0 +2_1371.png,16,3,37,0,0 +2_1372.png,0,4,24,0,0 +2_1373.png,0,2,20,0,0 +2_1374.png,0,1,19,0,0 +2_1375.png,14,0,0,9,0 +2_1376.png,11,0,18,0,0 +2_1377.png,23,0,2,3,0 +2_1378.png,32,0,2,5,0 +2_1379.png,23,0,1,2,0 +2_138.png,9,6,3,0,0 +2_1380.png,28,0,0,2,0 +2_1381.png,24,0,0,7,0 +2_1382.png,10,22,38,0,0 +2_1383.png,23,17,32,0,0 +2_1384.png,13,3,34,0,0 +2_1385.png,0,25,4,0,0 +2_1386.png,0,10,14,0,0 +2_1387.png,0,5,26,0,0 +2_1388.png,0,4,13,0,0 +2_1389.png,0,4,9,0,0 +2_139.png,9,5,0,0,0 +2_1390.png,0,7,19,0,0 +2_1391.png,0,9,0,0,0 +2_1392.png,0,0,5,0,0 +2_1393.png,25,0,0,0,0 
+2_1394.png,0,8,11,0,0 +2_1395.png,0,5,17,0,0 +2_1396.png,0,9,37,0,0 +2_1397.png,0,7,21,0,0 +2_1398.png,29,0,0,0,0 +2_1399.png,28,0,1,0,0 +2_14.png,5,3,2,0,0 +2_140.png,1,8,9,0,0 +2_1400.png,32,2,5,0,0 +2_1401.png,25,1,2,0,0 +2_1402.png,26,4,0,0,0 +2_1403.png,0,3,3,0,0 +2_1404.png,0,3,14,0,0 +2_1405.png,0,1,0,0,0 +2_1406.png,0,0,17,0,0 +2_1407.png,0,1,2,0,0 +2_1408.png,15,0,0,0,0 +2_1409.png,14,0,0,0,0 +2_141.png,7,14,3,0,0 +2_1410.png,13,3,2,0,0 +2_1411.png,16,0,0,0,0 +2_1412.png,6,0,0,0,0 +2_1413.png,8,0,0,0,0 +2_1414.png,9,0,0,0,0 +2_1415.png,24,0,0,0,0 +2_1416.png,18,1,0,0,0 +2_1417.png,22,0,1,0,0 +2_1418.png,17,0,0,0,0 +2_1419.png,16,0,0,0,0 +2_142.png,7,15,9,0,0 +2_1420.png,0,0,0,0,3 +2_1421.png,0,0,0,0,5 +2_1422.png,0,0,0,0,4 +2_1423.png,0,0,0,0,6 +2_1424.png,0,0,2,0,5 +2_1425.png,0,0,0,0,6 +2_1426.png,0,0,2,0,2 +2_1427.png,0,0,1,0,1 +2_1428.png,0,0,1,0,2 +2_1429.png,0,0,7,0,0 +2_143.png,0,0,6,0,0 +2_1430.png,0,0,2,0,0 +2_1431.png,0,2,8,0,0 +2_1432.png,0,0,4,0,0 +2_1433.png,11,0,0,0,0 +2_1434.png,8,0,0,0,0 +2_1435.png,9,0,0,0,0 +2_1436.png,17,1,1,0,0 +2_1437.png,1,0,3,0,0 +2_1438.png,0,0,0,0,0 +2_1439.png,16,0,1,0,0 +2_144.png,0,0,8,0,9 +2_1440.png,24,1,1,0,0 +2_1441.png,9,0,5,0,0 +2_1442.png,3,0,5,0,0 +2_1443.png,16,2,0,0,0 +2_1444.png,5,3,0,0,0 +2_1445.png,3,5,4,0,0 +2_1446.png,6,6,4,0,0 +2_1447.png,9,5,3,0,0 +2_1448.png,16,0,7,0,0 +2_1449.png,13,1,6,0,0 +2_145.png,0,1,10,0,9 +2_1450.png,24,1,4,0,0 +2_1451.png,14,0,2,0,0 +2_1452.png,9,0,6,0,0 +2_1453.png,11,2,5,0,0 +2_1454.png,21,0,3,0,0 +2_1455.png,1,6,5,0,0 +2_1456.png,0,10,3,0,0 +2_1457.png,0,2,2,0,0 +2_1458.png,0,12,3,5,0 +2_1459.png,0,2,9,0,0 +2_146.png,0,0,0,0,50 +2_1460.png,0,1,0,0,0 +2_1461.png,1,0,0,0,0 +2_1462.png,0,1,1,0,0 +2_1463.png,1,3,5,0,0 +2_1464.png,1,1,11,0,0 +2_1465.png,3,2,5,0,0 +2_1466.png,1,0,0,0,0 +2_1467.png,12,1,5,0,0 +2_1468.png,0,0,3,0,0 +2_1469.png,0,0,2,0,0 +2_147.png,0,0,2,0,44 +2_1470.png,0,0,5,0,0 +2_1471.png,0,0,2,0,0 +2_1472.png,0,0,2,0,0 +2_1473.png,0,7,25,0,2 +2_1474.png,0,14,24,0,0 +2_1475.png,0,7,20,0,6 +2_1476.png,0,0,6,0,0 +2_1477.png,0,0,10,0,0 +2_1478.png,1,1,8,0,0 +2_1479.png,4,3,15,0,0 +2_148.png,0,2,5,0,27 +2_1480.png,0,0,4,0,0 +2_1481.png,2,4,12,0,0 +2_1482.png,15,0,1,0,0 +2_1483.png,3,0,5,0,0 +2_1484.png,3,2,19,0,0 +2_1485.png,0,0,23,0,0 +2_1486.png,5,4,13,0,0 +2_1487.png,0,1,7,0,0 +2_1488.png,0,0,13,0,0 +2_1489.png,0,1,3,0,7 +2_149.png,0,1,2,0,22 +2_1490.png,0,4,2,0,6 +2_1491.png,0,3,2,0,16 +2_1492.png,0,1,1,0,13 +2_1493.png,0,4,7,0,9 +2_1494.png,0,12,5,0,0 +2_1495.png,0,0,5,0,0 +2_1496.png,0,0,4,0,0 +2_1497.png,0,0,9,0,0 +2_1498.png,0,0,4,0,0 +2_1499.png,0,0,10,0,0 +2_15.png,9,0,8,0,0 +2_150.png,0,0,3,0,42 +2_1500.png,0,3,6,0,0 +2_1501.png,16,1,21,0,0 +2_1502.png,32,0,9,0,0 +2_1503.png,20,1,8,0,0 +2_1504.png,33,0,5,0,0 +2_1505.png,32,0,4,0,0 +2_1506.png,0,0,0,0,0 +2_1507.png,0,0,0,0,0 +2_1508.png,0,0,0,0,0 +2_1509.png,0,0,0,0,0 +2_151.png,1,0,2,0,0 +2_1510.png,0,0,0,0,0 +2_1511.png,0,0,0,0,0 +2_1512.png,12,18,7,0,0 +2_1513.png,9,13,14,0,0 +2_1514.png,2,16,22,0,0 +2_1515.png,27,3,1,0,0 +2_1516.png,17,8,7,0,0 +2_1517.png,23,0,5,0,0 +2_1518.png,0,0,0,0,0 +2_1519.png,0,0,0,0,0 +2_152.png,4,0,3,0,0 +2_1520.png,0,13,20,0,0 +2_1521.png,0,0,2,0,0 +2_1522.png,0,0,2,0,0 +2_1523.png,0,0,0,0,0 +2_1524.png,12,3,2,0,0 +2_1525.png,0,26,10,0,0 +2_1526.png,11,0,1,0,0 +2_1527.png,0,40,17,0,0 +2_1528.png,4,50,17,0,0 +2_1529.png,0,59,10,0,0 +2_153.png,1,0,5,0,0 +2_1530.png,0,37,23,0,0 +2_1531.png,0,38,12,0,0 +2_1532.png,15,0,3,0,0 +2_1533.png,21,0,3,0,0 +2_1534.png,12,4,4,0,0 +2_1535.png,9,2,7,0,0 
+2_1536.png,1,5,13,0,0 +2_1537.png,0,7,15,0,0 +2_1538.png,1,17,12,0,0 +2_1539.png,0,19,16,0,0 +2_154.png,3,0,1,0,0 +2_1540.png,0,22,25,0,0 +2_1541.png,0,17,27,0,0 +2_1542.png,0,48,18,0,0 +2_1543.png,1,19,14,0,0 +2_1544.png,0,16,19,0,0 +2_1545.png,1,8,18,0,0 +2_1546.png,3,26,12,0,0 +2_1547.png,0,6,16,0,0 +2_1548.png,0,11,11,0,0 +2_1549.png,0,4,8,0,0 +2_155.png,3,0,0,0,0 +2_1550.png,0,0,8,0,15 +2_1551.png,0,0,4,0,18 +2_1552.png,0,1,12,0,0 +2_1553.png,13,0,6,0,0 +2_1554.png,20,0,10,0,0 +2_1555.png,8,0,22,0,0 +2_1556.png,21,0,3,0,0 +2_1557.png,17,1,6,0,0 +2_1558.png,15,2,5,0,0 +2_1559.png,10,0,3,0,0 +2_156.png,0,1,0,0,0 +2_1560.png,10,1,25,0,0 +2_1561.png,2,5,31,0,0 +2_1562.png,12,4,21,0,0 +2_1563.png,0,1,10,0,0 +2_1564.png,0,6,4,0,0 +2_1565.png,2,6,14,0,0 +2_1566.png,0,0,0,0,0 +2_1567.png,0,0,0,0,0 +2_1568.png,0,0,0,0,0 +2_1569.png,0,0,0,0,0 +2_157.png,0,4,11,0,15 +2_1570.png,0,0,0,0,0 +2_1571.png,0,0,0,0,0 +2_1572.png,0,0,0,0,0 +2_1573.png,0,2,29,0,0 +2_1574.png,0,0,14,0,0 +2_1575.png,0,0,22,0,0 +2_1576.png,0,0,20,0,0 +2_1577.png,0,1,22,0,0 +2_1578.png,0,0,26,0,0 +2_1579.png,0,0,16,0,0 +2_158.png,0,1,3,0,0 +2_1580.png,31,0,0,0,0 +2_1581.png,28,0,1,0,0 +2_1582.png,12,1,10,0,0 +2_1583.png,25,0,1,0,0 +2_1584.png,29,0,0,0,0 +2_1585.png,23,0,0,0,0 +2_1586.png,26,1,2,0,0 +2_1587.png,31,0,1,0,0 +2_1588.png,21,2,13,0,0 +2_1589.png,23,0,6,0,0 +2_159.png,2,0,3,0,0 +2_1590.png,30,1,0,0,0 +2_1591.png,32,0,0,0,0 +2_1592.png,29,0,0,0,0 +2_1593.png,27,0,0,0,0 +2_1594.png,31,1,0,0,0 +2_1595.png,0,0,18,0,0 +2_1596.png,0,1,11,0,0 +2_1597.png,0,1,14,0,0 +2_1598.png,0,0,14,0,0 +2_1599.png,0,0,21,0,0 +2_16.png,12,2,1,0,0 +2_160.png,0,0,6,0,0 +2_1600.png,0,0,18,0,0 +2_1601.png,0,0,13,0,0 +2_1602.png,0,2,16,0,0 +2_1603.png,0,0,13,0,0 +2_1604.png,0,0,9,0,0 +2_1605.png,0,0,9,0,0 +2_1606.png,0,0,10,0,0 +2_1607.png,0,0,3,0,0 +2_1608.png,0,0,6,0,0 +2_1609.png,0,0,5,0,0 +2_161.png,2,0,2,0,0 +2_1610.png,0,0,12,0,0 +2_1611.png,0,0,0,0,0 +2_1612.png,0,0,1,0,0 +2_1613.png,0,1,6,0,8 +2_1614.png,0,0,4,0,32 +2_1615.png,0,1,4,0,0 +2_1616.png,0,0,7,0,1 +2_1617.png,0,0,7,0,0 +2_1618.png,10,1,9,0,0 +2_1619.png,7,3,17,0,0 +2_162.png,0,0,5,0,0 +2_1620.png,0,5,17,0,0 +2_1621.png,0,0,12,0,0 +2_1622.png,45,0,0,0,0 +2_1623.png,18,1,8,0,0 +2_1624.png,26,0,10,0,0 +2_1625.png,42,0,1,0,0 +2_1626.png,13,7,10,0,0 +2_1627.png,3,5,17,0,0 +2_1628.png,18,0,0,0,0 +2_1629.png,20,0,0,0,0 +2_163.png,0,3,11,0,25 +2_1630.png,25,0,0,0,0 +2_1631.png,15,1,0,0,0 +2_1632.png,16,0,0,0,0 +2_1633.png,26,0,0,0,0 +2_1634.png,25,0,1,0,0 +2_1635.png,0,0,0,0,0 +2_1636.png,0,0,0,0,0 +2_1637.png,21,0,0,0,0 +2_1638.png,16,0,0,0,0 +2_1639.png,20,0,3,0,0 +2_164.png,0,2,7,0,17 +2_1640.png,0,1,6,0,0 +2_1641.png,0,0,3,0,0 +2_1642.png,0,1,11,0,0 +2_1643.png,0,1,6,0,0 +2_1644.png,0,1,9,0,0 +2_1645.png,0,0,0,0,0 +2_1646.png,0,0,0,0,0 +2_1647.png,0,0,0,0,0 +2_1648.png,0,0,0,0,0 +2_1649.png,80,1,7,0,0 +2_165.png,0,1,17,0,0 +2_1650.png,71,0,10,0,0 +2_1651.png,85,0,6,0,0 +2_1652.png,21,0,6,0,0 +2_1653.png,18,0,20,0,0 +2_1654.png,15,1,21,0,0 +2_1655.png,20,2,8,0,0 +2_1656.png,7,1,25,0,0 +2_1657.png,24,1,15,0,0 +2_1658.png,13,0,15,0,0 +2_1659.png,13,3,8,0,0 +2_166.png,0,0,3,0,57 +2_1660.png,31,1,7,0,0 +2_1661.png,9,0,10,0,0 +2_1662.png,24,0,5,0,0 +2_1663.png,25,1,3,0,0 +2_1664.png,0,0,0,0,0 +2_1665.png,0,0,0,0,0 +2_1666.png,0,0,0,0,0 +2_1667.png,0,0,0,0,0 +2_1668.png,0,0,0,0,0 +2_1669.png,0,0,0,0,0 +2_167.png,0,2,6,0,41 +2_1670.png,0,0,0,0,0 +2_1671.png,38,0,0,0,0 +2_1672.png,45,1,0,0,0 +2_1673.png,68,1,0,0,0 +2_1674.png,38,2,13,0,0 +2_1675.png,17,11,9,0,0 +2_1676.png,19,1,16,0,0 
+2_1677.png,2,5,30,0,0 +2_1678.png,36,1,0,0,0 +2_1679.png,0,0,14,0,0 +2_168.png,0,4,10,0,41 +2_1680.png,0,0,3,0,0 +2_1681.png,22,1,0,0,0 +2_1682.png,39,1,0,0,0 +2_1683.png,30,0,0,0,0 +2_1684.png,28,8,0,0,0 +2_1685.png,27,14,3,0,0 +2_1686.png,29,1,1,0,0 +2_1687.png,33,8,2,0,0 +2_1688.png,32,2,1,0,0 +2_1689.png,26,4,2,0,0 +2_169.png,0,1,1,0,54 +2_1690.png,24,11,4,0,0 +2_1691.png,31,5,3,0,0 +2_1692.png,0,25,23,0,0 +2_1693.png,0,0,30,0,0 +2_1694.png,0,56,10,0,0 +2_1695.png,3,59,16,0,0 +2_1696.png,41,0,0,0,0 +2_1697.png,53,0,0,0,0 +2_1698.png,53,0,0,0,0 +2_1699.png,42,0,0,0,0 +2_17.png,17,0,0,0,0 +2_170.png,0,0,11,0,29 +2_1700.png,38,0,0,0,0 +2_1701.png,0,0,0,0,7 +2_1702.png,0,5,4,0,12 +2_1703.png,0,2,5,0,12 +2_1704.png,0,2,2,0,12 +2_1705.png,0,0,3,0,19 +2_1706.png,0,0,2,0,9 +2_1707.png,0,4,4,0,22 +2_1708.png,0,0,1,0,11 +2_1709.png,0,7,7,0,3 +2_171.png,0,0,12,0,56 +2_1710.png,0,7,3,0,9 +2_1711.png,0,10,7,0,7 +2_1712.png,0,6,3,0,11 +2_1713.png,0,10,3,0,11 +2_1714.png,0,0,0,0,8 +2_1715.png,0,6,5,0,10 +2_1716.png,0,10,5,0,6 +2_1717.png,0,0,1,0,9 +2_1718.png,0,4,4,0,9 +2_1719.png,0,0,1,0,7 +2_172.png,0,3,9,0,29 +2_1720.png,0,4,5,0,10 +2_1721.png,0,4,7,0,10 +2_1722.png,0,17,3,0,5 +2_1723.png,0,0,6,0,11 +2_1724.png,0,0,10,0,12 +2_1725.png,1,12,8,0,1 +2_1726.png,0,14,11,0,0 +2_1727.png,0,15,11,0,2 +2_1728.png,0,16,7,0,0 +2_1729.png,0,0,0,0,2 +2_173.png,0,4,9,0,37 +2_1730.png,12,7,6,1,2 +2_1731.png,35,0,0,0,0 +2_1732.png,0,20,3,0,0 +2_1733.png,0,15,3,0,0 +2_1734.png,0,1,1,0,21 +2_1735.png,0,9,16,0,2 +2_1736.png,0,5,3,0,7 +2_1737.png,0,0,5,0,17 +2_1738.png,0,5,14,0,6 +2_1739.png,0,0,0,0,13 +2_174.png,0,0,6,0,28 +2_1740.png,0,6,5,0,7 +2_1741.png,0,0,1,0,10 +2_1742.png,0,0,0,0,21 +2_1743.png,0,0,0,0,0 +2_1744.png,1,0,0,0,0 +2_1745.png,7,1,2,0,0 +2_1746.png,2,1,2,0,11 +2_1747.png,0,0,1,0,4 +2_1748.png,0,0,4,0,10 +2_1749.png,0,1,7,0,0 +2_175.png,0,1,1,0,63 +2_1750.png,0,0,9,0,0 +2_1751.png,0,0,2,0,0 +2_1752.png,0,0,1,0,0 +2_1753.png,0,0,2,0,0 +2_1754.png,0,0,8,0,0 +2_1755.png,0,0,3,0,0 +2_1756.png,0,0,3,0,9 +2_1757.png,0,0,0,0,0 +2_1758.png,0,0,0,0,7 +2_1759.png,0,0,7,0,0 +2_176.png,0,1,2,0,41 +2_1760.png,9,0,0,0,0 +2_1761.png,0,0,0,0,11 +2_1762.png,0,3,4,0,18 +2_1763.png,0,0,1,0,20 +2_1764.png,0,10,9,0,5 +2_1765.png,0,1,4,0,9 +2_1766.png,0,3,4,0,8 +2_1767.png,0,6,5,0,7 +2_1768.png,0,13,4,0,7 +2_1769.png,0,0,4,0,3 +2_177.png,0,0,5,0,30 +2_1770.png,0,3,6,0,10 +2_1771.png,0,2,5,0,21 +2_1772.png,0,0,3,0,18 +2_1773.png,0,0,1,0,15 +2_1774.png,0,2,4,0,10 +2_1775.png,0,0,0,0,9 +2_1776.png,0,0,0,0,6 +2_1777.png,0,0,0,0,0 +2_1778.png,0,0,4,0,0 +2_1779.png,0,0,0,0,0 +2_178.png,0,0,7,0,4 +2_1780.png,0,0,0,0,0 +2_1781.png,0,0,8,0,0 +2_1782.png,0,0,0,0,24 +2_1783.png,0,1,0,0,50 +2_1784.png,0,0,0,0,2 +2_1785.png,0,1,13,0,4 +2_1786.png,0,2,3,0,14 +2_1787.png,0,5,11,0,20 +2_1788.png,0,4,15,0,5 +2_1789.png,0,1,14,0,7 +2_179.png,0,1,7,0,14 +2_1790.png,0,0,10,0,10 +2_1791.png,0,1,0,0,8 +2_1792.png,0,2,10,0,11 +2_1793.png,0,0,0,0,29 +2_1794.png,0,2,24,0,4 +2_1795.png,0,0,7,0,20 +2_1796.png,0,0,3,0,7 +2_1797.png,0,0,0,0,0 +2_1798.png,0,3,19,0,3 +2_1799.png,0,6,11,0,0 +2_18.png,4,3,3,0,0 +2_180.png,0,4,8,0,10 +2_1800.png,0,0,2,0,7 +2_1801.png,0,1,15,0,0 +2_1802.png,0,4,12,0,11 +2_1803.png,0,0,0,0,0 +2_1804.png,0,0,0,0,0 +2_1805.png,0,0,0,0,0 +2_1806.png,0,2,3,0,7 +2_1807.png,0,14,2,0,8 +2_1808.png,0,9,5,0,10 +2_1809.png,0,0,0,0,1 +2_181.png,0,0,1,0,15 +2_1810.png,0,5,1,0,11 +2_1811.png,0,12,1,0,17 +2_1812.png,0,2,7,0,8 +2_1813.png,0,7,4,0,10 +2_1814.png,0,0,4,0,17 +2_1815.png,0,11,3,0,8 +2_1816.png,0,8,2,0,12 
+2_1817.png,0,7,4,0,12 +2_1818.png,0,11,3,0,8 +2_1819.png,0,15,5,0,4 +2_182.png,0,1,0,0,41 +2_1820.png,0,8,7,0,6 +2_1821.png,0,8,1,0,7 +2_1822.png,0,5,2,0,11 +2_1823.png,0,0,0,0,2 +2_1824.png,0,0,0,0,5 +2_1825.png,0,9,3,0,4 +2_1826.png,0,15,0,0,2 +2_1827.png,0,11,3,0,14 +2_1828.png,0,11,4,0,8 +2_1829.png,0,2,7,0,6 +2_183.png,0,2,3,0,29 +2_1830.png,0,0,2,0,12 +2_1831.png,0,1,0,0,9 +2_1832.png,0,0,0,0,0 +2_1833.png,0,0,0,0,0 +2_1834.png,0,0,0,0,0 +2_1835.png,0,0,0,0,0 +2_1836.png,0,0,0,0,0 +2_1837.png,0,0,0,0,0 +2_1838.png,0,0,0,0,0 +2_1839.png,0,0,0,0,0 +2_184.png,0,0,6,0,18 +2_1840.png,0,0,0,0,0 +2_1841.png,0,0,0,0,0 +2_1842.png,0,0,0,0,0 +2_1843.png,0,0,0,0,0 +2_1844.png,0,0,0,0,0 +2_1845.png,0,0,0,0,0 +2_1846.png,0,0,0,0,0 +2_1847.png,0,0,0,0,0 +2_1848.png,0,0,0,0,0 +2_1849.png,0,0,0,0,0 +2_185.png,2,0,5,0,0 +2_1850.png,0,0,0,0,0 +2_1851.png,0,0,0,0,0 +2_1852.png,0,0,0,0,0 +2_1853.png,0,0,0,0,0 +2_1854.png,0,0,0,0,0 +2_1855.png,0,0,0,0,0 +2_1856.png,0,0,0,0,0 +2_1857.png,0,0,0,0,0 +2_1858.png,0,0,0,0,0 +2_1859.png,0,0,0,0,0 +2_186.png,18,0,4,0,0 +2_1860.png,0,0,0,0,0 +2_1861.png,0,0,0,0,0 +2_1862.png,0,0,0,0,0 +2_1863.png,3,12,1,0,8 +2_1864.png,0,14,1,0,12 +2_1865.png,0,4,0,0,5 +2_1866.png,0,9,5,0,4 +2_1867.png,0,17,3,0,5 +2_1868.png,0,4,2,0,15 +2_1869.png,0,15,7,0,2 +2_187.png,3,0,5,0,0 +2_1870.png,0,2,1,0,7 +2_1871.png,0,9,0,0,12 +2_1872.png,0,32,2,0,2 +2_1873.png,0,24,0,0,10 +2_1874.png,0,23,2,0,9 +2_1875.png,0,14,4,0,0 +2_1876.png,0,15,0,0,12 +2_1877.png,0,17,0,0,12 +2_1878.png,0,1,0,0,1 +2_1879.png,0,19,1,0,0 +2_188.png,0,0,8,0,0 +2_1880.png,0,13,13,0,2 +2_1881.png,0,6,6,0,3 +2_1882.png,0,2,8,0,7 +2_1883.png,0,11,11,0,0 +2_1884.png,0,0,0,0,13 +2_1885.png,0,0,0,0,4 +2_1886.png,0,1,2,0,11 +2_1887.png,0,6,11,0,0 +2_1888.png,0,0,3,0,8 +2_1889.png,0,0,0,0,7 +2_189.png,7,1,7,0,0 +2_1890.png,0,5,10,0,2 +2_1891.png,0,1,1,0,11 +2_1892.png,0,1,0,0,10 +2_1893.png,0,0,0,0,13 +2_1894.png,0,0,0,0,0 +2_1895.png,0,4,7,0,7 +2_1896.png,0,0,0,0,0 +2_1897.png,0,0,0,0,0 +2_1898.png,0,1,16,0,0 +2_1899.png,0,4,25,0,0 +2_19.png,7,0,2,0,0 +2_190.png,0,0,7,0,3 +2_1900.png,0,0,4,0,0 +2_1901.png,0,0,6,0,0 +2_1902.png,0,0,14,0,0 +2_1903.png,0,0,11,0,0 +2_1904.png,0,0,12,0,0 +2_1905.png,0,0,10,0,0 +2_1906.png,0,0,6,0,0 +2_1907.png,0,0,8,0,0 +2_1908.png,0,4,10,0,0 +2_1909.png,0,1,12,0,0 +2_191.png,0,0,6,0,21 +2_1910.png,0,0,8,0,0 +2_1911.png,0,0,8,0,0 +2_1912.png,0,0,16,0,0 +2_1913.png,0,0,21,0,0 +2_1914.png,0,0,25,0,0 +2_1915.png,0,3,10,0,0 +2_1916.png,0,0,15,0,0 +2_1917.png,0,0,16,0,0 +2_1918.png,0,0,1,0,8 +2_1919.png,0,0,2,0,9 +2_192.png,0,2,6,0,22 +2_1920.png,0,0,9,0,11 +2_1921.png,0,2,4,0,8 +2_1922.png,0,0,0,0,13 +2_1923.png,0,1,5,0,12 +2_1924.png,0,1,18,0,2 +2_1925.png,0,2,13,0,0 +2_1926.png,0,2,16,0,0 +2_1927.png,0,0,17,0,6 +2_1928.png,0,0,8,0,8 +2_1929.png,0,0,0,0,6 +2_193.png,0,0,2,0,40 +2_1930.png,0,0,2,0,23 +2_1931.png,0,1,12,0,11 +2_1932.png,0,0,1,0,20 +2_1933.png,0,0,8,0,17 +2_1934.png,0,1,4,0,14 +2_1935.png,0,1,12,0,8 +2_1936.png,0,1,12,0,1 +2_1937.png,0,0,3,0,5 +2_1938.png,0,0,3,0,1 +2_1939.png,26,1,0,0,0 +2_194.png,0,1,1,0,55 +2_1940.png,27,0,0,0,0 +2_1941.png,32,0,0,0,0 +2_1942.png,4,0,0,18,0 +2_1943.png,18,0,0,0,0 +2_1944.png,3,1,0,24,0 +2_1945.png,28,6,0,0,0 +2_1946.png,18,0,0,0,0 +2_1947.png,5,15,17,0,0 +2_1948.png,14,2,9,0,0 +2_1949.png,8,1,10,0,0 +2_195.png,0,0,6,0,1 +2_1950.png,3,3,7,0,0 +2_1951.png,31,2,0,0,0 +2_1952.png,26,1,0,0,0 +2_1953.png,21,3,1,0,0 +2_1954.png,23,2,0,0,0 +2_1955.png,23,0,0,0,0 +2_1956.png,33,1,0,0,0 +2_1957.png,47,3,0,0,0 +2_1958.png,26,1,4,0,0 +2_1959.png,19,0,4,0,0 
+2_196.png,0,0,7,0,26 +2_1960.png,47,4,0,0,0 +2_1961.png,16,2,7,0,0 +2_1962.png,1,0,18,0,0 +2_1963.png,0,0,14,0,0 +2_1964.png,16,7,0,0,0 +2_1965.png,11,2,6,6,0 +2_1966.png,14,2,4,5,0 +2_1967.png,0,0,1,33,0 +2_1968.png,16,3,10,0,0 +2_1969.png,19,1,6,0,0 +2_197.png,0,0,4,0,9 +2_1970.png,17,1,6,0,0 +2_1971.png,14,0,5,0,0 +2_1972.png,20,0,4,0,0 +2_1973.png,0,3,16,0,0 +2_1974.png,0,1,18,0,0 +2_1975.png,0,4,19,0,0 +2_1976.png,0,2,20,0,0 +2_1977.png,0,4,14,0,0 +2_1978.png,3,5,5,0,0 +2_1979.png,0,4,18,0,0 +2_198.png,0,2,9,0,27 +2_1980.png,0,5,20,0,0 +2_1981.png,0,0,17,0,0 +2_1982.png,0,0,22,0,0 +2_1983.png,0,0,9,0,0 +2_1984.png,0,0,6,0,0 +2_1985.png,0,0,17,0,0 +2_1986.png,0,0,25,0,0 +2_1987.png,0,0,0,0,0 +2_1988.png,0,0,0,0,0 +2_1989.png,0,0,0,0,0 +2_199.png,0,2,3,0,9 +2_1990.png,0,0,0,0,0 +2_1991.png,0,0,0,0,0 +2_1992.png,0,0,0,0,0 +2_1993.png,17,1,0,0,0 +2_1994.png,16,1,7,0,0 +2_1995.png,2,1,17,0,0 +2_1996.png,20,0,2,0,0 +2_1997.png,17,2,1,0,0 +2_1998.png,18,0,2,0,0 +2_1999.png,16,0,2,0,0 +2_2.png,3,1,7,0,0 +2_20.png,10,0,0,0,0 +2_200.png,0,1,9,0,13 +2_2000.png,20,0,2,0,0 +2_2001.png,0,0,12,0,0 +2_2002.png,0,0,24,0,0 +2_2003.png,0,1,12,0,0 +2_2004.png,0,0,26,0,0 +2_2005.png,22,0,3,0,0 +2_2006.png,10,2,19,0,0 +2_2007.png,0,10,13,0,0 +2_2008.png,14,6,9,0,0 +2_2009.png,0,13,20,0,0 +2_201.png,0,0,7,0,1 +2_2010.png,6,8,20,0,0 +2_2011.png,0,0,1,0,0 +2_2012.png,3,46,4,0,0 +2_2013.png,6,55,0,0,0 +2_2014.png,16,30,2,0,0 +2_2015.png,10,0,0,0,0 +2_2016.png,4,0,1,0,0 +2_2017.png,16,0,2,0,0 +2_2018.png,13,0,0,0,0 +2_2019.png,17,1,1,0,0 +2_202.png,0,3,9,0,17 +2_2020.png,6,3,1,0,0 +2_2021.png,9,0,0,0,0 +2_2022.png,14,0,0,0,0 +2_2023.png,0,0,0,0,0 +2_2024.png,8,0,7,0,0 +2_2025.png,0,0,0,0,0 +2_2026.png,0,0,0,0,0 +2_2027.png,12,0,0,0,0 +2_2028.png,4,0,4,0,0 +2_2029.png,11,0,1,0,0 +2_203.png,0,2,10,0,7 +2_2030.png,5,62,1,0,0 +2_2031.png,6,67,0,0,0 +2_2032.png,0,96,3,0,0 +2_2033.png,0,92,4,0,0 +2_2034.png,12,43,0,0,0 +2_2035.png,4,90,3,0,0 +2_2036.png,0,92,7,0,0 +2_2037.png,6,19,0,0,0 +2_2038.png,0,85,5,0,0 +2_2039.png,18,0,8,0,0 +2_204.png,0,1,6,0,18 +2_2040.png,21,0,5,0,0 +2_2041.png,21,2,6,0,0 +2_2042.png,17,5,3,0,0 +2_2043.png,18,4,4,0,0 +2_2044.png,18,8,5,0,0 +2_2045.png,21,5,4,0,0 +2_2046.png,20,3,6,0,0 +2_2047.png,21,6,4,0,0 +2_2048.png,0,21,10,0,0 +2_2049.png,0,0,10,0,0 +2_205.png,0,0,3,0,23 +2_2050.png,0,1,5,0,0 +2_2051.png,20,1,6,0,0 +2_2052.png,0,1,6,0,0 +2_2053.png,0,2,8,0,0 +2_2054.png,0,0,9,0,0 +2_2055.png,0,1,9,0,0 +2_2056.png,0,0,7,0,0 +2_2057.png,0,0,10,0,0 +2_2058.png,0,3,13,0,0 +2_2059.png,0,0,0,0,18 +2_206.png,0,0,11,0,10 +2_2060.png,0,0,0,0,14 +2_2061.png,0,0,0,0,1 +2_2062.png,0,0,0,0,15 +2_2063.png,0,0,0,0,0 +2_2064.png,0,0,0,0,0 +2_2065.png,0,0,0,0,0 +2_2066.png,0,0,0,0,0 +2_2067.png,0,0,0,0,0 +2_2068.png,0,0,2,0,0 +2_2069.png,0,0,10,0,0 +2_207.png,0,1,15,0,17 +2_2070.png,0,0,12,0,0 +2_2071.png,0,0,2,0,0 +2_2072.png,0,0,1,0,0 +2_2073.png,0,0,2,0,0 +2_2074.png,0,0,0,0,0 +2_2075.png,0,1,17,0,0 +2_2076.png,0,0,12,0,0 +2_2077.png,0,0,23,0,0 +2_2078.png,0,0,16,0,0 +2_2079.png,0,0,10,0,0 +2_208.png,0,0,9,0,3 +2_2080.png,0,0,26,0,0 +2_2081.png,0,0,22,0,0 +2_2082.png,0,2,8,0,0 +2_2083.png,0,0,23,0,0 +2_2084.png,0,0,16,0,0 +2_2085.png,0,0,51,0,0 +2_2086.png,0,0,32,0,0 +2_2087.png,0,0,6,0,4 +2_2088.png,0,0,5,0,4 +2_2089.png,0,0,2,0,2 +2_209.png,0,2,4,0,40 +2_2090.png,28,1,10,0,0 +2_2091.png,0,2,9,0,0 +2_2092.png,0,3,9,0,0 +2_2093.png,0,1,13,0,0 +2_2094.png,0,0,15,0,0 +2_2095.png,0,2,12,0,0 +2_2096.png,38,0,0,0,0 +2_2097.png,38,0,0,0,0 +2_2098.png,50,0,0,0,0 +2_2099.png,2,0,6,0,0 +2_21.png,0,5,12,0,0 
+2_210.png,0,1,15,0,6 +2_2100.png,1,0,17,0,0 +2_2101.png,24,0,7,0,0 +2_2102.png,0,12,14,0,0 +2_2103.png,0,13,11,0,0 +2_2104.png,0,22,22,0,0 +2_2105.png,0,10,16,0,0 +2_2106.png,0,25,16,0,0 +2_2107.png,0,2,3,0,0 +2_2108.png,0,12,2,0,0 +2_2109.png,27,0,0,0,0 +2_211.png,0,7,11,0,20 +2_2110.png,30,0,0,0,0 +2_2111.png,43,0,1,0,0 +2_2112.png,30,0,1,0,0 +2_2113.png,19,1,7,0,0 +2_2114.png,49,5,1,1,0 +2_2115.png,11,14,4,5,0 +2_2116.png,51,10,1,1,0 +2_2117.png,0,7,0,15,0 +2_2118.png,6,7,0,10,0 +2_2119.png,7,22,0,3,0 +2_212.png,0,5,9,0,14 +2_2120.png,14,13,1,4,0 +2_2121.png,0,0,4,0,0 +2_2122.png,0,1,6,0,0 +2_2123.png,0,0,0,0,0 +2_2124.png,0,0,0,0,0 +2_2125.png,11,0,21,0,0 +2_2126.png,31,1,1,0,0 +2_2127.png,23,1,6,0,0 +2_2128.png,36,0,0,0,0 +2_2129.png,38,1,0,0,0 +2_213.png,0,0,12,0,5 +2_2130.png,36,0,0,0,0 +2_2131.png,28,1,0,0,0 +2_2132.png,0,0,2,0,0 +2_2133.png,9,0,8,0,0 +2_2134.png,21,0,0,0,0 +2_2135.png,15,0,1,0,0 +2_2136.png,14,0,0,0,0 +2_2137.png,0,1,0,0,0 +2_2138.png,0,0,0,0,0 +2_2139.png,0,1,0,0,0 +2_214.png,0,2,6,0,30 +2_2140.png,0,4,0,0,0 +2_2141.png,0,2,0,0,0 +2_2142.png,0,0,0,0,0 +2_2143.png,0,0,0,0,0 +2_2144.png,0,0,3,0,0 +2_2145.png,0,1,13,0,0 +2_2146.png,0,0,16,0,0 +2_2147.png,0,0,12,0,0 +2_2148.png,0,0,11,0,0 +2_2149.png,0,0,4,0,0 +2_215.png,0,0,2,0,4 +2_2150.png,12,6,6,0,0 +2_2151.png,8,5,16,0,0 +2_2152.png,12,23,12,0,0 +2_2153.png,8,4,22,0,0 +2_2154.png,20,0,6,0,0 +2_2155.png,12,7,10,0,0 +2_2156.png,15,1,3,0,0 +2_2157.png,0,0,5,0,7 +2_2158.png,0,0,1,0,0 +2_2159.png,0,0,1,0,3 +2_216.png,0,0,2,0,47 +2_2160.png,0,1,2,0,9 +2_2161.png,0,2,4,0,19 +2_2162.png,0,0,4,0,8 +2_2163.png,17,3,0,0,0 +2_2164.png,8,4,24,0,0 +2_2165.png,4,0,12,0,0 +2_2166.png,1,2,33,0,0 +2_2167.png,8,8,8,0,0 +2_2168.png,6,4,0,0,0 +2_2169.png,5,4,4,0,0 +2_217.png,0,0,14,0,6 +2_2170.png,4,2,3,0,0 +2_2171.png,12,0,3,0,0 +2_2172.png,12,0,3,0,0 +2_2173.png,11,0,3,0,0 +2_2174.png,13,0,2,0,0 +2_2175.png,0,0,3,0,0 +2_2176.png,0,11,22,0,0 +2_2177.png,0,2,10,0,0 +2_2178.png,0,3,13,0,0 +2_2179.png,0,1,21,0,0 +2_218.png,0,3,3,0,39 +2_2180.png,2,6,18,0,0 +2_2181.png,0,3,36,0,0 +2_2182.png,0,15,46,1,0 +2_2183.png,1,3,26,0,0 +2_2184.png,15,0,2,0,0 +2_2185.png,0,3,4,0,66 +2_2186.png,0,3,1,0,70 +2_2187.png,0,0,0,0,65 +2_2188.png,0,5,2,0,52 +2_2189.png,12,14,31,0,0 +2_219.png,0,0,2,0,54 +2_2190.png,0,0,7,0,10 +2_2191.png,0,0,6,0,11 +2_2192.png,0,0,1,0,10 +2_2193.png,0,0,2,0,15 +2_2194.png,0,0,3,0,9 +2_2195.png,0,2,1,0,12 +2_2196.png,0,0,2,0,16 +2_2197.png,0,0,1,0,11 +2_2198.png,19,0,1,0,0 +2_2199.png,25,3,7,0,0 +2_22.png,11,1,6,0,0 +2_220.png,0,0,2,0,30 +2_2200.png,26,0,0,0,0 +2_2201.png,18,3,7,0,0 +2_2202.png,23,2,4,0,0 +2_2203.png,28,1,2,0,0 +2_2204.png,35,1,0,0,0 +2_2205.png,30,1,2,0,0 +2_2206.png,23,0,2,0,0 +2_2207.png,23,2,8,0,0 +2_2208.png,6,12,11,0,0 +2_2209.png,24,0,2,0,0 +2_221.png,0,3,2,0,33 +2_2210.png,13,6,8,0,0 +2_2211.png,28,2,0,0,0 +2_2212.png,9,3,8,0,0 +2_2213.png,32,2,0,0,0 +2_2214.png,22,1,0,0,0 +2_2215.png,0,0,8,0,0 +2_2216.png,0,0,13,0,0 +2_2217.png,0,1,14,0,0 +2_2218.png,0,0,22,0,0 +2_2219.png,0,1,10,0,0 +2_222.png,0,3,6,0,40 +2_2220.png,0,3,17,0,0 +2_2221.png,0,0,15,0,0 +2_2222.png,0,0,13,0,0 +2_2223.png,0,0,17,0,0 +2_2224.png,18,0,0,0,0 +2_2225.png,19,2,1,0,0 +2_2226.png,15,0,1,0,0 +2_2227.png,17,0,1,0,0 +2_2228.png,15,1,0,0,0 +2_2229.png,8,2,9,0,0 +2_223.png,0,0,1,0,43 +2_2230.png,9,1,9,0,0 +2_2231.png,0,0,7,0,0 +2_2232.png,0,0,19,0,0 +2_2233.png,0,0,17,0,0 +2_2234.png,0,3,11,0,0 +2_2235.png,0,1,14,0,0 +2_2236.png,14,1,3,0,0 +2_2237.png,0,0,11,0,6 +2_2238.png,0,0,1,0,9 +2_2239.png,0,0,0,0,20 +2_224.png,0,1,4,0,12 
+2_2240.png,0,3,0,0,10 +2_2241.png,0,5,2,0,10 +2_2242.png,9,3,1,0,0 +2_2243.png,7,3,6,0,0 +2_2244.png,0,5,16,0,0 +2_2245.png,0,2,15,0,0 +2_2246.png,0,4,10,0,0 +2_2247.png,0,7,14,0,0 +2_2248.png,0,1,17,0,0 +2_2249.png,8,0,11,0,0 +2_225.png,0,0,6,0,0 +2_2250.png,0,1,17,0,0 +2_2251.png,17,0,5,0,0 +2_2252.png,19,0,0,0,0 +2_2253.png,20,0,1,0,0 +2_2254.png,19,0,2,0,0 +2_2255.png,14,0,4,0,0 +2_2256.png,21,2,0,0,0 +2_2257.png,27,0,1,0,0 +2_2258.png,12,1,0,0,0 +2_2259.png,21,3,2,0,0 +2_226.png,0,1,20,0,25 +2_2260.png,4,1,3,0,0 +2_2261.png,16,0,0,0,0 +2_2262.png,9,4,2,0,0 +2_2263.png,4,6,3,0,0 +2_2264.png,5,3,1,0,0 +2_2265.png,0,6,7,0,0 +2_2266.png,0,6,15,0,0 +2_2267.png,0,4,19,0,0 +2_2268.png,0,2,9,0,0 +2_2269.png,0,0,14,0,0 +2_227.png,0,4,3,0,32 +2_2270.png,0,0,4,0,0 +2_2271.png,0,1,46,0,0 +2_2272.png,0,0,46,0,0 +2_2273.png,0,3,20,0,14 +2_2274.png,0,0,8,0,25 +2_2275.png,0,0,0,0,19 +2_2276.png,0,0,14,0,14 +2_2277.png,0,2,7,0,20 +2_2278.png,0,1,36,0,0 +2_2279.png,0,0,14,0,0 +2_228.png,0,1,10,0,35 +2_2280.png,0,2,31,0,0 +2_2281.png,0,1,13,0,14 +2_2282.png,0,1,5,0,16 +2_2283.png,28,0,0,0,0 +2_2284.png,8,1,0,0,0 +2_2285.png,18,0,0,0,0 +2_2286.png,31,0,2,0,0 +2_2287.png,39,0,0,0,0 +2_2288.png,25,0,1,0,0 +2_2289.png,25,0,0,0,0 +2_229.png,0,1,8,0,0 +2_2290.png,7,3,6,0,0 +2_2291.png,45,1,0,0,0 +2_2292.png,38,0,5,0,0 +2_2293.png,23,0,5,0,0 +2_2294.png,34,2,3,0,0 +2_2295.png,32,0,0,0,0 +2_2296.png,13,4,12,0,0 +2_2297.png,29,5,5,0,0 +2_2298.png,24,4,2,0,0 +2_2299.png,27,3,2,0,0 +2_23.png,10,0,0,0,0 +2_230.png,0,3,5,0,0 +2_2300.png,6,5,17,0,0 +2_2301.png,11,0,13,0,0 +2_2302.png,26,0,0,0,0 +2_2303.png,22,0,0,0,0 +2_2304.png,24,0,0,0,0 +2_2305.png,26,0,0,0,0 +2_2306.png,25,0,1,0,0 +2_2307.png,22,1,0,0,0 +2_2308.png,41,0,0,0,0 +2_2309.png,37,2,0,0,0 +2_231.png,0,3,11,0,21 +2_2310.png,37,0,0,0,0 +2_2311.png,32,0,0,0,0 +2_2312.png,24,0,0,0,0 +2_2313.png,0,1,7,0,6 +2_2314.png,7,0,1,0,0 +2_2315.png,5,1,7,0,0 +2_2316.png,2,2,17,0,0 +2_2317.png,23,3,10,0,0 +2_2318.png,0,9,17,0,0 +2_2319.png,0,7,18,0,0 +2_232.png,0,2,9,0,36 +2_2320.png,0,1,24,0,0 +2_2321.png,0,10,20,0,0 +2_2322.png,0,7,15,0,0 +2_2323.png,0,1,5,0,41 +2_2324.png,0,4,9,0,48 +2_2325.png,0,1,2,0,44 +2_2326.png,0,2,7,0,0 +2_2327.png,0,1,15,0,0 +2_2328.png,0,3,9,0,0 +2_2329.png,0,0,13,0,0 +2_233.png,0,1,1,0,0 +2_2330.png,0,3,23,0,0 +2_2331.png,3,2,19,0,0 +2_2332.png,26,0,0,0,0 +2_2333.png,0,0,8,0,22 +2_2334.png,0,0,7,0,18 +2_2335.png,0,0,21,0,4 +2_2336.png,0,0,11,0,27 +2_2337.png,0,0,12,0,31 +2_2338.png,19,0,3,0,0 +2_2339.png,9,0,3,0,0 +2_234.png,10,0,7,0,0 +2_2340.png,11,0,1,0,0 +2_2341.png,12,0,2,0,0 +2_2342.png,8,0,1,0,0 +2_2343.png,14,0,1,0,0 +2_2344.png,9,0,3,0,0 +2_2345.png,9,0,1,0,0 +2_2346.png,1,6,25,0,0 +2_2347.png,0,0,15,0,0 +2_2348.png,0,0,21,0,0 +2_2349.png,0,1,7,0,0 +2_235.png,6,0,4,0,0 +2_2350.png,10,0,6,0,0 +2_2351.png,15,1,11,0,0 +2_2352.png,0,0,10,0,0 +2_2353.png,12,1,10,0,0 +2_2354.png,55,0,0,0,0 +2_2355.png,35,0,0,0,0 +2_2356.png,44,0,0,0,0 +2_2357.png,33,0,0,0,0 +2_2358.png,31,0,2,0,0 +2_2359.png,24,0,2,0,0 +2_236.png,0,0,0,0,0 +2_2360.png,0,0,4,0,0 +2_2361.png,0,0,2,0,0 +2_2362.png,0,0,5,0,0 +2_2363.png,0,0,8,0,0 +2_2364.png,0,0,10,0,0 +2_2365.png,19,0,5,0,0 +2_2366.png,21,2,2,0,0 +2_2367.png,26,1,8,0,0 +2_2368.png,5,1,13,0,0 +2_2369.png,28,0,6,0,0 +2_237.png,7,0,2,0,0 +2_2370.png,7,0,13,0,0 +2_2371.png,10,0,11,0,1 +2_2372.png,11,0,5,0,0 +2_2373.png,0,0,0,0,0 +2_2374.png,15,0,2,0,0 +2_2375.png,0,0,0,0,0 +2_2376.png,35,64,0,0,0 +2_2377.png,34,46,0,0,0 +2_2378.png,16,46,0,0,0 +2_2379.png,43,56,0,0,0 +2_238.png,11,1,2,0,0 
+2_2380.png,39,45,0,0,0 +2_2381.png,38,41,0,0,0 +2_2382.png,0,0,4,0,0 +2_2383.png,0,0,11,0,0 +2_2384.png,0,3,7,0,0 +2_2385.png,0,4,3,0,0 +2_2386.png,0,0,1,0,0 +2_2387.png,63,1,2,0,0 +2_2388.png,0,0,2,0,20 +2_2389.png,0,1,0,0,16 +2_239.png,0,9,13,0,14 +2_2390.png,0,0,1,0,35 +2_2391.png,0,0,0,0,17 +2_2392.png,0,97,16,0,0 +2_2393.png,0,47,26,0,0 +2_2394.png,0,88,24,0,0 +2_2395.png,17,1,0,0,0 +2_2396.png,21,0,0,0,0 +2_2397.png,7,5,0,0,0 +2_2398.png,45,2,0,0,0 +2_2399.png,40,1,0,0,0 +2_24.png,10,0,0,0,0 +2_240.png,0,19,11,0,13 +2_2400.png,33,1,1,0,0 +2_2401.png,8,0,0,0,0 +2_2402.png,4,1,2,0,0 +2_2403.png,17,2,1,0,0 +2_2404.png,17,0,0,0,0 +2_2405.png,23,5,3,0,0 +2_2406.png,20,7,1,0,0 +2_2407.png,31,5,2,0,0 +2_2408.png,29,5,1,0,0 +2_2409.png,0,0,1,0,0 +2_241.png,0,12,11,0,6 +2_2410.png,0,0,1,0,0 +2_2411.png,0,0,13,0,0 +2_2412.png,0,1,18,0,0 +2_2413.png,0,0,9,0,0 +2_2414.png,0,0,23,0,0 +2_2415.png,0,0,15,0,0 +2_2416.png,0,0,21,0,0 +2_2417.png,0,0,22,0,0 +2_2418.png,0,0,23,0,0 +2_2419.png,17,2,7,21,0 +2_242.png,0,4,12,0,21 +2_2420.png,46,0,9,6,0 +2_2421.png,26,1,5,14,0 +2_2422.png,33,2,10,0,0 +2_2423.png,4,1,11,0,0 +2_2424.png,27,1,5,0,0 +2_2425.png,14,14,9,0,0 +2_2426.png,23,10,6,2,0 +2_2427.png,33,10,7,0,0 +2_2428.png,16,14,2,2,0 +2_2429.png,29,10,4,2,0 +2_243.png,0,12,7,0,14 +2_2430.png,25,4,6,2,0 +2_2431.png,19,11,1,3,0 +2_2432.png,21,6,6,0,0 +2_2433.png,32,3,6,1,0 +2_2434.png,23,0,5,0,0 +2_2435.png,3,0,24,0,0 +2_2436.png,23,0,2,0,0 +2_2437.png,0,0,16,0,0 +2_2438.png,0,0,16,0,0 +2_2439.png,4,1,4,0,0 +2_244.png,0,1,4,0,0 +2_2440.png,2,1,7,0,0 +2_2441.png,34,0,0,0,0 +2_2442.png,25,3,1,0,0 +2_2443.png,24,0,4,0,0 +2_2444.png,32,2,2,0,0 +2_2445.png,19,0,2,0,0 +2_2446.png,2,0,18,0,0 +2_2447.png,0,0,12,0,0 +2_2448.png,1,1,8,0,0 +2_2449.png,0,0,16,0,0 +2_245.png,0,1,3,0,33 +2_2450.png,24,11,2,2,0 +2_2451.png,14,2,9,3,0 +2_2452.png,2,68,6,2,0 +2_2453.png,13,12,1,4,0 +2_2454.png,12,43,1,0,0 +2_2455.png,0,1,3,0,0 +2_2456.png,0,2,2,0,0 +2_2457.png,0,1,1,0,0 +2_2458.png,0,2,2,0,0 +2_2459.png,0,3,1,0,0 +2_246.png,0,2,9,0,0 +2_2460.png,0,0,2,0,0 +2_2461.png,0,2,1,0,0 +2_2462.png,0,1,0,0,0 +2_2463.png,0,1,0,0,0 +2_2464.png,0,0,11,0,0 +2_2465.png,0,0,16,0,0 +2_2466.png,23,0,0,0,0 +2_2467.png,17,1,5,0,0 +2_2468.png,25,0,1,0,0 +2_2469.png,23,3,3,0,0 +2_247.png,0,0,6,0,5 +2_2470.png,27,0,0,0,0 +2_2471.png,24,4,2,0,0 +2_2472.png,10,71,8,0,0 +2_2473.png,13,61,5,0,0 +2_2474.png,15,28,7,0,0 +2_2475.png,12,51,6,0,0 +2_2476.png,19,24,13,0,0 +2_2477.png,15,4,2,0,0 +2_2478.png,21,3,0,0,0 +2_2479.png,19,1,0,0,0 +2_248.png,0,0,4,0,49 +2_2480.png,24,0,0,0,0 +2_2481.png,23,0,0,0,0 +2_2482.png,21,0,1,0,0 +2_2483.png,0,1,0,0,4 +2_2484.png,0,2,2,0,10 +2_2485.png,0,2,1,0,12 +2_2486.png,0,0,0,1,16 +2_2487.png,0,1,3,0,19 +2_2488.png,0,1,2,0,17 +2_2489.png,0,0,0,0,16 +2_249.png,0,1,5,0,45 +2_2490.png,0,2,0,0,20 +2_2491.png,0,1,1,0,18 +2_2492.png,0,0,0,0,35 +2_2493.png,0,0,3,0,32 +2_2494.png,0,0,1,0,16 +2_2495.png,0,0,0,0,3 +2_2496.png,0,0,6,0,6 +2_2497.png,0,2,0,0,0 +2_2498.png,0,0,1,0,7 +2_2499.png,0,3,3,0,22 +2_25.png,4,1,2,0,0 +2_250.png,0,0,8,0,30 +2_2500.png,0,2,2,0,24 +2_2501.png,0,0,11,0,23 +2_2502.png,0,0,5,0,22 +2_2503.png,0,4,2,0,24 +2_2504.png,0,4,5,0,25 +2_2505.png,0,3,6,0,24 +2_2506.png,0,5,9,0,13 +2_2507.png,0,9,5,0,5 +2_2508.png,0,3,5,0,32 +2_2509.png,0,5,6,0,8 +2_251.png,0,2,7,0,27 +2_2510.png,33,2,2,0,0 +2_2511.png,43,0,2,0,0 +2_2512.png,24,0,14,0,0 +2_2513.png,30,0,8,0,0 +2_2514.png,26,0,1,0,7 +2_2515.png,22,1,9,0,4 +2_2516.png,35,0,3,0,0 +2_2517.png,13,2,8,0,0 +2_2518.png,9,0,11,0,0 +2_2519.png,0,1,1,0,33 
+2_252.png,0,6,6,0,40 +2_2520.png,0,6,7,0,32 +2_2521.png,0,0,6,0,22 +2_2522.png,0,3,12,0,30 +2_2523.png,0,0,19,0,0 +2_2524.png,0,2,9,0,0 +2_2525.png,0,0,9,0,0 +2_2526.png,0,1,17,0,0 +2_2527.png,0,1,25,0,0 +2_2528.png,0,0,21,0,0 +2_2529.png,0,0,13,0,0 +2_253.png,0,7,6,0,31 +2_2530.png,0,2,8,0,0 +2_2531.png,0,2,16,0,0 +2_2532.png,0,0,15,0,0 +2_2533.png,0,0,7,0,0 +2_2534.png,0,0,15,0,0 +2_2535.png,0,0,1,0,0 +2_2536.png,28,0,0,0,0 +2_2537.png,32,0,0,0,0 +2_2538.png,24,0,5,0,0 +2_2539.png,31,0,6,0,0 +2_254.png,0,12,8,0,23 +2_2540.png,41,0,4,0,0 +2_2541.png,36,0,0,0,0 +2_2542.png,35,2,2,0,0 +2_2543.png,25,0,6,0,0 +2_2544.png,30,0,0,0,0 +2_2545.png,31,0,0,0,0 +2_2546.png,36,0,0,0,0 +2_2547.png,26,0,0,0,0 +2_2548.png,29,0,0,0,0 +2_2549.png,34,0,0,0,0 +2_255.png,0,0,3,0,17 +2_2550.png,25,0,7,0,0 +2_2551.png,20,1,2,0,0 +2_2552.png,47,0,0,0,0 +2_2553.png,35,0,1,0,0 +2_2554.png,2,2,15,0,0 +2_2555.png,16,0,2,0,0 +2_2556.png,33,0,4,0,0 +2_2557.png,17,0,3,0,0 +2_2558.png,0,1,15,0,0 +2_2559.png,24,0,8,0,0 +2_256.png,0,0,6,0,19 +2_2560.png,16,0,16,0,0 +2_2561.png,2,1,24,0,0 +2_2562.png,0,0,19,0,0 +2_2563.png,31,1,1,0,0 +2_2564.png,0,3,18,0,0 +2_2565.png,4,4,12,0,0 +2_2566.png,0,3,41,0,0 +2_2567.png,0,0,44,0,0 +2_2568.png,0,7,30,0,0 +2_2569.png,0,3,31,0,0 +2_257.png,0,2,6,0,5 +2_2570.png,0,3,31,0,0 +2_2571.png,0,1,45,0,0 +2_2572.png,0,0,26,0,0 +2_2573.png,0,5,40,0,0 +2_2574.png,0,12,33,0,0 +2_2575.png,0,2,42,0,0 +2_2576.png,0,0,37,0,0 +2_2577.png,0,2,38,0,0 +2_2578.png,0,12,25,0,0 +2_2579.png,0,2,45,0,0 +2_258.png,0,0,2,0,25 +2_2580.png,0,2,32,0,0 +2_2581.png,33,2,17,0,0 +2_2582.png,20,0,0,0,0 +2_2583.png,17,0,2,0,0 +2_2584.png,14,2,13,0,0 +2_2585.png,17,1,13,0,0 +2_2586.png,18,0,6,0,0 +2_2587.png,28,1,1,0,0 +2_2588.png,27,2,7,0,0 +2_2589.png,24,2,3,0,0 +2_259.png,0,1,7,0,22 +2_2590.png,49,0,2,0,0 +2_2591.png,9,0,0,0,0 +2_2592.png,3,0,1,0,0 +2_2593.png,38,2,6,0,0 +2_2594.png,0,0,21,0,0 +2_2595.png,0,1,36,3,0 +2_2596.png,0,0,35,0,0 +2_2597.png,0,0,17,0,0 +2_2598.png,0,0,20,0,0 +2_2599.png,0,0,20,1,0 +2_26.png,0,0,8,0,0 +2_260.png,0,0,3,0,26 +2_2600.png,0,0,13,1,0 +2_2601.png,0,1,19,3,0 +2_2602.png,0,3,18,1,0 +2_2603.png,0,1,16,1,0 +2_2604.png,0,0,19,1,0 +2_2605.png,0,0,19,0,0 +2_2606.png,0,1,26,0,0 +2_2607.png,88,3,23,0,0 +2_2608.png,86,8,8,0,0 +2_2609.png,80,0,0,0,0 +2_261.png,0,0,5,0,20 +2_2610.png,56,1,8,0,0 +2_2611.png,81,0,0,0,0 +2_2612.png,122,4,0,0,0 +2_2613.png,119,0,1,0,0 +2_2614.png,92,2,0,0,0 +2_2615.png,102,1,4,0,0 +2_2616.png,94,7,15,0,0 +2_2617.png,107,2,1,0,0 +2_2618.png,84,1,4,0,0 +2_2619.png,97,4,21,1,0 +2_262.png,23,0,0,0,0 +2_2620.png,115,1,5,3,0 +2_2621.png,71,7,0,1,0 +2_2622.png,31,0,0,0,0 +2_2623.png,24,0,0,0,0 +2_2624.png,38,3,0,0,0 +2_2625.png,39,1,0,0,0 +2_2626.png,48,0,1,0,0 +2_2627.png,45,1,1,0,0 +2_2628.png,23,0,3,1,0 +2_2629.png,42,0,0,0,0 +2_263.png,17,6,6,0,0 +2_2630.png,66,2,0,0,0 +2_2631.png,36,0,0,0,0 +2_2632.png,18,0,0,0,0 +2_2633.png,24,0,0,0,0 +2_2634.png,0,28,36,0,14 +2_2635.png,0,13,17,0,35 +2_2636.png,0,1,0,0,17 +2_2637.png,0,0,5,0,0 +2_2638.png,0,0,12,0,0 +2_2639.png,0,0,12,0,0 +2_264.png,22,3,1,0,0 +2_2640.png,0,0,6,0,0 +2_2641.png,0,0,3,0,0 +2_2642.png,0,0,10,0,0 +2_2643.png,0,0,5,0,0 +2_2644.png,0,7,13,0,0 +2_2645.png,62,0,3,0,0 +2_2646.png,24,0,0,0,0 +2_2647.png,39,6,12,0,0 +2_2648.png,2,2,22,0,0 +2_2649.png,23,0,0,0,0 +2_265.png,14,2,2,0,0 +2_2650.png,0,3,16,0,0 +2_2651.png,0,3,15,0,0 +2_2652.png,0,1,9,0,0 +2_2653.png,0,1,3,0,0 +2_2654.png,0,0,2,0,0 +2_2655.png,0,4,4,0,0 +2_2656.png,0,0,2,0,0 +2_2657.png,0,4,18,0,0 +2_2658.png,51,2,0,0,0 +2_2659.png,44,4,6,0,0 
+2_266.png,0,15,18,0,0 +2_2660.png,81,2,2,0,0 +2_2661.png,56,1,0,0,0 +2_2662.png,0,138,26,0,0 +2_2663.png,0,129,13,0,0 +2_2664.png,0,141,22,0,0 +2_2665.png,12,0,35,0,0 +2_2666.png,34,9,28,0,0 +2_2667.png,0,7,7,0,39 +2_2668.png,0,5,6,0,33 +2_2669.png,0,1,18,0,0 +2_267.png,0,17,21,0,0 +2_2670.png,0,1,10,0,0 +2_2671.png,0,0,27,0,0 +2_2672.png,0,17,38,0,0 +2_2673.png,0,6,33,0,0 +2_2674.png,0,10,52,0,0 +2_2675.png,53,1,8,0,0 +2_2676.png,66,1,8,0,0 +2_2677.png,1,5,24,0,0 +2_2678.png,80,1,0,0,0 +2_2679.png,0,17,17,0,0 +2_268.png,1,20,11,0,0 +2_2680.png,0,13,44,0,0 +2_2681.png,18,5,31,0,0 +2_2682.png,15,5,0,0,0 +2_2683.png,15,21,20,0,0 +2_2684.png,0,0,2,0,0 +2_2685.png,0,0,2,0,0 +2_2686.png,0,0,2,0,0 +2_2687.png,0,6,6,0,0 +2_2688.png,0,4,0,0,0 +2_2689.png,0,2,0,0,0 +2_269.png,25,0,0,0,0 +2_2690.png,0,0,3,0,0 +2_2691.png,0,0,9,0,0 +2_2692.png,0,0,10,0,0 +2_2693.png,0,6,1,0,0 +2_2694.png,0,0,0,0,0 +2_2695.png,0,0,0,0,0 +2_2696.png,0,0,2,0,0 +2_2697.png,0,1,17,0,0 +2_2698.png,20,14,43,0,0 +2_2699.png,23,2,18,0,0 +2_27.png,1,0,10,0,0 +2_270.png,16,13,5,0,0 +2_2700.png,28,2,16,0,0 +2_2701.png,33,0,10,0,0 +2_2702.png,35,2,7,0,0 +2_2703.png,0,0,3,0,0 +2_2704.png,0,0,4,0,0 +2_2705.png,0,0,4,0,0 +2_2706.png,0,0,12,0,0 +2_2707.png,0,0,2,0,0 +2_2708.png,0,0,1,0,0 +2_2709.png,0,0,6,0,0 +2_271.png,19,0,0,0,0 +2_2710.png,0,35,30,0,25 +2_2711.png,0,9,11,0,13 +2_2712.png,0,11,18,0,26 +2_2713.png,0,15,22,0,26 +2_2714.png,40,0,0,17,0 +2_2715.png,31,3,12,0,0 +2_2716.png,45,4,4,0,0 +2_2717.png,12,44,42,0,0 +2_2718.png,21,0,19,0,0 +2_2719.png,0,10,36,0,0 +2_272.png,8,0,3,0,0 +2_2720.png,0,18,33,0,0 +2_2721.png,0,39,36,0,0 +2_273.png,18,0,4,0,0 +2_274.png,0,13,12,0,12 +2_275.png,18,5,7,0,0 +2_276.png,25,0,0,0,0 +2_277.png,23,4,6,0,0 +2_278.png,19,1,4,0,0 +2_279.png,0,24,20,0,0 +2_28.png,10,0,3,0,0 +2_280.png,0,11,11,0,0 +2_281.png,13,9,13,0,0 +2_282.png,0,14,17,0,0 +2_283.png,0,5,9,0,0 +2_284.png,0,5,22,0,0 +2_285.png,31,0,5,0,0 +2_286.png,10,23,2,0,0 +2_287.png,6,6,12,0,0 +2_288.png,0,0,7,0,0 +2_289.png,27,4,6,0,0 +2_29.png,1,1,8,0,0 +2_290.png,5,3,3,0,0 +2_291.png,0,0,7,0,0 +2_292.png,2,3,11,0,0 +2_293.png,0,5,7,0,0 +2_294.png,4,3,5,0,0 +2_295.png,0,0,5,0,0 +2_296.png,6,0,3,0,0 +2_297.png,3,1,11,0,0 +2_298.png,4,3,10,0,0 +2_299.png,0,0,9,0,0 +2_3.png,10,0,1,0,0 +2_30.png,3,0,5,0,0 +2_300.png,0,1,8,0,0 +2_301.png,5,1,6,0,0 +2_302.png,12,0,4,0,0 +2_303.png,2,2,6,0,0 +2_304.png,0,0,6,0,0 +2_305.png,0,0,3,0,0 +2_306.png,0,0,2,0,37 +2_307.png,0,1,3,0,37 +2_308.png,0,1,3,0,14 +2_309.png,0,2,9,0,10 +2_31.png,6,1,6,0,0 +2_310.png,0,0,8,0,0 +2_311.png,0,0,1,0,23 +2_312.png,0,1,3,0,7 +2_313.png,0,0,1,0,13 +2_314.png,0,2,4,0,10 +2_315.png,29,1,0,0,0 +2_316.png,24,3,0,0,0 +2_317.png,17,6,0,0,0 +2_318.png,7,8,3,0,0 +2_319.png,15,6,4,0,0 +2_32.png,0,1,11,0,0 +2_320.png,14,2,4,0,0 +2_321.png,18,3,0,0,0 +2_322.png,13,2,4,0,0 +2_323.png,20,8,3,0,0 +2_324.png,25,3,2,0,0 +2_325.png,22,4,0,0,0 +2_326.png,16,6,5,0,0 +2_327.png,19,1,0,0,0 +2_328.png,23,0,0,0,0 +2_329.png,35,1,1,0,0 +2_33.png,2,1,4,0,0 +2_330.png,12,2,5,0,0 +2_331.png,19,8,5,0,0 +2_332.png,17,6,4,0,0 +2_333.png,15,0,4,0,0 +2_334.png,19,0,0,0,0 +2_335.png,5,0,0,0,0 +2_336.png,3,0,0,0,0 +2_337.png,7,0,0,0,0 +2_338.png,0,7,9,0,0 +2_339.png,0,2,12,0,0 +2_34.png,9,0,4,0,0 +2_340.png,0,3,11,0,0 +2_341.png,0,3,9,0,0 +2_342.png,0,1,8,0,0 +2_343.png,0,7,11,0,0 +2_344.png,5,0,2,0,0 +2_345.png,5,1,1,0,0 +2_346.png,5,0,4,0,0 +2_347.png,4,1,2,0,0 +2_348.png,5,4,4,0,0 +2_349.png,1,2,3,0,0 +2_35.png,0,0,4,0,2 +2_350.png,3,1,3,0,0 +2_351.png,28,0,0,0,0 +2_352.png,26,0,0,0,0 
+2_353.png,26,0,0,0,0 +2_354.png,2,4,7,0,0 +2_355.png,18,1,1,0,0 +2_356.png,17,0,0,0,0 +2_357.png,7,0,5,0,0 +2_358.png,8,0,0,0,0 +2_359.png,16,0,0,0,0 +2_36.png,0,0,4,0,23 +2_360.png,2,1,7,0,0 +2_361.png,18,0,0,0,0 +2_362.png,4,0,8,0,0 +2_363.png,1,0,3,0,0 +2_364.png,10,0,3,0,0 +2_365.png,11,0,4,0,0 +2_366.png,5,0,3,0,0 +2_367.png,8,0,0,0,0 +2_368.png,15,0,0,0,0 +2_369.png,16,0,1,0,0 +2_37.png,0,0,13,0,11 +2_370.png,9,0,2,0,0 +2_371.png,6,16,7,0,0 +2_372.png,14,1,4,0,0 +2_373.png,6,1,4,0,0 +2_374.png,12,1,5,0,0 +2_375.png,6,0,7,0,0 +2_376.png,13,0,0,0,0 +2_377.png,12,1,1,0,0 +2_378.png,7,0,5,0,0 +2_379.png,9,0,8,0,0 +2_38.png,0,0,3,0,19 +2_380.png,4,0,4,0,0 +2_381.png,5,11,23,0,0 +2_382.png,28,1,1,0,0 +2_383.png,12,13,14,0,0 +2_384.png,10,12,26,0,0 +2_385.png,3,2,4,0,0 +2_386.png,21,0,1,0,0 +2_387.png,2,1,3,0,0 +2_388.png,9,1,2,0,0 +2_389.png,0,5,8,0,0 +2_39.png,0,0,0,0,24 +2_390.png,6,3,8,0,0 +2_391.png,0,2,10,0,0 +2_392.png,2,2,6,0,0 +2_393.png,3,9,4,0,0 +2_394.png,16,3,0,0,0 +2_395.png,9,3,2,0,0 +2_396.png,0,13,7,0,0 +2_397.png,10,0,0,0,0 +2_398.png,11,0,3,0,0 +2_399.png,14,0,2,0,0 +2_4.png,9,1,1,0,0 +2_40.png,0,1,5,0,0 +2_400.png,4,2,1,0,0 +2_401.png,11,0,7,0,0 +2_402.png,8,0,1,0,0 +2_403.png,4,1,4,0,0 +2_404.png,8,1,5,0,0 +2_405.png,12,2,1,0,0 +2_406.png,11,1,3,0,0 +2_407.png,15,2,9,0,0 +2_408.png,13,0,1,0,0 +2_409.png,5,1,3,0,0 +2_41.png,0,0,2,0,0 +2_410.png,5,2,3,0,0 +2_411.png,2,1,4,0,0 +2_412.png,7,1,0,0,0 +2_413.png,8,0,3,0,0 +2_414.png,12,0,3,0,0 +2_415.png,15,1,4,0,0 +2_416.png,1,0,5,0,0 +2_417.png,23,4,3,0,0 +2_418.png,19,0,3,0,0 +2_419.png,11,3,6,0,0 +2_42.png,0,0,5,0,12 +2_420.png,16,3,0,0,0 +2_421.png,21,1,0,0,0 +2_422.png,13,1,12,0,0 +2_423.png,0,1,4,0,0 +2_424.png,12,0,0,0,0 +2_425.png,16,0,2,0,0 +2_426.png,35,0,0,0,0 +2_427.png,17,0,1,0,0 +2_428.png,21,0,0,0,0 +2_429.png,24,0,3,0,0 +2_43.png,16,2,0,0,0 +2_430.png,1,2,5,0,0 +2_431.png,10,2,4,0,0 +2_432.png,5,1,7,0,0 +2_433.png,11,1,2,0,0 +2_434.png,0,0,7,0,0 +2_435.png,0,1,7,0,0 +2_436.png,0,1,16,0,0 +2_437.png,7,1,3,0,0 +2_438.png,5,3,7,0,0 +2_439.png,6,6,11,0,0 +2_44.png,6,4,8,0,0 +2_440.png,0,3,9,0,0 +2_441.png,8,3,6,0,0 +2_442.png,2,6,4,0,0 +2_443.png,1,3,2,0,0 +2_444.png,3,2,1,0,0 +2_445.png,6,2,2,0,0 +2_446.png,0,1,8,0,0 +2_447.png,5,0,5,0,0 +2_448.png,0,4,5,0,0 +2_449.png,14,3,3,0,0 +2_45.png,6,4,3,0,0 +2_450.png,4,1,7,0,0 +2_451.png,0,0,3,0,0 +2_452.png,7,1,1,0,0 +2_453.png,2,1,4,0,0 +2_454.png,0,0,5,0,0 +2_455.png,6,1,5,0,0 +2_456.png,0,0,7,0,0 +2_457.png,10,5,5,0,0 +2_458.png,11,7,5,0,0 +2_459.png,10,0,2,0,0 +2_46.png,25,0,1,0,0 +2_460.png,0,0,7,0,0 +2_461.png,2,3,11,0,0 +2_462.png,0,3,8,0,0 +2_463.png,2,0,6,0,0 +2_464.png,1,0,5,0,0 +2_465.png,1,1,7,0,0 +2_466.png,13,1,3,0,0 +2_467.png,6,0,9,0,0 +2_468.png,8,1,7,0,0 +2_469.png,4,0,2,0,0 +2_47.png,10,5,6,0,0 +2_470.png,14,4,3,0,0 +2_471.png,7,5,10,0,0 +2_472.png,19,1,5,0,0 +2_473.png,3,0,7,0,0 +2_474.png,4,1,8,0,0 +2_475.png,6,0,0,0,0 +2_476.png,7,0,2,0,0 +2_477.png,24,0,1,0,0 +2_478.png,1,2,10,0,0 +2_479.png,16,0,5,0,0 +2_48.png,2,0,4,0,0 +2_480.png,23,0,5,0,0 +2_481.png,1,2,13,0,0 +2_482.png,19,2,5,0,0 +2_483.png,16,2,2,0,0 +2_484.png,13,4,6,0,0 +2_485.png,12,1,8,0,0 +2_486.png,21,1,5,0,0 +2_487.png,10,5,2,0,0 +2_488.png,12,1,1,0,0 +2_489.png,10,0,4,0,0 +2_49.png,0,1,8,0,0 +2_490.png,6,2,8,0,0 +2_491.png,0,18,10,0,0 +2_492.png,0,36,8,0,0 +2_493.png,23,2,2,0,0 +2_494.png,22,1,3,0,0 +2_495.png,23,6,4,0,0 +2_496.png,27,0,0,0,0 +2_497.png,16,0,2,0,0 +2_498.png,18,3,2,0,0 +2_499.png,27,0,0,0,0 +2_5.png,13,0,1,0,0 +2_50.png,0,4,8,0,8 +2_500.png,29,0,0,0,0 
+2_501.png,32,0,0,0,0 +2_502.png,31,0,0,0,0 +2_503.png,22,0,0,0,0 +2_504.png,28,1,0,0,0 +2_505.png,29,1,0,0,0 +2_506.png,16,1,0,0,0 +2_507.png,8,7,7,0,0 +2_508.png,3,23,7,0,0 +2_509.png,0,11,10,0,0 +2_51.png,0,0,1,0,44 +2_510.png,0,11,6,0,0 +2_511.png,0,25,7,0,0 +2_512.png,1,17,7,0,0 +2_513.png,0,24,8,0,0 +2_514.png,1,6,8,0,0 +2_515.png,23,9,2,0,0 +2_516.png,18,6,5,0,0 +2_517.png,1,17,9,0,0 +2_518.png,15,14,2,0,0 +2_519.png,14,7,9,0,0 +2_52.png,0,0,1,0,30 +2_520.png,22,1,1,0,0 +2_521.png,6,5,4,0,0 +2_522.png,2,5,8,0,0 +2_523.png,21,0,4,0,0 +2_524.png,8,4,7,0,0 +2_525.png,10,2,11,0,0 +2_526.png,17,0,0,0,0 +2_527.png,17,2,3,0,0 +2_528.png,0,2,7,0,0 +2_529.png,4,2,1,0,0 +2_53.png,0,0,2,0,12 +2_530.png,1,2,6,0,0 +2_531.png,0,0,10,0,11 +2_532.png,0,1,2,0,24 +2_533.png,0,0,1,0,27 +2_534.png,0,0,4,0,9 +2_535.png,0,5,6,0,0 +2_536.png,0,3,4,0,10 +2_537.png,0,1,1,0,36 +2_538.png,0,1,2,0,28 +2_539.png,0,1,3,0,21 +2_54.png,0,0,10,0,9 +2_540.png,0,2,7,0,4 +2_541.png,12,0,0,0,0 +2_542.png,26,0,0,0,0 +2_543.png,12,0,2,0,0 +2_544.png,12,0,1,0,0 +2_545.png,28,2,1,0,0 +2_546.png,0,0,0,0,0 +2_547.png,23,0,3,0,0 +2_548.png,50,0,0,0,0 +2_549.png,22,0,0,0,0 +2_55.png,0,2,2,0,36 +2_550.png,0,0,1,0,0 +2_551.png,0,0,3,0,0 +2_552.png,12,0,1,0,0 +2_553.png,39,0,0,0,0 +2_554.png,35,0,1,0,0 +2_555.png,11,0,1,0,0 +2_556.png,57,1,4,0,0 +2_557.png,27,0,1,0,0 +2_558.png,24,0,1,0,0 +2_559.png,0,1,2,0,0 +2_56.png,0,0,4,0,17 +2_560.png,0,0,2,0,0 +2_561.png,12,0,3,0,0 +2_562.png,11,1,2,0,0 +2_563.png,56,0,0,0,0 +2_564.png,45,1,0,0,0 +2_565.png,5,0,0,0,0 +2_566.png,0,0,6,0,0 +2_567.png,31,0,0,0,0 +2_568.png,26,0,0,0,0 +2_569.png,30,1,0,0,0 +2_57.png,0,0,2,0,20 +2_570.png,21,3,0,0,0 +2_571.png,27,4,0,0,0 +2_572.png,80,0,0,0,0 +2_573.png,6,0,1,0,0 +2_574.png,16,6,0,0,0 +2_575.png,21,0,0,0,0 +2_576.png,64,0,1,0,0 +2_577.png,58,1,0,0,0 +2_578.png,12,0,2,0,0 +2_579.png,15,4,3,0,0 +2_58.png,0,0,5,0,8 +2_580.png,0,5,5,0,0 +2_581.png,0,0,0,0,41 +2_582.png,0,2,11,1,7 +2_583.png,0,1,0,0,70 +2_584.png,0,3,11,0,0 +2_585.png,1,3,2,0,0 +2_586.png,1,6,4,0,0 +2_587.png,2,5,6,0,0 +2_588.png,2,6,11,0,0 +2_589.png,4,0,2,0,0 +2_59.png,0,0,1,0,16 +2_590.png,3,0,5,0,0 +2_591.png,4,0,5,0,0 +2_592.png,0,0,1,0,0 +2_593.png,0,0,1,0,0 +2_594.png,2,2,9,0,0 +2_595.png,0,2,0,0,0 +2_596.png,0,12,4,0,0 +2_597.png,1,5,4,0,0 +2_598.png,6,1,2,0,0 +2_599.png,5,1,2,0,0 +2_6.png,8,0,0,0,0 +2_60.png,0,2,1,0,24 +2_600.png,12,3,0,0,0 +2_601.png,9,2,1,0,0 +2_602.png,23,5,0,0,0 +2_603.png,0,19,8,0,0 +2_604.png,3,1,1,0,0 +2_605.png,1,1,2,0,0 +2_606.png,2,4,7,0,0 +2_607.png,1,23,8,0,0 +2_608.png,0,39,1,0,0 +2_609.png,0,42,1,0,0 +2_61.png,0,0,1,0,37 +2_610.png,0,8,4,0,0 +2_611.png,0,15,5,0,0 +2_612.png,0,7,7,0,0 +2_613.png,12,0,1,0,0 +2_614.png,15,1,0,0,0 +2_615.png,7,0,9,0,0 +2_616.png,0,3,6,0,0 +2_617.png,0,5,2,0,0 +2_618.png,4,5,2,0,0 +2_619.png,12,0,1,0,0 +2_62.png,0,1,6,0,33 +2_620.png,0,4,5,0,0 +2_621.png,0,7,4,0,0 +2_622.png,13,0,4,0,0 +2_623.png,10,6,3,0,0 +2_624.png,0,8,14,0,0 +2_625.png,3,5,6,0,0 +2_626.png,11,4,6,0,0 +2_627.png,12,3,4,0,0 +2_628.png,1,8,16,0,0 +2_629.png,7,7,2,0,0 +2_63.png,0,2,2,0,25 +2_630.png,5,6,19,0,0 +2_631.png,2,16,10,0,0 +2_632.png,1,5,11,0,0 +2_633.png,18,0,1,0,0 +2_634.png,20,0,2,0,0 +2_635.png,26,0,0,0,0 +2_636.png,26,0,0,0,0 +2_637.png,30,0,0,0,0 +2_638.png,0,0,8,0,11 +2_639.png,0,1,12,0,0 +2_64.png,0,0,9,0,31 +2_640.png,0,1,7,0,30 +2_641.png,9,0,0,0,0 +2_642.png,4,2,1,0,0 +2_643.png,15,1,0,0,0 +2_644.png,15,2,1,0,0 +2_645.png,19,0,0,0,0 +2_646.png,16,2,0,0,0 +2_647.png,5,4,7,0,0 +2_648.png,8,11,2,0,0 +2_649.png,0,44,3,0,0 
+2_65.png,3,1,3,0,0 +2_650.png,0,18,14,0,0 +2_651.png,5,10,13,0,0 +2_652.png,0,16,4,0,0 +2_653.png,4,1,11,0,0 +2_654.png,18,1,1,0,0 +2_655.png,16,1,0,0,0 +2_656.png,7,1,0,0,0 +2_657.png,5,2,5,0,0 +2_658.png,11,1,5,0,0 +2_659.png,4,0,4,0,0 +2_66.png,0,2,6,0,0 +2_660.png,22,0,1,0,0 +2_661.png,9,0,4,0,0 +2_662.png,6,0,5,0,0 +2_663.png,8,0,2,0,0 +2_664.png,16,0,0,0,0 +2_665.png,13,0,0,0,0 +2_666.png,14,1,0,0,0 +2_667.png,9,0,2,0,0 +2_668.png,15,0,1,0,0 +2_669.png,1,0,3,0,0 +2_67.png,1,1,10,0,13 +2_670.png,5,1,5,0,0 +2_671.png,22,0,0,0,0 +2_672.png,13,0,4,0,0 +2_673.png,6,1,3,0,0 +2_674.png,11,1,5,0,0 +2_675.png,6,0,1,0,0 +2_676.png,13,0,0,0,0 +2_677.png,10,0,5,0,0 +2_678.png,8,0,2,0,0 +2_679.png,19,0,1,0,0 +2_68.png,0,1,7,0,34 +2_680.png,18,0,0,0,0 +2_681.png,7,1,7,0,0 +2_682.png,7,0,4,0,0 +2_683.png,20,0,1,0,0 +2_684.png,17,0,2,0,0 +2_685.png,13,0,2,0,0 +2_686.png,7,1,7,0,0 +2_687.png,11,0,2,0,0 +2_688.png,7,4,1,0,0 +2_689.png,11,0,5,0,0 +2_69.png,7,0,2,0,0 +2_690.png,14,0,3,0,0 +2_691.png,3,2,4,0,0 +2_692.png,9,0,4,0,0 +2_693.png,10,1,3,0,0 +2_694.png,11,1,2,0,0 +2_695.png,7,0,3,0,0 +2_696.png,11,0,2,0,0 +2_697.png,7,1,1,0,0 +2_698.png,5,1,0,0,0 +2_699.png,1,0,5,0,0 +2_7.png,6,0,4,0,0 +2_70.png,2,0,1,0,0 +2_700.png,18,0,2,0,0 +2_701.png,7,0,1,0,0 +2_702.png,10,2,9,0,0 +2_703.png,12,0,2,0,0 +2_704.png,7,0,5,0,0 +2_705.png,6,2,6,0,0 +2_706.png,13,17,13,0,0 +2_707.png,0,10,6,0,0 +2_708.png,0,4,12,0,0 +2_709.png,0,31,6,0,0 +2_71.png,4,1,4,0,0 +2_710.png,2,0,2,0,0 +2_711.png,31,3,0,0,0 +2_712.png,25,0,3,0,0 +2_713.png,13,0,4,0,0 +2_714.png,14,2,10,0,0 +2_715.png,23,1,4,0,0 +2_716.png,51,0,2,0,0 +2_717.png,2,0,13,0,0 +2_718.png,0,1,10,0,0 +2_719.png,0,1,8,0,0 +2_72.png,1,1,6,0,14 +2_720.png,0,5,10,0,0 +2_721.png,0,2,7,0,0 +2_722.png,0,2,6,0,0 +2_723.png,0,3,12,0,0 +2_724.png,0,11,15,0,0 +2_725.png,0,0,16,0,0 +2_726.png,0,2,8,0,0 +2_727.png,0,0,12,0,0 +2_728.png,28,2,3,0,0 +2_729.png,33,3,1,0,0 +2_73.png,1,2,15,0,1 +2_730.png,16,6,3,0,0 +2_731.png,0,2,9,0,0 +2_732.png,0,4,4,0,0 +2_733.png,0,2,3,0,0 +2_734.png,0,0,1,0,0 +2_735.png,0,0,0,0,0 +2_736.png,16,2,1,0,0 +2_737.png,34,8,5,0,0 +2_738.png,22,0,0,0,0 +2_739.png,22,0,2,0,0 +2_74.png,2,1,10,0,0 +2_740.png,2,0,0,0,0 +2_741.png,19,0,1,0,0 +2_742.png,0,0,23,0,0 +2_743.png,0,1,20,0,0 +2_744.png,0,0,20,0,0 +2_745.png,0,9,21,0,0 +2_746.png,0,6,19,0,0 +2_747.png,0,1,23,0,0 +2_748.png,0,7,28,0,0 +2_749.png,0,1,5,0,0 +2_75.png,0,0,15,0,16 +2_750.png,0,10,12,0,0 +2_751.png,0,6,6,0,0 +2_752.png,0,6,21,0,0 +2_753.png,0,4,18,0,0 +2_754.png,0,0,0,0,0 +2_755.png,0,3,1,0,0 +2_756.png,0,1,0,0,0 +2_757.png,0,1,1,0,0 +2_758.png,0,0,2,0,0 +2_759.png,0,114,1,0,0 +2_76.png,0,0,17,0,24 +2_760.png,0,114,3,0,0 +2_761.png,0,2,3,0,0 +2_762.png,0,0,0,0,0 +2_763.png,16,1,1,0,0 +2_764.png,20,1,1,0,0 +2_765.png,17,0,0,0,0 +2_766.png,26,0,0,0,0 +2_767.png,17,0,0,4,0 +2_768.png,19,2,3,0,0 +2_769.png,13,1,3,3,0 +2_77.png,0,1,12,0,15 +2_770.png,25,0,2,0,0 +2_771.png,19,0,0,3,0 +2_772.png,13,1,2,0,0 +2_773.png,19,1,5,0,0 +2_774.png,18,1,0,0,0 +2_775.png,22,0,1,0,0 +2_776.png,0,91,8,0,0 +2_777.png,0,91,5,0,0 +2_778.png,0,0,0,0,0 +2_779.png,0,0,1,0,0 +2_78.png,0,0,9,0,41 +2_780.png,0,0,0,0,0 +2_781.png,0,1,0,0,0 +2_782.png,0,1,3,0,0 +2_783.png,0,0,1,0,0 +2_784.png,0,0,0,0,0 +2_785.png,0,2,25,0,0 +2_786.png,0,7,37,0,0 +2_787.png,28,1,8,0,0 +2_788.png,2,0,34,0,0 +2_789.png,10,0,33,0,0 +2_79.png,0,0,7,0,38 +2_790.png,0,0,45,0,0 +2_791.png,0,5,6,0,0 +2_792.png,0,5,3,0,0 +2_793.png,0,1,5,0,0 +2_794.png,0,7,10,0,0 +2_795.png,0,0,3,0,0 +2_796.png,0,6,4,0,0 +2_797.png,0,4,14,0,0 
+2_798.png,4,1,4,0,0 +2_799.png,0,25,12,0,0 +2_8.png,8,0,5,0,0 +2_80.png,13,0,1,0,0 +2_800.png,0,7,3,0,0 +2_801.png,2,11,4,0,0 +2_802.png,0,78,4,0,0 +2_803.png,0,86,3,0,0 +2_804.png,0,78,3,0,0 +2_805.png,0,77,4,0,0 +2_806.png,0,87,4,0,0 +2_807.png,0,14,20,0,0 +2_808.png,0,3,33,0,0 +2_809.png,0,12,13,0,0 +2_81.png,15,1,1,0,0 +2_810.png,8,7,18,0,0 +2_811.png,74,3,2,0,0 +2_812.png,58,2,0,0,0 +2_813.png,48,2,2,0,0 +2_814.png,1,15,35,0,0 +2_815.png,26,6,0,0,0 +2_816.png,22,2,10,0,0 +2_817.png,31,0,3,0,0 +2_818.png,7,0,13,0,0 +2_819.png,18,2,5,0,0 +2_82.png,0,0,4,0,0 +2_820.png,29,0,1,0,0 +2_821.png,14,1,3,0,0 +2_822.png,24,4,2,0,0 +2_823.png,13,3,13,0,0 +2_824.png,23,4,3,0,0 +2_825.png,28,2,14,0,0 +2_826.png,21,10,6,0,0 +2_827.png,10,23,17,0,0 +2_828.png,22,1,11,0,0 +2_829.png,23,1,8,0,0 +2_83.png,11,0,4,0,0 +2_830.png,34,0,4,0,0 +2_831.png,1,10,12,0,0 +2_832.png,1,15,21,0,0 +2_833.png,13,8,8,0,0 +2_834.png,24,1,0,0,0 +2_835.png,42,0,1,0,0 +2_836.png,42,2,1,0,0 +2_837.png,24,1,2,0,0 +2_838.png,43,1,0,0,0 +2_839.png,48,0,0,0,0 +2_84.png,14,0,3,0,0 +2_840.png,36,0,1,0,0 +2_841.png,24,0,1,0,0 +2_842.png,31,0,1,0,0 +2_843.png,24,0,0,0,0 +2_844.png,21,0,4,0,0 +2_845.png,16,0,10,0,0 +2_846.png,11,1,6,0,0 +2_847.png,19,1,7,0,0 +2_848.png,17,0,0,0,0 +2_849.png,14,0,0,0,0 +2_85.png,17,0,1,0,0 +2_850.png,17,0,1,0,0 +2_851.png,18,0,0,0,0 +2_852.png,19,0,1,0,0 +2_853.png,21,0,0,0,0 +2_854.png,10,5,9,0,0 +2_855.png,11,5,2,0,0 +2_856.png,22,0,0,0,0 +2_857.png,7,0,2,0,7 +2_858.png,1,0,0,0,42 +2_859.png,1,0,9,0,0 +2_86.png,7,1,0,0,0 +2_860.png,0,30,7,0,0 +2_861.png,18,0,3,0,3 +2_862.png,20,0,0,0,0 +2_863.png,27,0,3,0,0 +2_864.png,22,0,11,0,0 +2_865.png,29,0,8,0,0 +2_866.png,53,0,3,0,0 +2_867.png,25,1,6,0,0 +2_868.png,30,0,1,0,0 +2_869.png,15,0,2,0,0 +2_87.png,13,0,1,0,0 +2_870.png,16,1,7,0,0 +2_871.png,19,0,6,0,0 +2_872.png,27,0,6,0,0 +2_873.png,29,0,4,0,0 +2_874.png,34,4,0,0,0 +2_875.png,14,1,7,0,0 +2_876.png,22,1,7,0,0 +2_877.png,24,0,4,0,0 +2_878.png,36,0,3,0,0 +2_879.png,44,1,1,0,0 +2_88.png,6,4,4,0,0 +2_880.png,26,1,3,0,0 +2_881.png,43,0,3,0,0 +2_882.png,10,0,8,0,0 +2_883.png,25,0,3,0,0 +2_884.png,33,0,0,0,0 +2_885.png,47,31,4,0,0 +2_886.png,111,0,0,0,0 +2_887.png,111,1,0,0,0 +2_888.png,28,40,65,0,0 +2_889.png,10,67,87,0,0 +2_89.png,11,1,7,0,0 +2_890.png,0,94,1,0,0 +2_891.png,0,97,2,0,0 +2_892.png,0,102,1,0,0 +2_893.png,0,94,2,0,0 +2_894.png,0,108,2,0,0 +2_895.png,0,0,23,0,0 +2_896.png,1,0,30,0,0 +2_897.png,1,2,17,0,0 +2_898.png,8,3,15,0,0 +2_899.png,41,0,1,0,0 +2_9.png,10,0,1,0,0 +2_90.png,11,0,13,0,0 +2_900.png,31,1,0,0,0 +2_901.png,26,0,3,0,0 +2_902.png,50,0,0,0,0 +2_903.png,40,0,0,0,0 +2_904.png,41,0,0,0,0 +2_905.png,45,0,2,0,0 +2_906.png,42,0,0,0,0 +2_907.png,43,0,1,0,0 +2_908.png,46,0,0,0,0 +2_909.png,51,0,1,0,0 +2_91.png,0,1,8,0,8 +2_910.png,49,0,0,0,0 +2_911.png,42,1,0,0,0 +2_912.png,49,0,0,0,0 +2_913.png,45,0,0,0,0 +2_914.png,48,0,0,0,0 +2_915.png,43,0,0,0,0 +2_916.png,40,0,0,0,0 +2_917.png,46,0,0,0,0 +2_918.png,0,0,3,0,0 +2_919.png,0,0,10,0,0 +2_92.png,0,0,9,0,23 +2_920.png,0,0,2,0,0 +2_921.png,0,0,4,0,0 +2_922.png,0,0,7,0,0 +2_923.png,0,0,9,0,0 +2_924.png,0,0,2,0,0 +2_925.png,0,0,5,0,0 +2_926.png,0,0,3,0,0 +2_927.png,0,0,4,0,0 +2_928.png,0,0,3,0,0 +2_929.png,50,1,1,27,0 +2_93.png,0,3,7,0,14 +2_930.png,44,0,33,16,0 +2_931.png,58,0,8,19,0 +2_932.png,32,0,30,23,0 +2_933.png,15,0,14,40,0 +2_934.png,25,0,13,39,0 +2_935.png,28,0,6,44,0 +2_936.png,28,0,19,21,0 +2_937.png,4,0,9,40,0 +2_938.png,10,7,27,35,0 +2_939.png,24,4,12,37,0 +2_94.png,0,0,4,0,17 +2_940.png,33,0,8,34,0 +2_941.png,27,0,19,25,0 
+2_942.png,10,4,12,58,0 +2_943.png,64,3,11,4,0 +2_944.png,11,0,11,0,0 +2_945.png,20,0,8,0,0 +2_946.png,34,0,0,0,0 +2_947.png,28,0,0,0,0 +2_948.png,20,0,0,0,0 +2_949.png,3,0,7,0,0 +2_95.png,0,0,5,0,25 +2_950.png,7,1,20,0,0 +2_951.png,19,0,11,0,0 +2_952.png,17,0,8,0,0 +2_953.png,12,1,5,0,0 +2_954.png,28,0,0,0,0 +2_955.png,26,0,5,0,0 +2_956.png,44,0,1,0,0 +2_957.png,10,4,35,0,0 +2_958.png,21,10,29,0,0 +2_959.png,50,11,8,0,0 +2_96.png,0,1,1,0,32 +2_960.png,42,1,0,0,0 +2_961.png,34,1,0,0,0 +2_962.png,48,0,0,0,0 +2_963.png,58,11,11,0,0 +2_964.png,27,6,40,0,0 +2_965.png,36,11,20,0,0 +2_966.png,33,17,3,9,0 +2_967.png,28,0,3,0,0 +2_968.png,28,0,0,0,0 +2_969.png,23,0,17,0,0 +2_97.png,0,0,9,0,29 +2_970.png,38,0,8,1,0 +2_971.png,38,1,9,1,0 +2_972.png,59,1,1,0,0 +2_973.png,27,0,0,5,0 +2_974.png,43,3,0,0,0 +2_975.png,47,4,0,0,0 +2_976.png,31,1,2,2,0 +2_977.png,33,0,0,0,0 +2_978.png,25,0,1,3,0 +2_979.png,39,4,1,3,0 +2_98.png,0,1,3,0,29 +2_980.png,44,4,0,3,0 +2_981.png,44,2,0,0,0 +2_982.png,36,0,0,0,0 +2_983.png,33,0,0,18,0 +2_984.png,48,1,0,0,0 +2_985.png,40,0,1,7,0 +2_986.png,39,0,0,4,0 +2_987.png,48,3,1,3,0 +2_988.png,37,0,0,11,0 +2_989.png,41,1,0,0,0 +2_99.png,0,0,5,0,49 +2_990.png,31,0,0,3,0 +2_991.png,35,3,0,0,0 +2_992.png,53,0,1,5,0 +2_993.png,41,0,0,7,0 +2_994.png,12,0,0,31,0 +2_995.png,25,3,0,11,0 +2_996.png,32,1,1,0,0 +2_997.png,24,0,0,16,0 +2_998.png,40,5,0,1,0 +2_999.png,46,0,0,0,0 diff --git a/docs/datasets/PanNuke/fold2/types.csv b/docs/datasets/PanNuke/fold2/types.csv new file mode 100644 index 0000000000000000000000000000000000000000..dd5d43b7e24e0cccd505d711962a04d23d32a9b6 --- /dev/null +++ b/docs/datasets/PanNuke/fold2/types.csv @@ -0,0 +1,2723 @@ +img,type +2_0.png,Breast +2_1.png,Breast +2_2.png,Breast +2_3.png,Breast +2_4.png,Breast +2_5.png,Breast +2_6.png,Breast +2_7.png,Breast +2_8.png,Breast +2_9.png,Breast +2_10.png,Breast +2_11.png,Breast +2_12.png,Breast +2_13.png,Breast +2_14.png,Breast +2_15.png,Breast +2_16.png,Breast +2_17.png,Breast +2_18.png,Breast +2_19.png,Breast +2_20.png,Breast +2_21.png,Breast +2_22.png,Breast +2_23.png,Breast +2_24.png,Breast +2_25.png,Breast +2_26.png,Breast +2_27.png,Breast +2_28.png,Breast +2_29.png,Breast +2_30.png,Breast +2_31.png,Breast +2_32.png,Breast +2_33.png,Breast +2_34.png,Breast +2_35.png,Breast +2_36.png,Breast +2_37.png,Breast +2_38.png,Breast +2_39.png,Breast +2_40.png,Breast +2_41.png,Breast +2_42.png,Breast +2_43.png,Breast +2_44.png,Breast +2_45.png,Breast +2_46.png,Breast +2_47.png,Breast +2_48.png,Breast +2_49.png,Breast +2_50.png,Breast +2_51.png,Breast +2_52.png,Breast +2_53.png,Breast +2_54.png,Breast +2_55.png,Breast +2_56.png,Breast +2_57.png,Breast +2_58.png,Breast +2_59.png,Breast +2_60.png,Breast +2_61.png,Breast +2_62.png,Breast +2_63.png,Breast +2_64.png,Breast +2_65.png,Breast +2_66.png,Breast +2_67.png,Breast +2_68.png,Breast +2_69.png,Breast +2_70.png,Breast +2_71.png,Breast +2_72.png,Breast +2_73.png,Breast +2_74.png,Breast +2_75.png,Breast +2_76.png,Breast +2_77.png,Breast +2_78.png,Breast +2_79.png,Breast +2_80.png,Breast +2_81.png,Breast +2_82.png,Breast +2_83.png,Breast +2_84.png,Breast +2_85.png,Breast +2_86.png,Breast +2_87.png,Breast +2_88.png,Breast +2_89.png,Breast +2_90.png,Breast +2_91.png,Breast +2_92.png,Breast +2_93.png,Breast +2_94.png,Breast +2_95.png,Breast +2_96.png,Breast +2_97.png,Breast +2_98.png,Breast +2_99.png,Breast +2_100.png,Breast +2_101.png,Breast +2_102.png,Breast +2_103.png,Breast +2_104.png,Breast +2_105.png,Breast +2_106.png,Breast +2_107.png,Breast +2_108.png,Breast 
+2_109.png,Breast +2_110.png,Breast +2_111.png,Breast +2_112.png,Breast +2_113.png,Breast +2_114.png,Breast +2_115.png,Breast +2_116.png,Breast +2_117.png,Breast +2_118.png,Breast +2_119.png,Breast +2_120.png,Breast +2_121.png,Breast +2_122.png,Breast +2_123.png,Breast +2_124.png,Breast +2_125.png,Breast +2_126.png,Breast +2_127.png,Breast +2_128.png,Breast +2_129.png,Breast +2_130.png,Breast +2_131.png,Breast +2_132.png,Breast +2_133.png,Breast +2_134.png,Breast +2_135.png,Breast +2_136.png,Breast +2_137.png,Breast +2_138.png,Breast +2_139.png,Breast +2_140.png,Breast +2_141.png,Breast +2_142.png,Breast +2_143.png,Breast +2_144.png,Breast +2_145.png,Breast +2_146.png,Breast +2_147.png,Breast +2_148.png,Breast +2_149.png,Breast +2_150.png,Breast +2_151.png,Breast +2_152.png,Breast +2_153.png,Breast +2_154.png,Breast +2_155.png,Breast +2_156.png,Breast +2_157.png,Breast +2_158.png,Breast +2_159.png,Breast +2_160.png,Breast +2_161.png,Breast +2_162.png,Breast +2_163.png,Breast +2_164.png,Breast +2_165.png,Breast +2_166.png,Breast +2_167.png,Breast +2_168.png,Breast +2_169.png,Breast +2_170.png,Breast +2_171.png,Breast +2_172.png,Breast +2_173.png,Breast +2_174.png,Breast +2_175.png,Breast +2_176.png,Breast +2_177.png,Breast +2_178.png,Breast +2_179.png,Breast +2_180.png,Breast +2_181.png,Breast +2_182.png,Breast +2_183.png,Breast +2_184.png,Breast +2_185.png,Breast +2_186.png,Breast +2_187.png,Breast +2_188.png,Breast +2_189.png,Breast +2_190.png,Breast +2_191.png,Breast +2_192.png,Breast +2_193.png,Breast +2_194.png,Breast +2_195.png,Breast +2_196.png,Breast +2_197.png,Breast +2_198.png,Breast +2_199.png,Breast +2_200.png,Breast +2_201.png,Breast +2_202.png,Breast +2_203.png,Breast +2_204.png,Breast +2_205.png,Breast +2_206.png,Breast +2_207.png,Breast +2_208.png,Breast +2_209.png,Breast +2_210.png,Breast +2_211.png,Breast +2_212.png,Breast +2_213.png,Breast +2_214.png,Breast +2_215.png,Breast +2_216.png,Breast +2_217.png,Breast +2_218.png,Breast +2_219.png,Breast +2_220.png,Breast +2_221.png,Breast +2_222.png,Breast +2_223.png,Breast +2_224.png,Breast +2_225.png,Breast +2_226.png,Breast +2_227.png,Breast +2_228.png,Breast +2_229.png,Breast +2_230.png,Breast +2_231.png,Breast +2_232.png,Breast +2_233.png,Breast +2_234.png,Breast +2_235.png,Breast +2_236.png,Breast +2_237.png,Breast +2_238.png,Breast +2_239.png,Breast +2_240.png,Breast +2_241.png,Breast +2_242.png,Breast +2_243.png,Breast +2_244.png,Breast +2_245.png,Breast +2_246.png,Breast +2_247.png,Breast +2_248.png,Breast +2_249.png,Breast +2_250.png,Breast +2_251.png,Breast +2_252.png,Breast +2_253.png,Breast +2_254.png,Breast +2_255.png,Breast +2_256.png,Breast +2_257.png,Breast +2_258.png,Breast +2_259.png,Breast +2_260.png,Breast +2_261.png,Breast +2_262.png,Breast +2_263.png,Breast +2_264.png,Breast +2_265.png,Breast +2_266.png,Breast +2_267.png,Breast +2_268.png,Breast +2_269.png,Breast +2_270.png,Breast +2_271.png,Breast +2_272.png,Breast +2_273.png,Breast +2_274.png,Breast +2_275.png,Breast +2_276.png,Breast +2_277.png,Breast +2_278.png,Breast +2_279.png,Breast +2_280.png,Breast +2_281.png,Breast +2_282.png,Breast +2_283.png,Breast +2_284.png,Breast +2_285.png,Breast +2_286.png,Breast +2_287.png,Breast +2_288.png,Breast +2_289.png,Breast +2_290.png,Breast +2_291.png,Breast +2_292.png,Breast +2_293.png,Breast +2_294.png,Breast +2_295.png,Breast +2_296.png,Breast +2_297.png,Breast +2_298.png,Breast +2_299.png,Breast +2_300.png,Breast +2_301.png,Breast +2_302.png,Breast +2_303.png,Breast +2_304.png,Breast +2_305.png,Breast 
+2_306.png,Breast +2_307.png,Breast +2_308.png,Breast +2_309.png,Breast +2_310.png,Breast +2_311.png,Breast +2_312.png,Breast +2_313.png,Breast +2_314.png,Breast +2_315.png,Breast +2_316.png,Breast +2_317.png,Breast +2_318.png,Breast +2_319.png,Breast +2_320.png,Breast +2_321.png,Breast +2_322.png,Breast +2_323.png,Breast +2_324.png,Breast +2_325.png,Breast +2_326.png,Breast +2_327.png,Breast +2_328.png,Breast +2_329.png,Breast +2_330.png,Breast +2_331.png,Breast +2_332.png,Breast +2_333.png,Breast +2_334.png,Breast +2_335.png,Breast +2_336.png,Breast +2_337.png,Breast +2_338.png,Breast +2_339.png,Breast +2_340.png,Breast +2_341.png,Breast +2_342.png,Breast +2_343.png,Breast +2_344.png,Breast +2_345.png,Breast +2_346.png,Breast +2_347.png,Breast +2_348.png,Breast +2_349.png,Breast +2_350.png,Breast +2_351.png,Breast +2_352.png,Breast +2_353.png,Breast +2_354.png,Breast +2_355.png,Breast +2_356.png,Breast +2_357.png,Breast +2_358.png,Breast +2_359.png,Breast +2_360.png,Breast +2_361.png,Breast +2_362.png,Breast +2_363.png,Breast +2_364.png,Breast +2_365.png,Breast +2_366.png,Breast +2_367.png,Breast +2_368.png,Breast +2_369.png,Breast +2_370.png,Breast +2_371.png,Breast +2_372.png,Breast +2_373.png,Breast +2_374.png,Breast +2_375.png,Breast +2_376.png,Breast +2_377.png,Breast +2_378.png,Breast +2_379.png,Breast +2_380.png,Breast +2_381.png,Breast +2_382.png,Breast +2_383.png,Breast +2_384.png,Breast +2_385.png,Breast +2_386.png,Breast +2_387.png,Breast +2_388.png,Breast +2_389.png,Breast +2_390.png,Breast +2_391.png,Breast +2_392.png,Breast +2_393.png,Breast +2_394.png,Breast +2_395.png,Breast +2_396.png,Breast +2_397.png,Breast +2_398.png,Breast +2_399.png,Breast +2_400.png,Breast +2_401.png,Breast +2_402.png,Breast +2_403.png,Breast +2_404.png,Breast +2_405.png,Breast +2_406.png,Breast +2_407.png,Breast +2_408.png,Breast +2_409.png,Breast +2_410.png,Breast +2_411.png,Breast +2_412.png,Breast +2_413.png,Breast +2_414.png,Breast +2_415.png,Breast +2_416.png,Breast +2_417.png,Breast +2_418.png,Breast +2_419.png,Breast +2_420.png,Breast +2_421.png,Breast +2_422.png,Breast +2_423.png,Breast +2_424.png,Breast +2_425.png,Breast +2_426.png,Breast +2_427.png,Breast +2_428.png,Breast +2_429.png,Breast +2_430.png,Breast +2_431.png,Breast +2_432.png,Breast +2_433.png,Breast +2_434.png,Breast +2_435.png,Breast +2_436.png,Breast +2_437.png,Breast +2_438.png,Breast +2_439.png,Breast +2_440.png,Breast +2_441.png,Breast +2_442.png,Breast +2_443.png,Breast +2_444.png,Breast +2_445.png,Breast +2_446.png,Breast +2_447.png,Breast +2_448.png,Breast +2_449.png,Breast +2_450.png,Breast +2_451.png,Breast +2_452.png,Breast +2_453.png,Breast +2_454.png,Breast +2_455.png,Breast +2_456.png,Breast +2_457.png,Breast +2_458.png,Breast +2_459.png,Breast +2_460.png,Breast +2_461.png,Breast +2_462.png,Breast +2_463.png,Breast +2_464.png,Breast +2_465.png,Breast +2_466.png,Breast +2_467.png,Breast +2_468.png,Breast +2_469.png,Breast +2_470.png,Breast +2_471.png,Breast +2_472.png,Breast +2_473.png,Breast +2_474.png,Breast +2_475.png,Breast +2_476.png,Breast +2_477.png,Breast +2_478.png,Breast +2_479.png,Breast +2_480.png,Breast +2_481.png,Breast +2_482.png,Breast +2_483.png,Breast +2_484.png,Breast +2_485.png,Breast +2_486.png,Breast +2_487.png,Breast +2_488.png,Breast +2_489.png,Breast +2_490.png,Breast +2_491.png,Breast +2_492.png,Breast +2_493.png,Breast +2_494.png,Breast +2_495.png,Breast +2_496.png,Breast +2_497.png,Breast +2_498.png,Breast +2_499.png,Breast +2_500.png,Breast +2_501.png,Breast +2_502.png,Breast 
+2_503.png,Breast +2_504.png,Breast +2_505.png,Breast +2_506.png,Breast +2_507.png,Breast +2_508.png,Breast +2_509.png,Breast +2_510.png,Breast +2_511.png,Breast +2_512.png,Breast +2_513.png,Breast +2_514.png,Breast +2_515.png,Breast +2_516.png,Breast +2_517.png,Breast +2_518.png,Breast +2_519.png,Breast +2_520.png,Breast +2_521.png,Breast +2_522.png,Breast +2_523.png,Breast +2_524.png,Breast +2_525.png,Breast +2_526.png,Breast +2_527.png,Breast +2_528.png,Breast +2_529.png,Breast +2_530.png,Breast +2_531.png,Breast +2_532.png,Breast +2_533.png,Breast +2_534.png,Breast +2_535.png,Breast +2_536.png,Breast +2_537.png,Breast +2_538.png,Breast +2_539.png,Breast +2_540.png,Breast +2_541.png,Breast +2_542.png,Breast +2_543.png,Breast +2_544.png,Breast +2_545.png,Breast +2_546.png,Breast +2_547.png,Breast +2_548.png,Breast +2_549.png,Breast +2_550.png,Breast +2_551.png,Breast +2_552.png,Breast +2_553.png,Breast +2_554.png,Breast +2_555.png,Breast +2_556.png,Breast +2_557.png,Breast +2_558.png,Breast +2_559.png,Breast +2_560.png,Breast +2_561.png,Breast +2_562.png,Breast +2_563.png,Breast +2_564.png,Breast +2_565.png,Breast +2_566.png,Breast +2_567.png,Breast +2_568.png,Breast +2_569.png,Breast +2_570.png,Breast +2_571.png,Breast +2_572.png,Breast +2_573.png,Breast +2_574.png,Breast +2_575.png,Breast +2_576.png,Breast +2_577.png,Breast +2_578.png,Breast +2_579.png,Breast +2_580.png,Breast +2_581.png,Breast +2_582.png,Breast +2_583.png,Breast +2_584.png,Breast +2_585.png,Breast +2_586.png,Breast +2_587.png,Breast +2_588.png,Breast +2_589.png,Breast +2_590.png,Breast +2_591.png,Breast +2_592.png,Breast +2_593.png,Breast +2_594.png,Breast +2_595.png,Breast +2_596.png,Breast +2_597.png,Breast +2_598.png,Breast +2_599.png,Breast +2_600.png,Breast +2_601.png,Breast +2_602.png,Breast +2_603.png,Breast +2_604.png,Breast +2_605.png,Breast +2_606.png,Breast +2_607.png,Breast +2_608.png,Breast +2_609.png,Breast +2_610.png,Breast +2_611.png,Breast +2_612.png,Breast +2_613.png,Breast +2_614.png,Breast +2_615.png,Breast +2_616.png,Breast +2_617.png,Breast +2_618.png,Breast +2_619.png,Breast +2_620.png,Breast +2_621.png,Breast +2_622.png,Breast +2_623.png,Breast +2_624.png,Breast +2_625.png,Breast +2_626.png,Breast +2_627.png,Breast +2_628.png,Breast +2_629.png,Breast +2_630.png,Breast +2_631.png,Breast +2_632.png,Breast +2_633.png,Breast +2_634.png,Breast +2_635.png,Breast +2_636.png,Breast +2_637.png,Breast +2_638.png,Breast +2_639.png,Breast +2_640.png,Breast +2_641.png,Breast +2_642.png,Breast +2_643.png,Breast +2_644.png,Breast +2_645.png,Breast +2_646.png,Breast +2_647.png,Breast +2_648.png,Breast +2_649.png,Breast +2_650.png,Breast +2_651.png,Breast +2_652.png,Breast +2_653.png,Breast +2_654.png,Breast +2_655.png,Breast +2_656.png,Breast +2_657.png,Breast +2_658.png,Breast +2_659.png,Breast +2_660.png,Breast +2_661.png,Breast +2_662.png,Breast +2_663.png,Breast +2_664.png,Breast +2_665.png,Breast +2_666.png,Breast +2_667.png,Breast +2_668.png,Breast +2_669.png,Breast +2_670.png,Breast +2_671.png,Breast +2_672.png,Breast +2_673.png,Breast +2_674.png,Breast +2_675.png,Breast +2_676.png,Breast +2_677.png,Breast +2_678.png,Breast +2_679.png,Breast +2_680.png,Breast +2_681.png,Breast +2_682.png,Breast +2_683.png,Breast +2_684.png,Breast +2_685.png,Breast +2_686.png,Breast +2_687.png,Breast +2_688.png,Breast +2_689.png,Breast +2_690.png,Breast +2_691.png,Breast +2_692.png,Breast +2_693.png,Breast +2_694.png,Breast +2_695.png,Breast +2_696.png,Breast +2_697.png,Breast +2_698.png,Breast +2_699.png,Breast 
+2_700.png,Breast +2_701.png,Breast +2_702.png,Breast +2_703.png,Breast +2_704.png,Breast +2_705.png,Breast +2_706.png,Colon +2_707.png,Colon +2_708.png,Colon +2_709.png,Colon +2_710.png,Colon +2_711.png,Colon +2_712.png,Colon +2_713.png,Colon +2_714.png,Colon +2_715.png,Colon +2_716.png,Colon +2_717.png,Colon +2_718.png,Colon +2_719.png,Colon +2_720.png,Colon +2_721.png,Colon +2_722.png,Colon +2_723.png,Colon +2_724.png,Colon +2_725.png,Colon +2_726.png,Colon +2_727.png,Colon +2_728.png,Colon +2_729.png,Colon +2_730.png,Colon +2_731.png,Colon +2_732.png,Colon +2_733.png,Colon +2_734.png,Colon +2_735.png,Colon +2_736.png,Colon +2_737.png,Colon +2_738.png,Colon +2_739.png,Colon +2_740.png,Colon +2_741.png,Colon +2_742.png,Colon +2_743.png,Colon +2_744.png,Colon +2_745.png,Colon +2_746.png,Colon +2_747.png,Colon +2_748.png,Colon +2_749.png,Colon +2_750.png,Colon +2_751.png,Colon +2_752.png,Colon +2_753.png,Colon +2_754.png,Colon +2_755.png,Colon +2_756.png,Colon +2_757.png,Colon +2_758.png,Colon +2_759.png,Colon +2_760.png,Colon +2_761.png,Colon +2_762.png,Colon +2_763.png,Colon +2_764.png,Colon +2_765.png,Colon +2_766.png,Colon +2_767.png,Colon +2_768.png,Colon +2_769.png,Colon +2_770.png,Colon +2_771.png,Colon +2_772.png,Colon +2_773.png,Colon +2_774.png,Colon +2_775.png,Colon +2_776.png,Colon +2_777.png,Colon +2_778.png,Colon +2_779.png,Colon +2_780.png,Colon +2_781.png,Colon +2_782.png,Colon +2_783.png,Colon +2_784.png,Colon +2_785.png,Colon +2_786.png,Colon +2_787.png,Colon +2_788.png,Colon +2_789.png,Colon +2_790.png,Colon +2_791.png,Colon +2_792.png,Colon +2_793.png,Colon +2_794.png,Colon +2_795.png,Colon +2_796.png,Colon +2_797.png,Colon +2_798.png,Colon +2_799.png,Colon +2_800.png,Colon +2_801.png,Colon +2_802.png,Colon +2_803.png,Colon +2_804.png,Colon +2_805.png,Colon +2_806.png,Colon +2_807.png,Colon +2_808.png,Colon +2_809.png,Colon +2_810.png,Colon +2_811.png,Colon +2_812.png,Colon +2_813.png,Colon +2_814.png,Colon +2_815.png,Colon +2_816.png,Lung +2_817.png,Lung +2_818.png,Lung +2_819.png,Lung +2_820.png,Lung +2_821.png,Lung +2_822.png,Lung +2_823.png,Lung +2_824.png,Lung +2_825.png,Lung +2_826.png,Lung +2_827.png,Lung +2_828.png,Lung +2_829.png,Lung +2_830.png,Lung +2_831.png,Breast +2_832.png,Breast +2_833.png,Breast +2_834.png,Breast +2_835.png,Breast +2_836.png,Breast +2_837.png,Breast +2_838.png,Breast +2_839.png,Breast +2_840.png,Breast +2_841.png,Colon +2_842.png,Colon +2_843.png,Kidney +2_844.png,Kidney +2_845.png,Kidney +2_846.png,Kidney +2_847.png,Kidney +2_848.png,Prostate +2_849.png,Prostate +2_850.png,Prostate +2_851.png,Bladder +2_852.png,Bladder +2_853.png,Bladder +2_854.png,Breast +2_855.png,Breast +2_856.png,Breast +2_857.png,Breast +2_858.png,Breast +2_859.png,Breast +2_860.png,Bladder +2_861.png,Bladder +2_862.png,Bladder +2_863.png,Prostate +2_864.png,Prostate +2_865.png,Prostate +2_866.png,Prostate +2_867.png,Prostate +2_868.png,Prostate +2_869.png,Prostate +2_870.png,Prostate +2_871.png,Prostate +2_872.png,Prostate +2_873.png,Prostate +2_874.png,Prostate +2_875.png,Prostate +2_876.png,Prostate +2_877.png,Prostate +2_878.png,Prostate +2_879.png,Prostate +2_880.png,Prostate +2_881.png,Prostate +2_882.png,Prostate +2_883.png,Prostate +2_884.png,Prostate +2_885.png,Kidney +2_886.png,Kidney +2_887.png,Kidney +2_888.png,Kidney +2_889.png,Kidney +2_890.png,Stomach +2_891.png,Stomach +2_892.png,Stomach +2_893.png,Stomach +2_894.png,Stomach +2_895.png,Ovarian +2_896.png,Ovarian +2_897.png,Ovarian +2_898.png,Ovarian +2_899.png,Esophagus +2_900.png,Esophagus 
+2_901.png,Esophagus +2_902.png,Esophagus +2_903.png,Esophagus +2_904.png,Esophagus +2_905.png,Esophagus +2_906.png,Esophagus +2_907.png,Esophagus +2_908.png,Esophagus +2_909.png,Esophagus +2_910.png,Esophagus +2_911.png,Esophagus +2_912.png,Esophagus +2_913.png,Esophagus +2_914.png,Esophagus +2_915.png,Esophagus +2_916.png,Esophagus +2_917.png,Esophagus +2_918.png,Esophagus +2_919.png,Esophagus +2_920.png,Esophagus +2_921.png,Esophagus +2_922.png,Esophagus +2_923.png,Esophagus +2_924.png,Esophagus +2_925.png,Esophagus +2_926.png,Esophagus +2_927.png,Esophagus +2_928.png,Esophagus +2_929.png,Lung +2_930.png,Lung +2_931.png,Lung +2_932.png,Lung +2_933.png,Lung +2_934.png,Lung +2_935.png,Lung +2_936.png,Lung +2_937.png,Lung +2_938.png,Lung +2_939.png,Lung +2_940.png,Lung +2_941.png,Lung +2_942.png,Lung +2_943.png,Lung +2_944.png,Lung +2_945.png,Lung +2_946.png,Lung +2_947.png,Lung +2_948.png,Lung +2_949.png,Lung +2_950.png,Lung +2_951.png,Lung +2_952.png,Lung +2_953.png,Lung +2_954.png,Lung +2_955.png,Lung +2_956.png,Uterus +2_957.png,Uterus +2_958.png,Uterus +2_959.png,Uterus +2_960.png,Uterus +2_961.png,Uterus +2_962.png,Uterus +2_963.png,Uterus +2_964.png,Uterus +2_965.png,Uterus +2_966.png,Uterus +2_967.png,Uterus +2_968.png,Uterus +2_969.png,Uterus +2_970.png,Uterus +2_971.png,Uterus +2_972.png,Uterus +2_973.png,Uterus +2_974.png,Uterus +2_975.png,Uterus +2_976.png,Uterus +2_977.png,Uterus +2_978.png,Uterus +2_979.png,Uterus +2_980.png,Uterus +2_981.png,Uterus +2_982.png,Uterus +2_983.png,Uterus +2_984.png,Uterus +2_985.png,Uterus +2_986.png,Uterus +2_987.png,Uterus +2_988.png,Uterus +2_989.png,Uterus +2_990.png,Uterus +2_991.png,Uterus +2_992.png,Uterus +2_993.png,Uterus +2_994.png,Uterus +2_995.png,Uterus +2_996.png,Uterus +2_997.png,Uterus +2_998.png,Uterus +2_999.png,Uterus +2_1000.png,Uterus +2_1001.png,Uterus +2_1002.png,Uterus +2_1003.png,Uterus +2_1004.png,Uterus +2_1005.png,Uterus +2_1006.png,Uterus +2_1007.png,Uterus +2_1008.png,Thyroid +2_1009.png,Thyroid +2_1010.png,Thyroid +2_1011.png,Thyroid +2_1012.png,Thyroid +2_1013.png,Thyroid +2_1014.png,Thyroid +2_1015.png,Thyroid +2_1016.png,Thyroid +2_1017.png,Skin +2_1018.png,Skin +2_1019.png,Skin +2_1020.png,Skin +2_1021.png,Skin +2_1022.png,Cervix +2_1023.png,Cervix +2_1024.png,Cervix +2_1025.png,Cervix +2_1026.png,Cervix +2_1027.png,Cervix +2_1028.png,Cervix +2_1029.png,Cervix +2_1030.png,Thyroid +2_1031.png,Thyroid +2_1032.png,Thyroid +2_1033.png,Thyroid +2_1034.png,Thyroid +2_1035.png,Thyroid +2_1036.png,Thyroid +2_1037.png,Thyroid +2_1038.png,Thyroid +2_1039.png,Thyroid +2_1040.png,Thyroid +2_1041.png,Thyroid +2_1042.png,Thyroid +2_1043.png,Esophagus +2_1044.png,Esophagus +2_1045.png,Esophagus +2_1046.png,Esophagus +2_1047.png,Esophagus +2_1048.png,Esophagus +2_1049.png,Esophagus +2_1050.png,Esophagus +2_1051.png,Esophagus +2_1052.png,Esophagus +2_1053.png,Esophagus +2_1054.png,Esophagus +2_1055.png,Esophagus +2_1056.png,Esophagus +2_1057.png,Esophagus +2_1058.png,Esophagus +2_1059.png,Esophagus +2_1060.png,Esophagus +2_1061.png,Esophagus +2_1062.png,Esophagus +2_1063.png,Esophagus +2_1064.png,Esophagus +2_1065.png,Esophagus +2_1066.png,Esophagus +2_1067.png,Esophagus +2_1068.png,Esophagus +2_1069.png,Esophagus +2_1070.png,Esophagus +2_1071.png,Esophagus +2_1072.png,Esophagus +2_1073.png,Esophagus +2_1074.png,Esophagus +2_1075.png,Esophagus +2_1076.png,Cervix +2_1077.png,Cervix +2_1078.png,Cervix +2_1079.png,Cervix +2_1080.png,Cervix +2_1081.png,Cervix +2_1082.png,Cervix +2_1083.png,Cervix +2_1084.png,Cervix 
+2_1085.png,Cervix +2_1086.png,Cervix +2_1087.png,Cervix +2_1088.png,Cervix +2_1089.png,Cervix +2_1090.png,Cervix +2_1091.png,Cervix +2_1092.png,Cervix +2_1093.png,Adrenal_gland +2_1094.png,Adrenal_gland +2_1095.png,Adrenal_gland +2_1096.png,Adrenal_gland +2_1097.png,Adrenal_gland +2_1098.png,Adrenal_gland +2_1099.png,Adrenal_gland +2_1100.png,Adrenal_gland +2_1101.png,Adrenal_gland +2_1102.png,Adrenal_gland +2_1103.png,Adrenal_gland +2_1104.png,Adrenal_gland +2_1105.png,Adrenal_gland +2_1106.png,Adrenal_gland +2_1107.png,Adrenal_gland +2_1108.png,Adrenal_gland +2_1109.png,Adrenal_gland +2_1110.png,Adrenal_gland +2_1111.png,Adrenal_gland +2_1112.png,Adrenal_gland +2_1113.png,Adrenal_gland +2_1114.png,Adrenal_gland +2_1115.png,Adrenal_gland +2_1116.png,Adrenal_gland +2_1117.png,Adrenal_gland +2_1118.png,Adrenal_gland +2_1119.png,Adrenal_gland +2_1120.png,Adrenal_gland +2_1121.png,Adrenal_gland +2_1122.png,Adrenal_gland +2_1123.png,Adrenal_gland +2_1124.png,Adrenal_gland +2_1125.png,Adrenal_gland +2_1126.png,Adrenal_gland +2_1127.png,Adrenal_gland +2_1128.png,Adrenal_gland +2_1129.png,Adrenal_gland +2_1130.png,Adrenal_gland +2_1131.png,Adrenal_gland +2_1132.png,Adrenal_gland +2_1133.png,Adrenal_gland +2_1134.png,Adrenal_gland +2_1135.png,Adrenal_gland +2_1136.png,Adrenal_gland +2_1137.png,Adrenal_gland +2_1138.png,Adrenal_gland +2_1139.png,Adrenal_gland +2_1140.png,Adrenal_gland +2_1141.png,Adrenal_gland +2_1142.png,Adrenal_gland +2_1143.png,Adrenal_gland +2_1144.png,Adrenal_gland +2_1145.png,Adrenal_gland +2_1146.png,Adrenal_gland +2_1147.png,Adrenal_gland +2_1148.png,Esophagus +2_1149.png,Esophagus +2_1150.png,Esophagus +2_1151.png,Esophagus +2_1152.png,Esophagus +2_1153.png,Esophagus +2_1154.png,Esophagus +2_1155.png,Esophagus +2_1156.png,Esophagus +2_1157.png,Esophagus +2_1158.png,Esophagus +2_1159.png,Esophagus +2_1160.png,Esophagus +2_1161.png,Esophagus +2_1162.png,Esophagus +2_1163.png,Esophagus +2_1164.png,Esophagus +2_1165.png,Esophagus +2_1166.png,Esophagus +2_1167.png,Esophagus +2_1168.png,Esophagus +2_1169.png,Esophagus +2_1170.png,Esophagus +2_1171.png,Esophagus +2_1172.png,Esophagus +2_1173.png,Esophagus +2_1174.png,Esophagus +2_1175.png,Esophagus +2_1176.png,Esophagus +2_1177.png,Esophagus +2_1178.png,Esophagus +2_1179.png,Esophagus +2_1180.png,Esophagus +2_1181.png,Esophagus +2_1182.png,Esophagus +2_1183.png,Esophagus +2_1184.png,Esophagus +2_1185.png,Esophagus +2_1186.png,Adrenal_gland +2_1187.png,Adrenal_gland +2_1188.png,Adrenal_gland +2_1189.png,Adrenal_gland +2_1190.png,Adrenal_gland +2_1191.png,Adrenal_gland +2_1192.png,Adrenal_gland +2_1193.png,Adrenal_gland +2_1194.png,Adrenal_gland +2_1195.png,Adrenal_gland +2_1196.png,Adrenal_gland +2_1197.png,Adrenal_gland +2_1198.png,Adrenal_gland +2_1199.png,Adrenal_gland +2_1200.png,Adrenal_gland +2_1201.png,Adrenal_gland +2_1202.png,Adrenal_gland +2_1203.png,Adrenal_gland +2_1204.png,Pancreatic +2_1205.png,Pancreatic +2_1206.png,Pancreatic +2_1207.png,Pancreatic +2_1208.png,Pancreatic +2_1209.png,Pancreatic +2_1210.png,Pancreatic +2_1211.png,Pancreatic +2_1212.png,Adrenal_gland +2_1213.png,Adrenal_gland +2_1214.png,Adrenal_gland +2_1215.png,Adrenal_gland +2_1216.png,Adrenal_gland +2_1217.png,Adrenal_gland +2_1218.png,Adrenal_gland +2_1219.png,Adrenal_gland +2_1220.png,Adrenal_gland +2_1221.png,Adrenal_gland +2_1222.png,Adrenal_gland +2_1223.png,Adrenal_gland +2_1224.png,Cervix +2_1225.png,Cervix +2_1226.png,Cervix +2_1227.png,Cervix +2_1228.png,Cervix +2_1229.png,Cervix +2_1230.png,Cervix +2_1231.png,Cervix +2_1232.png,Cervix 
+2_1233.png,Cervix +2_1234.png,Cervix +2_1235.png,Cervix +2_1236.png,Cervix +2_1237.png,Cervix +2_1238.png,Cervix +2_1239.png,Cervix +2_1240.png,Cervix +2_1241.png,Cervix +2_1242.png,Cervix +2_1243.png,Cervix +2_1244.png,Cervix +2_1245.png,Cervix +2_1246.png,Cervix +2_1247.png,Cervix +2_1248.png,Bile-duct +2_1249.png,Bile-duct +2_1250.png,Bile-duct +2_1251.png,Bile-duct +2_1252.png,Bile-duct +2_1253.png,Bile-duct +2_1254.png,Bile-duct +2_1255.png,Bile-duct +2_1256.png,Bile-duct +2_1257.png,Bile-duct +2_1258.png,Bile-duct +2_1259.png,Bile-duct +2_1260.png,Bile-duct +2_1261.png,Bile-duct +2_1262.png,Bile-duct +2_1263.png,Bile-duct +2_1264.png,Bile-duct +2_1265.png,Bile-duct +2_1266.png,Bile-duct +2_1267.png,Bile-duct +2_1268.png,Bile-duct +2_1269.png,Bile-duct +2_1270.png,Bile-duct +2_1271.png,Bile-duct +2_1272.png,Bile-duct +2_1273.png,Bile-duct +2_1274.png,Bile-duct +2_1275.png,Bile-duct +2_1276.png,Bile-duct +2_1277.png,Bile-duct +2_1278.png,Bile-duct +2_1279.png,Bile-duct +2_1280.png,Bile-duct +2_1281.png,Bile-duct +2_1282.png,Bile-duct +2_1283.png,Bile-duct +2_1284.png,Bile-duct +2_1285.png,Bile-duct +2_1286.png,Testis +2_1287.png,Testis +2_1288.png,Testis +2_1289.png,Testis +2_1290.png,Testis +2_1291.png,Testis +2_1292.png,Testis +2_1293.png,Testis +2_1294.png,Testis +2_1295.png,Testis +2_1296.png,Testis +2_1297.png,Testis +2_1298.png,Testis +2_1299.png,Testis +2_1300.png,Testis +2_1301.png,Testis +2_1302.png,Testis +2_1303.png,Testis +2_1304.png,Testis +2_1305.png,Testis +2_1306.png,Testis +2_1307.png,Testis +2_1308.png,Testis +2_1309.png,Testis +2_1310.png,Testis +2_1311.png,Testis +2_1312.png,Testis +2_1313.png,Testis +2_1314.png,Testis +2_1315.png,Bile-duct +2_1316.png,Bile-duct +2_1317.png,Bile-duct +2_1318.png,Bile-duct +2_1319.png,Bile-duct +2_1320.png,Bile-duct +2_1321.png,Bile-duct +2_1322.png,Bile-duct +2_1323.png,Bile-duct +2_1324.png,Bile-duct +2_1325.png,Bile-duct +2_1326.png,Bile-duct +2_1327.png,Bile-duct +2_1328.png,Bile-duct +2_1329.png,Bile-duct +2_1330.png,Bile-duct +2_1331.png,Bile-duct +2_1332.png,Bile-duct +2_1333.png,Bile-duct +2_1334.png,Bile-duct +2_1335.png,Bile-duct +2_1336.png,Bile-duct +2_1337.png,Bile-duct +2_1338.png,Bile-duct +2_1339.png,Bile-duct +2_1340.png,Bile-duct +2_1341.png,Bile-duct +2_1342.png,Bile-duct +2_1343.png,Bile-duct +2_1344.png,Bile-duct +2_1345.png,Bile-duct +2_1346.png,Bile-duct +2_1347.png,Bile-duct +2_1348.png,Bile-duct +2_1349.png,Bile-duct +2_1350.png,Colon +2_1351.png,Colon +2_1352.png,Colon +2_1353.png,Colon +2_1354.png,Colon +2_1355.png,Colon +2_1356.png,Colon +2_1357.png,Colon +2_1358.png,Colon +2_1359.png,Colon +2_1360.png,Colon +2_1361.png,Colon +2_1362.png,Colon +2_1363.png,Colon +2_1364.png,Colon +2_1365.png,Colon +2_1366.png,Colon +2_1367.png,Colon +2_1368.png,Colon +2_1369.png,Colon +2_1370.png,Colon +2_1371.png,Colon +2_1372.png,Colon +2_1373.png,Colon +2_1374.png,Colon +2_1375.png,Colon +2_1376.png,Colon +2_1377.png,Colon +2_1378.png,Colon +2_1379.png,Colon +2_1380.png,Colon +2_1381.png,Colon +2_1382.png,Colon +2_1383.png,Colon +2_1384.png,Colon +2_1385.png,Colon +2_1386.png,Colon +2_1387.png,Colon +2_1388.png,Colon +2_1389.png,Colon +2_1390.png,Colon +2_1391.png,Colon +2_1392.png,Colon +2_1393.png,Colon +2_1394.png,Colon +2_1395.png,Colon +2_1396.png,Colon +2_1397.png,Colon +2_1398.png,Adrenal_gland +2_1399.png,Adrenal_gland +2_1400.png,Adrenal_gland +2_1401.png,Adrenal_gland +2_1402.png,Adrenal_gland +2_1403.png,Adrenal_gland +2_1404.png,Adrenal_gland +2_1405.png,Adrenal_gland +2_1406.png,Adrenal_gland 
+2_1407.png,Adrenal_gland +2_1408.png,Adrenal_gland +2_1409.png,Adrenal_gland +2_1410.png,Adrenal_gland +2_1411.png,Adrenal_gland +2_1412.png,Adrenal_gland +2_1413.png,Adrenal_gland +2_1414.png,Adrenal_gland +2_1415.png,Adrenal_gland +2_1416.png,Adrenal_gland +2_1417.png,Adrenal_gland +2_1418.png,Adrenal_gland +2_1419.png,Adrenal_gland +2_1420.png,Adrenal_gland +2_1421.png,Adrenal_gland +2_1422.png,Adrenal_gland +2_1423.png,Adrenal_gland +2_1424.png,Adrenal_gland +2_1425.png,Adrenal_gland +2_1426.png,Adrenal_gland +2_1427.png,Adrenal_gland +2_1428.png,Adrenal_gland +2_1429.png,Adrenal_gland +2_1430.png,Adrenal_gland +2_1431.png,Adrenal_gland +2_1432.png,Adrenal_gland +2_1433.png,Adrenal_gland +2_1434.png,Adrenal_gland +2_1435.png,Adrenal_gland +2_1436.png,Adrenal_gland +2_1437.png,Adrenal_gland +2_1438.png,Adrenal_gland +2_1439.png,Adrenal_gland +2_1440.png,Adrenal_gland +2_1441.png,Adrenal_gland +2_1442.png,Adrenal_gland +2_1443.png,Adrenal_gland +2_1444.png,Adrenal_gland +2_1445.png,Adrenal_gland +2_1446.png,Adrenal_gland +2_1447.png,Adrenal_gland +2_1448.png,Adrenal_gland +2_1449.png,Adrenal_gland +2_1450.png,Adrenal_gland +2_1451.png,Adrenal_gland +2_1452.png,Adrenal_gland +2_1453.png,Adrenal_gland +2_1454.png,Adrenal_gland +2_1455.png,Adrenal_gland +2_1456.png,Adrenal_gland +2_1457.png,Adrenal_gland +2_1458.png,Adrenal_gland +2_1459.png,Adrenal_gland +2_1460.png,Adrenal_gland +2_1461.png,Adrenal_gland +2_1462.png,Adrenal_gland +2_1463.png,Adrenal_gland +2_1464.png,Adrenal_gland +2_1465.png,Adrenal_gland +2_1466.png,Adrenal_gland +2_1467.png,Adrenal_gland +2_1468.png,Bile-duct +2_1469.png,Bile-duct +2_1470.png,Bile-duct +2_1471.png,Bile-duct +2_1472.png,Bile-duct +2_1473.png,Bile-duct +2_1474.png,Bile-duct +2_1475.png,Bile-duct +2_1476.png,Bile-duct +2_1477.png,Bile-duct +2_1478.png,Bile-duct +2_1479.png,Bile-duct +2_1480.png,Bile-duct +2_1481.png,Bile-duct +2_1482.png,Bile-duct +2_1483.png,Bile-duct +2_1484.png,Bile-duct +2_1485.png,Bile-duct +2_1486.png,Bile-duct +2_1487.png,Bile-duct +2_1488.png,Bile-duct +2_1489.png,Bile-duct +2_1490.png,Bile-duct +2_1491.png,Bile-duct +2_1492.png,Bile-duct +2_1493.png,Bile-duct +2_1494.png,Bile-duct +2_1495.png,Bile-duct +2_1496.png,Bile-duct +2_1497.png,Bile-duct +2_1498.png,Bile-duct +2_1499.png,Bile-duct +2_1500.png,Bile-duct +2_1501.png,Bile-duct +2_1502.png,Bile-duct +2_1503.png,Bile-duct +2_1504.png,Bile-duct +2_1505.png,Bile-duct +2_1506.png,Bile-duct +2_1507.png,Bile-duct +2_1508.png,Bile-duct +2_1509.png,Bile-duct +2_1510.png,Bile-duct +2_1511.png,Bile-duct +2_1512.png,Bile-duct +2_1513.png,Bile-duct +2_1514.png,Bile-duct +2_1515.png,Bile-duct +2_1516.png,Bile-duct +2_1517.png,Bile-duct +2_1518.png,Bile-duct +2_1519.png,Bile-duct +2_1520.png,Bile-duct +2_1521.png,Bile-duct +2_1522.png,Bile-duct +2_1523.png,Bile-duct +2_1524.png,Bile-duct +2_1525.png,Bile-duct +2_1526.png,Bile-duct +2_1527.png,Bile-duct +2_1528.png,Bile-duct +2_1529.png,Bile-duct +2_1530.png,Bile-duct +2_1531.png,Bile-duct +2_1532.png,Bile-duct +2_1533.png,Bile-duct +2_1534.png,Bile-duct +2_1535.png,Bile-duct +2_1536.png,Bile-duct +2_1537.png,Bile-duct +2_1538.png,Bile-duct +2_1539.png,Bile-duct +2_1540.png,Bile-duct +2_1541.png,Bile-duct +2_1542.png,Bile-duct +2_1543.png,Bile-duct +2_1544.png,Bile-duct +2_1545.png,Bile-duct +2_1546.png,Bile-duct +2_1547.png,Bile-duct +2_1548.png,Bile-duct +2_1549.png,Bile-duct +2_1550.png,Bile-duct +2_1551.png,Bile-duct +2_1552.png,Bile-duct +2_1553.png,Bladder +2_1554.png,Bladder +2_1555.png,Bladder +2_1556.png,Bladder 
+2_1557.png,Bladder +2_1558.png,Bladder +2_1559.png,Bladder +2_1560.png,Bladder +2_1561.png,Bladder +2_1562.png,Bladder +2_1563.png,Bladder +2_1564.png,Bladder +2_1565.png,Bladder +2_1566.png,Bladder +2_1567.png,Bladder +2_1568.png,Bladder +2_1569.png,Bladder +2_1570.png,Bladder +2_1571.png,Bladder +2_1572.png,Bladder +2_1573.png,Bladder +2_1574.png,Bladder +2_1575.png,Bladder +2_1576.png,Bladder +2_1577.png,Bladder +2_1578.png,Bladder +2_1579.png,Bladder +2_1580.png,Bladder +2_1581.png,Bladder +2_1582.png,Bladder +2_1583.png,Bladder +2_1584.png,Bladder +2_1585.png,Bladder +2_1586.png,Bladder +2_1587.png,Bladder +2_1588.png,Bladder +2_1589.png,Bladder +2_1590.png,Bladder +2_1591.png,Bladder +2_1592.png,Bladder +2_1593.png,Bladder +2_1594.png,Bladder +2_1595.png,Bladder +2_1596.png,Bladder +2_1597.png,Bladder +2_1598.png,Bladder +2_1599.png,Bladder +2_1600.png,Bladder +2_1601.png,Bladder +2_1602.png,Bladder +2_1603.png,Bladder +2_1604.png,Bladder +2_1605.png,Bladder +2_1606.png,Bladder +2_1607.png,Bladder +2_1608.png,Bladder +2_1609.png,Bladder +2_1610.png,Bladder +2_1611.png,Breast +2_1612.png,Breast +2_1613.png,Breast +2_1614.png,Breast +2_1615.png,Breast +2_1616.png,Breast +2_1617.png,Breast +2_1618.png,Breast +2_1619.png,Breast +2_1620.png,Breast +2_1621.png,Breast +2_1622.png,Breast +2_1623.png,Breast +2_1624.png,Breast +2_1625.png,Breast +2_1626.png,Breast +2_1627.png,Breast +2_1628.png,Breast +2_1629.png,Breast +2_1630.png,Breast +2_1631.png,Breast +2_1632.png,Breast +2_1633.png,Breast +2_1634.png,Breast +2_1635.png,Breast +2_1636.png,Breast +2_1637.png,Breast +2_1638.png,Breast +2_1639.png,Breast +2_1640.png,Breast +2_1641.png,Breast +2_1642.png,Breast +2_1643.png,Breast +2_1644.png,Breast +2_1645.png,Breast +2_1646.png,Breast +2_1647.png,Breast +2_1648.png,Breast +2_1649.png,Breast +2_1650.png,Breast +2_1651.png,Breast +2_1652.png,Breast +2_1653.png,Breast +2_1654.png,Breast +2_1655.png,Breast +2_1656.png,Breast +2_1657.png,Breast +2_1658.png,Breast +2_1659.png,Breast +2_1660.png,Breast +2_1661.png,Breast +2_1662.png,Breast +2_1663.png,Breast +2_1664.png,Cervix +2_1665.png,Cervix +2_1666.png,Cervix +2_1667.png,Cervix +2_1668.png,Cervix +2_1669.png,Cervix +2_1670.png,Cervix +2_1671.png,Cervix +2_1672.png,Cervix +2_1673.png,Cervix +2_1674.png,Cervix +2_1675.png,Cervix +2_1676.png,Cervix +2_1677.png,Cervix +2_1678.png,Cervix +2_1679.png,Cervix +2_1680.png,Cervix +2_1681.png,Cervix +2_1682.png,Cervix +2_1683.png,Cervix +2_1684.png,Cervix +2_1685.png,Cervix +2_1686.png,Cervix +2_1687.png,Cervix +2_1688.png,Cervix +2_1689.png,Cervix +2_1690.png,Cervix +2_1691.png,Cervix +2_1692.png,Cervix +2_1693.png,Cervix +2_1694.png,Cervix +2_1695.png,Cervix +2_1696.png,Cervix +2_1697.png,Cervix +2_1698.png,Cervix +2_1699.png,Cervix +2_1700.png,Cervix +2_1701.png,Colon +2_1702.png,Colon +2_1703.png,Colon +2_1704.png,Colon +2_1705.png,Colon +2_1706.png,Colon +2_1707.png,Colon +2_1708.png,Colon +2_1709.png,Colon +2_1710.png,Colon +2_1711.png,Colon +2_1712.png,Colon +2_1713.png,Colon +2_1714.png,Colon +2_1715.png,Colon +2_1716.png,Colon +2_1717.png,Colon +2_1718.png,Colon +2_1719.png,Colon +2_1720.png,Colon +2_1721.png,Colon +2_1722.png,Colon +2_1723.png,Colon +2_1724.png,Colon +2_1725.png,Colon +2_1726.png,Colon +2_1727.png,Colon +2_1728.png,Colon +2_1729.png,Colon +2_1730.png,Colon +2_1731.png,Colon +2_1732.png,Colon +2_1733.png,Colon +2_1734.png,Colon +2_1735.png,Colon +2_1736.png,Colon +2_1737.png,Colon +2_1738.png,Colon +2_1739.png,Colon +2_1740.png,Colon +2_1741.png,Colon +2_1742.png,Colon 
+2_1743.png,Colon +2_1744.png,Colon +2_1745.png,Colon +2_1746.png,Colon +2_1747.png,Colon +2_1748.png,Colon +2_1749.png,Colon +2_1750.png,Colon +2_1751.png,Colon +2_1752.png,Colon +2_1753.png,Colon +2_1754.png,Colon +2_1755.png,Colon +2_1756.png,Colon +2_1757.png,Colon +2_1758.png,Colon +2_1759.png,Colon +2_1760.png,Colon +2_1761.png,Colon +2_1762.png,Colon +2_1763.png,Colon +2_1764.png,Colon +2_1765.png,Colon +2_1766.png,Colon +2_1767.png,Colon +2_1768.png,Colon +2_1769.png,Colon +2_1770.png,Colon +2_1771.png,Colon +2_1772.png,Colon +2_1773.png,Colon +2_1774.png,Colon +2_1775.png,Colon +2_1776.png,Colon +2_1777.png,Colon +2_1778.png,Colon +2_1779.png,Colon +2_1780.png,Colon +2_1781.png,Colon +2_1782.png,Colon +2_1783.png,Colon +2_1784.png,Colon +2_1785.png,Colon +2_1786.png,Colon +2_1787.png,Colon +2_1788.png,Colon +2_1789.png,Colon +2_1790.png,Colon +2_1791.png,Colon +2_1792.png,Colon +2_1793.png,Colon +2_1794.png,Colon +2_1795.png,Colon +2_1796.png,Colon +2_1797.png,Colon +2_1798.png,Colon +2_1799.png,Colon +2_1800.png,Colon +2_1801.png,Colon +2_1802.png,Colon +2_1803.png,Colon +2_1804.png,Colon +2_1805.png,Colon +2_1806.png,Colon +2_1807.png,Colon +2_1808.png,Colon +2_1809.png,Colon +2_1810.png,Colon +2_1811.png,Colon +2_1812.png,Colon +2_1813.png,Colon +2_1814.png,Colon +2_1815.png,Colon +2_1816.png,Colon +2_1817.png,Colon +2_1818.png,Colon +2_1819.png,Colon +2_1820.png,Colon +2_1821.png,Colon +2_1822.png,Colon +2_1823.png,Colon +2_1824.png,Colon +2_1825.png,Colon +2_1826.png,Colon +2_1827.png,Colon +2_1828.png,Colon +2_1829.png,Colon +2_1830.png,Colon +2_1831.png,Colon +2_1832.png,Colon +2_1833.png,Colon +2_1834.png,Colon +2_1835.png,Colon +2_1836.png,Colon +2_1837.png,Colon +2_1838.png,Colon +2_1839.png,Colon +2_1840.png,Colon +2_1841.png,Colon +2_1842.png,Colon +2_1843.png,Colon +2_1844.png,Colon +2_1845.png,Colon +2_1846.png,Colon +2_1847.png,Colon +2_1848.png,Colon +2_1849.png,Colon +2_1850.png,Colon +2_1851.png,Colon +2_1852.png,Colon +2_1853.png,Colon +2_1854.png,Colon +2_1855.png,Colon +2_1856.png,Colon +2_1857.png,Colon +2_1858.png,Colon +2_1859.png,Colon +2_1860.png,Colon +2_1861.png,Colon +2_1862.png,Colon +2_1863.png,Colon +2_1864.png,Colon +2_1865.png,Colon +2_1866.png,Colon +2_1867.png,Colon +2_1868.png,Colon +2_1869.png,Colon +2_1870.png,Colon +2_1871.png,Colon +2_1872.png,Colon +2_1873.png,Colon +2_1874.png,Colon +2_1875.png,Colon +2_1876.png,Colon +2_1877.png,Colon +2_1878.png,Colon +2_1879.png,Colon +2_1880.png,Colon +2_1881.png,Colon +2_1882.png,Colon +2_1883.png,Colon +2_1884.png,Colon +2_1885.png,Colon +2_1886.png,Colon +2_1887.png,Colon +2_1888.png,Colon +2_1889.png,Colon +2_1890.png,Colon +2_1891.png,Colon +2_1892.png,Colon +2_1893.png,Colon +2_1894.png,Colon +2_1895.png,Colon +2_1896.png,Colon +2_1897.png,Colon +2_1898.png,Colon +2_1899.png,Colon +2_1900.png,Colon +2_1901.png,Colon +2_1902.png,Colon +2_1903.png,Colon +2_1904.png,Colon +2_1905.png,Colon +2_1906.png,Colon +2_1907.png,Colon +2_1908.png,Colon +2_1909.png,Colon +2_1910.png,Colon +2_1911.png,Colon +2_1912.png,Colon +2_1913.png,Colon +2_1914.png,Colon +2_1915.png,Colon +2_1916.png,Colon +2_1917.png,Colon +2_1918.png,Colon +2_1919.png,Colon +2_1920.png,Colon +2_1921.png,Colon +2_1922.png,Colon +2_1923.png,Colon +2_1924.png,Colon +2_1925.png,Colon +2_1926.png,Colon +2_1927.png,Colon +2_1928.png,Colon +2_1929.png,Colon +2_1930.png,Colon +2_1931.png,Colon +2_1932.png,Colon +2_1933.png,Colon +2_1934.png,Colon +2_1935.png,Colon +2_1936.png,Colon +2_1937.png,Colon +2_1938.png,Colon +2_1939.png,Colon 
+2_1940.png,Colon +2_1941.png,Colon +2_1942.png,Colon +2_1943.png,Colon +2_1944.png,Colon +2_1945.png,Colon +2_1946.png,Colon +2_1947.png,Esophagus +2_1948.png,Esophagus +2_1949.png,Esophagus +2_1950.png,Esophagus +2_1951.png,Esophagus +2_1952.png,Esophagus +2_1953.png,Esophagus +2_1954.png,Esophagus +2_1955.png,Esophagus +2_1956.png,Esophagus +2_1957.png,Esophagus +2_1958.png,Esophagus +2_1959.png,Esophagus +2_1960.png,Esophagus +2_1961.png,Esophagus +2_1962.png,Esophagus +2_1963.png,Esophagus +2_1964.png,Esophagus +2_1965.png,Esophagus +2_1966.png,Esophagus +2_1967.png,Esophagus +2_1968.png,Esophagus +2_1969.png,Esophagus +2_1970.png,Esophagus +2_1971.png,Esophagus +2_1972.png,Esophagus +2_1973.png,Esophagus +2_1974.png,Esophagus +2_1975.png,Esophagus +2_1976.png,Esophagus +2_1977.png,Esophagus +2_1978.png,Esophagus +2_1979.png,Esophagus +2_1980.png,Esophagus +2_1981.png,Esophagus +2_1982.png,Esophagus +2_1983.png,Esophagus +2_1984.png,Esophagus +2_1985.png,Esophagus +2_1986.png,Esophagus +2_1987.png,HeadNeck +2_1988.png,HeadNeck +2_1989.png,HeadNeck +2_1990.png,HeadNeck +2_1991.png,HeadNeck +2_1992.png,HeadNeck +2_1993.png,HeadNeck +2_1994.png,HeadNeck +2_1995.png,HeadNeck +2_1996.png,HeadNeck +2_1997.png,HeadNeck +2_1998.png,HeadNeck +2_1999.png,HeadNeck +2_2000.png,HeadNeck +2_2001.png,HeadNeck +2_2002.png,HeadNeck +2_2003.png,HeadNeck +2_2004.png,HeadNeck +2_2005.png,HeadNeck +2_2006.png,HeadNeck +2_2007.png,HeadNeck +2_2008.png,HeadNeck +2_2009.png,HeadNeck +2_2010.png,HeadNeck +2_2011.png,HeadNeck +2_2012.png,HeadNeck +2_2013.png,HeadNeck +2_2014.png,HeadNeck +2_2015.png,HeadNeck +2_2016.png,HeadNeck +2_2017.png,HeadNeck +2_2018.png,HeadNeck +2_2019.png,HeadNeck +2_2020.png,HeadNeck +2_2021.png,HeadNeck +2_2022.png,HeadNeck +2_2023.png,HeadNeck +2_2024.png,HeadNeck +2_2025.png,HeadNeck +2_2026.png,HeadNeck +2_2027.png,HeadNeck +2_2028.png,HeadNeck +2_2029.png,HeadNeck +2_2030.png,HeadNeck +2_2031.png,HeadNeck +2_2032.png,HeadNeck +2_2033.png,HeadNeck +2_2034.png,HeadNeck +2_2035.png,HeadNeck +2_2036.png,HeadNeck +2_2037.png,HeadNeck +2_2038.png,HeadNeck +2_2039.png,HeadNeck +2_2040.png,HeadNeck +2_2041.png,HeadNeck +2_2042.png,HeadNeck +2_2043.png,HeadNeck +2_2044.png,HeadNeck +2_2045.png,HeadNeck +2_2046.png,HeadNeck +2_2047.png,HeadNeck +2_2048.png,HeadNeck +2_2049.png,HeadNeck +2_2050.png,HeadNeck +2_2051.png,HeadNeck +2_2052.png,HeadNeck +2_2053.png,HeadNeck +2_2054.png,HeadNeck +2_2055.png,HeadNeck +2_2056.png,HeadNeck +2_2057.png,HeadNeck +2_2058.png,HeadNeck +2_2059.png,HeadNeck +2_2060.png,HeadNeck +2_2061.png,HeadNeck +2_2062.png,HeadNeck +2_2063.png,HeadNeck +2_2064.png,HeadNeck +2_2065.png,HeadNeck +2_2066.png,HeadNeck +2_2067.png,HeadNeck +2_2068.png,HeadNeck +2_2069.png,HeadNeck +2_2070.png,HeadNeck +2_2071.png,HeadNeck +2_2072.png,HeadNeck +2_2073.png,HeadNeck +2_2074.png,HeadNeck +2_2075.png,HeadNeck +2_2076.png,HeadNeck +2_2077.png,HeadNeck +2_2078.png,HeadNeck +2_2079.png,HeadNeck +2_2080.png,HeadNeck +2_2081.png,HeadNeck +2_2082.png,HeadNeck +2_2083.png,HeadNeck +2_2084.png,HeadNeck +2_2085.png,HeadNeck +2_2086.png,HeadNeck +2_2087.png,HeadNeck +2_2088.png,HeadNeck +2_2089.png,HeadNeck +2_2090.png,HeadNeck +2_2091.png,HeadNeck +2_2092.png,HeadNeck +2_2093.png,HeadNeck +2_2094.png,HeadNeck +2_2095.png,HeadNeck +2_2096.png,HeadNeck +2_2097.png,HeadNeck +2_2098.png,HeadNeck +2_2099.png,HeadNeck +2_2100.png,HeadNeck +2_2101.png,HeadNeck +2_2102.png,HeadNeck +2_2103.png,HeadNeck +2_2104.png,HeadNeck +2_2105.png,HeadNeck +2_2106.png,HeadNeck +2_2107.png,HeadNeck 
+2_2108.png,HeadNeck +2_2109.png,HeadNeck +2_2110.png,HeadNeck +2_2111.png,HeadNeck +2_2112.png,HeadNeck +2_2113.png,HeadNeck +2_2114.png,HeadNeck +2_2115.png,HeadNeck +2_2116.png,HeadNeck +2_2117.png,HeadNeck +2_2118.png,HeadNeck +2_2119.png,HeadNeck +2_2120.png,HeadNeck +2_2121.png,HeadNeck +2_2122.png,HeadNeck +2_2123.png,HeadNeck +2_2124.png,HeadNeck +2_2125.png,HeadNeck +2_2126.png,HeadNeck +2_2127.png,HeadNeck +2_2128.png,HeadNeck +2_2129.png,HeadNeck +2_2130.png,HeadNeck +2_2131.png,HeadNeck +2_2132.png,Kidney +2_2133.png,Kidney +2_2134.png,Kidney +2_2135.png,Kidney +2_2136.png,Kidney +2_2137.png,Kidney +2_2138.png,Kidney +2_2139.png,Kidney +2_2140.png,Kidney +2_2141.png,Kidney +2_2142.png,Kidney +2_2143.png,Kidney +2_2144.png,Kidney +2_2145.png,Kidney +2_2146.png,Kidney +2_2147.png,Kidney +2_2148.png,Kidney +2_2149.png,Kidney +2_2150.png,Kidney +2_2151.png,Kidney +2_2152.png,Kidney +2_2153.png,Kidney +2_2154.png,Kidney +2_2155.png,Kidney +2_2156.png,Kidney +2_2157.png,Kidney +2_2158.png,Kidney +2_2159.png,Kidney +2_2160.png,Kidney +2_2161.png,Kidney +2_2162.png,Kidney +2_2163.png,Liver +2_2164.png,Liver +2_2165.png,Liver +2_2166.png,Liver +2_2167.png,Liver +2_2168.png,Liver +2_2169.png,Liver +2_2170.png,Liver +2_2171.png,Liver +2_2172.png,Liver +2_2173.png,Liver +2_2174.png,Liver +2_2175.png,Liver +2_2176.png,Liver +2_2177.png,Liver +2_2178.png,Liver +2_2179.png,Liver +2_2180.png,Liver +2_2181.png,Liver +2_2182.png,Liver +2_2183.png,Liver +2_2184.png,Liver +2_2185.png,Liver +2_2186.png,Liver +2_2187.png,Liver +2_2188.png,Liver +2_2189.png,Liver +2_2190.png,Liver +2_2191.png,Liver +2_2192.png,Liver +2_2193.png,Liver +2_2194.png,Liver +2_2195.png,Liver +2_2196.png,Liver +2_2197.png,Liver +2_2198.png,Liver +2_2199.png,Liver +2_2200.png,Liver +2_2201.png,Liver +2_2202.png,Liver +2_2203.png,Liver +2_2204.png,Liver +2_2205.png,Liver +2_2206.png,Liver +2_2207.png,Liver +2_2208.png,Liver +2_2209.png,Liver +2_2210.png,Liver +2_2211.png,Liver +2_2212.png,Liver +2_2213.png,Liver +2_2214.png,Liver +2_2215.png,Liver +2_2216.png,Liver +2_2217.png,Liver +2_2218.png,Liver +2_2219.png,Liver +2_2220.png,Liver +2_2221.png,Liver +2_2222.png,Liver +2_2223.png,Liver +2_2224.png,Liver +2_2225.png,Liver +2_2226.png,Liver +2_2227.png,Liver +2_2228.png,Liver +2_2229.png,Liver +2_2230.png,Liver +2_2231.png,Liver +2_2232.png,Liver +2_2233.png,Liver +2_2234.png,Liver +2_2235.png,Liver +2_2236.png,Liver +2_2237.png,Liver +2_2238.png,Liver +2_2239.png,Liver +2_2240.png,Liver +2_2241.png,Liver +2_2242.png,Liver +2_2243.png,Liver +2_2244.png,Liver +2_2245.png,Liver +2_2246.png,Liver +2_2247.png,Liver +2_2248.png,Liver +2_2249.png,Liver +2_2250.png,Liver +2_2251.png,Liver +2_2252.png,Liver +2_2253.png,Liver +2_2254.png,Liver +2_2255.png,Liver +2_2256.png,Lung +2_2257.png,Lung +2_2258.png,Lung +2_2259.png,Lung +2_2260.png,Lung +2_2261.png,Lung +2_2262.png,Lung +2_2263.png,Lung +2_2264.png,Lung +2_2265.png,Ovarian +2_2266.png,Ovarian +2_2267.png,Ovarian +2_2268.png,Ovarian +2_2269.png,Ovarian +2_2270.png,Ovarian +2_2271.png,Ovarian +2_2272.png,Ovarian +2_2273.png,Ovarian +2_2274.png,Ovarian +2_2275.png,Ovarian +2_2276.png,Ovarian +2_2277.png,Ovarian +2_2278.png,Ovarian +2_2279.png,Ovarian +2_2280.png,Ovarian +2_2281.png,Ovarian +2_2282.png,Ovarian +2_2283.png,Ovarian +2_2284.png,Ovarian +2_2285.png,Ovarian +2_2286.png,Ovarian +2_2287.png,Ovarian +2_2288.png,Ovarian +2_2289.png,Ovarian +2_2290.png,Ovarian +2_2291.png,Ovarian +2_2292.png,Ovarian +2_2293.png,Ovarian +2_2294.png,Ovarian +2_2295.png,Ovarian 
+2_2296.png,Ovarian +2_2297.png,Ovarian +2_2298.png,Ovarian +2_2299.png,Ovarian +2_2300.png,Ovarian +2_2301.png,Ovarian +2_2302.png,Ovarian +2_2303.png,Ovarian +2_2304.png,Ovarian +2_2305.png,Ovarian +2_2306.png,Ovarian +2_2307.png,Ovarian +2_2308.png,Ovarian +2_2309.png,Ovarian +2_2310.png,Ovarian +2_2311.png,Ovarian +2_2312.png,Ovarian +2_2313.png,Pancreatic +2_2314.png,Pancreatic +2_2315.png,Pancreatic +2_2316.png,Pancreatic +2_2317.png,Pancreatic +2_2318.png,Pancreatic +2_2319.png,Pancreatic +2_2320.png,Pancreatic +2_2321.png,Pancreatic +2_2322.png,Pancreatic +2_2323.png,Pancreatic +2_2324.png,Pancreatic +2_2325.png,Pancreatic +2_2326.png,Pancreatic +2_2327.png,Pancreatic +2_2328.png,Pancreatic +2_2329.png,Pancreatic +2_2330.png,Pancreatic +2_2331.png,Pancreatic +2_2332.png,Pancreatic +2_2333.png,Prostate +2_2334.png,Prostate +2_2335.png,Prostate +2_2336.png,Prostate +2_2337.png,Prostate +2_2338.png,Prostate +2_2339.png,Prostate +2_2340.png,Prostate +2_2341.png,Prostate +2_2342.png,Prostate +2_2343.png,Prostate +2_2344.png,Prostate +2_2345.png,Prostate +2_2346.png,Prostate +2_2347.png,Prostate +2_2348.png,Prostate +2_2349.png,Prostate +2_2350.png,Prostate +2_2351.png,Prostate +2_2352.png,Prostate +2_2353.png,Prostate +2_2354.png,Prostate +2_2355.png,Prostate +2_2356.png,Prostate +2_2357.png,Prostate +2_2358.png,Prostate +2_2359.png,Prostate +2_2360.png,Prostate +2_2361.png,Prostate +2_2362.png,Prostate +2_2363.png,Prostate +2_2364.png,Prostate +2_2365.png,Prostate +2_2366.png,Prostate +2_2367.png,Prostate +2_2368.png,Prostate +2_2369.png,Prostate +2_2370.png,Prostate +2_2371.png,Prostate +2_2372.png,Prostate +2_2373.png,Prostate +2_2374.png,Prostate +2_2375.png,Prostate +2_2376.png,Skin +2_2377.png,Skin +2_2378.png,Skin +2_2379.png,Skin +2_2380.png,Skin +2_2381.png,Skin +2_2382.png,Skin +2_2383.png,Skin +2_2384.png,Skin +2_2385.png,Skin +2_2386.png,Skin +2_2387.png,Skin +2_2388.png,Skin +2_2389.png,Skin +2_2390.png,Skin +2_2391.png,Skin +2_2392.png,Skin +2_2393.png,Skin +2_2394.png,Skin +2_2395.png,Skin +2_2396.png,Skin +2_2397.png,Skin +2_2398.png,Skin +2_2399.png,Skin +2_2400.png,Skin +2_2401.png,Skin +2_2402.png,Skin +2_2403.png,Skin +2_2404.png,Skin +2_2405.png,Skin +2_2406.png,Skin +2_2407.png,Skin +2_2408.png,Skin +2_2409.png,Skin +2_2410.png,Skin +2_2411.png,Skin +2_2412.png,Stomach +2_2413.png,Stomach +2_2414.png,Stomach +2_2415.png,Stomach +2_2416.png,Stomach +2_2417.png,Stomach +2_2418.png,Stomach +2_2419.png,Stomach +2_2420.png,Stomach +2_2421.png,Stomach +2_2422.png,Stomach +2_2423.png,Stomach +2_2424.png,Stomach +2_2425.png,Stomach +2_2426.png,Stomach +2_2427.png,Stomach +2_2428.png,Stomach +2_2429.png,Stomach +2_2430.png,Stomach +2_2431.png,Stomach +2_2432.png,Stomach +2_2433.png,Stomach +2_2434.png,Stomach +2_2435.png,Stomach +2_2436.png,Stomach +2_2437.png,Stomach +2_2438.png,Stomach +2_2439.png,Stomach +2_2440.png,Stomach +2_2441.png,Stomach +2_2442.png,Stomach +2_2443.png,Stomach +2_2444.png,Stomach +2_2445.png,Stomach +2_2446.png,Stomach +2_2447.png,Stomach +2_2448.png,Stomach +2_2449.png,Stomach +2_2450.png,Stomach +2_2451.png,Stomach +2_2452.png,Stomach +2_2453.png,Stomach +2_2454.png,Stomach +2_2455.png,Testis +2_2456.png,Testis +2_2457.png,Testis +2_2458.png,Testis +2_2459.png,Testis +2_2460.png,Testis +2_2461.png,Testis +2_2462.png,Testis +2_2463.png,Testis +2_2464.png,Testis +2_2465.png,Testis +2_2466.png,Testis +2_2467.png,Testis +2_2468.png,Testis +2_2469.png,Testis +2_2470.png,Testis +2_2471.png,Testis +2_2472.png,Testis +2_2473.png,Testis +2_2474.png,Testis 
+2_2475.png,Testis +2_2476.png,Testis +2_2477.png,Testis +2_2478.png,Testis +2_2479.png,Testis +2_2480.png,Testis +2_2481.png,Testis +2_2482.png,Testis +2_2483.png,Thyroid +2_2484.png,Thyroid +2_2485.png,Thyroid +2_2486.png,Thyroid +2_2487.png,Thyroid +2_2488.png,Thyroid +2_2489.png,Thyroid +2_2490.png,Thyroid +2_2491.png,Thyroid +2_2492.png,Thyroid +2_2493.png,Thyroid +2_2494.png,Thyroid +2_2495.png,Thyroid +2_2496.png,Thyroid +2_2497.png,Thyroid +2_2498.png,Thyroid +2_2499.png,Thyroid +2_2500.png,Thyroid +2_2501.png,Thyroid +2_2502.png,Thyroid +2_2503.png,Thyroid +2_2504.png,Thyroid +2_2505.png,Thyroid +2_2506.png,Thyroid +2_2507.png,Thyroid +2_2508.png,Thyroid +2_2509.png,Thyroid +2_2510.png,Thyroid +2_2511.png,Thyroid +2_2512.png,Thyroid +2_2513.png,Thyroid +2_2514.png,Thyroid +2_2515.png,Thyroid +2_2516.png,Thyroid +2_2517.png,Thyroid +2_2518.png,Thyroid +2_2519.png,Thyroid +2_2520.png,Thyroid +2_2521.png,Thyroid +2_2522.png,Thyroid +2_2523.png,Uterus +2_2524.png,Uterus +2_2525.png,Uterus +2_2526.png,Uterus +2_2527.png,Uterus +2_2528.png,Uterus +2_2529.png,Uterus +2_2530.png,Uterus +2_2531.png,Uterus +2_2532.png,Uterus +2_2533.png,Uterus +2_2534.png,Uterus +2_2535.png,Uterus +2_2536.png,Uterus +2_2537.png,Uterus +2_2538.png,Uterus +2_2539.png,Uterus +2_2540.png,Uterus +2_2541.png,Uterus +2_2542.png,Uterus +2_2543.png,Uterus +2_2544.png,Uterus +2_2545.png,Uterus +2_2546.png,Uterus +2_2547.png,Uterus +2_2548.png,Uterus +2_2549.png,Uterus +2_2550.png,Uterus +2_2551.png,Uterus +2_2552.png,Uterus +2_2553.png,Uterus +2_2554.png,Uterus +2_2555.png,Uterus +2_2556.png,Uterus +2_2557.png,Uterus +2_2558.png,Uterus +2_2559.png,Uterus +2_2560.png,Uterus +2_2561.png,Uterus +2_2562.png,Uterus +2_2563.png,Uterus +2_2564.png,Uterus +2_2565.png,Uterus +2_2566.png,Uterus +2_2567.png,Uterus +2_2568.png,Uterus +2_2569.png,Uterus +2_2570.png,Uterus +2_2571.png,Uterus +2_2572.png,Uterus +2_2573.png,Uterus +2_2574.png,Uterus +2_2575.png,Uterus +2_2576.png,Uterus +2_2577.png,Uterus +2_2578.png,Uterus +2_2579.png,Uterus +2_2580.png,Uterus +2_2581.png,Uterus +2_2582.png,Uterus +2_2583.png,Uterus +2_2584.png,Uterus +2_2585.png,Uterus +2_2586.png,Uterus +2_2587.png,Uterus +2_2588.png,Uterus +2_2589.png,Uterus +2_2590.png,Uterus +2_2591.png,Uterus +2_2592.png,Uterus +2_2593.png,Uterus +2_2594.png,Uterus +2_2595.png,Uterus +2_2596.png,Uterus +2_2597.png,Uterus +2_2598.png,Uterus +2_2599.png,Uterus +2_2600.png,Uterus +2_2601.png,Uterus +2_2602.png,Uterus +2_2603.png,Uterus +2_2604.png,Uterus +2_2605.png,Uterus +2_2606.png,Uterus +2_2607.png,Uterus +2_2608.png,Uterus +2_2609.png,Uterus +2_2610.png,Uterus +2_2611.png,Uterus +2_2612.png,Uterus +2_2613.png,Uterus +2_2614.png,Uterus +2_2615.png,Uterus +2_2616.png,Uterus +2_2617.png,Uterus +2_2618.png,Uterus +2_2619.png,Uterus +2_2620.png,Uterus +2_2621.png,Uterus +2_2622.png,Uterus +2_2623.png,Uterus +2_2624.png,Uterus +2_2625.png,Uterus +2_2626.png,Uterus +2_2627.png,Uterus +2_2628.png,Uterus +2_2629.png,Uterus +2_2630.png,Uterus +2_2631.png,Uterus +2_2632.png,Uterus +2_2633.png,Uterus +2_2634.png,Colon +2_2635.png,Colon +2_2636.png,Colon +2_2637.png,Colon +2_2638.png,Colon +2_2639.png,Colon +2_2640.png,Colon +2_2641.png,Colon +2_2642.png,Colon +2_2643.png,Colon +2_2644.png,Colon +2_2645.png,Colon +2_2646.png,Colon +2_2647.png,Colon +2_2648.png,Colon +2_2649.png,Colon +2_2650.png,Colon +2_2651.png,Colon +2_2652.png,Colon +2_2653.png,Colon +2_2654.png,Colon +2_2655.png,Colon +2_2656.png,Colon +2_2657.png,Colon +2_2658.png,Colon +2_2659.png,Colon +2_2660.png,Colon 
+2_2661.png,Colon +2_2662.png,Colon +2_2663.png,Colon +2_2664.png,Colon +2_2665.png,Colon +2_2666.png,Colon +2_2667.png,Colon +2_2668.png,Colon +2_2669.png,Colon +2_2670.png,Colon +2_2671.png,Colon +2_2672.png,Colon +2_2673.png,Colon +2_2674.png,Colon +2_2675.png,Colon +2_2676.png,Colon +2_2677.png,Colon +2_2678.png,Colon +2_2679.png,Colon +2_2680.png,Colon +2_2681.png,Colon +2_2682.png,Colon +2_2683.png,Colon +2_2684.png,Colon +2_2685.png,Colon +2_2686.png,Colon +2_2687.png,Colon +2_2688.png,Colon +2_2689.png,Colon +2_2690.png,Colon +2_2691.png,Colon +2_2692.png,Colon +2_2693.png,Colon +2_2694.png,Colon +2_2695.png,Colon +2_2696.png,Colon +2_2697.png,Colon +2_2698.png,Colon +2_2699.png,Colon +2_2700.png,Colon +2_2701.png,Colon +2_2702.png,Colon +2_2703.png,Colon +2_2704.png,Colon +2_2705.png,Colon +2_2706.png,Colon +2_2707.png,Colon +2_2708.png,Colon +2_2709.png,Colon +2_2710.png,Colon +2_2711.png,Colon +2_2712.png,Colon +2_2713.png,Colon +2_2714.png,Colon +2_2715.png,Colon +2_2716.png,Colon +2_2717.png,Colon +2_2718.png,Colon +2_2719.png,Colon +2_2720.png,Colon +2_2721.png,Colon diff --git a/docs/datasets/PanNuke/weight_config.yaml b/docs/datasets/PanNuke/weight_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2b961c5f5c892e79977e099c7acc68a3524cb4a --- /dev/null +++ b/docs/datasets/PanNuke/weight_config.yaml @@ -0,0 +1,20 @@ +tissue: + "Adrenal_gland": 437 + "Bile-duct": 420 + "Bladder": 146 + "Breast": 2351 + "Cervix": 293 + "Colon": 1440 + "Esophagus": 424 + "HeadNeck": 384 + "Kidney": 134 + "Liver": 224 + "Lung": 184 + "Ovarian": 146 + "Pancreatic": 195 + "Prostate": 182 + "Skin": 187 + "Stomach": 146 + "Testis": 196 + "Thyroid": 226 + "Uterus": 186 diff --git a/docs/figures/model.png b/docs/figures/model.png new file mode 100644 index 0000000000000000000000000000000000000000..b81639953216067369058ec2a7f095b732059626 Binary files /dev/null and b/docs/figures/model.png differ diff --git a/docs/readmes/cell_segmentation.md b/docs/readmes/cell_segmentation.md new file mode 100644 index 0000000000000000000000000000000000000000..27127f852a94696711ac8a4ade44866cf37cbbc5 --- /dev/null +++ b/docs/readmes/cell_segmentation.md @@ -0,0 +1,60 @@ +# Cell Segmentation + +## Training + +The data structure used to train cell segmentation networks is different from the one used to train classification networks on the WSI/patient level. Currently, due to the massive number of cells inside a WSI, all well-known cell segmentation datasets (such as [PanNuke](https://warwick.ac.uk/fac/cross_fac/tia/data/pannuke), https://doi.org/10.48550/arXiv.2003.10778) provide just patches with cell annotations. Therefore, we use the following dataset structure (with k folds): + +```bash +dataset +├── dataset_config.yaml +├── fold0 +│ ├── images +| | ├── 0_imgname0.png +| | ├── 0_imgname1.png +| | ├── 0_imgname2.png +... +| | └── 0_imgnameN.png +│ ├── labels +| | ├── 0_imgname0.npy +| | ├── 0_imgname1.npy +| | ├── 0_imgname2.npy +... +| | └── 0_imgnameN.npy +| └── types.csv +├── fold1 +│ ├── images +| | ├── 1_imgname0.png +| | ├── 1_imgname1.png +... +│ ├── labels +| | ├── 1_imgname0.npy +| | ├── 1_imgname1.npy +... +| └── types.csv +... +└── foldk +│ ├── images + | ├── k_imgname0.png + | ├── k_imgname1.png +...
+ ├── labels
+ | ├── k_imgname0.npy
+ | ├── k_imgname1.npy
+ └── types.csv
+```
+
+Each types.csv file should have the following header:
+```csv
+img,type # Header
+foldnum_imgname0.png,SetTypeHere # Each row is one patch with its tissue type
+```
+
+The labels are numpy masks with the following structure:
+TBD
+
+## Add a new dataset
+Add the new dataset to the dataset coordinator.
+
+All settings of the dataset must be configured in the corresponding yaml file, under the data section.
+
+The dataset name is **not** case-sensitive!
diff --git a/docs/readmes/monuseg.md b/docs/readmes/monuseg.md
new file mode 100644
index 0000000000000000000000000000000000000000..f36de57ef0dd84a7ea8baecc60b56ca7db868d4f
--- /dev/null
+++ b/docs/readmes/monuseg.md
@@ -0,0 +1,34 @@
+## MoNuSeg Preparation
+The original MoNuSeg dataset has the following structure, using .xml annotations and .tiff files with a size of $1000 \times 1000$ pixels:
+
+```bash
+├── testing
+│ ├── images
+│ │ ├── TCGA-2Z-A9J9-01A-01-TS1.tif
+│ │ ├── TCGA-44-2665-01B-06-BS6.tif
...
+│ └── labels
+│ ├── TCGA-2Z-A9J9-01A-01-TS1.xml
+│ ├── TCGA-44-2665-01B-06-BS6.xml
...
+└── training
+ ├── images
+ └── labels
+```
+For our experiments, we resized the dataset images to $1024 \times 1024$ pixels and converted the .xml annotations to binary masks:
+```bash
+├── testing
+│ ├── images
+│ │ ├── TCGA-2Z-A9J9-01A-01-TS1.png
+│ │ ├── TCGA-44-2665-01B-06-BS6.png
...
+│ └── labels
+│ │ ├── TCGA-2Z-A9J9-01A-01-TS1.npy
+│ │ ├── TCGA-44-2665-01B-06-BS6.npy
...
+└── training
+ ├── images
+ └── labels
+```
+
+Everything can be extracted using the [`cell_segmentation/datasets/prepare_monuseg.py`](cell_segmentation/datasets/prepare_monuseg.py) script.
diff --git a/docs/readmes/pannuke.md b/docs/readmes/pannuke.md
new file mode 100644
index 0000000000000000000000000000000000000000..591b581cbcbefd94ed87c749f956defc339ef651
--- /dev/null
+++ b/docs/readmes/pannuke.md
@@ -0,0 +1,57 @@
+## PanNuke Preparation
+The original PanNuke dataset has the following structure, using just one big array for each dataset split:
+
+```bash
+├── fold0
+│ ├── images.npy
+│ ├── masks.npy
+│ └── types.npy
+├── fold1
+│ ├── images.npy
+│ ├── masks.npy
+│ └── types.npy
+└── fold2
+ ├── images.npy
+ ├── masks.npy
+ └── types.npy
+```
+
+For memory efficiency and to make use of multi-threaded data loading with our augmentation pipeline, we reassemble the dataset into the following structure:
+```bash
+├── fold0
+│ ├── cell_count.csv # cell count for each image, used for sampling
+│ ├── images # H&E image for each sample as .png files
+│ │ ├── 0_0.png
+│ │ ├── 0_1.png
+│ │ ├── 0_2.png
...
+│ ├── labels # label as .npy arrays for each sample
+│ │ ├── 0_0.npy
+│ │ ├── 0_1.npy
+│ │ ├── 0_2.npy
...
+│ └── types.csv # csv file with the tissue type for each image
+├── fold1
+│ ├── cell_count.csv
+│ ├── images
+│ │ ├── 1_0.png
...
+│ ├── labels
+│ │ ├── 1_0.npy
...
+│ └── types.csv
+├── fold2
+│ ├── cell_count.csv
+│ ├── images
+│ │ ├── 2_0.png
...
+│ ├── labels
+│ │ ├── 2_0.npy
...
+│ └── types.csv
+├── dataset_config.yaml # dataset config with dataset information
+└── weight_config.yaml # config file for our sampling
+```
+
+We provide all configuration files for the PanNuke dataset in the [`configs/datasets/PanNuke`](configs/datasets/PanNuke) folder. Please copy them into your dataset folder. Images and masks have to be extracted using the [`cell_segmentation/datasets/prepare_pannuke.py`](cell_segmentation/datasets/prepare_pannuke.py) script.
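The reassembled fold layout above maps directly onto a small loading routine. The sketch below is illustrative only (the `load_fold_sample` helper and the example paths are assumptions, not part of the repository); it shows how one patch, its label array, and its tissue type could be read from a prepared fold:

```python
# Minimal, illustrative reader for the reassembled fold layout described above.
# The helper name and paths are assumptions; the repository's own dataset classes
# handle this during training.
from pathlib import Path

import numpy as np
import pandas as pd
from PIL import Image


def load_fold_sample(fold_dir: str, img_name: str):
    """Return (image, label array, tissue type) for one patch of a prepared fold."""
    fold = Path(fold_dir)
    types = pd.read_csv(fold / "types.csv")  # columns: img, type
    tissue = types.loc[types["img"] == img_name, "type"].item()

    image = np.array(Image.open(fold / "images" / img_name))  # H&E patch (.png)
    label = np.load(fold / "labels" / Path(img_name).with_suffix(".npy").name, allow_pickle=True)
    return image, label, tissue


image, label, tissue = load_fold_sample("dataset/fold0", "0_0.png")
print(image.shape, tissue)
```

Note that `cell_count.csv` and `weight_config.yaml` are only needed for the cell-based sampling during training; they are not required to read an individual patch.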
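The MoNuSeg conversion described further above is implemented in [`cell_segmentation/datasets/prepare_monuseg.py`](cell_segmentation/datasets/prepare_monuseg.py); the snippet below is only a rough sketch of the idea, assuming the usual MoNuSeg XML layout with `Region`/`Vertices`/`Vertex` nodes, and does not mirror the script's actual interface:

```python
# Rough sketch of the .xml -> binary .npy conversion; the XML layout (Region/Vertices/Vertex)
# and the resize step are assumptions, the real logic lives in prepare_monuseg.py.
import xml.etree.ElementTree as ET

import cv2
import numpy as np


def xml_to_binary_mask(xml_path: str, height: int = 1000, width: int = 1000) -> np.ndarray:
    """Rasterize every annotated nucleus polygon into a single binary mask."""
    mask = np.zeros((height, width), dtype=np.uint8)
    root = ET.parse(xml_path).getroot()
    for region in root.iter("Region"):
        vertices = [(float(v.get("X")), float(v.get("Y"))) for v in region.iter("Vertex")]
        polygon = np.round(np.array(vertices)).astype(np.int32)
        cv2.fillPoly(mask, [polygon], 1)
    return mask


mask = xml_to_binary_mask("testing/labels/TCGA-2Z-A9J9-01A-01-TS1.xml")
# Nearest-neighbour interpolation keeps the resized mask binary.
mask_1024 = cv2.resize(mask, (1024, 1024), interpolation=cv2.INTER_NEAREST)
np.save("TCGA-2Z-A9J9-01A-01-TS1.npy", mask_1024)
```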
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5bcbe92abc0705a3912b376771e614aaedcdea0
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# Model implementations and pretrained models
+#
+# @ Fabian Hörst, fabian.hoerst@uk-essen.de
+# Institute for Artificial Intelligence in Medicine,
+# University Medicine Essen
diff --git a/models/segmentation/__init__.py b/models/segmentation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/segmentation/cell_segmentation/__init__.py b/models/segmentation/cell_segmentation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/segmentation/cell_segmentation/cellvit.py b/models/segmentation/cell_segmentation/cellvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..a96c24f60142dba8010d7249b3be1b30669c458f
--- /dev/null
+++ b/models/segmentation/cell_segmentation/cellvit.py
@@ -0,0 +1,617 @@
+# -*- coding: utf-8 -*-
+# CellViT networks and adaptations
+#
+# UNETR paper and code: https://github.com/tamasino52/UNETR
+# SAM paper and code: https://segment-anything.com/
+#
+# @ Fabian Hörst, fabian.hoerst@uk-essen.de
+# Institute for Artificial Intelligence in Medicine,
+# University Medicine Essen
+
+from collections import OrderedDict
+from dataclasses import dataclass
+from functools import partial
+from json import decoder
+from pathlib import Path
+from typing import List, Literal, Tuple, Union, Optional, Sequence, Callable
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from cell_segmentation.utils.post_proc_cellvit import DetectionCellPostProcessor
+
+from monai.networks.blocks import UpSample
+from monai.networks.layers.factories import Conv
+from monai.networks.layers.utils import get_act_layer
+from monai.networks.nets.basic_unet import UpCat, TwoConv
+from monai.utils import InterpolateMode
+from .cellvit_unirepLKnet import UniRepLKNet
+from .replknet import *
+
+
+class LayerNorm(nn.Module):
+    """ LayerNorm that supports two data formats: channels_last (default) or channels_first.
+    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
+    shape (batch_size, height, width, channels) while channels_first corresponds to inputs
+    with shape (batch_size, channels, height, width).
+ """ + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == "channels_last": + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + + + +encoder_feature_channel = { + "unireplknet_a": (40, 80, 160, 320), + "unireplknet_f": (48, 96, 192, 384), + "unireplknet_p": (64, 128, 256, 512), + "unireplknet_n": (80, 160, 320, 640), + "unireplknet_t": (80, 160, 320, 640), + "unireplknet_s": (96, 192, 384, 768), + "unireplknet_b": (128, 256, 512, 1024), + "unireplknet_l": (192, 384, 768, 1536), + "unireplknet_xl": (256, 512, 1024, 2048), + "convnext_tiny": (48, 96, 192, 384, 768), + "resnet50": (64, 256, 512, 1024, 2048), + "vitdet_small": (768, 768, 768, 768, 768), +} + + + +def _get_encoder_channels_by_backbone(backbone: str, in_channels: int = 3) -> tuple: + """ + Get the encoder output channels by given backbone name. + + Args: + backbone: name of backbone to generate features, can be from [efficientnet-b0, ..., efficientnet-b7]. + in_channels: channel of input tensor, default to 3. + + Returns: + A tuple of output feature map channels' length . + """ + encoder_channel_tuple = encoder_feature_channel[backbone] #encoder_channel_tuple是指的编码器通道元组,在这里是有4个通道的元组 + encoder_channel_list = [in_channels] + list(encoder_channel_tuple) #encoder_channel_list是指的编码器通道列表[3,80,160,320,640] + encoder_channel = tuple(encoder_channel_list) #encoder_channel是指的编码器通道元组(3,80,160,320,640) + return encoder_channel + + + +class RepLKDeocder(nn.Module): + def __init__(self, + encoder_channels: Sequence[int], + spatial_dims: int, + decoder_channels: Sequence[int], + stage_lk_sizes, + drop_path, + upsample: str, + pre_conv: Optional[str], + interp_mode: str, + align_corners: Optional[bool], + small_kernel, + dw_ratio: int=1, + small_kernel_merged=False, + norm: Union[str, tuple] = ("batch", {"eps": 1e-3, "momentum": 0.1}), + act: Union[str, tuple] = ("relu", {"inplace": True}), + dropout: Union[float, tuple] = 0.0, + bias: bool = False, + is_pad: bool = True, + ffn_ratio=4, + ): + super().__init__() + + in_channels = [encoder_channels[-1]] + list(decoder_channels[:-1]) #in_channels=[640,1024,512,256,128] + skip_channels = list(encoder_channels[1:-1][::-1]) + [0] + halves = [True] * (len(skip_channels) - 1) + halves.append(False) + stage_lk_sizes = stage_lk_sizes + blocks = [] + for in_chn, skip_chn, out_chn, halve in zip(in_channels, skip_channels, decoder_channels, halves): + blocks.append( + UpCat( + spatial_dims=spatial_dims, + in_chns=in_chn, + cat_chns=skip_chn, + out_chns=out_chn, + act=act, + norm=norm, + dropout=dropout, + bias=bias, + upsample=upsample, + pre_conv=pre_conv, + interp_mode=interp_mode, + align_corners=align_corners, + halves=halve, + is_pad=is_pad, + ) + + ) + + self.blocks = nn.ModuleList(blocks) + repblock = [] + for i in range(4): + repblock.append(RepLKBlock(in_channels=in_channels[i], dw_channels=int(in_channels[i] * dw_ratio), 
+                                       block_lk_size=stage_lk_sizes[i],
+                                       small_kernel=small_kernel, drop_path=drop_path, small_kernel_merged=small_kernel_merged))
+
+        self.repblock = nn.ModuleList(repblock)
+
+        convffnblock = []
+        for i in range(4):
+            convffnblock.append(ConvFFN(in_channels=in_channels[i], internal_channels=int(in_channels[i] * ffn_ratio), out_channels=in_channels[i], drop_path=drop_path))
+        self.convffnblock = nn.ModuleList(convffnblock)
+
+        self.upsample = [UpSample(
+            spatial_dims,
+            decoder_channels[i],
+            decoder_channels[i],
+            mode=upsample,
+            pre_conv=pre_conv,
+            interp_mode=interp_mode,
+            align_corners=align_corners,) for i in range(len(decoder_channels) - 1)]
+
+        self.upsample1 = UpSample(
+            spatial_dims,
+            256,
+            256,
+            2,
+            mode=upsample,
+            pre_conv=pre_conv,
+            interp_mode=interp_mode,
+            align_corners=align_corners,
+        )
+        self.upsample2 = UpSample(
+            spatial_dims,
+            128,
+            128,
+            2,
+            mode=upsample,
+            pre_conv=pre_conv,
+            interp_mode=interp_mode,
+            align_corners=align_corners,
+        )
+        self.convs = TwoConv(spatial_dims, 304, decoder_channels[-2], act, norm, bias, dropout)
+        self.convs1 = TwoConv(spatial_dims, 152, decoder_channels[-1], act, norm, bias, dropout)
+
+    def forward(self, features: List[torch.Tensor], input_feature: torch.Tensor, skip_connect: int = 3):
+        skips = features[:-1][::-1]  # skips[0],[1],[2],[3] = [16,320,16,16], [16,160,32,32], [16,80,64,64], [16,40,128,128]
+        features = features[1:][::-1]
+        # input_feature = self.conv1(input_feature)  # input_feature = [16,64,256,256]
+        x = features[0]  # x = [16,640,8,8]
+        for i, (block, repblock, convffnblock) in enumerate(zip(self.blocks, self.repblock, self.convffnblock)):
+            if i < skip_connect:
+                skip = skips[i]  # skip = [16,320,16,16], [16,160,32,32], [16,80,64,64], [16,40,128,128]
+                # x = repblock(x)
+                # x = convffnblock(x)
+                x = block(x, skip)
+            else:
+                # x = repblock(x)
+                skip = input_feature[1]
+                x = self.upsample1(x)
+                x = torch.cat([skip, x], dim=1)
+                x = self.convs(x)
+
+                skip = input_feature[0]
+                x = self.upsample2(x)
+                x = torch.cat([skip, x], dim=1)
+                x = self.convs1(x)
+
+        return x
+
+
+class SegmentationHead(nn.Sequential):
+    """
+    Segmentation head.
+    This class refers to `segmentation_models.pytorch
+