|
|
|
|
|
|
|
from albumentations import (
    Compose,
    ElasticTransform,
    Flip,
    GaussNoise,  # NOTE(review): was "GaussianNoise" — not an albumentations name (ImportError)
    GridDistortion,
    OpticalDistortion,
    RandomBrightnessContrast,
    RandomCrop,
)
import numpy as np
from sklearn.svm._liblinear import train  # NOTE(review): private module; looks like an IDE auto-import mistake — the `train(...)` call below is a placeholder
from torch.utils.data import DataLoader, Subset
|
|
|
|
|
|
def get_augmentation_pipeline():
    """Build the training-time image augmentation pipeline.

    Returns:
        albumentations.Compose: a pipeline that independently applies, each
        with probability 0.5: a flip, a 128x128 random crop, elastic / grid /
        optical distortions, Gaussian noise, and brightness/contrast jitter.
    """
    return Compose([
        Flip(p=0.5),
        RandomCrop(height=128, width=128, p=0.5),
        # Elastic deformation with sigma and affine jitter scaled off alpha=120.
        ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03, p=0.5),
        GridDistortion(p=0.5),
        OpticalDistortion(p=0.5),
        # Fixed: albumentations' Gaussian-noise transform is GaussNoise;
        # "GaussianNoise" does not exist and fails at import.
        GaussNoise(p=0.5),
        RandomBrightnessContrast(p=0.5),
    ])
|
|
|
|
# Module-level singleton: the augmentation pipeline is built once at import time.
augmentation_pipeline = get_augmentation_pipeline()
|
|
|
|
|
|
|
|
|
|
import segmentation_models_pytorch as smp
|
|
|
|
|
|
# UNet++ segmentation model on an ImageNet-pretrained ResNet-34 encoder.
# NOTE(review): in_channels=4 with ImageNet weights relies on smp adapting the
# pretrained 3-channel stem conv to 4 channels — presumably a 4-band input
# (e.g. RGB + an extra modality); confirm against the dataset.
model = smp.UnetPlusPlus(
    encoder_name="resnet34",     # backbone encoder
    encoder_weights="imagenet",  # transfer-learning initialization
    in_channels=4,               # number of input channels
    classes=4                    # number of output segmentation classes
)
|
|
|
|
|
|
import torch
|
|
import torch.nn as nn
|
|
from segmentation_models_pytorch.losses import TverskyLoss
|
|
|
|
|
|
class CombinedLoss(nn.Module):
    """Convex combination of Dice and Tversky losses for multiclass segmentation.

    loss = alpha * dice(y_pred, y_true) + (1 - alpha) * tversky(y_pred, y_true)

    Args:
        alpha: weight of the Dice term in [0, 1]; the Tversky term gets 1 - alpha.
    """

    def __init__(self, alpha=0.5):
        super().__init__()
        # Fixed: smp losses take a *mode*, not an activation name. Valid modes
        # are "binary", "multiclass", "multilabel"; passing "softmax" fails the
        # mode check at construction time. "multiclass" matches the 4-class
        # model in this file and applies log-softmax to logits internally.
        self.dice_loss = smp.losses.DiceLoss(mode="multiclass")
        # alpha=0.7 / beta=0.3 weights false positives more heavily than
        # false negatives in the Tversky index.
        self.tversky_loss = TverskyLoss(mode="multiclass", alpha=0.7, beta=0.3)
        self.alpha = alpha

    def forward(self, y_pred, y_true):
        """Return the weighted loss.

        In multiclass mode y_pred is raw logits (N, C, H, W) and y_true is
        integer class labels (N, H, W).
        """
        dice = self.dice_loss(y_pred, y_true)
        tversky = self.tversky_loss(y_pred, y_true)
        return self.alpha * dice + (1 - self.alpha) * tversky
|
|
|
|
# Default 50/50 Dice/Tversky blend used for training.
loss_fn = CombinedLoss()
|
|
|
|
|
|
from torch.optim.lr_scheduler import CosineAnnealingLR
|
|
|
|
# Adam with a cosine-annealed LR: decays from 1e-3 toward eta_min=1e-5 over
# T_max=10 epochs.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = CosineAnnealingLR(optimizer, T_max=10, eta_min=1e-5)

# NOTE(review): `num_epochs` is not defined anywhere in this file — NameError
# as written; define it (e.g. num_epochs = 10 to match T_max) before running.
# NOTE(review): `train(...)` is a literal placeholder call; the `train`
# imported from sklearn.svm._liblinear is unrelated to this loop — replace
# with the project's actual per-epoch training function.
for epoch in range(num_epochs):
    train(...)
    # Step the cosine schedule once per epoch, after the epoch's optimizer steps.
    scheduler.step()
|
|
|
|
|
|
import pydensecrf.densecrf as dcrf
|
|
|
|
def apply_crf(prob_map, img):
    """Refine softmax class probabilities with a fully-connected CRF.

    Args:
        prob_map: per-class probabilities, assumed shaped (n_classes, H, W)
            — TODO(review): confirm layout against the caller.
        img: uint8 color image shaped (H, W, 3), used by the bilateral kernel.

    Returns:
        (H, W) integer array of per-pixel argmax labels after 5 CRF iterations.
    """
    # Generalized: take the label count from the input instead of hard-coding 4.
    n_classes = prob_map.shape[0]
    d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], n_classes)

    # Unary = negative log-probability. Clip to avoid -log(0) = inf, and give
    # pydensecrf the C-contiguous float32 (n_classes, n_pixels) buffer it
    # requires — the raw (C, H, W) float64 array is not a valid unary input.
    unary = -np.log(np.clip(prob_map, 1e-8, 1.0))
    unary = np.ascontiguousarray(unary.reshape(n_classes, -1), dtype=np.float32)
    d.setUnaryEnergy(unary)

    # Smoothness kernel: nearby pixels prefer the same label.
    d.addPairwiseGaussian(sxy=3, compat=3)
    # Appearance kernel: nearby pixels with similar color prefer the same
    # label; rgbim must be a C-contiguous uint8 image.
    d.addPairwiseBilateral(sxy=30, srgb=13,
                           rgbim=np.ascontiguousarray(img), compat=10)

    Q = d.inference(5)
    return np.argmax(Q, axis=0).reshape((img.shape[0], img.shape[1]))
|
|
|
|
|
|
|
|
|
|
from sklearn.model_selection import KFold
|
|
|
|
# 5-fold cross-validation over the full dataset.
# NOTE(review): `dataset` and `train_model` are not defined in this file —
# presumably project helpers; confirm. No per-fold model re-initialization is
# visible here, so as written each fold would continue training the same
# weights as the previous fold.
kf = KFold(n_splits=5)
for train_idx, valid_idx in kf.split(dataset):
    train_data = Subset(dataset, train_idx)
    valid_data = Subset(dataset, valid_idx)

    train_loader = DataLoader(train_data, batch_size=16, shuffle=True)
    valid_loader = DataLoader(valid_data, batch_size=16, shuffle=False)

    train_model(train_loader, valid_loader)
|
|
|
|
|
|
class EnsembleModel(nn.Module):
    """Average the predictions of several models over the same input.

    The models are wrapped in an nn.ModuleList so their parameters are
    registered with this module; the forward pass returns the element-wise
    mean of all member outputs.
    """

    def __init__(self, models):
        super().__init__()
        self.models = nn.ModuleList(models)

    def forward(self, x):
        # Run every member on the same input, stack along a new leading
        # dimension, and average it away.
        stacked = torch.stack([member(x) for member in self.models])
        return stacked.mean(dim=0)
|
|
|
|
|
|
# NOTE(review): model1/model2/model3 are not defined in this file — presumably
# separately trained checkpoints loaded elsewhere; NameError as written.
models = [model1, model2, model3]
ensemble_model = EnsembleModel(models)
|
|
|
|
|
|
from sklearn.model_selection import ParameterGrid
|
|
|
|
# Exhaustive hyperparameter grid: 2 * 2 * 2 = 8 configurations.
param_grid = {
    'learning_rate': [1e-3, 1e-4],
    'batch_size': [8, 16],
    'loss_alpha': [0.5, 0.7]
}

# NOTE(review): the same `model` instance is reused across configurations with
# no re-initialization visible here, so each config starts from the weights
# the previous config produced — results are not comparable. `train_data`,
# `valid_loader`, and `train_model` come from the cross-validation section
# above; confirm the intended data split for this search.
for params in ParameterGrid(param_grid):
    optimizer = torch.optim.Adam(model.parameters(), lr=params['learning_rate'])
    loss_fn = CombinedLoss(alpha=params['loss_alpha'])
    train_loader = DataLoader(train_data, batch_size=params['batch_size'])

    train_model(train_loader, valid_loader)
|
|
|