from typing import Callable, List, Tuple

import numpy as np
import torch
from torch.nn import functional as F
from tqdm import tqdm

from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt

from net import Net, ResNetModel, EfficientNetModel, EfficientNetModel_b7
from batch_sampler import BatchSampler
from image_dataset import ImageDataset


def train_model(
        model: Net,  # swap in ResNetModel / EfficientNetModel / EfficientNetModel_b7 here
        train_sampler: BatchSampler,
        optimizer: torch.optim.Optimizer,
        loss_function: Callable[..., torch.Tensor],
        device: str,
) -> List[torch.Tensor]:
    # Keep track of the loss of every batch:
    losses = []
    # Put the model in training mode:
    model.train()
    # Feed the batches one by one:
    for batch in tqdm(train_sampler):
        x, y = batch
        # Make sure the samples are on the same device as the model:
        x, y = x.to(device), y.to(device)
        # Get predictions and compute the loss:
        predictions = model(x)
        loss = loss_function(predictions, y)
        # Detach so we do not keep the autograd graph of every batch alive:
        losses.append(loss.detach())
        # Reset the gradients first: we want to learn from each batch
        # separately, not from the entire dataset at once.
        optimizer.zero_grad()
        # Backpropagate the loss through the model:
        loss.backward()
        # Let the optimizer take a step in the right direction:
        optimizer.step()
    return losses
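
# A minimal usage sketch (an addition, not part of the original script) showing
# how train_model might be driven. The Net constructor arguments and the
# BatchSampler signature below are hypothetical; adapt them to the actual
# definitions in net.py and batch_sampler.py.
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model = Net(n_classes=6).to(device)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   loss_function = torch.nn.CrossEntropyLoss()
#   train_sampler = BatchSampler(ImageDataset(...), batch_size=32)
#   losses = train_model(model, train_sampler, optimizer, loss_function, device)
#   print(f"mean train loss: {torch.stack(losses).mean().item():.4f}")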


def test_model(
        model: Net,
        test_sampler: BatchSampler,
        loss_function: Callable[..., torch.Tensor],
        device: str,
        fpr: dict,
        tpr: dict,
        roc: dict,
) -> Tuple[List[torch.Tensor], np.ndarray]:
    # Set the model to evaluation mode:
    model.eval()
    losses = []
    all_y_pred_probs = []
    all_y_true = []

    # Make sure the model is not updated based on the test data:
    with torch.no_grad():
        for (x, y) in tqdm(test_sampler):
            # Make sure the samples are on the same device as the model:
            x = x.to(device)
            y = y.to(device)
            prediction = model(x)
            loss = loss_function(prediction, y)
            losses.append(loss)
            # Convert logits to class probabilities for the ROC computation:
            probabilities = F.softmax(prediction, dim=1)
            all_y_pred_probs.append(probabilities.cpu().numpy())
            all_y_true.extend(y.cpu().numpy())

    y_pred_probs = np.concatenate(all_y_pred_probs, axis=0)
    y_true = np.array(all_y_true)

    # NOTE: Comment out this loop and uncomment the binary-class loop below
    # to see the correct ROC curve for a binary problem.
    # Compute the ROC curve and ROC area for each class:
    for i in range(6):  # 6 classes
        a, b, _ = roc_curve(y_true == i, y_pred_probs[:, i])
        fpr[i].extend(a)
        tpr[i].extend(b)
        # auc() requires a monotonic x-axis, so compute the area from this
        # evaluation pass only, not from the lists accumulated across passes:
        roc[i] = auc(a, b)

    # # ROC for binary classification:
    # for i in range(2):
    #     fpr[i], tpr[i], _ = roc_curve(y_true == i, y_pred_probs[:, i])
    #     roc[i] = auc(fpr[i], tpr[i])

    return losses, y_pred_probs
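

# A sketch (an addition, not part of the original pipeline) of how the
# fpr/tpr/roc dictionaries filled by test_model could be plotted with the
# matplotlib import above. It assumes the dicts were initialised as
#   fpr = {i: [] for i in range(6)}; tpr = {i: [] for i in range(6)}; roc = {}
# before the first call to test_model. Note that if test_model runs several
# times, the extended fpr/tpr lists mix points from multiple passes.
def plot_roc_curves(fpr: dict, tpr: dict, roc: dict, n_classes: int = 6) -> None:
    plt.figure()
    for i in range(n_classes):
        plt.plot(fpr[i], tpr[i], label=f"class {i} (AUC = {roc[i]:.2f})")
    # Diagonal reference line for a random classifier:
    plt.plot([0, 1], [0, 1], linestyle="--", color="grey")
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.title("Per-class ROC curves")
    plt.legend(loc="lower right")
    plt.show()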