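"""Streamlit app for breast cancer image classification.

Loads a CNN-ViT model (vit.CNN_ViT), classifies an uploaded breast-tissue
image as benign or malignant, and displays a LIME explanation of the
prediction.
"""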
import streamlit as st
import tensorflow as tf
import cv2
import numpy as np
from lime import lime_image
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from grad_cam import GradCam
from vit import CNN_ViT

# Hyper-parameters for the CNN-ViT; these must match the configuration used to
# train the checkpoint loaded below.
hp = {}
hp['image_size'] = 256
hp['num_channels'] = 3
hp['patch_size'] = 32
hp['num_patches'] = (hp['image_size']**2) // (hp["patch_size"]**2)
hp["flat_patches_shape"] = (hp["num_patches"], hp['patch_size']*hp['patch_size']*hp["num_channels"])
hp['batch_size'] = 32
hp['lr'] = 1e-4
hp["num_epochs"] = 30
hp['num_classes'] = 2
hp["num_layers"] = 6
hp["hidden_dim"] = 256
hp["mlp_dim"] = 256
hp['num_heads'] = 6
hp['dropout_rate'] = 0.1
hp['class_names'] = ["breast_benign", "breast_malignant"]

#model = load_model("model/resnet_for_breast_cancer-v1.h5")
model = CNN_ViT(hp)

model.compile(loss='binary_crossentropy',
              optimizer=tf.keras.optimizers.Adam(hp['lr'], clipvalue=1.0),
              metrics=['acc'])
model.load_weights("model/Breast-ResViT.keras")

print("Model initialized")
explainer = lime_image.LimeImageExplainer()
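
# LIME hands the classifier batches of perturbed images in the same value range
# as the image it was given (here 0-255 doubles), while the model is fed
# [0, 1]-scaled float32 inputs elsewhere (see process_image_as_batch). This
# small wrapper applies that scaling before predicting; the 0-255 input-range
# assumption follows from how the upload is decoded with OpenCV.
def lime_predict(images):
    images = np.asarray(images, dtype=np.float32) / 255.0
    return model.predict(images)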




def main():
    st.title("Breast Cancer Classification")

    # Upload image through drag and drop
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # Convert the uploaded file to OpenCV format
        image, gray_img = convert_to_opencv(uploaded_file)
        gray_img = cv2.resize(gray_img, (256, 256))
        #gradCam = GradCam(model, image, last_conv_layer_name='conv5_block3_3_conv')
        
        # Display the uploaded image
        st.image(image, channels="BGR", caption="Uploaded Image", use_column_width=True)

        # Predict the class of the uploaded image and display it
        image_class = predict_single_image(image, model, hp)
        #gradCam.save_and_display_gradcam()
        st.write(f"Image Class: {image_class}")

        
        # LIME perturbs the (grayscale) image and queries the classifier on each
        # perturbed sample; lime_predict rescales those samples to [0, 1] first.
        explanation = explainer.explain_instance(
            gray_img.astype('double'),
            lime_predict,
            top_labels=2,
            hide_color=0,
            num_samples=100
        )
        temp, mask = explanation.get_image_and_mask(
            explanation.top_labels[0],
            positive_only=True,
            num_features=5,
            hide_rest=True
        )

        # LIME returns the image in its original 0-255 range; rescale to [0, 1] for display
        temp = temp / 255.0
        xai = mark_boundaries(temp.clip(0, 1), mask)

        # Save and display the LIME explanation (OpenCV expects BGR channel order when writing)
        lime_explanation_path = 'lime_explanation.png'
        cv2.imwrite(lime_explanation_path, cv2.cvtColor((xai * 255).astype(np.uint8), cv2.COLOR_RGB2BGR))
        st.image((xai * 255).astype(np.uint8), caption="LIME Explanation", use_column_width=True)
        

def convert_to_opencv(uploaded_file):
    # Read the uploaded file using OpenCV
    image_bytes = uploaded_file.read()
    np_arr = np.frombuffer(image_bytes, np.uint8)
    image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
    gray_img = cv2.imdecode(np_arr, cv2.IMREAD_GRAYSCALE)
    return image, gray_img

def process_image_as_batch(image):
    # Resize to the model's expected input size
    image = cv2.resize(image, (256, 256))
    # Scale pixel values to [0, 1]
    image = image / 255.0
    # Cast to float32 for TensorFlow
    image = image.astype(np.float32)
    return image

def predict_single_image(image, model, hp):
    # Preprocess the image
    preprocessed_image = process_image_as_batch(image)
    # Convert the preprocessed image to a TensorFlow tensor if needed
    preprocessed_image = tf.convert_to_tensor(preprocessed_image)
    # Add an extra batch dimension (required for model.predict)
    preprocessed_image = tf.expand_dims(preprocessed_image, axis=0)
    # Make the prediction
    predictions = model.predict(preprocessed_image)

    # Pick the class with the highest predicted probability
    y_pred_classes = np.argmax(predictions, axis=1)
    class_name = hp['class_names'][y_pred_classes[0]]
    return class_name




if __name__ == "__main__":
    main()
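
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py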