fradinho committed
Commit d28d900 · 1 Parent(s): 11af0fd

Update app.py

Files changed (1):
  1. app.py +699 -1
app.py CHANGED
@@ -20,6 +20,704 @@ def bce_dice(y_true, y_pred):
     return bce(y_true, y_pred) - K.log(jacard(y_true, y_pred))
 
 
+def upsample(X, X_side):
+    """
+    Upsampling and concatenation with the side path.
+    """
+    X = Conv2DTranspose(int(X.shape[1] / 2), (3, 3), strides=(2, 2), padding='same')(X)
+    #X = tf.keras.layers.UpSampling2D((2,2))(X)
+    concat = tf.keras.layers.Concatenate()([X, X_side])
+    return concat
+
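As a quick sanity check, here is a minimal, standalone sketch (mine, not part of the commit) of what upsample computes on channels_last tensors. Note that the function derives the transpose convolution's filter count from the spatial dimension X.shape[1], not from the channel count, so the example mirrors that.

import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2DTranspose

X = Input((16, 16, 64))       # coarse decoder feature map
X_side = Input((32, 32, 32))  # encoder skip connection
up = Conv2DTranspose(int(X.shape[1] / 2), (3, 3), strides=(2, 2), padding='same')(X)  # doubles H and W
out = tf.keras.layers.Concatenate()([up, X_side])
print(out.shape)  # (None, 32, 32, 40): 16/2 = 8 filters plus the 32 skip channels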
+def gating_signal(input, out_size, batch_norm=False):
+    """
+    Resize the down-layer feature map to the same dimension as the up-layer
+    feature map, using a 1x1 convolution.
+    :return: the gating feature map, with the same dimension as the up-layer feature map
+    """
+    x = layers.Conv2D(out_size, (1, 1), padding='same')(input)
+    if batch_norm:
+        x = layers.BatchNormalization()(x)
+    x = layers.Activation('relu')(x)
+    return x
+
+
+def attention_block(x, gating, inter_shape):
+    shape_x = K.int_shape(x)
+    shape_g = K.int_shape(gating)
+
+    # Bring the x signal to the same shape as the gating signal
+    theta_x = layers.Conv2D(inter_shape, (2, 2), strides=(2, 2), padding='same')(x)  # 16
+    shape_theta_x = K.int_shape(theta_x)
+
+    # Bring the gating signal to the same number of filters as inter_shape
+    phi_g = layers.Conv2D(inter_shape, (1, 1), padding='same')(gating)
+    upsample_g = layers.Conv2DTranspose(inter_shape, (3, 3),
+                                        strides=(shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2]),
+                                        padding='same')(phi_g)  # 16
+
+    concat_xg = layers.add([upsample_g, theta_x])
+    act_xg = layers.Activation('relu')(concat_xg)
+    psi = layers.Conv2D(1, (1, 1), padding='same')(act_xg)
+    sigmoid_xg = layers.Activation('sigmoid')(psi)
+    shape_sigmoid = K.int_shape(sigmoid_xg)
+    upsample_psi = layers.UpSampling2D(size=(shape_x[1] // shape_sigmoid[1], shape_x[2] // shape_sigmoid[2]))(sigmoid_xg)  # 32
+
+    upsample_psi = repeat_elem(upsample_psi, shape_x[3])
+
+    y = layers.multiply([upsample_psi, x])
+
+    result = layers.Conv2D(shape_x[3], (1, 1), padding='same')(y)
+    result_bn = layers.BatchNormalization()(result)
+    return result_bn
+
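For intuition, a standalone sketch (assumptions mine: the sizes are chosen so the gating resolution already matches theta_x, which makes the Conv2DTranspose resize step a no-op and lets it be omitted) of the additive attention gate above:

import tensorflow as tf
from tensorflow.keras import layers, backend as K

x = layers.Input((64, 64, 32))       # encoder skip connection
gating = layers.Input((32, 32, 64))  # coarser decoder feature map
inter_shape = 16

theta_x = layers.Conv2D(inter_shape, (2, 2), strides=(2, 2), padding='same')(x)  # (None, 32, 32, 16)
phi_g = layers.Conv2D(inter_shape, (1, 1), padding='same')(gating)               # (None, 32, 32, 16)
act = layers.Activation('relu')(layers.add([theta_x, phi_g]))
psi = layers.Conv2D(1, (1, 1), padding='same')(act)                              # single attention map
alpha = layers.UpSampling2D(size=(2, 2))(layers.Activation('sigmoid')(psi))      # back to (None, 64, 64, 1)
alpha = layers.Lambda(lambda t: K.repeat_elements(t, 32, axis=3))(alpha)         # match x's channel count
gated = layers.multiply([alpha, x])                                              # attention-weighted skip
print(gated.shape)                                                               # (None, 64, 64, 32)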
+
+def repeat_elem(tensor, rep):
+    # Lambda layer that repeats the elements of a tensor along axis 3 by a
+    # factor of `rep`: a tensor of shape (None, 256, 256, 3) becomes
+    # (None, 256, 256, 6) with rep=2.
+    return layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=3),
+                         arguments={'repnum': rep})(tensor)
+
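The repeat can be checked directly in eager TensorFlow (a standalone snippet, not part of the commit):

import tensorflow as tf
t = tf.zeros((1, 4, 4, 3))
print(tf.keras.backend.repeat_elements(t, rep=2, axis=3).shape)  # (1, 4, 4, 6)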
+
+activation_function = 'relu'
+recurrent_repeats = 2 * 4
+FILTER_NUM = 4 * 4
+axis = 3
+
+act_func = 'relu'
+filters = 64
+
+
+def encoder(inputs, input_tensor):
+    # Contraction path
+    conv_1 = Conv2D(filters, (3, 3), activation='relu', padding='same')(inputs)
+    conv_1 = BatchNormalization()(conv_1)
+    conv_1 = Dropout(0.1)(conv_1)
+    conv_1 = Conv2D(filters, (3, 3), activation='relu', padding='same')(conv_1)
+    conv_1 = BatchNormalization()(conv_1)
+    pool_1 = MaxPooling2D((2, 2))(conv_1)
+
+    conv_2 = Conv2D(2*filters, (3, 3), activation='relu', padding='same')(pool_1)
+    conv_2 = BatchNormalization()(conv_2)
+    conv_2 = Dropout(0.1)(conv_2)
+    conv_2 = Conv2D(2*filters, (3, 3), activation='relu', padding='same')(conv_2)
+    conv_2 = BatchNormalization()(conv_2)
+    pool_2 = MaxPooling2D((2, 2))(conv_2)
+
+    conv_3 = Conv2D(4*filters, (3, 3), activation='relu', padding='same')(pool_2)
+    conv_3 = BatchNormalization()(conv_3)
+    conv_3 = Dropout(0.1)(conv_3)
+    conv_3 = Conv2D(4*filters, (3, 3), activation='relu', padding='same')(conv_3)
+    conv_3 = BatchNormalization()(conv_3)
+    pool_3 = MaxPooling2D((2, 2))(conv_3)
+
+    conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same')(pool_3)
+    conv_4 = BatchNormalization()(conv_4)
+    conv_4 = Dropout(0.1)(conv_4)
+    conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same')(conv_4)
+    conv_4 = BatchNormalization()(conv_4)
+    pool_4 = MaxPooling2D(pool_size=(2, 2))(conv_4)
+
+    conv_5 = Conv2D(16*filters, (3, 3), activation='relu', padding='same')(pool_4)
+    conv_5 = BatchNormalization()(conv_5)
+    conv_5 = Dropout(0.1)(conv_5)
+
+    model = Model(inputs=[input_tensor], outputs=[conv_5, conv_4, conv_3, conv_2, conv_1])
+    return model
+
+
+def encoder_unet(inputs):
+    ## Project residual
+    # residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
+    #     previous_block_activation
+    # )
+    # x = layers.add([x, residual])  # Add back residual
+    # Contraction path
+    conv_11 = Conv2D(filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
+    conv_11 = BatchNormalization()(conv_11)
+    conv_11 = Dropout(0.2)(conv_11)
+    conv_1 = Conv2D(filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_11)
+    conv_1 = BatchNormalization()(conv_1)
+    #conv_1 = concatenate([resblock(conv_11, 64), conv_1], axis=3)
+    #conv_1 = Dropout(0.2)(conv_1)
+    #pool_1 = layers.GaussianNoise(0.1+np.random.random()*0.4)(conv_1)
+    pool_1 = MaxPooling2D((2, 2))(conv_1)
+
+    conv_2 = Conv2D(2*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(pool_1)
+    conv_2 = BatchNormalization()(conv_2)
+    conv_2 = Dropout(0.2)(conv_2)
+    conv_2 = Conv2D(2*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_2)
+    conv_2 = BatchNormalization()(conv_2)
+    #conv_2 = Dropout(0.2)(conv_2)
+    #conv_2 = Conv2D(2*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_2)
+    #conv_2 = concatenate([resblock(pool_1, 128), conv_2], axis=3)
+    #conv_2 = BatchNormalization()(conv_2)
+    #conv_2 = Dropout(0.2)(conv_2)
+    #pool_2 = layers.GaussianNoise(0.1+np.random.random()*0.4)(conv_2)
+    pool_2 = MaxPooling2D((2, 2))(conv_2)
+
+    conv_3 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(pool_2)
+    conv_3 = BatchNormalization()(conv_3)
+    conv_3 = Dropout(0.2)(conv_3)
+    conv_3 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_3)
+    conv_3 = BatchNormalization()(conv_3)
+    #conv_3 = Dropout(0.2)(conv_3)
+    #conv_3 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_3)
+    #conv_3 = BatchNormalization()(conv_3)
+    #conv_3 = Dropout(0.2)(conv_3)
+    conv_3 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_3)
+    conv_3 = BatchNormalization()(conv_3)
+    #conv_3 = concatenate([resblock(pool_2, 256), conv_3], axis=3)
+    #conv_3 = Dropout(0.2)(conv_3)
+    #pool_3 = layers.GaussianNoise(0.1+np.random.random()*0.4)(conv_3)
+    pool_3 = MaxPooling2D((2, 2))(conv_3)
+
+    conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(pool_3)
+    conv_4 = BatchNormalization()(conv_4)
+    conv_4 = Dropout(0.2)(conv_4)
+    #conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_4)
+    #conv_4 = BatchNormalization()(conv_4)
+    #conv_4 = Dropout(0.2)(conv_4)
+    conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_4)
+    conv_4 = BatchNormalization()(conv_4)
+    conv_4 = Dropout(0.2)(conv_4)
+    #conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_4)
+    #conv_4 = BatchNormalization()(conv_4)
+    #conv_4 = Dropout(0.2)(conv_4)
+    conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_4)
+    conv_4 = BatchNormalization()(conv_4)
+    #conv_4 = concatenate([resblock(pool_3, 512), conv_4], axis=3)
+    #conv_4 = Dropout(0.2)(conv_4)
+    #pool_4 = layers.GaussianNoise(0.1+np.random.random()*0.4)(conv_4)
+    pool_4 = MaxPooling2D(pool_size=(2, 2))(conv_4)
+
+    conv_44 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(pool_4)
+    conv_44 = BatchNormalization()(conv_44)
+    conv_44 = Dropout(0.2)(conv_44)
+    conv_44 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_44)
+    conv_44 = BatchNormalization()(conv_44)
+    conv_44 = Dropout(0.2)(conv_44)
+    #conv_44 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_44)
+    #conv_44 = BatchNormalization()(conv_44)
+    #conv_44 = Dropout(0.2)(conv_44)
+    #conv_4 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_4)
+    #conv_4 = BatchNormalization()(conv_4)
+    #conv_4 = Dropout(0.2)(conv_4)
+    conv_44 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_44)
+    conv_44 = BatchNormalization()(conv_44)
+    #conv_4 = concatenate([resblock(pool_3, 512), conv_4], axis=3)
+    #conv_44 = Dropout(0.2)(conv_44)
+    #pool_4 = layers.GaussianNoise(0.1+np.random.random()*0.4)(conv_4)
+    pool_44 = MaxPooling2D(pool_size=(2, 2))(conv_44)
+
+    conv_5 = Conv2D(16*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(pool_44)
+    conv_5 = BatchNormalization()(conv_5)
+    #conv_5 = Conv2D(16*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_5)
+    #conv_5 = BatchNormalization()(conv_5)
+    #conv_5 = concatenate([resblock(pool_4, 1024), conv_5], axis=3)
+    #conv_5 = Dropout(0.2)(conv_5)
+    #conv_5 = layers.GaussianNoise(0.1)(conv_5)
+
+    model = Model(inputs=[inputs], outputs=[conv_5, conv_44, conv_3, conv_2, conv_1])
+    return model
+
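Assuming the definitions above are in scope, a quick way to see what encoder_unet hands to the decoder (a sketch, using a 256x256 input for readability; the app itself uses size = 1024):

from tensorflow.keras.layers import Input

inp = Input((256, 256, 3))
enc = encoder_unet(inp)
for t in enc.outputs:
    print(t.shape)
# (None, 8, 8, 1024)     bottleneck (conv_5)
# (None, 16, 16, 512)    conv_44
# (None, 64, 64, 256)    conv_3
# (None, 128, 128, 128)  conv_2
# (None, 256, 256, 64)   conv_1
# Note the 4x resolution gap between conv_44 and conv_3; the decoder's extra
# up_stage_22 block below bridges it.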
+
+def decoder(inputs, input_tensor):
+    # Expansive path
+    gating_64 = gating_signal(inputs[0], 16*FILTER_NUM, True)
+    att_64 = attention_block(inputs[1], gating_64, 16*FILTER_NUM)
+    up_stage_2 = upsample(inputs[0], inputs[1])
+    #u6 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(inputs[0])
+    u6 = concatenate([up_stage_2, att_64], axis=3)
+    #u6 = concatenate([att_5, u6])
+    #conv_6 = Conv2D(512, (3, 3), activation='relu', padding='same')(u6)
+    #conv_6 = BatchNormalization()(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    #conv_6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    conv_6 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(u6)
+    conv_6 = BatchNormalization()(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    conv_6 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_6)
+    conv_6 = BatchNormalization()(conv_6)
+    #conv_6 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_6)
+    #conv_6 = BatchNormalization()(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    #conv_6 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_6)
+    #conv_6 = BatchNormalization()(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    conv_6 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_6)
+    conv_6 = BatchNormalization()(conv_6)
+    conv_6 = Dropout(0.2)(conv_6)
+
+    up_stage_22 = Conv2DTranspose(int(conv_6.shape[1]/2), (3, 3), strides=(2, 2), padding='same')(conv_6)
+    conv_66 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(up_stage_22)
+    conv_66 = BatchNormalization()(conv_66)
+    #conv_6 = Dropout(0.2)(conv_6)
+    #conv_66 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_66)
+    #conv_66 = BatchNormalization()(conv_66)
+    conv_66 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_66)
+    conv_66 = BatchNormalization()(conv_66)
+    #conv_6 = Dropout(0.2)(conv_6)
+    #conv_66 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_66)
+    #conv_66 = BatchNormalization()(conv_66)
+    #conv_6 = Dropout(0.2)(conv_6)
+    conv_66 = Conv2D(8*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_66)
+    conv_66 = BatchNormalization()(conv_66)
+    conv_66 = Dropout(0.2)(conv_66)
+
+    gating_128 = gating_signal(conv_66, 8*FILTER_NUM, True)
+    att_128 = attention_block(inputs[2], gating_128, 8*FILTER_NUM)
+    up_stage_3 = upsample(conv_66, inputs[2])
+    #u7 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv_6)
+    u7 = concatenate([up_stage_3, att_128], axis=3)
+    #conv_7 = Conv2D(256, (3, 3), activation='relu', padding='same')(u7)
+    #conv_7 = BatchNormalization()(conv_7)
+    #conv_7 = Dropout(0.2)(conv_7)
+    #conv_7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_7)
+    #conv_7 = Dropout(0.2)(conv_7)
+    conv_7 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(u7)
+    conv_7 = BatchNormalization()(conv_7)
+    #conv_7 = Dropout(0.2)(conv_7)
+    conv_7 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_7)
+    conv_7 = BatchNormalization()(conv_7)
+    #conv_7 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_7)
+    #conv_7 = BatchNormalization()(conv_7)
+    #conv_7 = Dropout(0.2)(conv_7)
+    conv_7 = Conv2D(4*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_7)
+    conv_7 = BatchNormalization()(conv_7)
+    conv_7 = Dropout(0.2)(conv_7)
+
+    gating_256 = gating_signal(conv_7, 4*FILTER_NUM, True)
+    att_256 = attention_block(inputs[3], gating_256, 4*FILTER_NUM)
+    up_stage_4 = upsample(conv_7, inputs[3])
+    #u8 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv_7)
+    u8 = concatenate([up_stage_4, att_256], axis=3)
+    #conv_8 = Conv2D(128, (3, 3), activation='relu', padding='same')(u8)
+    #conv_8 = BatchNormalization()(conv_8)
+    #conv_8 = Dropout(0.1)(conv_8)
+    conv_8 = Conv2D(2*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(u8)
+    conv_8 = BatchNormalization()(conv_8)
+    #conv_8 = Dropout(0.2)(conv_8)
+    #conv_8 = Conv2D(2*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(u8)
+    #conv_8 = BatchNormalization()(conv_8)
+    #conv_8 = Dropout(0.2)(conv_8)
+    conv_8 = Conv2D(2*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_8)
+    conv_8 = BatchNormalization()(conv_8)
+    conv_8 = Dropout(0.2)(conv_8)
+
+    gating_512 = gating_signal(conv_8, 2*FILTER_NUM, True)
+    att_512 = attention_block(inputs[4], gating_512, 2*FILTER_NUM)
+    up_stage_5 = upsample(conv_8, inputs[4])
+    #u9 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv_8)
+    u9 = concatenate([up_stage_5, att_512], axis=3)
+
+    conv_9 = Conv2D(1*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(u9)
+    conv_9 = BatchNormalization()(conv_9)
+    #conv_9 = Dropout(0.2)(conv_9)
+    conv_9 = Conv2D(1*filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv_9)
+    conv_9 = BatchNormalization()(conv_9)
+    conv_9 = Dropout(0.2)(conv_9)
+
+    model = Model(inputs=[input_tensor], outputs=[conv_9])
+    return model
+
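How the two halves fit together (a sketch of the same wiring unet_2 performs below, assuming the definitions above are in scope):

from tensorflow.keras.layers import Input

inp = Input((256, 256, 3))
enc = encoder_unet(inp)
dec = decoder(enc.output, inp)  # attention-gated expansive path over the five encoder outputs
print(dec.output.shape)         # (None, 256, 256, 64): raw features, no classification head yet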
+
+def autoencoder(n_classes=2, height=size, width=size, channels=3):
+    inputs = Input((height, width, channels))
+    # Contraction path
+    conv_1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
+    conv_1 = BatchNormalization()(conv_1)
+    conv_1 = Dropout(0.2)(conv_1)
+    conv_1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv_1)
+    conv_1 = BatchNormalization()(conv_1)
+    pool_1 = MaxPooling2D((2, 2))(conv_1)
+
+    conv_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool_1)
+    conv_2 = BatchNormalization()(conv_2)
+    conv_2 = Dropout(0.2)(conv_2)
+    conv_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_2)
+    conv_2 = BatchNormalization()(conv_2)
+    pool_2 = MaxPooling2D((2, 2))(conv_2)
+
+    conv_3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool_2)
+    conv_3 = BatchNormalization()(conv_3)
+    conv_3 = Dropout(0.2)(conv_3)
+    conv_3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_3)
+    conv_3 = BatchNormalization()(conv_3)
+    pool_3 = MaxPooling2D((2, 2))(conv_3)
+
+    conv_4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool_3)
+    conv_4 = BatchNormalization()(conv_4)
+    conv_4 = Dropout(0.2)(conv_4)
+    conv_4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_4)
+    conv_4 = BatchNormalization()(conv_4)
+    pool_4 = MaxPooling2D(pool_size=(2, 2))(conv_4)
+
+    #conv_5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool_4)
+    #conv_5 = BatchNormalization()(conv_5)
+    conv_5 = Dropout(0.1)(pool_4)
+
+    # Expansive path
+    u6 = UpSampling2D((2, 2))(conv_5)
+    #u6 = concatenate([att_5, u6])
+    conv_6 = Conv2D(256, (3, 3), activation='relu', padding='same')(u6)
+    conv_6 = BatchNormalization()(conv_6)
+    conv_6 = Dropout(0.2)(conv_6)
+    #conv_6 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    #conv_6 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv_6)
+    #conv_6 = BatchNormalization()(conv_6)
+    #conv_6 = Dropout(0.2)(conv_6)
+    conv_6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_6)
+    conv_6 = BatchNormalization()(conv_6)
+
+    """
+    u66 = UpSampling2D((2, 2))(conv_6)
+    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(u66)
+    conv_66 = BatchNormalization()(conv_66)
+    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(u66)
+    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(u66)
+    conv_66 = BatchNormalization()(conv_66)
+    conv_66 = Dropout(0.2)(conv_66)
+    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_66)
+    """
+
+    u7 = UpSampling2D((2, 2))(conv_6)
+    conv_7 = Conv2D(128, (3, 3), activation='relu', padding='same')(u7)
+    conv_7 = BatchNormalization()(conv_7)
+    conv_7 = Dropout(0.2)(conv_7)
+    #conv_7 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_7)
+    #conv_7 = Dropout(0.1)(conv_7)
+    #conv_7 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_7)
+    #conv_7 = BatchNormalization()(conv_7)
+    #conv_7 = Dropout(0.1)(conv_7)
+    conv_7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_7)
+    conv_7 = BatchNormalization()(conv_7)
+
+    u8 = UpSampling2D((2, 2))(conv_7)
+    conv_8 = Conv2D(64, (3, 3), activation='relu', padding='same')(u8)
+    conv_8 = BatchNormalization()(conv_8)
+    conv_8 = Dropout(0.2)(conv_8)
+    #conv_8 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_8)
+    #conv_8 = Dropout(0.2)(conv_8)
+    #conv_8 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_8)
+    #conv_8 = BatchNormalization()(conv_8)
+    #conv_8 = Dropout(0.2)(conv_8)
+    conv_8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_8)
+    conv_8 = BatchNormalization()(conv_8)
+
+    u9 = UpSampling2D((2, 2))(conv_8)
+    conv_9 = Conv2D(32, (3, 3), activation='relu', padding='same')(u9)
+    conv_9 = BatchNormalization()(conv_9)
+    conv_9 = Dropout(0.2)(conv_9)
+    #conv_9 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_9)
+    #conv_9 = Dropout(0.1)(conv_9)
+    #conv_9 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_9)
+    #conv_9 = BatchNormalization()(conv_9)
+    #conv_9 = Dropout(0.1)(conv_9)
+    conv_9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv_9)
+    conv_9 = BatchNormalization()(conv_9)
+
+    outputs = Conv2D(n_classes, (1, 1), activation='softmax')(conv_9)
+
+    model = Model(inputs=[inputs], outputs=[outputs])
+    return model
+
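Unlike unet_2 below, the plain autoencoder variant is self-contained and already ends in a softmax head. A hypothetical usage (sizes mine):

ae = autoencoder(n_classes=2, height=256, width=256, channels=3)
print(ae.output_shape)  # (None, 256, 256, 2)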
+
+"""
+gating_16 = gating_signal(stage_5, 8*FILTER_NUM, True)
+att_16 = attention_block(stage_4, stage_5, 8*FILTER_NUM)
+up_stage_1 = upsample(stage_5, stage_4)
+up_16 = layers.concatenate([up_stage_1, att_16], axis=axis)
+
+gating_32 = gating_signal(up_repeat_elem1, 4*FILTER_NUM, True)
+att_32 = attention_block(stage_3, gating_32, 4*FILTER_NUM)
+up_stage_2 = upsample(up_repeat_elem1, stage_3)
+up_32 = layers.concatenate([up_stage_2, att_32], axis=axis)
+
+gating_64 = gating_signal(up_repeat_elem2, 2*FILTER_NUM, True)
+att_64 = attention_block(stage_2, gating_64, 2*FILTER_NUM)
+up_stage_3 = upsample(up_repeat_elem2, stage_2)
+up_64 = layers.concatenate([up_stage_3, att_64], axis=axis)
+
+gating_128 = gating_signal(up_repeat_elem3, FILTER_NUM, True)
+att_128 = attention_block(stage_1, gating_128, FILTER_NUM)
+up_stage_4 = upsample(up_repeat_elem3, stage_1)
+up_128 = layers.concatenate([up_stage_4, att_128], axis=axis)
+
+gating_256 = gating_signal(up_repeat_elem4, FILTER_NUM, True)
+att_256 = attention_block(conv_1, gating_256, FILTER_NUM)
+up_stage_5 = upsample(up_repeat_elem4, conv_1)
+up_256 = layers.concatenate([up_stage_5, att_256], axis=axis)
+"""
+
+def unet_2(n_classes=2, height=size, width=size, channels=3, metrics=['accuracy']):
+    inputs = Input((height, width, channels))
+
+    encode = encoder_unet(inputs)
+    decode = decoder(encode.output, inputs)
+    #print(type(decode.output))
+    #print(decode.output.shape)
+
+    #encode_2 = encoder(decode.output, inputs)
+    #decode_2 = decoder(encode_2.output, inputs)
+    #outputs = decode.output
+    #print(encode_2.output.shape)
+    #u7 = UpSampling2D((2, 2))(encode_2.output)
+    #u7 = Conv2D(32, (3, 3), activation='relu', padding='same')(u7)
+    #u7 = UpSampling2D((2, 2))(u7)
+    #u7 = Conv2D(64, (3, 3), activation='relu', padding='same')(u7)
+    #u7 = UpSampling2D((2, 2))(u7)
+    #u7 = Conv2D(128, (3, 3), activation='relu', padding='same')(u7)
+    #u7 = UpSampling2D((2, 2))(u7)
+    outputs = decode.output
+    #outputs = Conv2D(n_classes, (1, 1), activation='softmax', padding='same', kernel_initializer='he_normal')(decode.output)
+    #outputs = tf.reshape(encode_2.output[0], [None, 16, 16, 256])
+    model = Model(inputs=[inputs], outputs=[outputs])
+
+    if n_classes <= 2:
+        model.compile(optimizer=Adam(learning_rate=1e-3), loss='binary_crossentropy', metrics=metrics)
+    elif n_classes > 2:
+        model.compile(optimizer=Adam(learning_rate=1e-3), loss='categorical_crossentropy', metrics=metrics)
+
+    #model.summary()
+
+    return model
+
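Note that unet_2 returns the raw 64-channel decoder features (the softmax head is commented out here and is only added in the ensemble wrapper below). A hypothetical usage, with sizes chosen by me:

m = unet_2(n_classes=2, height=256, width=256, channels=3)
print(m.output_shape)  # (None, 256, 256, 64)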
+
+def unet_ensemble(n_classes=2, height=64, width=64, channels=3, metrics=['accuracy']):
+    x = Input((height, width, channels))
+    #x = inputs
+
+    #augmented = data_augmentation(x)
+    #augmented_0 = data_augmentation_0(x)
+    #augmented_1 = data_augmentation_1(x)
+    #augmented_2 = data_augmentation_2(x)
+    #augmented_3 = data_augmentation_3(x)
+    #augmented_4 = data_augmentation_4(x)
+    #augmented_5 = data_augmentation_5(x)
+    #augmented = layers.GaussianNoise(0.1)(augmented)
+
+    #out_x = concatenate([augmented, augmented_0], axis=0)
+
+    #augmented = x
+    #BACKBONE = 'resnet152'
+    #BACKBONE = 'efficientnetb7'
+    #model5 = sm.Linknet(BACKBONE, encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #model10 = sm.Unet(BACKBONE,
+    #                  pyramid_block_filters=32,
+    #                  encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #BACKBONE = 'vgg16'
+    #model7 = sm.FPN(BACKBONE,
+    #                encoder_freeze=True,
+    #                pyramid_block_filters=16,
+    #                encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #BACKBONE = 'inceptionresnetv2'
+    #model8 = sm.FPN(BACKBONE, pyramid_block_filters=16, encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #BACKBONE = 'resnext50'
+    #BACKBONE = 'seresnet152'
+    #decode_filt = (256, 128, 64, 32, 16)
+    #BACKBONE = 'mobilenetv2'
+    #model10 = sm.FPN(BACKBONE, pyramid_block_filters=256, encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #model10_x1 = sm.Unet(BACKBONE, decoder_filters=decode_filt,
+    #                     decoder_block_type='upsampling',
+    #                     #decoder_block_type='transpose',
+    #                     encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #model10_x2 = sm.Linknet(BACKBONE, encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #BACKBONE = 'resnet18'
+    #BACKBONE = 'resnext50'
+    #BACKBONE = 'mobilenetv2'
+    #BACKBONE = 'efficientnetb7'
+    #model10 = sm.FPN(BACKBONE,
+    #                 encoder_freeze=True,
+    #                 pyramid_block_filters=16,
+    #                 encoder_weights='imagenet',
+    #                 classes=n_classes, activation='softmax')
+    #BACKBONE = 'vgg16'
+    #model7 = sm.FPN(BACKBONE,
+    #                pyramid_block_filters=512,
+    #                encoder_weights='imagenet', classes=n_classes, activation='softmax')
+    #model9 = create_cct_model(n_classes=n_classes, height=height, width=width, channels=n_channels)
+    #reshaped = tf.reshape(encoded_patches, [-1, 256, 256, 64])
+
+    #model7 = unet_2(n_classes=n_classes, height=height, width=width, channels=3)
+    model10 = unet_2(n_classes=n_classes, height=height, width=width, channels=3)
+    #model10_xx = unet_2(n_classes=n_classes, height=height, width=width, channels=3)
+    #model8 = unet_2(n_classes=n_classes,
+    #                height=height, width=width, channels=n_channels)(augmented)
+    ###model8_x = unet_2(n_classes=n_classes,
+    ###                  height=height, width=width, channels=n_channels)(x)
+
+    #model1 = get_model(inputs=x, n_classes=n_classes, height=height, width=width, channels=n_channels)
+    #model2 = DeeplabV3Plus(model_input=x, image_size=256, num_classes=n_classes)
+    #model4 = unet_2(inputs=x, n_classes=n_classes, height=height, width=width, channels=n_channels)
+    #model3 = swin_unet_2d_base(x, filter_num_begin, depth, stack_num_down, stack_num_up,
+    #                           patch_size, num_heads, window_size, num_mlp,
+    #                           shift_window=shift_window, name='swin_unet')
+    #print(model1.output.shape, model2.output.shape)
+    #model5.trainable = False
+    #model6.trainable = False
+
+    #out = model11(augmented)
+    #out = Conv2D(3, (3, 3), activation=activation_function, padding='same')(out)
+    #out = K.flatten(out)
+    #out = K.reshape(out, (-1, 256, 256, 1))
+    #out = model11(x)
+    #out = unet_2(inputs=augmented, n_classes=n_classes, height=height, width=width, channels=n_channels)
+
+    #quantize_model_7 = tfmot.quantization.keras.quantize_model
+    # q_aware stands for quantization aware.
+    #q_aware_model_7 = quantize_model(model7)
+    #quantize_model_11 = tfmot.quantization.keras.quantize_model
+    # q_aware stands for quantization aware.
+    #q_aware_model_11 = quantize_model(model11)
+
+    out = model10(x)
+    #out = layers.GaussianNoise(0.1+np.random.random()*0.4)(out)
+    #out = layers.GaussianNoise(0.1)(out)
+    #out = concatenate([q_aware_model_7(augmented), q_aware_model_11(augmented)], axis=3)
+    #out = concatenate([model6(augmented), model8(augmented), model6(x), model8(x)], axis=3)
+    #out = concatenate([model10_x1(augmented), model10_x1(x), model10_x1(augmented_0)], axis=3)
+    #out_7 = concatenate([model11(augmented), model7(augmented)], axis=3)
+
+    #out = concatenate([x, out], axis=3)
+    #out = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(out)
+    #out = Conv2D(3, (3, 3), activation=activation_function, padding='same')(out)
+    #out = model7(out)
+
+    #out = model10_x(attention_weights)
+    #model11 = Conv2D(32, (3, 3), activation=activation_function, padding='same')(model11)
+    #model7 = Conv2D(32, (3, 3), activation=activation_function, padding='same')(model7)
+    #out = concatenate([model10_x(x), model10_x(augmented), model10_x(augmented_0)], axis=3)
+    #out = concatenate([model7(x), model11(x),
+    #                   model7(augmented_0), model11(augmented_0),
+    #                   model7(augmented_1), model11(augmented_1),
+    #                   model7(augmented_2), model11(augmented_2),
+    #                   model7(augmented_3), model11(augmented_3),
+    #                   model7(augmented_4), model11(augmented_4),
+    #                   model7(augmented_5), model11(augmented_5)], axis=3)
+
+    #out = tf.keras.layers.PReLU()(out)
+    #out = Conv2D(64, (3, 3), activation=activation_function, padding='same')(out)
+    #out = BatchNormalization()(out)
+    #out = Dropout(0.2)(out)
+
+    #####out = hybrid_pool_layer((2,2))(out)
+
+    #a = tf.keras.layers.AveragePooling2D(padding='same')(out)
+    #a = Lambda(lambda xx: xx*alpha)(a)
+    #m = tf.keras.layers.MaxPooling2D(padding='same')(out)
+    #m = Lambda(lambda xx: xx*(1-alpha))(m)
+    #out = tf.keras.layers.Add()([a, m])
+    #out = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(out)
+
+    #out = layers.add([model1.output, model2.output])
+    #out = layers.multiply([model1.output, model2.output])
+    ##out = layers.add([model, encode.output])
+    ##out = layers.multiply([model, encode.output])
+
+    #out = Conv2D(128, (3, 3), activation=activation_function, padding='same')(out)
+    #out = BatchNormalization()(out)
+    #out = Conv2D(64, (3, 3), activation=activation_function, padding='same')(out)
+    #out = SpikingActivation("relu")(out)
+    #out = BatchNormalization()(out)
+    #out = Dropout(0.2)(out)
+    #out = Conv2D(32, (3, 3), activation=activation_function, padding='same')(out)
+    #out = BatchNormalization()(out)
+    #out = Conv2D(64, (3, 3), activation=activation_function, padding='same')(out)
+    #out = BatchNormalization()(out)
+    #out = Dropout(0.2)(out)
+    #out = tf.keras.layers.PReLU()(out)
+    #out = Conv2D(64, (3, 3), activation=activation_function, padding='same')(out)
+    #out = BatchNormalization()(out)
+    #out = Dropout(0.2)(out)
+    #out = tf.keras.layers.PReLU()(out)
+
+    #out = concatenate([conv_out_jump, out], axis=3)
+    #out = Conv2D(256, (3, 3), activation=activation_function, padding='same')(out)
+    #out = BatchNormalization()(out)
+    #out = Dropout(0.2)(out)
+
+    #out = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(out)
+    #out = UpSampling2D((2, 2))(out)
+
+    #out_list = []
+    #for i in range(1, 23):
+    #    outputs1 = Conv2D(n_classes-i, (1, 1), activation='softmax')(out)
+    #    out_list.append(outputs1)
+    #outputs2 = Conv2D(n_classes-1, (1, 1), activation='softmax')(out)
+    #outputs3 = Conv2D(n_classes-2, (1, 1), activation='softmax')(out)
+    #outputs4 = Conv2D(n_classes-3, (1, 1), activation='softmax')(out)
+    #outputs5 = Conv2D(n_classes-4, (1, 1), activation='softmax')(out)
+    #outputs6 = Conv2D(n_classes-5, (1, 1), activation='softmax')(out)
+    #outputs7 = Conv2D(n_classes-6, (1, 1), activation='softmax')(out)
+    #outputs8 = Conv2D(n_classes-7, (1, 1), activation='softmax')(out)
+    #outputs9 = Conv2D(n_classes-8, (1, 1), activation='softmax')(out)
+
+    #out_list = [outputs1, outputs2, outputs3, outputs4, outputs5, outputs6, outputs7, outputs8, outputs9]
+    #outputs = Conv2D(n_classes, (1, 1), activation='softmax')(encode.output)
+    #outputs = concatenate(out_list, axis=3)
+    #outputs = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(outputs)
+    outputs = Conv2D(n_classes, (1, 1), activation='softmax', padding='same')(out)
+
+    #model = Model(inputs=[inputs], outputs=[encode.output])
+    model = Model(inputs=[x], outputs=[outputs])
+    #model = Model(inputs=[model7.input, model11.input], outputs=[outputs])
+
+    if n_classes <= 2:
+        model.compile(optimizer=Adam(learning_rate=1e-3), loss='binary_crossentropy', metrics=metrics)
+    elif n_classes > 2:
+        model.compile(optimizer=Adam(learning_rate=1e-3), loss='categorical_crossentropy', metrics=metrics)
+
+    #if summary:
+    #    model.summary()
+
+    return model
+
+
+model = unet_ensemble(n_classes=n_classes, height=height, width=width, channels=n_channels)
+
+
 size = 1024
 pach_size = 256
 
 
@@ -127,7 +825,7 @@ def weighted_categorical_crossentropy(weights):
 # Load the model
 #model = tf.keras.models.load_model("model.h5", custom_objects={"jacard":jacard, "wcce":weighted_categorical_crossentropy})
 #model = tf.keras.models.load_model("model_2.h5", custom_objects={"jacard":jacard, "bce_dice":bce_dice})
-model = tf.keras.models.load_model("model_2_A (1).h5", custom_objects={"jacard":jacard, "bce_dice":bce_dice})
+model.load_weights("model_2_A (1).h5")
 
 # Create a user interface for the model
 my_app = gr.Blocks()
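
For reference, the two loading styles differ: tf.keras.models.load_model reconstructs a full model from the file (and therefore needs the custom objects), while Model.load_weights only copies weights into the already-built architecture and does not return a model, which is why the assignment was dropped above. A minimal sketch (file name from the commit; the built architecture must match the checkpoint):

model.load_weights("model_2_A (1).h5")  # in-place; returns None for HDF5 files, so do not reassign `model`
# vs. rebuilding everything from the saved file:
# model = tf.keras.models.load_model("model_2_A (1).h5",
#                                    custom_objects={"jacard": jacard, "bce_dice": bce_dice})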