NORLIE JHON MALAGDAO committed
Commit d9836f1 · verified · 1 Parent(s): 94a2a74

Update app.py

Files changed (1): app.py (+441 -136)
app.py CHANGED
@@ -1,17 +1,78 @@
 
  import gradio as gr
- import matplotlib.pyplot as plt
- import numpy as np
  import os
- import PIL
  import tensorflow as tf
  from tensorflow import keras
- from tensorflow.keras import layers
- from tensorflow.keras.models import Sequential
- from PIL import Image
- import gdown
- import zipfile
- import pathlib

  # Define the Google Drive shareable link
  gdrive_url = 'https://drive.google.com/file/d/1HjHYlQyRz5oWt8kehkt1TiOGRRlKFsv8/view?usp=drive_link'
@@ -41,7 +102,7 @@ except zipfile.BadZipFile:
  os.remove(local_zip_file)

  # Convert the extracted directory path to a pathlib.Path object
- data_dir = pathlib.Path(extracted_path)

  # Print the directory structure to debug
  for root, dirs, files in os.walk(extracted_path):
@@ -52,158 +113,402 @@ for root, dirs, files in os.walk(extracted_path):
      for f in files:
          print(f"{subindent}{f}")

  # Path to the dataset directory
- data_dir = pathlib.Path('extracted_files/Pest_Dataset')
- data_dir = pathlib.Path(data_dir)
-
- bees = list(data_dir.glob('bees/*'))
- print(bees[0])
- PIL.Image.open(str(bees[0]))
-
- batch_size = 32
- img_height = 180
- img_width = 180
-
- train_ds = tf.keras.utils.image_dataset_from_directory(
-     data_dir,
-     validation_split=0.2,
-     subset="training",
-     seed=123,
-     image_size=(img_height, img_width),
-     batch_size=batch_size)
-
- val_ds = tf.keras.utils.image_dataset_from_directory(
-     data_dir,
-     validation_split=0.2,
-     subset="validation",
-     seed=123,
-     image_size=(img_height, img_width),
-     batch_size=batch_size)
-
- class_names = train_ds.class_names
- print(class_names)
-
- AUTOTUNE = tf.data.AUTOTUNE
-
- train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
- val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
-
- normalization_layer = layers.Rescaling(1./255)
-
- normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
- image_batch, labels_batch = next(iter(normalized_ds))
- first_image = image_batch[0]
- # Notice the pixel values are now in `[0,1]`.
- print(np.min(first_image), np.max(first_image))
-
- num_classes = len(class_names)
-
- model = Sequential([
-     layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
-     layers.Conv2D(16, 3, padding='same', activation='relu'),
-     layers.MaxPooling2D(),
-     layers.Conv2D(32, 3, padding='same', activation='relu'),
-     layers.MaxPooling2D(),
-     layers.Conv2D(64, 3, padding='same', activation='relu'),
-     layers.MaxPooling2D(),
-     layers.Flatten(),
-     layers.Dense(128, activation='relu'),
-     layers.Dense(num_classes)
  ])

- model.compile(optimizer='adam',
-               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-               metrics=['accuracy'])

- model.summary()

- epochs = 10
  history = model.fit(
-     train_ds,
-     validation_data=val_ds,
-     epochs=epochs
  )

- acc = history.history['accuracy']
- val_acc = history.history['val_accuracy']

  loss = history.history['loss']
  val_loss = history.history['val_loss']

- epochs_range = range(epochs)

- plt.figure(figsize=(8, 8))
- plt.subplot(1, 2, 1)
- plt.plot(epochs_range, acc, label='Training Accuracy')
- plt.plot(epochs_range, val_acc, label='Validation Accuracy')
- plt.legend(loc='lower right')
- plt.title('Training and Validation Accuracy')

- plt.subplot(1, 2, 2)
- plt.plot(epochs_range, loss, label='Training Loss')
- plt.plot(epochs_range, val_loss, label='Validation Loss')
- plt.legend(loc='upper right')
- plt.title('Training and Validation Loss')
  plt.show()

- data_augmentation = keras.Sequential(
-     [
-         layers.RandomFlip("horizontal",
-                           input_shape=(img_height,
-                                        img_width,
-                                        3)),
-         layers.RandomRotation(0.1),
-         layers.RandomZoom(0.1),
-     ]
- )

- plt.figure(figsize=(10, 10))
- for images, _ in train_ds.take(1):
-     for i in range(9):
-         augmented_images = data_augmentation(images)
-         ax = plt.subplot(3, 3, i + 1)
-         plt.imshow(augmented_images[0].numpy().astype("uint8"))
-         plt.axis("off")
-
- model = Sequential([
-     data_augmentation,
-     layers.Rescaling(1./255),
-     layers.Conv2D(16, 3, padding='same', activation='relu'),
-     layers.MaxPooling2D(),
-     layers.Conv2D(32, 3, padding='same', activation='relu'),
-     layers.MaxPooling2D(),
-     layers.Conv2D(64, 3, padding='same', activation='relu'),
-     layers.MaxPooling2D(),
-     layers.Dropout(0.2),
-     layers.Flatten(),
-     layers.Dense(128, activation='relu'),
-     layers.Dense(num_classes, name="outputs")
- ])

- model.compile(optimizer='adam',
-               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-               metrics=['accuracy'])

- model.summary()

- epochs = 15
- history = model.fit(
-     train_ds,
-     validation_data=val_ds,
-     epochs=epochs
- )

  def predict_image(img):
      img = np.array(img)
-     img_resized = tf.image.resize(img, (180, 180))
      img_4d = tf.expand_dims(img_resized, axis=0)
      prediction = model.predict(img_4d)[0]
-     probabilities = tf.nn.softmax(prediction).numpy()
-     class_probabilities = {class_names[i]: probabilities[i] * 100 for i in range(len(class_names))}
-     return class_probabilities

  image = gr.Image()
  label = gr.Label(num_top_classes=1)

- # Define custom CSS for background image
- custom_css = """

@@ -1,17 +1,78 @@
+ # Import Data Science Libraries
  import gradio as gr
  import os
+ import gdown
+ import zipfile
+ import pandas as pd
+ from pathlib import Path
+ from PIL import Image, UnidentifiedImageError
+ import numpy as np
  import tensorflow as tf
+ from sklearn.model_selection import train_test_split
+ import itertools
+ import random
+
+ # Import visualization libraries
+ import matplotlib.pyplot as plt
+ import matplotlib.cm as cm
+ import cv2
+ import seaborn as sns
+
+ # Tensorflow Libraries
  from tensorflow import keras
+ from tensorflow.keras import layers, models
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
+ from tensorflow.keras.layers import Dense, Dropout
+ from tensorflow.keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
+ from tensorflow.keras.optimizers import Adam
+ from tensorflow.keras.applications import MobileNetV2
+ from tensorflow.keras import Model
+ from tensorflow.keras.layers.experimental import preprocessing
+ from keras.layers import Dense, Flatten, Dropout, BatchNormalization
+
+ # System libraries
+ from pathlib import Path
+ import os.path
+
+ # Metrics
+ from sklearn.metrics import classification_report, confusion_matrix
+
+ sns.set(style='darkgrid')
+
+ # Seed everything to reproduce results for future use cases
+ def seed_everything(seed=42):
+     # Seed value for TensorFlow
+     tf.random.set_seed(seed)
+
+     # Seed value for NumPy
+     np.random.seed(seed)
+
+     # Seed value for Python's random library
+     random.seed(seed)
+
+     # Force TensorFlow to use a single thread.
+     # Multiple threads are a potential source of non-reproducible results.
+     session_conf = tf.compat.v1.ConfigProto(
+         intra_op_parallelism_threads=1,
+         inter_op_parallelism_threads=1
+     )
+
+     # Make sure that TensorFlow uses a deterministic operation wherever possible
+     tf.compat.v1.set_random_seed(seed)
+
+     sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
+     tf.compat.v1.keras.backend.set_session(sess)
+
+ seed_everything()
+
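+ # (Sketch, assuming TF 2.7+) A native-TF2 alternative to the compat.v1 session
+ # plumbing above, shown as commented-out code rather than original content:
+ #   tf.keras.utils.set_random_seed(42)  # seeds Python, NumPy and TF together
+ #   tf.config.threading.set_intra_op_parallelism_threads(1)
+ #   tf.config.threading.set_inter_op_parallelism_threads(1)
+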
+ # Fetch helper functions. The original line used `!wget ...`, which is
+ # IPython/notebook shell syntax and raises a SyntaxError in a plain .py file;
+ # urllib.request is a standard-library equivalent.
+ import urllib.request
+ urllib.request.urlretrieve(
+     'https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/extras/helper_functions.py',
+     'helper_functions.py'
+ )
+
+ # Import series of helper functions for our notebook
+ from helper_functions import create_tensorboard_callback, plot_loss_curves, unzip_data, compare_historys, walk_through_dir, pred_and_plot
+
+ BATCH_SIZE = 32
+ TARGET_SIZE = (224, 224)

  # Define the Google Drive shareable link
  gdrive_url = 'https://drive.google.com/file/d/1HjHYlQyRz5oWt8kehkt1TiOGRRlKFsv8/view?usp=drive_link'

@@ -41,7 +102,7 @@ except zipfile.BadZipFile:
  os.remove(local_zip_file)

  # Convert the extracted directory path to a pathlib.Path object
+ data_dir = Path(extracted_path)

  # Print the directory structure to debug
  for root, dirs, files in os.walk(extracted_path):

@@ -52,158 +113,402 @@ for root, dirs, files in os.walk(extracted_path):
      for f in files:
          print(f"{subindent}{f}")

+ # Function to convert the directory path to a DataFrame
+ def convert_path_to_df(dataset):
+     image_dir = Path(dataset)
+
+     # Get filepaths and labels
+     filepaths = list(image_dir.glob(r'**/*.JPG')) + list(image_dir.glob(r'**/*.jpg')) + list(image_dir.glob(r'**/*.png')) + list(image_dir.glob(r'**/*.PNG'))
+
+     labels = list(map(lambda x: os.path.split(os.path.split(x)[0])[1], filepaths))
+
+     filepaths = pd.Series(filepaths, name='Filepath').astype(str)
+     labels = pd.Series(labels, name='Label')
+
+     # Concatenate filepaths and labels
+     image_df = pd.concat([filepaths, labels], axis=1)
+     return image_df
+
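+ # Usage note: image_df ends up with two columns, 'Filepath' and 'Label', one row
+ # per image, with the label taken from the parent directory name. The glob
+ # patterns above only match .jpg/.JPG/.png/.PNG, so any .jpeg files would be
+ # silently skipped.
+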
  # Path to the dataset directory
+ data_dir = Path('extracted_files/Pest_Dataset')
+ image_df = convert_path_to_df(data_dir)
+
+ # Check for corrupted images within the dataset
+ for img_p in data_dir.rglob("*.jpg"):
+     try:
+         img = Image.open(img_p)
+     except UnidentifiedImageError:
+         print(f"Corrupted image file: {img_p}")
+
+ # You can save the DataFrame to a CSV for further use
+ image_df.to_csv('image_dataset.csv', index=False)
+ print("DataFrame created and saved successfully!")
+
+ label_counts = image_df['Label'].value_counts()
+
+ plt.figure(figsize=(10, 6))
+ sns.barplot(x=label_counts.index, y=label_counts.values, alpha=0.8, palette='rocket')
+ plt.title('Distribution of Labels in Image Dataset', fontsize=16)
+ plt.xlabel('Label', fontsize=14)
+ plt.ylabel('Count', fontsize=14)
+ plt.xticks(rotation=45)
+ plt.show()
+
+ # Display 16 pictures from the dataset with their labels
+ random_index = np.random.randint(0, len(image_df), 16)
+ fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(10, 10),
+                          subplot_kw={'xticks': [], 'yticks': []})
+
+ for i, ax in enumerate(axes.flat):
+     ax.imshow(plt.imread(image_df.Filepath[random_index[i]]))
+     ax.set_title(image_df.Label[random_index[i]])
+ plt.tight_layout()
+ plt.show()
+
+ # Function to return a random image path from a given directory
+ def random_sample(directory):
+     images = [os.path.join(directory, img) for img in os.listdir(directory) if img.endswith(('.jpg', '.jpeg', '.png'))]
+     return random.choice(images)
+
+ # Function to compute the Error Level Analysis (ELA) of an image
+ def compute_ela_cv(path, quality):
+     temp_filename = 'temp.jpg'
+     orig = cv2.imread(path)
+     cv2.imwrite(temp_filename, orig, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
+     compressed = cv2.imread(temp_filename)
+     ela_image = cv2.absdiff(orig, compressed)
+     # Cast before scaling: multiplying a uint8 array by 10 wraps around modulo
+     # 256, which would make the clip below a no-op
+     ela_image = np.clip(ela_image.astype(np.int32) * 10, 0, 255).astype(np.uint8)
+     return ela_image
+
+ # View random sample from the dataset
+ p = random_sample('extracted_files/Pest_Dataset/beetle')
+ orig = cv2.imread(p)
+ orig = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB) / 255.0
+ init_val = 100
+ columns = 3
+ rows = 3
+
+ fig = plt.figure(figsize=(15, 10))
+ for i in range(1, columns * rows + 1):
+     quality = init_val - (i - 1) * 8
+     img = compute_ela_cv(path=p, quality=quality)
+     if i == 1:
+         img = orig.copy()
+     ax = fig.add_subplot(rows, columns, i)
+     ax.title.set_text(f'q: {quality}')
+     plt.imshow(img)
+ plt.show()
+
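+ # ELA highlights regions that respond differently to JPEG recompression at
+ # decreasing quality levels, so the grid above serves as a quick visual
+ # data-quality check on a sample image.
+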
+ # Separate into train and test data
+ train_df, test_df = train_test_split(image_df, test_size=0.2, shuffle=True, random_state=42)
+
+ train_generator = ImageDataGenerator(
+     preprocessing_function=tf.keras.applications.efficientnet_v2.preprocess_input,
+     validation_split=0.2
+ )
+
+ test_generator = ImageDataGenerator(
+     preprocessing_function=tf.keras.applications.efficientnet_v2.preprocess_input
+ )
+
+ # Split the data into three categories.
+ train_images = train_generator.flow_from_dataframe(
+     dataframe=train_df,
+     x_col='Filepath',
+     y_col='Label',
+     target_size=(224, 224),
+     color_mode='rgb',
+     class_mode='categorical',
+     batch_size=32,
+     shuffle=True,
+     seed=42,
+     subset='training'
+ )
+
+ val_images = train_generator.flow_from_dataframe(
+     dataframe=train_df,
+     x_col='Filepath',
+     y_col='Label',
+     target_size=(224, 224),
+     color_mode='rgb',
+     class_mode='categorical',
+     batch_size=32,
+     shuffle=True,
+     seed=42,
+     subset='validation'
+ )
+
+ test_images = test_generator.flow_from_dataframe(
+     dataframe=test_df,
+     x_col='Filepath',
+     y_col='Label',
+     target_size=(224, 224),
+     color_mode='rgb',
+     class_mode='categorical',
+     batch_size=32,
+     shuffle=False
+ )
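+
+ # Note: shuffle=False keeps the test predictions in test_df row order, which the
+ # classification report and prediction plots further down depend on.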
+
+ # Data Augmentation Step
+ augment = tf.keras.Sequential([
+     layers.experimental.preprocessing.Resizing(224, 224),
+     layers.experimental.preprocessing.Rescaling(1./255),
+     layers.experimental.preprocessing.RandomFlip("horizontal"),
+     layers.experimental.preprocessing.RandomRotation(0.1),
+     layers.experimental.preprocessing.RandomZoom(0.1),
+     layers.experimental.preprocessing.RandomContrast(0.1),
  ])
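+ # Caveat: Keras EfficientNetV2 application models include their own input
+ # normalization by default, so if this block is wired into the model the
+ # Rescaling(1./255) above may scale inputs a second time; worth verifying
+ # against the backbone's expected input range.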

+ # Load the pretrained model
+ pretrained_model = tf.keras.applications.efficientnet_v2.EfficientNetV2L(
+     input_shape=(224, 224, 3),
+     include_top=False,
+     weights='imagenet',
+     pooling='max'
+ )
+
+ pretrained_model.trainable = False
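+ # Freezing keeps the ImageNet weights fixed so only the new classification head
+ # below is trained; unfreezing some top blocks later is a common fine-tuning step.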
+
+ # Create checkpoint callback
+ checkpoint_path = "pests_cats_classification_model_checkpoint"
+ checkpoint_callback = ModelCheckpoint(checkpoint_path,
+                                       save_weights_only=True,
+                                       monitor="val_accuracy",
+                                       save_best_only=True)
+
+ # Setup EarlyStopping callback to stop training if the model's val_loss doesn't improve for 5 epochs
+ early_stopping = EarlyStopping(monitor="val_loss",  # watch the val loss metric
+                                patience=5,
+                                restore_best_weights=True)  # if val loss doesn't improve for 5 epochs in a row, stop training
+
+ inputs = pretrained_model.input
+ # Note: as written, this augmented branch is never connected to the head below
+ # (which attaches to pretrained_model.output directly), so the augmentation
+ # pipeline is effectively unused at train time.
+ x = augment(inputs)
+
+ # x = Dense(128, activation='relu')(pretrained_model.output)
+ # x = Dropout(0.45)(x)
+ # x = Dense(256, activation='relu')(x)
+ # x = Dropout(0.45)(x)
+
+ # Add new classification layers
+ x = Flatten()(pretrained_model.output)
+ x = Dense(256, activation='relu')(x)
+ x = Dropout(0.5)(x)
+ x = BatchNormalization()(x)
+ x = Dense(128, activation='relu')(x)
+ x = Dropout(0.5)(x)
+
+ outputs = Dense(12, activation='softmax')(x)
+
+ model = Model(inputs=inputs, outputs=outputs)
+
+ model.compile(
+     optimizer=Adam(0.00001),
+     loss='categorical_crossentropy',
+     metrics=['accuracy']
+ )
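+
+ # (Sketch) One way the augmentation pipeline could actually be wired in, left
+ # as commented-out code since it changes the model topology (nesting the
+ # backbone also changes how get_layer('top_conv') is reached for Grad-CAM below):
+ #   inputs = layers.Input(shape=(224, 224, 3))
+ #   x = augment(inputs)
+ #   x = pretrained_model(x, training=False)
+ #   ... then attach the Dense/Dropout head to this x instead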

  history = model.fit(
+     train_images,
+     steps_per_epoch=len(train_images),
+     validation_data=val_images,
+     validation_steps=len(val_images),
+     epochs=50,
+     callbacks=[
+         early_stopping,
+         create_tensorboard_callback("training_logs",
+                                     "pests_cats_classification"),
+         checkpoint_callback,
+     ]
  )

+
+ results = model.evaluate(test_images, verbose=0)
+
+ print("    Test Loss: {:.5f}".format(results[0]))
+ print("Test Accuracy: {:.2f}%".format(results[1] * 100))
+
+ accuracy = history.history['accuracy']
+ val_accuracy = history.history['val_accuracy']

  loss = history.history['loss']
  val_loss = history.history['val_loss']

+ epochs = range(len(accuracy))
+ plt.plot(epochs, accuracy, 'b', label='Training accuracy')
+ plt.plot(epochs, val_accuracy, 'r', label='Validation accuracy')
+ plt.title('Training and validation accuracy')
+ plt.legend()
+
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Training loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation loss')
+ plt.title('Training and validation loss')
+ plt.legend()
  plt.show()

+ # Predict the label of the test_images
+ pred = model.predict(test_images)
+ pred = np.argmax(pred, axis=1)
+
+ # Map the label
+ labels = train_images.class_indices
+ labels = dict((v, k) for k, v in labels.items())
+ pred = [labels[k] for k in pred]
+
+ # Display the result
+ print(f'The first 5 predictions: {pred[:5]}')
+
+ # Display 15 random pictures from the test set with their true and predicted labels
+ random_index = np.random.randint(0, len(test_df) - 1, 15)
+ fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(25, 15),
+                          subplot_kw={'xticks': [], 'yticks': []})
+
+ for i, ax in enumerate(axes.flat):
+     ax.imshow(plt.imread(test_df.Filepath.iloc[random_index[i]]))
+     if test_df.Label.iloc[random_index[i]] == pred[random_index[i]]:
+         color = "green"
+     else:
+         color = "red"
+     ax.set_title(f"True: {test_df.Label.iloc[random_index[i]]}\nPredicted: {pred[random_index[i]]}", color=color)
+ plt.tight_layout()
+ plt.show()

+ y_test = list(test_df.Label)
+ print(classification_report(y_test, pred))
+
+ report = classification_report(y_test, pred, output_dict=True)
+ df = pd.DataFrame(report).transpose()
+ print(df)  # a bare `df` expression only renders in a notebook, not in a script
+
+ from sklearn.metrics import confusion_matrix
+
+ # Assuming y_test contains the true labels and pred contains the predicted labels
+ cm = confusion_matrix(y_test, pred)
+ print(cm)
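+
+ # A quick visual of the matrix (a sketch using the seaborn import above; rows
+ # and columns follow sklearn's sorted-label order):
+ plt.figure(figsize=(10, 8))
+ sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
+             xticklabels=sorted(set(y_test)), yticklabels=sorted(set(y_test)))
+ plt.xlabel('Predicted label')
+ plt.ylabel('True label')
+ plt.show()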
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from tensorflow.keras.applications.efficientnet_v2 import preprocess_input
+ from tensorflow.keras.preprocessing import image
+ import tensorflow as tf
+ import cv2
+
+ def get_img_array(img_path, size):
+     # Load image and convert to a batched array of shape (1, H, W, 3)
+     img = image.load_img(img_path, target_size=size)
+     array = image.img_to_array(img)
+     array = np.expand_dims(array, axis=0)
+     return array
+
+ def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
+     # Create a model that maps the input image to the activations of the last conv layer
+     grad_model = tf.keras.models.Model(
+         [model.inputs], [model.get_layer(last_conv_layer_name).output, model.output]
+     )
+     # Compute the gradient of the top predicted class for the input image
+     with tf.GradientTape() as tape:
+         last_conv_layer_output, preds = grad_model(img_array)
+         if pred_index is None:
+             pred_index = tf.argmax(preds[0])
+         class_channel = preds[:, pred_index]
+
+     # Gradient of the predicted class with respect to the output feature map of the last conv layer
+     grads = tape.gradient(class_channel, last_conv_layer_output)
+
+     # Vector where each entry is the mean intensity of the gradient over a specific feature map channel
+     pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
+
+     # Multiply each channel in the feature map array by the "importance" of the channel
+     last_conv_layer_output = last_conv_layer_output[0]
+     heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
+     heatmap = tf.squeeze(heatmap)
+
+     # For visualization purposes, normalize the heatmap between 0 & 1
+     heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
+     return heatmap.numpy()
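+
+ # Example use (a sketch; 'some_pest.jpg' is a placeholder path, and 'top_conv'
+ # is the last conv layer name configured further down):
+ #   img_array = preprocess_input(get_img_array('some_pest.jpg', size=(224, 224)))
+ #   heatmap = make_gradcam_heatmap(img_array, model, 'top_conv')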
+
+ def save_and_display_gradcam(img_path, heatmap, alpha=0.4):
+     # Load the original image
+     img = cv2.imread(img_path)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+     # Rescale heatmap to a range 0-255
+     heatmap = np.uint8(255 * heatmap)
+
+     # Use jet colormap to colorize the heatmap
+     jet = cm.get_cmap("jet")
+
+     # Use RGB values of the colormap
+     jet_colors = jet(np.arange(256))[:, :3]
+     jet_heatmap = jet_colors[heatmap]
+
+     # Create an image with RGB colorized heatmap
+     jet_heatmap = tf.keras.preprocessing.image.array_to_img(jet_heatmap)
+     jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
+     jet_heatmap = tf.keras.preprocessing.image.img_to_array(jet_heatmap)
+
+     # Superimpose the heatmap on the original image
+     superimposed_img = jet_heatmap * alpha + img
+     superimposed_img = tf.keras.preprocessing.image.array_to_img(superimposed_img)
+
+     # Save the superimposed image
+     cam_path = "cam.jpg"
+     superimposed_img.save(cam_path)
+     return cam_path
+
+ import matplotlib.cm as cm
+ import pandas as pd
+
+ # Assuming you have test_df, model, and other variables defined
+ random_index = np.random.randint(0, len(test_df), 15)
+ img_size = (224, 224)
+ last_conv_layer_name = 'top_conv'
+
+ fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(15, 10),
+                          subplot_kw={'xticks': [], 'yticks': []})
+
+ for i, ax in enumerate(axes.flat):
+     img_path = test_df.Filepath.iloc[random_index[i]]
+     img_array = preprocess_input(get_img_array(img_path, size=img_size))
+     heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)
+     cam_path = save_and_display_gradcam(img_path, heatmap)
+     ax.imshow(plt.imread(cam_path))
+     ax.set_title(f"True: {test_df.Label.iloc[random_index[i]]}\nPredicted: {pred[random_index[i]]}")
+ plt.tight_layout()
+ plt.show()
+
+ class_names = train_images.class_indices
+ class_names = {v: k for k, v in class_names.items()}

+ # Gradio Interface for Prediction
  def predict_image(img):
      img = np.array(img)
+     img_resized = tf.image.resize(img, (TARGET_SIZE[0], TARGET_SIZE[1]))
      img_4d = tf.expand_dims(img_resized, axis=0)
      prediction = model.predict(img_4d)[0]
+     return {class_names[i]: float(prediction[i]) for i in range(len(class_names))}

+ # Launch Gradio interface
  image = gr.Image()
  label = gr.Label(num_top_classes=1)

+ gr.Interface(
+     fn=predict_image,
+     inputs=image,
+     outputs=label,
+     title="Welcome to Agricultural Pest Image Classification",
+     description="The image dataset used was obtained from Kaggle and contains 12 different types of agricultural pests: Ants, Bees, Beetles, Caterpillars, Earthworms, Earwigs, Grasshoppers, Moths, Slugs, Snails, Wasps, and Weevils.",
+ ).launch(debug=True)
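+ # Note: num_top_classes=1 shows only the single best class in the UI; a larger
+ # value (e.g. gr.Label(num_top_classes=3)) would also surface runner-up scores.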