ANCKEM committed (verified)
Commit 4ee22a5 · Parent(s): 510db66

Update app.py

Files changed (1):
  1. app.py +44 -18
app.py CHANGED
@@ -52,7 +52,6 @@ for root, dirs, files in os.walk(extracted_path):
 
 # Path to the dataset directory
 data_dir = pathlib.Path('extracted_files/Pest_Dataset')
-data_dir = pathlib.Path(data_dir)
 
 image_count = len(list(data_dir.glob('*/*.jpg')))
 print(image_count)
@@ -60,7 +59,6 @@ print(image_count)
 bees = list(data_dir.glob('bees/*'))
 print(bees[0])
 PIL.Image.open(str(bees[0]))
-
 batch_size = 32
 img_height = 180
 img_width = 180
@@ -73,7 +71,6 @@ train_ds = tf.keras.utils.image_dataset_from_directory(
     image_size=(img_height, img_width),
     batch_size=batch_size)
 
-
 val_ds = tf.keras.utils.image_dataset_from_directory(
     data_dir,
     validation_split=0.2,
@@ -82,7 +79,6 @@ val_ds = tf.keras.utils.image_dataset_from_directory(
     image_size=(img_height, img_width),
     batch_size=batch_size)
 
-
 class_names = train_ds.class_names
 print(class_names)
 
@@ -94,7 +90,6 @@ for images, labels in train_ds.take(1):
         plt.title(class_names[labels[i]])
         plt.axis("off")
 
-
 for image_batch, labels_batch in train_ds:
     print(image_batch.shape)
     print(labels_batch.shape)
@@ -105,7 +100,6 @@ AUTOTUNE = tf.data.AUTOTUNE
 train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
 val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
 
-
 normalization_layer = layers.Rescaling(1./255)
 
 normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
@@ -122,6 +116,7 @@ data_augmentation = keras.Sequential(
                           3)),
         layers.RandomRotation(0.1),
         layers.RandomZoom(0.1),
+        layers.RandomContrast(0.1),
     ]
 )
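Note: the commit extends the augmentation block with a RandomContrast layer, but the visible hunks do not show where data_augmentation is applied. A minimal sketch of one common way to use such a block, mapped on the fly over the training pipeline (the layer and dataset names come from the hunks above; the placement itself is an assumption, not part of this commit):

# Sketch only, not part of the commit: apply the augmentation block during training
# so each epoch sees randomly flipped/rotated/zoomed/contrast-adjusted images.
augmented_train_ds = train_ds.map(
    lambda x, y: (data_augmentation(x, training=True), y),
    num_parallel_calls=tf.data.AUTOTUNE)

Alternatively, the same Sequential block can be placed as the first layer of the model, so augmentation only runs when training=True.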
 
@@ -133,7 +128,6 @@ for images, _ in train_ds.take(1):
         plt.imshow(augmented_images[0].numpy().astype("uint8"))
         plt.axis("off")
 
-
 from tensorflow.keras.applications import EfficientNetB0
 
 base_model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))
@@ -149,7 +143,6 @@ x = keras.layers.GlobalAveragePooling2D()(x)
 x = keras.layers.Dropout(0.2)(x)
 outputs = keras.layers.Dense(len(class_names), activation='softmax')(x)
 
-
 # Compile the model
 model = keras.Model(inputs, outputs)
 model.compile(optimizer='adam',
@@ -190,17 +183,51 @@ plt.legend(loc='upper right')
 plt.title('Training and Validation Loss')
 plt.show()
 
+test_ds = tf.keras.utils.image_dataset_from_directory(
+    data_dir,
+    validation_split=0.2,
+    subset="validation",
+    seed=123,
+    image_size=(img_height, img_width),
+    batch_size=batch_size)
 
-# Evaluate the model on the validation dataset
-results = model.evaluate(val_ds, verbose=0)
+results = model.evaluate(test_ds, verbose=0)
 
-print("Validation Loss: {:.5f}".format(results[0]))
-print("Validation Accuracy: {:.2f}%".format(results[1] * 100))
+print("Test Loss: {:.5f}".format(results[0]))
+print("Test Accuracy: {:.2f}%".format(results[1] * 100))
 
 
-import gradio as gr
-import numpy as np
-import tensorflow as tf
+# Metrics
+y_true = []
+y_pred = []
+
+for images, labels in test_ds:
+    y_true.extend(labels.numpy())
+    preds = model.predict(images)
+    y_pred.extend(np.argmax(preds, axis=1))
+
+from sklearn.metrics import classification_report, confusion_matrix
+print(classification_report(y_true, y_pred, target_names=class_names))
+
+import pandas as pd
+report = classification_report(y_true, y_pred, target_names=class_names, output_dict=True)
+df = pd.DataFrame(report).transpose()
+print(df)
+
+def make_confusion_matrix(y_true, y_pred, labels):
+    cm = confusion_matrix(y_true, y_pred)
+    fig, ax = plt.subplots(figsize=(10, 8))
+    cax = ax.matshow(cm, cmap=plt.cm.Blues)
+    plt.title('Confusion Matrix')
+    fig.colorbar(cax)
+    ax.set_xticklabels([''] + labels, rotation=90)
+    ax.set_yticklabels([''] + labels)
+    plt.xlabel('Predicted')
+    plt.ylabel('True')
+    plt.show()
+
+make_confusion_matrix(y_true, y_pred, class_names)
+
 
 def predict_image(img):
     img = np.array(img)
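Note: the added make_confusion_matrix helper assigns tick labels against matplotlib's default tick positions, which can misalign or warn on newer matplotlib versions. An equivalent sketch (not part of the commit) using scikit-learn's ConfusionMatrixDisplay, which places the class labels itself:

# Sketch only, not part of the commit: the same confusion matrix via scikit-learn's
# display helper, reusing y_true, y_pred and class_names from the added code above.
from sklearn.metrics import ConfusionMatrixDisplay

cm = confusion_matrix(y_true, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=class_names)
disp.plot(cmap=plt.cm.Blues, xticks_rotation=90)
plt.title('Confusion Matrix')
plt.show()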
@@ -209,9 +236,8 @@ def predict_image(img):
     prediction = model.predict(img_4d)[0]
     return {class_names[i]: float(prediction[i]) for i in range(len(class_names))}
 
-image = gr.Image()
-label = gr.Label(num_top_classes=1)
-
+image = gr.Image(type="pil")
+label = gr.Label(num_top_classes=12)
 
 # Define custom CSS for background image
 custom_css = """
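Note: the visible diff stops before the Gradio interface is built; the updated image and label components and the custom_css string are defined but not wired together here. A minimal sketch of how they could plug into a standard gr.Interface (the title and overall layout are assumptions, not taken from this commit):

# Sketch only, not part of the commit: one plausible wiring of the updated components.
demo = gr.Interface(
    fn=predict_image,   # prediction function defined above
    inputs=image,       # gr.Image(type="pil")
    outputs=label,      # gr.Label(num_top_classes=12)
    css=custom_css,     # CSS string defined below in app.py
    title="Pest Classification")  # hypothetical placeholder title
demo.launch()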
 