NORLIE JHON MALAGDAO committed on
Commit bbd4a95 · verified · 1 Parent(s): 5d28c8b

Update app.py

Files changed (1):
  1. app.py +115 -26
app.py CHANGED
@@ -2,13 +2,21 @@ import gradio as gr
 import matplotlib.pyplot as plt
 import numpy as np
 import os
+import PIL
 import tensorflow as tf
-from tensorflow.keras import layers, Sequential
-from tensorflow.keras.models import load_model
+
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.models import Sequential
+
+
+from PIL import Image
 import gdown
 import zipfile
+
 import pathlib
 
+
 # Define the Google Drive shareable link
 gdrive_url = 'https://drive.google.com/file/d/1HjHYlQyRz5oWt8kehkt1TiOGRRlKFsv8/view?usp=drive_link'
 
@@ -37,37 +45,63 @@ except zipfile.BadZipFile:
 os.remove(local_zip_file)
 
 # Convert the extracted directory path to a pathlib.Path object
-data_dir = pathlib.Path(extracted_path) / 'Pest_Dataset'
+data_dir = pathlib.Path(extracted_path)
+
+# Print the directory structure to debug
+for root, dirs, files in os.walk(extracted_path):
+    level = root.replace(extracted_path, '').count(os.sep)
+    indent = ' ' * 4 * (level)
+    print(f"{indent}{os.path.basename(root)}/")
+    subindent = ' ' * 4 * (level + 1)
+    for f in files:
+        print(f"{subindent}{f}")
+
+
+# Path to the dataset directory
+data_dir = pathlib.Path('extracted_files/Pest_Dataset')
+data_dir = pathlib.Path(data_dir)
+
+
+bees = list(data_dir.glob('bees/*'))
+print(bees[0])
+PIL.Image.open(str(bees[0]))
+
+
+bees = list(data_dir.glob('bees/*'))
+print(bees[0])
+PIL.Image.open(str(bees[0]))
+
 
-# Verify if the path exists
-assert data_dir.exists(), f"Path {data_dir} does not exist."
 
-# Load the dataset
-img_height, img_width = 180, 180
 batch_size = 32
+img_height = 180
+img_width = 180
 
-train_ds = tf.keras.preprocessing.image_dataset_from_directory(
+
+train_ds = tf.keras.utils.image_dataset_from_directory(
     data_dir,
     validation_split=0.2,
     subset="training",
     seed=123,
     image_size=(img_height, img_width),
-    batch_size=batch_size
-)
+    batch_size=batch_size)
 
-val_ds = tf.keras.preprocessing.image_dataset_from_directory(
+
+val_ds = tf.keras.utils.image_dataset_from_directory(
     data_dir,
     validation_split=0.2,
     subset="validation",
     seed=123,
     image_size=(img_height, img_width),
-    batch_size=batch_size
-)
+    batch_size=batch_size)
+
 
 class_names = train_ds.class_names
 print(class_names)
 
-# Plot some images from the training dataset
+
+import matplotlib.pyplot as plt
+
 plt.figure(figsize=(10, 10))
 for images, labels in train_ds.take(1):
     for i in range(9):
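Note on the hunk above: the revised file hard-codes `data_dir = pathlib.Path('extracted_files/Pest_Dataset')` and drops the old existence assert, so a missing or renamed extraction directory now only surfaces later as a vaguer error inside `image_dataset_from_directory`. A minimal fail-fast guard, sketched under the assumption that the layout printed by the debug walk really is `extracted_files/Pest_Dataset`:

    import pathlib

    # Sketch: verify the hard-coded dataset path before building datasets.
    data_dir = pathlib.Path('extracted_files/Pest_Dataset')
    if not data_dir.exists():
        raise FileNotFoundError(
            f"Expected dataset at {data_dir}; re-check the zip extraction step.")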
@@ -76,16 +110,57 @@ for images, labels in train_ds.take(1):
         plt.title(class_names[labels[i]])
         plt.axis("off")
 
-# Define data augmentation
-data_augmentation = Sequential(
+
+
+for image_batch, labels_batch in train_ds:
+    print(image_batch.shape)
+    print(labels_batch.shape)
+    break
+
+
+AUTOTUNE = tf.data.AUTOTUNE
+
+train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
+val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
+
+
+normalization_layer = layers.Rescaling(1./255)
+
+
+
+
+
+
+normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
+image_batch, labels_batch = next(iter(normalized_ds))
+first_image = image_batch[0]
+# Notice the pixel values are now in `[0,1]`.
+print(np.min(first_image), np.max(first_image))
+
+
+
+
+
+
+
+num_classes = len(class_names)
+
+
+
+
+data_augmentation = keras.Sequential(
     [
-        layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
+        layers.RandomFlip("horizontal",
+                          input_shape=(img_height,
+                                       img_width,
+                                       3)),
         layers.RandomRotation(0.1),
         layers.RandomZoom(0.1),
     ]
 )
 
-# Plot augmented images
+
+
 plt.figure(figsize=(10, 10))
 for images, _ in train_ds.take(1):
     for i in range(9):
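The `data_augmentation` pipeline added above only perturbs inputs while training: Keras random-preprocessing layers such as `RandomFlip` and `RandomRotation` act as the identity when called with `training=False`, which is what Keras passes down during `model.predict`. A small sketch, assuming the `data_augmentation` object defined in the hunk:

    import tensorflow as tf

    images = tf.zeros([1, 180, 180, 3])                    # dummy batch
    perturbed = data_augmentation(images, training=True)   # randomly flipped/rotated/zoomed
    unchanged = data_augmentation(images, training=False)  # identity pass-through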
@@ -94,11 +169,12 @@ for images, _ in train_ds.take(1):
         plt.imshow(augmented_images[0].numpy().astype("uint8"))
         plt.axis("off")
 
-# Define the model
-num_classes = len(class_names)
+
+
+
 model = Sequential([
     data_augmentation,
-    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
+    layers.Rescaling(1./255),
     layers.Conv2D(16, 3, padding='same', activation='relu'),
     layers.MaxPooling2D(),
     layers.Conv2D(32, 3, padding='same', activation='relu'),
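Dropping `input_shape` from `Rescaling` works here because the preceding `data_augmentation` block already declares `input_shape=(img_height, img_width, 3)` on its `RandomFlip` layer; keeping `Rescaling(1./255)` inside the model also means raw `[0, 255]` batches from `image_dataset_from_directory` can be fed in directly. A quick check, sketched against the `train_ds` defined earlier:

    import tensorflow as tf

    # image_dataset_from_directory yields float32 pixels in [0, 255];
    # the in-model Rescaling layer maps them to [0, 1] in the forward pass.
    for image_batch, _ in train_ds.take(1):
        print(image_batch.dtype,
              float(tf.reduce_min(image_batch)),
              float(tf.reduce_max(image_batch)))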
@@ -108,16 +184,21 @@ model = Sequential([
     layers.Dropout(0.2),
     layers.Flatten(),
     layers.Dense(128, activation='relu'),
-    layers.Dense(num_classes, activation='softmax')
+    layers.Dense(num_classes, name="outputs")
 ])
 
+
+
+
 model.compile(optimizer='adam',
-              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])
 
+
 model.summary()
 
-# Train the model
+
+
 epochs = 15
 history = model.fit(
     train_ds,
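Removing the `softmax` activation and compiling with `from_logits=True` is the numerically stabler pairing: the loss applies a fused log-softmax internally, and the `Dense(num_classes)` head now emits raw logits. The flip side is that `model.predict` no longer returns probabilities; turning logits into class confidences takes an explicit softmax, e.g.:

    import tensorflow as tf

    logits = model.predict(img_4d)[0]       # shape (num_classes,), unnormalized
    probs = tf.nn.softmax(logits).numpy()   # non-negative, sums to 1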
@@ -125,10 +206,11 @@ history = model.fit(
     epochs=epochs
 )
 
-# Define the Gradio interface
+
+
 def predict_image(img):
     img = np.array(img)
-    img_resized = tf.image.resize(img, (img_height, img_width))
+    img_resized = tf.image.resize(img, (180, 180))
     img_4d = tf.expand_dims(img_resized, axis=0)
     prediction = model.predict(img_4d)[0]
     return {class_names[i]: float(prediction[i]) for i in range(len(class_names))}
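Because of the logits change above, `predict_image` as committed hands raw logits to the Gradio label output, so the displayed "confidences" can be negative or exceed 1. A logits-aware variant, sketched with the same names and the hard-coded 180x180 resize from this hunk:

    def predict_image(img):
        img = np.array(img)
        img_resized = tf.image.resize(img, (180, 180))
        img_4d = tf.expand_dims(img_resized, axis=0)
        logits = model.predict(img_4d)[0]
        # Softmax converts the logits into the per-class probabilities
        # that a gr.Label output expects.
        probs = tf.nn.softmax(logits).numpy()
        return {class_names[i]: float(probs[i]) for i in range(len(class_names))}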
@@ -155,3 +237,10 @@ gr.Interface(
     description="The image data set used was obtained from Kaggle and has a collection of 12 different types of agricultural pests: Ants, Bees, Beetles, Caterpillars, Earthworms, Earwigs, Grasshoppers, Moths, Slugs, Snails, Wasps, and Weevils",
     css=custom_css
 ).launch(debug=True)
+
+
+
+
+
+
+