NORLIE JHON MALAGDAO committed
Commit e5b5b7e · verified · 1 Parent(s): aab5446

Update app.py

Files changed (1): app.py (+54 −120)
app.py CHANGED
@@ -1,59 +1,36 @@
  import os
- import zipfile
- import gdown
- import pathlib
  import tensorflow as tf
  from tensorflow import keras
  from tensorflow.keras import layers
  from tensorflow.keras.models import Sequential
- import matplotlib.pyplot as plt
- import gradio as gr
- import numpy as np

- # Define the Google Drive shareable link
  gdrive_url = 'https://drive.google.com/file/d/1HjHYlQyRz5oWt8kehkt1TiOGRRlKFsv8/view?usp=drive_link'
-
- # Extract the file ID from the URL
  file_id = gdrive_url.split('/d/')[1].split('/view')[0]
  direct_download_url = f'https://drive.google.com/uc?id={file_id}'
-
- # Define the local filename to save the ZIP file
  local_zip_file = 'file.zip'
-
- # Download the ZIP file
  gdown.download(direct_download_url, local_zip_file, quiet=False)
-
- # Directory to extract files
  extracted_path = 'extracted_files'
-
- # Verify if the downloaded file is a ZIP file and extract it
  try:
      with zipfile.ZipFile(local_zip_file, 'r') as zip_ref:
          zip_ref.extractall(extracted_path)
      print("Extraction successful!")
  except zipfile.BadZipFile:
      print("Error: The downloaded file is not a valid ZIP file.")
-
- # Optionally, you can delete the ZIP file after extraction
  os.remove(local_zip_file)

- # Convert the extracted directory path to a pathlib.Path object
- data_dir = pathlib.Path('extracted_files/Pest_Dataset')
-
- # Verify the directory structure
- for root, dirs, files in os.walk(extracted_path):
-     level = root.replace(extracted_path, '').count(os.sep)
-     indent = ' ' * 4 * (level)
-     print(f"{indent}{os.path.basename(root)}/")
-     subindent = ' ' * 4 * (level + 1)
-     for f in files:
-         print(f"{subindent}{f}")
-
- # Set image dimensions and batch size
  img_height, img_width = 180, 180
  batch_size = 32
-
- # Create training and validation datasets
  train_ds = tf.keras.preprocessing.image_dataset_from_directory(
      data_dir,
      validation_split=0.2,
@@ -62,7 +39,6 @@ train_ds = tf.keras.preprocessing.image_dataset_from_directory(
      image_size=(img_height, img_width),
      batch_size=batch_size
  )
-
  val_ds = tf.keras.preprocessing.image_dataset_from_directory(
      data_dir,
      validation_split=0.2,
@@ -71,92 +47,55 @@ val_ds = tf.keras.preprocessing.image_dataset_from_directory(
      image_size=(img_height, img_width),
      batch_size=batch_size
  )
-
  class_names = train_ds.class_names
- print(class_names)
-
- # Display some sample images
- plt.figure(figsize=(10, 10))
- for images, labels in train_ds.take(1):
-     for i in range(9):
-         ax = plt.subplot(3, 3, i + 1)
-         plt.imshow(images[i].numpy().astype("uint8"))
-         plt.title(class_names[labels[i]])
-         plt.axis("off")

- # Enhanced data augmentation
  data_augmentation = keras.Sequential(
      [
          layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
-         layers.RandomRotation(0.2),
-         layers.RandomZoom(0.2),
-         layers.RandomContrast(0.2),
          layers.RandomBrightness(0.2),
      ]
  )

- # Display augmented images
- plt.figure(figsize=(10, 10))
- for images, _ in train_ds.take(1):
-     for i in range(9):
-         augmented_images = data_augmentation(images)
-         ax = plt.subplot(3, 3, i + 1)
-         plt.imshow(augmented_images[0].numpy().astype("uint8"))
-         plt.axis("off")
-
- # Define a deeper CNN model with more regularization techniques
  num_classes = len(class_names)
- model = Sequential()
-
- model.add(data_augmentation)
- model.add(layers.Rescaling(1./255))
-
- model.add(layers.Conv2D(32, 3, padding='same', activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
- model.add(layers.BatchNormalization())
- model.add(layers.MaxPooling2D())
-
- model.add(layers.Conv2D(64, 3, padding='same', activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
- model.add(layers.BatchNormalization())
- model.add(layers.MaxPooling2D())
-
- model.add(layers.Conv2D(128, 3, padding='same', activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
- model.add(layers.BatchNormalization())
- model.add(layers.MaxPooling2D())
-
- model.add(layers.Conv2D(256, 3, padding='same', activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
- model.add(layers.BatchNormalization())
- model.add(layers.MaxPooling2D())
-
- model.add(layers.Conv2D(512, 3, padding='same', activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
- model.add(layers.BatchNormalization())
- model.add(layers.MaxPooling2D())
-
- model.add(layers.Dropout(0.5))
- model.add(layers.Flatten())
-
- model.add(layers.Dense(256, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
- model.add(layers.Dropout(0.5))
-
- model.add(layers.Dense(num_classes, activation='softmax', name="outputs"))
-
- model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-4),
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                metrics=['accuracy'])

  model.summary()

- # Implement early stopping
- from tensorflow.keras.callbacks import EarlyStopping
-
- early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
-
  # Train the model
- epochs = 30
  history = model.fit(
      train_ds,
      validation_data=val_ds,
      epochs=epochs,
-     callbacks=[early_stopping]
  )

  # Define category descriptions
@@ -175,37 +114,32 @@ category_descriptions = {
      "Weevils": "Weevils are a type of beetle with a long snout, known for being pests to crops and stored grains."
  }

- # Define the prediction function
  def predict_image(img):
      img = np.array(img)
      img_resized = tf.image.resize(img, (180, 180))
      img_4d = tf.expand_dims(img_resized, axis=0)
      prediction = model.predict(img_4d)[0]
-     predicted_class = np.argmax(prediction)
-     predicted_label = class_names[predicted_class]
-     predicted_description = category_descriptions[predicted_label]
-     return {predicted_label: f"{float(prediction[predicted_class]):.2f} - {predicted_description}"}
-
- # Set up Gradio interface
  image = gr.Image()
- label = gr.Label(num_top_classes=1)
-
- # Define custom CSS for background image
  custom_css = """
- body {
-     background-image: url('extracted_files/Pest_Dataset/bees/bees (444).jpg');
-     background-size: cover;
-     background-repeat: no-repeat;
-     background-attachment: fixed;
-     color: white;
- }
  """

  gr.Interface(
      fn=predict_image,
      inputs=image,
      outputs=label,
-     title="Welcome to Agricultural Pest Image Classification",
-     description="The image data set used was obtained from Kaggle and has a collection of 12 different types of agricultural pests: Ants, Bees, Beetles, Caterpillars, Earthworms, Earwigs, Grasshoppers, Moths, Slugs, Snails, Wasps, and Weevils",
      css=custom_css
  ).launch(debug=True)
app.py after the change (added lines marked with +; unchanged lines outside the diff hunks are omitted):

+ import gradio as gr
+ import matplotlib.pyplot as plt
+ import numpy as np
  import os
+ import PIL
  import tensorflow as tf
  from tensorflow import keras
  from tensorflow.keras import layers
  from tensorflow.keras.models import Sequential
+ from PIL import Image
+ import gdown
+ import zipfile
+ import pathlib

+ # Download and extract dataset
  gdrive_url = 'https://drive.google.com/file/d/1HjHYlQyRz5oWt8kehkt1TiOGRRlKFsv8/view?usp=drive_link'
  file_id = gdrive_url.split('/d/')[1].split('/view')[0]
  direct_download_url = f'https://drive.google.com/uc?id={file_id}'
  local_zip_file = 'file.zip'
  gdown.download(direct_download_url, local_zip_file, quiet=False)
  extracted_path = 'extracted_files'
  try:
      with zipfile.ZipFile(local_zip_file, 'r') as zip_ref:
          zip_ref.extractall(extracted_path)
      print("Extraction successful!")
  except zipfile.BadZipFile:
      print("Error: The downloaded file is not a valid ZIP file.")
  os.remove(local_zip_file)
+ data_dir = pathlib.Path(extracted_path) / 'Pest_Dataset'

+ # Data loading and preprocessing
  img_height, img_width = 180, 180
  batch_size = 32
  train_ds = tf.keras.preprocessing.image_dataset_from_directory(
      data_dir,
      validation_split=0.2,
      image_size=(img_height, img_width),
      batch_size=batch_size
  )
  val_ds = tf.keras.preprocessing.image_dataset_from_directory(
      data_dir,
      validation_split=0.2,
      image_size=(img_height, img_width),
      batch_size=batch_size
  )
  class_names = train_ds.class_names

+ # Data augmentation
  data_augmentation = keras.Sequential(
      [
          layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
+         layers.RandomRotation(0.1),
+         layers.RandomZoom(0.1),
          layers.RandomBrightness(0.2),
+         layers.RandomContrast(0.2),
      ]
  )

+ # Model definition
  num_classes = len(class_names)
+ model = Sequential([
+     data_augmentation,
+     layers.Rescaling(1./255),
+     layers.Conv2D(16, 3, padding='same', activation='relu'),
+     layers.MaxPooling2D(),
+     layers.Conv2D(32, 3, padding='same', activation='relu'),
+     layers.MaxPooling2D(),
+     layers.Conv2D(64, 3, padding='same', activation='relu'),
+     layers.MaxPooling2D(),
+     layers.Conv2D(128, 3, padding='same', activation='relu'),
+     layers.MaxPooling2D(),
+     layers.Dropout(0.5),
+     layers.Flatten(),
+     layers.Dense(256, activation='relu'),
+     layers.Dense(num_classes, activation='softmax', name="outputs")
+ ])
+
+ optimizer = keras.optimizers.Adam(learning_rate=0.001)
+ lr_scheduler = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3)
+ early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
+
+ model.compile(optimizer=optimizer,
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                metrics=['accuracy'])

  model.summary()

  # Train the model
+ epochs = 15
  history = model.fit(
      train_ds,
      validation_data=val_ds,
      epochs=epochs,
+     callbacks=[lr_scheduler, early_stopping]
  )

  # Define category descriptions
      "Weevils": "Weevils are a type of beetle with a long snout, known for being pests to crops and stored grains."
  }

+ # Prediction function
  def predict_image(img):
      img = np.array(img)
      img_resized = tf.image.resize(img, (180, 180))
      img_4d = tf.expand_dims(img_resized, axis=0)
      prediction = model.predict(img_4d)[0]
+     top_3_indices = prediction.argsort()[-3:][::-1]
+     results = {}
+     for i in top_3_indices:
+         class_name = class_names[i]
+         results[class_name] = f"{float(prediction[i]):.2f} - {category_descriptions[class_name]}"
+     return results
+
+ # Gradio interface setup
  image = gr.Image()
+ label = gr.Label(num_top_classes=3)
  custom_css = """
+ body {background-color: #f5f5f5;}
+ .gradio-container {border: 1px solid #ccc; border-radius: 10px; padding: 20px;}
  """

  gr.Interface(
      fn=predict_image,
      inputs=image,
      outputs=label,
+     title="Agricultural Pest Image Classification",
+     description="Identify 12 types of agricultural pests from images. This model was trained on a dataset from Kaggle.",
      css=custom_css
  ).launch(debug=True)
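
One thing to keep in mind when wiring predict_image into gr.Label: the Label component generally expects a mapping from class label to a float confidence, while the committed function returns formatted strings as values. If the string values do not render as expected, a minimal sketch of a numeric variant is shown below. The helper name predict_image_top3 and its explicit parameters are hypothetical, not part of this commit; it assumes the same trained model, class_names, and category_descriptions as above.

```python
import numpy as np
import tensorflow as tf


def predict_image_top3(img, model, class_names, category_descriptions):
    """Return the top-3 classes as {label: confidence} for a gr.Label output."""
    # Resize and add a batch dimension, mirroring the committed predict_image.
    img_resized = tf.image.resize(np.array(img), (180, 180))
    img_4d = tf.expand_dims(img_resized, axis=0)
    prediction = model.predict(img_4d)[0]
    top_3 = prediction.argsort()[-3:][::-1]
    # Keys carry the human-readable description; values stay numeric floats.
    return {
        f"{class_names[i]} - {category_descriptions[class_names[i]]}": float(prediction[i])
        for i in top_3
    }
```

With numeric confidence values, gr.Label(num_top_classes=3) should be able to render its usual confidence bars for the three predicted classes.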