Commit 8872483
Parent(s): 5e7ba61
Update burrito.py

burrito.py CHANGED (+84 -0)
@@ -0,0 +1,84 @@
+import tensorflow as tf
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
+import numpy as np
+from tensorflow.keras.preprocessing import image
+
+# Define image size and batch size
+IMG_SIZE = 224
+BATCH_SIZE = 32
+
+# Define train and validation directories
+train_dir = 'path/to/train/folder'
+val_dir = 'path/to/validation/folder'
+
+# Use ImageDataGenerator for data augmentation
+train_datagen = ImageDataGenerator(
+    rescale=1./255,
+    rotation_range=20,
+    width_shift_range=0.1,
+    height_shift_range=0.1,
+    shear_range=0.2,
+    zoom_range=0.2,
+    horizontal_flip=True,
+    fill_mode='nearest')
+
+val_datagen = ImageDataGenerator(rescale=1./255)
+
+# Generate batches of augmented data from directories
+train_generator = train_datagen.flow_from_directory(
+    train_dir,
+    target_size=(IMG_SIZE, IMG_SIZE),
+    batch_size=BATCH_SIZE,
+    class_mode='categorical')
+
+val_generator = val_datagen.flow_from_directory(
+    val_dir,
+    target_size=(IMG_SIZE, IMG_SIZE),
+    batch_size=BATCH_SIZE,
+    class_mode='categorical')
+
+# Define the model
+model = Sequential()
+
+# Add convolutional layers
+model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 3)))
+model.add(MaxPooling2D((2, 2)))
+model.add(Conv2D(64, (3, 3), activation='relu'))
+model.add(MaxPooling2D((2, 2)))
+model.add(Conv2D(128, (3, 3), activation='relu'))
+model.add(MaxPooling2D((2, 2)))
+
+# Flatten and add dense layers
+model.add(Flatten())
+model.add(Dense(512, activation='relu'))
+model.add(Dropout(0.5))
+model.add(Dense(3, activation='softmax'))
+
+# Compile the model
+model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
+
+# Train the model
+history = model.fit(
+    train_generator,
+    steps_per_epoch=train_generator.samples // BATCH_SIZE,
+    epochs=50,
+    validation_data=val_generator,
+    validation_steps=val_generator.samples // BATCH_SIZE)
+
+# Load an image
+img_path = 'path/to/image.jpg'
+img = image.load_img(img_path, target_size=(IMG_SIZE, IMG_SIZE))
+
+# Convert the image to a numpy array and normalize it
+img_array = image.img_to_array(img) / 255.
+
+# Expand the dimensions of the array to match the model input shape
+img_array = np.expand_dims(img_array, axis=0)
+
+# Use the model to predict the class probabilities
+probs = model.predict(img_array)[0]
+
+# Print the predicted class probabilities
+print(probs)
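As a follow-up sketch (not part of the committed file), the raw probability vector printed at the end could be mapped back to human-readable labels using the class_indices dictionary that flow_from_directory builds from the class folder names; the folder names in the comment below are hypothetical placeholders.

# Sketch only, assuming the script above has run and `probs` holds one softmax vector.
# `class_indices` maps each class folder name to its index in the model output,
# e.g. {'bean': 0, 'carnitas': 1, 'veggie': 2} (hypothetical names).
index_to_class = {v: k for k, v in train_generator.class_indices.items()}

predicted_index = int(np.argmax(probs))            # position of the highest probability
predicted_label = index_to_class[predicted_index]  # corresponding class folder name
print(f"Predicted class: {predicted_label} ({probs[predicted_index]:.2%})")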