# -*- coding: utf-8 -*-
"""ProyectoAvanzado1.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1R5pOFRcOn9faAiaFmIL6GcuWweWYGswX
"""
import os
import pickle
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
import tensorflow as tf
from PIL import Image
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.utils import to_categorical
# Function to load data from .p (pickle) files
def load_pickle(file_path):
    with open(file_path, 'rb') as file:
        data = pickle.load(file)
    return data
# Load the training, validation, and test data
train_data = load_pickle("/Users/victorg/Documents/DataScience/ProjectoAvanzado/CSV/train.p")
val_data = load_pickle("/Users/victorg/Documents/DataScience/ProjectoAvanzado/CSV/valid.p")
test_data = load_pickle("/Users/victorg/Documents/DataScience/ProjectoAvanzado/CSV/test.p")
# Split into images and labels
train_images, train_labels = train_data['features'], train_data['labels']
val_images, val_labels = val_data['features'], val_data['labels']
test_images, test_labels = test_data['features'], test_data['labels']
# Preprocess the data: scale pixels to [0, 1] and one-hot encode the 43 classes
def preprocess_data(images, labels):
    images = images.astype('float32') / 255.0
    labels = to_categorical(labels, num_classes=43)
    return images, labels
train_images, train_labels = preprocess_data(train_images, train_labels)
val_images, val_labels = preprocess_data(val_images, val_labels)
test_images, test_labels = preprocess_data(test_images, test_labels)
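# Optional sanity check (illustrative): confirm array shapes and one-hot encoding after preprocessing.
print("Train:", train_images.shape, train_labels.shape)
print("Valid:", val_images.shape, val_labels.shape)
print("Test: ", test_images.shape, test_labels.shape)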
# Convert the labels to a DataFrame for visualization
train_df = pd.DataFrame({"label": train_labels.argmax(axis=1)})
# Visualization 1: image count per class
plt.figure(figsize=(10, 6))
sns.countplot(x=train_df['label'])
plt.title("Image Count per Class")
plt.xlabel("Class")
plt.ylabel("Count")
plt.show()
# Visualization 2: sample images per class
fig, axes = plt.subplots(5, 5, figsize=(15, 15))
axes = axes.ravel()
for i in range(25):
    axes[i].imshow(train_images[i])
    axes[i].set_title(f"Class: {train_labels[i].argmax()}")
    axes[i].axis('off')
plt.subplots_adjust(hspace=0.5)
plt.show()
# Visualization 3: random sample of images
fig, axes = plt.subplots(3, 3, figsize=(12, 12))
axes = axes.ravel()
for i in range(9):
    idx = np.random.randint(0, len(train_images))
    axes[i].imshow(train_images[idx])
    axes[i].set_title(f"Class: {train_labels[idx].argmax()}")
    axes[i].axis('off')
plt.subplots_adjust(hspace=0.5)
plt.show()
# Define the CNN architecture
model = Sequential([
    Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation="relu"),
    MaxPooling2D((2, 2)),
    Conv2D(128, (3, 3), activation="relu"),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(215, activation="relu"),
    Dropout(0.5),
    Dense(43, activation="softmax")
])
# Compile the model
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
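# Optional: print layer output shapes and parameter counts to verify the architecture.
model.summary()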
# Load the model if it has already been saved; otherwise train it and save it
model_path = "traffic_sign_classifier.h5"
if os.path.exists(model_path):
    model = tf.keras.models.load_model(model_path)
else:
    model.fit(train_images, train_labels, epochs=10, validation_data=(val_images, val_labels))
    model.save(model_path)
# Evaluate the model on the test data
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f"Test Accuracy: {test_acc:.2f}")
# Load the saved model
model = tf.keras.models.load_model("traffic_sign_classifier.h5")
# Traffic sign class dictionary
classes = {
    0: 'Speed limit (20km/h)',
    1: 'Speed limit (30km/h)',
    2: 'Speed limit (50km/h)',
    3: 'Speed limit (60km/h)',
    4: 'Speed limit (70km/h)',
    5: 'Speed limit (80km/h)',
    6: 'End of speed limit (80km/h)',
    7: 'Speed limit (100km/h)',
    8: 'Speed limit (120km/h)',
    9: 'No passing',
    10: 'No passing for vehicles over 3.5 metric tons',
    11: 'Right-of-way at the next intersection',
    12: 'Priority road',
    13: 'Yield',
    14: 'Stop',
    15: 'No vehicles',
    16: 'Vehicles over 3.5 metric tons prohibited',
    17: 'No entry',
    18: 'General caution',
    19: 'Dangerous curve to the left',
    20: 'Dangerous curve to the right',
    21: 'Double curve',
    22: 'Bumpy road',
    23: 'Slippery road',
    24: 'Road narrows on the right',
    25: 'Road work',
    26: 'Traffic signals',
    27: 'Pedestrians',
    28: 'Children crossing',
    29: 'Bicycles crossing',
    30: 'Beware of ice/snow',
    31: 'Wild animals crossing',
    32: 'End of all speed and passing limits',
    33: 'Turn right ahead',
    34: 'Turn left ahead',
    35: 'Ahead only',
    36: 'Go straight or right',
    37: 'Go straight or left',
    38: 'Keep right',
    39: 'Keep left',
    40: 'Roundabout mandatory',
    41: 'End of no passing',
    42: 'End of no passing by vehicles over 3.5 metric tons'
}
# Function to predict the class of an uploaded image
def predict(image):
    image = np.array(image.convert('RGB'))  # drop any alpha channel (e.g. RGBA PNG uploads)
    image = cv2.resize(image, (32, 32))     # match the network's input size
    image = image / 255.0                   # same scaling as used during training
    image = np.expand_dims(image, axis=0)   # add a batch dimension
    predictions = model.predict(image)
    class_idx = np.argmax(predictions)
    return classes[class_idx]
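# Example usage (illustrative): run predict() on a sample from the test set.
# predict() expects an unscaled PIL image, so rebuild one from the normalized array.
sample = Image.fromarray((test_images[0] * 255).astype('uint8'))
print("Predicted:", predict(sample), "| Actual:", classes[int(test_labels[0].argmax())])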
# App title and description
st.title("Traffic Sign Classifier")
st.write("This application classifies traffic signs using a CNN model.")
# Show example images from the dataset
st.header("Example Images from the Dataset")
fig, axes = plt.subplots(2, 5, figsize=(15, 6))
axes = axes.ravel()
for i in range(10):
    idx = np.random.randint(0, len(train_images))
    axes[i].imshow(train_images[idx])
    axes[i].set_title(f"Class: {train_labels[idx].argmax()}")
    axes[i].axis('off')
st.pyplot(fig)
# Show some EDA visualizations
st.header("EDA Visualizations")
fig, ax = plt.subplots(figsize=(10, 6))
sns.countplot(x='label', data=train_df, ax=ax)
ax.set_title("Image Count per Class")
ax.set_xlabel("Class")
ax.set_ylabel("Count")
st.pyplot(fig)
# Let the user upload an image
st.header("Upload Your Own Traffic Sign Image")
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded Image', use_column_width=True)
    st.write("")
    st.write("Classifying...")
    label = predict(image)
    st.write(f"This sign is: {label}")
# Show model metrics
st.header("Model Metrics")
st.write(f"Test set accuracy: {test_acc:.2f}")