# Hugging Face Spaces page metadata (scrape residue, kept for provenance):
# krishnamishra8848's picture
# Update app.py
# ad1e8d1 verified
# raw / history blame
# 5.09 kB
import streamlit as st
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
import cv2
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
# Page title shown at the top of the Streamlit app.
st.title("Nepal Vehicle License Plate and Character Recognition")
# Short usage description for the user.
st.write("Upload an image to detect license plates, segment characters, and recognize each character using advanced YOLO and CNN models.")
# Download YOLO and CNN model weights from Hugging Face
@st.cache_resource
def load_models():
    """Download and initialise the three models used by the pipeline.

    Returns:
        tuple: ``(full_plate_model, character_model, recognition_model)`` —
        a YOLO detector for whole license plates, a YOLO detector for
        individual characters, and a Keras CNN classifier for character
        recognition.

    Cached with ``st.cache_resource`` so the Hub downloads and model
    construction happen only once per Streamlit session.
    """
    def fetch(repo_id, filename):
        # Pull one weight file from the Hugging Face Hub (locally cached).
        return hf_hub_download(repo_id=repo_id, filename=filename)

    plate_detector = YOLO(
        fetch("krishnamishra8848/Nepal-Vehicle-License-Plate-Detection", "last.pt")
    )
    char_detector = YOLO(
        fetch("krishnamishra8848/Nepal_Vehicle_License_Plates_Detection_Version3", "best.pt")
    )
    char_classifier = load_model(
        fetch("krishnamishra8848/Nepal_Vehicle_License_Plates_Character_Recognisation", "model.h5")
    )
    return plate_detector, char_detector, char_classifier
# Instantiate the three models once at startup (cached across reruns).
full_plate_model, character_model, recognition_model = load_models()
# Function to detect and crop license plates
def detect_and_crop_license_plate(image):
    """Detect license plates in a PIL image.

    Args:
        image: RGB PIL image uploaded by the user.

    Returns:
        tuple: ``(cropped_images, detected_image)`` — a list of BGR crops,
        one per detected plate, and a BGR copy of the input with each
        detection outlined in blue.
    """
    # PIL gives RGB; OpenCV drawing and the YOLO pipeline here use BGR.
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    annotated = frame.copy()
    crops = []
    for result in full_plate_model(frame):
        boxes = getattr(result, 'boxes', None)
        if boxes is None:
            continue
        for xyxy in boxes.xyxy:
            x1, y1, x2, y2 = (int(v) for v in xyxy)
            # Outline the detection on the display copy, crop from the clean frame.
            cv2.rectangle(annotated, (x1, y1), (x2, y2), (255, 0, 0), 2)
            crops.append(frame[y1:y2, x1:x2])
    return crops, annotated
# Function to detect and crop characters
def detect_and_crop_characters(image):
results = character_model(image)
character_crops = []
for result in results:
if hasattr(result, 'boxes') and result.boxes is not None:
for box in result.boxes.xyxy:
x1, y1, x2, y2 = map(int, box)
character_crops.append(image[y1:y2, x1:x2])
return character_crops
# Function to recognize characters
def recognize_characters(character_crops):
class_labels = [
'क', 'को', 'ख', 'ग', 'च', 'ज', 'झ', 'ञ', 'डि', 'त', 'ना', 'प',
'प्र', 'ब', 'बा', 'भे', 'म', 'मे', 'य', 'लु', 'सी', 'सु', 'से', 'ह',
'०', '१', '२', '३', '४', '५', '६', '७', '८', '९'
]
recognized_characters = []
for char_crop in character_crops:
# Preprocess the cropped character for recognition model
resized = cv2.resize(char_crop, (64, 64))
normalized = resized / 255.0
reshaped = np.expand_dims(normalized, axis=0) # Add batch dimension
# Predict the character
prediction = recognition_model.predict(reshaped)
predicted_class = class_labels[np.argmax(prediction)]
recognized_characters.append(predicted_class)
return recognized_characters
# Upload an image file
uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
# Load image
image = Image.open(uploaded_file)
# Detect license plates
with st.spinner("Processing image..."):
cropped_plates, detected_image = detect_and_crop_license_plate(image)
if cropped_plates:
st.image(cv2.cvtColor(detected_image, cv2.COLOR_BGR2RGB), caption="Detected License Plates", use_container_width=True)
st.write(f"Detected {len(cropped_plates)} license plate(s).")
for idx, cropped_plate in enumerate(cropped_plates, 1):
st.write(f"Processing License Plate {idx}:")
character_crops = detect_and_crop_characters(cropped_plate)
if character_crops:
recognized_characters = recognize_characters(character_crops)
st.write("Recognized Characters:", "".join(recognized_characters))
else:
st.write("No characters detected in this license plate.")
else:
st.write("No license plates detected. Running character detection on the full image.")
character_crops = detect_and_crop_characters(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
if character_crops:
recognized_characters = recognize_characters(character_crops)
st.write("Recognized Characters:", "".join(recognized_characters))
else:
st.write("No characters detected in the full image.")
st.success("Processing complete!")