import streamlit as st
from ultralytics import YOLO
import cv2
import easyocr
import numpy as np
import pandas as pd
from PIL import Image
import tempfile
# Load the YOLO plate-detection model once and cache it across Streamlit reruns
@st.cache_resource
def load_model():
    model = YOLO('yolo11n-custom.pt')
    model.fuse()
    return model

# Load the EasyOCR reader once and cache it across Streamlit reruns
@st.cache_resource
def load_reader():
    return easyocr.Reader(['en'])

model = load_model()
reader = load_reader()
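
# apply_filters applies the selected enhancements to the plate crop in a fixed order:
# auto enhancement, scaling, noise reduction, sharpening, grayscale conversion and the
# grayscale-only filters, edge detection/inversion, gamma correction, blur, and finally
# contrast/brightness adjustment.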
def apply_filters(image, noise, sharpen, grayscale, threshold, edges, invert, auto, blur, contrast, brightness, scale, denoise, hist_eq, gamma, clahe):
    img = np.array(image)
    # Auto Enhancement (CLAHE on the L channel in LAB colour space)
    if auto:
        lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
        l, a, b = cv2.split(lab)
        clahe_obj = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))  # local name so the `clahe` flag is not overwritten
        l = clahe_obj.apply(l)
        img = cv2.merge((l, a, b))
        img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
    # Scaling
    if scale != 1.0:
        height, width = img.shape[:2]
        img = cv2.resize(img, (int(width * scale), int(height * scale)))
    # Noise Reduction (Bilateral Filtering)
    if noise:
        img = cv2.bilateralFilter(img, 9, 75, 75)
    # Noise Reduction (Non-Local Means)
    if denoise:
        img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    # Sharpening
    if sharpen:
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        img = cv2.filter2D(img, -1, kernel)
    # Convert to Grayscale
    if grayscale or threshold or hist_eq or clahe:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Histogram Equalization
    if hist_eq:
        img = cv2.equalizeHist(img)
    # CLAHE (Contrast Limited Adaptive Histogram Equalization)
    if clahe:
        clahe_filter = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
        img = clahe_filter.apply(img)
    # Adaptive Thresholding
    if threshold:
        img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    # Edge Detection
    if edges:
        img = cv2.Canny(img, 100, 200)
    # Invert Colors
    if invert:
        img = cv2.bitwise_not(img)
    # Gamma Correction
    if gamma != 1.0:
        inv_gamma = 1.0 / gamma
        table = np.array([(i / 255.0) ** inv_gamma * 255 for i in np.arange(0, 256)]).astype("uint8")
        img = cv2.LUT(img, table)
    # Blur
    if blur:
        img = cv2.GaussianBlur(img, (2*blur + 1, 2*blur + 1), 0)
    # Contrast & Brightness
    if contrast != 1.0 or brightness != 0:
        img = cv2.convertScaleAbs(img, alpha=contrast, beta=brightness)
    return img
st.title("Refine Image for Detection")
st.write("Enhance the license plate image for better recognition.")
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
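
# Detect plates in the uploaded image, let the user select one, apply the
# sidebar enhancements to the crop, then run OCR on demand.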
if uploaded_file:
    # Read image
    img = Image.open(uploaded_file)
    img = np.array(img)
    # Detect license plates
    st.write("Detecting license plates...")
    results = model.predict(img, conf=0.15, iou=0.3, classes=[0])
    plates = results[0].boxes.xyxy if len(results) > 0 else []
    if len(plates) == 0:
        st.error("No license plates detected. Try another image.")
    else:
        st.write("**Select a License Plate by Clicking Below**")
        # Show detected plates in a grid
        if "selected_plate_index" not in st.session_state:
            st.session_state.selected_plate_index = 0
        selected_plate_index = st.session_state.get("selected_plate_index", 0)
        cols = st.columns(len(plates))  # Create dynamic columns
        for i, (x1, y1, x2, y2) in enumerate(plates):
            plate_img = img[int(y1):int(y2), int(x1):int(x2)]
            plate_img = Image.fromarray(plate_img)
            with cols[i]:  # Place each image in a column
                st.image(plate_img, caption=f"Plate {i+1}", use_container_width=True)
                if st.button(f"Select Plate {i+1}", key=f"plate_{i}"):
                    st.session_state["selected_plate_index"] = i
        # Get the selected plate
        selected_index = st.session_state["selected_plate_index"]
        x1, y1, x2, y2 = map(int, plates[selected_index])
        cropped_plate = img[y1:y2, x1:x2]
        refined_img = cropped_plate.copy()
        # Sidebar for enhancements
        st.sidebar.header("Enhancement Options")
        blur = st.sidebar.slider("Blur", 0, 10, 0)
        contrast = st.sidebar.slider("Contrast", 0.5, 2.0, 1.0)
        brightness = st.sidebar.slider("Brightness", -100, 100, 0)  # additive offset passed as beta to convertScaleAbs
        gamma = st.sidebar.slider("Gamma Correction", 0.1, 3.0, 1.0, 0.1)
        scale = st.sidebar.slider("Scale", 1.0, 10.0, 5.0)
        noise = st.sidebar.checkbox("Noise Reduction (Bilateral)")
        denoise = st.sidebar.checkbox("Denoise (Non-Local Means)")
        sharpen = st.sidebar.checkbox("Sharpening")
        hist_eq = st.sidebar.checkbox("Histogram Equalization")
        clahe = st.sidebar.checkbox("CLAHE (Advanced Contrast)")
        grayscale = st.sidebar.checkbox("Grayscale Conversion")
        threshold = st.sidebar.checkbox("Adaptive Thresholding")
        edges = st.sidebar.checkbox("Edge Detection")
        invert = st.sidebar.checkbox("Invert Colors")
        auto = st.sidebar.checkbox("Auto Enhancement")
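        # Apply the selected enhancements and preview the refined crop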
        refined_img = apply_filters(refined_img, noise, sharpen, grayscale, threshold, edges, invert, auto, blur, contrast, brightness, scale, denoise, hist_eq, gamma, clahe)
        st.image(refined_img, caption="Refined License Plate", use_container_width=True)
        if st.button("Detect License Plate Text"):
            with st.spinner("Reading text..."):
                ocr_result = reader.readtext(np.array(refined_img), detail=0, allowlist="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-")
                plate_text = " ".join(ocr_result).upper() if ocr_result else "No text detected."
            # Show detected text
            st.subheader("Detected License Plate:")
            st.code(plate_text, language="plaintext")