import gradio as gr
import cv2
import pytesseract
from PIL import Image
import io
import base64
from datetime import datetime
import pytz
import numpy as np
import logging
import os

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Configure Tesseract path
try:
    pytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'
    pytesseract.get_tesseract_version()  # Test Tesseract availability
    logging.info("Tesseract is available")
except Exception as e:
    logging.error(f"Tesseract not found or misconfigured: {str(e)}")

def preprocess_image(img_cv):
    """Preprocess image for OCR: enhance contrast, reduce noise, and apply adaptive thresholding."""
    try:
        # Convert to grayscale
        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
        # Enhance contrast with CLAHE
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
        contrast = clahe.apply(gray)
        # Reduce noise with Gaussian blur
        blurred = cv2.GaussianBlur(contrast, (5, 5), 0)
        # Apply adaptive thresholding for a better binary image representation
        thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        # Sharpen the image to bring out more detail in the digits
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        sharpened = cv2.filter2D(thresh, -1, kernel)
        return sharpened
    except Exception as e:
        logging.error(f"Image preprocessing failed: {str(e)}")
        return img_cv

def detect_roi(img_cv):
    """Detect the region of interest (ROI) containing the weight display."""
    try:
        # Convert to grayscale for edge detection
        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
        # Apply edge detection
        edges = cv2.Canny(gray, 50, 150)
        # Find contours
        contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            logging.warning("No contours detected for ROI")
            return img_cv  # Return full image if no contours found
        # Find the largest contour (assuming it's the display)
        largest_contour = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(largest_contour)
        # Add padding to the detected region to ensure the weight is fully captured
        padding = 10
        x = max(0, x - padding)
        y = max(0, y - padding)
        w = min(img_cv.shape[1] - x, w + 2 * padding)
        h = min(img_cv.shape[0] - y, h + 2 * padding)
        roi = img_cv[y:y+h, x:x+w]
        logging.info(f"ROI detected at ({x}, {y}, {w}, {h})")
        return roi
    except Exception as e:
        logging.error(f"ROI detection failed: {str(e)}")
        return img_cv

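# --- Optional ROI variant (sketch, not part of the original app) --------------
# detect_roi() keeps the single largest contour. If the background produces big
# edge blobs (table edge, scale body), filtering contours by a display-like
# aspect ratio can be more robust. The area and ratio thresholds below are
# assumptions to tune for a specific camera setup.
def detect_roi_by_shape(img_cv, min_area=1000, min_ratio=1.5, max_ratio=6.0):
    """Pick the largest contour whose bounding box looks like a wide display."""
    gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    candidates = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if w * h >= min_area and min_ratio <= w / max(h, 1) <= max_ratio:
            candidates.append((w * h, x, y, w, h))
    if not candidates:
        return img_cv  # Fall back to the full frame
    _, x, y, w, h = max(candidates)
    return img_cv[y:y + h, x:x + w]
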
def extract_weight(img):
    """Extract weight from image using Tesseract OCR with improved configuration."""
    try:
        if img is None:
            logging.error("No image provided for OCR")
            return "Not detected", 0.0
        # Convert PIL image to OpenCV format
        img_cv = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        # Detect ROI
        roi_img = detect_roi(img_cv)
        # Preprocess the ROI
        processed_img = preprocess_image(roi_img)
        # OCR configuration for digit extraction
        custom_config = r'--oem 3 --psm 6 -c tessedit_char_whitelist=0123456789.'
        # Run OCR
        text = pytesseract.image_to_string(processed_img, config=custom_config)
        logging.info(f"OCR result: '{text}'")
        # Extract valid weight from OCR result
        weight = ''.join(filter(lambda x: x in '0123456789.', text.strip()))
        if weight:
            try:
                weight_float = float(weight)
                if weight_float >= 0:  # Only accept valid weights
                    confidence = 95.0  # Assume high confidence if we have a valid weight
                    logging.info(f"Weight detected: {weight} (Confidence: {confidence:.2f}%)")
                    return weight, confidence
            except ValueError:
                logging.warning(f"Invalid number format: {weight}")
        logging.error("OCR failed to detect a valid weight")
        return "Not detected", 0.0
    except Exception as e:
        logging.error(f"OCR processing failed: {str(e)}")
        return "Not detected", 0.0

def process_image(img):
    """Process the uploaded or captured image and extract the weight."""
    if img is None:
        logging.error("No image provided")
        # Return one value per output component: weight text, timestamp, snapshot
        return "No image uploaded", None, None
    ist_time = datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%d-%m-%Y %I:%M:%S %p")
    weight, confidence = extract_weight(img)
    if weight == "Not detected" or confidence < 95.0:
        logging.warning(f"Weight detection failed: {weight} (Confidence: {confidence:.2f}%)")
        return f"{weight} (Confidence: {confidence:.2f}%)", ist_time, img
    return f"{weight} kg (Confidence: {confidence:.2f}%)", ist_time, img

# Gradio Interface
with gr.Blocks(title="⚖️ Auto Weight Logger") as demo:
    gr.Markdown("## ⚖️ Auto Weight Logger")
    gr.Markdown("📷 Upload or capture an image of a digital weight scale (max 5MB).")

    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload / Capture Image", sources=["upload", "webcam"])
        output_weight = gr.Textbox(label="⚖️ Detected Weight (in kg)")

    with gr.Row():
        timestamp = gr.Textbox(label="🕒 Captured At (IST)")
        snapshot = gr.Image(label="📸 Snapshot Image")

    submit = gr.Button("🔍 Detect Weight")

    submit.click(
        fn=process_image,
        inputs=image_input,
        outputs=[output_weight, timestamp, snapshot]
    )

gr.Markdown(""" | |
### Instructions | |
- Upload a clear, well-lit image of a digital weight scale display (7-segment font preferred). | |
- Ensure the image is < 5MB (automatically resized if larger). | |
- Review the detected weight and try again if it's incorrect. | |
""") | |
if __name__ == "__main__": | |
demo.launch() | |
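
# Likely dependencies for running this Space, inferred from the imports above
# (assumptions, not taken from an actual requirements file):
#   requirements.txt: gradio, opencv-python-headless, pytesseract, Pillow, pytz, numpy
#   packages.txt (system packages): tesseract-ocr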