DHEIVER's picture
Update app.py
340e897 verified
raw
history blame
5.13 kB
import gradio as gr
import cv2
import numpy as np
from PIL import Image
from dataclasses import dataclass
from typing import Tuple, Dict, List
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
import logging
from datetime import datetime
# Basic logging configuration: INFO level, timestamped messages
# (used by IrisAnalyzer's error handlers below).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
@dataclass
class IrisZone:
    """Describes one concentric ring ("zone") of the iris chart."""
    name: str                        # display label, also used as the results-dict key
    inner_ratio: float               # inner edge as a fraction of the estimated iris radius
    outer_ratio: float               # outer edge as a fraction of the estimated iris radius
    color: Tuple[int, int, int]      # RGB color used to draw the ring and its label
    angle_start: float = 0           # angular span in degrees — currently unused by the analyzer
    angle_end: float = 360
class IrisAnalyzer:
    """Iris zone analyzer based on Jensen's iridology chart.

    Detects the pupil, estimates the iris as a set of concentric rings
    around it, draws each ring onto the image, and reports the mean
    intensity inside each ring.
    """

    def __init__(self, iris_pupil_ratio: float = 4.0):
        """
        Args:
            iris_pupil_ratio: estimated iris radius expressed as a multiple
                of the detected pupil radius. The default of 4.0 reproduces
                the previous hard-coded behavior.
        """
        self.iris_pupil_ratio = iris_pupil_ratio
        # Rings ordered from the outermost (neural) zone down to the pupillary
        # zone; ratios are fractions of the estimated iris radius, colors RGB.
        self.zones = [
            IrisZone("Zona Cerebral/Neural", 0.85, 1.0, (255, 0, 0)),
            IrisZone("Zona Digestiva", 0.7, 0.85, (0, 255, 0)),
            IrisZone("Zona Respiratória", 0.55, 0.7, (0, 0, 255)),
            IrisZone("Zona Circulatória", 0.4, 0.55, (255, 255, 0)),
            IrisZone("Zona Linfática", 0.25, 0.4, (255, 0, 255)),
            IrisZone("Zona Endócrina", 0.15, 0.25, (0, 255, 255)),
            IrisZone("Zona Pupilar", 0, 0.15, (128, 128, 128)),
        ]

    def detect_pupil(self, img: np.ndarray) -> Tuple[int, int, int]:
        """Detect the pupil and return (center_x, center_y, radius), or None.

        The pupil is assumed to be the largest dark blob: the blurred
        grayscale image is inverse-thresholded and the biggest external
        contour is taken as the pupil candidate.
        """
        try:
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            blur = cv2.GaussianBlur(gray, (5, 5), 0)
            # NOTE: with THRESH_OTSU set, OpenCV computes the threshold
            # automatically and the fixed value 30 is ignored.
            _, thresh = cv2.threshold(blur, 30, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
            contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if not contours:
                return None
            # Largest contour by area is taken as the pupil.
            best_contour = max(contours, key=cv2.contourArea)
            if cv2.contourArea(best_contour) <= 0:
                # All contours are degenerate (zero area) — no usable pupil.
                return None
            (x, y), radius = cv2.minEnclosingCircle(best_contour)
            return (int(x), int(y), int(radius))
        except Exception as e:
            logging.error(f"Erro na detecção da pupila: {str(e)}")
            return None

    def analyze_iris(self, img: np.ndarray) -> Tuple[np.ndarray, Dict]:
        """Annotate the iris zones on a copy of ``img`` and report per-zone stats.

        Returns:
            (annotated image, {zone name -> summary string}). On failure the
            original image is returned together with a single "Erro" entry.
        """
        try:
            output_img = img.copy()
            results = {}
            pupil = self.detect_pupil(img)
            if pupil is None:
                return img, {"Erro": "Não foi possível detectar a pupila"}
            x, y, pupil_radius = pupil
            # The iris radius is not detected directly; it is estimated as a
            # fixed multiple of the pupil radius (see __init__).
            iris_radius = int(pupil_radius * self.iris_pupil_ratio)
            cv2.circle(output_img, (x, y), pupil_radius, (0, 0, 0), 2)
            for zone in self.zones:
                inner_r = int(iris_radius * zone.inner_ratio)
                outer_r = int(iris_radius * zone.outer_ratio)
                cv2.circle(output_img, (x, y), outer_r, zone.color, 2)
                # Ring mask: fill the outer circle, then punch out the inner one.
                mask = np.zeros(img.shape[:2], dtype=np.uint8)
                cv2.circle(mask, (x, y), outer_r, 255, -1)
                cv2.circle(mask, (x, y), inner_r, 0, -1)
                # cv2.mean returns per-channel means; only channel 0 is reported.
                mean_color = cv2.mean(img, mask=mask)
                results[zone.name] = f"Intensidade média: {mean_color[0]:.1f}"
                # Label each ring near its left edge (may fall outside the
                # image for large irises; cv2.putText clips silently).
                cv2.putText(output_img, zone.name,
                            (x - iris_radius, y + outer_r),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, zone.color, 1)
            return output_img, results
        except Exception as e:
            logging.error(f"Erro na análise: {str(e)}")
            return img, {"Erro": str(e)}
def process_image(img):
    """Gradio callback: run the iris analysis on an uploaded eye image.

    Returns a pair (annotated image, results dict); when no image was
    provided, the image slot is None and the dict carries an error entry.
    """
    if img is None:
        return None, {"Erro": "Nenhuma imagem fornecida"}
    return IrisAnalyzer().analyze_iris(np.array(img))
# Gradio UI: image upload + analyze button on the left, the annotated image
# and per-zone JSON results on the right.
with gr.Blocks(theme=gr.themes.Soft()) as iface:
    gr.Markdown("""
    # 🔍 Analisador de Íris
    ### Análise de zonas da íris baseada na teoria de Jensen
    """)
    with gr.Row():
        with gr.Column():
            # Accepts uploads and clipboard pastes; delivered to the callback
            # as a numpy array (matching process_image's expectation).
            input_image = gr.Image(
                label="Upload da imagem do olho",
                type="numpy",
                sources=["upload", "clipboard"]
            )
            analyze_btn = gr.Button("📸 Analisar", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Análise Visual")
            results = gr.JSON(label="Resultados")
    # Wire the button: one input image in, (annotated image, results) out.
    analyze_btn.click(
        fn=process_image,
        inputs=input_image,
        outputs=[output_image, results]
    )
if __name__ == "__main__":
    iface.launch()