# AI-LAB / app.py
import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
from sklearn.linear_model import LinearRegression
import random
import joblib
# Setup for Face Mesh detection
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(
    static_image_mode=True,
    max_num_faces=1,
    refine_landmarks=True,
    min_detection_confidence=0.5,
)
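
# Helper (illustrative sketch, not called by the pipeline below): run Face
# Mesh on a BGR frame and return the first detected face's landmarks, or
# None if no face is found. The name `get_landmarks` is an assumption, not
# part of the original app; it only shows how `face_mesh` would feed
# landmark-aware features such as extract_features().
def get_landmarks(image_bgr):
    results = face_mesh.process(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
    if results.multi_face_landmarks:
        return results.multi_face_landmarks[0].landmark
    return None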
# Function to extract color features from the image (BGR channel order).
# `landmarks` is accepted for future region-of-interest cropping but is not
# used yet; features are averaged over the whole frame.
def extract_features(image, landmarks):
    red_channel = image[:, :, 2]
    green_channel = image[:, :, 1]
    blue_channel = image[:, :, 0]
    red_percent = 100 * np.mean(red_channel) / 255
    green_percent = 100 * np.mean(green_channel) / 255
    blue_percent = 100 * np.mean(blue_channel) / 255
    return [red_percent, green_percent, blue_percent]
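
# Example usage (hypothetical `frame` variable holding a BGR image):
#   features = extract_features(frame, get_landmarks(frame))
#   # -> [red_percent, green_percent, blue_percent], each in 0–100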
# Mock model training (for demonstration): targets are drawn uniformly from
# `output_range`, and the three random inputs mirror the runtime feature
# vector [brightness_std, green_std, tone_index] so that predict() later
# receives a matching feature count.
def train_model(output_range):
    X = [[random.uniform(0.0, 0.2), random.uniform(0.0, 0.2),
          random.uniform(0.2, 0.8)] for _ in range(100)]
    y = [random.uniform(*output_range) for _ in X]
    model = LinearRegression().fit(X, y)
    return model
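
# Example: train_model((70, 110)) returns a throwaway regressor whose
# predictions fall near the fasting-glucose range of 70–110 mg/dL.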
# Load pre-trained models for Hemoglobin, SpO2, and Heart Rate
hemoglobin_model = joblib.load("hemoglobin_model_from_anemia_dataset.pkl")
spo2_model = joblib.load("spo2_model_simulated.pkl")
hr_model = joblib.load("heart_rate_model.pkl")
# Model dictionary setup for other tests
models = {
    "Hemoglobin": hemoglobin_model,
    "WBC Count": train_model((4.0, 11.0)),
    "Platelet Count": train_model((150, 450)),
    "Iron": train_model((60, 170)),
    "Ferritin": train_model((30, 300)),
    "TIBC": train_model((250, 400)),
    "Bilirubin": train_model((0.3, 1.2)),
    "Creatinine": train_model((0.6, 1.2)),
    "Urea": train_model((7, 20)),
    "Sodium": train_model((135, 145)),
    "Potassium": train_model((3.5, 5.1)),
    "TSH": train_model((0.4, 4.0)),
    "Cortisol": train_model((5, 25)),
    "FBS": train_model((70, 110)),
    "HbA1c": train_model((4.0, 5.7)),
    "Albumin": train_model((3.5, 5.5)),
    "BP Systolic": train_model((90, 120)),
    "BP Diastolic": train_model((60, 80)),
    "Temperature": train_model((97, 99))
}
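
# Sanity check (sketch): every mock model should accept the same 3-feature
# vector used at inference time, e.g.:
#   assert models["Iron"].predict([[0.1, 0.1, 0.5]]).shape == (1,)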
# Function to determine risk level
def get_risk_color(value, normal_range):
    low, high = normal_range
    if value < low:
        return ("Low", "🔻", "#FFCCCC")
    elif value > high:
        return ("High", "🔺", "#FFE680")
    else:
        return ("Normal", "✅", "#CCFFCC")
# Function to build an HTML table for displaying test results
def build_table(title, rows):
    html = (
        f'<div style="margin-bottom: 24px;">'
        f'<h4 style="margin: 8px 0;">{title}</h4>'
        f'<table style="width:100%; border-collapse:collapse;">'
        f'<thead><tr style="background:#f0f0f0;"><th style="padding:8px;border:1px solid #ccc;">Test</th><th style="padding:8px;border:1px solid #ccc;">Result</th><th style="padding:8px;border:1px solid #ccc;">Expected Range</th><th style="padding:8px;border:1px solid #ccc;">Level</th></tr></thead><tbody>'
    )
    for label, value, ref in rows:
        level, icon, bg = get_risk_color(value, ref)
        html += f'<tr style="background:{bg};"><td style="padding:6px;border:1px solid #ccc;">{label}</td><td style="padding:6px;border:1px solid #ccc;">{value:.2f}</td><td style="padding:6px;border:1px solid #ccc;">{ref[0]} – {ref[1]}</td><td style="padding:6px;border:1px solid #ccc;">{icon} {level}</td></tr>'
    html += '</tbody></table></div>'
    return html
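
# Example (hypothetical values): renders a one-row vitals table.
#   build_table("❤️ Vitals", [("Heart Rate", 72.0, (60, 100))])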
# Analyzing video for health metrics
def analyze_video(video):
    brightness_vals = []
    green_vals = []
    frame_sample = None
    if isinstance(video, np.ndarray):
        # Image mode: Gradio delivers a single RGB frame. Convert to the BGR
        # order used below and treat it as a one-frame video.
        frame_sample = cv2.cvtColor(video, cv2.COLOR_RGB2BGR)
        brightness_vals.append(np.mean(cv2.cvtColor(frame_sample, cv2.COLOR_BGR2GRAY)))
        green_vals.append(np.mean(frame_sample[:, :, 1]))
    else:
        # Video mode: Gradio's Video component returns a file path, which
        # OpenCV can decode frame by frame.
        cap = cv2.VideoCapture(video)
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_sample is None:
                frame_sample = frame.copy()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            green = frame[:, :, 1]
            brightness_vals.append(np.mean(gray))
            green_vals.append(np.mean(green))
        cap.release()
    if frame_sample is None:
        return "<p>⚠️ Could not read any frames from the input.</p>", None
    # Simulate heart rate and SpO2 estimation from frame statistics
    brightness_std = np.std(brightness_vals) / 255
    green_std = np.std(green_vals) / 255
    patch = frame_sample[100:150, 100:150]
    tone_index = np.mean(patch) / 255 if patch.size else 0.5
    hr_features = [brightness_std, green_std, tone_index]
    # The pre-trained .pkl models are assumed to accept these small feature
    # vectors; adjust if the serialized models expect a different layout.
    heart_rate = float(np.clip(hr_model.predict([hr_features])[0], 60, 100))
    spo2_features = [heart_rate, np.std(brightness_vals),
                     np.mean(patch) if patch.size else 127.5]
    spo2 = spo2_model.predict([spo2_features])[0]
    # Generating the health card with test results
    html_output = "".join([
        build_table("🩸 Hematology", [("Hemoglobin", models["Hemoglobin"].predict([hr_features])[0], (13.5, 17.5))]),
        build_table("🧬 Iron Panel", [("Iron", models["Iron"].predict([hr_features])[0], (60, 170))]),
        build_table("🧪 Electrolytes", [("Sodium", models["Sodium"].predict([hr_features])[0], (135, 145))]),
        build_table("❤️ Vitals", [("Heart Rate", heart_rate, (60, 100)), ("SpO2", spo2, (95, 100))]),
    ])
    # Return both values the Gradio click handler expects: the HTML report
    # and an RGB snapshot of the first frame.
    return html_output, cv2.cvtColor(frame_sample, cv2.COLOR_BGR2RGB)
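
# Direct usage outside Gradio (sketch; the clip path is hypothetical):
#   html, snapshot = analyze_video("sample_face_clip.mp4")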
# Gradio Interface setup
with gr.Blocks() as demo:
    gr.Markdown("""
# 🧠 Face-Based Lab Test AI Report (Video Mode)
Upload a face image or a short face video (10–30 s) to infer health metrics using rPPG-based analysis.
""")
    with gr.Row():
        with gr.Column():
            mode_selector = gr.Radio(label="Choose Input Mode", choices=["Image", "Video"], value="Image")
            image_input = gr.Image(type="numpy", label="📸 Upload Face Image")
            video_input = gr.Video(label="📽 Upload Face Video", sources=["upload", "webcam"])
            submit_btn = gr.Button("🔍 Analyze")
        with gr.Column():
            result_html = gr.HTML(label="🧪 Health Report Table")
            result_image = gr.Image(label="📷 Key Frame Snapshot")

    def route_inputs(mode, image, video):
        # Both branches return (html_report, snapshot), matching the two
        # outputs wired to the click handler below.
        return analyze_video(video) if mode == "Video" else analyze_video(image)

    submit_btn.click(fn=route_inputs, inputs=[mode_selector, image_input, video_input], outputs=[result_html, result_image])
    gr.Markdown("""---
✅ Table Format • AI Prediction • rPPG-based HR • Dynamic Summary • Multilingual Support • CTA""")

demo.launch()