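"""Face-Based Lab Test AI Report (Gradio Space).

Estimates mock lab panels and rPPG-style vitals (heart rate, SpO2) from a
short face video and renders them as color-coded HTML tables.
"""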
import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
from sklearn.linear_model import LinearRegression
import random
import joblib
# Setup for Face Mesh detection
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True, min_detection_confidence=0.5)
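# A minimal sketch (not wired into the pipeline below) of how face_mesh could
# gate the analysis so non-face uploads are rejected early; has_face is a
# hypothetical helper name:
def has_face(frame_bgr):
    """Return True if Face Mesh detects at least one face in a BGR frame."""
    results = face_mesh.process(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
    return results.multi_face_landmarks is not None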
# Extract mean color features from the image (landmarks is accepted for
# future region-of-interest cropping but is not used yet)
def extract_features(image, landmarks):
    # OpenCV frames are BGR, so channel 2 is red and channel 0 is blue
    red_channel = image[:, :, 2]
    green_channel = image[:, :, 1]
    blue_channel = image[:, :, 0]
    red_percent = 100 * np.mean(red_channel) / 255
    green_percent = 100 * np.mean(green_channel) / 255
    blue_percent = 100 * np.mean(blue_channel) / 255
    return [red_percent, green_percent, blue_percent]
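# Example (extract_features is defined but not called in the main flow):
#   extract_features(frame, None) -> [red%, green%, blue%] over the whole frame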
# Mock model training (for demonstration only: random inputs, random targets).
# Three input features keep these models compatible with the
# [brightness_std, green_std, tone_index] vector built in analyze_video below.
def train_model(output_range):
    X = [[random.uniform(0.0, 0.5) for _ in range(3)] for _ in range(100)]
    y = [random.uniform(*output_range) for _ in X]
    model = LinearRegression().fit(X, y)
    return model
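# If the pickled models loaded below are missing, stand-ins could be produced
# with the same mock trainer, e.g. (illustrative range only):
#   joblib.dump(train_model((12.0, 17.5)), "hemoglobin_model_from_anemia_dataset.pkl")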
# Load pre-trained models for Hemoglobin, SPO2, and Heart Rate
hemoglobin_model = joblib.load("hemoglobin_model_from_anemia_dataset.pkl")
spo2_model = joblib.load("spo2_model_simulated.pkl")
hr_model = joblib.load("heart_rate_model.pkl")
# Model dictionary: the pre-trained hemoglobin model plus mock models for the other tests
models = {
"Hemoglobin": hemoglobin_model,
"WBC Count": train_model((4.0, 11.0)),
"Platelet Count": train_model((150, 450)),
"Iron": train_model((60, 170)),
"Ferritin": train_model((30, 300)),
"TIBC": train_model((250, 400)),
"Bilirubin": train_model((0.3, 1.2)),
"Creatinine": train_model((0.6, 1.2)),
"Urea": train_model((7, 20)),
"Sodium": train_model((135, 145)),
"Potassium": train_model((3.5, 5.1)),
"TSH": train_model((0.4, 4.0)),
"Cortisol": train_model((5, 25)),
"FBS": train_model((70, 110)),
"HbA1c": train_model((4.0, 5.7)),
"Albumin": train_model((3.5, 5.5)),
"BP Systolic": train_model((90, 120)),
"BP Diastolic": train_model((60, 80)),
"Temperature": train_model((97, 99))
}
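# The (low, high) output ranges above approximate common adult reference
# intervals in conventional units (e.g. µg/dL for iron, mEq/L for sodium)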
# Function to determine risk level
def get_risk_color(value, normal_range):
low, high = normal_range
if value < low:
return ("Low", "🔻", "#FFCCCC")
elif value > high:
return ("High", "🔺", "#FFE680")
else:
return ("Normal", "✅", "#CCFFCC")
# Function to build an HTML table for displaying test results
def build_table(title, rows):
html = (
f'<div style="margin-bottom: 24px;">'
f'<h4 style="margin: 8px 0;">{title}</h4>'
f'<table style="width:100%; border-collapse:collapse;">'
f'<thead><tr style="background:#f0f0f0;"><th style="padding:8px;border:1px solid #ccc;">Test</th><th style="padding:8px;border:1px solid #ccc;">Result</th><th style="padding:8px;border:1px solid #ccc;">Expected Range</th><th style="padding:8px;border:1px solid #ccc;">Level</th></tr></thead><tbody>'
)
for label, value, ref in rows:
level, icon, bg = get_risk_color(value, ref)
html += f'<tr style="background:{bg};"><td style="padding:6px;border:1px solid #ccc;">{label}</td><td style="padding:6px;border:1px solid #ccc;">{value:.2f}</td><td style="padding:6px;border:1px solid #ccc;">{ref[0]} – {ref[1]}</td><td style="padding:6px;border:1px solid #ccc;">{icon} {level}</td></tr>'
html += '</tbody></table></div>'
return html
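# e.g. build_table("Vitals", [("Heart Rate", 72.0, (60, 100))]) yields a
# self-contained HTML snippet that the gr.HTML component can render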
# Analyze the video for health metrics
def analyze_video(video_path):
    cap = cv2.VideoCapture(video_path)
    brightness_vals = []
    green_vals = []
    frame_sample = None
while True:
ret, frame = cap.read()
if not ret:
break
if frame_sample is None:
frame_sample = frame.copy()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
green = frame[:, :, 1]
brightness_vals.append(np.mean(gray))
green_vals.append(np.mean(green))
cap.release()
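    # The per-frame brightness and green-channel means form a crude rPPG trace;
    # their standard deviations below act as a stand-in for pulse amplitude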
    # Simulate heart rate and SpO2 estimation from frame statistics
    if frame_sample is None:
        raise gr.Error("Could not read any frames from the video.")
    brightness_std = np.std(brightness_vals) / 255
    green_std = np.std(green_vals) / 255
    patch = frame_sample[100:150, 100:150]
    tone_index = np.mean(patch) / 255 if patch.size else 0.5
    hr_features = [brightness_std, green_std, tone_index]
    heart_rate = float(np.clip(hr_model.predict([hr_features])[0], 60, 100))
    spo2_features = [heart_rate, np.std(brightness_vals), np.mean(patch) if patch.size else 128]
    spo2 = spo2_model.predict([spo2_features])[0]
    # Generate the health card with test results
    html_output = "".join([
        build_table("🩸 Hematology", [("Hemoglobin", models["Hemoglobin"].predict([hr_features])[0], (13.5, 17.5))]),
        build_table("🧬 Iron Panel", [("Iron", models["Iron"].predict([hr_features])[0], (60, 170))]),
        build_table("🧪 Electrolytes", [("Sodium", models["Sodium"].predict([hr_features])[0], (135, 145))]),
        build_table("❤️ Vitals", [("Heart Rate", heart_rate, (60, 100)), ("SpO2", spo2, (95, 100))]),
    ])
    # Return both outputs expected by the Gradio click handler:
    # the HTML report and an RGB key-frame snapshot
    return html_output, cv2.cvtColor(frame_sample, cv2.COLOR_BGR2RGB)
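# Quick local smoke test (assumes a sample clip at this hypothetical path):
#   html, key_frame = analyze_video("sample_face.mp4")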
# Gradio Interface setup
with gr.Blocks() as demo:
gr.Markdown("""
# 🧠 Face-Based Lab Test AI Report (Video Mode)
Upload a short face video (10–30s) to infer health diagnostics using rPPG analysis.
""")
with gr.Row():
with gr.Column():
mode_selector = gr.Radio(label="Choose Input Mode", choices=["Image", "Video"], value="Image")
image_input = gr.Image(type="numpy", label="📸 Upload Face Image")
video_input = gr.Video(label="📽 Upload Face Video", sources=["upload", "webcam"])
submit_btn = gr.Button("🔍 Analyze")
with gr.Column():
result_html = gr.HTML(label="🧪 Health Report Table")
result_image = gr.Image(label="📷 Key Frame Snapshot")
    def route_inputs(mode, image, video):
        if mode != "Video":  # analyze_video needs a file path, not a numpy image
            raise gr.Error("Image mode is not implemented yet; please upload a video.")
        return analyze_video(video)
submit_btn.click(fn=route_inputs, inputs=[mode_selector, image_input, video_input], outputs=[result_html, result_image])
gr.Markdown("""---
✅ Table Format • AI Prediction • rPPG-based HR • Dynamic Summary • Multilingual Support • CTA""")
demo.launch()
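# Passing share=True to demo.launch() would additionally expose a temporary public URL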