"""Face-Based Lab Test AI Report (Gradio demo).

Accepts a face image or a short face video and renders a mock health
report: heart rate / SpO2 from simple rPPG-style color statistics plus
a set of demonstration regression models for common lab values.

NOTE(review): most models here are trained on random data (see
``train_model``) — output is illustrative only, not medical advice.
"""

import random

import cv2
import gradio as gr
import joblib
import mediapipe as mp
import numpy as np
from sklearn.linear_model import LinearRegression

# MediaPipe Face Mesh setup. Currently unused by analyze_video; kept so
# landmark-based feature extraction (extract_features) can be wired in.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(
    static_image_mode=True,
    max_num_faces=1,
    refine_landmarks=True,
    min_detection_confidence=0.5,
)

# Number of features the mock models are trained on (see train_model).
N_MOCK_FEATURES = 7


def extract_features(image, landmarks):
    """Return the mean R/G/B intensity of *image* as percentages (0-100).

    Args:
        image: BGR image array (OpenCV channel order — index 2 is red).
        landmarks: accepted for interface compatibility; not used yet.

    Returns:
        ``[red_percent, green_percent, blue_percent]``.
    """
    red_percent = 100 * np.mean(image[:, :, 2]) / 255
    green_percent = 100 * np.mean(image[:, :, 1]) / 255
    blue_percent = 100 * np.mean(image[:, :, 0]) / 255
    return [red_percent, green_percent, blue_percent]


def train_model(output_range):
    """Fit a throwaway LinearRegression on random data (demo only).

    Args:
        output_range: ``(low, high)`` tuple; targets are drawn uniformly
            from this range.

    Returns:
        A fitted model mapping N_MOCK_FEATURES inputs to the range.
    """
    X = [
        [
            random.uniform(0.2, 0.5),
            random.uniform(0.05, 0.2),
            random.uniform(0.05, 0.2),
            random.uniform(0.2, 0.5),
            random.uniform(0.2, 0.5),
            random.uniform(0.2, 0.5),
            random.uniform(0.2, 0.5),
        ]
        for _ in range(100)
    ]
    y = [random.uniform(*output_range) for _ in X]
    return LinearRegression().fit(X, y)


# Pre-trained models for Hemoglobin, SpO2 and Heart Rate. These files must
# ship with the app; loading fails fast at startup if they are missing.
hemoglobin_model = joblib.load("hemoglobin_model_from_anemia_dataset.pkl")
spo2_model = joblib.load("spo2_model_simulated.pkl")
hr_model = joblib.load("heart_rate_model.pkl")

# Mock models for the remaining tests, keyed by display name.
models = {
    "Hemoglobin": hemoglobin_model,
    "WBC Count": train_model((4.0, 11.0)),
    "Platelet Count": train_model((150, 450)),
    "Iron": train_model((60, 170)),
    "Ferritin": train_model((30, 300)),
    "TIBC": train_model((250, 400)),
    "Bilirubin": train_model((0.3, 1.2)),
    "Creatinine": train_model((0.6, 1.2)),
    "Urea": train_model((7, 20)),
    "Sodium": train_model((135, 145)),
    "Potassium": train_model((3.5, 5.1)),
    "TSH": train_model((0.4, 4.0)),
    "Cortisol": train_model((5, 25)),
    "FBS": train_model((70, 110)),
    "HbA1c": train_model((4.0, 5.7)),
    "Albumin": train_model((3.5, 5.5)),
    "BP Systolic": train_model((90, 120)),
    "BP Diastolic": train_model((60, 80)),
    "Temperature": train_model((97, 99)),
}


def get_risk_color(value, normal_range):
    """Classify *value* against ``(low, high)`` and pick display styling.

    Returns:
        ``(level, icon, background_color)`` — e.g. ``("High", "🔺", "#FFE680")``.
    """
    low, high = normal_range
    if value < low:
        return ("Low", "🔻", "#FFCCCC")
    elif value > high:
        return ("High", "🔺", "#FFE680")
    else:
        return ("Normal", "✅", "#CCFFCC")


def build_table(title, rows):
    """Render one titled HTML table of test results.

    Args:
        title: section heading (may include an emoji).
        rows: iterable of ``(label, value, (low, high))`` triples.

    Returns:
        An HTML fragment string; each row is background-colored by risk level.
    """
    html = (
        '<div style="margin-bottom:24px;">'
        f'<h3 style="margin:4px 0;">{title}</h3>'
        '<table style="border-collapse:collapse;width:100%;">'
        '<tr style="background:#f0f0f0;">'
        '<th style="padding:6px;border:1px solid #ccc;text-align:left;">Test</th>'
        '<th style="padding:6px;border:1px solid #ccc;">Result</th>'
        '<th style="padding:6px;border:1px solid #ccc;">Expected Range</th>'
        '<th style="padding:6px;border:1px solid #ccc;">Level</th>'
        '</tr>'
    )
    for label, value, ref in rows:
        level, icon, bg = get_risk_color(value, ref)
        html += (
            f'<tr style="background:{bg};">'
            f'<td style="padding:6px;border:1px solid #ccc;">{label}</td>'
            f'<td style="padding:6px;border:1px solid #ccc;text-align:center;">{value:.2f}</td>'
            f'<td style="padding:6px;border:1px solid #ccc;text-align:center;">{ref[0]} – {ref[1]}</td>'
            f'<td style="padding:6px;border:1px solid #ccc;text-align:center;">{icon} {level}</td>'
            '</tr>'
        )
    html += '</table></div>'
    return html


def analyze_video(video):
    """Analyze a face video (file path) or a single face image (ndarray).

    Args:
        video: a video file path, or an RGB numpy image from gr.Image.

    Returns:
        ``(html_report, key_frame_rgb)``. ``key_frame_rgb`` is ``None``
        when no frame could be decoded.
    """
    brightness_vals = []
    green_vals = []
    frame_sample = None

    def _accumulate(frame):
        # Record per-frame brightness/green means; keep the first frame
        # as the snapshot used for skin-tone sampling and display.
        nonlocal frame_sample
        if frame_sample is None:
            frame_sample = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        brightness_vals.append(np.mean(gray))
        green_vals.append(np.mean(frame[:, :, 1]))

    if isinstance(video, np.ndarray):
        # Image mode: treat the single frame as a one-frame "video".
        # Gradio supplies RGB; convert to BGR to match the video path.
        # (The original fed the ndarray to cv2.VideoCapture, which fails.)
        _accumulate(cv2.cvtColor(video, cv2.COLOR_RGB2BGR))
    else:
        cap = cv2.VideoCapture(video)
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                _accumulate(frame)
        finally:
            cap.release()

    # Guard: unreadable/empty input would otherwise crash on np.std([])
    # and on slicing a None frame_sample.
    if frame_sample is None:
        return "<p>⚠️ Could not read any frames from the input.</p>", None

    # Simulated rPPG features: brightness/green variability plus a rough
    # skin-tone index from a fixed 50x50 patch of the first frame.
    brightness_std = float(np.std(brightness_vals)) / 255
    green_std = float(np.std(green_vals)) / 255
    patch = frame_sample[100:150, 100:150]
    patch_mean = float(np.mean(patch)) if patch.size else 127.5
    tone_index = patch_mean / 255 if patch.size else 0.5

    hr_features = [brightness_std, green_std, tone_index]
    heart_rate = float(np.clip(hr_model.predict([hr_features])[0], 60, 100))

    spo2_features = [heart_rate, float(np.std(brightness_vals)), patch_mean]
    spo2 = float(spo2_model.predict([spo2_features])[0])

    # The mock models expect N_MOCK_FEATURES (7) inputs; pad the 3 rPPG
    # features so the feature count matches (the original passed 3 and
    # sklearn raised a feature-count ValueError).
    mock_features = hr_features + [brightness_std, green_std, tone_index, heart_rate / 100]

    html_output = "".join([
        build_table("🩸 Hematology", [
            ("Hemoglobin", float(models["Hemoglobin"].predict([hr_features])[0]), (13.5, 17.5)),
        ]),
        build_table("🧬 Iron Panel", [
            ("Iron", float(models["Iron"].predict([mock_features])[0]), (60, 170)),
        ]),
        build_table("🧪 Electrolytes", [
            ("Sodium", float(models["Sodium"].predict([mock_features])[0]), (135, 145)),
        ]),
        build_table("❤️ Vitals", [
            ("Heart Rate", heart_rate, (60, 100)),
            ("SpO2", spo2, (95, 100)),
        ]),
    ])

    # Convert the BGR snapshot back to RGB for gr.Image display.
    key_frame_rgb = cv2.cvtColor(frame_sample, cv2.COLOR_BGR2RGB)
    return html_output, key_frame_rgb


# ---------------------------------------------------------------- UI ----
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🧠 Face-Based Lab Test AI Report (Video Mode)
        Upload a short face video (10–30s) to infer health diagnostics using rPPG analysis.
        """
    )
    with gr.Row():
        with gr.Column():
            mode_selector = gr.Radio(
                label="Choose Input Mode", choices=["Image", "Video"], value="Image"
            )
            image_input = gr.Image(type="numpy", label="📸 Upload Face Image")
            video_input = gr.Video(
                label="📽 Upload Face Video", sources=["upload", "webcam"]
            )
            submit_btn = gr.Button("🔍 Analyze")
        with gr.Column():
            result_html = gr.HTML(label="🧪 Health Report Table")
            result_image = gr.Image(label="📷 Key Frame Snapshot")

    def route_inputs(mode, image, video):
        """Dispatch to analyze_video with whichever input matches the mode.

        Returns one value per output component — (html, key_frame).
        (The original returned a single value for two declared outputs,
        which Gradio rejects at runtime.)
        """
        source = video if mode == "Video" else image
        if source is None:
            return "<p>⚠️ Please provide an input for the selected mode.</p>", None
        return analyze_video(source)

    submit_btn.click(
        fn=route_inputs,
        inputs=[mode_selector, image_input, video_input],
        outputs=[result_html, result_image],
    )
    gr.Markdown(
        """--- ✅ Table Format • AI Prediction • rPPG-based HR • Dynamic Summary • Multilingual Support • CTA"""
    )

if __name__ == "__main__":
    demo.launch()