import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
from sklearn.linear_model import LinearRegression
import random
import base64
import joblib
import pandas as pd
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from io import BytesIO
# Initialize the face mesh model
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True,
                                  max_num_faces=1,
                                  refine_landmarks=True,
                                  min_detection_confidence=0.5)
# Functions for feature extraction
def extract_features(image, landmarks):
    # Mean intensity of each BGR channel, expressed as a percentage of full scale.
    # Note: `landmarks` is accepted but not used here; the whole frame is averaged.
    red_channel = image[:, :, 2]
    green_channel = image[:, :, 1]
    blue_channel = image[:, :, 0]
    red_percent = 100 * np.mean(red_channel) / 255
    green_percent = 100 * np.mean(green_channel) / 255
    blue_percent = 100 * np.mean(blue_channel) / 255
    return [red_percent, green_percent, blue_percent]
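# Sketch (not part of the original pipeline): the unused `landmarks` argument could be
# used to restrict the colour averages to the face's bounding box. The helper name below
# is hypothetical; MediaPipe landmark x/y coordinates are normalized to [0, 1].
def extract_features_face_only(image, landmarks):
    h, w = image.shape[:2]
    xs = [int(p.x * w) for p in landmarks.landmark]
    ys = [int(p.y * h) for p in landmarks.landmark]
    x1, x2 = max(min(xs), 0), min(max(xs), w)
    y1, y2 = max(min(ys), 0), min(max(ys), h)
    face = image[y1:y2, x1:x2] if (x2 > x1 and y2 > y1) else image
    # Same R/G/B percentage features as extract_features, restricted to the face crop
    return [100 * np.mean(face[:, :, c]) / 255 for c in (2, 1, 0)]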
def train_model(output_range):
    # Fit a linear regression on 100 random 7-feature samples mapped to random
    # targets within `output_range` (placeholder models, not trained on real data).
    X = [[
        random.uniform(0.2, 0.5),
        random.uniform(0.05, 0.2),
        random.uniform(0.05, 0.2),
        random.uniform(0.2, 0.5),
        random.uniform(0.2, 0.5),
        random.uniform(0.2, 0.5),
        random.uniform(0.2, 0.5)
    ] for _ in range(100)]
    y = [random.uniform(*output_range) for _ in X]
    model = LinearRegression().fit(X, y)
    return model
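# Usage sketch: each placeholder model expects a 7-value feature vector, e.g.
#   wbc_model = train_model((4.0, 11.0))
#   wbc_model.predict([[0.3, 0.1, 0.1, 0.3, 0.3, 0.3, 0.3]])  # a value near the 4.0-11.0 target band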
# Load models
try:
    hemoglobin_model = joblib.load("hemoglobin_model_from_anemia_dataset.pkl")
    spo2_model = joblib.load("spo2_model_simulated.pkl")
    hr_model = joblib.load("heart_rate_model.pkl")
except FileNotFoundError:
    print("Error: One or more .pkl model files are missing. Please upload them.")
    exit(1)
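# Note: the .pkl files are assumed to come from a separate training script,
# e.g. joblib.dump(fitted_model, "hemoglobin_model_from_anemia_dataset.pkl").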
models = {
    "Hemoglobin": hemoglobin_model,
    "WBC Count": train_model((4.0, 11.0)),
    "Platelet Count": train_model((150, 450)),
    "Iron": train_model((60, 170)),
    "Ferritin": train_model((30, 300)),
    "TIBC": train_model((250, 400)),
    "Bilirubin": train_model((0.3, 1.2)),
    "Creatinine": train_model((0.6, 1.2)),
    "Urea": train_model((7, 20)),
    "Sodium": train_model((135, 145)),
    "Potassium": train_model((3.5, 5.1)),
    "TSH": train_model((0.4, 4.0)),
    "Cortisol": train_model((5, 25)),
    "FBS": train_model((70, 110)),
    "HbA1c": train_model((4.0, 5.7)),
    "Albumin": train_model((3.5, 5.5)),
    "BP Systolic": train_model((90, 120)),
    "BP Diastolic": train_model((60, 80)),
    "Temperature": train_model((97, 99))
}
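# Usage sketch (illustrative feature vector; assumes every model in the dict accepts
# the same 7-value layout as train_model -- the pre-trained hemoglobin model may
# expect a different input shape):
#   features = [[0.3, 0.1, 0.1, 0.3, 0.3, 0.3, 0.3]]
#   predictions = {name: float(m.predict(features)[0]) for name, m in models.items()}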
# Helper function for risk level color coding
def get_risk_color(value, normal_range):
    low, high = normal_range
    if value < low:
        return ("Low", "🔻", "#fff3cd")
    elif value > high:
        return ("High", "🔺", "#f8d7da")
    else:
        return ("Normal", "✅", "#d4edda")
# Function to build table for test results
def build_table(title, rows):
    # Table header
    html = (
        '<div style="margin-bottom: 20px;">'
        f'<h3 style="margin: 0 0 8px 0;">{title}</h3>'
        '<table style="width: 100%; border-collapse: collapse;">'
        '<tr style="background-color: #e9ecef;">'
        '<th style="padding: 6px; text-align: left;">Test</th>'
        '<th style="padding: 6px; text-align: left;">Result</th>'
        '<th style="padding: 6px; text-align: left;">Range</th>'
        '<th style="padding: 6px; text-align: left;">Level</th>'
        '</tr>'
    )
    for i, (label, value, ref) in enumerate(rows):
        level, icon, bg = get_risk_color(value, ref)
        row_bg = "#f8f9fa" if i % 2 == 0 else "white"
        if level != "Normal":
            row_bg = bg
        # Format the value with appropriate precision
        if "Count" in label or "Platelet" in label:
            value_str = f"{value:.0f}"
        else:
            value_str = f"{value:.2f}"
        html += (
            f'<tr style="background-color: {row_bg};">'
            f'<td style="padding: 6px;">{label}</td>'
            f'<td style="padding: 6px;">{value_str}</td>'
            f'<td style="padding: 6px;">{ref[0]} - {ref[1]}</td>'
            f'<td style="padding: 6px;">{icon} {level}</td>'
            '</tr>'
        )
    html += '</table></div>'
    return html
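# Example: build_table("Hematology", [("Hemoglobin", 12.8, (13.5, 17.5))])
# returns an HTML table string with one colour-coded row per (label, value, range) tuple.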
# Build health card layout
def build_health_card(profile_image, test_results, summary, patient_name="", patient_age="", patient_gender="", patient_id=""):
    from datetime import datetime
    current_date = datetime.now().strftime("%B %d, %Y")
    html = f"""
    <div>
        <h2>HEALTH CARD</h2>
        <p>Report Date: {current_date}</p>
        {f'<p>Patient ID: {patient_id}</p>' if patient_id else ''}
        <h3>{patient_name if patient_name else "Lab Test Results"}</h3>
        <p>{f"Age: {patient_age} | Gender: {patient_gender}" if patient_age and patient_gender else "AI-Generated Health Analysis"}</p>
        {test_results}
        <p>{summary}</p>
    </div>
    """
    return html
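# Example (names are illustrative): assemble a card from a table built above:
#   rows = [("Hemoglobin", 12.8, (13.5, 17.5))]
#   card_html = build_health_card(None, build_table("Hematology", rows),
#                                 "All values within the reference ranges.",
#                                 patient_name="Jane Doe", patient_age="34",
#                                 patient_gender="F", patient_id="P-001")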
# Function to generate PDF from HTML content using reportlab
def generate_pdf(html_content):
    buffer = BytesIO()
    c = canvas.Canvas(buffer, pagesize=letter)
    # Basic text-only output: the canvas writes the string verbatim, so HTML tags
    # are not rendered (modify this to match your layout if needed).
    text = c.beginText(40, 750)
    text.setFont("Helvetica", 12)
    text.textLines(html_content)  # Add the content
    c.drawText(text)
    c.showPage()
    c.save()
    buffer.seek(0)
    return buffer
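# Usage sketch: the returned BytesIO buffer can be written to disk (e.g. for a
# Gradio file output), assuming a writable working directory:
#   with open("health_card.pdf", "wb") as f:
#       f.write(generate_pdf(card_html).read())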
# Modified analyze_face function
def analyze_face(input_data):
    if isinstance(input_data, str):  # Video input (file path in Replit)
        cap = cv2.VideoCapture(input_data)
        if not cap.isOpened():
            return "<div>⚠️ Error: Could not open video.</div>", None
        ret, frame = cap.read()
        cap.release()
        if not ret:
            return "<div>⚠️ Error: Could not read video frame.</div>", None
    else:  # Image input
        frame = input_data
        if frame is None:
            return "<div>⚠️ Error: No image provided.</div>", None
    # Resize image to reduce processing time
    frame = cv2.resize(frame, (640, 480))  # Adjust resolution for Replit
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Run the MediaPipe face mesh on the RGB frame
    result = face_mesh.process(frame_rgb)
    if not result.multi_face_landmarks:
        return "<div>⚠️ Error: No face detected.</div>", None