Update app.py
app.py
CHANGED
@@ -1,3 +1,4 @@
+
 import gradio as gr
 import cv2
 import numpy as np
@@ -6,10 +7,6 @@ from sklearn.linear_model import LinearRegression
 import random
 import base64
 import joblib
-import pandas as pd
-from reportlab.lib.pagesizes import letter
-from reportlab.pdfgen import canvas
-from io import BytesIO
 
 # Initialize the face mesh model
 mp_face_mesh = mp.solutions.face_mesh
@@ -18,6 +15,7 @@ face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True,
                                   refine_landmarks=True,
                                   min_detection_confidence=0.5)
 
+
 # Functions for feature extraction
 def extract_features(image, landmarks):
     red_channel = image[:, :, 2]
@@ -52,7 +50,8 @@ try:
     spo2_model = joblib.load("spo2_model_simulated.pkl")
     hr_model = joblib.load("heart_rate_model.pkl")
 except FileNotFoundError:
-    print("Error: One or more .pkl model files are missing. Please upload them.")
+    print(
+        "Error: One or more .pkl model files are missing. Please upload them.")
     exit(1)
 
 models = {
@@ -196,23 +195,8 @@ def build_health_card(profile_image, test_results, summary, patient_name="", pat
     return html
 
 
-# Function to generate a PDF from the HTML content
-def generate_pdf(html_content):
-    buffer = BytesIO()
-    c = canvas.Canvas(buffer, pagesize=letter)
-
-    # Adding basic content to PDF (you can modify this to match your layout)
-    text = c.beginText(40, 750)
-    text.setFont("Helvetica", 12)
-    text.textLines(html_content)  # Add the content
-
-    c.drawText(text)
-    c.showPage()
-    c.save()
-
-    buffer.seek(0)
-    return buffer
-
+# Initialize global variable for patient details
+current_patient_details = {'name': '', 'age': '', 'gender': '', 'id': ''}
 
 # Modified analyze_face function
 def analyze_face(input_data):
@@ -239,16 +223,12 @@ def analyze_face(input_data):
         landmarks = result.multi_face_landmarks[
             0].landmark  # Fixed: Use integer index
         features = extract_features(frame_rgb, landmarks)
-
-        # Convert features to pandas DataFrame with correct column names (matching the training phase)
-        features_df = pd.DataFrame([features], columns=["%red pixel", "%green pixel", "%blue pixel"])  # Ensure lowercase column names
-
         test_values = {}
         r2_scores = {}
 
         for label in models:
             if label == "Hemoglobin":
-                prediction = models[label].predict(features_df)[0]
+                prediction = models[label].predict([features])[0]
                 test_values[label] = prediction
                 r2_scores[label] = 0.385
             else:
@@ -319,10 +299,7 @@ def analyze_face(input_data):
            current_patient_details['gender'],
            current_patient_details['id']
        )
-
-        # Generate PDF from the HTML content
-        pdf_file = generate_pdf(health_card_html)
-        return health_card_html, pdf_file
+        return health_card_html, frame_rgb
 
 
 # Modified route_inputs function
@@ -341,9 +318,8 @@ def route_inputs(mode, image, video, patient_name, patient_age, patient_gender,
         'id': patient_id
     }
 
-    health_card_html, pdf_file = analyze_face(image if mode == "Image" else video)
-
-    return health_card_html, pdf_file
+    health_card_html, frame_rgb = analyze_face(image if mode == "Image" else video)
+    return health_card_html, frame_rgb
 
 
 # Create Gradio interface
@@ -368,11 +344,10 @@ with gr.Blocks() as demo:
         with gr.Column():
             result_html = gr.HTML(label="🧪 Health Report Table")
             result_image = gr.Image(label="📷 Key Frame Snapshot")
-
-
+
     submit_btn.click(fn=route_inputs,
                      inputs=[mode_selector, image_input, video_input, patient_name, patient_age, patient_gender, patient_id],
                      outputs=[result_html, result_image])
 
 # Launch Gradio for Replit
-demo.launch(server_name="0.0.0.0", server_port=7860)
+demo.launch(server_name="0.0.0.0", server_port=7860)
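
The main behavioural change in the Hemoglobin branch is that the raw feature list from extract_features is now passed straight to the model instead of being wrapped in a pandas DataFrame; scikit-learn estimators accept any 2-D array-like, so predict([features])[0] yields a single scalar. Below is a minimal, self-contained sketch of that call pattern, with made-up training data standing in for whatever fitted regressor the app stores under models["Hemoglobin"], and three invented colour fractions standing in for the output of extract_features.

# Minimal sketch, not the app's actual model: a LinearRegression fitted on
# made-up colour-fraction data stands in for models["Hemoglobin"].
import numpy as np
from sklearn.linear_model import LinearRegression

X_train = np.array([[0.40, 0.32, 0.28],
                    [0.42, 0.30, 0.28],
                    [0.38, 0.34, 0.28]])   # hypothetical %red / %green / %blue fractions
y_train = np.array([13.5, 12.8, 14.1])     # hypothetical hemoglobin targets

model = LinearRegression().fit(X_train, y_train)

features = [0.41, 0.31, 0.28]              # stand-in for extract_features() output
prediction = model.predict([features])[0]  # 2-D array-like; no DataFrame needed
print(round(prediction, 2))

One caveat with dropping the DataFrame: if the bundled .pkl model was originally fitted on a DataFrame with named columns, recent scikit-learn versions will emit a feature-name warning when predict is called with a plain list, although the prediction itself still goes through.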