Update app.py
app.py CHANGED
@@ -18,11 +18,11 @@ from PIL import Image
 #model_alzheimer = keras.models.load_model(
 #    "models/model_alzheimer.h5", custom_objects={'KerasLayer': hub.KerasLayer}
 
-# API key and user ID for
+# API key and user ID for on-demand
 api_key = 'KGSjxB1uptfSk8I8A7ciCuNT9Xa3qWC3'
 external_user_id = 'plugin-1717464304'
 
-# Step 1: Create a chat session
+# Step 1: Create a chat session with the API
 def create_chat_session():
     create_session_url = 'https://api.on-demand.io/chat/v1/sessions'
     create_session_headers = {
@@ -32,13 +32,12 @@ def create_chat_session():
         "pluginIds": [],
         "externalUserId": external_user_id
     }
-    # Make the request to create a chat session
    response = requests.post(create_session_url, headers=create_session_headers, json=create_session_body)
    response_data = response.json()
    session_id = response_data['data']['id']
    return session_id
 
-# Step 2: Submit
+# Step 2: Submit query to the API
 def submit_query(session_id, query):
     submit_query_url = f'https://api.on-demand.io/chat/v1/sessions/{session_id}/query'
     submit_query_headers = {
@@ -51,9 +50,9 @@ def submit_query(session_id, query):
         "responseMode": "sync"
     }
     response = requests.post(submit_query_url, headers=submit_query_headers, json=submit_query_body)
-    return response.json()
+    return response.json()
 
-#
+# Combined disease model (placeholder)
 class CombinedDiseaseModel(tf.keras.Model):
     def __init__(self, model_initial, model_alzheimer, model_tumor, model_stroke):
         super(CombinedDiseaseModel, self).__init__()
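Side note on the two API helpers above: create_chat_session() opens a session and submit_query() posts against it, so a full round trip is just the two calls in order. A minimal sketch of that flow, assuming valid credentials; ON_DEMAND_API_KEY is a name invented for this sketch (the committed code hard-codes the key), and sourcing it from the environment is the usual way to keep it out of the repo:

    import os

    # Hypothetical: read the key from the environment instead of the file.
    api_key = os.environ.get('ON_DEMAND_API_KEY', api_key)

    session_id = create_chat_session()          # Step 1: open a session
    result = submit_query(session_id, 'Hello')  # Step 2: send a query
    print(result)                               # raw JSON returned by the API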
@@ -62,7 +61,6 @@ class CombinedDiseaseModel(tf.keras.Model):
         self.model_tumor = model_tumor
         self.model_stroke = model_stroke
         self.disease_labels = ["Alzheimer's", 'No Disease', 'Stroke', 'Tumor']
-
         self.sub_models = {
             "Alzheimer's": model_alzheimer,
             'Tumor': model_tumor,
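The __init__ above pairs an initial four-way classifier with per-disease sub-models via the sub_models dict; the call method that does the routing is only partially visible in the next hunk. A self-contained sketch of that two-stage dispatch pattern, illustrative only and not the committed call() method (any callables can stand in for the Keras models):

    import numpy as np

    def two_stage_predict(x, model_initial, sub_models, labels):
        # Stage 1: the initial model picks the main disease label.
        main_probs = np.asarray(model_initial(x)).ravel()  # batch of one assumed
        idx = int(np.argmax(main_probs))
        main_disease = labels[idx]
        # Stage 2: a dedicated sub-model, if one exists, refines the diagnosis.
        sub_probs = sub_models[main_disease](x) if main_disease in sub_models else None
        return main_disease, float(main_probs[idx]), sub_probs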
@@ -93,52 +91,30 @@ class CombinedDiseaseModel(tf.keras.Model):
 
         sub_category = sub_category_label[sub_category]
 
-        return f"The MRI image shows {main_disease} with a probability of {main_disease_prob*100:.2f}%.\
-
-# Example CNN models (mockup)
-# cnn_model = CombinedDiseaseModel(model_initial, model_alzheimer, model_tumor, model_stroke)
+        return f"The MRI image shows {main_disease} with a probability of {main_disease_prob*100:.2f}%.\n" \
+               f"The subcategory of {main_disease} is {sub_category} with a probability of {sub_category_prob*100:.2f}%."
 
+# Placeholder function to process images
 def process_image(image):
-    """
-    Processes the uploaded MRI image and makes predictions using the combined CNN model.
-    """
     image = image.resize((256, 256))
-    image
+    image.convert("RGB")
     image_array = np.array(image) / 255.0
     image_array = np.expand_dims(image_array, axis=0)
-
-    predictions = cnn_model(image_array)
-    return
-
-def query_llm_via_on_demand(patient_info, query_type):
-    """
-    Sends patient information and query type to the on-demand API and returns the generated response.
-    """
-    session_id = create_chat_session()
-    query = f"Patient Information: {patient_info}\nQuery Type: {query_type}\nPlease provide additional insights."
-
-    response = submit_query(session_id, query)
-    return response
+    # Prediction logic here
+    # predictions = cnn_model(image_array)
+    return "Mock prediction: Disease identified with a probability of 85%."
 
-
-
-    Gradio interface function that processes patient info, query type, and an optional MRI image,
-    and provides results from both the CNN model and the on-demand LLM API.
-    """
-    image_response = ""
+# Function to handle patient info, query, and image processing
+def gradio_interface(patient_info, query_type, image):
     if image is not None:
         image_response = process_image(image)
-
-
-
-
-
-    if image_response:
-        response += f"\n\nImage Diagnosis:\n{image_response}"
-
-    return response
+        session_id = create_chat_session()
+        llm_response = submit_query(session_id, f"Patient Info: {patient_info}. Query Type: {query_type}.")
+        return f"Patient Info: {patient_info}\nQuery Type: {query_type}\n\n{image_response}\n\nLLM Response:\n{llm_response['data']['message']}"
+    else:
+        return "Please upload an image."
 
-#
+# Gradio interface
 iface = gr.Interface(
     fn=gradio_interface,
     inputs=[
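One detail worth flagging in the new process_image: PIL's Image.convert returns a new image rather than converting in place, so the added image.convert("RGB") line as committed has no effect. A standalone sketch of the intended preprocessing ('scan.png' is a placeholder path):

    from PIL import Image
    import numpy as np

    img = Image.open('scan.png').convert('RGB')  # convert() returns a new image; assign it
    img = img.resize((256, 256))
    batch = np.expand_dims(np.array(img) / 255.0, axis=0)  # shape (1, 256, 256, 3)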
@@ -150,18 +126,16 @@ iface = gr.Interface(
         ),
         gr.Textbox(
             label="Query Type",
-            placeholder="
+            placeholder="Describe the type of diagnosis or information needed..."
         ),
         gr.Image(
             type="pil",
             label="Upload an MRI Image",
-            optional=True  # Allow the image input to be optional
         )
     ],
     outputs=gr.Textbox(label="Response", placeholder="The response will appear here..."),
-    title="Medical Diagnosis with MRI and
-    description="Upload MRI images and provide patient information for
+    title="Medical Diagnosis with MRI and LLM",
+    description="Upload MRI images and provide patient information for a combined CNN model and LLM analysis."
 )
 
-# Launch the Gradio app
 iface.launch()
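To exercise the new gradio_interface handler without launching the UI, a quick sketch (assumes valid API credentials; 'test_mri.png' is a placeholder path, and the handler requires an image or it returns "Please upload an image."):

    from PIL import Image

    img = Image.open('test_mri.png')
    print(gradio_interface('62-year-old male, recurring headaches', 'diagnosis', img))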