yashbyname committed (verified)
Commit a149a7b · 1 Parent(s): ac8dfb0

Update app.py

Files changed (1)
  1. app.py +85 -34
app.py CHANGED
@@ -1,8 +1,8 @@
-import gradio as gr
+import requests
 import numpy as np
 import tensorflow as tf
-from tensorflow import keras
 import tensorflow_hub as hub
+import gradio as gr
 from PIL import Image
 
 # Load models
@@ -17,8 +17,43 @@ from PIL import Image
 #)
 #model_alzheimer = keras.models.load_model(
 #    "models/model_alzheimer.h5", custom_objects={'KerasLayer': hub.KerasLayer}
-#)
 
+# API key and user ID for the new API
+api_key = 'KGSjxB1uptfSk8I8A7ciCuNT9Xa3qWC3'
+external_user_id = 'plugin-1717464304'
+
+# Step 1: Create a chat session
+def create_chat_session():
+    create_session_url = 'https://api.on-demand.io/chat/v1/sessions'
+    create_session_headers = {
+        'apikey': api_key
+    }
+    create_session_body = {
+        "pluginIds": [],
+        "externalUserId": external_user_id
+    }
+    # Make the request to create a chat session
+    response = requests.post(create_session_url, headers=create_session_headers, json=create_session_body)
+    response_data = response.json()
+    session_id = response_data['data']['id']
+    return session_id
+
+# Step 2: Submit a query to the API
+def submit_query(session_id, query):
+    submit_query_url = f'https://api.on-demand.io/chat/v1/sessions/{session_id}/query'
+    submit_query_headers = {
+        'apikey': api_key
+    }
+    submit_query_body = {
+        "endpointId": "predefined-openai-gpt4o",
+        "query": query,
+        "pluginIds": ["plugin-1712327325", "plugin-1713962163"],
+        "responseMode": "sync"
+    }
+    response = requests.post(submit_query_url, headers=submit_query_headers, json=submit_query_body)
+    return response.json()['data']['response']
+
+# CNN Model for MRI Image Diagnosis (mockup since actual model code isn't available)
 class CombinedDiseaseModel(tf.keras.Model):
     def __init__(self, model_initial, model_alzheimer, model_tumor, model_stroke):
         super(CombinedDiseaseModel, self).__init__()
@@ -60,37 +95,50 @@ class CombinedDiseaseModel(tf.keras.Model):
 
         return f"The MRI image shows {main_disease} with a probability of {main_disease_prob*100:.2f}%.\nThe subcategory of {main_disease} is {sub_category} with a probability of {sub_category_prob*100:.2f}%."
 
-
-# Initialize the combined model
-#cnn_model = CombinedDiseaseModel(
-#    model_initial=model_initial,
-#    model_alzheimer=model_alzheimer,
-#    model_tumor=model_tumor,
-#    model_stroke=model_stroke
-#)
-
+# Example CNN models (mockup)
+# cnn_model = CombinedDiseaseModel(model_initial, model_alzheimer, model_tumor, model_stroke)
 
 def process_image(image):
+    """
+    Processes the uploaded MRI image and makes predictions using the combined CNN model.
+    """
     image = image.resize((256, 256))
-    image.convert("RGB")
+    image = image.convert("RGB")
    image_array = np.array(image) / 255.0
     image_array = np.expand_dims(image_array, axis=0)
-    predictions = cnn_model(image_array)
+
+    predictions = cnn_model(image_array)  # Call the model to get predictions (replace with actual model call)
     return predictions
 
-
-def gradio_interface(patient_info, query_type, image):
+def query_llm_via_on_demand(patient_info, query_type):
+    """
+    Sends patient information and query type to the on-demand API and returns the generated response.
+    """
+    session_id = create_chat_session()
+    query = f"Patient Information: {patient_info}\nQuery Type: {query_type}\nPlease provide additional insights."
+
+    response = submit_query(session_id, query)
+    return response
+
+def gradio_interface(patient_info, query_type, image=None):
+    """
+    Gradio interface function that processes patient info, query type, and an optional MRI image,
+    and provides results from both the CNN model and the on-demand LLM API.
+    """
+    image_response = ""
     if image is not None:
         image_response = process_image(image)
-        response = f"Patient Info: {patient_info}\nQuery Type: {query_type}\n{image_response}"
-        return response
-    else:
-        return "Please upload an image."
-
-def gradio_interface(patient_info, query_type):
-    return f"Patient Info: {patient_info}\nQuery Type: {query_type}"
-
-# Create Gradio app
+
+    llm_response = query_llm_via_on_demand(patient_info, query_type)
+
+    response = f"Patient Info: {patient_info}\nQuery Type: {query_type}\n\nLLM Response:\n{llm_response}"
+
+    if image_response:
+        response += f"\n\nImage Diagnosis:\n{image_response}"
+
+    return response
+
+# Create Gradio interface with MRI image upload option
 iface = gr.Interface(
     fn=gradio_interface,
     inputs=[
@@ -101,16 +149,19 @@ iface = gr.Interface(
             max_lines=10
         ),
         gr.Textbox(
-            label="Query Type"
-        )#,
-        #gr.Image(
-        #    type="pil",
-        #    label="Upload an Image",
-        #)
+            label="Query Type",
+            placeholder="Enter the query type here..."
+        ),
+        gr.Image(
+            type="pil",
+            label="Upload an MRI Image",
+            optional=True  # Allow the image input to be optional
+        )
     ],
     outputs=gr.Textbox(label="Response", placeholder="The response will appear here..."),
-    title="Medical Diagnosis with MRI",
-    description="Upload MRI images and provide patient information for diagnosis.",
+    title="Medical Diagnosis with MRI and On-Demand LLM Insights",
+    description="Upload MRI images and provide patient information for diagnosis. The system integrates MRI diagnosis with insights from the on-demand LLM API."
 )
 
-iface.launch()
+# Launch the Gradio app
+iface.launch()
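
The two new helpers boil down to a two-step flow against the on-demand.io chat API: POST to /chat/v1/sessions to open a session, then POST the query to /chat/v1/sessions/{session_id}/query with responseMode set to "sync". Below is a minimal standalone sketch of that same flow; the ask() wrapper, the ON_DEMAND_API_KEY environment variable, and the raise_for_status() checks are illustrative additions, not part of the committed code.

import os
import requests

API_BASE = "https://api.on-demand.io/chat/v1"
API_KEY = os.environ.get("ON_DEMAND_API_KEY", "")  # illustrative; the commit hard-codes the key in app.py
EXTERNAL_USER_ID = "plugin-1717464304"

def ask(query):
    # Step 1: create a chat session and read its id from data.id
    session = requests.post(
        f"{API_BASE}/sessions",
        headers={"apikey": API_KEY},
        json={"pluginIds": [], "externalUserId": EXTERNAL_USER_ID},
    )
    session.raise_for_status()
    session_id = session.json()["data"]["id"]

    # Step 2: submit the query synchronously against that session
    answer = requests.post(
        f"{API_BASE}/sessions/{session_id}/query",
        headers={"apikey": API_KEY},
        json={
            "endpointId": "predefined-openai-gpt4o",
            "query": query,
            "pluginIds": ["plugin-1712327325", "plugin-1713962163"],
            "responseMode": "sync",
        },
    )
    answer.raise_for_status()
    return answer.json()["data"]["response"]

if __name__ == "__main__":
    print(ask("Patient Information: example case\nQuery Type: general\nPlease provide additional insights."))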
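
process_image itself is a plain resize / convert / normalize / batch pipeline; only the final cnn_model call depends on the combined model, which is still commented out in this commit. A quick sketch of the expected tensor shape, using a synthetic PIL image in place of an uploaded MRI scan and omitting the model call:

import numpy as np
from PIL import Image

# Synthetic stand-in for an uploaded MRI image
image = Image.new("L", (512, 512))

# Same preprocessing steps as process_image in this commit
image = image.resize((256, 256))
image = image.convert("RGB")
image_array = np.array(image) / 255.0
image_array = np.expand_dims(image_array, axis=0)

print(image_array.shape)  # (1, 256, 256, 3) -- the batched input the CNN expects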
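
Since the image argument now defaults to None, the new gradio_interface can be smoke-tested without launching the UI or loading the CNN model; with no image, only the LLM path runs. The sample inputs below are placeholders, not values from the commit:

# Assumes the functions defined in app.py above are importable; no image, so only the LLM path runs.
print(gradio_interface("65-year-old patient, progressive memory loss", "General guidance"))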