burhan112 committed (verified)
Commit a9d2dec · 1 Parent(s): 421c21c

Update app.py

Files changed (1):
app.py (+59 -41)
app.py CHANGED
@@ -9,7 +9,7 @@ import os
 
 # Load data and FAISS index
 def load_data_and_index():
-    docs_df = pd.read_pickle("data.pkl")  # Adjust path for HF Spaces
+    docs_df = pd.read_pickle("docs_with_embeddings (1).pkl")  # Adjust path for HF Spaces
     embeddings = np.array(docs_df['embeddings'].tolist(), dtype=np.float32)
     dimension = embeddings.shape[1]
     index = faiss.IndexFlatL2(dimension)
@@ -44,7 +44,38 @@ def retrieve_docs(query, k=5):
     retrieved_docs['distance'] = distances[0]
     return retrieved_docs
 
-# Respond function with HTML formatting
+# Parse response into structured sections
+def parse_response(response_text):
+    sections = {
+        "Symptoms": [],
+        "Signs": [],
+        "Risk Factors": [],
+        "Diagnostic Criteria": [],
+        "Other": []
+    }
+
+    # Simple regex-based parsing (adjust based on your Gemini output format)
+    lines = response_text.split('\n')
+    current_section = "Other"
+
+    for line in lines:
+        line = line.strip()
+        if line.lower().startswith("symptoms:"):
+            current_section = "Symptoms"
+        elif line.lower().startswith("signs:"):
+            current_section = "Signs"
+        elif line.lower().startswith("risk factors") or line.lower().startswith("past medical history:"):
+            current_section = "Risk Factors"
+        elif line.lower().startswith("diagnostic criteria:"):
+            current_section = "Diagnostic Criteria"
+        elif line and not line.startswith((' ', '\t')) and ':' in line:
+            current_section = "Other"
+        if line and not line.endswith(':'):
+            sections[current_section].append(line)
+
+    return sections
+
+# Respond function with generic HTML formatting
 def respond(message, system_message, max_tokens, temperature, top_p):
     # Preprocess the user message
     preprocessed_query = preprocess_text(message)
@@ -58,7 +89,8 @@ def respond(message, system_message, max_tokens, temperature, top_p):
     prompt += (
         f"Query: {message}\n"
         f"Relevant Context: {context}\n"
-        f"Generate a short, concise, and to-the-point response to the query based only on the provided context. Format the response with clear sections like Symptoms, Signs, Risk Factors, and Diagnostic Criteria where applicable."
+        f"Generate a concise response to the query based only on the provided context. "
+        f"Structure the response with clear sections like 'Symptoms:', 'Signs:', 'Risk Factors:', and 'Diagnostic Criteria:' where applicable."
     )
 
     # Generate response with Gemini
@@ -71,48 +103,34 @@ def respond(message, system_message, max_tokens, temperature, top_p):
     )
     answer = response.text.strip()
 
+    # Parse the response into sections
+    sections = parse_response(answer)
+
     # Format the response into HTML with CSS styling
     html_response = """
     <style>
-        .diagnosis-container { font-family: Arial, sans-serif; line-height: 1.6; padding: 10px; }
-        h2 { color: #2c3e50; font-size: 20px; margin-bottom: 10px; }
-        h3 { color: #2980b9; font-size: 16px; margin-top: 15px; margin-bottom: 5px; }
-        ul { margin: 0; padding-left: 20px; }
-        li { margin-bottom: 5px; }
-        p { margin: 5px 0; }
+        .diagnosis-container { font-family: Arial, sans-serif; line-height: 1.6; padding: 15px; max-width: 800px; margin: auto; }
+        h2 { color: #2c3e50; font-size: 24px; margin-bottom: 15px; border-bottom: 2px solid #2980b9; padding-bottom: 5px; }
+        h3 { color: #2980b9; font-size: 18px; margin-top: 15px; margin-bottom: 8px; }
+        ul { margin: 0; padding-left: 25px; }
+        li { margin-bottom: 6px; color: #34495e; }
+        p { margin: 5px 0; color: #34495e; }
     </style>
     <div class="diagnosis-container">
-        <h2>Diagnosis</h2>
+        <h2>AI Response</h2>
+        <p>Based on the provided context, here is the information relevant to your query:</p>
     """
 
-    # Parse the response and structure it (this is a simple example; adjust based on actual output)
-    if "heart failure" in message.lower():
-        html_response += """
-        <p>Based on the provided context, the following information supports the query "heart failure":</p>
-        <h3>Symptoms</h3>
-        <ul>
-            <li>Breathlessness (dyspnea on exertion, progressive SOB)</li>
-            <li>Reduced exercise tolerance</li>
-            <li>Ankle swelling (edema in legs)</li>
-        </ul>
-        <h3>Signs</h3>
-        <ul>
-            <li>Elevated jugular venous pressure (markedly elevated JVP)</li>
-        </ul>
-        <h3>Risk Factors/Past Medical History</h3>
-        <ul>
-            <li>Coronary artery disease (CAD s/p CABG)</li>
-            <li>Arrhythmias (Paroxysmal atrial fibrillation)</li>
-            <li>Hypertension</li>
-        </ul>
-        <h3>Diagnostic Criteria</h3>
-        <ul>
-            <li>Elevated BNP</li>
-        </ul>
-        """
-    else:
-        # Fallback for other queries
-        html_response += f"<p>{answer}</p>"
+    # Add sections dynamically
+    for section, items in sections.items():
+        if items:  # Only include sections that have content
+            html_response += f"<h3>{section}</h3>"
+            html_response += "<ul>"
+            for item in items:
+                # Remove section prefix if present (e.g., "Symptoms:" from the first line)
+                cleaned_item = re.sub(rf"^{section}:", "", item, flags=re.IGNORECASE).strip()
+                html_response += f"<li>{cleaned_item}</li>"
+            html_response += "</ul>"
 
     html_response += "</div>"
     return html_response
@@ -121,7 +139,7 @@ def respond(message, system_message, max_tokens, temperature, top_p):
 demo = gr.Interface(
     fn=respond,
     inputs=[
-        gr.Textbox(label="Your Query", placeholder="Enter your medical question here (e.g., heart failure)..."),
+        gr.Textbox(label="Your Query", placeholder="Enter your medical question here (e.g., diabetes, heart failure)..."),
         gr.Textbox(
             value="You are a medical AI assistant diagnosing patients based on their query, using relevant context from past records of other patients.",
             label="System Message"
@@ -137,8 +155,8 @@ demo = gr.Interface(
         ),
     ],
     outputs=gr.HTML(label="Diagnosis"),
-    title="🏥 Medical Assistant",
-    description="A simple medical assistant that diagnoses patient queries using AI and past records, with styled output."
+    title="🏥 Medical Query Assistant",
+    description="A medical assistant that diagnoses patient queries using AI and past records, with styled output for any condition."
 )
 
 if __name__ == "__main__":
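
For context on what the change does end to end: the hard-coded heart-failure HTML block is replaced by parse_response plus a generic rendering loop. Below is a minimal, self-contained sketch of that loop, with a hand-built sections dict standing in for parse_response(answer); the sample entries are invented for illustration, while the dict keys, the empty-section check, and the prefix-stripping regex mirror the committed code.

import re

# Hypothetical sample of what parse_response(answer) might return for a
# "Section:"-formatted Gemini reply (contents invented for illustration).
sections = {
    "Symptoms": ["Symptoms: Breathlessness on exertion", "Ankle swelling"],
    "Signs": ["Elevated JVP"],
    "Risk Factors": [],
    "Diagnostic Criteria": ["Elevated BNP"],
    "Other": [],
}

html_response = '<div class="diagnosis-container">'
for section, items in sections.items():
    if items:  # skip empty sections, as the commit does (here: Risk Factors, Other)
        html_response += f"<h3>{section}</h3><ul>"
        for item in items:
            # Strip a leading "Symptoms:", "Signs:", ... prefix left over from the header line
            cleaned_item = re.sub(rf"^{section}:", "", item, flags=re.IGNORECASE).strip()
            html_response += f"<li>{cleaned_item}</li>"
        html_response += "</ul>"
html_response += "</div>"

print(html_response)
# <div class="diagnosis-container"><h3>Symptoms</h3><ul><li>Breathlessness on exertion</li>
# <li>Ankle swelling</li></ul><h3>Signs</h3>...<h3>Diagnostic Criteria</h3><ul><li>Elevated BNP</li></ul></div>

One thing to double-check when deploying: the new rendering loop calls re.sub, so app.py needs import re alongside its existing imports if it is not already present.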