burhan112 committed on
Commit
5574a92
·
verified ·
1 Parent(s): ed8c0cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -71
app.py CHANGED
@@ -44,38 +44,7 @@ def retrieve_docs(query, k=5):
44
  retrieved_docs['distance'] = distances[0]
45
  return retrieved_docs
46
 
47
- # Parse response into structured sections
48
- def parse_response(response_text):
49
- sections = {
50
- "Symptoms": [],
51
- "Signs": [],
52
- "Risk Factors": [],
53
- "Diagnostic Criteria": [],
54
- "Other": []
55
- }
56
-
57
- # Simple regex-based parsing (adjust based on your Gemini output format)
58
- lines = response_text.split('\n')
59
- current_section = "Other"
60
-
61
- for line in lines:
62
- line = line.strip()
63
- if line.lower().startswith("symptoms:"):
64
- current_section = "Symptoms"
65
- elif line.lower().startswith("signs:"):
66
- current_section = "Signs"
67
- elif line.lower().startswith("risk factors") or line.lower().startswith("past medical history:"):
68
- current_section = "Risk Factors"
69
- elif line.lower().startswith("diagnostic criteria:"):
70
- current_section = "Diagnostic Criteria"
71
- elif line and not line.startswith((' ', '\t')) and ':' in line:
72
- current_section = "Other"
73
- if line and not line.endswith(':'):
74
- sections[current_section].append(line)
75
-
76
- return sections
77
-
78
- # Respond function with generic HTML formatting
79
  def respond(message, system_message, max_tokens, temperature, top_p):
80
  # Preprocess the user message
81
  preprocessed_query = preprocess_text(message)
@@ -89,8 +58,7 @@ def respond(message, system_message, max_tokens, temperature, top_p):
89
  prompt += (
90
  f"Query: {message}\n"
91
  f"Relevant Context: {context}\n"
92
- f"Generate a concise response to the query based only on the provided context. "
93
- f"Structure the response with clear sections like 'Symptoms:', 'Signs:', 'Risk Factors:', and 'Diagnostic Criteria:' where applicable."
94
  )
95
 
96
  # Generate response with Gemini
@@ -102,44 +70,20 @@ def respond(message, system_message, max_tokens, temperature, top_p):
102
  )
103
  )
104
  answer = response.text.strip()
 
 
 
 
 
 
 
 
105
 
106
- # Parse the response into sections
107
- sections = parse_response(answer)
108
-
109
- # Format the response into HTML with CSS styling
110
- html_response = """
111
- <style>
112
- .diagnosis-container { font-family: Arial, sans-serif; line-height: 1.6; padding: 15px; max-width: 800px; margin: auto; }
113
- h2 { color: #2c3e50; font-size: 24px; margin-bottom: 15px; border-bottom: 2px solid #2980b9; padding-bottom: 5px; }
114
- h3 { color: #2980b9; font-size: 18px; margin-top: 15px; margin-bottom: 8px; }
115
- ul { margin: 0; padding-left: 25px; }
116
- li { margin-bottom: 6px; color: #34495e; }
117
- p { margin: 5px 0; color: #34495e; }
118
- </style>
119
- <div class="diagnosis-container">
120
- <h2>AI Response</h2>
121
- <p>Based on the provided context, here is the information relevant to your query:</p>
122
- """
123
-
124
- # Add sections dynamically
125
- for section, items in sections.items():
126
- if items: # Only include sections that have content
127
- html_response += f"<h3>{section}</h3>"
128
- html_response += "<ul>"
129
- for item in items:
130
- # Remove section prefix if present (e.g., "Symptoms:" from the first line)
131
- cleaned_item = re.sub(rf"^{section}:", "", item, flags=re.IGNORECASE).strip()
132
- html_response += f"<li>{cleaned_item}</li>"
133
- html_response += "</ul>"
134
-
135
- html_response += "</div>"
136
- return html_response
137
-
138
- # Simple Gradio Interface with HTML output
139
  demo = gr.Interface(
140
  fn=respond,
141
  inputs=[
142
- gr.Textbox(label="Your Query", placeholder="Enter your medical question here (e.g., diabetes, heart failure)..."),
143
  gr.Textbox(
144
  value="You are a medical AI assistant diagnosing patients based on their query, using relevant context from past records of other patients.",
145
  label="System Message"
@@ -154,9 +98,9 @@ demo = gr.Interface(
154
  label="Top-p (nucleus sampling)", # Included but not used by Gemini
155
  ),
156
  ],
157
- outputs=gr.HTML(label="Diagnosis"),
158
- title="🏥 Medical Query Assistant",
159
- description="A medical assistant that diagnoses patient queries using AI and past records, with styled output for any condition."
160
  )
161
 
162
  if __name__ == "__main__":
 
44
  retrieved_docs['distance'] = distances[0]
45
  return retrieved_docs
46
 
47
+ # Simplified respond function (no history)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  def respond(message, system_message, max_tokens, temperature, top_p):
49
  # Preprocess the user message
50
  preprocessed_query = preprocess_text(message)
 
58
  prompt += (
59
  f"Query: {message}\n"
60
  f"Relevant Context: {context}\n"
61
+ f"Generate a short, concise, and to-the-point response to the query based only on the provided context."
 
62
  )
63
 
64
  # Generate response with Gemini
 
70
  )
71
  )
72
  answer = response.text.strip()
73
+ if not answer.endswith('.'):
74
+ last_period = answer.rfind('.')
75
+ if last_period != -1:
76
+ answer = answer[:last_period + 1]
77
+ else:
78
+ answer += "."
79
+
80
+ return answer
81
 
82
+ # Simple Gradio Interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  demo = gr.Interface(
84
  fn=respond,
85
  inputs=[
86
+ gr.Textbox(label="Your Query", placeholder="Enter your medical question here..."),
87
  gr.Textbox(
88
  value="You are a medical AI assistant diagnosing patients based on their query, using relevant context from past records of other patients.",
89
  label="System Message"
 
98
  label="Top-p (nucleus sampling)", # Included but not used by Gemini
99
  ),
100
  ],
101
+ outputs=gr.Textbox(label="Diagnosis"),
102
+ title="🏥 Medical Assistant",
103
+ description="A simple medical assistant that diagnoses patient queries using AI and past records."
104
  )
105
 
106
  if __name__ == "__main__":