Ali2206 committed on
Commit
91fbd4d
·
verified ·
1 Parent(s): 26668b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -6
app.py CHANGED
@@ -53,7 +53,6 @@ def extract_priority_pages(file_path: str) -> str:
53
  with pdfplumber.open(file_path) as pdf:
54
  for i, page in enumerate(pdf.pages):
55
  page_text = page.extract_text() or ""
56
- # Include first 3 pages or pages with medical keywords
57
  if i < 3 or any(re.search(rf'\b{kw}\b', page_text.lower()) for kw in MEDICAL_KEYWORDS):
58
  text_chunks.append(f"=== Page {i+1} ===\n{page_text.strip()}")
59
  return "\n\n".join(text_chunks)
@@ -181,7 +180,7 @@ Analyze the medical records for clinical oversights. Provide a concise, evidence
181
  4. **Urgent Follow-up**:
182
  - Flag abnormal lab results, imaging, behaviors, or legal history needing immediate reassessment or referral.
183
 
184
- Medical Records (Chunk {0}):
185
  {{chunk}}
186
 
187
  Begin analysis:
@@ -191,9 +190,13 @@ Begin analysis:
191
  if history and history[-1]["content"].startswith("⏳"):
192
  history.pop()
193
 
194
- # Process each chunk sequentially
195
  for chunk_idx, chunk in enumerate(chunks, 1):
196
- prompt = prompt_template.format(chunk_idx, chunk=chunk)
 
 
 
 
197
  chunk_response = ""
198
  for chunk_output in agent.run_gradio_chat(
199
  message=prompt,
@@ -212,15 +215,29 @@ Begin analysis:
212
  cleaned = clean_response(m.content)
213
  if cleaned:
214
  chunk_response += cleaned + "\n"
 
 
 
 
 
 
215
  elif isinstance(chunk_output, str) and chunk_output.strip():
216
  cleaned = clean_response(chunk_output)
217
  if cleaned:
218
  chunk_response += cleaned + "\n"
 
 
 
 
 
 
 
 
219
  combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
220
 
221
- # Update history with combined response
222
  if combined_response:
223
- history.append({"role": "assistant", "content": combined_response.strip()})
224
  else:
225
  history.append({"role": "assistant", "content": "No oversights identified."})
226
 
 
53
  with pdfplumber.open(file_path) as pdf:
54
  for i, page in enumerate(pdf.pages):
55
  page_text = page.extract_text() or ""
 
56
  if i < 3 or any(re.search(rf'\b{kw}\b', page_text.lower()) for kw in MEDICAL_KEYWORDS):
57
  text_chunks.append(f"=== Page {i+1} ===\n{page_text.strip()}")
58
  return "\n\n".join(text_chunks)
 
180
  4. **Urgent Follow-up**:
181
  - Flag abnormal lab results, imaging, behaviors, or legal history needing immediate reassessment or referral.
182
 
183
+ Medical Records (Chunk {0} of {1}):
184
  {{chunk}}
185
 
186
  Begin analysis:
 
190
  if history and history[-1]["content"].startswith("⏳"):
191
  history.pop()
192
 
193
+ # Process each chunk and stream results in real-time
194
  for chunk_idx, chunk in enumerate(chunks, 1):
195
+ # Update UI with progress
196
+ history.append({"role": "assistant", "content": f"🔄 Processing Chunk {chunk_idx} of {len(chunks)}..."})
197
+ yield history, None
198
+
199
+ prompt = prompt_template.format(chunk_idx, len(chunks), chunk=chunk)
200
  chunk_response = ""
201
  for chunk_output in agent.run_gradio_chat(
202
  message=prompt,
 
215
  cleaned = clean_response(m.content)
216
  if cleaned:
217
  chunk_response += cleaned + "\n"
218
+ # Update UI with partial response
219
+ if history[-1]["content"].startswith("🔄"):
220
+ history[-1] = {"role": "assistant", "content": f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response.strip()}"}
221
+ else:
222
+ history[-1]["content"] = f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response.strip()}"
223
+ yield history, None
224
  elif isinstance(chunk_output, str) and chunk_output.strip():
225
  cleaned = clean_response(chunk_output)
226
  if cleaned:
227
  chunk_response += cleaned + "\n"
228
+ # Update UI with partial response
229
+ if history[-1]["content"].startswith("🔄"):
230
+ history[-1] = {"role": "assistant", "content": f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response.strip()}"}
231
+ else:
232
+ history[-1]["content"] = f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response.strip()}"
233
+ yield history, None
234
+
235
+ # Append completed chunk response to combined response
236
  combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
237
 
238
+ # Finalize UI with complete response
239
  if combined_response:
240
+ history[-1]["content"] = combined_response.strip()
241
  else:
242
  history.append({"role": "assistant", "content": "No oversights identified."})
243