Update app.py
app.py
CHANGED
@@ -131,7 +131,8 @@ def init_agent():
     return agent
 
 def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
-
+    from concurrent.futures import ThreadPoolExecutor, as_completed
+
     messages = chatbot_state if chatbot_state else []
     report_path = None
 
@@ -145,16 +146,13 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
 
         extracted_text = extract_text_from_excel(file.name)
         chunks = split_text_into_chunks(extracted_text, max_tokens=MAX_CHUNK_TOKENS)
-        chunk_responses = []
+        chunk_responses = [None] * len(chunks)
 
-        for i, chunk in enumerate(chunks):
-            messages.append({"role": "assistant", "content": f"🔍 Analyzing chunk {i+1}/{len(chunks)}..."})
+        def analyze_chunk(index: int, chunk: str) -> Tuple[int, str]:
             prompt = build_prompt_from_text(chunk)
             prompt_tokens = estimate_tokens(prompt)
-
             if prompt_tokens > MAX_MODEL_TOKENS:
-                messages.append({"role": "assistant", "content": f"❌ Chunk {i+1} prompt too long ({prompt_tokens} tokens). Skipping..."})
-                continue
+                return index, f"❌ Chunk {index+1} prompt too long ({prompt_tokens} tokens). Skipping..."
 
             response = ""
             try:
@@ -176,19 +174,28 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
                 if hasattr(r, "content"):
                     response += r.content
             except Exception as e:
-                messages.append({"role": "assistant", "content": f"❌ Error analyzing chunk {i+1}: {str(e)}"})
-                continue
-
-            chunk_responses.append(clean_response(response))
-            messages.append({"role": "assistant", "content": f"✅ Chunk {i+1} analysis complete"})
-
-        if not chunk_responses:
+                return index, f"❌ Error analyzing chunk {index+1}: {str(e)}"
+
+            return index, clean_response(response)
+
+        with ThreadPoolExecutor(max_workers=4) as executor:
+            futures = [executor.submit(analyze_chunk, i, chunk) for i, chunk in enumerate(chunks)]
+            for future in as_completed(futures):
+                i, result = future.result()
+                chunk_responses[i] = result
+                if not result.startswith("❌"):
+                    messages.append({"role": "assistant", "content": f"✅ Chunk {i+1} analysis complete"})
+                else:
+                    messages.append({"role": "assistant", "content": result})
+
+        valid_responses = [res for res in chunk_responses if not res.startswith("❌")]
+        if not valid_responses:
             messages.append({"role": "assistant", "content": "❌ No valid chunk responses to summarize."})
             return messages, report_path
 
         summary = ""
         current_summary_tokens = 0
-        for i, response in enumerate(chunk_responses):
+        for i, response in enumerate(valid_responses):
             response_tokens = estimate_tokens(response)
             if current_summary_tokens + response_tokens > MAX_MODEL_TOKENS - PROMPT_OVERHEAD - MAX_NEW_TOKENS:
                 summary_prompt = f"Summarize the following analysis:\n\n{summary}\n\nProvide a concise summary."
@@ -261,8 +268,6 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
         messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
 
     return messages, report_path
-
-
 def create_ui(agent):
     """Create the Gradio UI for the patient history analysis tool."""
     with gr.Blocks(
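
The core of this change is an ordered fan-out pattern: analyze_chunk returns its index alongside its result, the results list is pre-sized to len(chunks), and as_completed reports progress as soon as each worker finishes while chunk_responses[i] = result restores submission order. A minimal, self-contained sketch of the same pattern, where analyze and the sample data are illustrative stand-ins rather than app.py's real helpers:

from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Tuple

def analyze(index: int, chunk: str) -> Tuple[int, str]:
    # Stand-in for analyze_chunk: per-chunk work that may fail independently.
    if not chunk.strip():
        return index, f"❌ Chunk {index+1} is empty. Skipping..."
    return index, chunk.upper()

chunks = ["alpha", "", "gamma"]
results = [None] * len(chunks)  # pre-sized so order survives out-of-order completion

with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(analyze, i, c) for i, c in enumerate(chunks)]
    for future in as_completed(futures):  # yields futures in completion order
        i, result = future.result()
        results[i] = result  # the carried index restores submission order
        print(f"chunk {i+1} done")  # progress can be reported per finished worker

valid = [r for r in results if not r.startswith("❌")]
print(valid)  # ['ALPHA', 'GAMMA']

Because failures are encoded as strings prefixed with ❌ rather than raised, future.result() never throws here and one bad chunk cannot abort the batch; and since messages is only appended from the main thread inside the as_completed loop, no extra locking is needed.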
|