Update ui/ui_core.py

ui/ui_core.py  (+33, −87)

Summary: drops the old chunked pipeline (chunk_text, clean_final_response, and the per-chunk agent calls), sends the full extracted record to agent.run_gradio_chat as a single prompt ending in "[Final Analysis]", hard-codes the conversion cache directory to /data/cache, and replaces the long system prompt with a shorter one built inside handle_chat.
@@ -16,31 +16,13 @@ from txagent.txagent import TxAgent
 def sanitize_utf8(text: str) -> str:
     return text.encode("utf-8", "ignore").decode("utf-8")
 
-def clean_final_response(text: str) -> str:
-    cleaned = text.replace("[TOOL_CALLS]", "").strip()
-    responses = cleaned.split("[Final Analysis]")
-
-    if len(responses) <= 1:
-        return f"<div style='padding:1em;border:1px solid #ccc;border-radius:12px;color:#fff;background:#353F54;'><p>{cleaned}</p></div>"
-
-    panels = []
-    for i, section in enumerate(responses[1:], 1):
-        final = section.strip()
-        panels.append(
-            f"<div style='background:#2B2B2B;color:#E0E0E0;border-radius:12px;margin-bottom:1em;border:1px solid #888;'>"
-            f"<div style='font-size:1.1em;font-weight:bold;padding:0.75em;background:#3A3A3A;color:#fff;border-radius:12px 12px 0 0;'>🧠 Final Analysis #{i}</div>"
-            f"<div style='padding:1em;line-height:1.6;'>{final.replace(chr(10), '<br>')}</div>"
-            f"</div>"
-        )
-    return "".join(panels)
-
 def file_hash(path):
     with open(path, "rb") as f:
         return hashlib.md5(f.read()).hexdigest()
 
 def convert_file_to_json(file_path: str, file_type: str) -> str:
     try:
-        cache_dir =
+        cache_dir = "/data/cache"
         os.makedirs(cache_dir, exist_ok=True)
         h = file_hash(file_path)
         cache_path = os.path.join(cache_dir, f"{h}.json")
@@ -75,23 +57,6 @@ def convert_file_to_json(file_path: str, file_type: str) -> str:
     except Exception as e:
         return json.dumps({"error": f"Error reading {os.path.basename(file_path)}: {str(e)}"})
 
-def chunk_text(text: str, max_tokens: int = 6000) -> List[str]:
-    chunks = []
-    words = text.split()
-    chunk = []
-    token_count = 0
-    for word in words:
-        token_count += len(word) // 4 + 1
-        if token_count > max_tokens:
-            chunks.append(" ".join(chunk))
-            chunk = [word]
-            token_count = len(word) // 4 + 1
-        else:
-            chunk.append(word)
-    if chunk:
-        chunks.append(" ".join(chunk))
-    return chunks
-
 def create_ui(agent: TxAgent):
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown("<h1 style='text-align: center;'>📋 CPS: Clinical Patient Support System</h1>")
@@ -107,15 +72,6 @@ def create_ui(agent: TxAgent):
         conversation_state = gr.State([])
 
         def handle_chat(message: str, history: list, conversation: list, uploaded_files: list, progress=gr.Progress()):
-            context = (
-                "You are an expert clinical AI assistant reviewing medical form or interview data. "
-                "Your job is to analyze this data and reason about any information or red flags that a human doctor might have overlooked. "
-                "Provide a **detailed and structured response**, including examples, supporting evidence from the form, and clinical rationale for why these items matter. "
-                "Ensure the output is informative and helpful for improving patient care. "
-                "Do not hallucinate. Base the response only on the provided form content. "
-                "End with a section labeled '[Final Analysis]' where you summarize key findings the doctor may have missed."
-            )
-
             try:
                 history.append({"role": "user", "content": message})
                 history.append({"role": "assistant", "content": "⏳ Processing your request..."})
@@ -131,47 +87,37 @@ def create_ui(agent: TxAgent):
                 json_text = convert_file_to_json(path, ext)
                 extracted_text += sanitize_utf8(json_text) + "\n"
 
-            [old lines 134-164 were not captured in this page; they split extracted_text with chunk_text and defined process_chunk(i, chunk), which streamed each chunk through the agent into result]
-                            result += msg.content
-                    return result if result.strip() else f"[Chunk {i+1}] ⚠️ No response received."
-                except Exception as err:
-                    print(f"[Error in chunk {i+1}] {err}")
-                    return f"[Chunk {i+1}] ❌ Failed to process due to error."
-
-            results = [process_chunk(i, chunk) for i, chunk in enumerate(chunks)]
-            full_response = "\n\n".join(results)
-            full_response = clean_final_response(full_response.strip())
-            history[-1] = {"role": "assistant", "content": full_response}
+            # Only final chunk will be passed (no split or loop)
+            context = (
+                "You are an expert clinical AI assistant. Review this patient's history, medications, and notes, and ONLY provide a final answer summarizing what the doctor might have missed."
+            )
+
+            chunked_prompt = f"{context}\n\n--- Patient Record ---\n{extracted_text}\n\n[Final Analysis]"
+
+            generator = agent.run_gradio_chat(
+                message=chunked_prompt,
+                history=[],
+                temperature=0.3,
+                max_new_tokens=1024,
+                max_token=8192,
+                call_agent=False,
+                conversation=conversation,
+                uploaded_files=uploaded_files,
+                max_round=30
+            )
+
+            final_response = ""
+            for update in generator:
+                if update is None:
+                    continue
+                if isinstance(update, str):
+                    final_response += update
+                elif isinstance(update, list):
+                    for msg in update:
+                        if hasattr(msg, 'content'):
+                            final_response += msg.content
+
+            history[-1] = {"role": "assistant", "content": final_response.strip() or "❌ No response."}
             yield history
 
         except Exception as chat_error:
@@ -189,4 +135,4 @@ def create_ui(agent: TxAgent):
             ["Is there anything abnormal in the attached blood work report?"]
         ], inputs=message_input)
 
-    return demo
+    return demo
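
Note on the elided middle of convert_file_to_json (old lines 45-74, not shown in this diff): given the md5-keyed cache_path it computes, the body presumably follows a read-through cache pattern. A hypothetical sketch of that shape; cached_convert and the convert callback are illustrative names, not this file's actual code:

import hashlib
import json
import os

def cached_convert(file_path: str, convert) -> str:
    """Hypothetical read-through cache keyed by the file's md5 hash,
    mirroring the cache_dir/cache_path setup visible in the diff."""
    cache_dir = "/data/cache"
    os.makedirs(cache_dir, exist_ok=True)
    with open(file_path, "rb") as f:
        h = hashlib.md5(f.read()).hexdigest()
    cache_path = os.path.join(cache_dir, f"{h}.json")
    if os.path.exists(cache_path):          # cache hit: reuse the stored JSON
        with open(cache_path, "r", encoding="utf-8") as f:
            return f.read()
    try:
        result = convert(file_path)         # cache miss: run the real conversion
        with open(cache_path, "w", encoding="utf-8") as f:
            f.write(result)
        return result
    except Exception as e:                  # same error envelope the diff uses
        return json.dumps({"error": f"Error reading {os.path.basename(file_path)}: {str(e)}"})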
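
Note on the new handler: it drains run_gradio_chat completely and yields the chat history once, so the user sees only the "⏳ Processing your request..." placeholder until the whole answer arrives. If incremental streaming were wanted, the same loop could yield on every update. A sketch under the same assumptions the diff's loop already makes (updates are either strings or lists of objects exposing .content):

# Streaming variant (sketch only): identical update handling to the diff's
# loop, but yields partial history after each update instead of once at the end.
final_response = ""
for update in generator:
    if update is None:
        continue
    if isinstance(update, str):
        final_response += update
    elif isinstance(update, list):
        final_response += "".join(
            msg.content for msg in update if hasattr(msg, "content")
        )
    history[-1] = {"role": "assistant", "content": final_response or "⏳ Processing your request..."}
    yield history  # push the partial text to the Gradio chat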
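
Finally, the diff only shows that create_ui takes a TxAgent and returns the gr.Blocks app. Wiring it up might look like the following sketch; the TxAgent constructor arguments are an assumption, not shown in this commit:

from txagent.txagent import TxAgent
from ui.ui_core import create_ui

agent = TxAgent()          # hypothetical: real setup likely passes model/tool config
demo = create_ui(agent)
demo.queue().launch()      # queue() lets the generator-based handler stream updates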