# CPS-Test-Mobile / app.py
import os
import sys
import gradio as gr
from multiprocessing import freeze_support
import importlib
import inspect
import json
# === Fix path to include src/txagent
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
# === Import and reload to ensure correct file
import txagent.txagent
importlib.reload(txagent.txagent)
from txagent.txagent import TxAgent
# === Debug print
print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
# === Environment
current_dir = os.path.abspath(os.path.dirname(__file__))
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# === Model config
model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
new_tool_files = {
"new_tool": os.path.join(current_dir, "data", "new_tool.json")
}
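# NOTE (assumption): the schema of data/new_tool.json is defined by TxAgent's
# tool loader, not by this script. As an illustrative sketch only, an entry is
# typically a JSON object carrying a tool name, description, and parameter spec:
#   [{"name": "example_tool", "description": "...", "parameter": {"type": "object", "properties": {}}}]
# Adjust to whatever format the installed txagent version actually expects.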
# === Example prompts
question_examples = [
["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
]
# === Helper: extract tool name from content
def extract_tool_name_and_clean_content(message_obj):
    import logging
    logging.basicConfig(level=logging.INFO)
    tool_name = "Tool Result"
    content = ""
    if isinstance(message_obj, dict):
        role = message_obj.get("role", "assistant")
        content = message_obj.get("content", "")
        tool_calls = message_obj.get("tool_calls", None)
    else:
        role = getattr(message_obj, "role", "assistant")
        content = getattr(message_obj, "content", "")
        tool_calls = getattr(message_obj, "tool_calls", None)
    # Try to extract the tool name from `tool_calls` (may arrive as a JSON string)
    if tool_calls:
        try:
            if isinstance(tool_calls, str):
                tool_calls = json.loads(tool_calls)
            tool_name = tool_calls[0].get("name", "Tool Result")
            logging.info(f"[extract_tool_name] Extracted from tool_calls: {tool_name}")
        except Exception as e:
            logging.warning(f"[extract_tool_name] Failed tool_calls parsing: {e}")
    # Format clean output
    if isinstance(content, (dict, list)):
        formatted = json.dumps(content, indent=2)
    else:
        formatted = str(content)
    return f"Tool: {tool_name}", formatted
# === Helper: formatted collapsible output
def format_collapsible(content, title="Answer"):
    return (
        f"<details style='border: 1px solid #ccc; padding: 8px; margin-top: 8px;'>"
        f"<summary style='font-weight: bold;'>{title}</summary>"
        f"<div style='margin-top: 8px; white-space: pre-wrap;'>{content}</div></details>"
    )
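# Illustrative output (style attributes abbreviated; example values are made up):
#   format_collapsible("Dose: 400 mg once daily", "Tool: dosing_lookup")
#   -> "<details ...><summary ...>Tool: dosing_lookup</summary><div ...>Dose: 400 mg once daily</div></details>"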
# === UI creation
def create_ui(agent):
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
        gr.Markdown("Ask biomedical or therapeutic questions. Powered by step-by-step reasoning and tools.")
        chatbot = gr.Chatbot(label="TxAgent", height=600, type="messages")
        message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
        send_button = gr.Button("Send", variant="primary")
        conversation_state = gr.State([])
        # === Core handler (streaming generator)
        def handle_chat(message, history, conversation):
            generator = agent.run_gradio_chat(
                message=message,
                history=history,
                temperature=0.3,
                max_new_tokens=1024,
                max_token=8192,
                call_agent=False,
                conversation=conversation,
                max_round=30
            )
            for update in generator:
                formatted_messages = []
                for m in update:
                    role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
                    content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")
                    if role == "assistant":
                        # Pass the whole message object so `tool_calls` (not just
                        # the content string) can be inspected for the tool name.
                        title, clean = extract_tool_name_and_clean_content(m)
                        content = format_collapsible(clean, title)
                    formatted_messages.append({"role": role, "content": content})
                yield formatted_messages
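        # Note: each yield replaces the Chatbot value wholesale; with
        # type="messages", gr.Chatbot expects a list of {"role": ..., "content": ...}
        # dicts, so the handler re-emits the fully formatted list on every update.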
        # === Trigger handlers
        inputs = [message_input, chatbot, conversation_state]
        send_button.click(fn=handle_chat, inputs=inputs, outputs=chatbot)
        message_input.submit(fn=handle_chat, inputs=inputs, outputs=chatbot)
        gr.Examples(examples=question_examples, inputs=message_input)
        gr.Markdown("**DISCLAIMER**: This demo is for research purposes only and does not provide medical advice.")
    return demo
# === Startup
if __name__ == "__main__":
    freeze_support()
    try:
        agent = TxAgent(
            model_name=model_name,
            rag_model_name=rag_model_name,
            tool_files_dict=new_tool_files,
            force_finish=True,
            enable_checker=True,
            step_rag_num=10,
            seed=100,
            additional_default_tools=[]
        )
        agent.init_model()
        if not hasattr(agent, "run_gradio_chat"):
            raise AttributeError("❌ TxAgent is missing `run_gradio_chat`.")
        demo = create_ui(agent)
        demo.queue().launch(
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
            share=True
        )
    except Exception as e:
        print(f"❌ App failed to start: {e}")
        raise