|
import os
import sys
import importlib
import inspect
import json
from multiprocessing import freeze_support

import gradio as gr
|
|
|
|
|
# Put the bundled "src" checkout ahead of any installed txagent package.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
|
|
|
|
|
# Import the module and force a reload so a stale, previously imported copy
# cannot shadow the local sources, then pull in the TxAgent class itself.
import txagent.txagent
importlib.reload(txagent.txagent)
from txagent.txagent import TxAgent
|
|
|
|
|
# Startup sanity checks: report where TxAgent was loaded from and confirm it
# exposes the streaming chat entry point used by the UI below.
print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
|
|
|
|
|
current_dir = os.path.abspath(os.path.dirname(__file__))
# Avoid MKL/OpenMP threading-layer clashes and noisy tokenizer fork warnings.
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
|
|
|
|
# Model checkpoints (Hugging Face Hub IDs) and extra tool definitions.
model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
new_tool_files = {
    "new_tool": os.path.join(current_dir, "data", "new_tool.json")
}
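# Passed to TxAgent as tool_files_dict below; data/new_tool.json is assumed to
# contain the extra tool definitions and to ship alongside this script.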
|
|
|
|
|
question_examples = [
    ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
    ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
]
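# Each example is a one-element list because gr.Examples below binds a single
# input component (message_input).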
|
|
|
|
|
|
|
def format_collapsible(content, tool_name=None):
    """Render a tool result (JSON string, dict, or list) as a collapsible HTML block."""
    if isinstance(content, str):
        try:
            content = json.loads(content)
        except Exception:
            pass

    if isinstance(content, dict) and "results" in content:
        readable = ""
        for i, result in enumerate(content["results"], 1):
            readable += f"\n🔹 **Result {i}:**\n"
            for key, value in result.items():
                key_str = key.replace("openfda.", "").replace("_", " ").capitalize()
                val_str = ", ".join(value) if isinstance(value, list) else str(value)
                readable += f"- **{key_str}**: {val_str}\n"
        formatted = readable.strip()
    elif isinstance(content, (dict, list)):
        formatted = json.dumps(content, indent=2)
    else:
        formatted = str(content)

    title = tool_name or "Answer"
    return (
        "<details style='border: 1px solid #aaa; border-radius: 8px; padding: 10px; margin: 12px 0; background-color: #f8f8f8;'>"
        f"<summary style='font-weight: bold; font-size: 16px; color: #333;'>{title}</summary>"
        f"<div style='white-space: pre-wrap; font-family: sans-serif; color: #222; padding-top: 6px;'>{formatted}</div>"
        "</details>"
    )
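# Illustrative rendering (not executed): a tool payload like
#   {"results": [{"openfda.brand_name": ["XOLREMDI"], "route": ["ORAL"]}]}
# becomes a collapsible <details> block whose body reads roughly:
#   🔹 **Result 1:**
#   - **Brand name**: XOLREMDI
#   - **Route**: ORAL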
|
|
|
|
|
def create_ui(agent):
    with gr.Blocks(css="body { background-color: #f5f5f5; font-family: sans-serif; }") as demo:
        gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
        gr.Markdown("<p style='text-align: center;'>Ask biomedical or therapeutic questions. Powered by step-by-step reasoning and tools.</p>")

        # Holds the agent-side conversation across turns, separate from the chat display.
        conversation_state = gr.State([])

        chatbot = gr.Chatbot(label="TxAgent", height=600, type="messages")
        message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
        send_button = gr.Button("Send", variant="primary")
|
|
|
|
|
        def handle_chat(message, history, conversation):
            # run_gradio_chat is used as a generator that streams progressively
            # updated message lists while the agent reasons and calls tools.
            generator = agent.run_gradio_chat(
                message=message,
                history=history,
                temperature=0.3,
                max_new_tokens=1024,
                max_token=8192,
                call_agent=False,
                conversation=conversation,
                max_round=30
            )

            for update in generator:
                formatted = []
                for m in update:
                    # Messages may arrive as plain dicts or as message objects.
                    role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
                    content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")
                    tool_name = m.get("tool_name") if isinstance(m, dict) else getattr(m, "tool_name", None)

                    # Wrap assistant/tool output in a collapsible block for readability.
                    if role == "assistant":
                        content = format_collapsible(content, tool_name)

                    formatted.append({"role": role, "content": content})
                yield formatted
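            # Illustrative shape of one yielded update (assuming run_gradio_chat
            # emits the full message list at each step):
            #   [{"role": "user", "content": "..."},
            #    {"role": "assistant", "content": "<details>...</details>"}]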
|
|
|
        # Both the Send button and pressing Enter submit the question.
        inputs = [message_input, chatbot, conversation_state]
        send_button.click(fn=handle_chat, inputs=inputs, outputs=chatbot)
        message_input.submit(fn=handle_chat, inputs=inputs, outputs=chatbot)

        gr.Examples(examples=question_examples, inputs=message_input)
        gr.Markdown("<p style='font-size: 12px; text-align: center; color: gray;'>This demo is for research purposes only and does not provide medical advice.</p>")

    return demo
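# Illustrative only: create_ui() accepts any object that exposes a compatible
# run_gradio_chat generator, which makes it easy to smoke-test the UI without
# loading the models. Hypothetical stand-in (not part of TxAgent):
#
#     class EchoAgent:
#         def run_gradio_chat(self, message, history, **kwargs):
#             yield [{"role": "assistant", "content": f"echo: {message}"}]
#
#     create_ui(EchoAgent()).queue().launch()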
|
|
|
|
|
if __name__ == "__main__":
    freeze_support()
    try:
        agent = TxAgent(
            model_name=model_name,
            rag_model_name=rag_model_name,
            tool_files_dict=new_tool_files,
            force_finish=True,
            enable_checker=True,
            step_rag_num=10,
            seed=100,
            additional_default_tools=[]
        )
        agent.init_model()

        # Fail fast if the loaded class is missing the streaming entry point.
        if not hasattr(agent, "run_gradio_chat"):
            raise AttributeError("TxAgent missing run_gradio_chat")

        demo = create_ui(agent)
        demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True, show_error=True)
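        # 0.0.0.0:7860 matches the address/port a Hugging Face Space routes to by
        # default; share=True only takes effect when the app is run outside Spaces,
        # where it requests a temporary public Gradio link.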
|
|
|
    except Exception as e:
        print(f"\u274c App failed to start: {e}")
        raise