import os
import sys
import gradio as gr
from multiprocessing import freeze_support
import importlib
import inspect
import json
# Fix path to include src
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
# Reload txagent.txagent so a stale cached module copy is not used
import txagent.txagent
importlib.reload(txagent.txagent)
from txagent.txagent import TxAgent
# Debug info
print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
# Environment setup
current_dir = os.path.abspath(os.path.dirname(__file__))
os.environ["MKL_THREADING_LAYER"] = "GNU"  # avoid Intel/GNU OpenMP layer clashes
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # silence HF tokenizers fork warning
# Model config
model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
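# Both identifiers are Hugging Face Hub repos: the agent LLM itself and the
# ToolRAG embedding model TxAgent uses to retrieve relevant tools at each step.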
new_tool_files = {
    "new_tool": os.path.join(current_dir, "data", "new_tool.json")
}
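# data/new_tool.json is assumed to hold extra tool definitions that TxAgent
# loads alongside its built-in tools.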
# Sample questions
question_examples = [
    ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
    ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
]
# Helper: format assistant responses in collapsible panels
def format_collapsible(content, tool_name=None):
    # Parse JSON strings so structured tool results can be pretty-printed
    if isinstance(content, str):
        try:
            content = json.loads(content)
        except Exception:
            pass

    if isinstance(content, dict) and "results" in content:
        readable = ""
        for i, result in enumerate(content["results"], 1):
            readable += f"\n🔹 **Result {i}:**\n"
            for key, value in result.items():
                key_str = key.replace("openfda.", "").replace("_", " ").capitalize()
                val_str = ", ".join(value) if isinstance(value, list) else str(value)
                readable += f"- **{key_str}**: {val_str}\n"
        formatted = readable.strip()
    elif isinstance(content, (dict, list)):
        formatted = json.dumps(content, indent=2)
    else:
        formatted = str(content)

    title = tool_name or "Answer"
    # Wrap the body in an HTML <details> element so the chatbot renders it
    # as a collapsible panel
    return (
        f"<details>\n"
        f"<summary><strong>{title}</strong></summary>\n\n"
        f"{formatted}\n\n"
        f"</details>"
    )

# Gradio UI
def create_ui(agent):
    with gr.Blocks() as demo:
        gr.Markdown(
            "Ask biomedical or therapeutic questions. "
            "Powered by step-by-step reasoning and tools."
        )
        conversation_state = gr.State([])
        chatbot = gr.Chatbot(label="TxAgent", height=600, type="messages")
        message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
        send_button = gr.Button("Send", variant="primary")

        # Main handler: stream agent updates into the chatbot
        def handle_chat(message, history, conversation):
            generator = agent.run_gradio_chat(
                message=message,
                history=history,
                temperature=0.3,
                max_new_tokens=1024,
                max_token=8192,
                call_agent=False,
                conversation=conversation,
                max_round=30
            )
            for update in generator:
                formatted = []
                for m in update:
                    role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
                    content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")
                    tool_name = m.get("tool_name") if isinstance(m, dict) else getattr(m, "tool_name", None)
                    if role == "assistant":
                        content = format_collapsible(content, tool_name)
                    formatted.append({"role": role, "content": content})
                yield formatted

        inputs = [message_input, chatbot, conversation_state]
        send_button.click(fn=handle_chat, inputs=inputs, outputs=chatbot)
        message_input.submit(fn=handle_chat, inputs=inputs, outputs=chatbot)
        gr.Examples(examples=question_examples, inputs=message_input)
        gr.Markdown("This demo is for research purposes only and does not provide medical advice.")
    return demo

# === Entry point ===
if __name__ == "__main__":
    freeze_support()
    try:
        agent = TxAgent(
            model_name=model_name,
            rag_model_name=rag_model_name,
            tool_files_dict=new_tool_files,
            force_finish=True,
            enable_checker=True,
            step_rag_num=10,
            seed=100,
            additional_default_tools=[]
        )
        agent.init_model()
        if not hasattr(agent, "run_gradio_chat"):
            raise AttributeError("TxAgent missing run_gradio_chat")
        demo = create_ui(agent)
        demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True, show_error=True)
    except Exception as e:
        print(f"❌ App failed to start: {e}")
        raise