import os
import sys
import gradio as gr
from multiprocessing import freeze_support
import importlib
import inspect
import json
from typing import Dict, List, Union

# Fix path to include src
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))

# Reload TxAgent from txagent.py
import txagent.txagent
importlib.reload(txagent.txagent)
from txagent.txagent import TxAgent

# Debug info
print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))

# Env vars
current_dir = os.path.abspath(os.path.dirname(__file__))
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Model config
model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
new_tool_files = {
    "new_tool": os.path.join(current_dir, "data", "new_tool.json")
}

# Sample questions
question_examples = [
    ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
    ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"],
    ["What are the drug interactions between warfarin and ciprofloxacin?"]
]

# Custom CSS for elegant design
custom_css = """
:root {
    --primary-color: #4f46e5;
    --secondary-color: #f9fafb;
    --accent-color: #e5e7eb;
    --text-color: #111827;
    --border-radius: 8px;
}
body {
    font-family: 'Inter', system-ui, -apple-system, sans-serif;
}
.dark body {
    --secondary-color: #1f2937;
    --text-color: #f9fafb;
}
.gradio-container {
    max-width: 900px !important;
    margin: 0 auto !important;
}
h1 {
    color: var(--primary-color) !important;
    font-weight: 600 !important;
    margin-bottom: 1rem !important;
}
.chatbot {
    min-height: 600px;
    border-radius: var(--border-radius) !important;
    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
}
.textbox {
    border-radius: var(--border-radius) !important;
}
.button-primary {
    background: var(--primary-color) !important;
    border-radius: var(--border-radius) !important;
}
.answer-panel {
    background: var(--secondary-color) !important;
    border-radius: var(--border-radius) !important;
    padding: 16px !important;
    margin-top: 8px !important;
    border: 1px solid var(--accent-color) !important;
}
.answer-title {
    font-weight: 600 !important;
    color: var(--primary-color) !important;
    margin-bottom: 8px !important;
}
.answer-content {
    white-space: pre-wrap;
    font-family: 'Roboto Mono', monospace;
    font-size: 0.9em;
    line-height: 1.5;
}
.settings-panel {
    background: var(--secondary-color) !important;
    border-radius: var(--border-radius) !important;
    padding: 16px !important;
    margin-bottom: 16px !important;
    border: 1px solid var(--accent-color) !important;
}
.settings-title {
    font-weight: 600 !important;
    margin-bottom: 12px !important;
    color: var(--text-color) !important;
}
.examples-panel {
    margin-top: 16px !important;
}
"""


# Helper: format assistant responses in elegant panels
def format_response(content: Union[str, Dict, List]) -> str:
    """Format the assistant's response in a structured, user-friendly way."""
    if isinstance(content, (dict, list)):
        try:
            formatted = json.dumps(content, indent=2)
        except Exception:
            formatted = str(content)
    else:
        formatted = str(content)

    # Clean up common formatting issues
    formatted = formatted.replace("\\n", "\n").replace("\\t", "\t")

    # Wrap the answer in the panel classes defined in custom_css
    return (
        f"<div class='answer-panel'>"
        f"<div class='answer-title'>Detailed Response</div>"
        f"<div class='answer-content'>{formatted}</div>"
        f"</div>"
    )


# Helper: format tool calls in a structured way
def format_tool_call(tool_name: str, parameters: Dict) -> str:
    """Format tool calls for display in the chat."""
    return (
        f"<div class='answer-panel'>"
        f"<div class='answer-title'>Tool Used: {tool_name}</div>"
        f"<div class='answer-content'>Parameters: {json.dumps(parameters, indent=2)}</div>"
        f"</div>"
    )

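# Illustrative use of the two helpers above (the tool name and parameters here are
# hypothetical, chosen only to show the shape of the output). Each helper returns a
# single HTML string that the chatbot can render as a styled panel via the
# .answer-panel / .answer-title / .answer-content rules in custom_css:
#
#   format_tool_call("CheckDrugInteraction", {"drug_a": "warfarin", "drug_b": "ciprofloxacin"})
#   # -> "<div class='answer-panel'><div class='answer-title'>Tool Used: CheckDrugInteraction</div>..."
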
# === UI setup
def create_ui(agent: TxAgent) -> gr.Blocks:
    """Create the Gradio UI with elegant design and organized responses."""
    with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
        # Header section
        gr.Markdown(
            """
# Therapeutic Decision Support
Get evidence-based answers to your biomedical questions with step-by-step reasoning
            """
        )
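
        # Note: the sliders and checkbox in the settings panel below map directly onto
        # the arguments handle_chat forwards to agent.run_gradio_chat: temperature and
        # max_new_tokens shape generation, max_tokens is passed as max_token (the context
        # budget), multi_agent toggles call_agent, and max_round bounds the reasoning loop.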

        # Settings panel
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            with gr.Row():
                temperature = gr.Slider(
                    0, 1, value=0.3, label="Creativity",
                    info="Higher values produce more creative outputs"
                )
                max_new_tokens = gr.Slider(
                    128, 4096, value=1024, label="Max Response Length", step=128
                )
            with gr.Row():
                max_tokens = gr.Slider(
                    128, 32000, value=8192, label="Context Window", step=1024
                )
                max_round = gr.Slider(1, 50, value=30, label="Max Reasoning Steps")
            multi_agent = gr.Checkbox(
                label="Enable Multi-agent Reasoning", value=False,
                info="Uses multiple specialized agents for complex questions"
            )

        conversation_state = gr.State([])

        # Chat interface (type="messages" because handle_chat yields role/content dicts)
        chatbot = gr.Chatbot(
            label="Therapeutic Reasoning Chat",
            height=600,
            type="messages",
            bubble_full_width=False,
            avatar_images=(
                "assets/user_avatar.png",  # User avatar
                "assets/bot_avatar.png"    # Bot avatar
            )
        )

        with gr.Row():
            message_input = gr.Textbox(
                placeholder="Ask your biomedical question...",
                show_label=False,
                container=False,
                autofocus=True,
                lines=3,
                max_lines=6
            )
            send_button = gr.Button("Send", variant="primary", size="lg")

        # Examples section
        gr.Examples(
            examples=question_examples,
            inputs=message_input,
            label="💡 Example Questions",
            examples_per_page=3
        )

        # Disclaimer
        gr.Markdown(
            """
**Disclaimer:** This tool is for research purposes only and does not constitute medical advice. Always consult a healthcare professional for medical decisions.
            """
        )

        # Main handler: run_gradio_chat is a generator; each update it yields is a
        # list of chat messages (plain dicts or ChatMessage-like objects), which are
        # normalized and wrapped in the HTML panels defined above before display.
        def handle_chat(
            message: str,
            history: List,
            temperature: float,
            max_new_tokens: int,
            max_tokens: int,
            multi_agent: bool,
            conversation: List,
            max_round: int
        ):
            generator = agent.run_gradio_chat(
                message=message,
                history=history,
                temperature=temperature,
                max_new_tokens=max_new_tokens,
                max_token=max_tokens,
                call_agent=multi_agent,
                conversation=conversation,
                max_round=max_round
            )

            for update in generator:
                formatted = []
                for m in update:
                    role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
                    content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")

                    # Format different types of messages appropriately
                    if role == "assistant":
                        # Only dict messages can carry an explicit tool_name key
                        if isinstance(m, dict) and "tool_name" in m:
                            formatted.append({
                                "role": role,
                                "content": format_tool_call(m["tool_name"], m.get("parameters", {}))
                            })
                        else:
                            formatted.append({
                                "role": role,
                                "content": format_response(content)
                            })
                    else:
                        formatted.append({"role": role, "content": content})

                yield formatted

        # Event handlers
        inputs = [message_input, chatbot, temperature, max_new_tokens,
                  max_tokens, multi_agent, conversation_state, max_round]
        send_button.click(fn=handle_chat, inputs=inputs, outputs=chatbot)
        message_input.submit(fn=handle_chat, inputs=inputs, outputs=chatbot)

    return demo


# === Entry point
if __name__ == "__main__":
    freeze_support()
    try:
        # Initialize the agent
        agent = TxAgent(
            model_name=model_name,
            rag_model_name=rag_model_name,
            tool_files_dict=new_tool_files,
            force_finish=True,
            enable_checker=True,
            step_rag_num=10,
            seed=100,
            additional_default_tools=[]
        )
        agent.init_model()

        if not hasattr(agent, "run_gradio_chat"):
            raise AttributeError("TxAgent missing run_gradio_chat")

        # Create and launch the UI
        demo = create_ui(agent)
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
            favicon_path="assets/favicon.ico"
        )
    except Exception as e:
        print(f"❌ Application failed to start: {e}")
        raise