import os
import sys
import random
import gradio as gr
from datetime import datetime
# Add `src` directory to Python path
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
# Import your agent class from src/txagent/txagent.py
from txagent.txagent import TxAgent
# ==== Environment Setup ====
current_dir = os.path.dirname(os.path.abspath(__file__))
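# Use the GNU threading layer for MKL (avoids OpenMP library clashes) and
# disable tokenizers fork parallelism to silence warnings in the server process.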
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# ==== UI Content ====
DESCRIPTION = '''
TxAgent: An AI Agent for Therapeutic Reasoning Across a Universe of Tools
'''
INTRO = "Precision therapeutics require multimodal adaptive models..."
LICENSE = "DISCLAIMER: THIS WEBSITE DOES NOT PROVIDE MEDICAL ADVICE..."
PLACEHOLDER = '''
TxAgent
Click clear 🗑️ before asking a new question.
Click retry 🔄 to see another answer.
'''
css = """
h1 { text-align: center; }
#duplicate-button {
    margin: auto;
    color: white;
    background: #1565c0;
    border-radius: 100vh;
}
.gradio-accordion {
    margin-top: 0px !important;
    margin-bottom: 0px !important;
}
"""
chat_css = """
.gr-button { font-size: 20px !important; }
.gr-button svg { width: 32px !important; height: 32px !important; }
"""
# ==== Model Settings ====
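# Main reasoning model, plus the ToolRAG model used to retrieve relevant tools.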
model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
new_tool_files = {
"new_tool": os.path.join(current_dir, "data", "new_tool.json")
}
question_examples = [
["Given a 50-year-old patient experiencing severe acute pain and considering the use of the newly approved medication, Journavx, how should the dosage be adjusted considering moderate hepatic impairment?"],
["A 30-year-old patient is on Prozac for depression and now diagnosed with WHIM syndrome. Is Xolremdi suitable?"]
]
# === Initialize the model ===
agent = TxAgent(
    model_name,
    rag_model_name,
    tool_files_dict=new_tool_files,
    force_finish=True,
    enable_checker=True,
    step_rag_num=10,
    seed=100,
    additional_default_tools=["DirectResponse", "RequireClarification"],
)
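# Load/initialize the underlying models; this runs once at startup.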
agent.init_model()
# === Gradio interface logic ===
def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
    return agent.run_gradio_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round)
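# Re-seed the agent so a retried question can produce a different answer.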
def update_seed():
    seed = random.randint(0, 10000)
    return agent.update_parameters(seed=seed)
# Retry handler: re-runs the selected user message with a fresh seed.
# It returns a single final history rather than yielding intermediate updates.
def handle_retry(history, retry_data: gr.RetryData, temperature, max_new_tokens,
                 max_tokens, multi_agent, conversation, max_round):
    update_seed()
    # Drop the retried message and everything after it, then re-submit its prompt.
    new_history = history[:retry_data.index]
    previous_prompt = history[retry_data.index]["content"]
    result = agent.run_gradio_chat(
        previous_prompt, new_history,
        temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round
    )
    # run_gradio_chat may stream by yielding intermediate updates; keep only the
    # final one so a plain return works as the Chatbot output.
    if hasattr(result, "__iter__") and not isinstance(result, (str, dict, list)):
        updates = list(result)
        result = updates[-1] if updates else new_history
    return result
# ===== Build Gradio Interface =====
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.Markdown(INTRO)
    # Generation settings, shared by the chat interface and the retry handler.
    temperature = gr.Slider(0, 1, step=0.1, value=0.3, label="Temperature")
    max_new_tokens = gr.Slider(128, 4096, step=1, value=1024, label="Max New Tokens")
    max_tokens = gr.Slider(128, 32000, step=1, value=8192, label="Max Total Tokens")
    max_round = gr.Slider(1, 50, step=1, value=30, label="Max Rounds")
    multi_agent = gr.Checkbox(label="Enable Multi-agent Reasoning", value=False)
    # Per-session conversation state passed through to the agent.
    conversation_state = gr.State([])
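    # Chat display; the "messages" type uses role/content dictionaries.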
    chatbot = gr.Chatbot(
        label="TxAgent",
        placeholder=PLACEHOLDER,
        height=700,
        type="messages",
        show_copy_button=True,
    )
    # Wire the Chatbot retry button. gr.RetryData is injected through the type
    # hint on handle_retry, so it is not listed among the inputs.
    chatbot.retry(
        handle_retry,
        inputs=[chatbot, temperature, max_new_tokens, max_tokens,
                multi_agent, conversation_state, max_round],
        outputs=[chatbot],
    )
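    # Main chat UI; the sliders and checkbox above are forwarded to handle_chat
    # as additional inputs.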
    gr.ChatInterface(
        fn=handle_chat,
        chatbot=chatbot,
        additional_inputs=[
            temperature, max_new_tokens, max_tokens,
            multi_agent, conversation_state, max_round,
        ],
        examples=question_examples,
        css=chat_css,
        cache_examples=False,
        fill_height=True,
        fill_width=True,
        stop_btn=True,
    )
    gr.Markdown(LICENSE)
# Launch the app (works locally and on Hugging Face Spaces).
demo.launch()