import os
import json
import torch
import logging
import numpy
import gradio as gr
import torch.serialization
from importlib.resources import files
from txagent import TxAgent
from tooluniverse import ToolUniverse
# Patch PyTorch to allow loading old numpy pickles
torch.serialization.add_safe_globals([
    numpy.core.multiarray._reconstruct,
    numpy.ndarray,
    numpy.dtype
])
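# Note: NumPy >= 2.0 renamed numpy.core to numpy._core; embeddings pickled
# under NumPy 2.x may additionally require allow-listing
# numpy._core.multiarray._reconstruct here.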
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
current_dir = os.path.dirname(os.path.abspath(__file__))
CONFIG = {
    "model_name": "mims-harvard/TxAgent-T1-Llama-3.1-8B",
    "rag_model_name": "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
    "embedding_filename": "ToolRAG-T1-GTE-Qwen2-1.5Btool_embedding_47dc56b3e3ddeb31af4f19defdd538d984de1500368852a0fab80bc2e826c944.pt",
    "tool_files": {
        "opentarget": str(files('tooluniverse.data').joinpath('opentarget_tools.json')),
        "fda_drug_label": str(files('tooluniverse.data').joinpath('fda_drug_labeling_tools.json')),
        "special_tools": str(files('tooluniverse.data').joinpath('special_tools.json')),
        "monarch": str(files('tooluniverse.data').joinpath('monarch_tools.json')),
        "new_tool": os.path.join(current_dir, 'data', 'new_tool.json')
    }
}
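
# The embedding cache was pickled under an old NumPy, so the safe
# weights_only=True path can fail. We fall back to a full unpickle only
# because this file is generated locally and trusted; weights_only=False can
# execute arbitrary pickle code and must never be used on untrusted files.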
def safe_load_embeddings(filepath):
    try:
        return torch.load(filepath, weights_only=True)
    except Exception as e:
        logger.warning(f"Retrying with weights_only=False due to: {e}")
        try:
            return torch.load(filepath, weights_only=False)
        except Exception as e:
            logger.error(f"Failed to load embeddings: {e}")
            return None
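
# Monkey-patch ToolRAGModel.load_tool_desc_embedding so a missing or
# unreadable embedding file degrades gracefully (return False) instead of
# raising, and so the embedding row count is reconciled with the current tool
# list. Padding with copies of the last row is a stopgap that keeps shapes
# consistent; regenerating the embeddings is the proper fix.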
def patch_embedding_loading():
    from txagent.toolrag import ToolRAGModel

    def patched_load(self, tooluniverse):
        try:
            if not os.path.exists(CONFIG["embedding_filename"]):
                logger.error(f"Embedding file not found: {CONFIG['embedding_filename']}")
                return False
            self.tool_desc_embedding = safe_load_embeddings(CONFIG["embedding_filename"])
            if self.tool_desc_embedding is None:
                logger.error("Tool embedding file could not be loaded.")
                return False
            tools = tooluniverse.get_all_tools() if hasattr(tooluniverse, "get_all_tools") else getattr(tooluniverse, "tools", [])
            if len(tools) != len(self.tool_desc_embedding):
                logger.warning("Tool count mismatch: %d tools vs %d embeddings.", len(tools), len(self.tool_desc_embedding))
                if len(self.tool_desc_embedding) > len(tools):
                    # Drop surplus embedding rows.
                    self.tool_desc_embedding = self.tool_desc_embedding[:len(tools)]
                else:
                    # Pad by repeating the last embedding row.
                    padding = self.tool_desc_embedding[-1].unsqueeze(0).repeat(len(tools) - len(self.tool_desc_embedding), 1)
                    self.tool_desc_embedding = torch.cat([self.tool_desc_embedding, padding], dim=0)
            return True
        except Exception as e:
            logger.error(f"Embedding load failed: {e}")
            return False

    ToolRAGModel.load_tool_desc_embedding = patched_load
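
# Generate data/new_tool.json on first run so the "new_tool" entry in
# CONFIG["tool_files"] points at a real file when TxAgent loads its tools.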
def prepare_tool_files():
    os.makedirs(os.path.join(current_dir, 'data'), exist_ok=True)
    if not os.path.exists(CONFIG["tool_files"]["new_tool"]):
        try:
            tu = ToolUniverse()
            tools = tu.get_all_tools() if hasattr(tu, "get_all_tools") else getattr(tu, "tools", [])
            with open(CONFIG["tool_files"]["new_tool"], "w") as f:
                json.dump(tools, f, indent=2)
        except Exception as e:
            logger.error(f"Tool generation failed: {e}")
def create_agent():
    patch_embedding_loading()
    prepare_tool_files()
    try:
        tu = ToolUniverse()
        tools = tu.get_all_tools() if hasattr(tu, "get_all_tools") else getattr(tu, "tools", [])
        available_tool_names = [t["name"] for t in tools]
        additional_default_tools = [t for t in ["DirectResponse", "RequireClarification"] if t in available_tool_names]
        agent = TxAgent(
            CONFIG["model_name"],
            CONFIG["rag_model_name"],
            tool_files_dict=CONFIG["tool_files"],
            force_finish=True,
            enable_checker=True,
            step_rag_num=10,
            seed=42,
            additional_default_tools=additional_default_tools
        )
        agent.init_model()
        return agent
    except Exception as e:
        logger.error(f"Agent initialization failed: {e}")
        raise
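
# Gradio callback. chat_history uses the Chatbot "messages" format (dicts
# with "role"/"content") and relies on the module-level `agent` created in
# main(). run_gradio_chat may yield dicts, plain strings, or other objects,
# so every chunk shape is handled when assembling the reply.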
def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
    if not isinstance(msg, str) or len(msg.strip()) <= 10:
        chat_history.append({"role": "assistant", "content": "Hi, I am TxAgent. Please provide a valid message longer than 10 characters."})
        return chat_history
    message = msg.strip()
    chat_history.append({"role": "user", "content": message})
    # Convert message dicts to (role, content) tuples for run_gradio_chat.
    formatted_history = [(m["role"], m["content"]) for m in chat_history if "role" in m and "content" in m]
    try:
        response_generator = agent.run_gradio_chat(
            message=message,
            history=formatted_history,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            max_token=max_tokens,
            call_agent=multi_agent,
            conversation=conversation,
            max_round=max_round,
            seed=42,
            call_agent_level=None,
            sub_agent_task=None
        )
        collected = ""
        for chunk in response_generator:
            if isinstance(chunk, dict) and "content" in chunk:
                collected += chunk["content"]
            elif isinstance(chunk, str):
                collected += chunk
            elif chunk is not None:
                collected += str(chunk)
        chat_history.append({"role": "assistant", "content": collected or "⚠️ No content returned."})
    except Exception as e:
        chat_history.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})
    return chat_history
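
# UI layout and wiring. The inline gr.State([]) gives each browser session
# its own conversation list for the `conversation` argument of respond().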
def create_demo(agent):
    with gr.Blocks(css=".gr-button { font-size: 18px !important; }") as demo:
        chatbot = gr.Chatbot(label="TxAgent", type="messages", render_markdown=True)
        msg = gr.Textbox(label="Your question", placeholder="Ask a biomedical question...", scale=6)
        with gr.Row():
            temp = gr.Slider(0, 1, value=0.3, label="Temperature")
            max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
            max_tokens = gr.Slider(128, 81920, value=81920, label="Max Total Tokens")
            max_rounds = gr.Slider(1, 30, value=30, label="Max Rounds")
        multi_agent = gr.Checkbox(label="Multi-Agent Mode")
        submit = gr.Button("Ask TxAgent")
        submit.click(
            respond,
            inputs=[msg, chatbot, temp, max_new_tokens, max_tokens, multi_agent, gr.State([]), max_rounds],
            outputs=[chatbot]
        )
    return demo
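
# Entry point: the agent is stored as a module-level global so the respond()
# callback can reach it without passing it through Gradio state.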
def main():
    global agent
    agent = create_agent()
    demo = create_demo(agent)
    demo.launch(share=False)


if __name__ == "__main__":
    main()
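
# Usage sketch (assumes this file is saved as app.py and that torch, numpy,
# gradio, txagent, and tooluniverse are installed):
#   python app.py
# Gradio then prints a local URL serving the TxAgent chat interface.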