Update app.py
Browse files
app.py
CHANGED
@@ -43,7 +43,7 @@ CONFIG = {
|
|
43 |
def generate_tool_embeddings(agent):
|
44 |
tu = ToolUniverse(tool_files=CONFIG["tool_files"])
|
45 |
tu.load_tools()
|
46 |
-
embedding_tensor = agent.rag_model.
|
47 |
if embedding_tensor is not None:
|
48 |
torch.save(embedding_tensor, CONFIG["embedding_filename"])
|
49 |
logger.info(f"Saved new embedding tensor to {CONFIG['embedding_filename']}")
|
@@ -88,7 +88,7 @@ def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_ag
|
|
88 |
|
89 |
message = msg.strip()
|
90 |
chat_history.append({"role": "user", "content": message})
|
91 |
-
formatted_history =
|
92 |
|
93 |
try:
|
94 |
response_generator = agent.run_gradio_chat(
|
@@ -101,18 +101,20 @@ def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_ag
|
|
101 |
conversation=conversation,
|
102 |
max_round=max_round,
|
103 |
seed=42,
|
104 |
-
call_agent_level=
|
105 |
sub_agent_task=None
|
106 |
)
|
107 |
|
108 |
collected = ""
|
109 |
for chunk in response_generator:
|
110 |
-
if isinstance(chunk,
|
|
|
|
|
|
|
|
|
111 |
collected += chunk["content"]
|
112 |
elif isinstance(chunk, str):
|
113 |
collected += chunk
|
114 |
-
elif chunk is not None:
|
115 |
-
collected += str(chunk)
|
116 |
|
117 |
chat_history.append({"role": "assistant", "content": collected or "⚠️ No content returned."})
|
118 |
|
@@ -143,7 +145,7 @@ def main():
|
|
143 |
global agent
|
144 |
agent = create_agent()
|
145 |
demo = create_demo(agent)
|
146 |
-
demo.launch(share=
|
147 |
|
148 |
if __name__ == "__main__":
|
149 |
main()
|
|
|
def generate_tool_embeddings(agent):
    """Build embeddings for the configured tool universe and persist them.

    Loads every tool file listed in ``CONFIG["tool_files"]``, asks the
    agent's RAG model for a tool-description embedding tensor, and — when a
    tensor is actually produced — saves it to ``CONFIG["embedding_filename"]``.
    """
    universe = ToolUniverse(tool_files=CONFIG["tool_files"])
    universe.load_tools()

    tensor = agent.rag_model.load_tool_desc_embedding(universe)
    if tensor is None:
        # Nothing to persist — the RAG model produced no embedding tensor.
        return

    torch.save(tensor, CONFIG["embedding_filename"])
    logger.info(f"Saved new embedding tensor to {CONFIG['embedding_filename']}")
|
|
88 |
|
89 |
message = msg.strip()
|
90 |
chat_history.append({"role": "user", "content": message})
|
91 |
+
formatted_history = chat_history # format as list of dicts for run_gradio_chat
|
92 |
|
93 |
try:
|
94 |
response_generator = agent.run_gradio_chat(
|
|
|
101 |
conversation=conversation,
|
102 |
max_round=max_round,
|
103 |
seed=42,
|
104 |
+
call_agent_level=0,
|
105 |
sub_agent_task=None
|
106 |
)
|
107 |
|
108 |
collected = ""
|
109 |
for chunk in response_generator:
|
110 |
+
if isinstance(chunk, list):
|
111 |
+
for msg in chunk:
|
112 |
+
if isinstance(msg, dict) and "content" in msg:
|
113 |
+
collected += msg["content"]
|
114 |
+
elif isinstance(chunk, dict) and "content" in chunk:
|
115 |
collected += chunk["content"]
|
116 |
elif isinstance(chunk, str):
|
117 |
collected += chunk
|
|
|
|
|
118 |
|
119 |
chat_history.append({"role": "assistant", "content": collected or "⚠️ No content returned."})
|
120 |
|
|
|
145 |
global agent
|
146 |
agent = create_agent()
|
147 |
demo = create_demo(agent)
|
148 |
+
demo.launch(share=True)
|
149 |
|
# Entry point: launch the app only when run as a script, never on import.
if __name__ == "__main__":
    main()