Update app.py
Browse files
app.py
CHANGED
@@ -139,18 +139,18 @@ def create_agent():
|
|
139 |
|
140 |
def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
|
141 |
if not msg or len(msg.strip()) <= 10:
|
142 |
-
return chat_history + [
|
143 |
|
144 |
-
|
|
|
145 |
print("\n==== DEBUG ====")
|
146 |
print("User Message:", msg)
|
147 |
print("Chat History:", chat_history)
|
148 |
print("================\n")
|
149 |
|
150 |
-
# Format for TxAgent
|
151 |
-
formatted_history = [(role, content) for role, content in chat_history]
|
152 |
-
|
153 |
try:
|
|
|
|
|
154 |
response_generator = agent.run_gradio_chat(
|
155 |
formatted_history,
|
156 |
temperature,
|
@@ -160,23 +160,21 @@ def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_ag
|
|
160 |
conversation,
|
161 |
max_round
|
162 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
163 |
except Exception as e:
|
164 |
-
chat_history.append(
|
165 |
-
return chat_history
|
166 |
-
|
167 |
-
collected = ""
|
168 |
-
for chunk in response_generator:
|
169 |
-
if isinstance(chunk, dict):
|
170 |
-
collected += chunk.get("content", "")
|
171 |
-
else:
|
172 |
-
collected += str(chunk)
|
173 |
|
174 |
-
chat_history.append(["assistant", collected])
|
175 |
return chat_history
|
176 |
|
177 |
def create_demo(agent):
|
178 |
with gr.Blocks(css=chat_css) as demo:
|
179 |
-
chatbot = gr.Chatbot(label="TxAgent",
|
180 |
msg = gr.Textbox(label="Your question", placeholder="Type your biomedical query...", scale=6)
|
181 |
with gr.Row():
|
182 |
temp = gr.Slider(0, 1, value=0.3, label="Temperature")
|
|
|
def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
    """Handle one chat turn: validate the message, run the agent, and
    return the updated history in Gradio "messages" format.

    Args:
        msg: The user's new message text.
        chat_history: Running history as a list of {"role", "content"} dicts.
        temperature, max_new_tokens, max_tokens: Sampling/length settings
            forwarded to the agent.
        multi_agent: Flag forwarded to the agent.
        conversation: Conversation state forwarded to the agent.
        max_round: Maximum agent tool-call rounds.

    Returns:
        The updated chat history (list of role/content dicts).  Any agent
        failure is appended as an assistant "Error: ..." message so the
        Gradio UI never sees an exception.
    """
    # Reject empty or trivially short messages up front.
    if not msg or len(msg.strip()) <= 10:
        return chat_history + [{"role": "assistant", "content": "Please provide a valid message with a string longer than 10 characters."}]

    # Rebind to a new list so the caller's history isn't mutated in place
    # before we commit the user turn.
    chat_history = chat_history + [{"role": "user", "content": msg}]
    print("\n==== DEBUG ====")
    print("User Message:", msg)
    print("Chat History:", chat_history)
    print("================\n")

    try:
        # The agent expects (role, content) tuples rather than message dicts.
        formatted_history = [(m["role"], m["content"]) for m in chat_history]
        # NOTE(review): the middle arguments were elided in the diff hunk
        # (lines 157-159); order assumed from this function's signature —
        # confirm against agent.run_gradio_chat's definition.
        response_generator = agent.run_gradio_chat(
            formatted_history,
            temperature,
            max_new_tokens,
            max_tokens,
            multi_agent,
            conversation,
            max_round
        )
        # Collect streamed chunks; chunks may be dicts or plain text.
        # Use a list + join (linear) instead of repeated += (quadratic),
        # and guard against a present-but-None "content" key, which the
        # original `chunk.get("content", "")` did not handle.
        pieces = []
        for chunk in response_generator:
            if isinstance(chunk, dict):
                pieces.append(chunk.get("content") or "")
            else:
                pieces.append(str(chunk))
        chat_history.append({"role": "assistant", "content": "".join(pieces)})
    except Exception as e:
        # Surface the failure in-chat instead of raising into Gradio.
        chat_history.append({"role": "assistant", "content": f"Error: {e}"})

    return chat_history
|
175 |
def create_demo(agent):
|
176 |
with gr.Blocks(css=chat_css) as demo:
|
177 |
+
chatbot = gr.Chatbot(label="TxAgent", render_markdown=True)
|
178 |
msg = gr.Textbox(label="Your question", placeholder="Type your biomedical query...", scale=6)
|
179 |
with gr.Row():
|
180 |
temp = gr.Slider(0, 1, value=0.3, label="Temperature")
|