Ali2206 committed
Commit 6d40680 · verified · 1 Parent(s): f6a62a5

Update app.py

Files changed (1):
  1. app.py +21 -24
app.py CHANGED
@@ -137,24 +137,20 @@ def create_agent():
         logger.error(f"Failed to create agent: {str(e)}")
         raise
 
-def respond(chat_history, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
-    if not chat_history:
-        return [["assistant", "Please provide a message."]]
-
-    message = chat_history[-1][1] if isinstance(chat_history[-1], (list, tuple)) else chat_history[-1]
-
-    if not isinstance(message, str) or len(message.strip()) <= 10:
+def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
+    if not msg or len(msg.strip()) <= 10:
         return chat_history + [["assistant", "Please provide a valid message with a string longer than 10 characters."]]
 
-    updated_history = history + [{"role": "user", "content": message}]
+    chat_history = chat_history + [["user", msg]]
     print("\n==== DEBUG ====")
-    print("User Message:", message)
-    print("Full History:", updated_history)
+    print("User Message:", msg)
+    print("Chat History:", chat_history)
     print("================\n")
 
-    try:
-        formatted_history = [(m["role"], m["content"]) for m in updated_history]
+    # Format for TxAgent
+    formatted_history = [(role, content) for role, content in chat_history]
 
+    try:
         response_generator = agent.run_gradio_chat(
             formatted_history,
             temperature,
@@ -165,22 +161,23 @@ def respond(chat_history, history, temperature, max_new_tokens, max_tokens, mult
             max_round
         )
     except Exception as e:
-        updated_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
-    else:
-        collected = ""
-        for chunk in response_generator:
-            if isinstance(chunk, dict):
-                collected += chunk.get("content", "")
-            else:
-                collected += str(chunk)
-        updated_history.append({"role": "assistant", "content": collected})
+        chat_history.append(["assistant", f"Error: {str(e)}"])
+        return chat_history
+
+    collected = ""
+    for chunk in response_generator:
+        if isinstance(chunk, dict):
+            collected += chunk.get("content", "")
+        else:
+            collected += str(chunk)
 
-    # Return formatted history to Gradio
-    return [(m["role"], m["content"]) for m in updated_history]
+    chat_history.append(["assistant", collected])
+    return chat_history
 
 def create_demo(agent):
     with gr.Blocks(css=chat_css) as demo:
         chatbot = gr.Chatbot(label="TxAgent", type="messages")
+        msg = gr.Textbox(label="Your question", placeholder="Type your biomedical query...", scale=6)
         with gr.Row():
             temp = gr.Slider(0, 1, value=0.3, label="Temperature")
             max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
@@ -192,7 +189,7 @@ def create_demo(agent):
 
         submit.click(
             respond,
-            inputs=[chatbot, chatbot, temp, max_new_tokens, max_tokens, multi_agent, gr.State([]), max_rounds],
+            inputs=[msg, chatbot, temp, max_new_tokens, max_tokens, multi_agent, gr.State([]), max_rounds],
             outputs=[chatbot]
        )
 
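
For reference, a minimal sketch of how the reworked respond() could be exercised outside the Gradio UI. StubAgent, its hard-coded chunks, and the max_tokens / multi_agent / max_round values below are illustrative assumptions, not part of the commit; respond() reads the module-level agent name, so the stub is assigned to it.

# Minimal sketch: drive the new respond() with a stubbed agent.
# StubAgent and all sample values are assumptions for illustration only.

class StubAgent:
    def run_gradio_chat(self, history, temperature, *args):
        # Yield chunks in both shapes respond() accepts: dicts with "content", or plain strings.
        yield {"content": "Aspirin irreversibly inhibits "}
        yield "COX-1 and COX-2."

agent = StubAgent()  # respond() looks up this module-level name

updated = respond(
    "What is the mechanism of action of aspirin?",  # msg (must be > 10 chars after strip)
    [],     # chat_history
    0.3,    # temperature (UI slider default)
    1024,   # max_new_tokens (UI slider default)
    8192,   # max_tokens (assumed value)
    False,  # multi_agent (assumed value)
    [],     # conversation (gr.State([]) in the UI)
    5,      # max_round (assumed value)
)
print(updated[-1])  # ['assistant', 'Aspirin irreversibly inhibits COX-1 and COX-2.']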