Ali2206 committed on
Commit
5205ee8
·
verified ·
1 Parent(s): 7bb41a5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -7
app.py CHANGED
@@ -139,18 +139,20 @@ def create_agent():
139
 
140
  def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
141
  if not msg or len(msg.strip()) <= 10:
142
- return chat_history + [{"role": "assistant", "content": "Please provide a valid message with a string longer than 10 characters."}]
 
 
 
143
 
144
- # Append new user message
145
- chat_history = chat_history + [{"role": "user", "content": msg}]
146
  print("\n==== DEBUG ====")
147
  print("User Message:", msg)
148
  print("Chat History:", chat_history)
149
  print("================\n")
150
 
151
  try:
152
- # Convert to tuples for the agent
153
- formatted_history = [(m["role"], m["content"]) for m in chat_history]
 
154
  response_generator = agent.run_gradio_chat(
155
  formatted_history,
156
  temperature,
@@ -160,15 +162,17 @@ def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_ag
160
  conversation,
161
  max_round
162
  )
 
163
  collected = ""
164
  for chunk in response_generator:
165
  if isinstance(chunk, dict):
166
  collected += chunk.get("content", "")
167
  else:
168
  collected += str(chunk)
169
- chat_history.append({"role": "assistant", "content": collected})
 
170
  except Exception as e:
171
- chat_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
172
 
173
  return chat_history
174
 
 
139
 
140
  def respond(msg, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
141
  if not msg or len(msg.strip()) <= 10:
142
+ return chat_history + [["assistant", "Please provide a valid message with a string longer than 10 characters."]]
143
+
144
+ # Append user message in tuple format
145
+ chat_history = chat_history + [["user", msg]]
146
 
 
 
147
  print("\n==== DEBUG ====")
148
  print("User Message:", msg)
149
  print("Chat History:", chat_history)
150
  print("================\n")
151
 
152
  try:
153
+ # Convert to TxAgent format
154
+ formatted_history = [(role, content) for role, content in chat_history]
155
+
156
  response_generator = agent.run_gradio_chat(
157
  formatted_history,
158
  temperature,
 
162
  conversation,
163
  max_round
164
  )
165
+
166
  collected = ""
167
  for chunk in response_generator:
168
  if isinstance(chunk, dict):
169
  collected += chunk.get("content", "")
170
  else:
171
  collected += str(chunk)
172
+
173
+ chat_history.append(["assistant", collected])
174
  except Exception as e:
175
+ chat_history.append(["assistant", f"Error: {str(e)}"])
176
 
177
  return chat_history
178