Ali2206 committed
Commit f412a81 · verified
1 Parent(s): 70a71a0

Update src/txagent/txagent.py

Files changed (1): src/txagent/txagent.py (+32, -30)
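This commit migrates run_gradio_chat's chat history from gr.ChatMessage dataclass entries (attribute access: msg.content, msg.metadata) to plain role/content dicts, the "messages" format that a gr.Chatbot(type="messages") component accepts. A minimal sketch of the two shapes, for orientation only; the import style and Chatbot wiring below are illustrative assumptions, not code from this commit:

import gradio as gr

# Old shape: a ChatMessage dataclass, read via attributes (msg.content)
old_entry = gr.ChatMessage(role="assistant", content="Starting analysis...")

# New shape: a plain dict in the "messages" format, read via keys (msg["content"])
new_entry = {"role": "assistant", "content": "Starting analysis..."}

# A Chatbot configured for the messages format renders either shape;
# the diff below standardizes on dicts so that access is uniform throughout.
chatbot = gr.Chatbot(type="messages", value=[new_entry])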
src/txagent/txagent.py CHANGED
@@ -797,17 +797,17 @@ Generate **one summarized sentence** about "function calls' responses" with nece
             str: Final assistant message.
         """
         logger.info("[TxAgent] Chat started with message: %s", message[:100])
-        logger.debug("Initial history: %s", [msg.content[:50] for msg in history] if history else [])
+        logger.debug("Initial history: %s", [msg["content"][:50] for msg in history] if history else [])
 
         # Yield initial message to ensure UI updates
-        history.append(ChatMessage(role="assistant", content="Starting analysis..."))
+        history.append({"role": "assistant", "content": "Starting analysis..."})
         yield history
         logger.debug("Yielded initial history")
 
         try:
             if not message or len(message.strip()) < 5:
                 logger.warning("Invalid message detected")
-                history.append(ChatMessage(role="assistant", content="Please provide a valid message or upload files to analyze."))
+                history.append({"role": "assistant", "content": "Please provide a valid message or upload files to analyze."})
                 yield history
                 return "Invalid input."
 
@@ -851,37 +851,39 @@ Generate **one summarized sentence** about "function calls' responses" with nece
 
                 if not function_call_result:
                     logger.warning("Empty result from run_function_call_stream")
-                    history.append(ChatMessage(role="assistant", content="Error: Tool call processing failed."))
+                    history.append({"role": "assistant", "content": "Error: Tool call processing failed."})
                     yield history
                     return "Error: Tool call processing failed."
 
                 function_call_messages, picked_tools_prompt, special_tool_call, current_gradio_history = function_call_result
 
-                # Deduplicate history entries
+                # Convert ChatMessage to dicts and deduplicate
                 unique_history = []
                 seen_contents = set()
                 for msg in current_gradio_history:
-                    if msg.content not in seen_contents:
-                        unique_history.append(msg)
-                        seen_contents.add(msg.content)
+                    content = msg.content
+                    if content not in seen_contents:
+                        unique_history.append({"role": "assistant", "content": content})
+                        seen_contents.add(content)
                 history.extend(unique_history)
                 logger.debug("Extended history with %d unique messages", len(unique_history))
 
                 if special_tool_call == 'Finish' and function_call_messages:
-                    history.append(ChatMessage(role="assistant", content=function_call_messages[0]['content']))
-                    logger.debug("Yielding final history after Finish: %s", function_call_messages[0]['content'][:50])
+                    content = function_call_messages[0]['content']
+                    history.append({"role": "assistant", "content": content})
+                    logger.debug("Yielding final history after Finish: %s", content[:50])
                     yield history
                     next_round = False
                     conversation.extend(function_call_messages)
-                    return function_call_messages[0]['content']
+                    return content
 
                 elif special_tool_call in ['RequireClarification', 'DirectResponse']:
-                    last_msg = history[-1] if history else ChatMessage(role="assistant", content="Response needed.")
-                    history.append(ChatMessage(role="assistant", content=last_msg.content))
-                    logger.debug("Yielding history for special tool: %s", last_msg.content[:50])
+                    last_msg = history[-1] if history else {"role": "assistant", "content": "Response needed."}
+                    history.append({"role": "assistant", "content": last_msg["content"]})
+                    logger.debug("Yielding history for special tool: %s", last_msg["content"][:50])
                     yield history
                     next_round = False
-                    return last_msg.content
+                    return last_msg["content"]
 
                 if (self.enable_summary or token_overflow) and not call_agent:
                     enable_summary = True
@@ -894,7 +896,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                 else:
                     next_round = False
                     content = ''.join(last_outputs).replace("</s>", "")
-                    history.append(ChatMessage(role="assistant", content=content))
+                    history.append({"role": "assistant", "content": content})
                     conversation.append({"role": "assistant", "content": content})
                     logger.debug("Yielding history with content: %s", content[:50])
                     yield history
@@ -923,26 +925,26 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                 if last_outputs_str is None:
                     logger.warning("llm_infer returned None")
                     error_msg = "Error: Unable to generate response due to token limit. Please reduce input size."
-                    history.append(ChatMessage(role="assistant", content=error_msg))
+                    history.append({"role": "assistant", "content": error_msg})
                     yield history
                     return error_msg
 
                 last_thought = last_outputs_str.split("[TOOL_CALLS]")[0]
 
                 for msg in history:
-                    if msg.metadata is not None:
-                        msg.metadata['status'] = 'done'
+                    if isinstance(msg, dict) and "metadata" in msg and msg["metadata"] is not None:
+                        msg["metadata"]['status'] = 'done'
 
                 if '[FinalAnswer]' in last_thought:
                     parts = last_thought.split('[FinalAnswer]', 1)
                     final_thought, final_answer = parts if len(parts) == 2 else (last_thought, "")
-                    history.append(ChatMessage(role="assistant", content=final_thought.strip()))
-                    history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
+                    history.append({"role": "assistant", "content": final_thought.strip()})
+                    history.append({"role": "assistant", "content": "**🧠 Final Analysis:**\n" + final_answer.strip()})
                     logger.debug("Yielding final analysis: %s", final_answer[:50])
                     yield history
                     next_round = False
                 else:
-                    history.append(ChatMessage(role="assistant", content=last_thought))
+                    history.append({"role": "assistant", "content": last_thought})
                     logger.debug("Yielding intermediate history: %s", last_thought[:50])
                     yield history
 
@@ -956,15 +958,15 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                 if '[FinalAnswer]' in last_outputs_str:
                     parts = last_outputs_str.split('[FinalAnswer]', 1)
                     final_thought, final_answer = parts if len(parts) == 2 else (last_outputs_str, "")
-                    history.append(ChatMessage(role="assistant", content=final_thought.strip()))
-                    history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
+                    history.append({"role": "assistant", "content": final_thought.strip()})
+                    history.append({"role": "assistant", "content": "**🧠 Final Analysis:**\n" + final_answer.strip()})
                 else:
-                    history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
+                    history.append({"role": "assistant", "content": last_outputs_str.strip()})
                 logger.debug("Yielding forced final history")
                 yield history
             else:
                 error_msg = "The number of reasoning rounds exceeded the limit."
-                history.append(ChatMessage(role="assistant", content=error_msg))
+                history.append({"role": "assistant", "content": error_msg})
                 logger.debug("Yielding max rounds error")
                 yield history
                 return error_msg
@@ -972,7 +974,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
         except Exception as e:
             logger.error("Exception in run_gradio_chat: %s", e, exc_info=True)
             error_msg = f"An error occurred: {e}"
-            history.append(ChatMessage(role="assistant", content=error_msg))
+            history.append({"role": "assistant", "content": error_msg})
             logger.debug("Yielding error history: %s", error_msg)
             yield history
             if self.force_finish:
@@ -981,10 +983,10 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                 if '[FinalAnswer]' in last_outputs_str:
                     parts = last_outputs_str.split('[FinalAnswer]', 1)
                     final_thought, final_answer = parts if len(parts) == 2 else (last_outputs_str, "")
-                    history.append(ChatMessage(role="assistant", content=final_thought.strip()))
-                    history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
+                    history.append({"role": "assistant", "content": final_thought.strip()})
+                    history.append({"role": "assistant", "content": "**🧠 Final Analysis:**\n" + final_answer.strip()})
                 else:
-                    history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
+                    history.append({"role": "assistant", "content": last_outputs_str.strip()})
                 logger.debug("Yielding forced final history after error")
                 yield history
                 return error_msg
 
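For context, a hedged sketch of how a streaming generator like run_gradio_chat is typically consumed by a Gradio UI. The agent construction, handler name, and the simplified (message, history) argument list are assumptions for illustration, not code from this repository:

import gradio as gr
from txagent.txagent import TxAgent  # module path per this repo; constructor args are assumed

agent = TxAgent()  # hypothetical instantiation; the real class likely takes config arguments

def respond(message, history):
    # run_gradio_chat yields the growing history list after each step;
    # Gradio streams each yielded list straight into the Chatbot component.
    # (Extra parameters such as sampling settings are omitted here.)
    yield from agent.run_gradio_chat(message, history)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox(placeholder="Ask TxAgent...")
    box.submit(respond, inputs=[box, chatbot], outputs=chatbot)

demo.launch()

Because the function both yields intermediate histories and returns a final string, Gradio consumes only the yields for streaming; the return value matters only to direct callers of the generator.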