Ali2206 committed on
Commit
4712249
·
verified ·
1 Parent(s): c3218a0

Update src/txagent/txagent.py

Browse files
Files changed (1) hide show
  1. src/txagent/txagent.py +26 -3
src/txagent/txagent.py CHANGED
@@ -784,6 +784,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
784
  Returns:
785
  str: Final assistant message.
786
  """
 
787
  print("\033[1;32;40m[TxAgent] Chat started\033[0m")
788
 
789
  if not message or len(message.strip()) < 5:
@@ -822,6 +823,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
822
  try:
823
  while next_round and current_round < max_round:
824
  current_round += 1
 
825
 
826
  if last_outputs:
827
  function_call_messages, picked_tools_prompt, special_tool_call, current_gradio_history = yield from self.run_function_call_stream(
@@ -879,6 +881,22 @@ Generate **one summarized sentence** about "function calls' responses" with nece
879
  seed=seed,
880
  check_token_status=True)
881
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
882
  last_thought = last_outputs_str.split("[TOOL_CALLS]")[0]
883
 
884
  for msg in history:
@@ -922,7 +940,10 @@ Generate **one summarized sentence** about "function calls' responses" with nece
922
  yield "The number of reasoning rounds exceeded the limit."
923
 
924
  except Exception as e:
925
- print(f"[TxAgent] Exception occurred: {e}")
 
 
 
926
  if self.force_finish:
927
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
928
  conversation, temperature, max_new_tokens, max_token)
@@ -936,5 +957,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
936
  yield history
937
  history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
938
  yield history
939
- else:
940
- yield f"An error occurred: {e}"
 
 
 
784
  Returns:
785
  str: Final assistant message.
786
  """
787
+ logger.debug(f"[TxAgent] Chat started, message: {message[:100]}...")
788
  print("\033[1;32;40m[TxAgent] Chat started\033[0m")
789
 
790
  if not message or len(message.strip()) < 5:
 
823
  try:
824
  while next_round and current_round < max_round:
825
  current_round += 1
826
+ logger.debug(f"Round {current_round}, conversation length: {len(conversation)}")
827
 
828
  if last_outputs:
829
  function_call_messages, picked_tools_prompt, special_tool_call, current_gradio_history = yield from self.run_function_call_stream(
 
881
  seed=seed,
882
  check_token_status=True)
883
 
884
+ logger.debug(f"llm_infer output: {last_outputs_str[:100] if last_outputs_str else None}, token_overflow: {token_overflow}")
885
+
886
+ if last_outputs_str is None:
887
+ logger.warning("llm_infer returned None due to token overflow")
888
+ if self.force_finish:
889
+ last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
890
+ conversation, temperature, max_new_tokens, max_token)
891
+ history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
892
+ yield history
893
+ return last_outputs_str
894
+ else:
895
+ error_msg = "Token limit exceeded. Please reduce input size or increase max_token."
896
+ history.append(ChatMessage(role="assistant", content=error_msg))
897
+ yield history
898
+ return error_msg
899
+
900
  last_thought = last_outputs_str.split("[TOOL_CALLS]")[0]
901
 
902
  for msg in history:
 
940
  yield "The number of reasoning rounds exceeded the limit."
941
 
942
  except Exception as e:
943
+ logger.error(f"Exception in run_gradio_chat: {e}", exc_info=True)
944
+ error_msg = f"An error occurred: {e}"
945
+ history.append(ChatMessage(role="assistant", content=error_msg))
946
+ yield history
947
  if self.force_finish:
948
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
949
  conversation, temperature, max_new_tokens, max_token)
 
957
  yield history
958
  history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
959
  yield history
960
+ else:
961
+ history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
962
+ yield history
963
+ return error_msg