Ali2206 committed on
Commit
3823832
·
verified ·
1 Parent(s): dcc7384

Update src/txagent/txagent.py

Browse files
Files changed (1) hide show
  1. src/txagent/txagent.py +37 -27
src/txagent/txagent.py CHANGED
@@ -755,17 +755,17 @@ Generate **one summarized sentence** about "function calls' responses" with nece
755
  return updated_attributes
756
 
757
  def run_gradio_chat(self, message: str,
758
- history: list,
759
- temperature: float,
760
- max_new_tokens: int,
761
- max_token: int,
762
- call_agent: bool,
763
- conversation: gr.State,
764
- max_round: int = 20,
765
- seed: int = None,
766
- call_agent_level: int = 0,
767
- sub_agent_task: str = None,
768
- uploaded_files: list = None) -> str:
769
  """
770
  Generate a streaming response using the loaded model.
771
  Args:
@@ -783,7 +783,6 @@ Generate **one summarized sentence** about "function calls' responses" with nece
783
  yield "Please provide a valid message or upload files to analyze."
784
  return "Invalid input."
785
 
786
- # Remove tool call traces or debug prefixes
787
  if message.startswith("[🧰 Tool_RAG") or message.startswith("⚒️"):
788
  return ""
789
 
@@ -827,16 +826,21 @@ Generate **one summarized sentence** about "function calls' responses" with nece
827
  temperature=temperature)
828
 
829
  history.extend(current_gradio_history)
830
- if special_tool_call == 'Finish':
 
831
  yield history
832
  next_round = False
833
  conversation.extend(function_call_messages)
834
  return function_call_messages[0]['content']
835
  elif special_tool_call in ['RequireClarification', 'DirectResponse']:
836
- history.append(ChatMessage(role="assistant", content=history[-1].content))
837
- yield history
838
- next_round = False
839
- return history[-1].content
 
 
 
 
840
 
841
  if (self.enable_summary or token_overflow) and not call_agent:
842
  enable_summary = True
@@ -845,7 +849,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
845
  conversation, status=last_status,
846
  enable_summary=enable_summary)
847
 
848
- if function_call_messages is not None:
849
  conversation.extend(function_call_messages)
850
  yield history
851
  else:
@@ -877,10 +881,12 @@ Generate **one summarized sentence** about "function calls' responses" with nece
877
  msg.metadata['status'] = 'done'
878
 
879
  if '[FinalAnswer]' in last_thought:
880
- final_thought, final_answer = last_thought.split('[FinalAnswer]', 1)
881
- history.append(ChatMessage(role="assistant", content=final_thought.strip()))
 
 
882
  yield history
883
- history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
884
  yield history
885
  else:
886
  history.append(ChatMessage(role="assistant", content=last_thought))
@@ -893,10 +899,12 @@ Generate **one summarized sentence** about "function calls' responses" with nece
893
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
894
  conversation, temperature, max_new_tokens, max_token)
895
  if '[FinalAnswer]' in last_outputs_str:
896
- final_thought, final_answer = last_outputs_str.split('[FinalAnswer]', 1)
897
- history.append(ChatMessage(role="assistant", content=final_thought.strip()))
 
 
898
  yield history
899
- history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
900
  yield history
901
  else:
902
  history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
@@ -910,10 +918,12 @@ Generate **one summarized sentence** about "function calls' responses" with nece
910
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
911
  conversation, temperature, max_new_tokens, max_token)
912
  if '[FinalAnswer]' in last_outputs_str:
913
- final_thought, final_answer = last_outputs_str.split('[FinalAnswer]', 1)
914
- history.append(ChatMessage(role="assistant", content=final_thought.strip()))
 
 
915
  yield history
916
- history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
917
  yield history
918
  else:
919
  yield f"An error occurred: {e}"
 
755
  return updated_attributes
756
 
757
  def run_gradio_chat(self, message: str,
758
+ history: list,
759
+ temperature: float,
760
+ max_new_tokens: int,
761
+ max_token: int,
762
+ call_agent: bool,
763
+ conversation: gr.State,
764
+ max_round: int = 20,
765
+ seed: int = None,
766
+ call_agent_level: int = 0,
767
+ sub_agent_task: str = None,
768
+ uploaded_files: list = None) -> str:
769
  """
770
  Generate a streaming response using the loaded model.
771
  Args:
 
783
  yield "Please provide a valid message or upload files to analyze."
784
  return "Invalid input."
785
 
 
786
  if message.startswith("[🧰 Tool_RAG") or message.startswith("⚒️"):
787
  return ""
788
 
 
826
  temperature=temperature)
827
 
828
  history.extend(current_gradio_history)
829
+
830
+ if special_tool_call == 'Finish' and function_call_messages:
831
  yield history
832
  next_round = False
833
  conversation.extend(function_call_messages)
834
  return function_call_messages[0]['content']
835
  elif special_tool_call in ['RequireClarification', 'DirectResponse']:
836
+ if history:
837
+ history.append(ChatMessage(role="assistant", content=history[-1].content))
838
+ yield history
839
+ next_round = False
840
+ return history[-1].content
841
+ else:
842
+ yield "I need more information to proceed."
843
+ return "Missing data."
844
 
845
  if (self.enable_summary or token_overflow) and not call_agent:
846
  enable_summary = True
 
849
  conversation, status=last_status,
850
  enable_summary=enable_summary)
851
 
852
+ if function_call_messages:
853
  conversation.extend(function_call_messages)
854
  yield history
855
  else:
 
881
  msg.metadata['status'] = 'done'
882
 
883
  if '[FinalAnswer]' in last_thought:
884
+ parts = last_thought.split('[FinalAnswer]', 1)
885
+ final_thought = parts[0].strip()
886
+ final_answer = parts[1].strip() if len(parts) > 1 else ""
887
+ history.append(ChatMessage(role="assistant", content=final_thought))
888
  yield history
889
+ history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer))
890
  yield history
891
  else:
892
  history.append(ChatMessage(role="assistant", content=last_thought))
 
899
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
900
  conversation, temperature, max_new_tokens, max_token)
901
  if '[FinalAnswer]' in last_outputs_str:
902
+ parts = last_outputs_str.split('[FinalAnswer]', 1)
903
+ final_thought = parts[0].strip()
904
+ final_answer = parts[1].strip() if len(parts) > 1 else ""
905
+ history.append(ChatMessage(role="assistant", content=final_thought))
906
  yield history
907
+ history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer))
908
  yield history
909
  else:
910
  history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
 
918
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
919
  conversation, temperature, max_new_tokens, max_token)
920
  if '[FinalAnswer]' in last_outputs_str:
921
+ parts = last_outputs_str.split('[FinalAnswer]', 1)
922
+ final_thought = parts[0].strip()
923
+ final_answer = parts[1].strip() if len(parts) > 1 else ""
924
+ history.append(ChatMessage(role="assistant", content=final_thought))
925
  yield history
926
+ history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer))
927
  yield history
928
  else:
929
  yield f"An error occurred: {e}"