improved main agent/doc agent interaction
AI_CHANGELOG.md
CHANGED
@@ -1,4 +1,10 @@
 # AI CHANGELOG
+## [Refactor Memory Management and Remove Intermediate Steps](https://github.com/joshuasundance-swca/langchain-streamlit-demo/commit/3a2b820bcfb7ef8dc4157626de454be9267e1e91)
+Tue Dec 19 09:35:23 2023 -0500
+- This commit refactors memory management in the Streamlit app and other resources. It replaces 'AgentTokenBufferMemory' with 'ConversationBufferMemory' for more efficient memory usage, and sets the 'return_messages' flag to 'True' in the new memory object.
+- The commit also disables the return of intermediate steps in both the Streamlit app and the agent executor by setting 'return_intermediate_steps' to 'False', which can improve performance by avoiding unnecessary computation.
+- Furthermore, the commit simplifies the 'python_coder.py' script by removing unused imports, the command-line argument parsing, and the runtime check for 'black', 'ruff', and 'mypy'. It also removes the main execution block, indicating that the script is now meant to be used as a module rather than as a standalone script.
+- The changes in this commit aim to improve the performance and maintainability of the code.
 ## [Updated application version to 2.1.2](https://github.com/joshuasundance-swca/langchain-streamlit-demo/commit/3fddfc9d34f66f92255f6a0d0878e20db401d694)
 Mon Dec 18 11:02:13 2023 -0500
 - Bumped the current version of the application in the bumpver.toml file from 2.1.1 to 2.1.2.
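The changelog entry above describes a drop-in memory swap. A minimal sketch of the before/after, with a generic `ChatMessageHistory` standing in for whatever chat store the app passes as `chat_history` (langchain 0.0.x-era imports):

```python
from langchain.memory import ChatMessageHistory, ConversationBufferMemory

# Stand-in for the chat store the app threads through as `chat_history`.
chat_history = ChatMessageHistory()

# Before: AgentTokenBufferMemory(chat_memory=chat_history, memory_key="chat_history", llm=llm)
# trimmed history to a token budget and needed the llm to count tokens.
# After: a plain buffer that keeps the full history and needs no llm.
agent_memory = ConversationBufferMemory(
    chat_memory=chat_history,
    return_messages=True,  # hand back Message objects instead of one flattened string
    memory_key="chat_history",
)
```

Note the tradeoff: ConversationBufferMemory does not bound the history by token count, so the efficiency gain appears to be in skipping per-turn token counting rather than in limiting memory size.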
langchain-streamlit-demo/app.py
CHANGED
@@ -431,7 +431,7 @@ if st.session_state.llm:
         callbacks=callbacks,
         tags=["Streamlit Chat"],
         verbose=True,
-        return_intermediate_steps=True,
+        return_intermediate_steps=False,
     )
     if st.session_state.provider == "Anthropic":
         config["max_concurrency"] = 5
@@ -507,17 +507,17 @@ if st.session_state.llm:
     doc_chain_agent = get_doc_agent(
         [doc_chain_tool],
     )
-
+    doc_question_tool = Tool.from_function(
         func=lambda s: doc_chain_agent.invoke(
             s,
         ),
-        name="document-
-        description="this assistant
+        name="document-question-tool",
+        description="this assistant answers a question based on the user's custom context. "
+        "this assistant responds to fully formed questions."
         "if the user's meaning is unclear, perhaps the answer is here. "
-        "generally speaking, try this tool before conducting web research."
-        "it is best to send this tool a question, as it will attempt to break complex questions down into several, simpler questions.",
+        "generally speaking, try this tool before conducting web research.",
     )
-    TOOLS = [
+    TOOLS = [doc_question_tool, research_assistant_tool] + default_tools
 
     st.session_state.chain = get_agent(
         TOOLS,
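The app.py hunk above exposes the documents agent to the main agent as an ordinary Tool. A hedged sketch of that pattern, with a stub in place of `doc_chain_agent` (whose construction via `get_doc_agent` sits outside this hunk):

```python
from langchain.agents.tools import Tool

def _stub_doc_agent_invoke(question: str) -> str:
    """Stand-in for doc_chain_agent.invoke(); returns a canned answer."""
    return f"(answer from the user's documents for: {question})"

doc_question_tool = Tool.from_function(
    func=_stub_doc_agent_invoke,
    name="document-question-tool",
    description="this assistant answers a question based on the user's custom context. "
    "this assistant responds to fully formed questions."
    "if the user's meaning is unclear, perhaps the answer is here. "
    "generally speaking, try this tool before conducting web research.",
)

# The main agent selects tools by name/description, but a Tool can also run directly:
print(doc_question_tool.run("What does the uploaded contract say about termination?"))
```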
langchain-streamlit-demo/llm_resources.py
CHANGED
@@ -3,9 +3,6 @@ from tempfile import NamedTemporaryFile
 from typing import Tuple, List, Optional, Dict
 
 from langchain.agents import AgentExecutor, AgentType, initialize_agent
-from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
-    AgentTokenBufferMemory,
-)
 from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.chains import LLMChain
@@ -57,10 +54,17 @@ def get_agent(
     )
     agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
 
-    agent_memory = AgentTokenBufferMemory(
+    # agent_memory = AgentTokenBufferMemory(
+    #     chat_memory=chat_history,
+    #     memory_key=memory_key,
+    #     llm=llm,
+    # )
+    from langchain.memory import ConversationBufferMemory
+
+    agent_memory = ConversationBufferMemory(
         chat_memory=chat_history,
+        return_messages=True,
         memory_key=memory_key,
-        llm=llm,
     )
 
     agent_executor = AgentExecutor(
@@ -68,7 +72,7 @@ def get_agent(
         tools=tools,
         memory=agent_memory,
         verbose=True,
-        return_intermediate_steps=True,
+        return_intermediate_steps=False,
         callbacks=callbacks,
     )
     return (
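A compact sketch of the refactored get_agent assembly, pulled out of the app for illustration (the empty tool list and default prompt construction are assumptions, and running it requires an OpenAI key):

```python
from langchain.agents import AgentExecutor
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
tools = []  # the app passes TOOLS; empty here for illustration

# The prompt needs a placeholder matching memory_key so the buffer can inject history.
prompt = OpenAIFunctionsAgent.create_prompt(
    extra_prompt_messages=[MessagesPlaceholder(variable_name="chat_history")],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)

agent_memory = ConversationBufferMemory(
    chat_memory=ChatMessageHistory(),
    return_messages=True,
    memory_key="chat_history",
)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=agent_memory,
    verbose=True,
    return_intermediate_steps=False,  # skip collecting (action, observation) pairs in the output
)
```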
langchain-streamlit-demo/python_coder.py
CHANGED
@@ -1,16 +1,14 @@
 """langchain python coder-- requires black, ruff, and mypy."""
 
-import argparse
 import os
 import re
 import subprocess  # nosec
 import tempfile
-from importlib.util import find_spec
 
 from langchain.agents import initialize_agent, AgentType
 from langchain.agents.tools import Tool
-from langchain.chat_models import ChatOpenAI
 from langchain.llms.base import BaseLLM
+from langchain.memory import ConversationBufferMemory
 from langchain.prompts import ChatPromptTemplate
 from langchain.pydantic_v1 import BaseModel, validator, Field, ValidationError
 
@@ -177,35 +175,17 @@ def get_agent(
     llm: BaseLLM,
     agent_type: AgentType = AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
 ):
+    agent_memory = ConversationBufferMemory(
+        return_messages=True,
+        memory_key="chat_history",
+    )
     return initialize_agent(
         tools,
         llm,
         agent=agent_type,
         verbose=True,
         handle_parsing_errors=True,
+        memory=agent_memory,
         prompt=prompt,
-
+        return_intermediate_steps=False,
     ) | (lambda output: output["output"])
-
-
-if __name__ == "__main__":
-    for lib in ("black", "ruff", "mypy"):
-        if find_spec(lib) is None:
-            raise ImportError(
-                "You must install black, ruff, and mypy to use this tool. "
-                "You can install them with `pip install black ruff mypy`.",
-            )
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--model", "-m", default="gpt-4-1106-preview")
-    parser.add_argument("instruction")
-    args = parser.parse_args()
-
-    instruction = args.instruction
-    model = args.model
-
-    llm = ChatOpenAI(model_name=model, temperature=0.0)
-    agent = get_agent(llm)
-
-    output = agent.invoke({"input": instruction}, config=dict(verbose=True))
-    print(output)
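With the __main__ block gone, python_coder.py is import-only. A usage sketch mirroring the removed CLI (the module path and model name are assumptions carried over from the deleted code; an OpenAI key is required at runtime):

```python
from langchain.chat_models import ChatOpenAI

from python_coder import get_agent  # assumes the module is on the import path

llm = ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0.0)
agent = get_agent(llm)

# get_agent pipes initialize_agent(...) into a lambda, so invoke returns the
# "output" string directly rather than the agent's full result dict.
output = agent.invoke({"input": "write a function that reverses a string"})
print(output)
```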