Merge pull request #112 from joshuasundance-swca/pyhistory
langchain-streamlit-demo/app.py

@@ -430,7 +430,7 @@ if st.session_state.llm:
         callbacks=callbacks,
         tags=["Streamlit Chat"],
         verbose=True,
-        return_intermediate_steps=
+        return_intermediate_steps=False,
     )
     if st.session_state.provider == "Anthropic":
        config["max_concurrency"] = 5
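On the app side, `return_intermediate_steps` is now passed explicitly as `False`, alongside the existing tags and the Anthropic cap of five concurrent calls. A minimal sketch, with an assumed stand-in chain (not the app's actual code), of how those runnable-config keys are consumed at invocation time:

# Minimal sketch, not the app's actual chain: shows how the config keys
# set above (tags, max_concurrency) are passed when invoking a runnable.
from langchain.schema.runnable import RunnableLambda

chain = RunnableLambda(lambda text: text.upper())  # stand-in for the real chain

config = {"tags": ["Streamlit Chat"], "max_concurrency": 5}
# max_concurrency matters for batch(): it caps parallel invocations.
print(chain.batch(["hello", "world"], config=config))  # ['HELLO', 'WORLD']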
langchain-streamlit-demo/llm_resources.py

@@ -3,9 +3,6 @@ from tempfile import NamedTemporaryFile
 from typing import Tuple, List, Optional, Dict
 
 from langchain.agents import AgentExecutor
-from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
-    AgentTokenBufferMemory,
-)
 from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.chains import LLMChain
@@ -57,10 +54,17 @@ def get_agent(
     )
     agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
 
-    agent_memory = AgentTokenBufferMemory(
+    # agent_memory = AgentTokenBufferMemory(
+    #     chat_memory=chat_history,
+    #     memory_key=memory_key,
+    #     llm=llm,
+    # )
+    from langchain.memory import ConversationBufferMemory
+
+    agent_memory = ConversationBufferMemory(
         chat_memory=chat_history,
+        return_messages=True,
         memory_key=memory_key,
-        llm=llm,
     )
 
     agent_executor = AgentExecutor(
@@ -68,7 +72,7 @@ def get_agent(
         tools=tools,
         memory=agent_memory,
         verbose=True,
-        return_intermediate_steps=
+        return_intermediate_steps=False,
         callbacks=callbacks,
     )
     return (
langchain-streamlit-demo/python_coder.py

@@ -1,16 +1,14 @@
 """langchain python coder-- requires black, ruff, and mypy."""
 
-import argparse
 import os
 import re
 import subprocess  # nosec
 import tempfile
-from importlib.util import find_spec
 
 from langchain.agents import initialize_agent, AgentType
 from langchain.agents.tools import Tool
-from langchain.chat_models import ChatOpenAI
 from langchain.llms.base import BaseLLM
+from langchain.memory import ConversationBufferMemory
 from langchain.prompts import ChatPromptTemplate
 from langchain.pydantic_v1 import BaseModel, validator, Field, ValidationError
 
@@ -177,35 +175,17 @@ def get_agent(
     llm: BaseLLM,
     agent_type: AgentType = AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
 ):
+    agent_memory = ConversationBufferMemory(
+        return_messages=True,
+        memory_key="chat_history",
+    )
     return initialize_agent(
         tools,
         llm,
         agent=agent_type,
         verbose=True,
         handle_parsing_errors=True,
+        memory=agent_memory,
         prompt=prompt,
-
+        return_intermediate_steps=False,
     ) | (lambda output: output["output"])
-
-
-if __name__ == "__main__":
-    for lib in ("black", "ruff", "mypy"):
-        if find_spec(lib) is None:
-            raise ImportError(
-                "You must install black, ruff, and mypy to use this tool. "
-                "You can install them with `pip install black ruff mypy`.",
-            )
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--model", "-m", default="gpt-4-1106-preview")
-    parser.add_argument("instruction")
-    args = parser.parse_args()
-
-    instruction = args.instruction
-    model = args.model
-
-    llm = ChatOpenAI(model_name=model, temperature=0.0)
-    agent = get_agent(llm)
-
-    output = agent.invoke({"input": instruction}, config=dict(verbose=True))
-    print(output)
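With the `__main__` CLI removed, the module is now import-only, and `get_agent` gives the agent its own conversation memory. The trailing `| (lambda output: output["output"])` is LCEL composition: the `|` operator coerces a plain function into a runnable, so callers receive the bare output string instead of the agent's full result dict. A toy sketch of that pattern, where the dict-returning lambda is a hypothetical stand-in for the real agent:

# Toy sketch of the pipe kept at the end of get_agent: "|" coerces a plain
# function into a runnable, unwrapping the agent's result dict.
from langchain.schema.runnable import RunnableLambda

agent_like = RunnableLambda(lambda x: {"output": f"echo: {x['input']}"})
chain = agent_like | (lambda result: result["output"])

print(chain.invoke({"input": "hi"}))  # -> 'echo: hi'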