Merge pull request #117 from joshuasundance-swca/python_coder_history
langchain-streamlit-demo/app.py
CHANGED
@@ -15,7 +15,6 @@ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.schema.document import Document
 from langchain.schema.retriever import BaseRetriever
 from langchain.tools import DuckDuckGoSearchRun, WikipediaQueryRun
-from langchain.tools import Tool
 from langchain.utilities import WikipediaAPIWrapper
 from langsmith.client import Client
 from streamlit_feedback import streamlit_feedback
@@ -461,27 +460,30 @@ if st.session_state.llm:
     )
     st_callback = StreamlitCallbackHandler(st.container())
     callbacks.append(st_callback)
-    …
+
+    from langchain.agents.tools import tool
+    from langchain.callbacks.manager import Callbacks
+
+    @tool("web-research-assistant")
+    def research_assistant_tool(question: str, callbacks: Callbacks = None):
+        """this assistant returns a comprehensive report based on web research.
+        it's slow and relatively expensive, so use it sparingly.
+        for quick facts, use duckduckgo instead.
+        """
+        return research_assistant_chain.invoke(
+            dict(question=question),
+            config=get_config(callbacks),
+        )
 
     python_coder_agent = get_python_agent(st.session_state.llm)
 
-    …
-    )
+    @tool("python-coder-assistant")
+    def python_coder_tool(input_str: str, callbacks: Callbacks = None):
+        """this assistant writes Python code. give it clear instructions and requirements."""
+        return python_coder_agent.invoke(
+            dict(input=input_str),
+            config=get_config(callbacks),
+        )
 
     TOOLS = [research_assistant_tool, python_coder_tool] + default_tools
@@ -496,29 +498,32 @@ if st.session_state.llm:
         prompt,
     )
 
-    …
+    @tool("user-document-chat")
+    def doc_chain_tool(input_str: str, callbacks: Callbacks = None):
+        """this assistant returns a response based on the user's custom context."""
+        return st.session_state.doc_chain.invoke(
+            input_str,
             config=get_config(callbacks),
-        )
-    …
-            description="this assistant returns a response based on the user's custom context. ",
-        )
+        )
+
     doc_chain_agent = get_doc_agent(
         [doc_chain_tool],
     )
-    …
+
+    @tool("document-question-tool")
+    def doc_question_tool(input_str: str, callbacks: Callbacks = None):
+        """
+        this assistant answers a question based on the user's custom context.
+        this assistant responds to fully formed questions.
+        Do not send anything besides a question. It already has context.
+        if the user's meaning is unclear, perhaps the answer is here.
+        generally speaking, try this tool before conducting web research.
+        """
+        return doc_chain_agent.invoke(
+            input_str,
            config=get_config(callbacks),
-        )
-    …
-            description="this assistant answers a question based on the user's custom context. "
-            "this assistant responds to fully formed questions."
-            "Do not send anything besides a question. It already has context."
-            "if the user's meaning is unclear, perhaps the answer is here. "
-            "generally speaking, try this tool before conducting web research.",
-        )
+        )
+
     TOOLS = [doc_question_tool, research_assistant_tool] + default_tools
 
     st.session_state.chain = get_agent(
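The pattern throughout this file: the old `Tool(...)` constructions (with explicit `description=` kwargs) are replaced by plain functions wrapped with LangChain's `@tool` decorator, so the tool name comes from the decorator argument and the description the agent sees comes from the docstring. A minimal sketch of that pattern, with a hypothetical tool that is not part of this repo:

# Sketch of the `@tool` pattern this diff adopts (hypothetical example tool).
# The `callbacks: Callbacks = None` parameter is excluded from the tool's
# input schema; a real tool forwards it into the wrapped chain's config
# (as the PR does with get_config(callbacks)) so nested runs are traced.
from langchain.agents.tools import tool
from langchain.callbacks.manager import Callbacks


@tool("shouting-assistant")
def shouting_tool(input_str: str, callbacks: Callbacks = None) -> str:
    """this assistant returns the input in upper case."""
    return input_str.upper()


print(shouting_tool.name)          # "shouting-assistant"
print(shouting_tool.description)   # the docstring; agents use this to pick tools
print(shouting_tool.run("hello"))  # "HELLO"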
langchain-streamlit-demo/llm_resources.py
CHANGED
@@ -16,6 +16,7 @@ from langchain.chat_models import (
 from langchain.document_loaders import PyPDFLoader
 from langchain.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
 from langchain.llms.base import BaseLLM
+from langchain.memory import ConversationBufferMemory
 from langchain.prompts import MessagesPlaceholder, ChatPromptTemplate
 from langchain.retrievers import EnsembleRetriever
 from langchain.retrievers.multi_query import MultiQueryRetriever
@@ -59,7 +60,6 @@ def get_agent(
     # memory_key=memory_key,
     # llm=llm,
     # )
-    from langchain.memory import ConversationBufferMemory
 
     agent_memory = ConversationBufferMemory(
         chat_memory=chat_history,
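The only functional change here is hoisting the `ConversationBufferMemory` import from inside `get_agent` to module scope. For reference, a hedged sketch of how that class wraps an existing message history, with assumed names rather than the repo's exact wiring:

# Sketch: ConversationBufferMemory reusing messages collected elsewhere.
from langchain.memory import ChatMessageHistory, ConversationBufferMemory

history = ChatMessageHistory()
history.add_user_message("hi")
history.add_ai_message("hello!")

agent_memory = ConversationBufferMemory(
    chat_memory=history,        # reuse an existing chat history object
    memory_key="chat_history",  # must match the prompt's MessagesPlaceholder
    return_messages=True,       # yield message objects, not one joined string
)
print(agent_memory.load_memory_variables({})["chat_history"])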
langchain-streamlit-demo/python_coder.py
CHANGED
@@ -10,6 +10,7 @@ from langchain.agents.tools import Tool
 from langchain.llms.base import BaseLLM
 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import ChatPromptTemplate
+from langchain.prompts import MessagesPlaceholder
 from langchain.pydantic_v1 import BaseModel, validator, Field, ValidationError
 
 
@@ -29,7 +30,7 @@ def format_black(filepath: str):
         stderr=subprocess.STDOUT,
         text=True,
         shell=True,
-        timeout=…
+        timeout=30,
         check=False,
     )
 
@@ -37,19 +38,19 @@ def format_black(filepath: str):
 def format_ruff(filepath: str):
     """Run ruff format on a file."""
     subprocess.run(  # nosec
-        f"ruff check --fix {filepath}",
+        f"ruff check --no-cache --fix {filepath}",
         shell=True,
         text=True,
-        timeout=…
+        timeout=30,
         universal_newlines=True,
         check=False,
     )
 
     subprocess.run(  # nosec
-        f"ruff format {filepath}",
+        f"ruff format --no-cache {filepath}",
         stderr=subprocess.STDOUT,
         shell=True,
-        timeout=…
+        timeout=30,
         text=True,
         check=False,
     )
@@ -58,15 +59,15 @@ def format_ruff(filepath: str):
 def check_ruff(filepath: str):
     """Run ruff check on a file."""
     subprocess.check_output(  # nosec
-        f"ruff check {filepath}",
+        f"ruff check --no-cache {filepath}",
         stderr=subprocess.STDOUT,
         shell=True,
-        timeout=…
+        timeout=30,
         text=True,
     )
 
 
-def check_mypy(filepath: str, strict: bool = True, follow_imports: str = "skip"):
+def check_mypy(filepath: str, strict: bool = False, follow_imports: str = "skip"):
     """Run mypy on a file."""
     cmd = f"mypy {'--strict' if strict else ''} --follow-imports={follow_imports} {filepath}"
 
@@ -75,7 +76,7 @@ def check_mypy(filepath: str, strict: bool = True, follow_imports: str = "skip")
         stderr=subprocess.STDOUT,
         shell=True,
         text=True,
-        timeout=…
+        timeout=30,
     )
 
 
@@ -148,9 +149,10 @@ prompt = ChatPromptTemplate.from_messages(
             "Provide complete, end-to-end Python code to meet the user's description/requirements. "
             "Always `check` your code. When you're done, you must ALWAYS use the `submit` tool.",
         ),
+        MessagesPlaceholder(variable_name="chat_history"),
         (
             "human",
-            "…
+            "{input}",
        ),
     ],
 )
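The recurring change in this file sets `timeout=30` on every `black`, `ruff`, and `mypy` subprocess call (the previous timeout values were not captured in the page extraction) and adds `--no-cache` to the `ruff` invocations, so a hung or cache-confused linter cannot stall the coder agent's check loop indefinitely. A self-contained demonstration of the timeout behavior, with demo values rather than the repo's:

# subprocess.run(..., timeout=N) kills the child and raises TimeoutExpired
# if it runs longer than N seconds.
import subprocess

try:
    subprocess.run(
        "sleep 5",   # stand-in for a hung lint/format command
        shell=True,  # nosec - fixed string, demo only
        text=True,
        timeout=1,   # much shorter than the PR's 30s, to fail fast here
        check=False,
    )
except subprocess.TimeoutExpired as exc:
    print(f"command timed out after {exc.timeout}s: {exc.cmd}")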
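The prompt change is what gives the branch its name: a `MessagesPlaceholder` bound to `chat_history` splices prior turns between the system message and the new human input, which becomes a proper `{input}` variable. A small sketch of how such a prompt renders; the system text mirrors the repo's but the conversation is hypothetical:

# MessagesPlaceholder injects a list of prior messages at format time.
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema import AIMessage, HumanMessage

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an expert Python programmer."),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
    ],
)

messages = prompt.format_messages(
    chat_history=[
        HumanMessage(content="write fizzbuzz"),
        AIMessage(content="def fizzbuzz(n): ..."),
    ],
    input="now add type hints",
)
for m in messages:
    # prints SystemMessage, then the two history messages, then the new input
    print(type(m).__name__, ":", m.content[:40])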