Commit 53abb59
Parent(s): af4cea4
Joshua Sundance Bailey committed

no env

Files changed:
- .env-example (+0, -10)
- docker-compose.yml (+0, -2)
- langchain-streamlit-demo/app.py (+115, -71)
- langchain-streamlit-demo/llm_stuff.py (+10, -3)
.env-example
DELETED
@@ -1,10 +0,0 @@
-APP_PORT=8181
-
-LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
-LANGCHAIN_API_KEY=ls__...
-LANGCHAIN_TRACING_V2="true"
-LANGCHAIN_PROJECT="streamlit_chatbot"
-
-ANYSCALE_API_KEY=secret_...
-OPENAI_API_KEY=sk-...
-ANTHROPIC_API_KEY="sk-..."
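With .env deleted, no secrets ship with the repository; the keys are typed into the running app instead. A minimal sketch of the pattern this commit moves to (the real widget labels and "sk-" gating appear in the app.py diff below):

# Minimal sketch of the runtime-key pattern adopted by this commit; the full
# version, with the LangSmith key and custom instructions, lives in app.py.
import streamlit as st
from langchain.chat_models import ChatOpenAI

openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
st.session_state.openai_api_key = openai_api_key

if openai_api_key.startswith("sk-"):
    # The key is passed straight to the client instead of being read from .env.
    llm = ChatOpenAI(openai_api_key=openai_api_key, streaming=True)
else:
    st.error("Please enter a valid OpenAI API key.", icon="❌")
    st.stop()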
docker-compose.yml
CHANGED
@@ -4,8 +4,6 @@ services:
   streamlit-chat:
     image: streamlit-chat:latest
     build: .
-    env_file:
-      - .env
     ports:
       - "8000:8000"
     volumes:
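Removing env_file means the container starts with no baked-in secrets; the LangSmith variables that .env used to supply are exported from inside app.py once a valid key is entered. Roughly, expressed as a hypothetical helper (enable_langsmith is not in the repo; app.py performs these assignments inline):

# Hypothetical helper mirroring the os.environ block in the app.py diff below.
import os

def enable_langsmith(api_key: str, project: str) -> None:
    # Set the same four variables the deleted .env file used to provide.
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
    os.environ["LANGCHAIN_API_KEY"] = api_key
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_PROJECT"] = project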
langchain-streamlit-demo/app.py
CHANGED
@@ -1,8 +1,10 @@
+import os
 import streamlit as st
+from langchain.callbacks.manager import tracing_v2_enabled
 from langchain.callbacks.tracers.langchain import wait_for_all_tracers
 from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
 from langchain.schema.runnable import RunnableConfig
-
+from openai.error import AuthenticationError
 
 from llm_stuff import (
     _DEFAULT_SYSTEM_PROMPT,
@@ -30,83 +32,125 @@ st.sidebar.markdown(
     """,
 )
 
-system_prompt = (
-    st.sidebar.text_area(
-        "Custom Instructions",
-        _DEFAULT_SYSTEM_PROMPT,
-        help="Custom instructions to provide the language model to determine style, personality, etc.",
-    )
-    .strip()
-    .replace("{", "{{")
-    .replace("}", "}}")
-)
-
-memory = get_memory()
-
-chain = get_llm_chain(memory, system_prompt)
+openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
+st.session_state.openai_api_key = openai_api_key
 
-
-
-run_collector = RunCollectorCallbackHandler()
-
-
-if st.sidebar.button("Clear message history"):
-    print("Clearing message history")
-    memory.clear()
-    st.session_state.trace_link = None
-    st.session_state.run_id = None
+langsmith_api_key = st.sidebar.text_input("LangSmith API Key", type="password")
+st.session_state.langsmith_api_key = langsmith_api_key
 
 
-
-# Display chat messages from history on app rerun
-# NOTE: This won't be necessary for Streamlit 1.26+, you can just pass the type directly
-# https://github.com/streamlit/streamlit/pull/7094
-def _get_openai_type(msg):
-    if msg.type == "human":
-        return "user"
-    if msg.type == "ai":
-        return "assistant"
-    return msg.role if msg.type == "chat" else msg.type
-
-for msg in st.session_state.langchain_messages:
-    streamlit_type = _get_openai_type(msg)
-    avatar = "🦜" if streamlit_type == "assistant" else None
-    with st.chat_message(streamlit_type, avatar=avatar):
-        st.markdown(msg.content)
-
-if st.session_state.trace_link:
-    st.sidebar.markdown(
-        f'<a href="{st.session_state.trace_link}" target="_blank"><button>Latest Trace: 🛠️</button></a>',
-        unsafe_allow_html=True,
+if st.session_state.openai_api_key.startswith("sk-"):
+    system_prompt = (
+        st.sidebar.text_area(
+            "Custom Instructions",
+            _DEFAULT_SYSTEM_PROMPT,
+            help="Custom instructions to provide the language model to determine style, personality, etc.",
+        )
+        .strip()
+        .replace("{", "{{")
+        .replace("}", "}}")
     )
 
+    temperature = st.sidebar.slider(
+        "Temperature",
+        min_value=0.0,
+        max_value=1.0,
+        value=0.7,
+        help="Higher values give more random results.",
+    )
 
-def _reset_feedback():
-    st.session_state.feedback_update = None
-    st.session_state.feedback = None
-
+    memory = get_memory()
 
-if prompt := st.chat_input(placeholder="Ask me a question!"):
-    st.chat_message("user").write(prompt)
-    _reset_feedback()
+    chain = get_llm_chain(memory, system_prompt, temperature)
 
-
-    with st.chat_message("assistant", avatar="🦜"):
-        message_placeholder = st.empty()
-        stream_handler = StreamHandler(message_placeholder)
-        runnable_config = RunnableConfig(
-            callbacks=[run_collector, stream_handler],
+    if st.session_state.langsmith_api_key.startswith("ls__"):
+        langsmith_project = st.sidebar.text_input(
+            "LangSmith Project Name",
+            value="langchain-streamlit-demo",
+        )
+        os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
+        os.environ["LANGCHAIN_API_KEY"] = st.session_state.langsmith_api_key
+        os.environ["LANGCHAIN_TRACING_V2"] = "true"
+        os.environ["LANGCHAIN_PROJECT"] = langsmith_project
+
+        client = get_langsmith_client()
+    else:
+        langsmith_project = None
+        client = None
+
+    run_collector = RunCollectorCallbackHandler()
+
+    if st.sidebar.button("Clear message history"):
+        print("Clearing message history")
+        memory.clear()
+        st.session_state.trace_link = None
+        st.session_state.run_id = None
+
+    # Display chat messages from history on app rerun
+    # NOTE: This won't be necessary for Streamlit 1.26+, you can just pass the type directly
+    # https://github.com/streamlit/streamlit/pull/7094
+    def _get_openai_type(msg):
+        if msg.type == "human":
+            return "user"
+        if msg.type == "ai":
+            return "assistant"
+        return msg.role if msg.type == "chat" else msg.type
+
+    for msg in st.session_state.langchain_messages:
+        streamlit_type = _get_openai_type(msg)
+        avatar = "🦜" if streamlit_type == "assistant" else None
+        with st.chat_message(streamlit_type, avatar=avatar):
+            st.markdown(msg.content)
+
+    if st.session_state.trace_link:
+        st.sidebar.markdown(
+            f'<a href="{st.session_state.trace_link}" target="_blank"><button>Latest Trace: 🛠️</button></a>',
+            unsafe_allow_html=True,
         )
-        full_response = chain.invoke({"input": prompt}, config=runnable_config)["text"]
-        message_placeholder.markdown(full_response)
-
-        run = run_collector.traced_runs[0]
-        run_collector.traced_runs = []
-        st.session_state.run_id = run.id
-        wait_for_all_tracers()
-        url = client.read_run(run.id).url
-        st.session_state.trace_link = url
-
 
-if st.session_state.get("run_id"):
-    feedback_component(client)
+    def _reset_feedback():
+        st.session_state.feedback_update = None
+        st.session_state.feedback = None
+
+    if prompt := st.chat_input(placeholder="Ask me a question!"):
+        st.chat_message("user").write(prompt)
+        _reset_feedback()
+
+        with st.chat_message("assistant", avatar="🦜"):
+            message_placeholder = st.empty()
+            stream_handler = StreamHandler(message_placeholder)
+            runnable_config = RunnableConfig(
+                callbacks=[run_collector, stream_handler],
+                tags=["Streamlit Chat"],
+            )
+            try:
+                if client and langsmith_project:
+                    with tracing_v2_enabled(project_name=langsmith_project):
+                        full_response = chain.invoke(
+                            {"input": prompt},
+                            config=runnable_config,
+                        )["text"]
+                else:
+                    full_response = chain.invoke(
+                        {"input": prompt},
+                        config=runnable_config,
+                    )["text"]
+            except AuthenticationError:
+                st.error("Please enter a valid OpenAI API key.", icon="❌")
+                st.stop()
+            message_placeholder.markdown(full_response)
+
+            if client:
+                run = run_collector.traced_runs[0]
+                run_collector.traced_runs = []
+                st.session_state.run_id = run.id
+                wait_for_all_tracers()
+                url = client.read_run(run.id).url
+                st.session_state.trace_link = url
+
+    if client and st.session_state.get("run_id"):
+        feedback_component(client)

+else:
+    st.error("Please enter a valid OpenAI API key.", icon="❌")
+    st.stop()
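The new invoke path combines three tracing pieces: tracing_v2_enabled scopes the run to a LangSmith project, RunCollectorCallbackHandler captures the run object, and wait_for_all_tracers plus Client.read_run turn it into a shareable trace link. A condensed sketch of that flow (assumes it runs inside the Streamlit app with both sidebar keys already entered, so LANGCHAIN_API_KEY is exported and st.session_state.openai_api_key is set):

# Condensed sketch of the tracing flow app.py now implements.
from langchain.callbacks.manager import tracing_v2_enabled
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
from langchain.schema.runnable import RunnableConfig
from langsmith.client import Client

from llm_stuff import get_llm_chain, get_memory

chain = get_llm_chain(get_memory())  # reads st.session_state.openai_api_key
run_collector = RunCollectorCallbackHandler()
config = RunnableConfig(callbacks=[run_collector], tags=["Streamlit Chat"])

# Scope the traced run to a project, then recover its trace URL.
with tracing_v2_enabled(project_name="langchain-streamlit-demo"):
    text = chain.invoke({"input": "Hello!"}, config=config)["text"]

run = run_collector.traced_runs[0]  # the run captured during invoke
wait_for_all_tracers()              # block until trace uploads finish
trace_link = Client().read_run(run.id).url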
langchain-streamlit-demo/llm_stuff.py
CHANGED
@@ -6,14 +6,16 @@ from langchain.callbacks.base import BaseCallbackHandler
 from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationBufferMemory, StreamlitChatMessageHistory
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
-from streamlit_feedback import streamlit_feedback
 from langsmith.client import Client
+from streamlit_feedback import streamlit_feedback
 
 _DEFAULT_SYSTEM_PROMPT = "You are a helpful chatbot."
 
 
 def get_langsmith_client():
-    return Client()
+    return Client(
+        api_key=st.session_state.langsmith_api_key,
+    )
 
 
 def get_memory() -> ConversationBufferMemory:
@@ -27,6 +29,7 @@ def get_memory() -> ConversationBufferMemory:
 def get_llm_chain(
     memory: ConversationBufferMemory,
     system_prompt: str = _DEFAULT_SYSTEM_PROMPT,
+    temperature: float = 0.7,
 ) -> LLMChain:
     """Return a basic LLMChain with memory."""
     prompt = ChatPromptTemplate.from_messages(
@@ -39,7 +42,11 @@ def get_llm_chain(
             ("human", "{input}"),
         ],
     ).partial(time=lambda: str(datetime.now()))
-    llm = ChatOpenAI(temperature=0.7, streaming=True)
+    llm = ChatOpenAI(
+        temperature=temperature,
+        streaming=True,
+        openai_api_key=st.session_state.openai_api_key,
+    )
    return LLMChain(prompt=prompt, llm=llm, memory=memory or get_memory())
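For reference, a hypothetical standalone use of the updated helpers; in the app itself the key comes from the sidebar input and the temperature from the slider:

# Hypothetical usage of the updated helpers (run with `streamlit run`).
import streamlit as st
from llm_stuff import get_llm_chain, get_memory

st.session_state.openai_api_key = "sk-..."  # normally typed into the sidebar

chain = get_llm_chain(
    memory=get_memory(),
    temperature=0.3,  # lower values give more deterministic replies
)
st.write(chain.invoke({"input": "Hi!"})["text"])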