Joshua Sundance Bailey committed
Commit 6da47ea · 1 Parent(s): a9599af
parameterization
langchain-streamlit-demo/app.py CHANGED
@@ -10,7 +10,16 @@ from langchain.schema.runnable import RunnableConfig
 from langsmith.client import Client
 
 from llm_stuff import (
+    _MODEL_DICT,
+    _SUPPORTED_MODELS,
+    _DEFAULT_MODEL,
     _DEFAULT_SYSTEM_PROMPT,
+    _DEFAULT_TEMPERATURE,
+    _MIN_TEMPERATURE,
+    _MAX_TEMPERATURE,
+    _DEFAULT_MAX_TOKENS,
+    _MIN_TOKENS,
+    _MAX_TOKENS,
     get_llm_chain,
     StreamHandler,
     feedback_component,
@@ -21,60 +30,40 @@ st.set_page_config(
     page_icon="🦜",
 )
 
+st.sidebar.markdown("# Menu")
+
 # Initialize State
 if "trace_link" not in st.session_state:
     st.session_state.trace_link = None
 if "run_id" not in st.session_state:
     st.session_state.run_id = None
 
-st.sidebar.
-
-
-
-    "claude-instant-v1",
-    "claude-2",
-    "meta-llama/Llama-2-7b-chat-hf",
-    "meta-llama/Llama-2-13b-chat-hf",
-    "meta-llama/Llama-2-70b-chat-hf",
-]
-model = st.sidebar.selectbox(label="Chat Model", options=models, index=0)
-
-if model.startswith("gpt"):
-    provider = "OpenAI"
-elif model.startswith("claude"):
-    provider = "Anthropic"
-elif model.startswith("meta-llama"):
-    provider = "Anyscale"
-else:
-    st.stop()
-
-if not model:
-    st.error("Please select a model and provide an API key.", icon="❌")
-    st.stop()
-
-provider_api_key = st.sidebar.text_input(f"{provider} API key", type="password")
-
-langsmith_api_key = st.sidebar.text_input(
-    "LangSmith API Key (optional)",
-    type="password",
-)
-
-
-
-
-
-    )
-    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
-    os.environ["LANGCHAIN_API_KEY"] = langsmith_api_key
-    os.environ["LANGCHAIN_TRACING_V2"] = "true"
-    os.environ["LANGCHAIN_PROJECT"] = langsmith_project
-
-    client = Client(api_key=langsmith_api_key)
-else:
-    langsmith_project = None
-    client = None
-
-if provider_api_key:
+model = st.sidebar.selectbox(
+    label="Chat Model",
+    options=_SUPPORTED_MODELS,
+    index=_SUPPORTED_MODELS.index(_DEFAULT_MODEL),
+)
+provider = _MODEL_DICT[model]
+
+if provider_api_key := st.sidebar.text_input(f"{provider} API key", type="password"):
+    if langsmith_api_key := st.sidebar.text_input(
+        "LangSmith API Key (optional)",
+        type="password",
+    ):
+        langsmith_project = st.sidebar.text_input(
+            "LangSmith Project Name",
+            value="langchain-streamlit-demo",
+        )
+        os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
+        os.environ["LANGCHAIN_API_KEY"] = langsmith_api_key
+        os.environ["LANGCHAIN_TRACING_V2"] = "true"
+        os.environ["LANGCHAIN_PROJECT"] = langsmith_project
+
+        client = Client(api_key=langsmith_api_key)
+    else:
+        langsmith_project = None
+        client = None
+
     system_prompt = (
         st.sidebar.text_area(
             "Custom Instructions",
@@ -88,17 +77,17 @@ if provider_api_key:
 
     temperature = st.sidebar.slider(
         "Temperature",
-        min_value=
-        max_value=
-        value=
+        min_value=_MIN_TEMPERATURE,
+        max_value=_MAX_TEMPERATURE,
+        value=_DEFAULT_TEMPERATURE,
         help="Higher values give more random results.",
     )
 
     max_tokens = st.sidebar.slider(
         "Max Tokens",
-        min_value=
-        max_value=
-        value=
+        min_value=_MIN_TOKENS,
+        max_value=_MAX_TOKENS,
+        value=_DEFAULT_MAX_TOKENS,
         help="Higher values give longer results.",
     )
 
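Taken together, the app.py change replaces a hand-maintained model list and a startswith() chain with a single lookup table. The sketch below restates that pattern on its own so it can be run in isolation; the shortened _MODEL_DICT and the st.sidebar.success() call are illustrative assumptions, not part of the commit.

# Minimal sketch (names mirror the diff; the model table is trimmed for brevity).
import streamlit as st

_MODEL_DICT = {
    "gpt-3.5-turbo": "OpenAI",
    "gpt-4": "OpenAI",
    "claude-2": "Anthropic",
    "meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
}
_SUPPORTED_MODELS = list(_MODEL_DICT.keys())
_DEFAULT_MODEL = "gpt-3.5-turbo"

model = st.sidebar.selectbox(
    label="Chat Model",
    options=_SUPPORTED_MODELS,
    index=_SUPPORTED_MODELS.index(_DEFAULT_MODEL),
)
provider = _MODEL_DICT[model]  # one lookup instead of an if/elif prefix chain

# The walrus operator gates the rest of the sidebar on a key being entered.
if provider_api_key := st.sidebar.text_input(f"{provider} API key", type="password"):
    st.sidebar.success(f"Ready to chat with {model} via {provider}")

Adding a new model then only means adding one entry to _MODEL_DICT in llm_stuff.py; the selectbox options and provider detection pick it up automatically.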
langchain-streamlit-demo/llm_stuff.py CHANGED
@@ -12,6 +12,26 @@ from streamlit_feedback import streamlit_feedback
 
 _DEFAULT_SYSTEM_PROMPT = "You are a helpful chatbot."
 
+_MODEL_DICT = {
+    "gpt-3.5-turbo": "OpenAI",
+    "gpt-4": "OpenAI",
+    "claude-instant-v1": "Anthropic",
+    "claude-2": "Anthropic",
+    "meta-llama/Llama-2-7b-chat-hf": "Anyscale Endpoints",
+    "meta-llama/Llama-2-13b-chat-hf": "Anyscale Endpoints",
+    "meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
+}
+_SUPPORTED_MODELS = list(_MODEL_DICT.keys())
+_DEFAULT_MODEL = "gpt-3.5-turbo"
+
+_DEFAULT_TEMPERATURE = 0.7
+_MIN_TEMPERATURE = 0.0
+_MAX_TEMPERATURE = 1.0
+
+_DEFAULT_MAX_TOKENS = 1000
+_MIN_TOKENS = 1
+_MAX_TOKENS = 100000
+
 
 def get_memory() -> BaseChatMemory:
     return ConversationBufferMemory(
@@ -25,9 +45,9 @@ def get_llm(
     model: str,
     provider_api_key: str,
     temperature: float,
-    max_tokens: int =
+    max_tokens: int = _DEFAULT_MAX_TOKENS,
 ) -> BaseChatModel:
-    if model
+    if _MODEL_DICT[model] == "OpenAI":
         return ChatOpenAI(
             model=model,
             openai_api_key=provider_api_key,
@@ -35,7 +55,7 @@ def get_llm(
             streaming=True,
             max_tokens=max_tokens,
         )
-    elif model
+    elif _MODEL_DICT[model] == "Anthropic":
         return ChatAnthropic(
             model_name=model,
             anthropic_api_key=provider_api_key,
@@ -43,7 +63,7 @@ def get_llm(
             streaming=True,
             max_tokens_to_sample=max_tokens,
         )
-    elif model
+    elif _MODEL_DICT[model] == "Anyscale Endpoints":
         return ChatAnyscale(
             model=model,
             anyscale_api_key=provider_api_key,
@@ -59,8 +79,8 @@ def get_llm_chain(
     model: str,
     provider_api_key: str,
     system_prompt: str = _DEFAULT_SYSTEM_PROMPT,
-    temperature: float =
-    max_tokens: int =
+    temperature: float = _DEFAULT_TEMPERATURE,
+    max_tokens: int = _DEFAULT_MAX_TOKENS,
 ) -> LLMChain:
     """Return a basic LLMChain with memory."""
     prompt = ChatPromptTemplate.from_messages(
@@ -89,7 +109,7 @@ class StreamHandler(BaseCallbackHandler):
 
 
 def feedback_component(client):
-    scores = {"😀": 1, "🙂": 0.
+    scores = {"😀": 1, "🙂": 0.0, "😐": 0.5, "🙁": 0.25, "😞": 0}
     if feedback := streamlit_feedback(
         feedback_type="faces",
         optional_text_label="[Optional] Please provide an explanation",
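On the llm_stuff.py side, get_llm now branches on _MODEL_DICT[model] rather than on the model string itself. If the provider list keeps growing, the same idea extends naturally to a dispatch table. The sketch below is a dependency-free illustration of that option, not the committed code; the _build_openai/_build_anthropic/_build_anyscale helpers are hypothetical stand-ins for the ChatOpenAI, ChatAnthropic, and ChatAnyscale constructors used in the diff.

# Hypothetical sketch: the provider name from _MODEL_DICT keys into a factory
# table, so get_llm stays a two-liner no matter how many providers exist.
from typing import Callable, Dict, Tuple

_MODEL_DICT: Dict[str, str] = {
    "gpt-3.5-turbo": "OpenAI",
    "claude-2": "Anthropic",
    "meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
}

# Stand-ins for ChatOpenAI / ChatAnthropic / ChatAnyscale.
def _build_openai(model: str, key: str) -> Tuple[str, str]:
    return ("OpenAI", model)

def _build_anthropic(model: str, key: str) -> Tuple[str, str]:
    return ("Anthropic", model)

def _build_anyscale(model: str, key: str) -> Tuple[str, str]:
    return ("Anyscale Endpoints", model)

_FACTORIES: Dict[str, Callable[[str, str], Tuple[str, str]]] = {
    "OpenAI": _build_openai,
    "Anthropic": _build_anthropic,
    "Anyscale Endpoints": _build_anyscale,
}

def get_llm(model: str, provider_api_key: str) -> Tuple[str, str]:
    provider = _MODEL_DICT[model]  # KeyError signals an unsupported model
    return _FACTORIES[provider](model, provider_api_key)

print(get_llm("claude-2", "fake-key"))  # ('Anthropic', 'claude-2')

At three providers the committed if/elif is arguably clearer; a factory table only pays off once provider-specific keyword arguments are also moved into per-provider helpers.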