Spaces:
Runtime error
Runtime error
Joshua Sundance Bailey
committed on
Commit
·
979e3bd
1
Parent(s):
72c3d8c
update streamlit & clean up ui
Browse files- langchain-streamlit-demo/app.py +11 -9
- requirements.txt +1 -1
langchain-streamlit-demo/app.py
CHANGED
|
@@ -95,7 +95,7 @@ MODEL_DICT = {
|
|
| 95 |
"meta-llama/Llama-2-13b-chat-hf": "Anyscale Endpoints",
|
| 96 |
"meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
|
| 97 |
"codellama/CodeLlama-34b-Instruct-hf": "Anyscale Endpoints",
|
| 98 |
-
"
|
| 99 |
}
|
| 100 |
SUPPORTED_MODELS = list(MODEL_DICT.keys())
|
| 101 |
|
|
@@ -198,7 +198,7 @@ with sidebar:
|
|
| 198 |
f"{st.session_state.provider} API key",
|
| 199 |
type="password",
|
| 200 |
)
|
| 201 |
-
if st.session_state.provider != "
|
| 202 |
else ""
|
| 203 |
)
|
| 204 |
|
|
@@ -288,8 +288,8 @@ with sidebar:
|
|
| 288 |
else:
|
| 289 |
st.error("Please enter a valid OpenAI API key.", icon="❌")
|
| 290 |
|
| 291 |
-
# --- Advanced
|
| 292 |
-
with st.expander("Advanced
|
| 293 |
st.markdown("## Feedback Scale")
|
| 294 |
use_faces = st.toggle(label="`Thumbs` ⇄ `Faces`", value=False)
|
| 295 |
feedback_option = "faces" if use_faces else "thumbs"
|
|
@@ -320,14 +320,16 @@ with sidebar:
|
|
| 320 |
help="Higher values give longer results.",
|
| 321 |
)
|
| 322 |
|
| 323 |
-
|
| 324 |
-
|
|
|
|
| 325 |
"LangSmith API Key (optional)",
|
| 326 |
type="password",
|
|
|
|
| 327 |
)
|
| 328 |
-
LANGSMITH_PROJECT =
|
| 329 |
"LangSmith Project Name",
|
| 330 |
-
value="langchain-streamlit-demo",
|
| 331 |
)
|
| 332 |
if st.session_state.client is None and LANGSMITH_API_KEY:
|
| 333 |
st.session_state.client = Client(
|
|
@@ -400,7 +402,7 @@ if provider_api_key:
|
|
| 400 |
streaming=True,
|
| 401 |
max_tokens=max_tokens,
|
| 402 |
)
|
| 403 |
-
elif AZURE_AVAILABLE and st.session_state.provider == "
|
| 404 |
st.session_state.llm = AzureChatOpenAI(
|
| 405 |
openai_api_base=AZURE_OPENAI_BASE_URL,
|
| 406 |
openai_api_version=AZURE_OPENAI_API_VERSION,
|
|
|
|
| 95 |
"meta-llama/Llama-2-13b-chat-hf": "Anyscale Endpoints",
|
| 96 |
"meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
|
| 97 |
"codellama/CodeLlama-34b-Instruct-hf": "Anyscale Endpoints",
|
| 98 |
+
"Azure OpenAI": "Azure OpenAI",
|
| 99 |
}
|
| 100 |
SUPPORTED_MODELS = list(MODEL_DICT.keys())
|
| 101 |
|
|
|
|
| 198 |
f"{st.session_state.provider} API key",
|
| 199 |
type="password",
|
| 200 |
)
|
| 201 |
+
if st.session_state.provider != "Azure OpenAI"
|
| 202 |
else ""
|
| 203 |
)
|
| 204 |
|
|
|
|
| 288 |
else:
|
| 289 |
st.error("Please enter a valid OpenAI API key.", icon="❌")
|
| 290 |
|
| 291 |
+
# --- Advanced Settings ---
|
| 292 |
+
with st.expander("Advanced Settings", expanded=False):
|
| 293 |
st.markdown("## Feedback Scale")
|
| 294 |
use_faces = st.toggle(label="`Thumbs` ⇄ `Faces`", value=False)
|
| 295 |
feedback_option = "faces" if use_faces else "thumbs"
|
|
|
|
| 320 |
help="Higher values give longer results.",
|
| 321 |
)
|
| 322 |
|
| 323 |
+
# --- LangSmith Options ---
|
| 324 |
+
with st.expander("LangSmith Options", expanded=False):
|
| 325 |
+
LANGSMITH_API_KEY = st.text_input(
|
| 326 |
"LangSmith API Key (optional)",
|
| 327 |
type="password",
|
| 328 |
+
value=PROVIDER_KEY_DICT.get("LANGSMITH"),
|
| 329 |
)
|
| 330 |
+
LANGSMITH_PROJECT = st.text_input(
|
| 331 |
"LangSmith Project Name",
|
| 332 |
+
value=DEFAULT_LANGSMITH_PROJECT or "langchain-streamlit-demo",
|
| 333 |
)
|
| 334 |
if st.session_state.client is None and LANGSMITH_API_KEY:
|
| 335 |
st.session_state.client = Client(
|
|
|
|
| 402 |
streaming=True,
|
| 403 |
max_tokens=max_tokens,
|
| 404 |
)
|
| 405 |
+
elif AZURE_AVAILABLE and st.session_state.provider == "Azure OpenAI":
|
| 406 |
st.session_state.llm = AzureChatOpenAI(
|
| 407 |
openai_api_base=AZURE_OPENAI_BASE_URL,
|
| 408 |
openai_api_version=AZURE_OPENAI_API_VERSION,
|
requirements.txt
CHANGED
|
@@ -6,7 +6,7 @@ numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
|
|
| 6 |
openai==0.28.1
|
| 7 |
pypdf==3.16.2
|
| 8 |
rank_bm25==0.2.2
|
| 9 |
-
streamlit==1.27.
|
| 10 |
streamlit-feedback==0.1.2
|
| 11 |
tiktoken==0.5.1
|
| 12 |
tornado>=6.3.3 # not directly required, pinned by Snyk to avoid a vulnerability
|
|
|
|
| 6 |
openai==0.28.1
|
| 7 |
pypdf==3.16.2
|
| 8 |
rank_bm25==0.2.2
|
| 9 |
+
streamlit==1.27.2
|
| 10 |
streamlit-feedback==0.1.2
|
| 11 |
tiktoken==0.5.1
|
| 12 |
tornado>=6.3.3 # not directly required, pinned by Snyk to avoid a vulnerability
|