Commit 5abcb47
Parent(s): 79e7fbe
cap temperature to 2.0 as it is the maximum for openai gpt models
streamlit_app.py  +3 -3
@@ -24,7 +24,7 @@ with st.sidebar:
 
     st.subheader('Models and parameters')
    selected_model = st.sidebar.selectbox('Choose an OpenAI model', ['gpt-3.5-turbo-1106', 'gpt-4-1106-preview'], key='selected_model')
-   temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=
+   temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=2.0, value=0.1, step=0.01)
    st.markdown('📖 Reach out to Sakimilo to learn how to create this app!')
 
 # Store LLM generated responses
@@ -42,8 +42,8 @@ st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 
 def generate_llm_response(prompt_input):
    system_content = ("You are a helpful assistant. "
-
-
+                     "You do not respond as 'User' or pretend to be 'User'. "
+                     "You only respond once as 'Assistant'."
                      )
 
    completion = client.chat.completions.create(
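For reference, a minimal, self-contained sketch of how the capped slider value flows into the chat completions call (assuming the openai v1 Python client; the model name and messages below are placeholders, not the app's actual values). OpenAI chat models accept temperature in the range 0 to 2, which is why max_value is capped at 2.0:

# Sketch only: demonstrates passing the capped temperature slider value
# into the OpenAI chat completions API. `client` here is a plain
# openai.OpenAI() instance, assumed to match the one used in streamlit_app.py.
import streamlit as st
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# OpenAI chat models accept temperature values in [0, 2], hence max_value=2.0
temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=2.0,
                                value=0.1, step=0.01)

completion = client.chat.completions.create(
    model='gpt-3.5-turbo-1106',          # placeholder model choice
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},   # placeholder prompt
    ],
    temperature=temperature,             # slider value, already capped at 2.0
)
st.write(completion.choices[0].message.content)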