Spaces:
Running
Running
Commit
·
cf23d65
1
Parent(s):
0eb28b0
Update .gitignore and adjust Max Tokens slider range in app.py
Browse files
- .gitignore +3 -1
- app.py +1 -1
.gitignore
CHANGED
@@ -1 +1,3 @@
|
|
1 |
-
secrets.toml
|
|
|
|
|
|
1 |
+
secrets.toml
|
2 |
+
__pycache__/
|
3 |
+
*.pkl
|
app.py
CHANGED
@@ -165,7 +165,7 @@ def main_page():
|
|
165 |
# Model settings
|
166 |
with st.expander("⚙️ Customize AI Settings", expanded=True):
|
167 |
st.slider("Sampling Temperature", min_value=0.0, max_value=1.0, step=0.1, key="temperature", help="Higher values make output more random.")
|
168 |
-
st.slider("Max Tokens", min_value=
|
169 |
st.selectbox("Choose AI Model", ["llama-3.1-8b-instant", "llama3-70b-8192", "gemma-7b-it"], key="model")
|
170 |
|
171 |
# Display messages and input box
|
|
|
165 |
# Model settings
|
166 |
with st.expander("⚙️ Customize AI Settings", expanded=True):
|
167 |
st.slider("Sampling Temperature", min_value=0.0, max_value=1.0, step=0.1, key="temperature", help="Higher values make output more random.")
|
168 |
+
st.slider("Max Tokens", min_value=750, max_value=5000, step=50, key="max_tokens", help="Limits the length of the response.")
|
169 |
st.selectbox("Choose AI Model", ["llama-3.1-8b-instant", "llama3-70b-8192", "gemma-7b-it"], key="model")
|
170 |
|
171 |
# Display messages and input box
|