Update app.py
app.py CHANGED
@@ -16,12 +16,6 @@ if not GROQ_API_KEY:
 # Connect to Groq
 client = Groq(api_key=GROQ_API_KEY)
 
-#models' endpoints:
-GROQ_MODEL_ENDPOINT70B = os.environ.get(GROQ_API_KEY, model='llama3-70b-8192')
-GROQ_MODEL_ENDPOINT8B = os.environ.get(GROQ_API_KEY, model='llama3-8b-8192')
-PRE_PROMPT = "You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as Assistant."
-#Auth0 for auth
-
 # Set up Streamlit app
 st.set_page_config(page_title="LLaMA 3x", page_icon="🦙", layout="wide")
 
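Note on the removed block: os.environ.get() takes an environment-variable name and an optional default, and has no model keyword, so both GROQ_MODEL_ENDPOINT lines would raise TypeError as soon as the module was imported. If the constants were meant to survive, a minimal corrected sketch (plain strings, since the Groq SDK only needs the model ID) would be:

# Sketch: the removed constants without the os.environ.get() misuse.
# The model IDs are plain strings passed to the Groq client later on.
GROQ_MODEL_ENDPOINT70B = 'llama3-70b-8192'
GROQ_MODEL_ENDPOINT8B = 'llama3-8b-8192'

Also note that PRE_PROMPT is deleted here but still read at new line 60 (st.session_state['pre_prompt'] = PRE_PROMPT, kept in the next hunk), so render_app() will raise NameError unless the constant is reintroduced elsewhere in the file.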
@@ -64,14 +58,7 @@ def render_app():
         st.session_state['max_seq_len'] = 512
     if 'pre_prompt' not in st.session_state:
         st.session_state['pre_prompt'] = PRE_PROMPT
-
-
-    #Dropdown menu to select the model edpoint:
-    selected_option = st.sidebar.selectbox('Choose a LLaMA2 model:', ['LLaMA3 70B', 'LLaMA3 8B'], key='model')
-    if selected_option == 'LLaMA-3 70B':
-        st.session_state['llm'] = GROQ_MODEL_ENDPOINT70B
-    else:
-        st.session_state['llm'] = GROQ_MODEL_ENDPOINT8B
+
 
     # Model hyperparameters
     st.session_state['temperature'] = st.sidebar.slider('Temperature:', min_value=0.01, max_value=5.0, value=0.1, step=0.01)
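Note on the removed selector: besides being dropped, it contained a label mismatch. The options are 'LLaMA3 70B' and 'LLaMA3 8B', but the comparison checks 'LLaMA-3 70B' (hyphenated), so the 70B branch could never match and the 8B endpoint was always chosen. A minimal fixed sketch, assuming the model-ID constants from the previous note:

# Sketch: the removed dropdown with the comparison matching the option
# labels exactly, so the 70B endpoint is actually selectable.
selected_option = st.sidebar.selectbox('Choose a LLaMA3 model:',
                                       ['LLaMA3 70B', 'LLaMA3 8B'], key='model')
if selected_option == 'LLaMA3 70B':
    st.session_state['llm'] = GROQ_MODEL_ENDPOINT70B
else:
    st.session_state['llm'] = GROQ_MODEL_ENDPOINT8B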
@@ -129,7 +116,7 @@ def render_app():
         messages = [{"role": msg["role"], "content": msg["content"]} for msg in st.session_state.chat_dialogue]
         chat_completion = client.chat.completions.create(
             messages=messages,
-            model=
+            model="llama3-70b-8192",
             temperature=st.session_state['temperature'],
             top_p=st.session_state['top_p'],
             max_tokens=st.session_state['max_seq_len']
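Note on the fix: old line 132 left the keyword bare (model=), which cannot parse and is a likely cause of the Space's runtime error; the commit hardcodes "llama3-70b-8192" instead. Had the sidebar selector been kept, the call could read the choice from session state; a sketch assuming st.session_state['llm'] holds the selected model ID:

# Sketch: the same Groq call, but driven by the (removed) model selector.
chat_completion = client.chat.completions.create(
    messages=messages,
    model=st.session_state['llm'],
    temperature=st.session_state['temperature'],
    top_p=st.session_state['top_p'],
    max_tokens=st.session_state['max_seq_len']
)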