Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,79 +1,172 @@
import os
-import
-
-client = OpenAI(
-    base_url="https://api.groq.com/openai/v1", # URL for Groq's API endpoint
-    api_key=os.environ.get("GROQ_API_KEY") # Retrieve the API key from environment variables
-)
-
-# Example usage of the client (sending a request to Groq API)
-response = client.completions.create(
-    model="gpt-4", # You can replace with the desired model
-    prompt="Provide a summary of the Groq API documentation",
-    max_tokens=100
-)
-
-print(response.choices[0].text.strip()) # Output the response from Groq API
-
-gr.load(
-    name = "Ai;)",
-    src = groq_gradio.registry,
-    title = "Groq-Gradio Chat",
-    theme = "upsatwal/mlsc_tiet",
-    examples = [""""
-
-###system_prompt'''
-
-**Technical task**: Write an appealing, friendly rental listing for a property, containing the key information potential tenants need and describing the apartment's advantages.
-
-### Steps to follow
-
-2. **Based on the provided data**:
-   - Analyze the surroundings via Google Maps, describing nearby places, neighborhood amenities, infrastructure, and any unique features.
-   - If photos are provided, analyze them to add interior details and highlight features that will help attract tenants (for example, large windows, new appliances, a convenient layout).
-   - From all of this data, produce an improved listing that emphasizes the apartment's advantages so it reads as friendly and appealing to tenants.
-
-4. What is the rental price? Are discounts offered for long-term tenants?
-5. Attach photos of the apartment for a more detailed description.
-
----
-
-### Final example listing based on the collected information (in Ukrainian, localized for Kyiv):
-
-**Spacious two-room apartment for rent in central Kyiv, near the Palats Ukraina metro station!**
-
-Address: [specified address]
-
-**Apartment description**:
-A bright, modern apartment with a Scandinavian-style renovation. A spacious living room, a large kitchen with all the necessary appliances (refrigerator, stove, microwave), and a cozy bedroom with a large bed. The apartment has air conditioning, a TV, a washing machine, and plenty of storage space.
-
-**Infrastructure**:
-The apartment is within walking distance of the Palats Ukraina and Olimpiiska metro stations, with public transport stops close by. The neighborhood has many shops, cafes, parks, and playgrounds, as well as the Ocean Plaza mall, a convenient spot for shopping.
-
-**Price**: XXX UAH/month (discounts possible for long-term rental).
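The removed code had no working request path: Groq does not serve a "gpt-4" model, its OpenAI-compatible endpoint is used through chat completions rather than the legacy completions API, and the `examples = [""""` literal is never closed before the system-prompt text that follows. A minimal sketch of the pattern the old code was aiming at, assuming the openai package is installed and GROQ_API_KEY is set (the model name and prompt here are illustrative, not from the original commit):

import os
from openai import OpenAI

client = OpenAI(
    base_url="https://api.groq.com/openai/v1",  # Groq's OpenAI-compatible endpoint
    api_key=os.environ.get("GROQ_API_KEY"),
)

response = client.chat.completions.create(
    model="llama-3.1-8b-instant",  # illustrative: any chat model Groq serves
    messages=[{"role": "user", "content": "Provide a summary of the Groq API documentation"}],
    max_tokens=100,
)
print(response.choices[0].message.content.strip())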
import os
+from dotenv import find_dotenv, load_dotenv
+import streamlit as st
+from typing import Generator
+from groq import Groq

+_ = load_dotenv(find_dotenv())
+st.set_page_config(page_icon="🏎️", layout="wide", page_title="Groq & LLaMA3.1 Chat Bot...")


+def icon(emoji: str):
+    """Shows an emoji as a Notion-style page icon."""
+    st.write(
+        f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
+        unsafe_allow_html=True,
+    )


+# icon("⚡️")

+st.subheader("Groq Chat with LLaMA3.1 App", divider="rainbow", anchor=False)

+client = Groq(
+    api_key=os.environ['GROQ_API_KEY'],
+)
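load_dotenv(find_dotenv()) lets the same file work locally and on Spaces: locally the key comes from a .env file found next to (or above) the code, while on a Space it would be set as a repository secret. A minimal .env for local runs, with a placeholder value:

GROQ_API_KEY=your-key-here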

+# Initialize chat history and selected model
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+if "selected_model" not in st.session_state:
+    st.session_state.selected_model = None
+
+# Define model details
+models = {
+    "llama-3.1-70b-versatile": {"name": "LLaMA3.1-70b", "tokens": 4096, "developer": "Meta"},
+    "llama-3.1-8b-instant": {"name": "LLaMA3.1-8b", "tokens": 4096, "developer": "Meta"},
+    "llama3-70b-8192": {"name": "Meta Llama 3 70B", "tokens": 4096, "developer": "Meta"},
+    "llama3-8b-8192": {"name": "Meta Llama 3 8B", "tokens": 4096, "developer": "Meta"},
+    "llama3-groq-70b-8192-tool-use-preview": {"name": "Llama 3 Groq 70B Tool Use (Preview)", "tokens": 4096, "developer": "Groq"},
+    "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 4096, "developer": "Google"},
+    "mixtral-8x7b-32768": {
+        "name": "Mixtral-8x7b-Instruct-v0.1",
+        "tokens": 32768,
+        "developer": "Mistral",
+    },
+}
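The "tokens" values in this table only cap the response length requested through the max_tokens slider below; they are not the models' context windows. Hard-coded model IDs also drift as Groq retires previews, so a quick sanity check, sketched here with the Groq SDK's models.list() and the client defined above, can list what the key can actually call:

for m in client.models.list().data:
    print(m.id)  # model IDs currently served to this API key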

+# Layout for model selection and max_tokens slider
+col1, col2 = st.columns([1, 3])  # Adjust the ratio to make the first column smaller

+with col1:
+    model_option = st.selectbox(
+        "Choose a model:",
+        options=list(models.keys()),
+        format_func=lambda x: models[x]["name"],
+        index=0,  # Default to the first model in the list
+    )
+    max_tokens_range = models[model_option]["tokens"]
+    max_tokens = st.slider(
+        "Max Tokens:",
+        min_value=512,
+        max_value=max_tokens_range,
+        value=min(32768, max_tokens_range),
+        step=512,
+        help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
+    )

+# Detect model change and clear chat history if model has changed
+if st.session_state.selected_model != model_option:
+    st.session_state.messages = []
+    st.session_state.selected_model = model_option

+# Add a "Clear Chat" button
+if st.button("Clear Chat"):
+    st.session_state.messages = []

+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    avatar = "🤖" if message["role"] == "assistant" else "🧑‍💻"
+    with st.chat_message(message["role"], avatar=avatar):
+        st.markdown(message["content"])


+def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
+    """Yield chat response content from the Groq API response."""
+    for chunk in chat_completion:
+        if chunk.choices[0].delta.content:
+            yield chunk.choices[0].delta.content
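The truthiness check on chunk.choices[0].delta.content matters here: streamed chunks can arrive with an empty or None delta (for instance, a final chunk that only carries a finish reason), and the generator should yield only real text. st.write_stream then renders the chunks incrementally and returns the concatenated string, or a list if anything non-string was streamed, which is what the isinstance check further down handles.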


+if prompt := st.chat_input("Enter your prompt here..."):
+    st.session_state.messages.append({"role": "user", "content": prompt})

+    with st.chat_message("user", avatar="🧑‍💻"):
+        st.markdown(prompt)

+    full_response = None  # Initialized so the checks after try/except cannot hit a NameError if the request fails
+
+    # Fetch response from Groq API
+    try:
+        chat_completion = client.chat.completions.create(
+            model=model_option,
+            messages=[
+                {"role": m["role"], "content": m["content"]}
+                for m in st.session_state.messages
+            ],
+            max_tokens=max_tokens,
+            stream=True,
+        )
+
+        # Use the generator function with st.write_stream
+        with st.chat_message("assistant", avatar="🤖"):
+            chat_responses_generator = generate_chat_responses(chat_completion)
+            full_response = st.write_stream(chat_responses_generator)
+    except Exception as e:
+        st.error(e, icon="❌")
+
+    # Append the full response to session_state.messages
+    if isinstance(full_response, str):
+        st.session_state.messages.append(
+            {"role": "assistant", "content": full_response}
+        )
+    elif full_response is not None:
+        # Handle the case where full_response is not a string
+        combined_response = "\n".join(str(item) for item in full_response)
+        st.session_state.messages.append(
+            {"role": "assistant", "content": combined_response}
+        )
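With the dependencies installed (for example, pip install streamlit groq python-dotenv) and the key available, the app runs locally with:

streamlit run app.py

On a Space the same file runs under the Streamlit SDK; since the client reads os.environ['GROQ_API_KEY'] directly, a missing Space secret raises a KeyError at startup, which is one plausible cause of the "Runtime error" status shown at the top of this page.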