Luigi committed
Commit 3e4847c · 1 Parent(s): b6129d9

fix error: ValueError: Conversation roles must alternate user/assistant/user/assistant
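For context (not part of the commit): many chat templates applied by `llm.create_chat_completion` (Llama-2-style templates in particular) reject message lists where two entries from the same role appear in a row. A Streamlit rerun can append a second user message before the assistant's reply is stored, producing exactly that shape. A hypothetical minimal reproduction, assuming such a template:

# Hypothetical reproduction sketch (assumes a strict-alternation chat template).
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "first question"},
    {"role": "user", "content": "second question"},  # no assistant turn in between
]
# llm.create_chat_completion(messages=messages)
# -> ValueError: Conversation roles must alternate user/assistant/user/assistant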

Files changed (1)
  1. app.py +35 -31
app.py CHANGED
@@ -131,34 +131,38 @@ st.caption(f"Powered by `llama.cpp` | Model: {selected_model['filename']}")
 user_input = st.chat_input("Ask something...")

 if user_input:
-    st.session_state.chat_history.append({"role": "user", "content": user_input})
-
-    with st.chat_message("user"):
-        st.markdown(user_input)
-
-    # Trim conversation history to max 8 turns (user+assistant)
-    MAX_TURNS = 8
-    trimmed_history = st.session_state.chat_history[-MAX_TURNS * 2:]
-    messages = [{"role": "system", "content": system_prompt}] + trimmed_history
-
-    with st.chat_message("assistant"):
-        full_response = ""
-        response_area = st.empty()
-
-        stream = llm.create_chat_completion(
-            messages=messages,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            top_k=top_k,
-            top_p=top_p,
-            repeat_penalty=repeat_penalty,
-            stream=True,
-        )
-
-        for chunk in stream:
-            if "choices" in chunk:
-                delta = chunk["choices"][0]["delta"].get("content", "")
-                full_response += delta
-                response_area.markdown(full_response)
-
-    st.session_state.chat_history.append({"role": "assistant", "content": full_response})
+    # Prevent appending user message if assistant hasn't replied yet
+    if len(st.session_state.chat_history) % 2 == 1:
+        st.warning("Please wait for the assistant to respond before sending another message.")
+    else:
+        st.session_state.chat_history.append({"role": "user", "content": user_input})
+
+        with st.chat_message("user"):
+            st.markdown(user_input)
+
+        # Trim conversation history to max 8 turns (user+assistant)
+        MAX_TURNS = 8
+        trimmed_history = st.session_state.chat_history[-MAX_TURNS * 2:]
+        messages = [{"role": "system", "content": system_prompt}] + trimmed_history
+
+        with st.chat_message("assistant"):
+            full_response = ""
+            response_area = st.empty()
+
+            stream = llm.create_chat_completion(
+                messages=messages,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_k=top_k,
+                top_p=top_p,
+                repeat_penalty=repeat_penalty,
+                stream=True,
+            )
+
+            for chunk in stream:
+                if "choices" in chunk:
+                    delta = chunk["choices"][0]["delta"].get("content", "")
+                    full_response += delta
+                    response_area.markdown(full_response)
+
+        st.session_state.chat_history.append({"role": "assistant", "content": full_response})
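
Why the guard works, as a minimal sketch (not part of the commit, assuming `st.session_state.chat_history` holds plain {"role", "content"} dicts): with the `% 2 == 1` check, a new user message is only appended when the history length is even, so messages stay strictly user/assistant alternating; and because `[-MAX_TURNS * 2:]` slices an even number of messages, trimming keeps whole pairs and the window still opens with a user turn.

# Sketch of the alternation invariant the guard above enforces.
def alternates(history):
    """True if history is user, assistant, user, assistant, ... starting with user."""
    expected = ["user", "assistant"]
    return all(msg["role"] == expected[i % 2] for i, msg in enumerate(history))

history = [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "hello"},
]
assert alternates(history)          # guard only appends "user" at even lengths
MAX_TURNS = 8
assert alternates(history[-MAX_TURNS * 2:])  # even-sized trim preserves pairs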