Update app.py
app.py CHANGED
@@ -166,11 +166,28 @@ def chat_gpt_nofn(prompt=None, history=[], model=model, client=client):
     history.append({"role": "user", "content": f"{prompt}"})
     messages = history
 
+    # completion = client.chat.completions.create(
+    # model=model,
+    # messages=messages,
+    # )
+    # return completion
     completion = client.chat.completions.create(
-        model=model,
-        messages=messages,
-    )
-    return completion
+        model=model,
+        messages=messages,
+        stream=True
+    )
+
+    full_response = ""
+    for chunk in completion:
+        try:
+            if chunk.choices[0].delta.content is not None:
+                content = chunk.choices[0].delta.content
+                full_response += content
+                yield content
+        except:
+            pass
+
+    return full_response
 
 def format_chat_prompt(chat_history):
     prompt = []
@@ -314,21 +331,33 @@ def chat(question, manual, history, liked):
     Ref-2:
     ...
     """
-    final_response = chat_gpt_nofn(prompt=prompt, history=conv)
-    response = final_response.choices[-1].message.content
-    conv.append(final_response.choices[-1].message)
+    # final_response = chat_gpt_nofn(prompt=prompt, history=conv)
+    # response = final_response.choices[-1].message.content
+    # conv.append(final_response.choices[-1].message)
 
-    history.append((question, response))
-    print("Answer:--- %s seconds ---" % (time.time() - start_time))
-    # Store the last interaction without saving to the database yet
-    last_interaction.value = {
+    # history.append((question, response))
+    final_response = ""
+    for chunk in chat_gpt_nofn(prompt=prompt, history=conv):
+        final_response += chunk
+        last_interaction.value = {
         "question": question,
         "response": response,
         "manual": manual,
         "point_id": uuid.uuid4().hex
     }
+        yield "", history + [(question, final_response)]
+    conv.append({"role": "user", "content": prompt})
+    conv.append({"role": "assistant", "content": final_response})
+    print("Answer:--- %s seconds ---" % (time.time() - start_time))
+    # Store the last interaction without saving to the database yet
+    #last_interaction.value = {
+    # "question": question,
+    # "response": response,
+    # "manual": manual,
+    # "point_id": uuid.uuid4().hex
+    #}
 
-    return '', history
+    #return '', history
 
 def save_last_interaction(feedback):
     if last_interaction.value:
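The commit turns chat_gpt_nofn into a generator that streams tokens from the OpenAI client with stream=True, and chat now consumes it chunk by chunk and yields (textbox, history) updates. Below is a minimal, self-contained sketch of how such a generator pair is typically wired into a Gradio Blocks UI; the component names, the model string, and the stream_answer handler are illustrative assumptions, not code from app.py.

```python
# Illustrative sketch only: the same streaming pattern the commit adopts
# (OpenAI chat completions with stream=True, consumed by a Gradio generator handler).
# Component names, the model string, and stream_answer are assumptions.
import gradio as gr
from openai import OpenAI

client = OpenAI()        # assumes OPENAI_API_KEY is set in the environment
MODEL = "gpt-4o-mini"    # placeholder model name

def stream_answer(question, history):
    """Yield (textbox_value, chat_history) tuples as tokens arrive."""
    stream = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": question}],
        stream=True,
    )
    partial = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # some chunks (e.g. the final one) carry no content
            partial += delta
            yield "", history + [(question, partial)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # A generator handler streams each yielded value to the outputs in turn.
    msg.submit(stream_answer, inputs=[msg, chatbot], outputs=[msg, chatbot])

demo.queue().launch()
```

This mirrors the new chat handler, which yields ("", history + [(question, final_response)]) on every chunk so the Chatbot repaints incrementally; Gradio needs the queue enabled for generator handlers to stream.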