import gradio as gr
from pipeline import run_with_chain
from my_memory_logic import memory, restatement_chain
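# NOTE: `memory` and `restatement_chain` live in a local `my_memory_logic`
# module that is not shown in this file. A minimal sketch of what that module
# might contain, assuming LangChain's ConversationBufferMemory and an LLMChain
# built around a question-restatement prompt (the prompt wording and the
# ChatOpenAI backend are illustrative assumptions, not the actual module):
#
#   # my_memory_logic.py (hypothetical sketch)
#   from langchain.chains import LLMChain
#   from langchain.memory import ConversationBufferMemory
#   from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
#   from langchain_openai import ChatOpenAI
#
#   memory = ConversationBufferMemory(return_messages=True)
#
#   restatement_prompt = ChatPromptTemplate.from_messages([
#       ("system",
#        "Given the chat history and the latest user question, rephrase the "
#        "question as a standalone question. Do not answer it."),
#       MessagesPlaceholder(variable_name="chat_history"),
#       ("human", "{input}"),
#   ])
#
#   # LLMChain exposes .run({...}), matching the call in chat_history_fn below.
#   restatement_chain = LLMChain(llm=ChatOpenAI(temperature=0),
#                                prompt=restatement_prompt)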
def chat_history_fn(user_input, history):
    # Rebuild memory from Gradio's `history` on every call, clearing it first
    # so earlier turns are not added twice across calls. With type="messages",
    # `history` is a list of {"role": ..., "content": ...} dicts.
    memory.chat_memory.clear()
    for msg in history:
        if msg["role"] == "user":
            memory.chat_memory.add_user_message(msg["content"])
        elif msg["role"] == "assistant":
            memory.chat_memory.add_ai_message(msg["content"])

    # 1) Restate the user question in light of the chat history
    reformulated_q = restatement_chain.run({
        "chat_history": memory.chat_memory.messages,
        "input": user_input,
    })

    # 2) Pass the reformulated question to the pipeline
    answer = run_with_chain(reformulated_q)

    # 3) Record the new turn in memory
    memory.chat_memory.add_user_message(user_input)
    memory.chat_memory.add_ai_message(answer)

    # 4) Return only the new assistant message. ChatInterface appends the
    #    return value to the displayed history itself, so returning the full
    #    history would duplicate every turn on screen.
    return answer
demo = gr.ChatInterface(
    fn=chat_history_fn,
    type="messages",  # pass history as role/content dicts, matching chat_history_fn
    title="DailyWellnessAI with Memory",
    description="A chatbot that remembers context using memory + question restatement.",
)
if __name__ == "__main__":
    demo.launch()
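# For completeness: `run_with_chain` from `pipeline` is assumed to take the
# standalone (restated) question string and return the final answer string,
# roughly:
#
#   # pipeline.py (hypothetical sketch)
#   def run_with_chain(question: str) -> str:
#       """Run the underlying QA chain on a standalone question."""
#       ...
#
# Running `python app.py` starts the Gradio server (by default at
# http://127.0.0.1:7860).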