Update app.py
app.py CHANGED
@@ -1,5 +1,7 @@
 import os
 import streamlit as st
+import random
+import time
 from langchain.chat_models import ChatOpenAI
 from langchain.chains import ConversationalRetrievalChain
 from langchain.prompts import PromptTemplate
@@ -88,7 +90,7 @@ st.markdown("""
     margin: 5px 0;
     text-align: left;
 }
-.bot-message {
+.assistant-message {
     background-color: #e2e3e5;
     border-radius: 10px;
     padding: 10px;
@@ -99,30 +101,36 @@ st.markdown("""
 """, unsafe_allow_html=True)

 # Initialize chat history in session state
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = []
-...
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+# Accept user input
+if prompt := st.chat_input("What are you looking to learn?"):
+    # Add user message to chat history
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    # Display user message in chat message container
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    # Assistant response generator with streaming effect
+    with st.chat_message("assistant"):
+        response = qa_chain({"question": prompt})
+        response_text = response["answer"]

-...
+        # Simulate streaming response
+        for word in response_text.split():
+            st.markdown(word + " ", unsafe_allow_html=True)
+            time.sleep(0.05)  # Delay for effect

-...
-st.markdown('<div class="chat-container">', unsafe_allow_html=True)
-for chat in st.session_state.chat_history:
-    st.markdown(f'<div class="user-message"><strong>You:</strong> {chat["user"]}</div>', unsafe_allow_html=True)
-    st.markdown(f'<div class="bot-message"><strong>AI Recommendation:</strong> {chat["bot"]}</div>', unsafe_allow_html=True)
-st.markdown('</div>', unsafe_allow_html=True)
+    # Add assistant response to chat history
+    st.session_state.messages.append({"role": "assistant", "content": response_text})

 # Optional: Add a button to clear the chat history
 if st.button("Clear Chat History"):
-    st.session_state.chat_history = []
+    st.session_state.messages.clear()
     st.experimental_rerun()
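The assistant block above calls qa_chain({"question": prompt}), which is built earlier in app.py, outside the hunks shown here. Judging from the imports, it is a ConversationalRetrievalChain; a minimal sketch of that kind of setup, with an illustrative FAISS retriever and conversation memory standing in for whatever the real app constructs, could look like this:

# Illustrative sketch only -- the real chain, retriever, and data live in the part
# of app.py that this diff does not show.
import os
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import FAISS

# Stand-in knowledge base; the real app presumably loads its own documents.
vectorstore = FAISS.from_texts(
    ["Course A covers Python basics.", "Course B covers machine learning."],
    OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"]),
)

qa_chain = ConversationalRetrievalChain.from_llm(
    ChatOpenAI(temperature=0, openai_api_key=os.environ["OPENAI_API_KEY"]),
    retriever=vectorstore.as_retriever(),
    memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
)

# With memory attached, qa_chain({"question": prompt}) returns a dict whose "answer"
# key holds the generated text, which is what the new code reads as response["answer"].

If the real chain is built without memory, ConversationalRetrievalChain also expects a chat_history entry in its input, so the call in the diff would need to pass one explicitly, e.g. qa_chain({"question": prompt, "chat_history": []}).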
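A side effect of the word-by-word loop in the new code is that every st.markdown call adds a separate element inside the assistant message, so the reply renders as a stack of fragments rather than one growing paragraph, and splitting on whitespace drops the answer's original line breaks. A common alternative, sketched below under the same assumption that qa_chain and prompt come from the surrounding app code, is to rewrite a single st.empty() placeholder as the text accumulates; newer Streamlit releases also provide st.write_stream for this purpose and st.rerun() as the stable name for st.experimental_rerun().

# Sketch of an alternative streaming render for the assistant block; assumes qa_chain,
# prompt, and st.session_state.messages are defined elsewhere in app.py.
import time
import streamlit as st

with st.chat_message("assistant"):
    response = qa_chain({"question": prompt})
    response_text = response["answer"]

    placeholder = st.empty()                  # one slot, rewritten on every iteration
    streamed = ""
    for word in response_text.split():
        streamed += word + " "
        placeholder.markdown(streamed + "▌")  # trailing cursor while "streaming"
        time.sleep(0.05)
    placeholder.markdown(response_text)       # final answer, original formatting intact

st.session_state.messages.append({"role": "assistant", "content": response_text})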