Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-
+import openai
 import streamlit as st
 from langchain.llms import OpenAI
 from langchain.chat_models import ChatOpenAI
@@ -10,17 +10,11 @@ from langchain.prompts.prompt import PromptTemplate
 from langchain.vectorstores import FAISS
 import re
 import time
-# class CustomRetrievalQAWithSourcesChain(RetrievalQAWithSourcesChain):
-#     def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
-#         # Call the parent class's method to get the documents
-#         docs = super()._get_docs(inputs)
-#         # Modify the document metadata
-#         for doc in docs:
-#             doc.metadata['source'] = doc.metadata.pop('path')
-#         return docs
 
+
+# import e5-large-v2 embedding model
 model_name = "intfloat/e5-large-v2"
-model_kwargs = {'device': '
+model_kwargs = {'device': 'cuda'}
 encode_kwargs = {'normalize_embeddings': False}
 embeddings = HuggingFaceEmbeddings(
     model_name=model_name,
@@ -28,6 +22,7 @@ embeddings = HuggingFaceEmbeddings(
     encode_kwargs=encode_kwargs
 )
 
+# load IPCC database
 db = FAISS.load_local("IPCC_index_e5_1000_pdf", embeddings)
 
 
@@ -107,14 +102,16 @@ def generate_response(input_text):
 with st.sidebar:
     openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
     "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-
-    st.
-
+    st.markdown("## 🌍 Welcome to ClimateChat! 🌍")
+    st.markdown("ClimateChat Harnesses the latest [IPCC reports](https://www.ipcc.ch/report/ar6/wg3/) and the power of Large Language Models to answer your questions about climate change. When you interact with ClimateChat not only will you receive clear, concise, and accurate answers, but each response is coupled with sources and hyperlinks for further exploration and verification.\
+    Our objective is to make climate change information accessible, understandable, and actionable for everyone, everywhere.")
+    st.title("💬🌍🌡️ClimateChat")
+    st.caption("💬 A Climate Change chatbot powered by OpenAI LLM and IPCC documents")
 #col1, col2, = st.columns(2)
 
 
 if "messages" not in st.session_state:
-    st.session_state["messages"] = [{"role": "assistant", "content": "
+    st.session_state["messages"] = [{"role": "assistant", "content": "Any question about the climate change?"}]
 
 for msg in st.session_state.messages:
     st.chat_message(msg["role"]).write(msg["content"])
@@ -136,8 +133,27 @@ if prompt := st.chat_input():
         highlighted_text = match.group(1)
     else:
         highlighted_text="hello world"
-
-
+
+
+
+
+    # Display assistant response in chat message container
+    with st.chat_message("assistant"):
+        message_placeholder = st.empty()
+        full_response = ""
+        assistant_response = result_r
+        # Simulate stream of response with milliseconds delay
+        for chunk in assistant_response.split():
+            full_response += chunk + " "
+            time.sleep(0.05)
+            # Add a blinking cursor to simulate typing
+            message_placeholder.write(full_response + "▌")
+        message_placeholder.write(result_r)
+    # Add assistant response to chat history
+    st.session_state.messages.append({"role": "assistant", "content": result_r})
+
+    #st.session_state.messages.append({"role": "assistant", "content": result["result"]})
+    #st.chat_message("assistant").write(result_r)
 #display_typing_effect(st.chat_message("assistant"), result_r)
 #st.markdown(result['source_documents'][0])
 #st.markdown(result['source_documents'][1])
@@ -146,4 +162,4 @@ if prompt := st.chat_input():
 #st.markdown(result['source_documents'][4])
 
 
-    st.image("https://cataas.com/cat/says/"+highlighted_text)
+    #st.image("https://cataas.com/cat/says/"+highlighted_text)