nickmuchi committed
Commit 44ef0b2 · 1 Parent(s): 9bb2bd8

Update app.py

Files changed (1)
  1. app.py +21 -8
app.py CHANGED
@@ -3,7 +3,6 @@ import streamlit as st
 
 from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
 from langchain.vectorstores.faiss import FAISS
-from langchain.chains import ChatVectorDBChain
 from huggingface_hub import snapshot_download
 from langchain.chat_models import ChatOpenAI
 from langchain.prompts.chat import (
@@ -18,6 +17,11 @@ from langchain.schema import (
     SystemMessage
 )
 
+from langchain.chains.llm import LLMChain
+from langchain.callbacks.base import CallbackManager
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
+from langchain.chains.question_answering import load_qa_chain
 
 st.set_page_config(page_title="CFA Level 1", page_icon="📖")
 
@@ -114,14 +118,23 @@ def load_prompt():
 
 @st.experimental_singleton(show_spinner=False)
 def load_chain():
-    llm = ChatOpenAI(temperature=0)
+    '''Load langchain Conversational Retrieval Chain'''
+
     cfa_db = load_vectorstore(embeddings)
-
-    qa = ChatVectorDBChain.from_llm(llm,
-                                    cfa_db,
-                                    qa_prompt=load_prompt(),
-                                    return_source_documents=True,
-                                    )
+    llm = ChatOpenAI(temperature=0)
+    streaming_llm = ChatOpenAI(streaming=True,
+                               callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
+                               verbose=True,
+                               temperature=0)
+
+    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
+    doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=load_prompt())
+
+    qa = ConversationalRetrievalChain(
+        retriever=vectorstore.as_retriever(),
+        combine_docs_chain=doc_chain,
+        question_generator=question_generator,
+        return_source_documents=True)
 
     return qa
 
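For context, this commit swaps the ChatVectorDBChain built via from_llm() for a manually assembled ConversationalRetrievalChain: a non-streaming ChatOpenAI condenses the follow-up question against the chat history, while a second, streaming ChatOpenAI answers over the retrieved documents. Below is a minimal, self-contained sketch of the updated load_chain() as it would need to look to actually run. It assumes ConversationalRetrievalChain is imported from langchain.chains (no such import appears in the hunks above) and that the retriever is built from cfa_db, since the committed line references a vectorstore name not defined inside the function; load_vectorstore, load_prompt, embeddings and st come from the rest of app.py.

# Sketch only; the two commented assumptions are not confirmed by the diff above.
from langchain.chains import ConversationalRetrievalChain   # assumption: import not shown in the diff
from langchain.chains.llm import LLMChain
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI

@st.experimental_singleton(show_spinner=False)
def load_chain():
    '''Load langchain Conversational Retrieval Chain'''
    cfa_db = load_vectorstore(embeddings)   # FAISS store loaded elsewhere in app.py

    # Non-streaming model rewrites (question + chat history) into a standalone
    # question; the streaming model answers it, emitting tokens to stdout.
    llm = ChatOpenAI(temperature=0)
    streaming_llm = ChatOpenAI(streaming=True,
                               callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
                               verbose=True,
                               temperature=0)

    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
    doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=load_prompt())

    qa = ConversationalRetrievalChain(
        retriever=cfa_db.as_retriever(),    # assumption: cfa_db, not the undefined `vectorstore`
        combine_docs_chain=doc_chain,
        question_generator=question_generator,
        return_source_documents=True)
    return qa

Calling the chain with a dict of the form {"question": query, "chat_history": history} should then return the answer together with the retrieved passages (result["answer"] and result["source_documents"]), given return_source_documents=True.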