nickmuchi committed on
Commit 1d13bf0 · 1 Parent(s): 61d5ed8

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -19,7 +19,6 @@ from langchain.schema import (
 )
 
 
-
 st.set_page_config(page_title="CFA Level 1", page_icon="📖")
 
 #Load API Key
@@ -88,8 +87,7 @@ def load_vectorstore(_embeddings):
 def load_prompt():
     system_template="""You are an expert in finance, economics, investing, ethics, derivatives and markets.
     Use the following pieces of context to answer the users question. If you don't know the answer,
-    just say that you don't know, don't try to make up an answer. Provide a source reference.
-    ALWAYS return a "sources" part in your answer.
+    just say that you don't know, don't try to make up an answer. Provide a source reference. ALWAYS return a "sources" part in your answer.
     The "sources" part should be a reference to the source of the documents from which you got your answer.
 
     Remember to only use the given context to answer the question, very important.
@@ -116,20 +114,20 @@ def load_prompt():
 @st.experimental_singleton(show_spinner=False)
 def load_chain():
     llm = ChatOpenAI(temperature=0)
+    cfa_db = load_vectorstore(embeddings)
 
     qa = ChatVectorDBChain.from_llm(llm,
-                                    load_vectorstore(embeddings),
+                                    cfa_db,
                                     qa_prompt=load_prompt(),
                                     return_source_documents=True)
 
     return qa
 
+chat_history = []
 
 def get_answer(question):
     '''Generate an answer from the chain'''
 
-    chat_history = []
-
     chain = load_chain()
     result = chain({"question": question, "chat_history": chat_history})
 
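For reference, a minimal sketch of how the touched code reads after this commit, reconstructed only from the hunks above rather than the full file. The two import paths are assumptions for the LangChain version in use at the time, and embeddings, load_vectorstore, and load_prompt are defined elsewhere in app.py and are not changed by this commit.

import streamlit as st
from langchain.chat_models import ChatOpenAI      # assumed import path for this LangChain version
from langchain.chains import ChatVectorDBChain    # assumed import path for this LangChain version

@st.experimental_singleton(show_spinner=False)
def load_chain():
    # The singleton decorator builds the chain once and reuses it across Streamlit reruns.
    llm = ChatOpenAI(temperature=0)
    cfa_db = load_vectorstore(embeddings)          # vector store is now bound to a local name first
    qa = ChatVectorDBChain.from_llm(llm,
                                    cfa_db,
                                    qa_prompt=load_prompt(),
                                    return_source_documents=True)
    return qa

# Chat history now lives at module scope instead of being re-created inside get_answer().
chat_history = []

def get_answer(question):
    '''Generate an answer from the chain'''
    chain = load_chain()
    result = chain({"question": question, "chat_history": chat_history})
    # ... rest of the function is outside the hunks shown above.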