ProfessorLeVesseur committed (verified)
Commit 090aa9d · Parent: 2e9378a

Update app.py

Files changed (1): app.py (+4 -1)

app.py CHANGED
@@ -46,12 +46,15 @@ def calculate_embedding_cost(text):
     # print(f'Embedding Cost in USD: {total_tokens / 1000 * 0.0004:.6f}')
     return total_tokens, total_tokens / 1000 * 0.0004
 
+
 def ask_with_memory(vector_store, query, chat_history=[]):
     from langchain.chains import ConversationalRetrievalChain
     from langchain.chat_models import ChatOpenAI
 
     llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=1, openai_api_key=openai_api_key)
-    retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': 3})
+
+    # The retriever is created with metadata filter directly in search_kwargs
+    retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': 3, 'filter': {'source': {'$eq': '/Users/cheynelevesseur/Desktop/Python_Code/Projects/LLM/Intensifying Literacy Instruction - Essential Practices (NATIONAL).pdf'}}})
 
     chain= ConversationalRetrievalChain.from_llm(llm, retriever)
     result = chain({'question': query, 'chat_history': st.session_state['history']})
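
The substance of the change is the 'filter' entry in search_kwargs: similarity search is restricted to chunks whose 'source' metadata equals the given PDF path, so answers are drawn from that single document. The diff itself is not self-contained (vector_store, openai_api_key, and st.session_state are defined elsewhere in app.py), so the following is a minimal, runnable sketch of the same pattern. It assumes a Chroma vector store (the commit does not show which store backs app.py), an OPENAI_API_KEY in the environment, and a placeholder document path; '$eq' is the equality operator of the store's metadata-filter syntax.

# Hypothetical, self-contained sketch of a metadata-filtered retriever;
# the store, directory, and path below are placeholders, not taken from app.py.
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

embeddings = OpenAIEmbeddings()  # reads OPENAI_API_KEY from the environment
vector_store = Chroma(persist_directory='./chroma_db', embedding_function=embeddings)

# Keep only chunks whose 'source' metadata equals the chosen file.
retriever = vector_store.as_retriever(
    search_type='similarity',
    search_kwargs={
        'k': 3,
        'filter': {'source': {'$eq': '/path/to/Intensifying_Literacy_Instruction.pdf'}},
    },
)

llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=1)
chain = ConversationalRetrievalChain.from_llm(llm, retriever)

# ConversationalRetrievalChain expects the running chat history alongside the question.
result = chain({'question': 'What are the essential practices?', 'chat_history': []})
print(result['answer'])

In app.py the chat history is kept in st.session_state['history'] rather than a local list, but the retriever construction is the same.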