ArturG9 committed (verified)
Commit 08a925d · 1 Parent(s): 5690af2

Update app.py

Files changed (1): app.py (+4 -2)
app.py CHANGED
@@ -2,7 +2,7 @@ import os
 import streamlit as st
 from dotenv import load_dotenv
 from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain_community.llms import llamacpp
+from langchain_community.llms import llamacpp, LlamaCpp
 from langchain_core.runnables.history import RunnableWithMessageHistory
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
@@ -16,7 +16,7 @@ from langchain.text_splitter import TokenTextSplitter, RecursiveCharacterTextSplitter
 from langchain_community.document_loaders.directory import DirectoryLoader
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
-
+from langchain_experimental.chat_models import Llama2Chat
 
 lang_api_key = os.getenv("lang_api_key")
 
@@ -173,6 +173,8 @@ def create_conversational_rag_chain(retriever):
         verbose=False,
     )
 
+    model = Llama2Chat(llm=llm)
+
     template = """Answer the question, based only on the following context:
     {context}.Be consise.
     Question: {question}
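
The commit wraps the raw llama.cpp completion model in Llama2Chat so that downstream chains can address it through chat messages. The surrounding code is not part of this diff, so the sketch below only illustrates the pattern the commit moves to; the model path and sampling parameters are hypothetical placeholders, not values taken from the repository.

# Minimal sketch of the LlamaCpp + Llama2Chat wiring this commit introduces.
# The model_path and generation settings are placeholders, not the app's real config.
from langchain_community.llms import LlamaCpp
from langchain_experimental.chat_models import Llama2Chat

llm = LlamaCpp(
    model_path="models/llama-2-7b-chat.Q4_K_M.gguf",  # placeholder path
    temperature=0.1,
    max_tokens=512,
    verbose=False,
)

# Llama2Chat formats chat messages into the Llama-2 [INST] prompt convention
# before handing them to the underlying completion model.
model = Llama2Chat(llm=llm)

Llama2Chat (from langchain_experimental.chat_models) only handles prompt formatting; generation is still performed by the wrapped LlamaCpp instance, which is why the raw LLM is passed in as llm=llm.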