Update app.py
app.py
CHANGED
@@ -52,7 +52,7 @@ def create_retriever_from_chroma(vectorstore_path="docs/chroma/", search_type='m
     # Load documents from the specified data path
     loader = DirectoryLoader('./data/', glob="./*.txt", loader_cls=TextLoader)
     docs = loader.load()
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=["\n \n \n", "\n \n", "\n1", "(?<=\. )", " ", ""])
     split_docs = text_splitter.split_documents(docs)
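The new separators list makes RecursiveCharacterTextSplitter prefer breaking on the spaced blank lines and numbered-clause markers found in the law text before falling back to sentence ends, words, and single characters. A minimal standalone sketch of that behavior (the sample text and sizes are illustrative; note that the lookbehind pattern only acts as a regex when is_separator_regex=True is passed, since LangChain otherwise matches separators literally):

from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=450,
    chunk_overlap=20,
    # Tried in order: spaced blank lines, numbered clauses ("\n1"),
    # sentence ends via lookbehind, then single spaces, then characters.
    separators=["\n \n \n", "\n \n", "\n1", "(?<=\. )", " ", ""],
    is_separator_regex=True,  # assumption: required for the lookbehind to act as a regex
)

sample = "1. First provision. Some text. \n \n 2. Second provision. More text."
for chunk in splitter.split_text(sample):
    print(repr(chunk))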
@@ -94,7 +94,7 @@ def main():

    st.markdown("Hi, I am Qwen, a chat model based on Republic of Lithuania law documents. Write your question and press Enter to start the chat.")

-    retriever = create_retriever_from_chroma(vectorstore_path="docs/chroma/", search_type='mmr', k=9, chunk_size=
+    retriever = create_retriever_from_chroma(vectorstore_path="docs/chroma/", search_type='mmr', k=9, chunk_size=450, chunk_overlap=20)
     if user_question := st.text_input("Ask a question about your documents:"):
         handle_userinput(user_question, retriever)
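Only the call site changes here; apart from the loading and splitting lines above, the body of create_retriever_from_chroma is off-screen in this diff. A hedged sketch of what the Chroma/MMR end of such a function might look like (the helper name and embedding backend below are assumptions, not code from this repo):

from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings

def build_mmr_retriever(split_docs, vectorstore_path="docs/chroma/", search_type="mmr", k=9):
    # Embed the chunks and persist them under the Chroma directory.
    embeddings = HuggingFaceEmbeddings()  # model choice not visible in this diff
    vectorstore = Chroma.from_documents(
        documents=split_docs,
        embedding=embeddings,
        persist_directory=vectorstore_path,
    )
    # MMR trades a little raw relevance for diversity; k=9 chunks per query.
    return vectorstore.as_retriever(search_type=search_type, search_kwargs={"k": k})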
@@ -153,12 +153,7 @@ def create_conversational_rag_chain(retriever):
         verbose=False,
     )

-
-    {context}
-
-    Question: {question}
-    """
-    prompt = ChatPromptTemplate.from_template(template)
+    prompt = hub.pull("rlm/rag-prompt")

     rag_chain = prompt | llm | StrOutputParser()
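The hand-written template (its tail, {context} / Question: {question}, is visible above) is replaced by the community RAG prompt pulled from LangChain Hub. That prompt fills "context" and "question" inputs, so retrieval has to be wired in front of it; the diff only shows prompt | llm | StrOutputParser(), so the input map and format_docs below are a conventional sketch, with retriever and llm assumed from the surrounding app code:

from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

prompt = hub.pull("rlm/rag-prompt")  # expects "context" and "question" variables

def format_docs(docs):
    # Collapse the retrieved chunks into one context string.
    return "\n\n".join(d.page_content for d in docs)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
answer = rag_chain.invoke("Your question about the law here")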