Update app.py
app.py
CHANGED
@@ -75,31 +75,10 @@ def create_retriever_from_chroma(vectorstore_path="./docs/chroma/", search_type=
 
     retriever=vectorstore.as_retriever(search_type = search_type, search_kwargs={"k": k})
 
-    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
-
-    llm = llamacpp.LlamaCpp(
-        model_path = "qwen2-0_5b-instruct-q8_0.gguf",
-        seed = 41,
-        n_gpu_layers=0,
-        temperature=0.0,
-        n_ctx=15000,
-        n_batch=2000,
-        max_tokens=1500,
-        repeat_penalty=1.5,
-        last_n_tokens_size = 400,
-        callback_manager=callback_manager,
-        verbose=False,
-    )
-
-    compressor = LLMChainExtractor.from_llm(llm)
 
-    compression_retriever = ContextualCompressionRetriever(
-        base_compressor=compressor,
-        base_retriever=retriever
-    )
 
 
-    return
+    return retriever
 
 
 
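In effect, the commit drops the LlamaCpp-backed contextual-compression wrapping and returns the plain Chroma retriever (the old code also ended in a bare return, so callers previously got None). Below is a minimal sketch of the helper after this change, for orientation only: everything other than the as_retriever call and the final return retriever, including the imports, the embedding backend, how the Chroma store is opened, and the search_type/k defaults truncated in the hunk header, is an assumption rather than code taken from this repository.

    # Sketch only: reconstructs the simplified helper from the context lines in the diff.
    # Imports, embedding model, and default argument values are assumptions.
    from langchain_chroma import Chroma
    from langchain_huggingface import HuggingFaceEmbeddings

    def create_retriever_from_chroma(vectorstore_path="./docs/chroma/", search_type="similarity", k=4):
        # search_type/k defaults are assumed; the hunk header truncates the real signature.
        embeddings = HuggingFaceEmbeddings()  # assumed embedding backend, not visible in the diff
        vectorstore = Chroma(persist_directory=vectorstore_path, embedding_function=embeddings)

        retriever = vectorstore.as_retriever(search_type=search_type, search_kwargs={"k": k})

        # The LLMChainExtractor / ContextualCompressionRetriever wrapping removed by this
        # commit is gone; callers now get the vector-store retriever directly.
        return retriever

With the compression step removed, a caller retrieves raw chunks, e.g. docs = retriever.invoke("some question"), without an extra per-query pass through the local LlamaCpp model.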