Update app.py
Browse files
app.py
CHANGED
@@ -204,24 +204,24 @@ class OptimizedRAGLoader:
|
|
204 |
|
205 |
return retriever_function
|
206 |
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
-
|
214 |
-
# )
|
215 |
-
|
216 |
-
from langchain_openai import ChatOpenAI
|
217 |
-
llm = ChatOpenAI(
|
218 |
-
api_key="sk-***REDACTED***",  # SECURITY: leaked secret removed — revoke this key and load it from an environment variable
|
219 |
-
model_name="GPT-4 Turbo",
|
220 |
-
temperature=0.1,
|
221 |
)
|
222 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
223 |
rag_loader = OptimizedRAGLoader()
|
224 |
-
retriever = rag_loader.get_retriever(k=
|
225 |
|
226 |
# Cache for processed questions
|
227 |
question_cache = {}
|
@@ -354,7 +354,7 @@ def process_question(question: str) -> Iterator[str]:
|
|
354 |
scored_docs = list(zip(scores, context, relevant_docs))
|
355 |
# scored_docs.sort(reverse=True)
|
356 |
scored_docs.sort(key=lambda x: x[0], reverse=True)
|
357 |
-
reranked_docs = [d[2].page_content for d in scored_docs][:
|
358 |
|
359 |
|
360 |
prompt = prompt_template.format_messages(
|
@@ -376,7 +376,7 @@ def process_question(question: str) -> Iterator[str]:
|
|
376 |
sources = list(set([os.path.splitext(source)[0] for source in sources]))
|
377 |
|
378 |
|
379 |
-
sources = [d[2].metadata['source'] for d in scored_docs][:
|
380 |
sources = list(set([os.path.splitext(source)[0] for source in sources]))
|
381 |
|
382 |
|
|
|
204 |
|
205 |
return retriever_function
|
206 |
|
207 |
+
# Initialize components
|
208 |
+
mistral_api_key = os.getenv("mistral_api_key")
|
209 |
+
llm = ChatMistralAI(
|
210 |
+
model="mistral-large-latest",
|
211 |
+
mistral_api_key=mistral_api_key,
|
212 |
+
temperature=0.01,
|
213 |
+
streaming=True,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
214 |
)
|
215 |
|
216 |
+
# from langchain_openai import ChatOpenAI
|
217 |
+
# llm = ChatOpenAI(
|
218 |
+
# api_key="sk-***REDACTED***",  # SECURITY: key redacted — commenting out code does NOT unpublish a secret; revoke it
|
219 |
+
# model_name="GPT-4 Turbo",
|
220 |
+
# temperature=0.1,
|
221 |
+
# )
|
222 |
+
|
223 |
rag_loader = OptimizedRAGLoader()
|
224 |
+
retriever = rag_loader.get_retriever(k=20) # Reduced k for faster retrieval
|
225 |
|
226 |
# Cache for processed questions
|
227 |
question_cache = {}
|
|
|
354 |
scored_docs = list(zip(scores, context, relevant_docs))
|
355 |
# scored_docs.sort(reverse=True)
|
356 |
scored_docs.sort(key=lambda x: x[0], reverse=True)
|
357 |
+
reranked_docs = [d[2].page_content for d in scored_docs][:5]
|
358 |
|
359 |
|
360 |
prompt = prompt_template.format_messages(
|
|
|
376 |
sources = list(set([os.path.splitext(source)[0] for source in sources]))
|
377 |
|
378 |
|
379 |
+
sources = [d[2].metadata['source'] for d in scored_docs][:5]
|
380 |
sources = list(set([os.path.splitext(source)[0] for source in sources]))
|
381 |
|
382 |
|