|
import logging |
|
import numpy as np |
|
from transformers import pipeline |
|
|
|
from config import ConfigConstants |
|
|
|
def retrieve_top_k_documents(vector_store, query, top_k=5):
    """
    Retrieve the top-k most similar documents for a query.

    Parameters:
        vector_store: Vector store exposing a ``similarity_search(query, k)`` method.
        query (str): The user's query string.
        top_k (int): Number of documents to retrieve. Defaults to 5.

    Returns:
        list: The documents returned by the vector store's similarity search.
    """
    documents = vector_store.similarity_search(query, k=top_k)
    # Fix typo ("reterived") and use lazy %-style args so formatting is
    # skipped when INFO logging is disabled.
    logging.info("Top %d documents retrieved for query", top_k)
    return documents
|
|
|
|
|
def rerank_documents(query, documents):
    """
    Re-rank documents using a cross-encoder model.

    The model name is taken from ``ConfigConstants.RE_RANKER_MODEL_NAME``.

    Parameters:
        query (str): The user's query.
        documents (list): List of LangChain Document objects.

    Returns:
        list: Documents sorted by descending ``rerank_score`` stored in
        each document's metadata.
    """
    # Guard: calling the pipeline with an empty batch is pointless (and may
    # error); an empty input simply re-ranks to an empty output.
    if not documents:
        return documents

    # NOTE(review): the pipeline is rebuilt on every call, which reloads the
    # model each time — consider caching it at module level if this is hot.
    reranker = pipeline("text-classification", model=ConfigConstants.RE_RANKER_MODEL_NAME, top_k=1)

    # Cross-encoder input: the query paired with each candidate passage.
    rerank_inputs = [{"text": query, "text_pair": doc.page_content} for doc in documents]

    scores = reranker(rerank_inputs)

    # Attach the model's relevance score to each document's metadata; with
    # top_k=1 each result is a one-element list of {"label", "score"} dicts.
    for doc, score in zip(documents, scores):
        doc.metadata["rerank_score"] = score[0]['score']

    # Highest-scoring documents first; missing scores sort as 0.
    documents = sorted(documents, key=lambda x: x.metadata.get("rerank_score", 0), reverse=True)
    logging.info("Re-ranked documents using a cross-encoder model")

    return documents
|
|
|
|
|
|
|
def retrieve_top_k_documents_manual(vector_store, query, top_k=5):
    """
    Retrieve top-k documents by searching the FAISS index directly, then rerank.

    Parameters:
        vector_store (FAISS): The vector store containing the FAISS index
            and docstore.
        query (str): The user's query string.
        top_k (int): The number of top results to retrieve. Defaults to 5.

    Returns:
        list: Top-k retrieved documents, re-ranked via ``rerank_documents``.
    """
    # Embed the query with the store's own embedding model; FAISS expects a
    # float32 2-D array of shape (n_queries, dim).
    embedding_model = vector_store.embedding_function
    query_vector = embedding_model.embed_query(query)
    query_vector = np.array([query_vector]).astype('float32')

    distances, indices = vector_store.index.search(query_vector, top_k)

    # The docstore mapping is a private attribute; resolve it once outside the
    # loop (it is loop-invariant) and fall back gracefully if absent.
    internal_docstore = getattr(vector_store.docstore, "_dict", None)

    documents = []
    for idx in indices.flatten():
        # FAISS returns -1 for padding when fewer than top_k neighbors exist.
        if idx == -1:
            continue
        doc_id = vector_store.index_to_docstore_id[idx]
        if internal_docstore and doc_id in internal_docstore:
            documents.append(internal_docstore[doc_id])

    # Re-rank the raw FAISS hits with the cross-encoder before returning.
    documents = rerank_documents(query, documents)

    return documents