#####################################################
### DOCUMENT PROCESSOR [ENGINE]
#####################################################
# Jonathan Wang
# ABOUT:
# This project creates an app to chat with PDFs.
# This is the ENGINE, which defines how retrieval,
# node postprocessing, and LLM response synthesis are wired together.
#####################################################
## TODO Board:
#####################################################
## IMPORTS
from __future__ import annotations
import gc
from typing import TYPE_CHECKING, Callable, List, Optional, cast
from llama_index.core.query_engine import CustomQueryEngine
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.settings import Settings
from torch.cuda import empty_cache
if TYPE_CHECKING:
    from llama_index.core.base.response.schema import Response
    from llama_index.core.callbacks import CallbackManager
    from llama_index.core.postprocessor.types import BaseNodePostprocessor
    from llama_index.core.response_synthesizers import BaseSynthesizer
    from llama_index.core.retrievers import BaseRetriever
# Own Modules
#####################################################
## CODE
class RAGQueryEngine(CustomQueryEngine):
    """Custom RAG Query Engine."""

    retriever: BaseRetriever
    response_synthesizer: BaseSynthesizer
    node_postprocessors: Optional[List[BaseNodePostprocessor]] = []
    # def __init__(
    #     self,
    #     retriever: BaseRetriever,
    #     response_synthesizer: Optional[BaseSynthesizer] = None,
    #     node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
    #     callback_manager: Optional[CallbackManager] = None,
    # ) -> None:
    #     self._retriever = retriever
    #     # callback_manager = (
    #     #     callback_manager
    #     #     Settings.callback_manager
    #     # )
    #     # llm = llm or Settings.llm
    #     self._response_synthesizer = response_synthesizer or get_response_synthesizer(
    #         # llm=llm,
    #         # service_context=service_context,
    #         # callback_manager=callback_manager,
    #     )
    #     self._node_postprocessors = node_postprocessors or []
    #     self._metadata_mode = metadata_mode
    #     for node_postprocessor in self._node_postprocessors:
    #         node_postprocessor.callback_manager = callback_manager
    #     super().__init__(callback_manager=callback_manager)
    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "RAGQueryEngine"

    # taken from LlamaIndex's RetrieverQueryEngine:
    # https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/query_engine/retriever_query_engine.py#L134
    def _apply_node_postprocessors(
        self, nodes: list[NodeWithScore], query_bundle: QueryBundle
    ) -> list[NodeWithScore]:
        if self.node_postprocessors is None:
            return nodes
        for node_postprocessor in self.node_postprocessors:
            nodes = node_postprocessor.postprocess_nodes(
                nodes, query_bundle=query_bundle
            )
        return nodes

    def retrieve(self, query_bundle: QueryBundle) -> list[NodeWithScore]:
        nodes = self.retriever.retrieve(query_bundle)
        return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)

    async def aretrieve(self, query_bundle: QueryBundle) -> list[NodeWithScore]:
        nodes = await self.retriever.aretrieve(query_bundle)
        return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
    def custom_query(self, query_str: str) -> Response:
        # Convert the query string into a query bundle.
        query_bundle = QueryBundle(query_str=query_str)
        nodes = self.retrieve(query_bundle)  # also applies node postprocessors.
        response_obj = self.response_synthesizer.synthesize(query_bundle, nodes)
        # Free GPU and Python memory between queries.
        empty_cache()
        gc.collect()
        # Response is only imported under TYPE_CHECKING, so cast with a string
        # forward reference to avoid a NameError at runtime.
        return cast("Response", response_obj)
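
# Note: the node_postprocessors hook accepts any llama_index
# BaseNodePostprocessor. Illustrative sketch only (the retriever and
# synthesizer names below are assumptions, not part of this module):
#
#     from llama_index.core.postprocessor import SimilarityPostprocessor
#
#     engine = RAGQueryEngine(
#         retriever=my_retriever,                # hypothetical retriever
#         response_synthesizer=my_synthesizer,   # hypothetical synthesizer
#         node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.7)],
#     )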
# @st.cache_resource  # none of these arguments are hashable, so the engine can't be cached this way :(
def get_engine(
    retriever: BaseRetriever,
    response_synthesizer: BaseSynthesizer,
    node_postprocessors: list[BaseNodePostprocessor] | None = None,
    callback_manager: CallbackManager | None = None,
) -> RAGQueryEngine:
    """Build a RAGQueryEngine from its retriever, synthesizer, and postprocessors."""
    return RAGQueryEngine(
        retriever=retriever,
        response_synthesizer=response_synthesizer,
        node_postprocessors=node_postprocessors,
        callback_manager=callback_manager or Settings.callback_manager,
    )
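
# Usage sketch (illustrative only): one plausible way to wire the engine
# together with llama_index defaults. The PDF folder, top_k, and query below
# are assumptions, not part of this module.
#
#     from llama_index.core import (
#         SimpleDirectoryReader,
#         VectorStoreIndex,
#         get_response_synthesizer,
#     )
#
#     docs = SimpleDirectoryReader("./pdfs").load_data()  # hypothetical PDF folder
#     index = VectorStoreIndex.from_documents(docs)
#     engine = get_engine(
#         retriever=index.as_retriever(similarity_top_k=5),
#         response_synthesizer=get_response_synthesizer(),
#     )
#     print(engine.query("What is this document about?"))
#
# For the caching note above: one workaround in the Streamlit app is to keep
# the built engine in st.session_state instead of st.cache_resource, e.g.
#     if "engine" not in st.session_state:
#         st.session_state["engine"] = engine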