# Define the tools available to the agent
from typing import Tuple

from llama_index.core import get_response_synthesizer
from llama_index.core.agent import ReActAgent
from llama_index.core.indices.base import BaseIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata


def from_index(index: Tuple[str, BaseIndex]) -> Tuple[str, QueryEngineTool]:
    """Build a named QueryEngineTool from a (name, index) pair.

    Optional response modes:
    - refine: one LLM call per retrieved node
    - compact: fewer LLM calls than refine (chunks are packed into each prompt)
    - tree_summarize: tree-based summarization
    - simple_summarize: summarize all chunks in a single call
    - context_only: returns a concatenated string of all text chunks
    - accumulate: good when you need to run the same query separately against each text chunk
    - compact_accumulate: accumulate with fewer LLM calls
    """
    index_name, vector_index = index
    response_synthesizer = get_response_synthesizer(response_mode="compact")
    query_engine = vector_index.as_query_engine(
        response_synthesizer=response_synthesizer,
        similarity_top_k=3,
    )
    tool = QueryEngineTool(
        query_engine=query_engine,
        metadata=ToolMetadata(
            name=index_name,
            description=f"Useful for questions related to specific aspects of {index_name}",
        ),
    )
    return (index_name, tool)


def from_agent(agent: ReActAgent) -> QueryEngineTool:
    """Wrap an existing agent as a tool so another agent can call it."""
    tool = QueryEngineTool(
        query_engine=agent,
        metadata=ToolMetadata(
            name="agent",
            description="This is an agent specialized in the topic: agent",
        ),
    )
    return tool
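

# --- Usage sketch (an assumption, not part of the original file) ---
# Shows how the two helpers compose: each named index becomes a QueryEngineTool,
# a sub-agent routes between those tools, and from_agent() wraps that sub-agent
# so a top-level ReActAgent can call it. The document texts, index names, and
# the globally configured LLM/embedding model are hypothetical placeholders.
if __name__ == "__main__":
    from llama_index.core import Document, VectorStoreIndex

    # Two tiny in-memory indices (assumes an LLM and embedding model are
    # already configured on llama_index.core.Settings).
    report_index = VectorStoreIndex.from_documents([Document(text="Q3 revenue grew 12%.")])
    faq_index = VectorStoreIndex.from_documents([Document(text="Refunds take 5 business days.")])

    # Turn each (name, index) pair into a query-engine tool.
    tools = [from_index(pair)[1] for pair in [("report", report_index), ("faq", faq_index)]]

    # A sub-agent that picks between the per-index tools.
    sub_agent = ReActAgent.from_tools(tools, verbose=True)

    # Wrap the sub-agent itself as a tool for a higher-level agent.
    top_agent = ReActAgent.from_tools([from_agent(sub_agent)], verbose=True)
    print(top_agent.chat("What does the report say about Q3 revenue?"))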