File size: 1,598 Bytes
318db6e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# 定义可使用工具
from llama_index.core.indices.base import BaseIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.agent import ReActAgent
from llama_index.core import get_response_synthesizer
from typing import List, Tuple


def from_index(index: Tuple[str, BaseIndex]) -> Tuple[str, QueryEngineTool]:
    """Wrap a named index as a query-engine tool.

    Args:
        index: A ``(name, index)`` pair. The name is reused as the tool
            name and inside the tool's description.

    Returns:
        A ``(name, tool)`` pair where ``tool`` queries the given index.

    Available response modes for the synthesizer:
        - refine: one LLM call per node
        - compact: fewer LLM calls compared to refine
        - tree_summarize: tree-based summarization
        - simple_summarize: summarize all chunks
        - context_only: returns a concatenated string of all text chunks
        - accumulate: good when you need to run the same query separately
          against each text chunk
        - compact_accumulate: accumulate with fewer LLM calls
    """
    index_name, vector_index = index
    response_synthesizer = get_response_synthesizer(response_mode="compact")
    # BUG FIX: the keyword was misspelled "similiarity_top_k" and silently
    # ignored via **kwargs, so retrieval was never limited to the top 3
    # nodes. The correct LlamaIndex parameter is "similarity_top_k".
    query_engine = vector_index.as_query_engine(
        response_synthesizer=response_synthesizer,
        similarity_top_k=3,
    )
    tool = QueryEngineTool(
        query_engine=query_engine,
        metadata=ToolMetadata(
            name=index_name,
            description=f"Useful for questions related to specific aspects of {index_name}",
        ),
    )
    return (index_name, tool)
    
def from_agent(agent: ReActAgent) -> QueryEngineTool:
    """Wrap a ReAct agent as a single query-engine tool.

    BUG FIX: the return annotation previously claimed
    ``List[QueryEngineTool]`` even though a single tool is returned;
    the annotation is corrected to match the actual return value.

    Args:
        agent: The agent to expose as a tool.

    Returns:
        A ``QueryEngineTool`` whose query engine is the given agent.
    """
    # NOTE(review): the name/description are hard-coded; the description is
    # read by the routing LLM, so its wording matters. Spelling and grammar
    # fixed ("a agent specilized" -> "an agent specialized").
    tool = QueryEngineTool(
        query_engine=agent,
        metadata=ToolMetadata(
            name="agent",
            description="This is an agent specialized in the topic: agent",
        ),
    )
    return tool