File size: 1,636 Bytes
db7e2f6
 
 
 
 
 
 
 
34d3a67
db7e2f6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5184c29
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import logging
import time
from generator.generate_response import generate_response
from retriever.retrieve_documents import retrieve_top_k_documents
from generator.compute_metrics import get_metrics
from generator.extract_attributes import extract_attributes

def retrieve_and_generate_response(gen_llm, vector_store, query):
    """Retrieve relevant documents for *query* and generate an LLM response.

    Args:
        gen_llm: Generation LLM client; must expose a ``name`` attribute
            (used for logging) and be accepted by ``generate_response``.
        vector_store: Vector store searched by ``retrieve_top_k_documents``.
        query: Natural-language query string.

    Returns:
        Tuple of ``(response, source_docs)`` as produced by
        ``generate_response``.
    """
    # Lazy %-style args so formatting is skipped when the level is disabled.
    logging.info("Query: %s", query)

    # Step 1: Retrieve the top-k relevant documents for the given query.
    relevant_docs = retrieve_top_k_documents(vector_store, query, top_k=5)
    logging.debug("Relevant documents retrieved: %d", len(relevant_docs))

    # Step 2: Generate a response using the generation LLM, grounded in the
    # retrieved documents.
    response, source_docs = generate_response(gen_llm, vector_store, query, relevant_docs)

    logging.info("Response from LLM (%s): %s", gen_llm.name, response)

    return response, source_docs

def generate_metrics(val_llm, response, source_docs, query, time_to_wait):
    """Extract attributes from a generated response and compute its metrics.

    Sleeps for *time_to_wait* seconds first to avoid hitting the validation
    LLM's rate limit.

    Args:
        val_llm: Validation LLM client passed to ``extract_attributes``.
        response: Generated answer text to evaluate.
        source_docs: Source documents the response was grounded in.
        query: Original query string.
        time_to_wait: Seconds to sleep before calling the validation LLM.

    Returns:
        Tuple of ``(attributes, metrics)`` where ``attributes`` comes from
        ``extract_attributes`` and ``metrics`` from ``get_metrics``.
    """
    # Throttle: pause before the validation call to stay under the rate limit.
    time.sleep(time_to_wait)

    # Step 3: Extract attributes and total sentence count for the query.
    logging.info("Extracting attributes through validation LLM")
    attributes, total_sentences = extract_attributes(val_llm, query, source_docs, response)
    logging.info("Extracted attributes successfully")

    # Step 4: Compute metrics from the extracted attributes.
    metrics = get_metrics(attributes, total_sentences)

    return attributes, metrics