Gourisankar Padihary committed on
Commit b1b2c27 · 1 Parent(s): 5b18a9a

Calculation of context_relevance

generator/compute_metrics.py CHANGED
@@ -1,11 +1,11 @@
-def compute_metrics(attributes):
+def compute_metrics(attributes, total_sentences):
     # Extract relevant information from attributes
     all_relevant_sentence_keys = attributes.get("all_relevant_sentence_keys", [])
     all_utilized_sentence_keys = attributes.get("all_utilized_sentence_keys", [])
     sentence_support_information = attributes.get("sentence_support_information", [])
-
+
     # Compute Context Relevance
-    context_relevance = len(all_relevant_sentence_keys) / len(sentence_support_information) if sentence_support_information else 0
+    context_relevance = len(all_relevant_sentence_keys) / total_sentences if total_sentences else 0
 
     # Compute Context Utilization
     context_utilization = len(all_utilized_sentence_keys) / len(sentence_support_information) if sentence_support_information else 0
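
The change makes the relevance denominator the total number of source-document sentences rather than the number of support entries the model happened to return. A hypothetical call illustrates the effect; the attribute values below are purely illustrative, and the function's return value is not shown in the hunk, so assigning it to metrics simply mirrors how main.py uses it:

# Hypothetical inputs, for illustration only; real attributes come from the LLM.
sample_attributes = {
    "all_relevant_sentence_keys": ["0a", "0c", "1b"],   # 3 relevant document sentences
    "all_utilized_sentence_keys": ["0a", "1b"],         # 2 of them used in the answer
    "sentence_support_information": [{}, {}, {}, {}],   # 4 support entries from the LLM
}

# With 10 total document sentences, context_relevance is now 3 / 10 = 0.3 instead of
# 3 / 4; context_utilization is still 2 / 4 = 0.5, divided by the support entries.
metrics = compute_metrics(sample_attributes, total_sentences=10)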
generator/extract_attributes.py CHANGED
@@ -12,9 +12,12 @@ def extract_attributes(question, relevant_docs, response):
     formatted_documents = apply_sentence_keys_documents(relevant_docs)
     formatted_responses = apply_sentence_keys_response(response)
 
+    # Calculate the total number of sentences from formatted_documents
+    total_sentences = sum(len(doc) for doc in formatted_documents)
+
     attribute_prompt = create_prompt(formatted_documents, question, formatted_responses)
 
     # Instead of using BaseMessage, pass the formatted prompt directly to invoke
     result = llm.invoke(attribute_prompt)
 
-    return result
+    return result, total_sentences
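
The new total_sentences count relies on len(doc) giving the number of sentences in each formatted document. A minimal sketch of the assumed shape of formatted_documents follows; the real structure is produced by apply_sentence_keys_documents elsewhere in the repo and is not shown in this diff:

# Assumed shape: one sentence-keyed mapping per source document.
formatted_documents = [
    {"0a": "First sentence of doc 0.", "0b": "Second sentence of doc 0."},
    {"1a": "Only sentence of doc 1."},
]

# len(doc) counts the sentence entries in each document, so the sum is the
# total number of document sentences used as the relevance denominator.
total_sentences = sum(len(doc) for doc in formatted_documents)  # -> 3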
main.py CHANGED
@@ -46,11 +46,11 @@ def main():
     logging.info("Response generated")
 
     # Print the response
-    print(f"Response from LLM: {response}")
+    logging.info(f"Response from LLM: {response}")
     #print(f"Source Documents: {source_docs}")
 
     # Valuations : Extract attributes from the response and source documents
-    attributes = extract_attributes(sample_question, source_docs, response)
+    attributes, total_sentences = extract_attributes(sample_question, source_docs, response)
 
     # Only proceed if the content is not empty
     if attributes.content:
@@ -65,7 +65,7 @@ def main():
         print(json.dumps(result_json, indent=2))
 
         # Compute metrics using the extracted attributes
-        metrics = compute_metrics(result_json)
+        metrics = compute_metrics(result_json, total_sentences)
         print(metrics)
     except json.JSONDecodeError as e:
         logging.error(f"JSONDecodeError: {e}")