|
import json
|
|
import logging
|
|
|
|
def compute_metrics(attributes, total_sentences):
    """Derive RAG evaluation metrics from a judgment-attributes dict.

    Args:
        attributes: dict that may contain ``all_relevant_sentence_keys``,
            ``all_utilized_sentence_keys`` (lists of sentence keys) and
            ``sentence_support_information`` (list of dicts with a
            ``fully_supported`` bool). Missing keys default to empty lists.
        total_sentences: total number of context sentences; denominator for
            the relevance/utilization ratios.

    Returns:
        dict with "Context Relevance", "Context Utilization",
        "Completeness Score" (floats, 0 when a denominator is empty) and
        "Adherence" (bool; True when every support entry is fully supported,
        vacuously True for an empty list).
    """
    relevant = attributes.get("all_relevant_sentence_keys", [])
    utilized = attributes.get("all_utilized_sentence_keys", [])
    support_info = attributes.get("sentence_support_information", [])

    # Ratios fall back to 0 rather than dividing by zero.
    if total_sentences:
        context_relevance = len(relevant) / total_sentences
        context_utilization = len(utilized) / total_sentences
    else:
        context_relevance = 0
        context_utilization = 0

    relevant_set = set(relevant)
    utilized_set = set(utilized)
    if relevant_set:
        completeness_score = len(relevant_set & utilized_set) / len(relevant_set)
    else:
        completeness_score = 0

    adherence = all(entry.get("fully_supported", False) for entry in support_info)

    return {
        "Context Relevance": context_relevance,
        "Context Utilization": context_utilization,
        "Completeness Score": completeness_score,
        "Adherence": adherence,
    }
|
|
|
|
def get_metrics(attributes, total_sentences):
    """Extract the JSON object embedded in an LLM response and compute metrics.

    Args:
        attributes: response object whose ``.content`` attribute holds text
            containing a JSON object, possibly surrounded by extra prose.
        total_sentences: total number of context sentences, forwarded to
            ``compute_metrics`` as the ratio denominator.

    Returns:
        The metrics dict from ``compute_metrics``, or ``None`` when the
        content is empty, contains no brace-delimited JSON object, or the
        extracted text fails to parse. (The original returned None
        implicitly on these paths; now explicit.)
    """
    if not attributes.content:
        return None

    result_content = attributes.content
    json_start = result_content.find("{")
    json_end = result_content.rfind("}") + 1

    # Guard: without both braces the slice would be garbage; fail early
    # instead of relying on the JSONDecodeError below.
    if json_start == -1 or json_end == 0:
        logging.error("No JSON object found in content")
        return None

    try:
        # Keep the try body minimal — only json.loads can raise here.
        result_json = json.loads(result_content[json_start:json_end])
    except json.JSONDecodeError as e:
        logging.error(f"JSONDecodeError: {e}")
        return None

    metrics = compute_metrics(result_json, total_sentences)
    logging.info(metrics)
    return metrics
|
|
|
|
def get_attributes_text(attributes):
    """Render the JSON judgment embedded in ``attributes.content`` as markdown.

    Args:
        attributes: response object whose ``.content`` attribute holds text
            containing a JSON object, possibly surrounded by extra prose.

    Returns:
        A formatted markdown string describing the judgment, or an error
        message string ("An error occurred while extracting attributes: ...")
        if parsing/formatting fails.
    """
    try:
        result_content = attributes.content

        json_start = result_content.find("{")
        json_end = result_content.rfind("}") + 1
        json_str = result_content[json_start:json_end]
        result_json = json.loads(json_str)

        relevance_explanation = result_json.get("relevance_explanation", "N/A")
        all_relevant_sentence_keys = result_json.get("all_relevant_sentence_keys", [])
        overall_supported_explanation = result_json.get("overall_supported_explanation", "N/A")
        overall_supported = result_json.get("overall_supported", "N/A")
        sentence_support_information = result_json.get("sentence_support_information", [])
        all_utilized_sentence_keys = result_json.get("all_utilized_sentence_keys", [])

        attributes_text = "Attributes:\n"
        # BUG FIX: was `=` (not `+=`), which silently discarded the
        # "Attributes:\n" header assigned on the previous line.
        attributes_text += f"### Relevance Explanation:\n{relevance_explanation}\n\n"
        attributes_text += f"### All Relevant Sentence Keys:\n{', '.join(all_relevant_sentence_keys)}\n\n"
        attributes_text += f"### Overall Supported Explanation:\n{overall_supported_explanation}\n\n"
        attributes_text += f"### Overall Supported:\n{overall_supported}\n\n"
        attributes_text += "### Sentence Support Information:\n"
        for info in sentence_support_information:
            attributes_text += f"- Response Sentence Key: {info.get('response_sentence_key', 'N/A')}\n"
            attributes_text += f"  Explanation: {info.get('explanation', 'N/A')}\n"
            attributes_text += f"  Supporting Sentence Keys: {', '.join(info.get('supporting_sentence_keys', []))}\n"
            attributes_text += f"  Fully Supported: {info.get('fully_supported', 'N/A')}\n"
        attributes_text += f"\n### All Utilized Sentence Keys:\n{', '.join(all_utilized_sentence_keys)}"

        return attributes_text

    except Exception as e:
        # Boundary handler: this function feeds a display surface, so degrade
        # to an error string rather than propagate.
        logging.error(f"Error extracting attributes: {e}")
        return f"An error occurred while extracting attributes: {e}"