Upload 7 files
- generator/compute_metrics.py (+83 -83)
- generator/compute_rmse_auc_roc_metrics.py (+74 -74)
- generator/create_prompt.py (+103 -103)
- generator/extract_attributes.py (+25 -25)
- generator/generate_metrics.py (+1 -1)
- generator/generate_response.py (+17 -17)
generator/compute_metrics.py
CHANGED
@@ -1,84 +1,84 @@

import json
import logging


def compute_metrics(attributes, total_sentences):
    # Extract relevant information from attributes
    all_relevant_sentence_keys = attributes.get("all_relevant_sentence_keys", [])
    all_utilized_sentence_keys = attributes.get("all_utilized_sentence_keys", [])
    sentence_support_information = attributes.get("sentence_support_information", [])

    # Compute Context Relevance: fraction of context sentences judged relevant
    context_relevance = len(all_relevant_sentence_keys) / total_sentences if total_sentences else 0

    # Compute Context Utilization: fraction of context sentences actually used
    context_utilization = len(all_utilized_sentence_keys) / total_sentences if total_sentences else 0

    # Compute Completeness score: share of relevant sentences that were utilized
    Ri = set(all_relevant_sentence_keys)
    Ui = set(all_utilized_sentence_keys)

    completeness_score = len(Ri & Ui) / len(Ri) if len(Ri) else 0

    # Compute Adherence: true only if every response sentence is fully supported
    adherence = all(info.get("fully_supported", False) for info in sentence_support_information)
    #adherence = 1 if all(info.get("fully_supported", False) for info in sentence_support_information) else 0

    return {
        "Context Relevance": context_relevance,
        "Context Utilization": context_utilization,
        "Completeness Score": completeness_score,
        "Adherence": adherence
    }


def get_metrics(attributes, total_sentences):
    if attributes.content:
        try:
            result_content = attributes.content  # Access the content attribute
            # Extract the JSON part from the result_content
            json_start = result_content.find("{")
            json_end = result_content.rfind("}") + 1
            json_str = result_content[json_start:json_end]
            result_json = json.loads(json_str)
            # Compute metrics using the extracted attributes
            metrics = compute_metrics(result_json, total_sentences)
            logging.info(metrics)

            return metrics
        except json.JSONDecodeError as e:
            logging.error(f"JSONDecodeError: {e}")


def get_attributes_text(attributes):
    try:
        result_content = attributes.content  # Access the content attribute
        # Extract the JSON part from the result_content
        json_start = result_content.find("{")
        json_end = result_content.rfind("}") + 1
        json_str = result_content[json_start:json_end]
        result_json = json.loads(json_str)

        # Extract the required fields from the JSON
        relevance_explanation = result_json.get("relevance_explanation", "N/A")
        all_relevant_sentence_keys = result_json.get("all_relevant_sentence_keys", [])
        overall_supported_explanation = result_json.get("overall_supported_explanation", "N/A")
        overall_supported = result_json.get("overall_supported", "N/A")
        sentence_support_information = result_json.get("sentence_support_information", [])
        all_utilized_sentence_keys = result_json.get("all_utilized_sentence_keys", [])

        # Format the attributes for display
        attributes_text = "Attributes:\n"
        attributes_text += f"### Relevance Explanation:\n{relevance_explanation}\n\n"
        attributes_text += f"### All Relevant Sentence Keys:\n{', '.join(all_relevant_sentence_keys)}\n\n"
        attributes_text += f"### Overall Supported Explanation:\n{overall_supported_explanation}\n\n"
        attributes_text += f"### Overall Supported:\n{overall_supported}\n\n"
        attributes_text += "### Sentence Support Information:\n"
        for info in sentence_support_information:
            attributes_text += f"- Response Sentence Key: {info.get('response_sentence_key', 'N/A')}\n"
            attributes_text += f"  Explanation: {info.get('explanation', 'N/A')}\n"
            attributes_text += f"  Supporting Sentence Keys: {', '.join(info.get('supporting_sentence_keys', []))}\n"
            attributes_text += f"  Fully Supported: {info.get('fully_supported', 'N/A')}\n"
        attributes_text += f"\n### All Utilized Sentence Keys:\n{', '.join(all_utilized_sentence_keys)}"

        return attributes_text
    except Exception as e:
        logging.error(f"Error extracting attributes: {e}")
        return f"An error occurred while extracting attributes: {e}"
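For reference, a quick worked example of compute_metrics on a hand-built attributes dict (hypothetical values, not taken from any dataset): with 4 context sentences, 2 judged relevant, 1 utilized, and every response sentence fully supported:

sample_attributes = {
    "all_relevant_sentence_keys": ["0a", "0b"],   # 2 of 4 context sentences relevant
    "all_utilized_sentence_keys": ["0a"],         # 1 of 4 context sentences used
    "sentence_support_information": [
        {"response_sentence_key": "a", "fully_supported": True},
    ],
}
print(compute_metrics(sample_attributes, total_sentences=4))
# {'Context Relevance': 0.5, 'Context Utilization': 0.25,
#  'Completeness Score': 0.5, 'Adherence': True}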
generator/compute_rmse_auc_roc_metrics.py
CHANGED
@@ -1,74 +1,74 @@

from sklearn.metrics import roc_auc_score, root_mean_squared_error
from generator.generate_metrics import generate_metrics, retrieve_and_generate_response
import logging


def compute_rmse_auc_roc_metrics(gen_llm, val_llm, dataset, vector_store, num_question):

    # Lists to accumulate ground truths and predictions for RMSE and AUC-ROC computation
    all_ground_truth_relevance = []
    all_predicted_relevance = []

    all_ground_truth_utilization = []
    all_predicted_utilization = []

    all_ground_truth_adherence = []
    all_predicted_adherence = []

    # For each question in the dataset, get the metrics
    for i, document in enumerate(dataset):
        # Extract ground truth metrics from the dataset
        ground_truth_relevance = dataset[i]['relevance_score']
        ground_truth_utilization = dataset[i]['utilization_score']
        ground_truth_adherence = 1 if dataset[i]['adherence_score'] else 0

        query = document['question']
-        logging.info(f
+        logging.info(f"Query number: {i + 1}")
        # Call generate_metrics for each query
        response, source_docs = retrieve_and_generate_response(gen_llm, vector_store, query)
        attributes, metrics = generate_metrics(val_llm, response, source_docs, query, 25)

        # Extract predicted metrics (ensure these are continuous if possible)
        predicted_relevance = metrics.get('Context Relevance', 0) if metrics else 0
        predicted_utilization = metrics.get('Context Utilization', 0) if metrics else 0
        predicted_adherence = 1 if (metrics and metrics.get('Adherence', False)) else 0

        # === Handle Continuous Inputs for RMSE ===
        all_ground_truth_relevance.append(ground_truth_relevance)
        all_predicted_relevance.append(predicted_relevance)
        all_ground_truth_utilization.append(ground_truth_utilization)
        all_predicted_utilization.append(predicted_utilization)

        all_ground_truth_adherence.append(ground_truth_adherence)
        all_predicted_adherence.append(predicted_adherence)

        if i == num_question:
            break

    # === Compute RMSE & AUC-ROC for the Entire Dataset ===
    try:
        logging.info(f"All Ground Truth Relevance: {all_ground_truth_relevance}")
        logging.info(f"All Predicted Relevance: {all_predicted_relevance}")
        relevance_rmse = root_mean_squared_error(all_ground_truth_relevance, all_predicted_relevance)
    except ValueError:
        relevance_rmse = None

    try:
        logging.info(f"All Ground Truth Utilization: {all_ground_truth_utilization}")
        logging.info(f"All Predicted Utilization: {all_predicted_utilization}")
        utilization_rmse = root_mean_squared_error(all_ground_truth_utilization, all_predicted_utilization)
    except ValueError:
        utilization_rmse = None

    try:
        logging.info(f"All Ground Truth Adherence: {all_ground_truth_adherence}")
        logging.info(f"All Predicted Adherence: {all_predicted_adherence}")
        adherence_auc = roc_auc_score(all_ground_truth_adherence, all_predicted_adherence)
    except ValueError:
        adherence_auc = None

    logging.info(f"Relevance RMSE score: {relevance_rmse}")
    logging.info(f"Utilization RMSE score: {utilization_rmse}")
    logging.info(f"Overall Adherence AUC-ROC: {adherence_auc}")

    return relevance_rmse, utilization_rmse, adherence_auc
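As a sanity check on the aggregation above, the two sklearn calls behave as follows on toy lists (made-up numbers; note that root_mean_squared_error requires scikit-learn 1.4 or newer):

from sklearn.metrics import roc_auc_score, root_mean_squared_error

# RMSE over continuous scores: sqrt(mean((truth - prediction)**2))
print(root_mean_squared_error([0.5, 0.25], [0.5, 0.75]))  # ~0.354

# AUC-ROC over binary adherence labels vs. binarized predictions
print(roc_auc_score([1, 0, 1, 1], [1, 0, 1, 0]))          # ~0.833

roc_auc_score raises ValueError when the ground-truth list contains only one class, which is exactly the case the try/except blocks above guard against.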
generator/create_prompt.py
CHANGED
@@ -1,104 +1,104 @@

from langchain.docstore.document import Document


def create_prompt(documents, question, response):
    prompt = f"""I asked someone to answer a question based on one or more documents. Your task is to review their response and assess whether or not each sentence in that response is supported by text in the documents. If supported, identify which sentences in the documents provide that support. Additionally, identify which documents contain useful information for answering the question, and which documents the answer was sourced from.

Here are the documents, each of which is split into sentences. Alongside each sentence is an associated key, such as '0a.' or '0b.', that you can use to refer to it:
{documents}

The question was:
{question}

Here is their response, split into sentences with associated keys:
{response}

Provide a JSON response with an answer, relevance_explanation, all_relevant_sentence_keys, overall_supported_explanation, overall_supported, sentence_support_information (a nested JSON array whose objects have the fields response_sentence_key, explanation, supporting_sentence_keys, and fully_supported), and all_utilized_sentence_keys. The definitions are below.

You must respond with a JSON object matching this schema:

{{
  "answer": "string",
  "relevance_explanation": "string",
  "all_relevant_sentence_keys": ["string"],
  "overall_supported_explanation": "string",
  "overall_supported": "boolean",
  "sentence_support_information": [
    {{
      "response_sentence_key": "string",
      "explanation": "string",
      "supporting_sentence_keys": ["string"],
      "fully_supported": "boolean"
    }},
  ],
  "all_utilized_sentence_keys": ["string"]
}}

The relevance_explanation field is a string explaining which documents
contain useful information for answering the question. Provide a step-by-step
breakdown of information provided in the documents and how it is useful for
answering the question.

The all_relevant_sentence_keys field is a list of all document sentence keys
(e.g. '0a') that are relevant to the question. Include every sentence that is
useful and relevant to the question, even if it was not used in the response,
or if only parts of the sentence are useful. Ignore the provided response when
making this judgement and base your judgement solely on the provided documents
and question. Omit sentences that, if removed from the document, would not
impact someone's ability to answer the question.

The overall_supported_explanation field is a string explaining why the response
*as a whole* is or is not supported by the documents. In this field, provide a
step-by-step breakdown of the claims made in the response and the support (or
lack thereof) for those claims in the documents. Begin by assessing each claim
separately, one by one; don't make any remarks about the response as a whole
until you have assessed all the claims in isolation.

The overall_supported field is a boolean indicating whether the response as a
whole is supported by the documents. This value should reflect the conclusion
you drew at the end of your step-by-step breakdown in overall_supported_explanation.
In the sentence_support_information field, provide information about the support
*for each sentence* in the response.
The sentence_support_information field is a list of objects, one for each sentence
in the response. Each object MUST have the following fields:
- response_sentence_key: a string identifying the sentence in the response.
This key is the same as the one used in the response above.
- explanation: a string explaining why the sentence is or is not supported by the
documents.
- supporting_sentence_keys: keys (e.g. '0a') of sentences from the documents that
support the response sentence. If the sentence is not supported, this list MUST
be empty. If the sentence is supported, this list MUST contain one or more keys.
In special cases where the sentence is supported, but not by any specific sentence,
you can use the string "supported_without_sentence" to indicate that the sentence
is generally supported by the documents. Consider cases where the sentence is
expressing inability to answer the question due to lack of relevant information in
the provided context as "supported_without_sentence". In cases where the sentence
is making a general statement (e.g. outlining the steps to produce an answer, or
summarizing previously stated sentences, or a transition sentence), use the
string "general". In cases where the sentence is correctly stating a well-known fact,
like a mathematical formula, use the string "well_known_fact". In cases where the
sentence is performing numerical reasoning (e.g. addition, multiplication), use
the string "numerical_reasoning".
- fully_supported: a boolean indicating whether the sentence is fully supported by
the documents.
- This value should reflect the conclusion you drew at the end of your step-by-step
breakdown in explanation.
- If supporting_sentence_keys is an empty list, then fully_supported must be false.
- Otherwise, use fully_supported to clarify whether everything in the response
sentence is fully supported by the document text indicated in supporting_sentence_keys
(fully_supported = true), or whether the sentence is only partially or incompletely
supported by that document text (fully_supported = false).
The all_utilized_sentence_keys field is a list of all sentence keys (e.g. '0a') that
were used to construct the answer. Include every sentence that either directly supported
the answer, or was implicitly used to construct the answer, even if it was not used
in its entirety. Omit sentences that were not used, and could have been removed from
the documents without affecting the answer.
You must respond with a valid JSON string. Use escapes for quotes, e.g. \\", and
newlines, e.g. \\n. Do not write anything before or after the JSON string. Do not
wrap the JSON string in backticks like ``` or ```json.
As a reminder: your task is to review the response and assess which documents contain
useful information pertaining to the question, and how each sentence in the response
is supported by the text in the documents.

"""
    return prompt
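For concreteness, a minimal object satisfying the schema this prompt demands might look like the following (illustrative values only):

minimal_valid = {
    "answer": "Paris is the capital of France.",
    "relevance_explanation": "Sentence 0a states the capital directly.",
    "all_relevant_sentence_keys": ["0a"],
    "overall_supported_explanation": "The single claim in the response is stated verbatim in 0a.",
    "overall_supported": True,
    "sentence_support_information": [
        {
            "response_sentence_key": "a",
            "explanation": "Directly supported by sentence 0a.",
            "supporting_sentence_keys": ["0a"],
            "fully_supported": True,
        },
    ],
    "all_utilized_sentence_keys": ["0a"],
}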
generator/extract_attributes.py
CHANGED
@@ -1,26 +1,26 @@

from generator.create_prompt import create_prompt
from generator.document_utils import apply_sentence_keys_documents, apply_sentence_keys_response


# Function to extract attributes
def extract_attributes(val_llm, question, relevant_docs, response):
    # Old approach: format documents into a string via each Document's `page_content`
    #formatted_documents = "\n".join([f"Doc {i+1}: {doc.page_content}" for i, doc in enumerate(relevant_docs)])
    # Current approach: split the documents and the response into keyed sentences
    formatted_documents = apply_sentence_keys_documents(relevant_docs)
    formatted_responses = apply_sentence_keys_response(response)

    #print(f"Formatted documents : {formatted_documents}")
    # Print the number of sentences in each document
    '''for i, doc in enumerate(formatted_documents):
        num_sentences = len(doc)
        print(f"Document {i} has {num_sentences} sentences.")'''

    # Calculate the total number of sentences from formatted_documents
    total_sentences = sum(len(doc) for doc in formatted_documents)
    #print(f"Total number of sentences {total_sentences}")

    attribute_prompt = create_prompt(formatted_documents, question, formatted_responses)

    # Instead of using BaseMessage, pass the formatted prompt directly to invoke
    result = val_llm.invoke(attribute_prompt)

    return result, total_sentences
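The keying helpers come from generator/document_utils.py, which is not part of this commit. The sum(len(doc) for doc in formatted_documents) above implies each formatted document is a sequence of keyed sentences; a hypothetical stand-in consistent with that shape (the real helper may well differ):

import string

def keyed_sentences(docs):
    # Hypothetical: returns, per document, a list of "key: sentence" strings,
    # using keys like '0a', '0b', '1a' as referenced by the prompt.
    return [
        [f"{i}{letter}: {sentence.strip()}"
         for letter, sentence in zip(string.ascii_lowercase, doc.split("."))
         if sentence.strip()]
        for i, doc in enumerate(docs)
    ]

print(keyed_sentences(["Paris is the capital of France. It lies on the Seine."]))
# [['0a: Paris is the capital of France', '0b: It lies on the Seine']]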
generator/generate_metrics.py
CHANGED
@@ -6,7 +6,7 @@ from generator.compute_metrics import get_metrics
 from generator.extract_attributes import extract_attributes
 
 def retrieve_and_generate_response(gen_llm, vector_store, query):
-    logging.info(f
+    logging.info(f"Query: {query}")
 
     # Step 1: Retrieve relevant documents for given query
     relevant_docs = retrieve_top_k_documents(vector_store, query, top_k=5)
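Pieced together from the call sites in compute_rmse_auc_roc_metrics.py above, a single-query run of the pipeline reads as follows (gen_llm, val_llm, and vector_store are assumed to be constructed elsewhere in the project):

# Generate an answer with the generator LLM, then score it with the validator LLM
response, source_docs = retrieve_and_generate_response(gen_llm, vector_store, query)
attributes, metrics = generate_metrics(val_llm, response, source_docs, query, 25)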
generator/generate_response.py
CHANGED
@@ -1,18 +1,18 @@

from langchain.chains import RetrievalQA


def generate_response(llm, vector_store, question, relevant_docs):
    # Create a retrieval-based question-answering chain; the chain fetches its
    # own documents through the vector store's retriever
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=vector_store.as_retriever(),
        return_source_documents=True
    )
    try:
        result = qa_chain.invoke(question, documents=relevant_docs)
        response = result['result']
        source_docs = result['source_documents']
        return response, source_docs
    except Exception as e:
        print(f"Error during QA chain invocation: {e}")
        raise
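A hypothetical call site, with llm, vector_store, query, and relevant_docs assumed to be built elsewhere in the project. Since RetrievalQA performs its own retrieval through vector_store.as_retriever(), the documents keyword passed to invoke does not appear to feed the chain, so the relevant_docs argument is likely redundant here:

# Placeholder names; generate_response re-raises on failure,
# so the caller decides how to recover.
response, source_docs = generate_response(llm, vector_store, query, relevant_docs)
print(response)
print(f"{len(source_docs)} source documents returned")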