File size: 10,510 Bytes
eca534f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 |
import os
import re
import torch
import string
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import classification_report, confusion_matrix
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class SelfAwareEvaluator():
def __init__(self):
pass
def remove_punctuation(self, input_string):
"""
Remove the punctuation from the input string.
"""
input_string = input_string.strip().lower()
if input_string and input_string[-1] in string.punctuation:
return input_string[:-1]
return input_string
def cut_sentences(self, content):
"""
Cut the content into sentences.
"""
sentences = re.split(r"(\.|\!|\?|。|!|?|\.{6})", content)
return sentences
def cut_sub_string(self, input_string, window_size=5, punctuation=".,?!"):
"""
Cut the input string into sub-strings of a fixed window size.
"""
input_string = input_string.strip().lower()
# If the input string is empty, return an empty list
if len(input_string) < 2:
return [""]
# Remove the punctuation
if input_string[-1] in punctuation:
input_string = input_string[:-1]
# Split the string into words
string_list = input_string.split()
# If the length of the string is less than the window size, return the string
length = len(string_list)
if length <= window_size:
return [input_string]
else:
res = []
for i in range(length - window_size + 1):
sub_string = " ".join(string_list[i: i + window_size])
if sub_string != "" or sub_string != " ":
res.append(sub_string)
return res
def group_cosine_similarity(self, model, tokenizer, sentences1, sentences2):
"""
Calculate the cosine similarity between two groups of sentences.
"""
# Move the model to the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Encode the sentences
encoded1 = tokenizer(sentences1, padding=True, truncation=True, return_tensors="pt").to(device)
encoded2 = tokenizer(sentences2, padding=True, truncation=True, return_tensors="pt").to(device)
# Get the embeddings
embeddings1 = model(**encoded1).pooler_output
embeddings2 = model(**encoded2).pooler_output
# Calculate the cosine similarity
similarities = cosine_similarity(embeddings1.detach().cpu().numpy(), embeddings2.detach().cpu().numpy())
return similarities
def get_unanswerable(self, response, model, tokenizer, threshold=0.75):
"""
Predict whether the response is unanswerable or not.
"""
# List of uncertain words
uncertain_list = [
"The answer is unknown.",
"The answer is uncertain.",
"The answer is unclear.",
"There is no scientific evidence.",
"There is no definitive answer.",
"There is no right answer.",
"There is much debate.",
"There is no known case.",
"There is no concrete answer to this question.",
"There is no public information available.",
"It is impossible to know.",
"It is impossible to answer.",
"It is difficult to predict.",
"It is not known.",
"We do not know.",
"I'm not sure.",
]
uncertain_list = [self.remove_punctuation(_) for _ in uncertain_list]
# Check if the response contains uncertain words
pred_unanswerable = False
response = response.lower()
for uncertain in uncertain_list:
if uncertain in response:
pred_unanswerable = True
# If the response does not contain uncertain words, check the cosine similarity
if pred_unanswerable == False:
# Cut the response into sentences
sub_sen_list = self.cut_sentences(response)
# Cut the sentences into sub-strings
sub_str_list = []
for sub_sen in sub_sen_list:
if len(sub_sen) >= 2:
sub_str_list.extend(self.cut_sub_string(sub_sen))
# Calculate the cosine similarity
if len(sub_str_list) != 0:
similarities = self.group_cosine_similarity(model, tokenizer, sub_str_list, uncertain_list)
else:
similarities = [0]
# Check if the maximum similarity is greater than the threshold
max_uncertainty = np.max(similarities)
# If the maximum similarity is greater than the threshold, predict unanswerable
if max_uncertainty > threshold:
pred_unanswerable = True
return pred_unanswerable
def selfaware_barplot(self, result: dict, fig_path: str = "", save: bool = False):
"""
Create a bar plot of the performance on the SelfAware dataset.
Parameters
----------
result : dict
The evaluation results for the LLM responses on the SelfAware dataset.
fig_path : str
The path to save the figure.
save : bool, optional
Whether to save the figure, by default True.
"""
# Data
unanswerable_as_pos = result["unanswerable_as_pos"]
answerable_as_pos = result["answerable_as_pos"]
# Remove support
unanswerable_as_pos.pop("support", None)
answerable_as_pos.pop("support", None)
# Extract the accuracy values for each topic
metrics = list(unanswerable_as_pos.keys())
unanswerable_values = [round(v, 2) for k, v in unanswerable_as_pos.items()]
answerable_values = [round(v, 2) for k, v in answerable_as_pos.items()]
# Create a new figure
fig, ax = plt.subplots()
# Number of groups
n_groups = len(metrics)
index = np.arange(n_groups)
bar_width = 0.35
# Select two colors from the "rocket" palette
colors = sns.color_palette("rocket", n_colors=10)
color_unanswerable = colors[1]
color_answerable = colors[7]
# Plotting both sets of data
bars1 = ax.bar(index, unanswerable_values, bar_width, label='Unanswerable as Positive', color=color_unanswerable)
bars2 = ax.bar(index + bar_width, answerable_values, bar_width, label='Answerable as Positive', color=color_answerable)
# Adding values on top of each bar
for bar in bars1:
ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height(), f'{bar.get_height():.2f}',
ha='center', va='bottom', color='black', rotation='horizontal')
for bar in bars2:
ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height(), f'{bar.get_height():.2f}',
ha='center', va='bottom', color='black', rotation='horizontal')
# Set x-axis labels
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(metrics)
# Set y-axis limits to accommodate annotations
ax.set_ylim((0, max(unanswerable_values + answerable_values) + 0.1))
# Adding labels and title
ax.set_xlabel("Metrics")
ax.set_ylabel("Performance")
ax.set_title("Performance on SelfAware Dataset")
ax.legend()
if save:
# Save the figure
plt.tight_layout()
plt.savefig(os.path.join(fig_path, "selfaware_barplot.pdf"), format="pdf")
plt.savefig(os.path.join(fig_path, "selfaware_barplot.png"), format="png")
# Return the figure
return fig
def selfaware_cm(self, labels: list, preds: list, fig_path: str = "", save: bool = False):
"""
Create a confusion matrix for the SelfAware dataset.
Parameters
----------
labels : list
The true labels.
preds : list
The predicted labels.
fig_path : str
The path to save the figure.
save : bool, optional
Whether to save the figure, by default True.
"""
# Create a new figure
fig, ax = plt.subplots()
# Compute confusion matrix
cm = sns.heatmap(confusion_matrix(labels, preds), annot=True, fmt="d", cmap="Blues", ax=ax)
# Adding labels and title
plt.xticks(ticks=[0.5, 1.5], labels=["Answerable", "Unanswerable"])
plt.yticks(ticks=[0.5, 1.5], labels=["Answerable", "Unanswerable"])
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.title("Confusion Matrix on SelfAware dataset.")
if save:
# Save the figure
plt.tight_layout()
plt.savefig(os.path.join(fig_path, "selfaware_cm.pdf"), format="pdf")
plt.savefig(os.path.join(fig_path, "selfaware_cm.png"), format="png")
# Return the figure
return fig
def evaluate_selfaware(self, llm_responses):
# Load the model
model_name = "princeton-nlp/sup-simcse-roberta-large"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name).to(device)
# Evaluate the LLM responses
labels, preds = [], []
for item in llm_responses:
# gold label: whether the question is answerable or not.
labels.append(item["label_unanswerable"])
# identify whether the model response answered the question or not.
preds.append(self.get_unanswerable(item["response"], model, tokenizer))
# Change the key names
result = classification_report(labels, preds, output_dict=True, zero_division=0)
# Renaming keys based on the expected output dictionary structure
# Unanswerable as positive class and answerable as negative class
if "True" in result:
result['unanswerable_as_pos'] = result.pop("True")
if "False" in result:
result['answerable_as_pos'] = result.pop('False')
return result, labels, preds
|