Ikala-allen committed on
Commit
5d9145f
·
1 Parent(s): 65757ca

Update relation_extraction.py

Browse files
Files changed (1) hide show
  1. relation_extraction.py +18 -4
relation_extraction.py CHANGED
@@ -123,10 +123,24 @@ class relation_extraction(evaluate.Metric):
123
  # TODO: Download external resources if needed
124
  pass
125
 
126
- def _compute(self, predictions, references, mode, show=False, relation_types=[]):
127
- """Returns the scores"""
128
- # TODO: Compute the different scores of the module
129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  predictions = convert_format(predictions)
131
  references = convert_format(references)
132
 
@@ -207,7 +221,7 @@ class relation_extraction(evaluate.Metric):
207
  scores["ALL"]["Macro_p"] = np.mean([scores[ent_type]["p"] for ent_type in relation_types])
208
  scores["ALL"]["Macro_r"] = np.mean([scores[ent_type]["r"] for ent_type in relation_types])
209
 
210
- if show:
211
  return scores
212
 
213
  return scores["ALL"]
 
123
  # TODO: Download external resources if needed
124
  pass
125
 
126
+ def _compute(self, predictions, references, mode, detailed_scores=False, relation_types=[]):
127
+ """
128
+ This method computes and returns various scoring metrics for the prediction model based on the mode specified, including Precision, Recall, F1-Score and others. It evaluates the model's predictions against the provided reference data.
129
 
130
+ Parameters:
131
+ predictions: A list of predicted relations from the model.
132
+ references: A list of ground-truth or reference relations to compare the predictions against.
133
+ mode: Evaluation mode - 'strict' or 'boundaries'. 'strict' mode takes into account both entity types and their relationships
134
+ while 'boundaries' mode only considers the entity spans of the relationships.
135
+ detailed_scores: Boolean value, if True it returns scores for each relation type specifically,
136
+ if False it returns the overall scores.
137
+ relation_types: A list of relation types to consider while evaluating. If not provided, relation types will be constructed
138
+ from the ground truth or reference data.
139
+
140
+ Returns:
141
+ A dictionary mapping each entity type to its respective scoring metrics such as Precision, Recall, F1 Score.
142
+ """
143
+
144
  predictions = convert_format(predictions)
145
  references = convert_format(references)
146
 
 
221
  scores["ALL"]["Macro_p"] = np.mean([scores[ent_type]["p"] for ent_type in relation_types])
222
  scores["ALL"]["Macro_r"] = np.mean([scores[ent_type]["r"] for ent_type in relation_types])
223
 
224
+ if detailed_scores:
225
  return scores
226
 
227
  return scores["ALL"]