tajik-text-segmentation / annotations_parser.py
sobir-hf's picture
Uploaded data files and parser script
453bed4
raw
history blame
4.37 kB
import os
import re
def parse_annotated_text(text):
# Regular expression pattern to parse YEDDA format
yedda_pattern = re.compile(r'(\[\@(.*?)\#(.*?)\*\])', re.DOTALL)
# This variable will keep track of the number of characters removed
chars_removed = 0
# This will store the positions of the entities in the original text
positions_in_original_text = []
# Buffer to store content without annotations
buffer = []
# Index to track last end position
last_end = 0
# Store labels
labels = []
# Loop through each match
for match in yedda_pattern.finditer(text):
# The entire match
full_match = match.group(0)
# Capture group 2 (entity)
entity = match.group(2)
# Capture group 2 (label)
label = match.group(3)
# Start position of the match in the modified string
start = match.start()
# End position of the match in the modified string
end = match.end()
labels.append(label)
# Append the text before the annotation to the buffer
buffer.append(text[last_end:start])
buffer.append(entity)
# Calculate the start and end positions in the original text
original_start = start - chars_removed
original_end = original_start + len(entity)
# Store the positions
positions_in_original_text.append((original_start, original_end))
# update the chars_removed counter
chars_removed += len(full_match) - len(entity)
# Update last_end
last_end = end
# Append remaining content after the last match
buffer.append(text[last_end:])
# Join buffer parts to get content without annotations
content_without_annotations = "".join(buffer)
return {
'text': content_without_annotations,
'positions': positions_in_original_text,
'labels': labels
}
def preprocess_text(text: str):
    """Normalize whitespace: trim both ends, then collapse runs of
    newlines and runs of spaces into a single occurrence each."""
    collapsed = re.sub(r'\n+', '\n', text.strip())
    return re.sub(r' +', ' ', collapsed)
def load_yedda_annotations(directory):
    """Parse every '.ann' file found directly in *directory*.

    Returns a list with one dict per file, carrying the file name, the
    preprocessed annotated text, the cleaned text, and the parsed entity
    positions and labels (see `parse_annotated_text`).
    """
    annotations = []
    for name in os.listdir(directory):
        # Only YEDDA annotation files are of interest.
        if not name.endswith(".ann"):
            continue
        path = os.path.join(directory, name)
        with open(path, 'r', encoding='utf-8') as handle:
            raw = handle.read()
        # Normalize whitespace before parsing so reported offsets refer
        # to the preprocessed text.
        cleaned = preprocess_text(raw)
        parsed = parse_annotated_text(cleaned)
        annotations.append({
            'file': name,
            'annotated_text': cleaned,
            'text': parsed['text'],
            'positions': parsed['positions'],
            'labels': parsed['labels'],
        })
    return annotations
def convert_to_ann(annotatations):
    """Re-insert YEDDA markup ([@entity#label*]) into cleaned text.

    Inverse of `parse_annotated_text`: expects a dict with 'text',
    'positions' and 'labels' keys and rebuilds the annotated string.
    """
    text = annotatations['text']
    parts = []
    cursor = 0
    for (start, end), label in zip(annotatations['positions'], annotatations['labels']):
        # Plain text up to the entity, then the entity wrapped in markup.
        parts.append(text[cursor:start])
        parts.append(f'[@{text[start:end]}#{label}*]')
        cursor = end
    parts.append(text[cursor:])
    return ''.join(parts)
if __name__ == '__main__':
    directory_path = 'annotations'  # The directory containing .ann files
    annotations = load_yedda_annotations(directory_path)
    counter = 0
    for file_annotation in annotations:
        counter += len(file_annotation['labels'])
        print('File:', file_annotation['file'])
        print('Text[:100]:', repr(file_annotation['text'][:100]))
        print('Number of labels:', len(file_annotation['labels']))
        assert len(file_annotation['labels']) == len(file_annotation['positions'])
        positions = file_annotation['positions']
        # Guard against ZeroDivisionError: a file may contain no annotations.
        if positions:
            avg = sum(end - start for start, end in positions) / len(positions)
            print('Average labeled sentence length:', avg)
        else:
            print('Average labeled sentence length: n/a (no annotations)')
        print('--------------------------------')
    print('Total number of files:', len(annotations))
    print('Total label count:', counter)