# Source snapshot: 6,924 bytes, commit 2b62bc8
# -*- coding: utf-8 -*-
import os
import glob
import codecs
import spacy
def replace_unicode_whitespaces_with_ascii_whitespace(string):
    '''Collapse every run of (Unicode) whitespace in `string` into one ASCII
    space and strip leading/trailing whitespace.'''
    pieces = string.split()
    return ' '.join(pieces)
def get_start_and_end_offset_of_token_from_spacy(token):
    '''Return the (start, end) character offsets of a spaCy token, where
    `end` is exclusive (start + token length).'''
    begin = token.idx
    return begin, begin + len(token)
def get_sentences_and_tokens_from_spacy(text, spacy_nlp):
    '''
    Sentence-split and tokenize `text` with the given spaCy pipeline.

    Parameters:
        text: the raw document string.
        spacy_nlp: a callable (e.g. a loaded spaCy model) mapping the text to
            a parsed document exposing `.sents`.

    Returns a list of sentences; each sentence is a list of dicts with keys
    'start'/'end' (character offsets into `text`, end exclusive) and 'text'.
    Whitespace-only tokens are dropped, and spaces inside a token's text are
    replaced with hyphens so the CoNLL columns stay whitespace-delimited.
    '''
    document = spacy_nlp(text)
    sentences = []
    for span in document.sents:
        sentence_tokens = []
        # Iterate the span's tokens directly instead of re-indexing `document`.
        for token in span:
            # Character offsets of the token within the original text.
            start = token.idx
            end = start + len(token)
            token_text = text[start:end]
            # Skip whitespace-only tokens.  (The original membership test
            # `strip() in ['\n', '\t', ' ', '']` could only ever match '' —
            # strip() removes all edge whitespace — so this is equivalent.)
            if not token_text.strip():
                continue
            # Make sure that the token text does not contain any space
            if ' ' in token_text:
                print("WARNING: the text of the token contains space character, replaced with hyphen\n\t{0}\n\t{1}".format(token_text,
                                                                                                                           token_text.replace(' ', '-')))
                token_text = token_text.replace(' ', '-')
            sentence_tokens.append({'start': start, 'end': end, 'text': token_text})
        sentences.append(sentence_tokens)
    return sentences
def get_entities_from_brat(text_filepath, annotation_filepath, verbose=False):
    '''
    Load a BRAT-annotated document: the raw text plus its entity annotations.

    Parameters:
        text_filepath: path to the `.txt` file.
        annotation_filepath: path to the matching `.ann` standoff file.
        verbose: if True, print the text and each parsed entity.

    Returns (text, entities) where entities is a list of dicts with keys
    'id', 'type', 'start', 'end', 'text'.  Only entity lines (ids starting
    with 'T') are parsed; relations, events and notes are ignored.
    NOTE: discontinuous BRAT spans ("T1 Type 0 5;7 10") are not supported
    and will raise ValueError on int() conversion.
    '''
    # load text
    with codecs.open(text_filepath, 'r', 'UTF-8') as f:
        text = f.read()
    if verbose:
        print("\ntext:\n{0}\n".format(text))
    # parse annotation file
    entities = []
    with codecs.open(annotation_filepath, 'r', 'UTF-8') as f:
        for line in f.read().splitlines():
            anno = line.split()
            # Skip blank lines (previously crashed with IndexError on anno[0]).
            if not anno:
                continue
            id_anno = anno[0]
            # parse entity
            if id_anno[0] == 'T':
                entity = {
                    'id': id_anno,
                    'type': anno[1],
                    'start': int(anno[2]),
                    'end': int(anno[3]),
                    # split() collapsed the whitespace, so rejoin with spaces
                    'text': ' '.join(anno[4:]),
                }
                if verbose:
                    print("entity: {0}".format(entity))
                # Check compatibility between brat text and annotation:
                # the annotated surface form must match the span in the text,
                # modulo whitespace normalization.
                if ' '.join(text[entity['start']:entity['end']].split()) != \
                   ' '.join(entity['text'].split()):
                    print("Warning: brat text and annotation do not match.")
                    print("\ttext: {0}".format(text[entity['start']:entity['end']]))
                    print("\tanno: {0}".format(entity['text']))
                # add to entities data
                entities.append(entity)
    if verbose:
        print("\n\n")
    return text, entities
def check_brat_annotation_and_text_compatibility(brat_folder):
    '''
    Check if brat annotation and text files are compatible.
    '''
    dataset_type = os.path.basename(brat_folder)
    print("Checking the validity of BRAT-formatted {0} set... ".format(dataset_type), end='')
    for txt_path in sorted(glob.glob(os.path.join(brat_folder, '*.txt'))):
        stem = os.path.splitext(os.path.basename(txt_path))[0]
        ann_path = os.path.join(os.path.dirname(txt_path), stem + '.ann')
        # Every .txt file must have a sibling .ann file.
        if not os.path.exists(ann_path):
            raise IOError("Annotation file does not exist: {0}".format(ann_path))
        # Loading the pair performs the text/annotation consistency check
        # (it prints a warning for each mismatching entity span).
        text, entities = get_entities_from_brat(txt_path, ann_path)
    print("Done.")
def brat_to_conll(input_folder, output_filepath, language):
    '''
    Convert a folder of BRAT `.txt`/`.ann` pairs into one CoNLL file.

    Assumes '.txt' and '.ann' files are in the input_folder.
    Checks for the compatibility between .txt and .ann at the same time.

    Parameters:
        input_folder: folder containing the BRAT document pairs.
        output_filepath: path of the CoNLL-formatted file to write
            (one "token filename start end label" line per token, blank
            line between sentences).
        language: spaCy model name passed to `spacy.load`.
    '''
    spacy_nlp = spacy.load(language)
    verbose = False
    dataset_type = os.path.basename(input_folder)
    print("Formatting {0} set from BRAT to CONLL... ".format(dataset_type), end='')
    text_filepaths = sorted(glob.glob(os.path.join(input_folder, '*.txt')))
    # Context manager so the output file is closed even if parsing raises
    # (the original leaked the handle on any exception).
    with codecs.open(output_filepath, 'w', 'utf-8') as output_file:
        for text_filepath in text_filepaths:
            base_filename = os.path.splitext(os.path.basename(text_filepath))[0]
            annotation_filepath = os.path.join(os.path.dirname(text_filepath), base_filename + '.ann')
            # create annotation file if it does not exist
            if not os.path.exists(annotation_filepath):
                codecs.open(annotation_filepath, 'w', 'UTF-8').close()
            text, entities = get_entities_from_brat(text_filepath, annotation_filepath)
            entities = sorted(entities, key=lambda entity: entity["start"])
            sentences = get_sentences_and_tokens_from_spacy(text, spacy_nlp)
            for sentence in sentences:
                inside = False  # True while the previous token was inside an entity
                previous_token_label = 'O'
                for token in sentence:
                    token['label'] = 'O'
                    # Entities are sorted by start, so stop scanning as soon
                    # as one starts past the current token.
                    for entity in entities:
                        if entity['start'] <= token['start'] < entity['end'] or \
                           entity['start'] < token['end'] <= entity['end'] or \
                           token['start'] < entity['start'] < entity['end'] < token['end']:
                            token['label'] = entity['type'].replace('-', '_')  # Because the ANN doesn't support tag with '-' in it
                            break
                        elif token['end'] < entity['start']:
                            break
                    if len(entities) == 0:
                        # No entities at all: sentinel so the
                        # `token['end'] == entity['end']` check below is defined.
                        entity = {'end': 0}
                    if token['label'] == 'O':
                        gold_label = 'O'
                        inside = False
                    elif inside and token['label'] == previous_token_label:
                        gold_label = 'I-{0}'.format(token['label'])
                    else:
                        inside = True
                        gold_label = 'B-{0}'.format(token['label'])
                    if token['end'] == entity['end']:
                        # This token closes the entity; an adjacent entity of
                        # the same type must restart with a B- tag.
                        inside = False
                    previous_token_label = token['label']
                    if verbose: print('{0} {1} {2} {3} {4}\n'.format(token['text'], base_filename, token['start'], token['end'], gold_label))
                    output_file.write('{0} {1} {2} {3} {4}\n'.format(token['text'], base_filename, token['start'], token['end'], gold_label))
                if verbose: print('\n')
                output_file.write('\n')
    print('Done.')
    del spacy_nlp