import os
import re

from utils import parse, read_jsonl_file, write_jsonl_file


def readfile(input_dir, filename):
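    """Load the JSONL file ``filename`` from ``input_dir`` and return its records."""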
    path = os.path.join(input_dir, filename)
    data = read_jsonl_file(path)
    return data


def is_space_language(language):
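    """Return True if ``language`` separates words with spaces (zh-CN, ja-JP and zh-TW do not)."""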
    if language in ["zh-CN", "ja-JP", "zh-TW"]:
        return False
    return True


def get_slot_value_table(utterance, language, origin):
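    """Extract ``[slot : value]`` annotations from the annotated ``utterance`` and
    map each value back to its character span in the original utterance ``origin``.
    Returns a list of slot-value dicts with ``start``/``end`` offsets into ``origin``.
    """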
    svt = []
    pattern = re.compile(r"\[(.*?)\]", re.S)
    svp_iter = re.finditer(pattern, utterance)
    delta_length = 0  # record the delta length caused by the annotation
    prev_end = -1
    for svp in svp_iter:
        start = svp.start()
        end = svp.end()
        annotated = utterance[start + 1 : end - 1]
        slot, value = map(lambda x: x.strip(), annotated.split(":", 1))
        origin_start = start
        is_space = is_space_language(language)
        # offset 1 for non-space-separated languages
        if (
            not is_space
            and start > 0
            and utterance[start - 1] == " "
            and prev_end + 1 != start
        ):
            origin_start -= 1
        # offset the delta length
        origin_start -= delta_length
        origin_end = origin_start + len(value)
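        # for non-space-separated languages the computed span may be misaligned
        # by a few characters, so probe nearby offsets until the value matches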
        if not is_space:
            if origin[origin_start:origin_end] != value:
                for delta_offset in range(-3, 4):
                    if (
                        origin[delta_offset + origin_start : delta_offset + origin_end]
                        == value
                    ):
                        origin_start += delta_offset
                        origin_end += delta_offset
                        break
            # update delta length
            if (
                origin_end < len(origin)
                and end < len(utterance)
                and origin[origin_end] == utterance[end]
            ):
                delta_length = end - origin_end
            else:
                delta_length = end - origin_end + 1
        else:
            # update delta length
            cur_delta_length = end - start - len(value)
            if not is_space:
                # head space
                if start > 0 and utterance[start - 1] == " ":
                    cur_delta_length += 1
                if end < len(utterance) and utterance[end] == " ":
                    cur_delta_length += 1
            delta_length += cur_delta_length
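        # sanity check: the recovered span must equal the annotated value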
        assert origin[origin_start:origin_end] == value
        svt.append(
            {
                "slot": slot,
                "value": value,
                "start": origin_start,
                "end": origin_end,
                "relation": "equal_to",
            }
        )
        prev_end = end
    return svt


def preprocess(args):
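    """Convert every JSONL file in ``args.input_dir`` into the unified single-turn
    dialog format and write one JSONL file per partition to ``args.output_dir``.
    """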
    filenames = os.listdir(args.input_dir)
    data = {"train": [], "dev": [], "test": [], "MMNLU-22": []}
    total = len(filenames)
    for cur, filename in enumerate(filenames, start=1):
        print(f"preprocessing {filename} ({cur}/{total})")
        origin_data = readfile(args.input_dir, filename)
        for line in origin_data:
            partition = line["partition"]
            turn = dict()
            turn["role"] = "ROLE"
            turn["utterance"] = line["utt"]
            domain = None
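            # records with an annotated utterance get a belief state built from
            # its slot/value annotations; unlabeled records get an empty one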
if "annot_utt" in line:
domain = [line["scenario"]]
bs = []
goal = dict()
goal["intent"] = line["intent"] # format: {domain}_{intent}
slot_value_table = get_slot_value_table(
line["annot_utt"], line["locale"], turn["utterance"]
)
goal["slot_value_table"] = slot_value_table
goal["active_intent"] = line["intent"]
bs.append(goal)
turn["belief_state"] = [bs]
else:
turn["belief_state"] = []
if domain is None:
data[partition].append(
{"turn": "single", "locale": line["locale"], "dialog": [turn]}
)
else:
data[partition].append(
{
"turn": "single",
"locale": line["locale"],
"dialog": [turn],
"domain": domain,
}
)
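    # write one JSONL file per non-empty partition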
    for partition in data:
        if data[partition]:
            write_jsonl_file(
                data[partition], os.path.join(args.output_dir, f"{partition}.jsonl")
            )


if __name__ == "__main__":
    args = parse()
    preprocess(args)