from utils import (
    write_jsonl_file,
    parse,
)

import os

# DailyDialog annotates each dialogue with a topic and each utterance with a
# dialogue act and an emotion, all stored as integer labels; the maps below
# translate them into human-readable names.
topics = {
    1: "Ordinary Life",
    2: "School Life",
    3: "Culture & Education",
    4: "Attitude & Emotion",
    5: "Relationship",
    6: "Tourism",
    7: "Health",
    8: "Work",
    9: "Politics",
    10: "Finance",
}

emotions = {
    0: "neutral",
    1: "anger",
    2: "disgust",
    3: "fear",
    4: "happiness",
    5: "sadness",
    6: "surprise",
}

acts = {1: "inform", 2: "question", 3: "directive", 4: "commissive"}


def load_topics(args):
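    """Build a mapping from raw dialogue text to its topic name.

    dialogues_text.txt and dialogues_topic.txt are parallel files: line i of
    the topic file holds the integer topic id for the dialogue on line i of
    the text file.
    """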
    text_file = os.path.join(args.input_dir, "dialogues_text.txt")
    topic_file = os.path.join(args.input_dir, "dialogues_topic.txt")
    text2topic = dict()

    with open(text_file, "r", encoding="utf-8") as text_reader, open(
        topic_file, "r", encoding="utf-8"
    ) as topic_reader:
        for line in text_reader:
            text = line.strip()
            topic = topics[int(topic_reader.readline().strip())]

            # if text in text2topic and text not in [
            #     "Can I help you ? __eou__ I hope so . I'm looking for some material for a paper I'm writing , and I'm not quite sure where to look . __eou__ I'll certainly try to help you . What topic is your paper on ? __eou__ My paper is on the influence of television on children . __eou__ There are several possible sources you might use for that topic . I suggest you use the computer and the computer will give you a list of every scientific journal that talks about children and television . __eou__ Thank you for you help . __eou__"
            #     "Hey , Ann . You don't have a pen , do you ? __eou__ Sure , here you go . __eou__ Thanks . I don't suppose you have some paper , too . __eou__ Of course . There you are . __eou__ Thanks so much . I owe you one ."
            # ]:
            #     print(text, topic, text2topic[text])
            #     assert text2topic[text] == topic

            text2topic[text] = topic

    return text2topic


def preprocess(args, split, text2topic):
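    """Convert one split (train/validation/test) into jsonl.

    The per-split text, act and emotion files are read in parallel, one
    dialogue per line in each; the topic comes from the `text2topic` lookup
    built over the full corpus. The result is written to
    <output_dir>/<split>.jsonl, with "validation" renamed to "dev".
    """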
    input_dir = os.path.join(args.input_dir, split)

    text_file = os.path.join(input_dir, f"dialogues_{split}.txt")
    act_file = os.path.join(input_dir, f"dialogues_act_{split}.txt")
    emotion_file = os.path.join(input_dir, f"dialogues_emotion_{split}.txt")

    if split == "validation":
        split = "dev"  # the validation split is written out as dev.jsonl
    # write_jsonl_file comes from utils; create the output directory here in
    # case it does not already exist.
    os.makedirs(args.output_dir, exist_ok=True)
    outfile = os.path.join(args.output_dir, f"{split}.jsonl")
    processed_data = []

    with open(text_file, "r", encoding="utf-8") as text_reader, open(
        act_file, "r", encoding="utf-8"
    ) as act_reader, open(emotion_file, "r", encoding="utf-8") as emotion_reader:
        for line in text_reader:
            text = line.strip()
            if text in text2topic:
                topic = text2topic[text]
            else:
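                # This dialogue's text apparently differs slightly between the
                # split file and the full dialogues_text.txt, so its topic is
                # recovered via the hard-coded equivalent text below.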
                _text = "Sam , can we stop at this bicycle shop ? __eou__ Do you want to buy a new bicycle ? __eou__ Yes , and they have a sale on now . __eou__ What happened to your old one ? __eou__ I left it at my parent's house , but I need one here as well . __eou__ I've been using Jim's old bike but he needs it back . __eou__ Let's go then . __eou__ Look at this mountain bike . It is only £ 330 . Do you like it ? __eou__ I prefer something like this one - a touring bike , but it is more expensive . __eou__ How much is it ? __eou__ The price on the tag says £ 565 but maybe you can get a discount . __eou__ OK , let's go and ask . __eou__"
                topic = text2topic[_text]

            # Utterances are separated by the __eou__ marker; a well-formed
            # line ends with it, so the final split element must be empty and
            # is dropped.
            utterances = text.split("__eou__")
            assert not utterances[-1]
            utterances = utterances[:-1]
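            # Act and emotion annotations are aligned with the text file: one
            # line per dialogue, one space-separated integer per utterance,
            # mapped here to their label names.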
            _acts = [acts[int(x)] for x in act_reader.readline().strip().split()]
            _emotions = [
                emotions[int(x)] for x in emotion_reader.readline().strip().split()
            ]

            # One output record per dialogue; the knowledge block lists the
            # full emotion label inventory used by the dataset.
            dialogue = {
                "turn": "multi",
                "locale": "en",
                "domain": [topic],
                "dialog": [],
                "knowledge": {"type": "list", "value": sorted(emotions.values())},
            }

            assert len(utterances) == len(_acts) and len(utterances) == len(
                _emotions
            ), f"{utterances}\n{_acts}\n{_emotions}"

            # DailyDialog conversations are two-party with strictly alternating
            # speakers, so roles can be assigned by utterance parity.
            roles = ["ROLE1", "ROLE2"]
            for idx, utterance in enumerate(utterances):
                assert utterance
                dialogue["dialog"].append(
                    {
                        "roles": [roles[idx % 2]],
                        "utterance": utterance,
                        "active_intents": [_acts[idx]],
                        "emotions": [{"emotion": _emotions[idx]}],
                    }
                )

            processed_data.append(dialogue)

    write_jsonl_file(processed_data, outfile)


if __name__ == "__main__":
    args = parse()
    text2topic = load_topics(args)
    preprocess(args, "train", text2topic)
    preprocess(args, "validation", text2topic)
    preprocess(args, "test", text2topic)