"""
Reinforcement Learning (A3C) using PyTorch + multiprocessing.
A simple implementation for discrete actions.

View more on my Chinese tutorial page [莫烦Python](https://morvanzhou.github.io/).
"""
import os
import torch
import torch.multiprocessing as mp
from .shared_adam import SharedAdam
from .net import Net
from .utils import v_wrap
from .worker import Worker


def train(env, max_ep, model_checkpoint_dir, pretrained_model_path=None):
    os.environ["OMP_NUM_THREADS"] = "1"     # one OpenMP thread per process to avoid oversubscription across workers
    os.makedirs(model_checkpoint_dir, exist_ok=True)
    n_s = env.observation_space.shape[0]    # state dimension
    n_a = env.action_space.n                # number of discrete actions (one per candidate word)
    words_list = env.words
    word_width = len(env.words[0])          # length of each word
    gnet = Net(n_s, n_a, words_list, word_width)        # global network
    if pretrained_model_path:
        gnet.load_state_dict(torch.load(pretrained_model_path))
    gnet.share_memory()         # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=1e-4, betas=(0.92, 0.999))      # global optimizer
    # shared state across workers: episode counter, running reward, result queue, win counter
    global_ep, global_ep_r, res_queue, win_ep = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue(), mp.Value('i', 0)

    # parallel training
    workers = [Worker(max_ep, gnet, opt, global_ep, global_ep_r, res_queue, i, env, n_s, n_a,
                      words_list, word_width, win_ep, model_checkpoint_dir, pretrained_model_path) for i in range(mp.cpu_count())]
    for w in workers:
        w.start()
    res = []                    # record episode reward to plot
    while True:
        r = res_queue.get()     # workers push episode rewards; a None sentinel means training is finished
        if r is not None:
            res.append(r)
        else:
            break
    for w in workers:
        w.join()
    return global_ep, win_ep, gnet, res


def evaluate_checkpoints(dir, env):
    n_s = env.observation_space.shape[0]
    n_a = env.action_space.n
    words_list = env.words
    word_width = len(env.words[0])
    net = Net(n_s, n_a, words_list, word_width)
    results = {}
    for checkpoint in os.listdir(dir):
        checkpoint_path = os.path.join(dir, checkpoint)
        if os.path.isfile(checkpoint_path):
            net.load_state_dict(torch.load(checkpoint_path))
            wins, guesses = evaluate(net, env)
            results[checkpoint] = wins, guesses
    # rank checkpoints by win rate (descending), breaking ties by fewer guesses per win
    return dict(sorted(results.items(), key=lambda x: (x[1][0], -x[1][1]), reverse=True))


def evaluate(net, env):
    n_wins = 0
    n_guesses = 0
    n_win_guesses = 0
    env = env.unwrapped
    N = env.allowable_words
    for goal_word in env.words[:N]:
        win, outcomes = play(net, env)
        if win:
            n_wins += 1
            n_win_guesses += len(outcomes)
        # else:
        #     print("Lost!", goal_word, outcomes)
        n_guesses += len(outcomes)
    print(f"Evaluation complete, won {n_wins/N*100}% and took {n_win_guesses/n_wins} guesses per win, "
          f"{n_guesses / N} including losses.")
    return n_wins/N*100, n_win_guesses/n_wins


def play(net, env):
    state = env.reset()
    outcomes = []               # list of (guessed word, reward) per turn
    win = False
    for i in range(env.max_turns):
        action = net.choose_action(v_wrap(state[None, :]))
        state, reward, done, _ = env.step(action)
        outcomes.append((env.words[action], reward))
        if done:
            if reward >= 0:     # a non-negative terminal reward is treated as a win
                win = True
            break
    return win, outcomes
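

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal example of wiring train() and evaluate_checkpoints() together.
# `WordleEnv` and the `.env` module it is imported from are assumptions made
# for illustration; substitute whatever environment the surrounding project
# actually provides (it must expose `words`, `allowable_words`, `max_turns`,
# `observation_space`, and `action_space` as used above). The paths and the
# episode budget below are likewise hypothetical.
if __name__ == "__main__":
    from .env import WordleEnv          # hypothetical environment module

    demo_env = WordleEnv()              # assumed Wordle-style gym environment
    checkpoint_dir = "checkpoints"      # hypothetical output directory

    global_ep, win_ep, gnet, rewards = train(
        demo_env,
        max_ep=5000,                    # illustrative episode budget
        model_checkpoint_dir=checkpoint_dir,
    )
    torch.save(gnet.state_dict(), os.path.join(checkpoint_dir, "final.pth"))

    # Rank every saved checkpoint by win rate, then by guesses per win.
    print(evaluate_checkpoints(checkpoint_dir, demo_env))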