import os
import gym
import matplotlib.pyplot as plt
import torch.multiprocessing as mp

from a3c.discrete_A3C import Net, Worker
from a3c.shared_adam import SharedAdam
from wordle_env.wordle import WordleEnvBase


os.environ["OMP_NUM_THREADS"] = "1"  # limit each worker process to a single OpenMP thread

env = gym.make('WordleEnv100FullAction-v0')
N_S = env.observation_space.shape[0]  # size of the observation vector
N_A = env.action_space.shape[0]       # size of the action space

if __name__ == "__main__":
    words_list = env.words           # candidate guess words exposed by the environment
    word_width = len(env.words[0])   # number of letters per word
    gnet = Net(N_S, N_A, words_list, word_width)        # global network
    gnet.share_memory()         # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=1e-4, betas=(0.92, 0.999))      # global optimizer
    # shared episode counter, shared moving-average reward, and a queue for worker results
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    # parallel training
    workers = [
        Worker(gnet, opt, global_ep, global_ep_r, res_queue, i, env,
               N_S=N_S, N_A=N_A, words_list=words_list, word_width=word_width)
        for i in range(mp.cpu_count())
    ]
    [w.start() for w in workers]
    res = []                    # record episode reward to plot
    while True:
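        # drain the results queue: each entry is a moving-average episode reward
        # pushed by a worker; None is the sentinel meaning training has finished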
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:
            break
    [w.join() for w in workers]

    plt.plot(res)
    plt.ylabel('Moving average ep reward')
    plt.xlabel('Step')
    plt.show()