# wordle-solver/a3c/train.py
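"""A3C training entry point: build the shared global network and optimizer,
then fan training out to one Worker process per CPU core."""
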
import os

import torch
import torch.multiprocessing as mp

from .net import Net
from .shared_adam import SharedAdam
from .worker import Worker


def train(env, max_ep, model_checkpoint_dir, gamma=0., pretrained_model_path=None):
    """Train the A3C global network on the given Wordle environment.

    Spawns one worker process per CPU core; workers push updates to the
    shared global network. Returns the shared episode and win counters,
    the trained global network, and the per-episode reward history.
    """
    os.environ["OMP_NUM_THREADS"] = "1"  # avoid OpenMP thread contention across worker processes
    if not os.path.exists(model_checkpoint_dir):
        os.makedirs(model_checkpoint_dir)
    n_s = env.observation_space.shape[0]  # state (observation) dimension
    n_a = env.action_space.n  # size of the discrete action space
    words_list = env.words
    word_width = len(env.words[0])  # number of letters per word
    gnet = Net(n_s, n_a, words_list, word_width)  # global network
    if pretrained_model_path:
        gnet.load_state_dict(torch.load(pretrained_model_path))
    gnet.share_memory()  # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=1e-4, betas=(0.92, 0.999))  # global optimizer
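    # (SharedAdam is assumed, as in common A3C implementations, to keep Adam's
    # moment buffers in shared memory so all workers update one optimizer state.)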
    # shared counters/queue: episode count, moving-average reward, results, wins
    global_ep, global_ep_r, res_queue, win_ep = (
        mp.Value('i', 0), mp.Value('d', 0.), mp.Queue(), mp.Value('i', 0))
    # parallel training: one worker process per CPU core
    workers = [Worker(max_ep, gnet, opt, global_ep, global_ep_r, res_queue, i, env, n_s, n_a,
                      words_list, word_width, win_ep, model_checkpoint_dir, gamma, pretrained_model_path)
               for i in range(mp.cpu_count())]
    for w in workers:
        w.start()
    res = []  # record episode rewards to plot
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:
            break  # a None sentinel on the queue signals that training is done
    for w in workers:
        w.join()
    return global_ep, win_ep, gnet, res
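

# Example usage (a sketch): assumes a gym-style Wordle environment exposing
# `observation_space`, `action_space`, and a `words` list of candidate words.
# `WordleEnv` and the import path below are hypothetical placeholders, not
# part of this repo's confirmed API.
#
#     from wordle_env import WordleEnv
#
#     env = WordleEnv()
#     global_ep, win_ep, gnet, res = train(
#         env, max_ep=5000, model_checkpoint_dir="checkpoints", gamma=0.9)
#     print(f"episodes: {global_ep.value}, wins: {win_ep.value}")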