wordle-solver/a3c/discrete_A3C.py
"""
Reinforcement Learning (A3C) using Pytroch + multiprocessing.
The most simple implementation for continuous action.
View more on my Chinese tutorial page [莫烦Python](https://morvanzhou.github.io/).
"""
import os
import torch
import torch.multiprocessing as mp
from .shared_adam import SharedAdam
from .net import Net
from .utils import v_wrap
from .worker import Worker


def train(env, max_ep, model_checkpoint_dir, pretrained_model_path=None):
    """Train the global A3C network on `env` with one worker process per CPU core,
    optionally starting from a pretrained checkpoint. Returns the shared episode
    counters, the trained global network and the recorded episode rewards."""
    os.environ["OMP_NUM_THREADS"] = "1"
    if not os.path.exists(model_checkpoint_dir):
        os.makedirs(model_checkpoint_dir)
    n_s = env.observation_space.shape[0]
    n_a = env.action_space.n
    words_list = env.words
    word_width = len(env.words[0])
    gnet = Net(n_s, n_a, words_list, word_width)  # global network
    if pretrained_model_path:
        gnet.load_state_dict(torch.load(pretrained_model_path))
    gnet.share_memory()  # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=1e-4, betas=(0.92, 0.999))  # global optimizer
    global_ep, global_ep_r, res_queue, win_ep = (
        mp.Value('i', 0), mp.Value('d', 0.), mp.Queue(), mp.Value('i', 0))
    # parallel training: one worker per CPU core
    workers = [Worker(max_ep, gnet, opt, global_ep, global_ep_r, res_queue, i, env, n_s, n_a,
                      words_list, word_width, win_ep, model_checkpoint_dir, pretrained_model_path)
               for i in range(mp.cpu_count())]
    for w in workers:
        w.start()
    res = []  # record episode rewards to plot
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:
            break
    for w in workers:
        w.join()
    return global_ep, win_ep, gnet, res


def evaluate_checkpoints(dir, env):
    """Load each checkpoint file in `dir` into a fresh Net, evaluate it, and return
    a dict of results sorted best-first (highest win rate, then fewest guesses per win)."""
    n_s = env.observation_space.shape[0]
    n_a = env.action_space.n
    words_list = env.words
    word_width = len(env.words[0])
    net = Net(n_s, n_a, words_list, word_width)
    results = {}
    for checkpoint in os.listdir(dir):
        checkpoint_path = os.path.join(dir, checkpoint)
        if os.path.isfile(checkpoint_path):
            net.load_state_dict(torch.load(checkpoint_path))
            wins, guesses = evaluate(net, env)
            results[checkpoint] = wins, guesses
    return dict(sorted(results.items(), key=lambda x: (x[1][0], -x[1][1]), reverse=True))


def evaluate(net, env):
    """Play every allowable goal word once and report win rate and guess counts."""
    n_wins = 0
    n_guesses = 0
    n_win_guesses = 0
    env = env.unwrapped
    N = env.allowable_words
    for goal_word in env.words[:N]:
        win, outcomes = play(net, env)
        if win:
            n_wins += 1
            n_win_guesses += len(outcomes)
        # else:
        #     print("Lost!", goal_word, outcomes)
        n_guesses += len(outcomes)
    win_rate = n_wins / N * 100
    # guard against division by zero when the network never wins
    avg_win_guesses = n_win_guesses / n_wins if n_wins else 0
    print(f"Evaluation complete, won {win_rate:.1f}% and took {avg_win_guesses:.2f} guesses per win, "
          f"{n_guesses / N:.2f} including losses.")
    return win_rate, avg_win_guesses


def play(net, env):
    """Play a single episode with `net`; return whether it won and the
    (guessed word, reward) pair for each turn."""
    state = env.reset()
    outcomes = []
    win = False
    for _ in range(env.max_turns):
        action = net.choose_action(v_wrap(state[None, :]))
        state, reward, done, _ = env.step(action)
        outcomes.append((env.words[action], reward))
        if done:
            if reward >= 0:
                win = True
            break
    return win, outcomes
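

# Example usage (a rough sketch, not part of the original module). The
# environment construction is an assumption: this file only requires a
# Gym-style env exposing `observation_space`, `action_space`, `words`,
# `allowable_words` and `max_turns`; the `make_wordle_env` name below is
# hypothetical.
#
#     env = make_wordle_env()
#     global_ep, win_ep, gnet, res = train(
#         env, max_ep=5000, model_checkpoint_dir="checkpoints")
#     ranked = evaluate_checkpoints("checkpoints", env)
#     best_checkpoint = next(iter(ranked))  # highest win rate first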