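"""Train an A3C agent to play Wordle, then evaluate the learned policy.

Spawns one Worker process per CPU core around a shared global network,
plots the moving-average episode reward, and finally plays one episode
per allowed goal word to report win rate and average guess counts.
"""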
import os
import sys
import gym
import matplotlib.pyplot as plt
import torch.multiprocessing as mp
from a3c.discrete_A3C import Net, Worker
from a3c.shared_adam import SharedAdam
from a3c.utils import v_wrap
from wordle_env.wordle import WordleEnvBase  # imported for its side effect: registers the Wordle env ids with gym
os.environ["OMP_NUM_THREADS"] = "1"  # one OpenMP thread per process; parallelism comes from the A3C workers


def evaluate(net, env):
    """Play every allowed goal word once; report win rate and guess counts."""
    print("Evaluation mode")
    n_wins = 0
    n_guesses = 0
    n_win_guesses = 0
    env = env.unwrapped
    N = env.allowable_words
    for goal_word in env.words[:N]:
        win, outcomes = play(net, env)
        if win:
            n_wins += 1
            n_win_guesses += len(outcomes)
        else:
            print("Lost!", goal_word, outcomes)
        n_guesses += len(outcomes)
    avg_win_guesses = n_win_guesses / n_wins if n_wins else float("nan")  # guard against a winless run
    print(f"Evaluation complete, won {n_wins / N * 100:.1f}% "
          f"and took {avg_win_guesses:.2f} guesses per win, "
          f"{n_guesses / N:.2f} including losses.")


def play(net, env):
    """Run one episode; return (win, [(guessed word, reward), ...])."""
    state = env.reset()
    outcomes = []
    win = False
    for _ in range(env.max_turns):
        action = net.choose_action(v_wrap(state[None, :]))
        state, reward, done, _ = env.step(action)
        outcomes.append((env.words[action], reward))
        if done:
            win = reward >= 0  # a non-negative terminal reward means the word was solved
            break
    return win, outcomes

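
# Run as a script: the first CLI argument caps the number of training
# episodes (default 100000), the second selects the gym environment id
# (default 'WordleEnv100FullAction-v0').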
if __name__ == "__main__":
    max_ep = int(sys.argv[1]) if len(sys.argv) > 1 else 100000
    env_id = sys.argv[2] if len(sys.argv) > 2 else 'WordleEnv100FullAction-v0'
    env = gym.make(env_id)
    n_s = env.observation_space.shape[0]  # observation vector size
    n_a = env.action_space.n              # one action per guessable word
    words_list = env.words
    word_width = len(env.words[0])
    gnet = Net(n_s, n_a, words_list, word_width)  # global network
    gnet.share_memory()  # share the global parameters across worker processes
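    # SharedAdam keeps Adam's moment buffers in shared memory, so every
    # worker process updates a single global optimizer state alongside gnet.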
    opt = SharedAdam(gnet.parameters(), lr=1e-4, betas=(0.92, 0.999))  # global optimizer
    global_ep, global_ep_r, res_queue, win_ep = (
        mp.Value('i', 0), mp.Value('d', 0.), mp.Queue(), mp.Value('i', 0))

    # Parallel training: one worker process per CPU core.
    workers = [Worker(max_ep, gnet, opt, global_ep, global_ep_r, res_queue, i,
                      env, n_s, n_a, words_list, word_width, win_ep)
               for i in range(mp.cpu_count())]
    for w in workers:
        w.start()

    res = []  # moving-average episode rewards, for plotting
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:  # a worker pushes None once the episode cap is reached
            break
    for w in workers:
        w.join()

    print("Episodes played:", global_ep.value)
    print("Episodes won:", win_ep.value)
    plt.plot(res)
    plt.ylabel('Moving average ep reward')
    plt.xlabel('Step')
    plt.show()
    evaluate(gnet, env)