#!/usr/bin/env python3
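"""Double DQN on Pong.

Trains a DQN agent with PTAN and logs progress to TensorBoard. With the
--double flag, the Bellman target picks the next action with the online
network and evaluates it with the target network (Double DQN), which reduces
the Q-value overestimation of the basic DQN target. The mean Q-value over a
fixed set of held-out states is logged so the two variants can be compared.
"""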
import gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim

from tensorboardX import SummaryWriter

from lib import dqn_model, common

# How many held-out states are used to track the mean Q-value, and how often
# (in frames) that evaluation is performed
STATES_TO_EVALUATE = 1000
EVAL_EVERY_FRAME = 100

def calc_loss(batch, net, tgt_net, gamma, device="cpu", double=True):
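    """Compute the MSE loss between predicted and target Q-values for a batch.

    With double=True, the next action is selected by the online net and
    evaluated by the target net (Double DQN); otherwise the target net's
    max Q-value is used directly (basic DQN).
    """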
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.ByteTensor(dones).to(device)

    # Q(s, a) predicted by the online network for the actions actually taken
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    if double:
        # Double DQN: the online net selects the best next action,
        # the target net provides its value
        next_state_actions = net(next_states_v).max(1)[1]
        next_state_values = tgt_net(next_states_v).gather(1, next_state_actions.unsqueeze(-1)).squeeze(-1)
    else:
        # Basic DQN: take the max Q-value straight from the target net
        next_state_values = tgt_net(next_states_v).max(1)[0]
    # Terminal transitions contribute no future value
    next_state_values[done_mask] = 0.0

    expected_state_action_values = next_state_values.detach() * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)

def calc_values_of_states(states, net, device="cpu"):
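    """Return the mean of the max Q-values predicted by `net` over `states`.

    The states are processed in 64 chunks to keep memory usage bounded.
    """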
    mean_vals = []
    for batch in np.array_split(states, 64):
        states_v = torch.tensor(batch).to(device)
        action_values_v = net(states_v)
        best_action_values_v = action_values_v.max(1)[0]
        mean_vals.append(best_action_values_v.mean().item())
    return np.mean(mean_vals)

if __name__ == "__main__":
    params = common.HYPERPARAMS['pong']
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
    parser.add_argument("--double", default=False, action="store_true", help="Enable double DQN")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    env = gym.make(params['env_name'])
    env = ptan.common.wrappers.wrap_dqn(env)

    writer = SummaryWriter(comment="-" + params['run_name'] + "-double=" + str(args.double))
    net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)

    # The target net is a lagged copy of the online net, synced periodically below
    tgt_net = ptan.agent.TargetNet(net)
    selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
    epsilon_tracker = common.EpsilonTracker(selector, params)
    agent = ptan.agent.DQNAgent(net, selector, device=device)

    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=1)
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params['replay_size'])
    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])

    frame_idx = 0
    eval_states = None

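    # Training loop: one environment step per iteration; optimization starts
    # once the replay buffer holds at least replay_initial transitions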
    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            if len(buffer) < params['replay_initial']:
                continue

            # Freeze a fixed set of states once, to track their mean Q-value during training
            if eval_states is None:
                eval_states = buffer.sample(STATES_TO_EVALUATE)
                eval_states = [np.array(transition.state, copy=False) for transition in eval_states]
                eval_states = np.array(eval_states, copy=False)

            optimizer.zero_grad()
            batch = buffer.sample(params['batch_size'])
            loss_v = calc_loss(batch, net, tgt_net.target_model, gamma=params['gamma'], device=device,
                               double=args.double)
            loss_v.backward()
            optimizer.step()

            if frame_idx % params['target_net_sync'] == 0:
                tgt_net.sync()
            if frame_idx % EVAL_EVERY_FRAME == 0:
                mean_val = calc_values_of_states(eval_states, net, device=device)
                writer.add_scalar("values_mean", mean_val, frame_idx)