base_dqn.py
#!/usr/bin/env python3
import gym
import ptan
import argparse
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter

from lib import dqn_model, common_my

# TODOs
# - Carefully verify all of the environment wrapping
# - Check that the grayscale-converted frames output by the environment are correct
# - Verify the environment's frame skipping
# - Huber loss? (see the sketch right below)
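
# A minimal sketch for the "Huber loss?" TODO above: a smooth-L1 (Huber) variant of the
# DQN loss. This is an assumption, not the project's API: common_my.calc_loss_dqn is not
# shown here, so the function name calc_loss_dqn_huber and the unpacking below (based on
# ptan's ExperienceFirstLast fields: state, action, reward, last_state) are illustrative
# only. For n-step returns the discount passed in should already be gamma ** n_steps.
import numpy as np
import torch.nn.functional as F


def calc_loss_dqn_huber(batch, net, tgt_net, gamma, device="cpu"):
    # Unpack the batch of ptan ExperienceFirstLast namedtuples
    states = np.asarray([exp.state for exp in batch])
    actions = np.asarray([exp.action for exp in batch])
    rewards = np.asarray([exp.reward for exp in batch], dtype=np.float32)
    dones = np.asarray([exp.last_state is None for exp in batch], dtype=bool)
    # Terminal transitions have last_state == None; substitute the initial state,
    # its bootstrapped value is masked to zero below anyway
    last_states = np.asarray([exp.state if exp.last_state is None else exp.last_state
                              for exp in batch])

    states_v = torch.tensor(states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    last_states_v = torch.tensor(last_states).to(device)
    done_mask = torch.tensor(dones).to(device)

    # Q(s, a) for the actions actually taken
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Bootstrapped target from the target network, zeroed on terminal transitions
    with torch.no_grad():
        next_state_values = tgt_net(last_states_v).max(1)[0]
        next_state_values[done_mask] = 0.0
        expected_values = rewards_v + gamma * next_state_values

    # Huber loss is less sensitive to large TD errors than MSE
    return F.smooth_l1_loss(state_action_values, expected_values)
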
if __name__ == "__main__":
    params = common_my.HYPERPARAMS['invaders']

    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    env = gym.make(params['env_name'])
    env = ptan.common.wrappers.wrap_dqn(env)

    writer = SummaryWriter(comment="-" + params['run_name'] + "-basic")
    net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
    tgt_net = ptan.agent.TargetNet(net)

    # Epsilon-greedy exploration is disabled here: with ArgmaxActionSelector the agent
    # always acts greedily with respect to the current Q-values
    #selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
    #epsilon_tracker = common.EpsilonTracker(selector, params)
    selector = ptan.actions.ArgmaxActionSelector()
    agent = ptan.agent.DQNAgent(net, selector, device=device)

    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=params['n_steps'])
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params['replay_size'])
    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])

    frame_idx = 0

    # Resume from the saved net (checkpoint taken at 4,000,000 frames) and sync the target net;
    # map_location lets the checkpoint load on a CPU-only machine as well
    net.load_state_dict(torch.load(params["save_dir"] + "invaders_4000000.dat", map_location=device))
    tgt_net.sync()
    with common_my.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            buffer.populate(1)
            #epsilon_tracker.frame(frame_idx)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                if reward_tracker.reward(new_rewards[0], frame_idx, None):
                    break

            if len(buffer) < params['replay_initial']:
                continue

            optimizer.zero_grad()
            batch = buffer.sample(params['batch_size'])
            loss_v = common_my.calc_loss_dqn(batch, net, tgt_net.target_model, gamma=params['gamma'], n_steps=params['n_steps'], device=device)
            loss_v.backward()
            optimizer.step()

            if frame_idx % params['target_net_sync'] == 0:
                tgt_net.sync()

            if frame_idx % params['save_interval'] == 0:
                common_my.save_net(net, params['save_dir'], "{}_{}.dat".format(params['run_name'], frame_idx + 4000000))
                common_my.save_net(net, params['save_dir'], "best.dat")
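
# Usage sketch (assuming the lib/ package with dqn_model and common_my is importable and
# the invaders_4000000.dat checkpoint exists under params["save_dir"]):
#   python base_dqn.py          # resume training on CPU
#   python base_dqn.py --cuda   # resume training on GPU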