test.py

"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import os
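# Limit OpenMP to a single thread; this must be set before torch is imported.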
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from src.env import create_train_env
from src.model import ActorCritic
import torch.nn.functional as F


def get_args():
    parser = argparse.ArgumentParser(
        description="""Implementation of the model described in the paper: Asynchronous Methods for Deep Reinforcement Learning for Super Mario Bros""")
parser.add_argument("--world", type=int, default=1)
parser.add_argument("--stage", type=int, default=1)
parser.add_argument("--action_type", type=str, default="complex")
parser.add_argument("--saved_path", type=str, default="trained_models")
parser.add_argument("--output_path", type=str, default="output")
args = parser.parse_args()
    return args


def test(opt):
torch.manual_seed(123)
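    # Build the environment for the chosen world/stage; the final argument appears
    # to be the path where the gameplay video is recorded.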
env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type,
"{}/video_{}_{}.mp4".format(opt.output_path, opt.world, opt.stage))
model = ActorCritic(num_states, num_actions)
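    # Load the trained weights, mapping them to CPU storage when no GPU is available.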
if torch.cuda.is_available():
model.load_state_dict(torch.load("{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, opt.world, opt.stage)))
model.cuda()
else:
model.load_state_dict(torch.load("{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, opt.world, opt.stage),
map_location=lambda storage, loc: storage))
model.eval()
state = torch.from_numpy(env.reset())
done = True
while True:
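        # At episode boundaries, reinitialize the LSTM hidden/cell states (512 units);
        # otherwise detach them so the computation graph does not grow across steps.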
if done:
h_0 = torch.zeros((1, 512), dtype=torch.float)
c_0 = torch.zeros((1, 512), dtype=torch.float)
            state = torch.from_numpy(env.reset())  # use the fresh observation, not the stale one
else:
h_0 = h_0.detach()
c_0 = c_0.detach()
if torch.cuda.is_available():
h_0 = h_0.cuda()
c_0 = c_0.cuda()
state = state.cuda()
logits, value, h_0, c_0 = model(state, h_0, c_0)
policy = F.softmax(logits, dim=1)
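        # Greedy policy for evaluation: take the most probable action rather than sampling.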
        action = torch.argmax(policy).item()
state, reward, done, info = env.step(action)
state = torch.from_numpy(state)
env.render()
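        # info["flag_get"] is True once Mario reaches the end-of-level flag.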
if info["flag_get"]:
print("World {} stage {} completed".format(opt.world, opt.stage))
            break


if __name__ == "__main__":
opt = get_args()
test(opt)