# rnn_main.py
import maml_rl.envs
import gym
import numpy as np
import torch
import json
import os
import csv
import collections

from memory_maml_rl.metalearner import MetaLearner
from memory_maml_rl.policies import CategoricalMLPPolicy, NormalMLPPolicy
from memory_maml_rl.baseline import LinearFeatureBaseline
from memory_maml_rl.sampler import BatchSampler

from tensorboardX import SummaryWriter


def total_rewards(episodes_rewards, aggregation=torch.mean):
    # Sum rewards over time (dim 0) to get per-episode returns, aggregate the
    # returns within each task, then average the result across tasks.
    rewards = torch.mean(torch.stack([aggregation(torch.sum(rewards, dim=0))
        for rewards in episodes_rewards], dim=0))
    return rewards.item()
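
# Illustrative check of total_rewards (a sketch, not used by the script; the
# [horizon, num_episodes] reward shape is an assumption about what the sampler
# returns):
#
#   >>> eps = [torch.ones(10, 4), 2.0 * torch.ones(10, 4)]
#   >>> total_rewards(eps)  # per-task episode returns of 10 and 20, averaged
#   15.0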


def main(args):
    continuous_actions = (args.env_name in ['AntVel-v1', 'AntDir-v1',
        'AntPos-v0', 'HalfCheetahVel-v1', 'HalfCheetahDir-v1',
        '2DNavigation-v0'])

    writer = SummaryWriter('./logs/rnn/{0}'.format(args.output_folder))
    save_folder = './saves/rnn/{0}'.format(args.output_folder)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    with open(os.path.join(save_folder, 'config.json'), 'w') as f:
        config = {k: v for (k, v) in vars(args).items() if k != 'device'}
        config.update(device=args.device.type)
        json.dump(config, f, indent=2)

    sampler = BatchSampler(args.env_name, batch_size=args.fast_batch_size,
        num_workers=args.num_workers)
    if continuous_actions:
        policy = NormalMLPPolicy(
            int(np.prod(sampler.envs.observation_space.shape)),
            int(np.prod(sampler.envs.action_space.shape)),
            hidden_sizes=(args.hidden_size,) * args.num_layers)
    else:
        policy = CategoricalMLPPolicy(
            int(np.prod(sampler.envs.observation_space.shape)),
            sampler.envs.action_space.n,
            hidden_sizes=(args.hidden_size,) * args.num_layers)
    baseline = LinearFeatureBaseline(
        int(np.prod(sampler.envs.observation_space.shape)))

    metalearner = MetaLearner(sampler, policy, baseline, gamma=args.gamma,
        fast_lr=args.fast_lr, tau=args.tau, device=args.device)

    for batch in range(args.num_batches):
        tasks = sampler.sample_tasks(num_tasks=args.meta_batch_size)
        # `episodes` holds one (before-update, after-update) pair of episode
        # batches per task, as the TensorBoard tags below indicate.
        episodes = metalearner.sample(tasks, first_order=args.first_order)
        metalearner.step(episodes, max_kl=args.max_kl, cg_iters=args.cg_iters,
            cg_damping=args.cg_damping, ls_max_steps=args.ls_max_steps,
            ls_backtrack_ratio=args.ls_backtrack_ratio)

        print(total_rewards([ep.rewards for _, ep in episodes]), batch)

        # Tensorboard
        writer.add_scalar('total_rewards/before_update',
            total_rewards([ep.rewards for ep, _ in episodes]), batch)
        writer.add_scalar('total_rewards/after_update',
            total_rewards([ep.rewards for _, ep in episodes]), batch)

        # Save policy network
        with open(os.path.join(save_folder,
                'policy-{0}.pt'.format(batch)), 'wb') as f:
            torch.save(policy.state_dict(), f)
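
# Loading a checkpoint later (a sketch; `obs_dim`, `action_dim` and the path
# below are placeholders, and the constructor arguments must match the ones
# used for training above):
#
#   policy = NormalMLPPolicy(obs_dim, action_dim, hidden_sizes=(100, 100))
#   with open('./saves/rnn/maml/policy-199.pt', 'rb') as f:
#       policy.load_state_dict(torch.load(f))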


if __name__ == '__main__':
    import argparse
    import multiprocessing as mp

    parser = argparse.ArgumentParser(description='Reinforcement learning with '
        'Model-Agnostic Meta-Learning (MAML)')

    # General
    parser.add_argument('--env-name', type=str,
        help='name of the environment')
    parser.add_argument('--gamma', type=float, default=0.95,
        help='value of the discount factor gamma')
    parser.add_argument('--tau', type=float, default=1.0,
        help='value of the discount factor for GAE')
    parser.add_argument('--first-order', action='store_true',
        help='use the first-order approximation of MAML')

    # Policy network (relu activation function)
    parser.add_argument('--hidden-size', type=int, default=100,
        help='number of hidden units per layer')
    parser.add_argument('--num-layers', type=int, default=2,
        help='number of hidden layers')

    # Task-specific
    parser.add_argument('--fast-batch-size', type=int, default=20,
        help='batch size for each individual task')
    parser.add_argument('--fast-lr', type=float, default=0.5,
        help='learning rate for the 1-step gradient update of MAML')

    # Optimization
    parser.add_argument('--num-batches', type=int, default=200,
        help='number of batches')
    parser.add_argument('--meta-batch-size', type=int, default=40,
        help='number of tasks per batch')
    parser.add_argument('--max-kl', type=float, default=1e-2,
        help='maximum value for the KL constraint in TRPO')
    parser.add_argument('--cg-iters', type=int, default=10,
        help='number of iterations of conjugate gradient')
    parser.add_argument('--cg-damping', type=float, default=1e-5,
        help='damping in conjugate gradient')
    parser.add_argument('--ls-max-steps', type=int, default=15,
        help='maximum number of iterations for line search')
    parser.add_argument('--ls-backtrack-ratio', type=float, default=0.8,
        help='backtracking ratio for line search')

    # Miscellaneous
    parser.add_argument('--output-folder', type=str, default='maml',
        help='name of the output folder')
    parser.add_argument('--num-workers', type=int, default=mp.cpu_count() - 1,
        help='number of workers for trajectories sampling')
    parser.add_argument('--device', type=str, default='cpu',
        help='set the device (cpu or cuda)')

    args = parser.parse_args()

    # Create logs and saves folders if they don't exist
    if not os.path.exists('./logs/rnn'):
        os.makedirs('./logs/rnn')
    if not os.path.exists('./data/rnn'):
        os.makedirs('./data/rnn')
    if not os.path.exists('./saves/rnn'):
        os.makedirs('./saves/rnn')

    # Device
    args.device = torch.device(args.device
        if torch.cuda.is_available() else 'cpu')

    # Slurm
    if 'SLURM_JOB_ID' in os.environ:
        args.output_folder += '-{0}'.format(os.environ['SLURM_JOB_ID'])

    main(args)
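
# Example invocation (a sketch; assumes the environments listed in `main`,
# e.g. 2DNavigation-v0, are registered on `import maml_rl.envs`):
#
#   python rnn_main.py --env-name 2DNavigation-v0 --num-workers 8 \
#       --fast-lr 0.5 --max-kl 0.01 --fast-batch-size 20 --meta-batch-size 40 \
#       --num-batches 200 --output-folder maml-2dnav --device cuda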