Commit 3275d13

Save before modify to autoaug version.

feichi committed Aug 21, 2020
1 parent f6c86de · commit 3275d13
Showing 14 changed files with 5,559 additions and 12 deletions.
10 changes: 5 additions & 5 deletions branch_num_exp.sh

@@ -1,14 +1,14 @@
 srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=branch1 python -u random_search.py \
-  --opnum 1 --opdepth 1 --policybatch 5 --attackbatch 32 --datasplit 500 --log branch_1.log &
+  --opnum 1 --opdepth 1 --policybatch 3 --attackbatch 32 --datasplit 500 --log branch_1_new.log &

 srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=branch3 python -u random_search.py \
-  --opnum 3 --opdepth 1 --policybatch 5 --attackbatch 16 --datasplit 500 --log branch_3.log &
+  --opnum 3 --opdepth 1 --policybatch 3 --attackbatch 16 --datasplit 500 --log branch_3_new.log &

 srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=branch5 python -u random_search.py \
-  --opnum 5 --opdepth 1 --policybatch 5 --attackbatch 8 --datasplit 500 --log branch_5.log &
+  --opnum 5 --opdepth 1 --policybatch 3 --attackbatch 8 --datasplit 500 --log branch_5_new.log &

 srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=branch7 python -u random_search.py \
-  --opnum 7 --opdepth 1 --policybatch 5 --attackbatch 8 --datasplit 500 --log branch_7.log &
+  --opnum 7 --opdepth 1 --policybatch 3 --attackbatch 8 --datasplit 500 --log branch_7_new.log &

 srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=branch9 python -u random_search.py \
-  --opnum 9 --opdepth 1 --policybatch 5 --attackbatch 4 --datasplit 500 --log branch_9.log &
+  --opnum 9 --opdepth 1 --policybatch 3 --attackbatch 4 --datasplit 500 --log branch_9_new.log &
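
Taken together, the sweep varies --opnum over 1/3/5/7/9 while shrinking --attackbatch as the policy grows, and this commit's edit to every job is the same: --policybatch drops from 5 to 3 and each run logs to a fresh *_new.log. For reference, a minimal sketch of the command-line interface these flags imply, assuming random_search.py parses them with argparse (help strings and defaults below are illustrative assumptions, not taken from the repo):

import argparse

def parse_args():
    # Flag names match the srun invocations in branch_num_exp.sh and
    # op_num_exp.sh; help text and defaults are assumed, not the repo's.
    parser = argparse.ArgumentParser(description="Random search over augmentation policies")
    parser.add_argument("--opnum", type=int, required=True, help="branches per policy")
    parser.add_argument("--opdepth", type=int, required=True, help="operations per branch")
    parser.add_argument("--policybatch", type=int, default=3, help="policies sampled per search epoch")
    parser.add_argument("--attackbatch", type=int, default=16, help="batch size used when evaluating rewards")
    parser.add_argument("--datasplit", type=int, default=500, help="number of samples used by the search")
    parser.add_argument("--log", type=str, default="search.log", help="log file for search progress")
    return parser.parse_args()

ARGS = parse_args()  # e.g. ARGS.policybatch, as read in random_search.py below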
1,050 changes: 1,050 additions & 0 deletions log/branch_1.log
500 changes: 500 additions & 0 deletions log/branch_1_new.log
500 changes: 500 additions & 0 deletions log/branch_3_new.log
500 changes: 500 additions & 0 deletions log/branch_5_new.log
395 changes: 395 additions & 0 deletions log/branch_7_new.log
365 changes: 365 additions & 0 deletions log/branch_9_new.log
500 changes: 500 additions & 0 deletions log/op_2.log
500 changes: 500 additions & 0 deletions log/op_3.log
500 changes: 500 additions & 0 deletions log/op_4.log
220 changes: 220 additions & 0 deletions log/op_4_back.log
500 changes: 500 additions & 0 deletions log/op_5.log

(Log file diffs are large and not rendered.)

12 changes: 12 additions & 0 deletions op_num_exp.sh

@@ -0,0 +1,12 @@
+
+#srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=op3 python -u random_search.py \
+#  --opnum 3 --opdepth 2 --policybatch 3 --attackbatch 16 --datasplit 500 --log op_2.log &
+
+#srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=op5 python -u random_search.py \
+#  --opnum 3 --opdepth 3 --policybatch 3 --attackbatch 16 --datasplit 500 --log op_3.log &
+
+srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=op7 python -u random_search.py \
+  --opnum 3 --opdepth 4 --policybatch 3 --attackbatch 16 --datasplit 500 --log op_4.log &
+
+#srun -p VI_Face_1080TI -N 1 --gres=gpu:8 --job-name=op9 python -u random_search.py \
+#  --opnum 3 --opdepth 5 --policybatch 3 --attackbatch 16 --datasplit 500 --log op_5.log &
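
Only the --opdepth 4 job (logging to op_4.log) is left active here; the commented-out depth-2, depth-3, and depth-5 runs correspond to the op_2.log, op_3.log, and op_5.log files checked in above, so those sweeps appear to have already finished. Note that the op3/op5/op7/op9 job names do not track the actual --opdepth values; they look like leftovers from copying branch_num_exp.sh.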
19 changes: 12 additions & 7 deletions random_search.py

@@ -40,14 +40,14 @@
 # Search process hyper parameters
 interval = 0.5
 sample_batch = ARGS.policybatch
-lr = 5
-epoch = 300
+lr = 2
+epoch = 100

-RESUME = False
-resume = 210
-policy = None
-best_policy = None
-best_reward = 13.447103881835938
+RESUME = True
+resume = 43
+policy = [[9, [0.0, 9, 7.0], [3, 5.0, 9], [5.0, 0, 3.0], [8, 0, 7.0]], [9, [4.0, 9, 0], [1.0, 0, 0], [8, 7.0, 0], [5, 0, 9]], [9, [0, 9, 0], [5.0, 9.0, 2.0], [2.0, 2.0, 5.0], [7.0, 3.0, 5.0]]]
+best_policy = [[9, [2, 9, 8], [0.0, 8, 9], [6.0, 4, 6.0], [7.0, -0.0, 9]], [8.0, [2, 9, 2.0], [0, 3, -0.0], [8, 9, 0], [6, 2.0, 9]], [9, [0, 7.0, 0], [4, 8, 6.0], [2.0, 2.0, 4], [6, 0.0, 7.0]]]
+best_reward = 9.170172293980917
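
The hard-coded resume state looks like a checkpoint of the active op_num_exp.sh run: resume = 43 picks up a run in progress, and both policy literals contain three branches of four operation triples each, matching --opnum 3 --opdepth 4. A quick shape check as a sketch; reading each branch as [branch weight, op1..op4] is an assumption inferred from the literals, not documented anywhere in the diff:

# `policy` is the hard-coded resume literal above; the [weight, op, op, op, op]
# reading of each branch is an assumption, not documented in the repo.
assert len(policy) == 3                        # --opnum branches
for branch in policy:
    weight, ops = branch[0], branch[1:]
    assert len(ops) == 4                       # --opdepth operations per branch
    assert all(len(op) == 3 for op in ops)     # each op is a 3-parameter triple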



@@ -242,6 +242,7 @@ def single_epoch(policy, reward_getter, lr=0.1, sample_batch=10):
 best_reward = 0
 policy = random_policy()
 resume = -1
+best_policy = policy.copy()

 reward_getter = RewardCal()

@@ -256,6 +257,10 @@ def single_epoch(policy, reward_getter, lr=0.1, sample_batch=10):
 policy, reward, epoch_best_policy, epoch_best_reward = single_epoch(policy, reward_getter,
                                                                     lr=lr, sample_batch=sample_batch)

+
+# Eval models are randomized; recalculate the best policy's reward.
+best_reward = reward_getter.get_reward(best_policy)
+
 if reward > best_reward:
     best_reward = reward
     best_policy = policy
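
Re-evaluating the incumbent best_policy each epoch means the reward > best_reward comparison pits challenger and incumbent against the same freshly randomized evaluation models, so a policy that once drew a lucky evaluation cannot hold the top spot forever; the price is one extra reward evaluation per epoch. The best_policy = policy.copy() initialization added above keeps this call well-defined on the first epoch, before anything has beaten the random starting policy.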
