Skip to content
This repository has been archived by the owner on Nov 23, 2023. It is now read-only.

Commit

Permalink
add multi-gpu support on Baseline, Affinity and AAF
Browse files Browse the repository at this point in the history
  • Loading branch information
buttomnutstoast committed Aug 24, 2018
1 parent 4e10b49 commit e3d5855
Show file tree
Hide file tree
Showing 10 changed files with 2,262 additions and 0 deletions.
144 changes: 144 additions & 0 deletions bashscripts/voc12/train_pspnet_aaf_mgpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
#!/bin/bash
# This script is used for training, inference and benchmarking
# the Adaptive Affinity Fields method with PSPNet on PASCAL VOC
# 2012 with multi-gpus. Users could also modify from this script
# for their use case.
#
# Usage:
# # From Adaptive_Affinity_Fields/ directory.
# bash bashscripts/voc12/train_pspnet_aaf_mgpu.sh
#
#

# Fail fast: abort the whole pipeline if any stage exits non-zero,
# an unset variable is referenced, or a piped command fails.
set -euo pipefail

# Set up parameters for training.
BATCH_SIZE=16
TRAIN_INPUT_SIZE=480,480     # H,W crop size fed to the network.
WEIGHT_DECAY=5e-4
ITER_SIZE=1                  # gradient-accumulation steps per update.
NUM_STEPS=30000              # also names the checkpoint: model.ckpt-<NUM_STEPS>.
NUM_CLASSES=21               # 20 VOC object classes + background.
KLD_MARGIN=3.0
KLD_LAMBDA_1=1.0
KLD_LAMBDA_2=1.0
NUM_GPU=4

# Set up parameters for inference.
INFERENCE_INPUT_SIZE=480,480
INFERENCE_STRIDES=320,320    # sliding-window stride at inference time.
INFERENCE_SPLIT=val

# Set up path for saving models.
SNAPSHOT_DIR=snapshots/voc12/pspnet_aaf/p480_bs16_lr1e-3_kld3e0_it30k

# Set up the procedure pipeline (1 = run the step, 0 = skip it).
IS_TRAIN_1=1
IS_INFERENCE_1=1
IS_BENCHMARK_1=1
IS_TRAIN_2=1
IS_INFERENCE_2=1
IS_BENCHMARK_2=1

# Update PYTHONPATH so pyscripts/ modules resolve from the repo root.
# $(pwd) replaces legacy backticks; ${PYTHONPATH:-} keeps this safe
# under 'set -u' when PYTHONPATH is not already exported.
export PYTHONPATH="$(pwd):${PYTHONPATH:-}"

# Set up the data directory.
DATAROOT=/path/to/data

# Train for the 1st stage: start from the ImageNet-pretrained
# ResNet-101 backbone on the augmented train+ split.
if [ "${IS_TRAIN_1}" -eq 1 ]; then
  stage1_train_args=(
    --snapshot-dir "${SNAPSHOT_DIR}/stage1"
    --restore-from snapshots/imagenet/trained/resnet_v1_101.ckpt
    --data-list dataset/voc12/train+.txt
    --data-dir "${DATAROOT}/VOCdevkit/"
    --batch-size "${BATCH_SIZE}"
    --save-pred-every "${NUM_STEPS}"
    --update-tb-every 50
    --input-size "${TRAIN_INPUT_SIZE}"
    --learning-rate 1e-3
    --weight-decay "${WEIGHT_DECAY}"
    --iter-size "${ITER_SIZE}"
    --num-classes "${NUM_CLASSES}"
    --num-steps "$((NUM_STEPS + 1))"
    --num-gpu "${NUM_GPU}"
    --random-mirror
    --random-scale
    --random-crop
    --kld-margin "${KLD_MARGIN}"
    --kld-lambda-1 "${KLD_LAMBDA_1}"
    --kld-lambda-2 "${KLD_LAMBDA_2}"
    --not-restore-classifier
    --is-training
  )
  python3 pyscripts/train/train_aaf_mgpu.py "${stage1_train_args[@]}"
fi

# Inference for the 1st stage: single-scale sliding-window prediction
# from the stage-1 checkpoint.
if [ "${IS_INFERENCE_1}" -eq 1 ]; then
  stage1_infer_args=(
    --data-dir "${DATAROOT}/VOCdevkit/"
    --data-list "dataset/voc12/${INFERENCE_SPLIT}.txt"
    --input-size "${INFERENCE_INPUT_SIZE}"
    --strides "${INFERENCE_STRIDES}"
    --restore-from "${SNAPSHOT_DIR}/stage1/model.ckpt-${NUM_STEPS}"
    --colormap misc/colormapvoc.mat
    --num-classes "${NUM_CLASSES}"
    --ignore-label 255
    --save-dir "${SNAPSHOT_DIR}/stage1/results/${INFERENCE_SPLIT}"
  )
  python3 pyscripts/inference/inference.py "${stage1_infer_args[@]}"
fi

# Benchmark for the 1st stage: score stage-1 predictions against the
# VOC ground-truth masks by mean IoU.
if [ "${IS_BENCHMARK_1}" -eq 1 ]; then
  python3 pyscripts/benchmark/benchmark_by_mIoU.py \
    --pred-dir "${SNAPSHOT_DIR}/stage1/results/${INFERENCE_SPLIT}/gray/" \
    --gt-dir "${DATAROOT}/VOCdevkit/VOC2012/segcls/" \
    --num-classes "${NUM_CLASSES}"
fi

# Train for the 2nd stage: fine-tune the stage-1 checkpoint on the
# clean train split with a 10x lower learning rate.
if [ "${IS_TRAIN_2}" -eq 1 ]; then
  python3 pyscripts/train/train_aaf_mgpu.py \
    --snapshot-dir "${SNAPSHOT_DIR}/stage2" \
    --restore-from "${SNAPSHOT_DIR}/stage1/model.ckpt-${NUM_STEPS}" \
    --data-list dataset/voc12/train.txt \
    --data-dir "${DATAROOT}/VOCdevkit/" \
    --batch-size "${BATCH_SIZE}" \
    --save-pred-every "${NUM_STEPS}" \
    --update-tb-every 50 \
    --input-size "${TRAIN_INPUT_SIZE}" \
    --learning-rate 1e-4 \
    --weight-decay "${WEIGHT_DECAY}" \
    --iter-size "${ITER_SIZE}" \
    --num-classes "${NUM_CLASSES}" \
    --num-steps "$((NUM_STEPS + 1))" \
    --num-gpu "${NUM_GPU}" \
    --random-mirror \
    --random-scale \
    --random-crop \
    --kld-margin "${KLD_MARGIN}" \
    --kld-lambda-1 "${KLD_LAMBDA_1}" \
    --kld-lambda-2 "${KLD_LAMBDA_2}" \
    --is-training
fi

# Inference for the 2nd stage: multi-scale prediction with flip and
# scale augmentation from the stage-2 checkpoint.
if [ "${IS_INFERENCE_2}" -eq 1 ]; then
  stage2_infer_args=(
    --data-dir "${DATAROOT}/VOCdevkit/"
    --data-list "dataset/voc12/${INFERENCE_SPLIT}.txt"
    --input-size "${INFERENCE_INPUT_SIZE}"
    --strides "${INFERENCE_STRIDES}"
    --restore-from "${SNAPSHOT_DIR}/stage2/model.ckpt-${NUM_STEPS}"
    --colormap misc/colormapvoc.mat
    --num-classes "${NUM_CLASSES}"
    --ignore-label 255
    --flip-aug
    --scale-aug
    --save-dir "${SNAPSHOT_DIR}/stage2/results/${INFERENCE_SPLIT}"
  )
  python3 pyscripts/inference/inference_msc.py "${stage2_infer_args[@]}"
fi

# Benchmark for the 2nd stage: score stage-2 predictions against the
# VOC ground-truth masks by mean IoU.
if [ "${IS_BENCHMARK_2}" -eq 1 ]; then
  python3 pyscripts/benchmark/benchmark_by_mIoU.py \
    --pred-dir "${SNAPSHOT_DIR}/stage2/results/${INFERENCE_SPLIT}/gray/" \
    --gt-dir "${DATAROOT}/VOCdevkit/VOC2012/segcls/" \
    --num-classes "${NUM_CLASSES}"
fi
144 changes: 144 additions & 0 deletions bashscripts/voc12/train_pspnet_aff_mgpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
#!/bin/bash
# This script is used for training, inference and benchmarking
# the Affinity Fields method with PSPNet on PASCAL VOC 2012
# with multi-gpus. Users could also modify from this script
# for their use case.
#
# Usage:
# # From Adaptive_Affinity_Fields/ directory.
# bash bashscripts/voc12/train_pspnet_aff_mgpu.sh
#
#

# Fail fast: abort the whole pipeline if any stage exits non-zero,
# an unset variable is referenced, or a piped command fails.
set -euo pipefail

# Set up parameters for training.
BATCH_SIZE=16
TRAIN_INPUT_SIZE=480,480     # H,W crop size fed to the network.
WEIGHT_DECAY=5e-4
ITER_SIZE=1                  # gradient-accumulation steps per update.
NUM_STEPS=30000              # also names the checkpoint: model.ckpt-<NUM_STEPS>.
NUM_CLASSES=21               # 20 VOC object classes + background.
KLD_MARGIN=3.0
KLD_LAMBDA_1=1.0
KLD_LAMBDA_2=1.0
NUM_GPU=4

# Set up parameters for inference.
INFERENCE_INPUT_SIZE=480,480
INFERENCE_STRIDES=320,320    # sliding-window stride at inference time.
INFERENCE_SPLIT=val

# Set up path for saving models.
SNAPSHOT_DIR=snapshots/voc12/pspnet_aff/p480_bs16_lr1e-3_kld3e0_it30k

# Set up the procedure pipeline (1 = run the step, 0 = skip it).
IS_TRAIN_1=1
IS_INFERENCE_1=1
IS_BENCHMARK_1=1
IS_TRAIN_2=1
IS_INFERENCE_2=1
IS_BENCHMARK_2=1

# Update PYTHONPATH so pyscripts/ modules resolve from the repo root.
# $(pwd) replaces legacy backticks; ${PYTHONPATH:-} keeps this safe
# under 'set -u' when PYTHONPATH is not already exported.
export PYTHONPATH="$(pwd):${PYTHONPATH:-}"

# Set up the data directory.
DATAROOT=/path/to/data

# Train for the 1st stage: start from the ImageNet-pretrained
# ResNet-101 backbone on the augmented train+ split.
if [ "${IS_TRAIN_1}" -eq 1 ]; then
  stage1_train_args=(
    --snapshot-dir "${SNAPSHOT_DIR}/stage1"
    --restore-from snapshots/imagenet/trained/resnet_v1_101.ckpt
    --data-list dataset/voc12/train+.txt
    --data-dir "${DATAROOT}/VOCdevkit/"
    --batch-size "${BATCH_SIZE}"
    --save-pred-every "${NUM_STEPS}"
    --update-tb-every 50
    --input-size "${TRAIN_INPUT_SIZE}"
    --learning-rate 1e-3
    --weight-decay "${WEIGHT_DECAY}"
    --iter-size "${ITER_SIZE}"
    --num-classes "${NUM_CLASSES}"
    --num-steps "$((NUM_STEPS + 1))"
    --num-gpu "${NUM_GPU}"
    --random-mirror
    --random-scale
    --random-crop
    --kld-margin "${KLD_MARGIN}"
    --kld-lambda-1 "${KLD_LAMBDA_1}"
    --kld-lambda-2 "${KLD_LAMBDA_2}"
    --not-restore-classifier
    --is-training
  )
  python3 pyscripts/train/train_aff_mgpu.py "${stage1_train_args[@]}"
fi

# Inference for the 1st stage: single-scale sliding-window prediction
# from the stage-1 checkpoint.
if [ "${IS_INFERENCE_1}" -eq 1 ]; then
  stage1_infer_args=(
    --data-dir "${DATAROOT}/VOCdevkit/"
    --data-list "dataset/voc12/${INFERENCE_SPLIT}.txt"
    --input-size "${INFERENCE_INPUT_SIZE}"
    --strides "${INFERENCE_STRIDES}"
    --restore-from "${SNAPSHOT_DIR}/stage1/model.ckpt-${NUM_STEPS}"
    --colormap misc/colormapvoc.mat
    --num-classes "${NUM_CLASSES}"
    --ignore-label 255
    --save-dir "${SNAPSHOT_DIR}/stage1/results/${INFERENCE_SPLIT}"
  )
  python3 pyscripts/inference/inference.py "${stage1_infer_args[@]}"
fi

# Benchmark for the 1st stage: score stage-1 predictions against the
# VOC ground-truth masks by mean IoU.
if [ "${IS_BENCHMARK_1}" -eq 1 ]; then
  python3 pyscripts/benchmark/benchmark_by_mIoU.py \
    --pred-dir "${SNAPSHOT_DIR}/stage1/results/${INFERENCE_SPLIT}/gray/" \
    --gt-dir "${DATAROOT}/VOCdevkit/VOC2012/segcls/" \
    --num-classes "${NUM_CLASSES}"
fi

# Train for the 2nd stage: fine-tune the stage-1 checkpoint on the
# clean train split with a 10x lower learning rate.
if [ "${IS_TRAIN_2}" -eq 1 ]; then
  python3 pyscripts/train/train_aff_mgpu.py \
    --snapshot-dir "${SNAPSHOT_DIR}/stage2" \
    --restore-from "${SNAPSHOT_DIR}/stage1/model.ckpt-${NUM_STEPS}" \
    --data-list dataset/voc12/train.txt \
    --data-dir "${DATAROOT}/VOCdevkit/" \
    --batch-size "${BATCH_SIZE}" \
    --save-pred-every "${NUM_STEPS}" \
    --update-tb-every 50 \
    --input-size "${TRAIN_INPUT_SIZE}" \
    --learning-rate 1e-4 \
    --weight-decay "${WEIGHT_DECAY}" \
    --iter-size "${ITER_SIZE}" \
    --num-classes "${NUM_CLASSES}" \
    --num-steps "$((NUM_STEPS + 1))" \
    --num-gpu "${NUM_GPU}" \
    --random-mirror \
    --random-scale \
    --random-crop \
    --kld-margin "${KLD_MARGIN}" \
    --kld-lambda-1 "${KLD_LAMBDA_1}" \
    --kld-lambda-2 "${KLD_LAMBDA_2}" \
    --is-training
fi

# Inference for the 2nd stage: multi-scale prediction with flip and
# scale augmentation from the stage-2 checkpoint.
if [ "${IS_INFERENCE_2}" -eq 1 ]; then
  stage2_infer_args=(
    --data-dir "${DATAROOT}/VOCdevkit/"
    --data-list "dataset/voc12/${INFERENCE_SPLIT}.txt"
    --input-size "${INFERENCE_INPUT_SIZE}"
    --strides "${INFERENCE_STRIDES}"
    --restore-from "${SNAPSHOT_DIR}/stage2/model.ckpt-${NUM_STEPS}"
    --colormap misc/colormapvoc.mat
    --num-classes "${NUM_CLASSES}"
    --ignore-label 255
    --flip-aug
    --scale-aug
    --save-dir "${SNAPSHOT_DIR}/stage2/results/${INFERENCE_SPLIT}"
  )
  python3 pyscripts/inference/inference_msc.py "${stage2_infer_args[@]}"
fi

# Benchmark for the 2nd stage: score stage-2 predictions against the
# VOC ground-truth masks by mean IoU.
if [ "${IS_BENCHMARK_2}" -eq 1 ]; then
  python3 pyscripts/benchmark/benchmark_by_mIoU.py \
    --pred-dir "${SNAPSHOT_DIR}/stage2/results/${INFERENCE_SPLIT}/gray/" \
    --gt-dir "${DATAROOT}/VOCdevkit/VOC2012/segcls/" \
    --num-classes "${NUM_CLASSES}"
fi
Loading

0 comments on commit e3d5855

Please sign in to comment.