
address review comments
tommadams committed Nov 20, 2019
1 parent c42da55 commit df51963
Showing 5 changed files with 20 additions and 8 deletions.
6 changes: 3 additions & 3 deletions dual_net.py
@@ -685,7 +685,7 @@ def freeze_graph(model_path, use_trt=False, trt_max_batch_size=8,
             max_workspace_size_bytes=1 << 29,
             precision_mode=trt_precision)

-    metadata = get_model_metadata({
+    metadata = make_model_metadata({
         'engine': 'tf',
         'use_trt': bool(use_trt),
     })
@@ -733,14 +733,14 @@ def freeze_graph_tpu(model_path):
     out_graph = tf.graph_util.convert_variables_to_constants(
         sess, sess.graph.as_graph_def(), output_names)

-    metadata = get_model_metadata({
+    metadata = make_model_metadata({
         'engine': 'tpu',
     })

     atomic_write_model(out_graph, metadata, model_path)


-def get_model_metadata(metadata):
+def make_model_metadata(metadata):
     for f in ['conv_width', 'fc_width', 'trunk_layers', 'use_SE', 'use_SE_bias',
               'use_swish', 'bool_features', 'input_features']:
         metadata[f] = getattr(FLAGS, f)
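The renamed helper's behavior is simple: it merges the network-architecture flags into the caller's dict, which atomic_write_model then stores alongside the frozen graph. A standalone Python sketch of that behavior (the FLAGS values are assumptions, and the return is added here because the diff truncates the body):

from types import SimpleNamespace

# Stand-in for the absl FLAGS object; these values are assumptions.
FLAGS = SimpleNamespace(conv_width=256, fc_width=256, trunk_layers=19,
                        use_SE=False, use_SE_bias=False, use_swish=False,
                        bool_features=False, input_features='agz')

def make_model_metadata(metadata):
    # Copy the architecture flags into the caller-supplied dict.
    for f in ['conv_width', 'fc_width', 'trunk_layers', 'use_SE', 'use_SE_bias',
              'use_swish', 'bool_features', 'input_features']:
        metadata[f] = getattr(FLAGS, f)
    return metadata  # added for the sketch; the hunk above cuts off here

print(make_model_metadata({'engine': 'tf', 'use_trt': False}))
# {'engine': 'tf', 'use_trt': False, 'conv_width': 256, 'fc_width': 256, ...}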
4 changes: 4 additions & 0 deletions ml_perf/scripts/bootstrap.sh
@@ -36,7 +36,11 @@ for var_name in flag_dir golden_chunk_dir holdout_dir log_dir model_dir \
                 selfplay_dir sgf_dir work_dir; do
   dir="${!var_name}"
   if [[ "${dir}" == gs://* ]]; then
+    # `gsutil rm -f` "helpfully" returns a non-zero error code if the requested
+    # target files don't exist.
+    set +e
     gsutil -m rm -rf "${dir}"/*
+    set -e
   else
     mkdir -p "${dir}"
     rm -rf "${dir}"/*
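The same tolerate-missing-files bracketing, sketched in Python for anyone porting this cleanup outside bash (clear_gcs_dir is a hypothetical helper, not repo code; check=False plays the role of set +e):

import subprocess

def clear_gcs_dir(gcs_dir):
    # Hypothetical helper. check=False mirrors the `set +e` / `set -e`
    # bracketing above: gsutil's non-zero exit for an already-empty
    # directory is tolerated instead of aborting the whole bootstrap.
    result = subprocess.run(['gsutil', '-m', 'rm', '-rf', gcs_dir + '/*'],
                            capture_output=True, text=True, check=False)
    if result.returncode != 0:
        print('ignoring gsutil exit %d: %s'
              % (result.returncode, result.stderr.strip()))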
4 changes: 2 additions & 2 deletions ml_perf/scripts/start_selfplay.sh
@@ -33,8 +33,8 @@ for device in {0..7}; do
   CUDA_VISIBLE_DEVICES="${device}" \
   ./bazel-bin/cc/concurrent_selfplay \
     --flagfile="${flag_dir}/selfplay.flags" \
-    --output_dir="${data_dir}/selfplay/\$MODEL/${device}" \
-    --holdout_dir="${data_dir}/holdout/\$MODEL/${device}" \
+    --output_dir="${selfplay_dir}/\$MODEL/${device}" \
+    --holdout_dir="${holdout_dir}/\$MODEL/${device}" \
     --model="${model_dir}/%d.pb" \
     --run_forever=1 \
     --abort_file=${abort_file} \
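The loop above launches one concurrent_selfplay process per GPU; \$MODEL is escaped so the shell passes it through literally rather than expanding it at launch time. A rough Python equivalent of that fan-out, with assumed bucket paths (not repo code):

import os
import subprocess

# Assumed stand-ins for the script's environment variables.
flag_dir = 'gs://bucket/flags'
selfplay_dir = 'gs://bucket/selfplay'
holdout_dir = 'gs://bucket/holdout'
model_dir = 'gs://bucket/models'

procs = []
for device in range(8):
    env = dict(os.environ, CUDA_VISIBLE_DEVICES=str(device))
    procs.append(subprocess.Popen(
        ['./bazel-bin/cc/concurrent_selfplay',
         '--flagfile=%s/selfplay.flags' % flag_dir,
         # '$MODEL' is passed through literally, as in the script.
         '--output_dir=%s/$MODEL/%d' % (selfplay_dir, device),
         '--holdout_dir=%s/$MODEL/%d' % (holdout_dir, device),
         '--model=%s/%%d.pb' % model_dir,
         '--run_forever=1'],
        env=env))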
2 changes: 1 addition & 1 deletion ml_perf/train_loop.py
@@ -61,7 +61,7 @@
                     'the training chunks as more threads are used to '
                     'compress the data. Using too many threads however could '
                     'slow down training time if each shard gets much smaller '
-                    'than around 100MB'.
+                    'than around 100MB.')

 flags.DEFINE_string('golden_chunk_dir', None, 'Training example directory.')
 flags.DEFINE_string('holdout_dir', None, 'Holdout example directory.')
12 changes: 10 additions & 2 deletions train.py
@@ -42,8 +42,9 @@
                      'once over training data.')

 flags.DEFINE_integer('num_examples', None,
-                     'Total number of examples passed. Used to calculate '
-                     'steps_to_train if it isn\'t set.')
+                     'Total number of input examples. This is only used if '
+                     'steps_to_train is not set. Requires that filter_amount '
+                     'is 1.0.')

 flags.DEFINE_integer('window_size', 500000,
                      'Number of games to include in the window')
@@ -68,6 +69,13 @@
     lambda flags: flags['use_tpu'] if flags['use_bt'] else True,
     '`use_bt` flag only valid with `use_tpu` as well')

+@flags.multi_flags_validator(
+    ['num_examples', 'steps_to_train', 'filter_amount'],
+    '`num_examples` requires `steps_to_train==0` and `filter_amount==1.0`')
+def _example_flags_validator(flags_dict):
+    if not flags_dict['num_examples']:
+        return True
+    return not flags_dict['steps_to_train'] and flags_dict['filter_amount'] == 1.0

 @flags.multi_flags_validator(
     ['use_bt', 'cbt_project', 'cbt_instance', 'cbt_table'],
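A self-contained sketch of how the new absl validator fires at startup (only the three relevant flags are re-declared here, with assumed defaults; train.py defines many more):

from absl import app, flags

FLAGS = flags.FLAGS

flags.DEFINE_integer('num_examples', None, 'Total number of input examples.')
flags.DEFINE_integer('steps_to_train', 0, 'Number of training steps.')
flags.DEFINE_float('filter_amount', 1.0, 'Fraction of examples kept.')

@flags.multi_flags_validator(
    ['num_examples', 'steps_to_train', 'filter_amount'],
    '`num_examples` requires `steps_to_train==0` and `filter_amount==1.0`')
def _example_flags_validator(flags_dict):
    if not flags_dict['num_examples']:
        return True
    return not flags_dict['steps_to_train'] and flags_dict['filter_amount'] == 1.0

def main(argv):
    print('flags OK:', FLAGS.num_examples)

if __name__ == '__main__':
    # `python demo.py --num_examples=1000 --filter_amount=0.5` aborts with an
    # IllegalFlagValueError; `--num_examples=1000` alone passes validation.
    app.run(main)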
