This repository has been archived by the owner on Aug 31, 2021. It is now read-only.

adding num_cores parameter to all the estimators
ilblackdragon committed Feb 14, 2016
1 parent 79eed03 commit 61ac580
Showing 3 changed files with 45 additions and 14 deletions.
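Each estimator simply forwards the new num_cores argument to the shared TensorFlowEstimator base class; the base-class side of the change is not part of this diff. As a rough sketch of what such a knob typically controls, assuming it caps TensorFlow's session thread pools via tf.ConfigProto (the helper name and wiring below are illustrative, not code from this commit):

import tensorflow as tf

def make_session(num_cores=4):
    # Hypothetical helper: limit both TensorFlow thread pools to num_cores.
    # The actual wiring lives in skflow's base estimator, which this diff does not show.
    config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cores,
        intra_op_parallelism_threads=num_cores)
    return tf.Session(config=config)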
25 changes: 20 additions & 5 deletions skflow/estimators/dnn.py
@@ -47,6 +47,7 @@ def exp_decay(global_step):
Setting this value allows consistency between reruns.
continue_training: when continue_training is True, once initialized the
model will be continually trained on every call of fit.
+ num_cores: Number of cores to be used. (default: 4)
early_stopping_rounds: Activates early stopping if this is not None.
Loss needs to decrease at least every <early_stopping_rounds>
round(s) to continue training. (default: None)
@@ -61,7 +62,7 @@ def exp_decay(global_step):
def __init__(self, hidden_units, n_classes, tf_master="", batch_size=32,
steps=200, optimizer="SGD", learning_rate=0.1,
class_weight=None,
- tf_random_seed=42, continue_training=False,
+ tf_random_seed=42, continue_training=False, num_cores=4,
verbose=1, early_stopping_rounds=None,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.hidden_units = hidden_units
@@ -71,7 +72,7 @@ def __init__(self, hidden_units, n_classes, tf_master="", batch_size=32,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, class_weight=class_weight,
tf_random_seed=tf_random_seed,
- continue_training=continue_training, verbose=verbose,
+ continue_training=continue_training, num_cores=num_cores, verbose=verbose,
early_stopping_rounds=early_stopping_rounds,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
@@ -121,14 +122,28 @@ def exp_decay(global_step):
Setting this value allows consistency between reruns.
continue_training: when continue_training is True, once initialized the
model will be continually trained on every call of fit.
+ num_cores: Number of cores to be used. (default: 4)
early_stopping_rounds: Activates early stopping if this is not None.
Loss needs to decrease at least every <early_stopping_rounds>
round(s) to continue training. (default: None)
"""
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
early_stopping_rounds: Activates early stopping if this is not None.
Loss needs to decrease at least every every <early_stopping_rounds>
round(s) to continue training. (default: None)
max_to_keep: The maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.
Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables the feature.
"""

def __init__(self, hidden_units, n_classes=0, tf_master="", batch_size=32,
steps=200, optimizer="SGD", learning_rate=0.1,
- tf_random_seed=42, continue_training=False,
+ tf_random_seed=42, continue_training=False, num_cores=4,
verbose=1, early_stopping_rounds=None,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.hidden_units = hidden_units
@@ -137,7 +152,7 @@ def __init__(self, hidden_units, n_classes=0, tf_master="", batch_size=32,
n_classes=n_classes, tf_master=tf_master,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, tf_random_seed=tf_random_seed,
- continue_training=continue_training, verbose=verbose,
+ continue_training=continue_training, num_cores=num_cores, verbose=verbose,
early_stopping_rounds=early_stopping_rounds,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
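For reference, a minimal usage sketch of the new argument with the DNN classifier defined in this file (assuming it is exported as skflow.TensorFlowDNNClassifier, as in skflow's README; the data is synthetic):

import numpy as np
import skflow

# Synthetic 3-class toy data.
X = np.random.rand(100, 10).astype(np.float32)
y = np.random.randint(0, 3, 100)

# num_cores caps the number of cores TensorFlow may use (default: 4).
classifier = skflow.TensorFlowDNNClassifier(
    hidden_units=[10, 20, 10], n_classes=3, steps=200, num_cores=2)
classifier.fit(X, y)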
8 changes: 4 additions & 4 deletions skflow/estimators/linear.py
@@ -26,14 +26,14 @@ class TensorFlowLinearRegressor(TensorFlowEstimator, RegressorMixin):

def __init__(self, n_classes=0, tf_master="", batch_size=32, steps=200, optimizer="SGD",
learning_rate=0.1, tf_random_seed=42, continue_training=False,
- verbose=1, early_stopping_rounds=None,
+ num_cores=4, verbose=1, early_stopping_rounds=None,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
super(TensorFlowLinearRegressor, self).__init__(
model_fn=models.linear_regression, n_classes=n_classes,
tf_master=tf_master,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, tf_random_seed=tf_random_seed,
- continue_training=continue_training,
+ continue_training=continue_training, num_cores=num_cores,
verbose=verbose, early_stopping_rounds=early_stopping_rounds,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
@@ -54,7 +54,7 @@ class TensorFlowLinearClassifier(TensorFlowEstimator, ClassifierMixin):

def __init__(self, n_classes, tf_master="", batch_size=32, steps=200, optimizer="SGD",
learning_rate=0.1, class_weight=None,
- tf_random_seed=42, continue_training=False,
+ tf_random_seed=42, continue_training=False, num_cores=4,
verbose=1, early_stopping_rounds=None,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
super(TensorFlowLinearClassifier, self).__init__(
@@ -63,7 +63,7 @@ def __init__(self, n_classes, tf_master="", batch_size=32, steps=200, optimizer=
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, class_weight=class_weight,
tf_random_seed=tf_random_seed,
- continue_training=continue_training,
+ continue_training=continue_training, num_cores=num_cores,
verbose=verbose, early_stopping_rounds=early_stopping_rounds,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
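The same argument threads through both linear estimators. A minimal sketch (class names are taken from this file; assuming they are also importable from the top-level skflow package, and using synthetic data):

import numpy as np
import skflow

# Synthetic regression data: y is a noisy linear function of X.
X = np.random.rand(100, 5).astype(np.float32)
y = X.dot(np.arange(1.0, 6.0)) + 0.1 * np.random.randn(100)

# Restrict TensorFlow to 2 cores instead of the default 4.
regressor = skflow.TensorFlowLinearRegressor(steps=200, num_cores=2)
regressor.fit(X, y)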
26 changes: 21 additions & 5 deletions skflow/estimators/rnn.py
@@ -61,6 +61,7 @@ def exp_decay(global_step):
Setting this value allows consistency between reruns.
continue_training: when continue_training is True, once initialized the
model will be continually trained on every call of fit.
+ num_cores: Number of cores to be used. (default: 4)
early_stopping_rounds: Activates early stopping if this is not None.
Loss needs to decrease at least every <early_stopping_rounds>
round(s) to continue training. (default: None)
@@ -79,7 +80,7 @@ def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
steps=50, optimizer="SGD", learning_rate=0.1,
class_weight=None,
tf_random_seed=42, continue_training=False,
- verbose=1, early_stopping_rounds=None,
+ num_cores=4, verbose=1, early_stopping_rounds=None,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.rnn_size = rnn_size
self.cell_type = cell_type
@@ -94,7 +95,8 @@ def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, class_weight=class_weight,
tf_random_seed=tf_random_seed,
- continue_training=continue_training, verbose=verbose,
+ continue_training=continue_training, num_cores=num_cores,
+ verbose=verbose,
early_stopping_rounds=early_stopping_rounds,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
@@ -150,18 +152,32 @@ def exp_decay(global_step):
Setting this value allows consistency between reruns.
continue_training: when continue_training is True, once initialized the
model will be continually trained on every call of fit.
+ num_cores: Number of cores to be used. (default: 4)
early_stopping_rounds: Activates early stopping if this is not None.
Loss needs to decrease at least every <early_stopping_rounds>
round(s) to continue training. (default: None)
"""
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
early_stopping_rounds: Activates early stopping if this is not None.
Loss needs to decrease at least every every <early_stopping_rounds>
round(s) to continue training. (default: None)
max_to_keep: The maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.
Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables the feature.
"""

def __init__(self, rnn_size, cell_type='gru', num_layers=1,
input_op_fn=null_input_op_fn, initial_state=None,
bidirectional=False, sequence_length=None,
n_classes=0, tf_master="", batch_size=32,
steps=50, optimizer="SGD", learning_rate=0.1,
tf_random_seed=42, continue_training=False,
- verbose=1, early_stopping_rounds=None,
+ num_cores=4, verbose=1, early_stopping_rounds=None,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.rnn_size = rnn_size
self.cell_type = cell_type
@@ -175,7 +191,7 @@ def __init__(self, rnn_size, cell_type='gru', num_layers=1,
n_classes=n_classes, tf_master=tf_master,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, tf_random_seed=tf_random_seed,
- continue_training=continue_training, verbose=verbose,
+ continue_training=continue_training, num_cores=num_cores, verbose=verbose,
early_stopping_rounds=early_stopping_rounds,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
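The RNN estimators gain the same parameter. A constructor-only sketch (the class name TensorFlowRNNClassifier and the input_op_fn below are assumptions for illustration; neither is shown in this diff):

import tensorflow as tf
import skflow

def input_op_fn(X):
    # Illustrative: split a [batch, 5, 10] input into a list of 5 time-step
    # tensors, the form the TF 0.x rnn helpers expect.
    word_list = tf.split(1, 5, X)
    return [tf.squeeze(w, [1]) for w in word_list]

classifier = skflow.TensorFlowRNNClassifier(
    rnn_size=32, n_classes=2, cell_type='gru', num_layers=1,
    input_op_fn=input_op_fn, steps=50, num_cores=2)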
