diff --git a/tutorials/lm_dpsgd_tutorial.py b/tutorials/lm_dpsgd_tutorial.py
index 0f840200..e3302b87 100644
--- a/tutorials/lm_dpsgd_tutorial.py
+++ b/tutorials/lm_dpsgd_tutorial.py
@@ -73,8 +73,8 @@ def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
   x = tf.reshape(x, [-1, SEQ_LEN])
   input_layer = x[:, :-1]
   input_one_hot = tf.one_hot(input_layer, 256)
-  lstm = tf.keras.layers.LSTM(256, return_sequences=True).apply(input_one_hot)
-  logits = tf.keras.layers.Dense(256).apply(lstm)
+  lstm = tf.keras.layers.LSTM(256, return_sequences=True)(input_one_hot)
+  logits = tf.keras.layers.Dense(256)(lstm)
 
   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.softmax_cross_entropy_with_logits(
diff --git a/tutorials/mnist_dpsgd_tutorial.py b/tutorials/mnist_dpsgd_tutorial.py
index 3ba706c2..fa83e429 100644
--- a/tutorials/mnist_dpsgd_tutorial.py
+++ b/tutorials/mnist_dpsgd_tutorial.py
@@ -85,7 +85,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
     return tf_estimator.EstimatorSpec(
diff --git a/tutorials/mnist_dpsgd_tutorial_common.py b/tutorials/mnist_dpsgd_tutorial_common.py
index a4fb2c0e..dac0cb23 100644
--- a/tutorials/mnist_dpsgd_tutorial_common.py
+++ b/tutorials/mnist_dpsgd_tutorial_common.py
@@ -21,14 +21,16 @@ def get_cnn_model(features):
   """Given input features, returns the logits from a simple CNN model."""
   input_layer = tf.reshape(features, [-1, 28, 28, 1])
   y = tf.keras.layers.Conv2D(
-      16, 8, strides=2, padding='same', activation='relu').apply(input_layer)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
+      16, 8, strides=2, padding='same', activation='relu')(
+          input_layer)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
   y = tf.keras.layers.Conv2D(
-      32, 4, strides=2, padding='valid', activation='relu').apply(y)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
-  y = tf.keras.layers.Flatten().apply(y)
-  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
-  logits = tf.keras.layers.Dense(10).apply(y)
+      32, 4, strides=2, padding='valid', activation='relu')(
+          y)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
+  y = tf.keras.layers.Flatten()(y)
+  y = tf.keras.layers.Dense(32, activation='relu')(y)
+  logits = tf.keras.layers.Dense(10)(y)
   return logits
 
 
diff --git a/tutorials/mnist_dpsgd_tutorial_vectorized.py b/tutorials/mnist_dpsgd_tutorial_vectorized.py
index 21f6163a..e02bff5d 100644
--- a/tutorials/mnist_dpsgd_tutorial_vectorized.py
+++ b/tutorials/mnist_dpsgd_tutorial_vectorized.py
@@ -70,14 +70,16 @@ def cnn_model_fn(features, labels, mode):
   # Define CNN architecture using tf.keras.layers.
   input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
   y = tf.keras.layers.Conv2D(
-      16, 8, strides=2, padding='same', activation='relu').apply(input_layer)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
+      16, 8, strides=2, padding='same', activation='relu')(
+          input_layer)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
   y = tf.keras.layers.Conv2D(
-      32, 4, strides=2, padding='valid', activation='relu').apply(y)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
-  y = tf.keras.layers.Flatten().apply(y)
-  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
-  logits = tf.keras.layers.Dense(10).apply(y)
+      32, 4, strides=2, padding='valid', activation='relu')(
+          y)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
+  y = tf.keras.layers.Flatten()(y)
+  y = tf.keras.layers.Dense(32, activation='relu')(y)
+  logits = tf.keras.layers.Dense(10)(y)
 
   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
@@ -115,7 +117,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
 
diff --git a/tutorials/mnist_lr_tutorial.py b/tutorials/mnist_lr_tutorial.py
index 89546df4..fce9ba76 100644
--- a/tutorials/mnist_lr_tutorial.py
+++ b/tutorials/mnist_lr_tutorial.py
@@ -56,12 +56,12 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   logits = tf.keras.layers.Dense(
       units=nclasses,
       kernel_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer),
-      bias_regularizer=tf.keras.regularizers.L2(
-          l2=FLAGS.regularizer)).apply(input_layer)
+      bias_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer))(
+          input_layer)
 
   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      labels=labels, logits=logits) + tf.losses.get_regularization_loss()
+      labels, logits) + tf.compat.v1.losses.get_regularization_loss()
 
   # Define mean of loss across minibatch (for reporting through tf.Estimator).
   scalar_loss = tf.reduce_mean(vector_loss)
@@ -94,7 +94,7 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
     return tf_estimator.EstimatorSpec(
@@ -165,7 +165,7 @@ def print_privacy_guarantees(epochs, batch_size, samples, noise_multiplier):
     # Using RDP accountant to compute eps. Doing computation analytically is
     # an option.
     rdp = [order * coef for order in orders]
-    eps = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
+    eps, _ = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
     print('\t{:g}% enjoy at least ({:.2f}, {})-DP'.format(p * 100, eps, delta))
 
   accountant = dp_accounting.rdp.RdpAccountant(orders)
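For context on the recurring changes: `Layer.apply` was a deprecated alias for calling a Keras layer object directly and was later removed, so every call site switches to `layer(inputs)`. The v1 graph-mode symbols `tf.metrics.accuracy` and `tf.losses.get_regularization_loss` survive only under the `tf.compat.v1` namespace in TF 2.x, hence the namespace updates in the Estimator model functions. And as the last hunk shows, `dp_accounting.rdp.compute_epsilon` returns a pair (epsilon together with the RDP order attaining it), hence the `eps, _ = ...` unpacking. Below is a minimal sketch of the layer-call migration, assuming TF 2.x; the `Dense` layer and dummy shapes are illustrative, not taken from the tutorials.

    import tensorflow as tf

    # Deprecated pattern removed by this diff:
    #   logits = layer.apply(x)   # Layer.apply was an alias for __call__
    #
    # Supported pattern: invoke the layer object directly. Layer.__call__
    # builds the layer's weights on first use, then runs the forward pass.
    layer = tf.keras.layers.Dense(10)
    x = tf.ones([2, 32])   # dummy batch: 2 examples, 32 features
    logits = layer(x)      # replaces: layer.apply(x)
    print(logits.shape)    # (2, 10)

Because `__call__` and `apply` were behaviorally identical, the rewrite is purely mechanical and changes no computation; the Pythonic line wrapping in the hunks above is just the formatter reflowing the longer call expressions.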