
Commit 44dc404
Minor fix to tutorials.
PiperOrigin-RevId: 463145196
shs037 authored and tensorflower-gardener committed Jul 25, 2022
1 parent d16f020 commit 44dc404
Showing 5 changed files with 27 additions and 23 deletions.
tutorials/lm_dpsgd_tutorial.py: 4 changes (2 additions, 2 deletions)
@@ -73,8 +73,8 @@ def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
   x = tf.reshape(x, [-1, SEQ_LEN])
   input_layer = x[:, :-1]
   input_one_hot = tf.one_hot(input_layer, 256)
-  lstm = tf.keras.layers.LSTM(256, return_sequences=True).apply(input_one_hot)
-  logits = tf.keras.layers.Dense(256).apply(lstm)
+  lstm = tf.keras.layers.LSTM(256, return_sequences=True)(input_one_hot)
+  logits = tf.keras.layers.Dense(256)(lstm)
 
   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.softmax_cross_entropy_with_logits(
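Note: every Keras change in this commit is the same migration. Layer.apply(x) was a deprecated alias for calling the layer directly, and newer TensorFlow releases drop it; layer(x) invokes __call__ and is behaviorally identical. A minimal sketch of the two styles (hypothetical tensors, not from the tutorial):

import tensorflow as tf

dense = tf.keras.layers.Dense(256)
x = tf.keras.Input(shape=(128,))

# Deprecated alias, removed in newer TF releases:
#   y = dense.apply(x)
# Supported call style, same behavior:
y = dense(x)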
tutorials/mnist_dpsgd_tutorial.py: 2 changes (1 addition, 1 deletion)
@@ -85,7 +85,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
     return tf_estimator.EstimatorSpec(
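The other recurring migration: TF1 symbols such as tf.metrics.accuracy are only reachable via tf.compat.v1 under TF2 (where tf.metrics holds the Keras metric classes instead). tf.compat.v1.metrics.accuracy is a streaming metric returning a (value, update_op) pair, the shape Estimator's eval_metric_ops expects. A standalone sketch, assuming TF1 graph mode:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

labels = tf.constant([1, 0, 1])
logits = tf.constant([[0.2, 0.8], [0.9, 0.1], [0.4, 0.6]])

# Streaming metric: returns the current accuracy value plus an op
# that updates the running total/count when executed.
accuracy, update_op = tf.compat.v1.metrics.accuracy(
    labels=labels, predictions=tf.argmax(input=logits, axis=1))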
tutorials/mnist_dpsgd_tutorial_common.py: 16 changes (9 additions, 7 deletions)
@@ -21,14 +21,16 @@ def get_cnn_model(features):
   """Given input features, returns the logits from a simple CNN model."""
   input_layer = tf.reshape(features, [-1, 28, 28, 1])
   y = tf.keras.layers.Conv2D(
-      16, 8, strides=2, padding='same', activation='relu').apply(input_layer)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
+      16, 8, strides=2, padding='same', activation='relu')(
+          input_layer)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
   y = tf.keras.layers.Conv2D(
-      32, 4, strides=2, padding='valid', activation='relu').apply(y)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
-  y = tf.keras.layers.Flatten().apply(y)
-  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
-  logits = tf.keras.layers.Dense(10).apply(y)
+      32, 4, strides=2, padding='valid', activation='relu')(
+          y)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
+  y = tf.keras.layers.Flatten()(y)
+  y = tf.keras.layers.Dense(32, activation='relu')(y)
+  logits = tf.keras.layers.Dense(10)(y)
 
   return logits

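The odd-looking breaks after ')(' are just the autoformatter wrapping the now-longer call expressions. For orientation, the same stack can be written compactly; a sketch (not part of the commit) of an equivalent Sequential model:

import tensorflow as tf

# Same layer stack as get_cnn_model; the tutorial keeps the functional
# calls because it applies them to an existing features tensor.
cnn = tf.keras.Sequential([
    tf.keras.layers.Reshape((28, 28, 1)),
    tf.keras.layers.Conv2D(16, 8, strides=2, padding='same',
                           activation='relu'),
    tf.keras.layers.MaxPool2D(2, 1),
    tf.keras.layers.Conv2D(32, 4, strides=2, padding='valid',
                           activation='relu'),
    tf.keras.layers.MaxPool2D(2, 1),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(10),  # logits
])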
tutorials/mnist_dpsgd_tutorial_vectorized.py: 18 changes (10 additions, 8 deletions)
@@ -70,14 +70,16 @@ def cnn_model_fn(features, labels, mode):
   # Define CNN architecture using tf.keras.layers.
   input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
   y = tf.keras.layers.Conv2D(
-      16, 8, strides=2, padding='same', activation='relu').apply(input_layer)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
+      16, 8, strides=2, padding='same', activation='relu')(
+          input_layer)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
   y = tf.keras.layers.Conv2D(
-      32, 4, strides=2, padding='valid', activation='relu').apply(y)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
-  y = tf.keras.layers.Flatten().apply(y)
-  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
-  logits = tf.keras.layers.Dense(10).apply(y)
+      32, 4, strides=2, padding='valid', activation='relu')(
+          y)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
+  y = tf.keras.layers.Flatten()(y)
+  y = tf.keras.layers.Dense(32, activation='relu')(y)
+  logits = tf.keras.layers.Dense(10)(y)
 
   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
@@ -115,7 +117,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }

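The context lines show why both MNIST tutorials keep vector_loss unreduced: DP-SGD clips each example's (or microbatch's) gradient separately before noise is added, so the loss must stay per-example. A minimal sketch with hypothetical values:

import tensorflow as tf

labels = tf.constant([3, 1])        # integer class ids, shape [2]
logits = tf.random.normal([2, 10])  # per-example logits

# One loss per example; DP-SGD needs this unreduced so per-example
# gradients can be clipped before aggregation and noising.
vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits)   # shape [2]

# The scalar mean is only for reporting through the Estimator.
scalar_loss = tf.reduce_mean(vector_loss)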
tutorials/mnist_lr_tutorial.py: 10 changes (5 additions, 5 deletions)
@@ -56,12 +56,12 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   logits = tf.keras.layers.Dense(
       units=nclasses,
       kernel_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer),
-      bias_regularizer=tf.keras.regularizers.L2(
-          l2=FLAGS.regularizer)).apply(input_layer)
+      bias_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer))(
+          input_layer)
 
   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      labels=labels, logits=logits) + tf.losses.get_regularization_loss()
+      labels, logits) + tf.compat.v1.losses.get_regularization_loss()
   # Define mean of loss across minibatch (for reporting through tf.Estimator).
   scalar_loss = tf.reduce_mean(vector_loss)
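tf.losses.get_regularization_loss is the same compat migration as tf.metrics: it sums every regularization term registered on the default graph, here the L2 penalties attached to the Dense layer's kernel and bias. A sketch mirroring the tutorial's structure, with an illustrative weight standing in for FLAGS.regularizer and TF1 graph mode assumed:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

input_layer = tf.compat.v1.placeholder(tf.float32, [None, 784])
logits = tf.keras.layers.Dense(
    units=10,
    kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
    bias_regularizer=tf.keras.regularizers.L2(l2=0.01))(input_layer)

# Sums all regularization terms registered on the default graph,
# including the two L2 penalties above.
reg_loss = tf.compat.v1.losses.get_regularization_loss()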

@@ -94,7 +94,7 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
     return tf_estimator.EstimatorSpec(
@@ -165,7 +165,7 @@ def print_privacy_guarantees(epochs, batch_size, samples, noise_multiplier):
   # Using RDP accountant to compute eps. Doing computation analytically is
   # an option.
   rdp = [order * coef for order in orders]
-  eps = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
+  eps, _ = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
   print('\t{:g}% enjoy at least ({:.2f}, {})-DP'.format(p * 100, eps, delta))
 
   accountant = dp_accounting.rdp.RdpAccountant(orders)
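The final hunk is the one genuine bug fix rather than a rename: dp_accounting.rdp.compute_epsilon returns a pair (epsilon plus, per the dp_accounting RDP accountant, the order achieving that bound), so the old code bound the whole tuple to eps and the print would have formatted a tuple instead of a number. A minimal sketch of the corrected usage, with illustrative values standing in for the tutorial's RDP curve:

import dp_accounting

orders = [1.5, 2.0, 4.0, 8.0, 16.0, 32.0]  # illustrative RDP orders
coef = 0.5                                 # illustrative per-order coefficient
delta = 1e-5

rdp = [order * coef for order in orders]

# compute_epsilon returns a pair; only epsilon is needed here.
eps, _ = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
print('at least ({:.2f}, {})-DP'.format(eps, delta))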
