
Commit b3ad682

Support tensorflow 2.16 (#102)

uri-granta authored Jun 20, 2024
1 parent 83dafb0 commit b3ad682
Showing 31 changed files with 130 additions and 102 deletions.
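Nearly every file below applies the same migration: direct `tf.keras` references become the `tf_keras` alias exported by GPflow, which resolves to the Keras 2 implementation even on TensorFlow 2.16+, where plain `tf.keras` points to Keras 3. A minimal sketch of the pattern (the two-line model is illustrative, not taken from this diff, and assumes a GPflow version that provides `gpflow.keras`):

# Before (breaks under TF 2.16, where tf.keras is Keras 3):
#   import tensorflow as tf
#   model.compile(optimizer=tf.keras.optimizers.Adam(0.01))
# After (resolves to Keras 2 on both old and new TF via GPflow's shim):
from gpflow.keras import tf_keras

model = tf_keras.Sequential([tf_keras.layers.Dense(1)])
model.compile(optimizer=tf_keras.optimizers.Adam(0.01), loss="mse")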
9 changes: 8 additions & 1 deletion .github/workflows/quality-check.yaml
@@ -20,9 +20,10 @@ jobs:
check-and-test:
runs-on: ubuntu-20.04
strategy:
+fail-fast: false
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
-tensorflow: ["~=2.5.0", "~=2.6.0", "~=2.7.0", "~=2.8.0", "~=2.9.0", "~=2.10.0", "~=2.11.0", "~=2.12.0", "~=2.13.0", "~=2.14.0", "~=2.15.0"]
+tensorflow: ["~=2.5.0", "~=2.6.0", "~=2.7.0", "~=2.8.0", "~=2.9.0", "~=2.10.0", "~=2.11.0", "~=2.12.0", "~=2.13.0", "~=2.14.0", "~=2.15.0", "~=2.16.0"]
include:
- tensorflow: "~=2.5.0"
keras: "~=2.6.0"
@@ -57,6 +58,8 @@ jobs:
- tensorflow: "~=2.15.0"
keras: "~=2.15.0"
tensorflow-probability: "~=0.23.0"
+- tensorflow: "~=2.16.0"
+  tensorflow-probability: "~=0.24.0"
exclude:
# These older versions of TensorFlow don't work with Python 3.10:
- python-version: "3.10"
@@ -74,11 +77,15 @@ jobs:
tensorflow: "~=2.14.0"
- python-version: "3.7"
tensorflow: "~=2.15.0"
+- python-version: "3.7"
+  tensorflow: "~=2.16.0"
# These newer versions of TensorFlow don't work with Python 3.8:
- python-version: "3.8"
tensorflow: "~=2.14.0"
- python-version: "3.8"
tensorflow: "~=2.15.0"
+- python-version: "3.8"
+  tensorflow: "~=2.16.0"

name: Python-${{ matrix.python-version }} tensorflow${{ matrix.tensorflow }}
env:
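To reproduce a single cell of this matrix locally, the pins can be installed and sanity-checked at import time; a hedged sketch (the `tensorflow-probability[tf]` extra is assumed to pull in a compatible `tf-keras`):

# Shell: pip install "tensorflow~=2.16.0" "tensorflow-probability[tf]~=0.24.0"
import tensorflow as tf
import tensorflow_probability as tfp

# Confirm the installed pair matches the TF 2.16 CI cell above.
assert tf.__version__.startswith("2.16"), tf.__version__
assert tfp.__version__.startswith("0.24"), tfp.__version__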
8 changes: 5 additions & 3 deletions benchmarking/main.py
@@ -27,6 +27,8 @@
from scipy.stats import norm
from utils import ExperimentName, git_version

+from gpflow.keras import tf_keras

from gpflux.architectures import Config, build_constant_input_dim_deep_gp

THIS_DIR = Path(__file__).parent
@@ -89,10 +91,10 @@ def build_model(X, num_inducing, num_layers):


@EXPERIMENT.capture
-def train_model(model: tf.keras.models.Model, data_train, batch_size, num_epochs):
+def train_model(model: tf_keras.models.Model, data_train, batch_size, num_epochs):
X_train, Y_train = data_train
callbacks = [
-tf.keras.callbacks.ReduceLROnPlateau(
+tf_keras.callbacks.ReduceLROnPlateau(
'loss', factor=0.95, patience=3, min_lr=1e-6, verbose=1
),
]
@@ -123,7 +125,7 @@ def main(_config):
data = get_data()
model = build_model(data.X_train)

-model.compile(optimizer=tf.optimizers.Adam(0.01))
+model.compile(optimizer=tf_keras.optimizers.Adam(0.01))
train_model(model, (data.X_train, data.Y_train))

metrics = evaluate_model(model, (data.X_test, data.Y_test))
2 changes: 1 addition & 1 deletion docs/index.rst
@@ -71,7 +71,7 @@ As a quick teaser, here's a snippet from the `intro notebook <notebooks/intro>`
# Compile and fit
model = two_layer_dgp.as_training_model()
-model.compile(tf.optimizers.Adam(0.01))
+model.compile(gpflow.keras.tf_keras.optimizers.Adam(0.01))
history = model.fit({"inputs": X, "targets": Y}, epochs=int(1e3), verbose=0)
The model described above produces the fit shown in Fig. 1. For comparison, in Fig. 2 we show the fit on the same dataset by a vanilla single-layer GP model.
4 changes: 2 additions & 2 deletions docs/notebooks/deep_cde.ipynb
@@ -124,7 +124,7 @@
"\n",
"single_layer_dgp = gpflux.models.DeepGP([gp_layer], likelihood_layer)\n",
"model = single_layer_dgp.as_training_model()\n",
"model.compile(tf.optimizers.Adam(0.01))\n",
"model.compile(gpflow.keras.tf_keras.optimizers.Adam(0.01))\n",
"\n",
"history = model.fit({\"inputs\": X, \"targets\": Y}, epochs=int(1e3), verbose=0)\n",
"fig, ax = plt.subplots()\n",
@@ -387,7 +387,7 @@
"execution_count": 17,
"source": [
"model = dgp.as_training_model()\n",
"model.compile(tf.optimizers.Adam(0.005))\n",
"model.compile(gpflow.keras.tf_keras.optimizers.Adam(0.005))\n",
"history = model.fit({\"inputs\": X, \"targets\": Y}, epochs=int(20e3), verbose=0, batch_size=num_data, shuffle=False)"
],
"outputs": [],
5 changes: 3 additions & 2 deletions docs/notebooks/efficient_sampling.py
@@ -35,6 +35,7 @@
import gpflux

from gpflow.config import default_float
+from gpflow.keras import tf_keras

from gpflux.layers.basis_functions.fourier_features import RandomFourierFeaturesCosine
from gpflux.sampling import KernelWithFeatureDecomposition
@@ -99,10 +100,10 @@
"""

# %%
-model.compile(tf.optimizers.Adam(learning_rate=0.1))
+model.compile(tf_keras.optimizers.Adam(learning_rate=0.1))

callbacks = [
-tf.keras.callbacks.ReduceLROnPlateau(
+tf_keras.callbacks.ReduceLROnPlateau(
monitor="loss",
patience=5,
factor=0.95,
9 changes: 5 additions & 4 deletions docs/notebooks/gpflux_features.py
@@ -63,6 +63,7 @@ def motorcycle_data():
# %%
import gpflux

+from gpflow.keras import tf_keras
from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.models import DeepGP

@@ -80,19 +81,19 @@ def motorcycle_data():

# %%
# From the `DeepGP` model we instantiate a training model which is a `tf.keras.Model`
-training_model: tf.keras.Model = deep_gp.as_training_model()
+training_model: tf_keras.Model = deep_gp.as_training_model()

# Following the Keras procedure we need to compile and pass an optimizer,
# before fitting the model to data
-training_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01))
+training_model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01))

callbacks = [
# Create callback that reduces the learning rate every time the ELBO plateaus
-tf.keras.callbacks.ReduceLROnPlateau("loss", factor=0.95, patience=3, min_lr=1e-6, verbose=0),
+tf_keras.callbacks.ReduceLROnPlateau("loss", factor=0.95, patience=3, min_lr=1e-6, verbose=0),
# Create a callback that writes logs (e.g., hyperparameters, KLs, etc.) to TensorBoard
gpflux.callbacks.TensorBoard(),
# Create a callback that saves the model's weights
-tf.keras.callbacks.ModelCheckpoint(filepath="ckpts/", save_weights_only=True, verbose=0),
+tf_keras.callbacks.ModelCheckpoint(filepath="ckpts/", save_weights_only=True, verbose=0),
]

history = training_model.fit(
9 changes: 5 additions & 4 deletions docs/notebooks/gpflux_with_keras_layers.py
@@ -29,6 +29,7 @@
import gpflux

from gpflow.config import default_float
+from gpflow.keras import tf_keras


# %% [markdown]
@@ -78,11 +79,11 @@
likelihood_container = gpflux.layers.TrackableLayer()
likelihood_container.likelihood = likelihood

-model = tf.keras.Sequential(
+model = tf_keras.Sequential(
[
-tf.keras.layers.Dense(100, activation="relu"),
-tf.keras.layers.Dense(100, activation="relu"),
-tf.keras.layers.Dense(1, activation="linear"),
+tf_keras.layers.Dense(100, activation="relu"),
+tf_keras.layers.Dense(100, activation="relu"),
+tf_keras.layers.Dense(1, activation="linear"),
gp_layer,
likelihood_container, # no-op, for discovering trainable likelihood parameters
]
4 changes: 2 additions & 2 deletions docs/notebooks/intro.py
@@ -97,7 +97,7 @@ def motorcycle_data():
# %%
single_layer_dgp = gpflux.models.DeepGP([gp_layer], likelihood_layer)
model = single_layer_dgp.as_training_model()
-model.compile(tf.optimizers.Adam(0.01))
+model.compile(gpflow.keras.tf_keras.optimizers.Adam(0.01))

# %% [markdown]
"""
@@ -168,7 +168,7 @@ def plot(model, X, Y, ax=None):
likelihood_layer = gpflux.layers.LikelihoodLayer(gpflow.likelihoods.Gaussian(0.1))
two_layer_dgp = gpflux.models.DeepGP([gp_layer1, gp_layer2], likelihood_layer)
model = two_layer_dgp.as_training_model()
-model.compile(tf.optimizers.Adam(0.01))
+model.compile(gpflow.keras.tf_keras.optimizers.Adam(0.01))

# %%
history = model.fit({"inputs": X, "targets": Y}, epochs=int(1e3), verbose=0)
11 changes: 6 additions & 5 deletions docs/notebooks/keras_integration.py
@@ -25,6 +25,7 @@
import gpflow
import gpflux
from gpflow.ci_utils import reduce_in_tests
+from gpflow.keras import tf_keras

import matplotlib.pyplot as plt

@@ -83,10 +84,10 @@ def create_model(model_class):
num_epochs = reduce_in_tests(200)

# %%
-dgp = create_model(tf.keras.Model)
+dgp = create_model(tf_keras.Model)

callbacks = [
-tf.keras.callbacks.ReduceLROnPlateau(
+tf_keras.callbacks.ReduceLROnPlateau(
monitor="loss",
patience=5,
factor=0.95,
@@ -96,7 +97,7 @@ def create_model(model_class):
]

dgp_train = dgp.as_training_model()
-dgp_train.compile(tf.optimizers.Adam(learning_rate=0.1))
+dgp_train.compile(tf_keras.optimizers.Adam(learning_rate=0.1))

history = dgp_train.fit(
{"inputs": X, "targets": Y}, batch_size=batch_size, epochs=num_epochs, callbacks=callbacks
@@ -106,7 +107,7 @@ def create_model(model_class):
dgp_natgrad = create_model(gpflux.optimization.NatGradModel)

callbacks = [
-tf.keras.callbacks.ReduceLROnPlateau(
+tf_keras.callbacks.ReduceLROnPlateau(
monitor="loss",
patience=5,
factor=0.95,
@@ -124,7 +125,7 @@ def create_model(model_class):
[
gpflow.optimizers.NaturalGradient(gamma=0.05),
gpflow.optimizers.NaturalGradient(gamma=0.05),
-tf.optimizers.Adam(learning_rate=0.1),
+tf_keras.optimizers.Adam(learning_rate=0.1),
]
)

7 changes: 3 additions & 4 deletions gpflux/callbacks.py
@@ -21,15 +21,14 @@
import re
from typing import Any, Dict, List, Mapping, Optional, Union

-import tensorflow as tf
-
import gpflow
+from gpflow.keras import tf_keras
from gpflow.utilities import parameter_dict

__all__ = ["TensorBoard"]


-class TensorBoard(tf.keras.callbacks.TensorBoard):
+class TensorBoard(tf_keras.callbacks.TensorBoard):
"""
This class is a thin wrapper around a `tf.keras.callbacks.TensorBoard` callback that also
calls GPflow's `gpflow.monitor.ModelToTensorBoard` monitoring task.
@@ -100,7 +99,7 @@ def __init__(
self.keywords_to_monitor = keywords_to_monitor
self.max_size = max_size

-def set_model(self, model: tf.keras.Model) -> None:
+def set_model(self, model: tf_keras.Model) -> None:
"""
Set the model (extends the Keras `set_model
<https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard#set_model>`_
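A short usage sketch for the wrapped callback; `keywords_to_monitor` and `max_size` are the constructor arguments visible above, while the `log_dir` value and the commented fit call are illustrative:

import gpflux

# Logs ordinary Keras metrics plus matching model parameters
# (via gpflow.monitor.ModelToTensorBoard) to the same log directory.
tensorboard_cb = gpflux.callbacks.TensorBoard(
    log_dir="logs", keywords_to_monitor=["kernel", "likelihood"], max_size=3
)
# training_model.fit(..., callbacks=[tensorboard_cb])  # model built elsewhere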
2 changes: 1 addition & 1 deletion gpflux/helpers.py
@@ -22,7 +22,7 @@
import inspect
import warnings
from dataclasses import fields
-from typing import List, Optional, Type, TypeVar, Union, Any
+from typing import Any, List, Optional, Type, TypeVar, Union

import numpy as np

3 changes: 2 additions & 1 deletion gpflux/layers/basis_functions/fourier_features/base.py
@@ -22,11 +22,12 @@

import gpflow
from gpflow.base import TensorType
+from gpflow.keras import tf_keras

from gpflux.types import ShapeType


-class FourierFeaturesBase(ABC, tf.keras.layers.Layer):
+class FourierFeaturesBase(ABC, tf_keras.layers.Layer):
r"""
The base class for all Fourier feature layers, used for both random Fourier feature layers and
quadrature layers. We subclass :class:`tf.keras.layers.Layer`, so we must provide
11 changes: 6 additions & 5 deletions gpflux/layers/latent_variable_layer.py
@@ -23,6 +23,7 @@

from gpflow import default_float
from gpflow.base import TensorType
+from gpflow.keras import tf_keras

from gpflux.layers.trackable_layer import TrackableLayer
from gpflux.types import ObservationType
@@ -67,14 +68,14 @@ class LatentVariableLayer(LayerWithObservations):
prior: tfp.distributions.Distribution
""" The prior distribution for the latent variables. """

-encoder: tf.keras.layers.Layer
+encoder: tf_keras.layers.Layer
"""
An encoder that maps from a concatenation of inputs and targets to the
parameters of the approximate posterior distribution of the corresponding
latent variables.
"""

-compositor: tf.keras.layers.Layer
+compositor: tf_keras.layers.Layer
"""
A layer that takes as input the two-element ``[layer_inputs, latent_variable_samples]`` list
and combines the elements into a single output tensor.
@@ -83,8 +84,8 @@
def __init__(
self,
prior: tfp.distributions.Distribution,
-encoder: tf.keras.layers.Layer,
-compositor: Optional[tf.keras.layers.Layer] = None,
+encoder: tf_keras.layers.Layer,
+compositor: Optional[tf_keras.layers.Layer] = None,
name: Optional[str] = None,
):
"""
@@ -108,7 +109,7 @@ def __init__(
self.compositor = (
compositor
if compositor is not None
-else tf.keras.layers.Concatenate(axis=-1, dtype=default_float())
+else tf_keras.layers.Concatenate(axis=-1, dtype=default_float())
)

def call(
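A construction sketch matching the signature above; the prior, encoder, and dimensions are illustrative, and `DirectlyParameterizedNormalDiag` is one of the encoders GPflux ships:

import numpy as np
import tensorflow_probability as tfp

import gpflux

num_data, w_dim = 100, 2
prior = tfp.distributions.MultivariateNormalDiag(
    loc=np.zeros(w_dim), scale_diag=np.ones(w_dim)
)
encoder = gpflux.encoders.DirectlyParameterizedNormalDiag(num_data, w_dim)
# With no compositor given, inputs and latent samples are concatenated
# on the last axis (the tf_keras.layers.Concatenate default seen above).
lv_layer = gpflux.layers.LatentVariableLayer(prior, encoder)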
5 changes: 3 additions & 2 deletions gpflux/layers/trackable_layer.py
@@ -15,9 +15,10 @@
#
"""Utility layer that tracks variables in :class:`tf.Module`."""

-import tensorflow as tf
from deprecated import deprecated

+from gpflow.keras import tf_keras


@deprecated(
reason=(
@@ -27,7 +28,7 @@
"be removed in GPflux version `1.0.0`."
)
)
-class TrackableLayer(tf.keras.layers.Layer):
+class TrackableLayer(tf_keras.layers.Layer):
"""
With the release of TensorFlow 2.5, our TrackableLayer workaround is no
longer needed. See https://github.com/Prowler-io/gpflux/issues/189.
3 changes: 2 additions & 1 deletion gpflux/losses.py
@@ -25,11 +25,12 @@

import gpflow
from gpflow.base import TensorType
+from gpflow.keras import tf_keras

from gpflux.types import unwrap_dist


-class LikelihoodLoss(tf.keras.losses.Loss):
+class LikelihoodLoss(tf_keras.losses.Loss):
r"""
This class is a `tf.keras.losses.Loss` implementation that wraps a GPflow
:class:`~gpflow.likelihoods.Likelihood` instance.
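A minimal usage sketch: per the docstring above, the loss wraps a GPflow likelihood and is then handed to Keras compile (the commented line assumes a model built elsewhere):

import gpflow
import gpflux
from gpflow.keras import tf_keras

likelihood_loss = gpflux.losses.LikelihoodLoss(gpflow.likelihoods.Gaussian(0.1))
# model.compile(loss=likelihood_loss, optimizer=tf_keras.optimizers.Adam(0.01))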