bump to version 0.1.1 with automatic publish on pypi and fix some tests
BDonnot committed Feb 21, 2024
1 parent 65e634c commit 0280ed8
Showing 6 changed files with 227 additions and 5 deletions.
51 changes: 51 additions & 0 deletions .github/workflows/python-publish.yaml
@@ -0,0 +1,51 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Upload Python Package

on:
  release:
    types: [published]

permissions:
  contents: read

jobs:
  deploy:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Set up Python
      uses: actions/setup-python@v3
      with:
        python-version: '3.10'
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install build
    - name: Build package
      run: python -m build
    - name: Install built package
      run: |
        pip install tensorflow
        find dist/ -name leap_net\*.whl -type f -exec pip install {}[test] \;
        # If you install TensorFlow, critically, you should reinstall Keras 3 afterwards.
        # This is a temporary step while TensorFlow is pinned to Keras 2, and will no
        # longer be necessary after TensorFlow 2.16. The cause is that tensorflow==2.15
        # will overwrite your Keras installation with keras==2.15.
    - name: test package
      run: |
        cd leap_net/test
        python -m unittest discover
    - name: Publish package
      uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
      with:
        user: __token__
        password: ${{ secrets.PYPI_TOKEN }}
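For context, the build-and-test sequence above can be reproduced locally before tagging a release. The following is a minimal sketch, not part of the commit, assuming a checkout of the repository root and Python 3.10 (the version the workflow uses); it mirrors the same commands as the workflow steps:

# sketch: reproduce the CI build/test steps locally
python -m pip install --upgrade pip build
python -m build                                            # writes the wheel into dist/
pip install tensorflow                                     # pulls keras 2.x until tensorflow 2.16
find dist/ -name leap_net\*.whl -type f -exec pip install {}[test] \;
cd leap_net/test && python -m unittest discover            # same test command as the workflow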
7 changes: 7 additions & 0 deletions CHANGELOG.rst
@@ -1,6 +1,13 @@
Change Log
===========

[0.1.1] - 2024-02-21
--------------------
- [FIXED] broken tests
- [ADDED] tests of both the keras v3 and the tf_keras implementations (when appropriate,
  *e.g.* not for python 3.8, where keras v3 is not available)
- [ADDED] automatic upload to pypi when a new version is released

[0.1.0] - 2024-01-15
----------------------
- [BREAKING] refactoring of the code to use keras >= 3.0 (compatible with
2 changes: 1 addition & 1 deletion leap_net/__init__.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of leap_net, leap_net a keras implementation of the LEAP Net model.

__version__ = "0.1.0"
__version__ = "0.1.1"
__all__ = []

try:
149 changes: 149 additions & 0 deletions leap_net/test/test_LeapNet_keras.py
@@ -0,0 +1,149 @@
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of leap_net, leap_net a keras implementation of the LEAP Net model.

import logging
import os
import numpy as np
import unittest
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

try:
    import keras
    from keras.layers import Input
    from keras.models import Model
    from leap_net.keras import Ltau
except ImportError:
    pass

import pdb

import sys


class Test(unittest.TestCase):
    def setUp(self):
        if sys.version_info.major == 3 and sys.version_info.minor == 8:
            self.skipTest("Keras v3 not available on python 3.8")
        self.tol = 1e-5  # use to compare results that should be strictly equal, up to numerical error
        self.tol_learn = 1e-2  # use to compare results from a test set

        # to have "reproducible" results
        np.random.seed(1)
        keras.utils.set_random_seed(1)

    def test_ok_tau0(self):
        dim_x = 10
        n_elem = 5
        dim_tau = 1

        x = Input(shape=(dim_x,), name="x")
        tau = Input(shape=(dim_tau,), name="tau")

        res_model = Ltau()((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.zeros(shape=(n_elem, dim_tau), dtype=np.float32)
        res = model.predict([X_train, TAU_train])
        assert np.all(res == X_train)

    def test_ok_tau1(self):
        dim_x = 10
        n_elem = 100
        dim_tau = 1
        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.ones(shape=(n_elem, dim_tau), dtype=np.float32)

        # the keras model
        x = Input(shape=(dim_x,), name="x")
        tau = Input(shape=(dim_tau,), name="tau")
        res_model = Ltau(initializer='ones', use_bias=False)((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        # make predictions
        res = model.predict([X_train, TAU_train])

        # LEAP Net implementation in numpy in case tau is not 0
        res_th = np.matmul(X_train, np.ones((dim_x, dim_tau), dtype=np.float32))
        res_th = np.multiply(res_th, TAU_train)
        res_th = np.matmul(res_th, np.ones((dim_tau, dim_x), dtype=np.float32))
        res_th += X_train
        assert np.mean(np.abs(res - res_th)) <= self.tol, "problem with l1"
        assert np.max(np.abs(res - res_th)) <= self.tol, "problem with linf"

    def test_ok_tau_rand(self):
        dim_x = 10
        n_elem = 100
        dim_tau = 20

        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.random.normal(size=(n_elem, dim_tau)).astype(np.float32)

        # the keras model
        x = Input(shape=(dim_x,), name="x")
        tau = Input(shape=(dim_tau,), name="tau")
        res_model = Ltau(initializer='ones', use_bias=False)((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        # make predictions
        res = model.predict([X_train, TAU_train])

        # LEAP Net implementation in numpy in case tau is not 0
        res_th = np.matmul(X_train, np.ones((dim_x, dim_tau), dtype=np.float32))
        res_th = np.multiply(res_th, TAU_train)
        res_th = np.matmul(res_th, np.ones((dim_tau, dim_x), dtype=np.float32))
        res_th += X_train
        assert np.mean(np.abs(res - res_th)) <= self.tol, "problem with l1"
        assert np.max(np.abs(res - res_th)) <= self.tol, "problem with linf"

    def test_can_learn(self):
        dim_x = 30
        n_elem = 32*32
        dim_tau = 5

        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.random.normal(size=(n_elem, dim_tau)).astype(np.float32)

        e = np.random.normal(size=(dim_x, dim_tau)).astype(np.float32)
        d = np.random.normal(size=(dim_tau, dim_x)).astype(np.float32)

        Y_train = np.matmul(X_train, e)
        Y_train = np.multiply(Y_train, TAU_train)
        Y_train = np.matmul(Y_train, d)
        Y_train += X_train

        # the keras model
        x = Input(shape=(dim_x,), name="x")
        tau = Input(shape=(dim_tau,), name="tau")
        res_model = Ltau()((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        adam_ = keras.optimizers.Adam(learning_rate=1e-3)
        model.compile(optimizer=adam_, loss='mse')
        ## train it
        model.fit(x=[X_train, TAU_train], y=[Y_train], epochs=200, batch_size=32, verbose=False)

        # test it has learned something relevant
        X_test = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_test = np.random.normal(size=(n_elem, dim_tau)).astype(np.float32)
        Y_test = np.matmul(X_test, e)
        Y_test = np.multiply(Y_test, TAU_test)
        Y_test = np.matmul(Y_test, d)
        Y_test += X_test
        res = model.predict([X_test, TAU_test])
        assert np.mean(np.abs(res - Y_test)) <= self.tol_learn, "problem with l1"
        assert np.max(np.abs(res - Y_test)) <= self.tol_learn, "problem with linf"

    # TODO test saving / loading
    # TODO test name and graph visualizing
    # TODO test resnet too


if __name__ == "__main__":
    unittest.main()
@@ -17,7 +17,7 @@
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

from leap_net import Ltau
from leap_net.tf_keras import Ltau
import pdb


@@ -117,7 +117,7 @@ def test_can_learn(self):
        res_model = Ltau()((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        adam_ = tf.optimizers.Adam(lr=1e-3)
        adam_ = tf.optimizers.Adam(learning_rate=1e-3)
        model.compile(optimizer=adam_, loss='mse')
        ## train it
        model.fit(x=[X_train, TAU_train], y=[Y_train], epochs=200, batch_size=32, verbose=False)
19 changes: 17 additions & 2 deletions setup.py
@@ -6,6 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of leap_net, leap_net a keras implementation of the LEAP Net model.

import sys
import setuptools
from setuptools import setup

@@ -21,17 +22,31 @@
"grid2op",
"pandas",
"tqdm",
'sklearn',
'scikit-learn',
"tensorflow"
]
}
}

if sys.version_info.major == 3 and sys.version_info.minor == 8:
    # no keras v3 in python 3.8
    pkgs["required"] = [el for el in pkgs["required"] if not "keras" in el]
    pkgs["required"].append("tensorflow")


pkgs["extras"]["test"] = [el for el in pkgs["extras"]["recommended"] if not "tensorflow" in el]
# from here https://keras.io/getting_started/
# If you install TensorFlow, critically, you should reinstall Keras 3 afterwards.
# This is a temporary step while TensorFlow is pinned to Keras 2,
# and will no longer be necessary after TensorFlow 2.16. The cause is that tensorflow==2.15
# will overwrite your Keras installation with keras==2.15.
# This is why in the tests I just skip tensorflow for now. Will be fixed later

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(name='leap_net',
      version='0.1.0',
      version='0.1.1',
      description='An implementation in keras 3.0 (and tensorflow keras) of the LeapNet model',
      long_description=long_description,
      long_description_content_type="text/markdown",
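As a usage note tied to the comment block in setup.py above: until TensorFlow 2.16, installing the `recommended` extra pulls a tensorflow release that overwrites Keras 3 with keras 2.15, so Keras 3 has to be reinstalled afterwards, as the quoted https://keras.io/getting_started/ guide advises. A hedged sketch, using the extras names declared in this setup.py; the explicit reinstall is the documented workaround, not something the package does on its own:

# sketch: install leap_net with the recommended extras, then restore keras 3
pip install leap_net[recommended]   # pulls tensorflow, which pins keras==2.15 until TF 2.16
pip install --upgrade keras         # reinstall keras 3 afterwards, per the keras.io getting-started guide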
