From 037e816eb02e789968192f5e2f6e8191610727b3 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Tue, 28 Oct 2025 23:56:35 +0100 Subject: [PATCH 01/36] Work in progress --- .gitignore | 1 + .../P_hp_modular.py | 42 ++++ physXAI/__init__.py | 2 +- physXAI/models/__init__.py | 3 - physXAI/models/ann/ann_design.py | 25 +-- .../models/ann/configs/ann_model_configs.py | 2 + .../models/ann/keras_models/keras_models.py | 135 +++++++++++++ .../ann/model_construction/ann_models.py | 46 +++-- .../ann/model_construction/rbf_models.py | 45 +++-- physXAI/models/modular/modular_ann.py | 92 +++++++++ physXAI/models/modular/modular_expression.py | 184 ++++++++++++++++++ physXAI/preprocessing/constructed.py | 4 + 12 files changed, 528 insertions(+), 53 deletions(-) create mode 100644 executables/bestest_hydronic_heat_pump/P_hp_modular.py create mode 100644 physXAI/models/modular/modular_ann.py create mode 100644 physXAI/models/modular/modular_expression.py diff --git a/.gitignore b/.gitignore index 200a224..cf63ac7 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ /**.egg-info **/.coverage /docs/build +/venv diff --git a/executables/bestest_hydronic_heat_pump/P_hp_modular.py b/executables/bestest_hydronic_heat_pump/P_hp_modular.py new file mode 100644 index 0000000..7fcc694 --- /dev/null +++ b/executables/bestest_hydronic_heat_pump/P_hp_modular.py @@ -0,0 +1,42 @@ +from physXAI.models.modular.modular_ann import ModularANN, ModularModel +from physXAI.preprocessing.preprocessing import PreprocessingSingleStep +from physXAI.preprocessing.constructed import Feature +from physXAI.models.ann.ann_design import ClassicalANNModel +from physXAI.utils.logging import Logger + + +""" +Creates modular models to predict the power of the heat pump using the Boptest data. +""" + +Logger.setup_logger(folder_name='P_hp_modular', override=True) + +file_path = r"data/bestest_hydronic_heat_pump/pid_data.csv" + +inputs = ['oveHeaPumY_u', 'Func(logistic)', 'weaSta_reaWeaTDryBul_y', 'reaTZon_y'] +output = 'reaPHeaPum_y' + +oveHeaPumY_u = Feature('oveHeaPumY_u') +func_logistic = Feature('Func(logistic)') +TDryBul = Feature('weaSta_reaWeaTDryBul_y') +TZon = Feature('reaTZon_y') + +prep = PreprocessingSingleStep(inputs, output) +td = prep.pipeline(file_path) + +"""Example usages of modular models""" +y = ModularModel( + model=ClassicalANNModel(), + inputs=[oveHeaPumY_u.input() / func_logistic.input(), func_logistic.input() ** 2, TDryBul.input(), TZon.input()], + rescale_output=True +) +m = ModularANN(architecture=y) + +# Training pipeline +model = m.pipeline(td) + + +# Log setup of preprocessing and model as json +Logger.log_setup(prep, m) +# Log training data as pickle +Logger.save_training_data(td) diff --git a/physXAI/__init__.py b/physXAI/__init__.py index 8d1c8b6..2677e7c 100644 --- a/physXAI/__init__.py +++ b/physXAI/__init__.py @@ -1 +1 @@ - +from .models.ann.keras_models.keras_models import * \ No newline at end of file diff --git a/physXAI/models/__init__.py b/physXAI/models/__init__.py index ba67d35..e69de29 100644 --- a/physXAI/models/__init__.py +++ b/physXAI/models/__init__.py @@ -1,3 +0,0 @@ -from .ann.keras_models.keras_models import * -from .models import * -from .ann.ann_design import * \ No newline at end of file diff --git a/physXAI/models/ann/ann_design.py b/physXAI/models/ann/ann_design.py index c082949..ce28653 100644 --- a/physXAI/models/ann/ann_design.py +++ b/physXAI/models/ann/ann_design.py @@ -1,3 +1,4 @@ +from logging import warning import os import time from abc import ABC, abstractmethod @@ -47,6
+48,9 @@ def __init__(self, batch_size: int = 32, epochs: int = 1000, learning_rate: floa self.random_seed: int = random_seed keras.utils.set_random_seed(random_seed) + self.model_config = dict() + + @abstractmethod def generate_model(self, **kwargs): """ @@ -216,12 +220,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, self.activation_function: Union[str, list[str]] = activation_function self.rescale_output: bool = rescale_output - self.model_config = { + self.model_config.update({ "n_layers": self.n_layers, "n_neurons": self.n_neurons, "activation_function": self.activation_function, "rescale_output": self.rescale_output, - } + }) def generate_model(self, **kwargs): """ @@ -272,12 +276,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, res self.n_neurons: Union[int, list[int]] = n_neurons self.rescale_output: bool = rescale_output - self.model_config = { + self.model_config.update({ "n_layers": self.n_layers, "n_neurons": self.n_neurons, "rescale_output": self.rescale_output, "random_state": random_seed - } + }) def generate_model(self, **kwargs): """ @@ -336,12 +340,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, res self.n_neurons: Union[int, list[int]] = n_neurons self.rescale_output: bool = rescale_output - self.model_config = { + self.model_config.update({ "n_layers": self.n_layers, "n_neurons": self.n_neurons, "rescale_output": self.rescale_output, "random_state": random_seed - } + }) def generate_model(self, **kwargs): """ @@ -401,14 +405,14 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, self.monotonies: dict[str, int] = monotonies self.activation_split: list[float] = activation_split - self.model_config = { + self.model_config.update({ "n_layers": self.n_layers, "n_neurons": self.n_neurons, "activation_function": self.activation_function, "rescale_output": self.rescale_output, "monotonicities": self.monotonies, "activation_split": activation_split, - } + }) def generate_model(self, **kwargs): """ @@ -476,14 +480,14 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, self.pinn_weights: list[float] = pinn_weights - self.model_config = { + self.model_config.update({ "n_layers": self.n_layers, "n_neurons": self.n_neurons, "activation_function": self.activation_function, "rescale_output": self.rescale_output, "monotonicities": self.monotonies, "activation_split": activation_split, - } + }) # Create pinn loss based on standard losses self.pinn_loss = multi_y_loss(keras.losses.MeanSquaredError(name='MSE'), self.pinn_weights, 'mse') @@ -608,7 +612,6 @@ def get_config(self) -> dict: }) return config - @register_model class RNNModel(MultiStepModel): """ diff --git a/physXAI/models/ann/configs/ann_model_configs.py b/physXAI/models/ann/configs/ann_model_configs.py index 8030c14..ec97332 100644 --- a/physXAI/models/ann/configs/ann_model_configs.py +++ b/physXAI/models/ann/configs/ann_model_configs.py @@ -8,6 +8,8 @@ class ClassicalANNConstruction_config(BaseModel): n_neurons: Union[int, list[int]] = 32 activation_function: Union[str, list[str]] = 'softplus' rescale_output: bool = True + normalize: bool = True + n_features: Optional[int] = None @field_validator('n_neurons') def validate_n_neurons(cls, v, info): diff --git a/physXAI/models/ann/keras_models/keras_models.py b/physXAI/models/ann/keras_models/keras_models.py index a16ac8b..21de1de 100644 --- a/physXAI/models/ann/keras_models/keras_models.py +++ 
b/physXAI/models/ann/keras_models/keras_models.py @@ -1,4 +1,5 @@ import os +from typing import Union import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import keras @@ -382,3 +383,137 @@ def from_config(cls, config): if initial_centers_list is not None: config["initial_centers"] = np.array(initial_centers_list) return cls(**config) + + +@keras.saving.register_keras_serializable(package='custom_layer', name='InputSliceLayer') +class InputSliceLayer(keras.Layer): + """ + A simple layer to select specific features from the last axis. + """ + + def __init__(self, feature_indices: Union[int, list[int]], **kwargs): + """ + Initializes the layer. + + Args: + feature_indices (int or list): The index or indices to select. + - If int (e.g., 1), selects the feature at that index and + reduces the rank. + - If list (e.g., [1]), selects the feature(s) and + keeps the rank. + """ + super().__init__(**kwargs) + self.feature_indices = feature_indices + + def call(self, inputs): + return keras.ops.take(inputs, self.feature_indices, axis=-1) + + def get_config(self): + config = super().get_config() + config.update({ + "feature_indices": self.feature_indices + }) + return config + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + + if isinstance(self.feature_indices, int): + output_shape.pop(-1) + + elif isinstance(self.feature_indices, (list, tuple)): + output_shape[-1] = len(self.feature_indices) + + return tuple(output_shape) + + +@keras.saving.register_keras_serializable(package='custom_layer', name='ConstantLayer') +class ConstantLayer(keras.Layer): + """ + A layer that returns a constant tensor, broadcasted to the batch size. + + This layer ignores its input and simply returns a tensor of a + pre-defined shape, initialized with a constant value. + + The constant is created as a Keras weight, which can be + trainable or non-trainable. + """ + + def __init__(self, value=0.0, shape=(1,), trainable=False, **kwargs): + """ + Initializes the layer. + + Args: + value (float): The value to initialize the constant tensor with. + If None, the constant is initialized randomly (Glorot uniform). + shape (tuple): The shape of the constant, *excluding* the batch + dimension. For a single number to be added, use (1,). + trainable (bool): Whether this constant is a learnable parameter. + """ + super().__init__(trainable=trainable, **kwargs) + self.value = value + self.target_shape = tuple(shape) + + def build(self, input_shape): + if self.value is not None: + init = keras.initializers.Constant(self.value) + else: + init = keras.initializers.glorot_uniform() + self.constant = self.add_weight( + shape=self.target_shape, + initializer=init, + trainable=self.trainable, + name=self.name, + ) + + def call(self, inputs): + batch_size = keras.ops.shape(inputs)[0] + + # Create the full target shape, including the batch dimension + # e.g., (batch_size,) + (1,) -> (batch_size, 1) + full_shape = (batch_size,) + self.target_shape + + return keras.ops.broadcast_to(self.constant, full_shape) + + def compute_output_shape(self, input_shape): + # The output shape is (batch_size,) + our target_shape + return (input_shape[0],) + self.target_shape + + def get_config(self): + config = super().get_config() + config.update({ + "value": self.value, + "shape": self.target_shape, + }) + return config + + +@keras.saving.register_keras_serializable(package='custom_layer', name='DivideLayer') +class DivideLayer(keras.Layer): + """ + A layer that divides two layers.
+ """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, inputs): + return keras.ops.divide(inputs[0], inputs[1]) + + def compute_output_shape(self, input_shape): + return input_shape[0] + + +@keras.saving.register_keras_serializable(package='custom_layer', name='PowerLayer') +class PowerLayer(keras.Layer): + """ + A layer that computes the power of two layers. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, inputs): + return keras.ops.power(inputs[0], inputs[1]) + + def compute_output_shape(self, input_shape): + return input_shape[0] \ No newline at end of file diff --git a/physXAI/models/ann/model_construction/ann_models.py b/physXAI/models/ann/model_construction/ann_models.py index bdf4b6e..f8e0bde 100644 --- a/physXAI/models/ann/model_construction/ann_models.py +++ b/physXAI/models/ann/model_construction/ann_models.py @@ -33,7 +33,10 @@ def ClassicalANNConstruction(config: dict, td: TrainingDataGeneric): n_neurons = [n_neurons] * n_layers else: assert len(n_neurons) == n_layers - n_featues = td.X_train_single.shape[1] + if config['n_features'] is not None: + n_features = config['n_features'] + else: + n_features = td.X_train_single.shape[1] activation_function = config['activation_function'] # If activation_function is a single string, replicate it for all layers if isinstance(activation_function, str): @@ -41,20 +44,17 @@ def ClassicalANNConstruction(config: dict, td: TrainingDataGeneric): else: assert len(activation_function) == n_layers - # Rescaling for output layer - rescale_min = float(td.y_train_single.min()) - rescale_max = float(td.y_train_single.max()) - # Build artificial neural network as Sequential model = keras.Sequential() # Add input layer - model.add(keras.layers.Input(shape=(n_featues,))) + model.add(keras.layers.Input(shape=(n_features,))) # Add normalization layer - normalization = keras.layers.Normalization() - normalization.adapt(td.X_train_single) - model.add(normalization) + if config['normalize']: + normalization = keras.layers.Normalization() + normalization.adapt(td.X_train_single) + model.add(normalization) for i in range(0, n_layers): # For each layer add dense @@ -63,6 +63,9 @@ def ClassicalANNConstruction(config: dict, td: TrainingDataGeneric): model.add(keras.layers.Dense(1, activation='linear')) # Add rescaling if config['rescale_output']: + # Rescaling for output layer + rescale_min = float(td.y_train_single.min()) + rescale_max = float(td.y_train_single.max()) model.add(keras.layers.Rescaling(scale=rescale_max - rescale_min, offset=rescale_min)) model.summary() @@ -96,7 +99,10 @@ def CMNNModelConstruction(config: dict, td: TrainingDataGeneric): n_neurons = [n_neurons] * n_layers else: assert len(n_neurons) == n_layers - n_featues = td.X_train_single.shape[1] + if config['n_features'] is not None: + n_features = config['n_features'] + else: + n_features = td.X_train_single.shape[1] activation_function = config['activation_function'] # If activation_function is a single string, replicate it for all layers if isinstance(activation_function, str): @@ -107,21 +113,20 @@ def CMNNModelConstruction(config: dict, td: TrainingDataGeneric): # Get monotonicity constraints mono = config['monotonicities'] if mono is None: - monotonicities = [0] * n_featues + monotonicities = [0] * n_features else: monotonicities = [0 if name not in mono.keys() else mono[name] for name in td.columns] - # Rescaling for output layer - rescale_min = float(td.y_train_single.min()) - rescale_max = 
float(td.y_train_single.max()) - # Add input layer - input_layer = keras.layers.Input(shape=(n_featues,)) + input_layer = keras.layers.Input(shape=(n_features,)) # Add normalization layer - normalization = keras.layers.Normalization() - normalization.adapt(td.X_train_single) - x = normalization(input_layer) + if config['normalize']: + normalization = keras.layers.Normalization() + normalization.adapt(td.X_train_single) + x = normalization(input_layer) + else: + x = input_layer # Add dense layer activation_split = config['activation_split'] @@ -167,6 +172,9 @@ def CMNNModelConstruction(config: dict, td: TrainingDataGeneric): # Add rescaling if config['rescale_output']: + # Rescaling for output layer + rescale_min = float(td.y_train_single.min()) + rescale_max = float(td.y_train_single.max()) x = keras.layers.Rescaling(scale=rescale_max - rescale_min, offset=rescale_min)(x) # # Add min / max constraints diff --git a/physXAI/models/ann/model_construction/rbf_models.py b/physXAI/models/ann/model_construction/rbf_models.py index cdf3ed6..46ce4f7 100644 --- a/physXAI/models/ann/model_construction/rbf_models.py +++ b/physXAI/models/ann/model_construction/rbf_models.py @@ -35,29 +35,20 @@ def RBFModelConstruction(config: dict, td: TrainingDataGeneric): n_neurons = [n_neurons] * n_layers else: assert len(n_neurons) == n_layers - n_featues = td.X_train_single.shape[1] - - # Rescaling for output layer - # Custom rescaling - if 'rescale_scale' in config.keys() and config['rescale_scale'] is not None: - if 'rescale_offset' in config.keys() and config['rescale_offset'] is not None: - offset = config['rescale_offset'] - else: - offset = 0 - rescale_scale = config['rescale_scale'] - rescale_min = offset - rescale_max = offset + rescale_scale - # Standard rescaling + if config['n_features'] is not None: + n_features = config['n_features'] else: - rescale_min = float(td.y_train_single.min()) - rescale_max = float(td.y_train_single.max()) + n_features = td.X_train_single.shape[1] # Add input layer - input_layer = keras.layers.Input(shape=(n_featues,)) + input_layer = keras.layers.Input(shape=(n_features,)) # Add normalization layer - normalization = keras.layers.Normalization() - normalization.adapt(td.X_train_single) - x = normalization(input_layer) + if config['normalize']: + normalization = keras.layers.Normalization() + normalization.adapt(td.X_train_single) + x = normalization(input_layer) + else: + x = input_layer for i in range(0, n_layers): # For each layer add RBF @@ -77,6 +68,22 @@ def RBFModelConstruction(config: dict, td: TrainingDataGeneric): # Add rescaling if config['rescale_output']: + + # Rescaling for output layer + # Custom rescaling + if 'rescale_scale' in config.keys() and config['rescale_scale'] is not None: + if 'rescale_offset' in config.keys() and config['rescale_offset'] is not None: + offset = config['rescale_offset'] + else: + offset = 0 + rescale_scale = config['rescale_scale'] + rescale_min = offset + rescale_max = offset + rescale_scale + # Standard rescaling + else: + rescale_min = float(td.y_train_single.min()) + rescale_max = float(td.y_train_single.max()) + x = keras.layers.Rescaling(scale=rescale_max - rescale_min, offset=rescale_min)(x) model = keras.Model(inputs=input_layer, outputs=x) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py new file mode 100644 index 0000000..9875042 --- /dev/null +++ b/physXAI/models/modular/modular_ann.py @@ -0,0 +1,92 @@ +from logging import warning +import os +from typing import Optional + +from 
physXAI.models.modular.modular_expression import ModularExpression +from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel +from physXAI.models.models import register_model +from physXAI.preprocessing.training_data import TrainingDataGeneric +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +import keras +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' + + +@register_model +class ModularANN(ANNModel): + """ + A Modular Artificial Neural Network model that allows for custom architectures. + """ + + def __init__(self, architecture: ModularExpression, batch_size: int = 32, epochs: int = 1000, + learning_rate: float = 0.001, early_stopping_epochs: Optional[int] = 100, + random_seed: int = 42, **kwargs): + """ + Initializes the ModularANN. + + Args: + architecture (ModularExpression): The modular architecture defining the model. + batch_size (int): Number of samples per gradient update. + epochs (int): Number of times to iterate over the entire training dataset. + learning_rate (float): Learning rate for the Adam optimizer. + early_stopping_epochs (int): Number of epochs with no improvement after which training will be stopped. + If None, early stopping is disabled. + random_seed (int): Seed for random number generators to ensure reproducibility. + """ + + super().__init__(batch_size, epochs, learning_rate, early_stopping_epochs, random_seed) + self.architecture: ModularExpression = architecture + + self.model_config.update({}) + + def generate_model(self, **kwargs): + """ + Generates the Keras model using the specified modular architecture. + """ + + td = kwargs['td'] + n_features = td.X_train_single.shape[1] + input_layer = keras.layers.Input(shape=(n_features,)) + x = self.architecture.construct(input_layer, td) + model = keras.models.Model(inputs=input_layer, outputs=x) + model.summary() + return model + + def get_config(self) -> dict: + config = super().get_config() + config.update({}) + warning("ModularANN currently does not save architecture config.") + return config + + +class ModularModel(ModularExpression): + + allowed_models = [ClassicalANNModel, CMNNModel] + i = 0 + + def __init__(self, model: ANNModel, inputs: list[ModularExpression], rescale_output: bool = False, name: str = None): + if not any(isinstance(model, allowed) for allowed in self.allowed_models): + raise NotImplementedError(f"Currently {type(model)} is not supported. 
Allowed models are: {self.allowed_models}") + + if name is None: + name = f"ModularModel_{ModularModel.i}" + ModularModel.i += 1 + + super().__init__(name) + self.model = model + self.rescale_output = rescale_output + if rescale_output: + warning("Using rescale_output=True in ModularANN should only be done if model output is training data output.") + self.model.model_config.update({ + "normalize": False, + "rescale_output": rescale_output + }) + self.inputs = inputs + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + self.model.model_config['n_features'] = len(inps) + td.columns = [inp.name for inp in self.inputs] + return self.model.generate_model(td=td)(keras.layers.Concatenate()(inps)) \ No newline at end of file diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py new file mode 100644 index 0000000..c0a633b --- /dev/null +++ b/physXAI/models/modular/modular_expression.py @@ -0,0 +1,184 @@ +from abc import ABC, abstractmethod +import os +from typing import Union +from physXAI.models.ann.keras_models.keras_models import ConstantLayer, DivideLayer, InputSliceLayer, PowerLayer +from physXAI.preprocessing.training_data import TrainingDataGeneric +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +import keras +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' + + +class ModularExpression(ABC): + + feature_list = dict() + feature_list_normalized = dict() + trainable_parameters = dict() + + def __init__(self, name: str): + self.name = name + + @abstractmethod + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + pass + + def __add__(self, other): + return ModularAdd(self, other) + + def __radd__(self, other): + return ModularAdd(other, self) + + def __sub__(self, other): + return ModularSub(self, other) + + def __rsub__(self, other): + return ModularSub(other, self) + + def __mul__(self, other): + return ModularMul(self, other) + + def __rmul__(self, other): + return ModularMul(other, self) + + def __truediv__(self, other): + return ModularTrueDiv(self, other) + + def __rtruediv__(self, other): + return ModularTrueDiv(other, self) + + def __pow__(self, other): + return ModularPow(self, other) + + def rename(self, name: str): + self.name = name + + +def get_name(feature: Union[ModularExpression, int, float]) -> str: + if isinstance(feature, ModularExpression): + return feature.name + else: + return str(feature) + + +class ModularFeature(ModularExpression): + + def __init__(self, name: str, normalize: bool = True): + super().__init__(name) + self.normalize = normalize + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.normalize and self.name in ModularExpression.feature_list_normalized.keys(): + return ModularExpression.feature_list_normalized[self.name] + elif not self.normalize and self.name in ModularExpression.feature_list.keys(): + return ModularExpression.feature_list[self.name] + else: + x = InputSliceLayer([td.columns.index(self.name)])(input_layer) + if self.normalize: + l = keras.layers.Normalization() + l.adapt(td.X_train_single[:, td.columns.index(self.name)].reshape(-1, 1)) + x = l(x) + ModularExpression.feature_list_normalized[self.name] = x + else: + ModularExpression.feature_list[self.name] = x + + return x + + +class ModularTrainable(ModularExpression): + + i = 0 + + def __init__(self, 
name: str = None, initial_value: float = None): + if name is None: + name = f"ModularTrainable_{ModularTrainable.i}" + ModularTrainable.i += 1 + super().__init__(name) + self.initial_value = initial_value + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.name is not None and self.name in ModularExpression.trainable_parameters.keys(): + return ModularExpression.trainable_parameters[self.name] + else: + l = ConstantLayer(trainable=True, name=self.name, value=self.initial_value)(input_layer) + if self.name is not None: + ModularExpression.trainable_parameters[self.name] = l + return l + + +class ModularTwo(ModularExpression, ABC): + + def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str): + super().__init__(name) + self.feature1 = feature1 + self.feature2 = feature2 + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if isinstance(self.feature1, (int, float)): + l1 = ConstantLayer(value=self.feature1)(input_layer) + else: + l1 = self.feature1.construct(input_layer, td) + + if isinstance(self.feature2, (int, float)): + l2 = ConstantLayer(value=self.feature2)(input_layer) + else: + l2 = self.feature2.construct(input_layer, td) + + return self._construct(l1, l2) + + @abstractmethod + def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: + pass + + +class ModularAdd(ModularTwo): + + def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): + if name is None: + name = f"({get_name(feature1)}+{get_name(feature2)})" + super().__init__(feature1, feature2, name) + + def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: + return keras.layers.Add()([layer1, layer2]) + + +class ModularSub(ModularTwo): + + def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): + if name is None: + name = f"({get_name(feature1)}-{get_name(feature2)})" + super().__init__(feature1, feature2, name) + + def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: + return keras.layers.Subtract()([layer1, layer2]) + + +class ModularMul(ModularTwo): + + def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): + if name is None: + name = f"({get_name(feature1)}*{get_name(feature2)})" + super().__init__(feature1, feature2, name) + + def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: + return keras.layers.Multiply()([layer1, layer2]) + + +class ModularTrueDiv(ModularTwo): + + def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): + if name is None: + name = f"({get_name(feature1)}/{get_name(feature2)})" + super().__init__(feature1, feature2, name) + + def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: + return DivideLayer()([layer1, layer2]) + + +class ModularPow(ModularTwo): + + def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): + if name is None: + name = f"({get_name(feature1)}**{get_name(feature2)})" + super().__init__(feature1, 
feature2, name) + + def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: + return PowerLayer()([layer1, layer2]) diff --git a/physXAI/preprocessing/constructed.py b/physXAI/preprocessing/constructed.py index e90b7d3..2d01d22 100644 --- a/physXAI/preprocessing/constructed.py +++ b/physXAI/preprocessing/constructed.py @@ -2,6 +2,7 @@ from typing import Type, Union import numpy as np from pandas import DataFrame, Series +from physXAI.models.modular.modular_expression import ModularFeature class FeatureBase(ABC): @@ -122,6 +123,9 @@ def get_config(self) -> dict: def from_config(cls, config: dict) -> 'FeatureBase': return cls(**config) + def input(self, normalize: bool = True) -> ModularFeature: + return ModularFeature(self.feature, normalize=normalize) + # --- Registry for Feature Classes --- # This registry maps class names (strings) to the actual class types (Type[FeatureBase]). From 1daa46a8ee7a0cafac379e1baca4dbd1a2e10a54 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Wed, 29 Oct 2025 01:08:11 +0100 Subject: [PATCH 02/36] Updated inits --- physXAI/__init__.py | 1 - physXAI/models/__init__.py | 1 + physXAI/models/ann/__init__.py | 1 - physXAI/models/ann/keras_models/__init__.py | 1 - .../models/ann/keras_models/keras_models.py | 26 ++++++++++++++++++- physXAI/models/modular/__init__.py | 0 6 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 physXAI/models/modular/__init__.py diff --git a/physXAI/__init__.py b/physXAI/__init__.py index 2677e7c..e69de29 100644 --- a/physXAI/__init__.py +++ b/physXAI/__init__.py @@ -1 +0,0 @@ -from .models.ann.keras_models.keras_models import * \ No newline at end of file diff --git a/physXAI/models/__init__.py b/physXAI/models/__init__.py index e69de29..1153f2a 100644 --- a/physXAI/models/__init__.py +++ b/physXAI/models/__init__.py @@ -0,0 +1 @@ +from .ann.keras_models.keras_models import * \ No newline at end of file diff --git a/physXAI/models/ann/__init__.py b/physXAI/models/ann/__init__.py index 8d1c8b6..e69de29 100644 --- a/physXAI/models/ann/__init__.py +++ b/physXAI/models/ann/__init__.py @@ -1 +0,0 @@ - diff --git a/physXAI/models/ann/keras_models/__init__.py b/physXAI/models/ann/keras_models/__init__.py index 8d1c8b6..e69de29 100644 --- a/physXAI/models/ann/keras_models/__init__.py +++ b/physXAI/models/ann/keras_models/__init__.py @@ -1 +0,0 @@ - diff --git a/physXAI/models/ann/keras_models/keras_models.py b/physXAI/models/ann/keras_models/keras_models.py index 21de1de..8e56ebe 100644 --- a/physXAI/models/ann/keras_models/keras_models.py +++ b/physXAI/models/ann/keras_models/keras_models.py @@ -415,6 +415,10 @@ def get_config(self): }) return config + @classmethod + def from_config(cls, config): + return cls(**config) + def compute_output_shape(self, input_shape): output_shape = list(input_shape) @@ -485,6 +489,10 @@ def get_config(self): "shape": self.target_shape, }) return config + + @classmethod + def from_config(cls, config): + return cls(**config) @keras.saving.register_keras_serializable(package='custom_layer', name='DivideLayer') @@ -502,6 +510,14 @@ def call(self, inputs): def compute_output_shape(self, input_shape): return input_shape[0] + def get_config(self): + config = super().get_config() + return config + + @classmethod + def from_config(cls, config): + return cls(**config) + @keras.saving.register_keras_serializable(package='custom_layer', name='PowerLayer') class PowerLayer(keras.Layer): @@ -516,4 +532,12 @@ def call(self, inputs): return keras.ops.power(inputs[0], 
inputs[1]) def compute_output_shape(self, input_shape): - return input_shape[0] \ No newline at end of file + return input_shape[0] + + def get_config(self): + config = super().get_config() + return config + + @classmethod + def from_config(cls, config): + return cls(**config) \ No newline at end of file diff --git a/physXAI/models/modular/__init__.py b/physXAI/models/modular/__init__.py new file mode 100644 index 0000000..e69de29 From 14ed7cb83b88e16d2c3f19f2805210ecea76e768 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Wed, 29 Oct 2025 13:40:05 +0100 Subject: [PATCH 03/36] Updated Modular Networks, Added Keywords for PreprocessingData init --- .../bestest_hydronic_heat_pump/P_hp.py | 2 +- .../P_hp_FeatureSelection.py | 2 +- .../P_hp_modular.py | 2 +- .../bestest_hydronic_heat_pump/P_hp_pinn.py | 2 +- .../bestest_hydronic_heat_pump/TAir.py | 2 +- .../TAir_FeatureSelection.py | 2 +- .../TAir_evaluateMultiStep.py | 2 +- .../bestest_hydronic_heat_pump/TAir_rnn.py | 2 +- physXAI/models/modular/modular_ann.py | 43 +++++++++++++++---- physXAI/models/modular/modular_expression.py | 6 +-- physXAI/preprocessing/constructed.py | 17 +++++++- physXAI/preprocessing/preprocessing.py | 8 ++-- unittests/test_coverage.py | 22 +++++----- unittests/verify_installation.py | 2 +- 14 files changed, 78 insertions(+), 36 deletions(-) diff --git a/executables/bestest_hydronic_heat_pump/P_hp.py b/executables/bestest_hydronic_heat_pump/P_hp.py index 97bf7ec..2c4fd53 100644 --- a/executables/bestest_hydronic_heat_pump/P_hp.py +++ b/executables/bestest_hydronic_heat_pump/P_hp.py @@ -36,7 +36,7 @@ # It is recommended to rename features, so that they can be easily added to the input list # Create Training data -prep = PreprocessingSingleStep(inputs, output) +prep = PreprocessingSingleStep(inputs=inputs, output=output) # Process Training data td = prep.pipeline(file_path) diff --git a/executables/bestest_hydronic_heat_pump/P_hp_FeatureSelection.py b/executables/bestest_hydronic_heat_pump/P_hp_FeatureSelection.py index ab4b60e..300c567 100644 --- a/executables/bestest_hydronic_heat_pump/P_hp_FeatureSelection.py +++ b/executables/bestest_hydronic_heat_pump/P_hp_FeatureSelection.py @@ -33,7 +33,7 @@ # Generic Preprocessing Pipeline # Model is output model, so single step evaluation is choosen -prep = PreprocessingSingleStep(inputs, output) +prep = PreprocessingSingleStep(inputs=inputs, output=output) # Generic Model m = LinearRegressionModel() diff --git a/executables/bestest_hydronic_heat_pump/P_hp_modular.py b/executables/bestest_hydronic_heat_pump/P_hp_modular.py index 7fcc694..056ced4 100644 --- a/executables/bestest_hydronic_heat_pump/P_hp_modular.py +++ b/executables/bestest_hydronic_heat_pump/P_hp_modular.py @@ -21,7 +21,7 @@ TDryBul = Feature('weaSta_reaWeaTDryBul_y') TZon = Feature('reaTZon_y') -prep = PreprocessingSingleStep(inputs, output) +prep = PreprocessingSingleStep(inputs=inputs, output=output) td = prep.pipeline(file_path) """Example usages of modular models""" diff --git a/executables/bestest_hydronic_heat_pump/P_hp_pinn.py b/executables/bestest_hydronic_heat_pump/P_hp_pinn.py index 1e3ba2d..532796a 100644 --- a/executables/bestest_hydronic_heat_pump/P_hp_pinn.py +++ b/executables/bestest_hydronic_heat_pump/P_hp_pinn.py @@ -39,7 +39,7 @@ pinn.rename('pinn') # Create Training data -prep = PreprocessingSingleStep(inputs, output) +prep = PreprocessingSingleStep(inputs=inputs, output=output) # Process Training data td = prep.pipeline(file_path) diff --git 
a/executables/bestest_hydronic_heat_pump/TAir.py b/executables/bestest_hydronic_heat_pump/TAir.py index 405d335..4f68c0d 100644 --- a/executables/bestest_hydronic_heat_pump/TAir.py +++ b/executables/bestest_hydronic_heat_pump/TAir.py @@ -31,7 +31,7 @@ x3.lag(2) # oveHeaPumY_u_lag1, oveHeaPumY_u_lag2 # Create Training data -prep = PreprocessingSingleStep(inputs, output) +prep = PreprocessingSingleStep(inputs=inputs, output=output) # Process Training data td = prep.pipeline(file_path) diff --git a/executables/bestest_hydronic_heat_pump/TAir_FeatureSelection.py b/executables/bestest_hydronic_heat_pump/TAir_FeatureSelection.py index bddc953..c232408 100644 --- a/executables/bestest_hydronic_heat_pump/TAir_FeatureSelection.py +++ b/executables/bestest_hydronic_heat_pump/TAir_FeatureSelection.py @@ -34,7 +34,7 @@ # Generic Preprocessing Pipeline # Model is state model, so multi-step evaluation is choosen # See example TAir_evaluateMultiStep.py for more information -prep = PreprocessingMultiStep(inputs, output, 48, 0, init_features=['reaTZon_y'], +prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=48, warmup_width=0, init_features=['reaTZon_y'], overlapping_sequences=False, batch_size=1) # Generic Model diff --git a/executables/bestest_hydronic_heat_pump/TAir_evaluateMultiStep.py b/executables/bestest_hydronic_heat_pump/TAir_evaluateMultiStep.py index 5c5a1bc..44e1075 100644 --- a/executables/bestest_hydronic_heat_pump/TAir_evaluateMultiStep.py +++ b/executables/bestest_hydronic_heat_pump/TAir_evaluateMultiStep.py @@ -42,7 +42,7 @@ overlapping_sequence should be False to avoid duplicate labels for single step prediction batch_size should be 1 as batches are processes differently in single step models """ -prep = PreprocessingMultiStep(inputs, output, 48, 0, init_features=['reaTZon_y'], +prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=48, warmup_width=0, init_features=['reaTZon_y'], overlapping_sequences=False, batch_size=1) # Process Training data td = prep.pipeline(file_path) diff --git a/executables/bestest_hydronic_heat_pump/TAir_rnn.py b/executables/bestest_hydronic_heat_pump/TAir_rnn.py index 0a74b3b..5039d4c 100644 --- a/executables/bestest_hydronic_heat_pump/TAir_rnn.py +++ b/executables/bestest_hydronic_heat_pump/TAir_rnn.py @@ -29,7 +29,7 @@ warmup_width = 48 # Create Training data. 
For RNNs MultiStep training data is required -prep = PreprocessingMultiStep(inputs, output, label_width, warmup_width, init_features=inits) +prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=label_width, warmup_width=warmup_width, init_features=inits) # Process Training data td = prep.pipeline(file_path) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 9875042..dce8a81 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -1,6 +1,6 @@ from logging import warning import os -from typing import Optional +from typing import Optional, Union from physXAI.models.modular.modular_expression import ModularExpression from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel @@ -83,10 +83,37 @@ def __init__(self, model: ANNModel, inputs: list[ModularExpression], rescale_out self.inputs = inputs def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: - inps = list() - for x in self.inputs: - y = x.construct(input_layer, td) - inps.append(y) - self.model.model_config['n_features'] = len(inps) - td.columns = [inp.name for inp in self.inputs] - return self.model.generate_model(td=td)(keras.layers.Concatenate()(inps)) \ No newline at end of file + if self.name in ModularExpression.models.keys(): + return ModularExpression.models[self.name] + else: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + self.model.model_config['n_features'] = len(inps) + td.columns = [inp.name for inp in self.inputs] + l = self.model.generate_model(td=td)(keras.layers.Concatenate()(inps)) + ModularExpression.models[self.name] = l + return l + + +class ModularLinear(ModularExpression): + i = 0 + + def __init__(self, inputs: list[ModularExpression]): + if name is None: + name = f"ModularLinear_{ModularLinear.i}" + ModularLinear.i += 1 + super().__init__(name) + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.name in ModularExpression.models.keys(): + return ModularExpression.models[self.name] + else: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(inps)) + ModularExpression.models[self.name] = l + return l \ No newline at end of file diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index c0a633b..18031f4 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -13,6 +13,7 @@ class ModularExpression(ABC): feature_list = dict() feature_list_normalized = dict() trainable_parameters = dict() + models = dict() def __init__(self, name: str): self.name = name @@ -95,12 +96,11 @@ def __init__(self, name: str = None, initial_value: float = None): self.initial_value = initial_value def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: - if self.name is not None and self.name in ModularExpression.trainable_parameters.keys(): + if self.name in ModularExpression.trainable_parameters.keys(): return ModularExpression.trainable_parameters[self.name] else: l = ConstantLayer(trainable=True, name=self.name, value=self.initial_value)(input_layer) - if self.name is not None: - ModularExpression.trainable_parameters[self.name] = l + ModularExpression.trainable_parameters[self.name] = l 
return l diff --git a/physXAI/preprocessing/constructed.py b/physXAI/preprocessing/constructed.py index 2d01d22..fbe9fa0 100644 --- a/physXAI/preprocessing/constructed.py +++ b/physXAI/preprocessing/constructed.py @@ -170,7 +170,9 @@ class Feature(FeatureBase): Represents a basic feature that is assumed to exist directly in the input DataFrame. Its `process` method simply retrieves the column by its name. """ - pass + def __init__(self, name: str, **kwargs): + super().__init__(name, **kwargs) + FeatureConstruction.add_input(self.feature) @register_feature @@ -199,6 +201,7 @@ def __init__(self, f: Union[FeatureBase, str], lag: int, name: str = None, **kwa name = f.feature + f'_lag{lag}' super().__init__(name) self.lag: int = lag + FeatureConstruction.add_input(self.feature) def process(self, df: DataFrame) -> Series: if self.feature not in df.columns: @@ -529,6 +532,7 @@ class FeatureConstruction: """ features = list[FeatureBase]() + inputs = list[str]() @staticmethod def append(f: FeatureBase): @@ -542,6 +546,17 @@ def append(f: FeatureBase): if FeatureConstruction.get_feature(f.feature) is None: FeatureConstruction.features.append(f) + @staticmethod + def add_input(name: str): + """ + Adds a feature name to the list of input features. + + Args: + name (str): The name of the input feature to add. + """ + if name not in FeatureConstruction.inputs: + FeatureConstruction.inputs.append(name) + @staticmethod def get_feature(name: str) -> Union[FeatureBase, None]: """ diff --git a/physXAI/preprocessing/preprocessing.py b/physXAI/preprocessing/preprocessing.py index e4862a3..afb14ec 100644 --- a/physXAI/preprocessing/preprocessing.py +++ b/physXAI/preprocessing/preprocessing.py @@ -148,8 +148,8 @@ def __init__(self, inputs: list[str], output: Union[str, list[str]], shift: int csv_skiprows (Union[int, list[int]]): Row numbers of skipped data in csv. Default is no skipping. """ - super().__init__(inputs, output, shift, time_step, test_size, val_size, random_state, time_index_col, - csv_delimiter, csv_encoding, csv_header, csv_skiprows) + super().__init__(inputs=inputs, output=output, shift=shift, time_step=time_step, test_size=test_size, val_size=val_size, random_state=random_state, time_index_col=time_index_col, + csv_delimiter=csv_delimiter, csv_encoding=csv_encoding, csv_header=csv_header, csv_skiprows=csv_skiprows) def process_data(self, df: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]: """ @@ -294,8 +294,8 @@ def __init__(self, inputs: list[str], output: Union[str, list[str]], label_width If None and warmup_width > 0, defaults to `inputs`. If None and warmup_width <= 0, defaults to empty list. 
""" - super().__init__(inputs, output, shift, time_step, test_size, val_size, random_state, time_index_col, - csv_delimiter, csv_encoding, csv_header, csv_skiprows) + super().__init__(inputs=inputs, output=output, shift=shift, time_step=time_step, test_size=test_size, val_size=val_size, random_state=random_state, time_index_col=time_index_col, + csv_delimiter=csv_delimiter, csv_encoding=csv_encoding, csv_header=csv_header, csv_skiprows=csv_skiprows) self.overlapping_sequences = overlapping_sequences diff --git a/unittests/test_coverage.py b/unittests/test_coverage.py index 3849e0c..627d770 100644 --- a/unittests/test_coverage.py +++ b/unittests/test_coverage.py @@ -71,7 +71,7 @@ def test_preprocessing(monkeypatch, file_path, inputs_php, output_php): FeatureConstant(1, 'name') # Create & process Training data - prep = PreprocessingSingleStep(inputs_php, output_php) + prep = PreprocessingSingleStep(inputs=inputs_php, output=output_php) prep.pipeline(file_path) def test_preprocessing_multistep(file_path, inputs_tair, output_tair): @@ -85,7 +85,7 @@ def test_preprocessing_multistep(file_path, inputs_tair, output_tair): x3.lag(2) # oveHeaPumY_u_lag1, oveHeaPumY_u_lag2 # EvaluateMultiStep: Prepare Preprocessing - prep = PreprocessingMultiStep(inputs_tair, output_tair, 6, 6, init_features=['reaTZon_y'], + prep = PreprocessingMultiStep(inputs=inputs_tair, output=output_tair, label_width=6, warmup_width=6, init_features=['reaTZon_y'], overlapping_sequences=False, batch_size=1) prep.pipeline(file_path) @@ -94,7 +94,7 @@ def p_hp_data(file_path, inputs_php, output_php): # Setup up logger for saving Logger.setup_logger(folder_name='unittests\\test_coverage', override=True) # Create & process Training data - prep = PreprocessingSingleStep(inputs_php, output_php) + prep = PreprocessingSingleStep(inputs=inputs_php, output=output_php) td = prep.pipeline(file_path) return prep, td @@ -107,7 +107,7 @@ def tair_data_delta(file_path, inputs_tair, output_tair): x2.lag(1) # weaSta_reaWeaTDryBul_y_lag1 x3 = Feature('oveHeaPumY_u') x3.lag(2) # oveHeaPumY_u_lag1, oveHeaPumY_u_lag2 - prep = PreprocessingMultiStep(inputs_tair, output_tair, 3, 0, init_features=['reaTZon_y'], + prep = PreprocessingMultiStep(inputs=inputs_tair, output=output_tair, label_width=3, warmup_width=0, init_features=['reaTZon_y'], overlapping_sequences=False, batch_size=1) td = prep.pipeline(file_path) return prep, td @@ -121,7 +121,7 @@ def tair_data_noval(file_path, inputs_tair, output_tair): x2.lag(1) # weaSta_reaWeaTDryBul_y_lag1 x3 = Feature('oveHeaPumY_u') x3.lag(2) # oveHeaPumY_u_lag1, oveHeaPumY_u_lag2 - prep = PreprocessingMultiStep(inputs_tair, output_tair, 3, 0, init_features=['reaTZon_y'], + prep = PreprocessingMultiStep(inputs=inputs_tair, output=output_tair, label_width=3, warmup_width=0, init_features=['reaTZon_y'], overlapping_sequences=False, batch_size=1, val_size=0) td = prep.pipeline(file_path) return prep, td @@ -136,7 +136,7 @@ def tair_data_total(file_path, inputs_tair, output_tair): x2.lag(1) # weaSta_reaWeaTDryBul_y_lag1 x3 = Feature('oveHeaPumY_u') x3.lag(2) # oveHeaPumY_u_lag1, oveHeaPumY_u_lag2 - prep = PreprocessingMultiStep(inputs_tair, 'reaTZon_y', 3, 0, init_features=['reaTZon_y'], + prep = PreprocessingMultiStep(inputs=inputs_tair, output='reaTZon_y', label_width=3, warmup_width=0, init_features=['reaTZon_y'], overlapping_sequences=False, batch_size=1) td = prep.pipeline(file_path) return prep, td @@ -145,7 +145,7 @@ def test_model_linReg(inputs_php, output_php, file_path): # Setup up logger for saving 
Logger.setup_logger(folder_name='unittests\\test_coverage', override=True) # Create & process Training data - prep = PreprocessingSingleStep(inputs_php, output_php, val_size=0) + prep = PreprocessingSingleStep(inputs=inputs_php, output=output_php, val_size=0) td = prep.pipeline(file_path) # Check Models @@ -241,12 +241,12 @@ def test_model_pinn(inputs_php, output_php, file_path): pinn.rename('pinn') # PINN: Preprocessing - prep = PreprocessingSingleStep(inputs_php, output_php) + prep = PreprocessingSingleStep(inputs=inputs_php, output=output_php) td = prep.pipeline(file_path) m = PINNModel(pinn_weights=[1], epochs=1, n_neurons=4) m.pipeline(td, save_model=False, plot=False) - prep = PreprocessingSingleStep(inputs_php, output_php, val_size=0) + prep = PreprocessingSingleStep(inputs=inputs_php, output=output_php, val_size=0) td = prep.pipeline(file_path) m = PINNModel(pinn_weights=None, epochs=1, n_neurons=4) m.pipeline(td, save_model=True, plot=False) @@ -265,7 +265,7 @@ def test_models_rnn(file_path): inputs = ['weaSta_reaWeaTDryBul_y', 'weaSta_reaWeaHDirNor_y', 'oveHeaPumY_u'] inits = ['reaTZon_y'] output = 'reaTZon_y' - prep = PreprocessingMultiStep(inputs, output, 4, 2, init_features=inits) + prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=4, warmup_width=2, init_features=inits) td = prep.pipeline(file_path) m = RNNModel(epochs=1, rnn_layer='LSTM', init_layer='dense') @@ -286,7 +286,7 @@ def test_models_rnn(file_path): m = RNNModel(epochs=1, rnn_layer='RNN') m.pipeline(td, save_model=True, plot=False) - prep = PreprocessingMultiStep(inputs, output, 4, 0, val_size=0) + prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=4, warmup_width=0, val_size=0) td = prep.pipeline(file_path) m = RNNModel(epochs=1, rnn_layer='LSTM', early_stopping_epochs=None) m.pipeline(td, save_model=False, plot=False) diff --git a/unittests/verify_installation.py b/unittests/verify_installation.py index 7816102..aac57e5 100644 --- a/unittests/verify_installation.py +++ b/unittests/verify_installation.py @@ -18,7 +18,7 @@ output = 'reaPHeaPum_y' # Create Training data -prep = PreprocessingSingleStep(inputs, output) +prep = PreprocessingSingleStep(inputs=inputs, output=output) # Process Training data td = prep.pipeline(file_path) From 0d5120ca1e1dee73da08476ccac5aa811ead1f35 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Wed, 29 Oct 2025 14:46:32 +0100 Subject: [PATCH 04/36] Added new Modular Features --- physXAI/models/modular/modular_ann.py | 71 +++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 3 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index dce8a81..9db5133 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -1,4 +1,7 @@ +import functools +from itertools import combinations from logging import warning +import operator import os from typing import Optional, Union @@ -6,6 +9,7 @@ from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel from physXAI.models.models import register_model from physXAI.preprocessing.training_data import TrainingDataGeneric +from physXAI.preprocessing.constructed import FeatureBase os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import keras os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' @@ -63,7 +67,7 @@ class ModularModel(ModularExpression): allowed_models = [ClassicalANNModel, CMNNModel] i = 0 - def __init__(self, model: ANNModel, inputs: list[ModularExpression], rescale_output: bool = False, name: str = 
None): +    def __init__(self, model: ANNModel, inputs: list[Union[ModularExpression, FeatureBase]], rescale_output: bool = False, name: str = None): if not any(isinstance(model, allowed) for allowed in self.allowed_models): raise NotImplementedError(f"Currently {type(model)} is not supported. Allowed models are: {self.allowed_models}") @@ -80,7 +84,7 @@ def __init__(self, model: ANNModel, inputs: list[ModularExpression], rescale_out "normalize": False, "rescale_output": rescale_output }) -        self.inputs = inputs +        self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: @@ -100,11 +104,12 @@ class ModularLinear(ModularExpression): i = 0 -    def __init__(self, inputs: list[ModularExpression]): +    def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None): if name is None: name = f"ModularLinear_{ModularLinear.i}" ModularLinear.i += 1 super().__init__(name) +        self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.models.keys(): @@ -116,4 +121,64 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> inps.append(y) l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(inps)) ModularExpression.models[self.name] = l + return l + + +class ModularPolynomial(ModularExpression): + i = 0 + + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], degree: int = 2, interaction_degree: int = 1, name: str = None): + if name is None: + name = f"ModularPolynomial_{ModularPolynomial.i}" + ModularPolynomial.i += 1 + super().__init__(name) + assert degree >= 1, "Degree must be at least 1." + assert interaction_degree >= 1, "Interaction degree must be at least 1."
+ self.degree = degree + self.interaction_degree = interaction_degree + self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.name in ModularExpression.models.keys(): + return ModularExpression.models[self.name] + else: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + + new_features = list(inps) + for feature in inps: + for d in range(2, self.degree + 1): + new_features.append(feature ** d) + for k in range(2, self.interaction_degree + 1): + for combo in combinations(inps, k): + interaction_term = functools.reduce(operator.mul, combo) + new_features.append(interaction_term) + + l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(new_features)) + ModularExpression.models[self.name] = l + return l + + +class ModularAverage(ModularExpression): + i = 0 + + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None): + if name is None: + name = f"ModularAverage_{ModularAverage.i}" + ModularAverage.i += 1 + super().__init__(name) + self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.name in ModularExpression.models.keys(): + return ModularExpression.models[self.name] + else: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + l = keras.layers.Average()(inps) + ModularExpression.models[self.name] = l + return l \ No newline at end of file From 8f1797d27c1d78edf45e9abd7c7609a26a6b7c73 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Wed, 29 Oct 2025 18:04:32 +0100 Subject: [PATCH 05/36] Added test for modular ANN and bug fix --- .../models/ann/keras_models/keras_models.py | 3 +- physXAI/models/modular/modular_expression.py | 5 +- unittests/modular/test_modular.py | 83 +++++++++++++++++++ 3 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 unittests/modular/test_modular.py diff --git a/physXAI/models/ann/keras_models/keras_models.py b/physXAI/models/ann/keras_models/keras_models.py index 8e56ebe..1a75f02 100644 --- a/physXAI/models/ann/keras_models/keras_models.py +++ b/physXAI/models/ann/keras_models/keras_models.py @@ -443,7 +443,7 @@ class ConstantLayer(keras.Layer): trainable or non-trainable. """ - def __init__(self, value=0.0, shape=(1,), trainable=False, **kwargs): + def __init__(self, value=0.0, shape=(1,), trainable=False, name: str = None, **kwargs): """ Initializes the layer.
@@ -456,6 +456,7 @@ def __init__(self, value=0.0, shape=(1,), trainable=False, **kwargs): super().__init__(trainable=trainable, **kwargs) self.value = value self.target_shape = tuple(shape) + self.name = name def build(self, input_shape): if self.value is not None: diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index 18031f4..2350944 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -88,18 +88,19 @@ class ModularTrainable(ModularExpression): i = 0 - def __init__(self, name: str = None, initial_value: float = None): + def __init__(self, name: str = None, initial_value: float = None, trainable: bool = True): if name is None: name = f"ModularTrainable_{ModularTrainable.i}" ModularTrainable.i += 1 super().__init__(name) self.initial_value = initial_value + self.trainable = trainable def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.trainable_parameters.keys(): return ModularExpression.trainable_parameters[self.name] else: - l = ConstantLayer(trainable=True, name=self.name, value=self.initial_value)(input_layer) + l = ConstantLayer(trainable=self.trainable, name=self.name, value=self.initial_value)(input_layer) ModularExpression.trainable_parameters[self.name] = l return l diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py new file mode 100644 index 0000000..96efe1e --- /dev/null +++ b/unittests/modular/test_modular.py @@ -0,0 +1,83 @@ +import os +import keras +import numpy as np +import pandas as pd +from pathlib import Path +from physXAI.models.modular.modular_expression import ModularTrainable +from physXAI.models.ann.ann_design import ClassicalANNModel +from physXAI.models.modular.modular_ann import ModularANN, ModularAverage, ModularLinear, ModularModel +from physXAI.utils.logging import Logger +from physXAI.preprocessing.constructed import Feature +from physXAI.preprocessing.preprocessing import PreprocessingSingleStep + + +def generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows: int = 1200, num_features: int = 4, seed: int = 42, value_range: tuple = (-100, 100)): + np.random.seed(seed) + + columns = [f"x{i}" for i in range(1, num_features + 1)] + + data = {} + + for col in columns: + data[col] = np.random.uniform(value_range[0], value_range[1], num_rows) + + data_with_index = {"": range(num_rows)} + data_with_index.update(data) + + df = pd.DataFrame(data_with_index) + + # Ensure output directory exists + output_file = Path(output_path) + output_file.parent.mkdir(parents=True, exist_ok=True) + + df.to_csv(output_path, sep=";", index=False) + + print(f"Sample CSV file generated at: {output_path}") + + +def generate_sample_model(random_seed: int = 42, training_data_path: str = "data/sample_data.csv"): + Logger.setup_logger(base_path=os.path.abspath('models'), folder_name='001', override=True) + + inputs = [f"x{i}" for i in range(1, 4)] + output = "x4" + + features = list() + for inp in inputs: + features.append(Feature(inp)) + + prep = PreprocessingSingleStep(inputs=inputs, output=output, random_state=random_seed) + td = prep.pipeline(training_data_path) + + # TODO: Flatten, BatchNorm, Cropping1D, Reshape, RBF + + m1 = ModularModel(ClassicalANNModel(random_seed=random_seed), inputs=features, rescale_output=True) + m2 = ModularTrainable(initial_value=0.5) + mX = ModularTrainable(initial_value=5) + mY = ModularTrainable(initial_value=0.5) + 
m3 = mX + mY + m4 = mX - mY + m5 = mX * mY + m6 = mX / mY + m7 = mX ** mY + m8 = ModularAverage([mX, mY]) + + out = ModularLinear([ + m1, + m2, + m3, + m4, + m5, + m6, + m7, + m8 + ]) + m = ModularANN(architecture=out, epochs=10, random_seed=random_seed) + model = m.pipeline(td, plot=False, save_model=False) + + os.makedirs('models', exist_ok=True) + model.save('models/model.keras') + + +if __name__ == "__main__": + generate_sample_csv() + generate_sample_model() \ No newline at end of file From df60c015ad2cad8a7261ab88aacd3a26a6f8944c Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Fri, 31 Oct 2025 09:33:09 +0100 Subject: [PATCH 06/36] Updated --- physXAI/models/ann/keras_models/keras_models.py | 6 +++--- physXAI/models/modular/modular_expression.py | 2 +- unittests/modular/test_modular.py | 6 ++++-- unittests/test_coverage.py | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/physXAI/models/ann/keras_models/keras_models.py b/physXAI/models/ann/keras_models/keras_models.py index 1a75f02..3bae67c 100644 --- a/physXAI/models/ann/keras_models/keras_models.py +++ b/physXAI/models/ann/keras_models/keras_models.py @@ -443,7 +443,7 @@ class ConstantLayer(keras.Layer): trainable or non-trainable. """ - def __init__(self, value=0.0, shape=(1,), trainable=False, name: str = None, **kwargs): + def __init__(self, value=0.0, shape=(1,), trainable=False, weight_name: str = None, **kwargs): """ Initializes the layer. @@ -456,7 +456,7 @@ def __init__(self, value=0.0, shape=(1,), trainable=False, name: str = None, **k super().__init__(trainable=trainable, **kwargs) self.value = value self.target_shape = tuple(shape) - self.name = name + self.weight_name = weight_name def build(self, input_shape): if self.value is not None: @@ -467,7 +467,7 @@ def build(self, input_shape): shape=self.target_shape, initializer=init, trainable=self.trainable, - name=self.name, + name=self.weight_name, ) def call(self, inputs): diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index 2350944..d3d7ecd 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -100,7 +100,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> if self.name in ModularExpression.trainable_parameters.keys(): return ModularExpression.trainable_parameters[self.name] else: - l = ConstantLayer(trainable=self.trainable, name=self.name, value=self.initial_value)(input_layer) + l = ConstantLayer(trainable=self.trainable, weight_name=self.name, value=self.initial_value)(input_layer) ModularExpression.trainable_parameters[self.name] = l return l diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py index 96efe1e..5785167 100644 --- a/unittests/modular/test_modular.py +++ b/unittests/modular/test_modular.py @@ -1,5 +1,4 @@ import os -import keras import numpy as np import pandas as pd from pathlib import Path @@ -9,6 +8,9 @@ from physXAI.utils.logging import Logger from physXAI.preprocessing.constructed import Feature from physXAI.preprocessing.preprocessing import PreprocessingSingleStep +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +import keras +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' def generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows: int = 1200, num_features: int = 4, seed: int = 42, value_range: tuple = (-100, 100)): @@ -71,7 +73,7 @@ def generate_sample_model(random_seed: int = 42, training_data_path: str = "data m7, m8 ]) - m = 
ModularANN(architecture=out, epochs=10, random_seed=random_seed) + m = ModularANN(architecture=out, epochs=1000, random_seed=random_seed) model = m.pipeline(td, plot=False, save_model=False) os.makedirs('models', exist_ok=True) diff --git a/unittests/test_coverage.py b/unittests/test_coverage.py index 627d770..6b80abc 100644 --- a/unittests/test_coverage.py +++ b/unittests/test_coverage.py @@ -9,7 +9,7 @@ PreprocessingData from physXAI.preprocessing.constructed import Feature, FeatureConstruction, FeatureConstant from physXAI.feature_selection.recursive_feature_elimination import recursive_feature_elimination_pipeline -from physXAI.models import LinearRegressionModel, AbstractModel +from physXAI.models.models import LinearRegressionModel, AbstractModel from physXAI.models.ann.ann_design import ClassicalANNModel, CMNNModel, LinANNModel, PINNModel, RNNModel, \ RBFModel From 6f9266fb92eff27636a3decb82966c95bfeaff87 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Mon, 3 Nov 2025 14:51:48 +0100 Subject: [PATCH 07/36] Removed non backwards compatible changes in init --- physXAI/models/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/physXAI/models/__init__.py b/physXAI/models/__init__.py index 1153f2a..ba67d35 100644 --- a/physXAI/models/__init__.py +++ b/physXAI/models/__init__.py @@ -1 +1,3 @@ -from .ann.keras_models.keras_models import * \ No newline at end of file +from .ann.keras_models.keras_models import * +from .models import * +from .ann.ann_design import * \ No newline at end of file From fd9ea25e59ff35b77bce0d84ebed984bed372c78 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Mon, 3 Nov 2025 13:55:50 +0000 Subject: [PATCH 08/36] Update coverage badge [skip ci] --- build/reports/coverage.svg | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build/reports/coverage.svg b/build/reports/coverage.svg index a8c7e72..318685c 100644 --- a/build/reports/coverage.svg +++ b/build/reports/coverage.svg @@ -9,13 +9,13 @@ - + coverage coverage - 92% - 92% + 85% + 85% From fe765c506cfff361f76d0c3a08e5426ef923fc4e Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Sat, 8 Nov 2025 12:16:52 +0100 Subject: [PATCH 09/36] Updated --- physXAI/models/modular/modular_ann.py | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 9db5133..b859493 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -3,6 +3,7 @@ from logging import warning import operator import os +from pathlib import Path from typing import Optional, Union from physXAI.models.modular.modular_expression import ModularExpression @@ -12,6 +13,8 @@ from physXAI.preprocessing.constructed import FeatureBase os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import keras +from keras import Sequential +from keras.src import Functional os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' @@ -101,6 +104,34 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> return l +class ModularExistingModel(ModularExpression): + + def __init__(self, model: Union[Sequential, Functional, str, Path], original_inputs: list[ModularExpression, FeatureBase], trainable: bool, name: str = None): + if name is None: + name = model.name + '_existing' + super().__init__(name) + if isinstance(model, str) or isinstance(model, Path): + model = keras.models.load_model(model) + self.model = model + self.inputs = [inp if isinstance(inp, ModularExpression) else 
inp.input() for inp in original_inputs] + self.model.trainable = trainable + if not trainable: + for layer in self.model.layers: + layer.trainable = False + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.name in ModularExpression.models.keys(): + return ModularExpression.models[self.name] + else: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + l = self.model(keras.layers.Concatenate()(inps)) + ModularExpression.models[self.name] = l + return l + + class ModularLinear(ModularExpression): i = 0 From a14e651f2940d6c6e9fc15715ecb94f4a8a97f02 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Sat, 8 Nov 2025 11:19:50 +0000 Subject: [PATCH 10/36] Update coverage badge [skip ci] --- build/reports/coverage.svg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/reports/coverage.svg b/build/reports/coverage.svg index 318685c..8e21255 100644 --- a/build/reports/coverage.svg +++ b/build/reports/coverage.svg @@ -15,7 +15,7 @@ coverage coverage - 85% - 85% + 84% + 84% From 42a8cda945274e0810e900cc77e51570c690e488 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Mon, 10 Nov 2025 14:31:16 +0100 Subject: [PATCH 11/36] Bug fix --- physXAI/preprocessing/constructed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/physXAI/preprocessing/constructed.py b/physXAI/preprocessing/constructed.py index fbe9fa0..b978a3b 100644 --- a/physXAI/preprocessing/constructed.py +++ b/physXAI/preprocessing/constructed.py @@ -2,7 +2,6 @@ from typing import Type, Union import numpy as np from pandas import DataFrame, Series -from physXAI.models.modular.modular_expression import ModularFeature class FeatureBase(ABC): @@ -123,7 +122,8 @@ def get_config(self) -> dict: def from_config(cls, config: dict) -> 'FeatureBase': return cls(**config) - def input(self, normalize: bool = True) -> ModularFeature: + def input(self, normalize: bool = True): + from physXAI.models.modular.modular_expression import ModularFeature return ModularFeature(self.feature, normalize=normalize) From f8dca2cd628932dd6c0be0173043998f9ab53e71 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Tue, 11 Nov 2025 14:25:53 +0000 Subject: [PATCH 12/36] Update coverage badge [skip ci] --- build/reports/coverage.svg | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build/reports/coverage.svg b/build/reports/coverage.svg index 607d3de..5b40c8f 100644 --- a/build/reports/coverage.svg +++ b/build/reports/coverage.svg @@ -9,13 +9,13 @@ - + coverage coverage - 91% - 91% + 83% + 83% From 5060b9e80a8136c65170121c1cc387e48d412960 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Thu, 20 Nov 2025 17:34:42 +0100 Subject: [PATCH 13/36] Small bug fix --- physXAI/models/modular/modular_ann.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index b859493..930b8ed 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -5,7 +5,7 @@ import os from pathlib import Path from typing import Optional, Union - +from copy import deepcopy from physXAI.models.modular.modular_expression import ModularExpression from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel from physXAI.models.models import register_model @@ -98,6 +98,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> y = x.construct(input_layer, td) 
inps.append(y) self.model.model_config['n_features'] = len(inps) + td = deepcopy(td) td.columns = [inp.name for inp in self.inputs] l = self.model.generate_model(td=td)(keras.layers.Concatenate()(inps)) ModularExpression.models[self.name] = l From d4305e4e03280f506686eb8c2717a68c9deb8363 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Fri, 21 Nov 2025 09:16:12 +0000 Subject: [PATCH 14/36] Update coverage badge [skip ci] --- build/reports/coverage.svg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/reports/coverage.svg b/build/reports/coverage.svg index 5b40c8f..4b105c6 100644 --- a/build/reports/coverage.svg +++ b/build/reports/coverage.svg @@ -15,7 +15,7 @@ coverage coverage - 83% - 83% + 82% + 82% From 2866780d40e90560cd82360a4565c2ccd1d51fd5 Mon Sep 17 00:00:00 2001 From: "patrick.henkel" Date: Fri, 21 Nov 2025 10:21:42 +0100 Subject: [PATCH 15/36] Small bug fixes --- physXAI/models/ann/configs/ann_model_configs.py | 2 +- physXAI/models/modular/modular_expression.py | 7 +++++++ physXAI/preprocessing/constructed.py | 6 ++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/physXAI/models/ann/configs/ann_model_configs.py b/physXAI/models/ann/configs/ann_model_configs.py index ec97332..d950699 100644 --- a/physXAI/models/ann/configs/ann_model_configs.py +++ b/physXAI/models/ann/configs/ann_model_configs.py @@ -4,7 +4,7 @@ class ClassicalANNConstruction_config(BaseModel): - n_layers: int = Field(..., gt=0) + n_layers: int = Field(..., ge=0) n_neurons: Union[int, list[int]] = 32 activation_function: Union[str, list[str]] = 'softplus' rescale_output: bool = True diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index d3d7ecd..5aea080 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -18,6 +18,13 @@ class ModularExpression(ABC): def __init__(self, name: str): self.name = name + @staticmethod + def reset(): + ModularExpression.feature_list = dict() + ModularExpression.feature_list_normalized = dict() + ModularExpression.trainable_parameters = dict() + ModularExpression.models = dict() + @abstractmethod def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: pass diff --git a/physXAI/preprocessing/constructed.py b/physXAI/preprocessing/constructed.py index b978a3b..fc08b31 100644 --- a/physXAI/preprocessing/constructed.py +++ b/physXAI/preprocessing/constructed.py @@ -534,6 +534,12 @@ class FeatureConstruction: features = list[FeatureBase]() inputs = list[str]() + @staticmethod + def reset(): + """Clears all registered features and input names.""" + FeatureConstruction.features = list[FeatureBase]() + FeatureConstruction.inputs = list[str]() + @staticmethod def append(f: FeatureBase): """ From 5a7d0dc112e3a72cfb9de05cd292ae8a0fd86db6 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Fri, 21 Nov 2025 15:21:30 +0100 Subject: [PATCH 16/36] Added scaling --- physXAI/models/modular/modular_ann.py | 75 +++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 11 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 930b8ed..86db275 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -6,9 +6,11 @@ from pathlib import Path from typing import Optional, Union from copy import deepcopy + +import numpy as np from physXAI.models.modular.modular_expression import ModularExpression from 
physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel
-from physXAI.models.models import register_model
+from physXAI.models.models import LinearRegressionModel, register_model
 from physXAI.preprocessing.training_data import TrainingDataGeneric
 from physXAI.preprocessing.constructed import FeatureBase
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
@@ -26,7 +28,7 @@ class ModularANN(ANNModel):
 
     def __init__(self, architecture: ModularExpression, batch_size: int = 32, epochs: int = 1000,
                  learning_rate: float = 0.001, early_stopping_epochs: Optional[int] = 100,
-                 random_seed: int = 42, **kwargs):
+                 random_seed: int = 42, rescale_output: bool = False, **kwargs):
         """
         Initializes the ModularANN.
 
@@ -38,11 +40,14 @@ def __init__(self, architecture: ModularExpression, batch_size: int = 32, epochs
             early_stopping_epochs (int): Number of epochs with no improvement after which training will be stopped.
                 If None, early stopping is disabled.
             random_seed (int): Seed for random number generators to ensure reproducibility.
+            rescale_output (bool): Whether to rescale the model output back to the scale of the training targets.
         """
 
         super().__init__(batch_size, epochs, learning_rate, early_stopping_epochs, random_seed)
 
         self.architecture: ModularExpression = architecture
+        self.rescale_output = rescale_output
+
         self.model_config.update({})
 
     def generate_model(self, **kwargs):
@@ -54,6 +59,10 @@ def generate_model(self, **kwargs):
         n_features = td.X_train_single.shape[1]
         input_layer = keras.layers.Input(shape=(n_features,))
         x = self.architecture.construct(input_layer, td)
+        if self.rescale_output:
+            rescale_mean = float(np.mean(td.y_train_single))
+            rescale_sigma = float(np.std(td.y_train_single, ddof=1))
+            x = keras.layers.Rescaling(scale=rescale_sigma, offset=rescale_mean)(x)
         model = keras.models.Model(inputs=input_layer, outputs=x)
         model.summary()
         return model
@@ -67,10 +76,10 @@ def get_config(self) -> dict:
 
 class ModularModel(ModularExpression):
 
-    allowed_models = [ClassicalANNModel, CMNNModel]
+    allowed_models = [ClassicalANNModel, CMNNModel, LinearRegressionModel]
     i = 0
 
-    def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], rescale_output: bool = False, name: str = None):
+    def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_scale: float = None, nominal_offset: float = None):
        if not any(isinstance(model, allowed) for allowed in self.allowed_models):
            raise NotImplementedError(f"Currently {type(model)} is not supported. 
Allowed models are: {self.allowed_models}")
@@ -80,15 +89,25 @@ def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase]
         super().__init__(name)
         self.model = model
-        self.rescale_output = rescale_output
-        if rescale_output:
-            warning("Using rescale_output=True in ModularANN should only be done if model output is training data output.")
         self.model.model_config.update({
             "normalize": False,
-            "rescale_output": rescale_output
+            "rescale_output": False
         })
         self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs]
 
+        if nominal_scale is not None and nominal_offset is None:
+            nominal_offset = 0.0
+        elif nominal_offset is not None and nominal_scale is None:
+            nominal_scale = 1.0
+        self.nominal_offset = nominal_offset
+        self.nominal_scale = nominal_scale
+
+        if self.nominal_scale is not None:
+            self.rescale_output = True
+        else:
+            self.rescale_output = False
+
 
     def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer:
         if self.name in ModularExpression.models.keys():
             return ModularExpression.models[self.name]
@@ -100,7 +119,13 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
         self.model.model_config['n_features'] = len(inps)
         td = deepcopy(td)
         td.columns = [inp.name for inp in self.inputs]
-        l = self.model.generate_model(td=td)(keras.layers.Concatenate()(inps))
+        if isinstance(self.model, LinearRegressionModel):
+            # ModularLinear.construct already returns the output tensor of its Dense layer
+            l = ModularLinear(inputs=self.inputs, name=self.name + "_linear").construct(input_layer, td)
+        else:
+            l = self.model.generate_model(td=td)(keras.layers.Concatenate()(inps))
+        if self.rescale_output:
+            l = keras.layers.Rescaling(scale=self.nominal_scale, offset=self.nominal_offset)(l)
         ModularExpression.models[self.name] = l
         return l
 
@@ -136,12 +161,24 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
 class ModularLinear(ModularExpression):
     i = 0
 
-    def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None):
+    def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_scale: float = None, nominal_offset: float = None):
         if name is None:
             name = f"ModularLinear_{ModularLinear.i}"
             ModularLinear.i += 1
         super().__init__(name)
         self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs]
+
+        if nominal_scale is not None and nominal_offset is None:
+            nominal_offset = 0.0
+        elif nominal_offset is not None and nominal_scale is None:
+            nominal_scale = 1.0
+        self.nominal_offset = nominal_offset
+        self.nominal_scale = nominal_scale
+
+        if self.nominal_scale is not None:
+            self.rescale_output = True
+        else:
+            self.rescale_output = False
 
     def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer:
         if self.name in ModularExpression.models.keys():
@@ -152,6 +189,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
             y = x.construct(input_layer, td)
             inps.append(y)
         l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(inps))
+        if self.rescale_output:
+            l = keras.layers.Rescaling(scale=self.nominal_scale, offset=self.nominal_offset)(l)
         ModularExpression.models[self.name] = l
         return l
 
@@ -159,7 +198,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
 class ModularPolynomial(ModularExpression):
     i = 0
 
-    def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, 
interaction_degree: int = 1, name: str = None): + def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, interaction_degree: int = 1, name: str = None, nominal_scale: float = None, nominal_offset: float = None): if name is None: name = f"ModularPolynomial_{ModularPolynomial.i}" ModularPolynomial.i += 1 @@ -170,6 +209,18 @@ def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2 self.interaction_degree = interaction_degree self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + if nominal_scale is not None and nominal_offset is None: + nominal_offset = 0.0 + elif nominal_offset is not None and nominal_scale is None: + nominal_scale = 1.0 + self.nominal_offset = nominal_offset + self.nominal_scale = nominal_scale + + if self.nominal_scale is not None: + self.rescale_output = True + else: + self.rescale_output = False + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.models.keys(): return ModularExpression.models[self.name] @@ -189,6 +240,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> new_features.append(interaction_term) l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(new_features)) + if self.rescale_output: + l = keras.layers.Rescaling(scale=self.nominal_scale, offset=self.nominal_offset)(l) ModularExpression.models[self.name] = l return l From e36b0e38762d0765f4b328fd2ba88ce5973f51a7 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Fri, 21 Nov 2025 14:24:24 +0000 Subject: [PATCH 17/36] Update coverage badge [skip ci] --- build/reports/coverage.svg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/reports/coverage.svg b/build/reports/coverage.svg index 4b105c6..7a18c7f 100644 --- a/build/reports/coverage.svg +++ b/build/reports/coverage.svg @@ -15,7 +15,7 @@ coverage coverage - 82% - 82% + 80% + 80% From 648e7ad50f13f5f4dd0c6f415d79af1355c78a68 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Fri, 21 Nov 2025 15:56:15 +0100 Subject: [PATCH 18/36] Better normalization --- physXAI/models/modular/modular_ann.py | 83 +++++++++++++++------------ 1 file changed, 45 insertions(+), 38 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 86db275..339c2f0 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -79,7 +79,7 @@ class ModularModel(ModularExpression): allowed_models = [ClassicalANNModel, CMNNModel, LinearRegressionModel] i = 0 - def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_scale: float = None, nominal_offset: float = None): + def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_range: tuple[float, float] = None): if not any(isinstance(model, allowed) for allowed in self.allowed_models): raise NotImplementedError(f"Currently {type(model)} is not supported. 
Allowed models are: {self.allowed_models}") @@ -95,18 +95,14 @@ def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase] }) self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] - if nominal_scale is not None and nominal_offset is None: - nominal_offset = 0.0 - elif nominal_offset is not None and nominal_scale is None: - nominal_scale = 1.0 - self.nominal_offset = nominal_offset - self.nominal_scale = nominal_scale - - if self.nominal_scale is not None: - self.rescale_output = True - else: + if nominal_range is None: self.rescale_output = False - + elif nominal_range is not None and len(nominal_range) is not 2: + raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") + else: + self.rescale_output = True + self.nominal_mean = (nominal_range[1] + nominal_range[0]) / 2.0 + self.nominal_sigma = (nominal_range[1] - nominal_range[0]) / 4.0 # Assuming 4 sigma covers the range def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.models.keys(): @@ -125,7 +121,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> else: l = self.model.generate_model(td=td)(keras.layers.Concatenate()(inps)) if self.rescale_output: - l = keras.layers.Rescaling(scale=self.nominal_scale, offset=self.nominal_offset)(l) + l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l) ModularExpression.models[self.name] = l return l @@ -161,24 +157,21 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> class ModularLinear(ModularExpression): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_scale: float = None, nominal_offset: float = None): + def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_range: tuple[float, float] = None): if name is None: name = f"ModularLinear_{ModularLinear.i}" ModularLinear.i += 1 super().__init__(name) self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] - if nominal_scale is not None and nominal_offset is None: - nominal_offset = 0.0 - elif nominal_offset is not None and nominal_scale is None: - nominal_scale = 1.0 - self.nominal_offset = nominal_offset - self.nominal_scale = nominal_scale - - if self.nominal_scale is not None: - self.rescale_output = True - else: + if nominal_range is None: self.rescale_output = False + elif nominal_range is not None and len(nominal_range) is not 2: + raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") + else: + self.rescale_output = True + self.nominal_mean = (nominal_range[1] + nominal_range[0]) / 2.0 + self.nominal_sigma = (nominal_range[1] - nominal_range[0]) / 4.0 # Assuming 4 sigma covers the range def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.models.keys(): @@ -190,7 +183,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> inps.append(y) l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(inps)) if self.rescale_output: - l = keras.layers.Rescaling(scale=self.nominal_scale, offset=self.nominal_offset)(l) + l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l) ModularExpression.models[self.name] = l return l @@ -198,7 +191,7 @@ 
def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> class ModularPolynomial(ModularExpression): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, interaction_degree: int = 1, name: str = None, nominal_scale: float = None, nominal_offset: float = None): + def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, interaction_degree: int = 1, name: str = None, nominal_range: tuple[float, float] = None): if name is None: name = f"ModularPolynomial_{ModularPolynomial.i}" ModularPolynomial.i += 1 @@ -209,17 +202,14 @@ def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2 self.interaction_degree = interaction_degree self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] - if nominal_scale is not None and nominal_offset is None: - nominal_offset = 0.0 - elif nominal_offset is not None and nominal_scale is None: - nominal_scale = 1.0 - self.nominal_offset = nominal_offset - self.nominal_scale = nominal_scale - - if self.nominal_scale is not None: - self.rescale_output = True - else: + if nominal_range is None: self.rescale_output = False + elif nominal_range is not None and len(nominal_range) is not 2: + raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") + else: + self.rescale_output = True + self.nominal_mean = (nominal_range[1] + nominal_range[0]) / 2.0 + self.nominal_sigma = (nominal_range[1] - nominal_range[0]) / 4.0 # Assuming 4 sigma covers the range def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.models.keys(): @@ -241,7 +231,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> l = keras.layers.Dense(units=1, activation='linear')(keras.layers.Concatenate()(new_features)) if self.rescale_output: - l = keras.layers.Rescaling(scale=self.nominal_scale, offset=self.nominal_offset)(l) + l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l) ModularExpression.models[self.name] = l return l @@ -266,4 +256,21 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> inps.append(y) l = keras.layers.Average()(inps) ModularExpression.models[self.name] = l - return l \ No newline at end of file + return l + + +class ModularNormalization(ModularExpression): + i = 0 + + def __init__(self, input: ModularExpression, name: str = None): + if name is None: + name = f"ModularNormalization_{ModularNormalization.i}" + ModularNormalization.i += 1 + super().__init__(name) + self.inputs = input + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + inp = self.inputs.construct(input_layer, td) + normalization = keras.layers.BatchNormalization() + l = normalization(inp) + return l \ No newline at end of file From 0124cab96f0fc15047e4ab24418c87c2d7242d96 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Wed, 26 Nov 2025 15:03:01 +0100 Subject: [PATCH 19/36] Refactoring --- physXAI/models/modular/modular_ann.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 339c2f0..1bf54d4 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -97,7 +97,7 @@ def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase] if nominal_range is 
None: self.rescale_output = False - elif nominal_range is not None and len(nominal_range) is not 2: + elif nominal_range is not None and len(nominal_range) != 2: raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") else: self.rescale_output = True @@ -166,7 +166,7 @@ def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = Non if nominal_range is None: self.rescale_output = False - elif nominal_range is not None and len(nominal_range) is not 2: + elif nominal_range is not None and len(nominal_range) != 2: raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") else: self.rescale_output = True @@ -204,7 +204,7 @@ def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2 if nominal_range is None: self.rescale_output = False - elif nominal_range is not None and len(nominal_range) is not 2: + elif nominal_range is not None and len(nominal_range) != 2: raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") else: self.rescale_output = True From dca231844a905f24a372869f8fa3ef2ed66e6984 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Fri, 28 Nov 2025 14:55:09 +0100 Subject: [PATCH 20/36] Merge --- physXAI/preprocessing/constructed.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/physXAI/preprocessing/constructed.py b/physXAI/preprocessing/constructed.py index 14e019f..fc08b31 100644 --- a/physXAI/preprocessing/constructed.py +++ b/physXAI/preprocessing/constructed.py @@ -540,11 +540,6 @@ def reset(): FeatureConstruction.features = list[FeatureBase]() FeatureConstruction.inputs = list[str]() - @staticmethod - def reset(): - """Clears all registered features and input names.""" - FeatureConstruction.features = list[FeatureBase]() - @staticmethod def append(f: FeatureBase): """ From 72937e7c93005a364fff890ffdde2f6fcef06291 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Fri, 28 Nov 2025 14:57:23 +0100 Subject: [PATCH 21/36] Bug fix --- physXAI/models/modular/modular_ann.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 1bf54d4..bf11adb 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -65,6 +65,8 @@ def generate_model(self, **kwargs): x = keras.layers.Rescaling(scale=rescale_sigma, offset=rescale_mean)(x) model = keras.models.Model(inputs=input_layer, outputs=x) model.summary() + + ModularExpression.reset() return model def get_config(self) -> dict: From 293dd28b5cf6ac0b1c3bc621bec0f518ff3a00d5 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Fri, 28 Nov 2025 15:02:24 +0100 Subject: [PATCH 22/36] Updated API --- executables/bestest_hydronic_heat_pump/P_hp_modular.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/executables/bestest_hydronic_heat_pump/P_hp_modular.py b/executables/bestest_hydronic_heat_pump/P_hp_modular.py index 056ced4..c61126f 100644 --- a/executables/bestest_hydronic_heat_pump/P_hp_modular.py +++ b/executables/bestest_hydronic_heat_pump/P_hp_modular.py @@ -27,10 +27,9 @@ """Example usages of modular models""" y = ModularModel( model=ClassicalANNModel(), - inputs=[oveHeaPumY_u.input() / func_logistic.input(), func_logistic.input() ** 2, TDryBul.input(), TZon.input()], - rescale_output=True + inputs=[oveHeaPumY_u.input() / func_logistic.input(), func_logistic.input() ** 2, TDryBul.input(), TZon.input()] ) -m = 
ModularANN(architecture=y) +m = ModularANN(architecture=y, rescale_output=True) # Training pipeline model = m.pipeline(td) From 0394a7c8a3b5d700f0a041b5a61a1b54e06b5cdd Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Fri, 28 Nov 2025 14:06:34 +0000 Subject: [PATCH 23/36] Update coverage badge [skip ci] --- build/reports/coverage.svg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/reports/coverage.svg b/build/reports/coverage.svg index 1c7007c..7a18c7f 100644 --- a/build/reports/coverage.svg +++ b/build/reports/coverage.svg @@ -15,7 +15,7 @@ coverage coverage - 89% - 89% + 80% + 80% From d3d14d135a344f8c83b6356e7d49860191797f97 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Thu, 18 Dec 2025 22:21:36 +0100 Subject: [PATCH 24/36] Updated --- .../models/ann/configs/ann_model_configs.py | 1 + .../ann/model_construction/rbf_models.py | 104 +++++++++++------- .../ann/model_construction/residual_models.py | 2 +- 3 files changed, 64 insertions(+), 43 deletions(-) diff --git a/physXAI/models/ann/configs/ann_model_configs.py b/physXAI/models/ann/configs/ann_model_configs.py index 53d89f1..d172d65 100644 --- a/physXAI/models/ann/configs/ann_model_configs.py +++ b/physXAI/models/ann/configs/ann_model_configs.py @@ -33,6 +33,7 @@ def validate_activation(cls, v, info): class RBFConstruction_config(ClassicalANNConstruction_config): + n_layers: int = Field(..., ge=1, le=1) random_state: int = 42 rescale_mean: Optional[float] = Field( None, description="Mean value for z-score normalization of outputs" diff --git a/physXAI/models/ann/model_construction/rbf_models.py b/physXAI/models/ann/model_construction/rbf_models.py index 1d2e4a4..2a20df1 100644 --- a/physXAI/models/ann/model_construction/rbf_models.py +++ b/physXAI/models/ann/model_construction/rbf_models.py @@ -1,11 +1,33 @@ import keras import numpy as np from sklearn.cluster import KMeans +from sklearn.neighbors import NearestNeighbors from physXAI.preprocessing.training_data import TrainingDataGeneric from physXAI.models.ann.configs.ann_model_configs import RBFConstruction_config from physXAI.models.ann.keras_models.keras_models import RBFLayer +def gamma_init(centers, overlap=0.05): + """Initialize gamma parameter for RBF layer based on centers and desired overlap. + + Args: + centers (np.ndarray): Array of shape (n_centers, n_features) representing the RBF centers. + overlap (float): Desired overlap factor between RBFs. Higher values lead to more overlap. + """ + nbrs = NearestNeighbors(n_neighbors=2).fit(centers) + distances, _ = nbrs.kneighbors(centers) + dist_sq = distances[:, 1] ** 2 + avg_dist_sq = np.median(dist_sq) + + if avg_dist_sq == 0: + return 1.0 # Fallback + + gamma = -np.log(overlap) / avg_dist_sq + print(f"Calculated Gamma: {gamma}") + return gamma + + + def RBFModelConstruction(config: dict, td: TrainingDataGeneric): """ Constructs a Radial Basis Function (RBF) Network model using Keras. 
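The overlap heuristic above solves exp(-gamma * d^2) = overlap for gamma, where d^2 is the median squared nearest-neighbour distance between the centers, so every basis function still responds at roughly the overlap level at its closest neighbouring center. A minimal standalone check of that identity (a sketch, assuming only numpy and scikit-learn, mirroring gamma_init above):

    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    centers = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # unit square
    nbrs = NearestNeighbors(n_neighbors=2).fit(centers)
    distances, _ = nbrs.kneighbors(centers)  # column 0 is each point's distance to itself
    d2 = np.median(distances[:, 1] ** 2)     # median squared NN distance, 1.0 here
    gamma = -np.log(0.5) / d2                # as in gamma_init(centers, overlap=0.5)
    print(np.exp(-gamma * d2))               # -> 0.5, the requested overlap
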
@@ -29,59 +51,57 @@ def RBFModelConstruction(config: dict, td: TrainingDataGeneric):
     config = RBFConstruction_config.model_validate(config).model_dump()
 
     # Get config
-    n_layers = config['n_layers']
     n_neurons = config['n_neurons']
     # If n_neurons is a single integer, replicate it for all layers
-    if isinstance(n_neurons, int):
-        n_neurons = [n_neurons] * n_layers
-    else:
-        assert len(n_neurons) == n_layers
-    n_featues = td.X_train_single.shape[1]
-
-    # Rescaling for output layer
-    # Custom rescaling
-    if 'rescale_scale' in config.keys() or 'rescale_offset' in config.keys():
-        raise ValueError(
-            "The 'rescale_scale' and 'rescale_offset' parameters are deprecated. "
-            "Scaling has changed from min/max to standardization (z-score normalization using mean=0, std=1). "
-            "Please use 'rescale_mean' and 'rescale_sigma' instead."
-        )
-    if 'rescale_sigma' in config.keys() and config['rescale_sigma'] is not None:
-        if 'rescale_mean' in config.keys() and config['rescale_mean'] is not None:
-            rescale_mean = config['rescale_mean']
-        else:
-            rescale_mean = 0
-        rescale_sigma = config['rescale_sigma']
-    # Standard rescaling
+    if isinstance(n_neurons, list):
+        n_neurons = n_neurons[0]
+    if config['n_features'] is not None:
+        n_features = config['n_features']
     else:
-        rescale_mean = float(np.mean(td.y_train_single))
-        rescale_sigma = float(np.std(td.y_train_single, ddof=1))
+        n_features = td.X_train_single.shape[1]
 
     # Add input layer
-    input_layer = keras.layers.Input(shape=(n_featues,))
+    input_layer = keras.layers.Input(shape=(n_features,))
 
     # Add normalization layer
-    normalization = keras.layers.Normalization()
-    normalization.adapt(td.X_train_single)
-    x = normalization(input_layer)
-
-    for i in range(0, n_layers):
-        # For each layer add RBF
-
-        # Determine initial rbf centers
-        if i == 0:
-            # Apply KMeans Clustering for rbf centers
-            kmeans = KMeans(n_clusters=n_neurons[i], random_state=config['random_state'], n_init='auto')
-            kmeans.fit(normalization(td.X_train_single))
-            initial_centers_kmeans = kmeans.cluster_centers_
-            x = RBFLayer(n_neurons[i], initial_centers=initial_centers_kmeans, gamma=1)(x)
-        else:
-            x = RBFLayer(n_neurons[i], gamma=1)(x)
+    if config['normalize']:
+        normalization = keras.layers.Normalization()
+        normalization.adapt(td.X_train_single)
+        x = normalization(input_layer)
+    else:
+        x = input_layer
+
+    kmeans = KMeans(n_clusters=n_neurons, random_state=config['random_state'], n_init='auto')
+    kmeans.fit(normalization(td.X_train_single).numpy() if config['normalize'] else td.X_train_single)
+    initial_centers_kmeans = kmeans.cluster_centers_
+
+    x = RBFLayer(n_neurons,
+                 initial_centers=initial_centers_kmeans,
+                 gamma=gamma_init(initial_centers_kmeans, overlap=0.5),
+                 learnable_centers=False,
+                 learnable_gamma=False)(x)
 
     # Add output layer
-    x = keras.layers.Dense(1, activation='linear')(x)
+    x = keras.layers.Dense(1, activation='linear', use_bias=False)(x)
 
     # Add rescaling
     if config['rescale_output']:
+
+        # Rescaling for output layer
+        # Values come from config if provided, otherwise derived from the training data
+        # --- Sigma (Scale) ---
+        if 'rescale_sigma' in config and config['rescale_sigma'] is not None:
+            rescale_sigma = config['rescale_sigma']
+        else:
+            # Auto-calculate from data
+            rescale_sigma = float(np.std(td.y_train_single, ddof=1))
+        # --- Mean (Offset) ---
+        # CASE A: Residual Mode -> Config must provide 0.0
+        # CASE B: Direct Prediction -> Config is None, calculate from data
+        if 'rescale_mean' in config and config['rescale_mean'] is not None:
+            rescale_mean = config['rescale_mean']
+        else:
+            rescale_mean = float(np.mean(td.y_train_single))
+
        x = 
keras.layers.Rescaling(scale=rescale_sigma, offset=rescale_mean)(x) model = keras.Model(inputs=input_layer, outputs=x) diff --git a/physXAI/models/ann/model_construction/residual_models.py b/physXAI/models/ann/model_construction/residual_models.py index 5f55509..70ca9ad 100644 --- a/physXAI/models/ann/model_construction/residual_models.py +++ b/physXAI/models/ann/model_construction/residual_models.py @@ -35,7 +35,7 @@ def LinResidualANNConstruction(config: dict, td: TrainingDataGeneric, lin_model: # Determine predictions of linear regression for rescaling y_train_pred = lin_model.predict(td.X_train_single) config['rescale_sigma'] = float(np.std(td.y_train_single - y_train_pred, ddof=1)) - config['rescale_mean'] = float(np.mean(td.y_train_single - y_train_pred)) + config['rescale_mean'] = 0 # Add linear regression as dense keras layer lin = keras.layers.Dense(1, activation='linear') From 7bc8512d2895768b7d60cc945331adfda7aa3a44 Mon Sep 17 00:00:00 2001 From: "patrick.henkel" Date: Wed, 24 Dec 2025 12:40:52 +0100 Subject: [PATCH 25/36] Added Montone Linear --- physXAI/models/modular/modular_ann.py | 41 +++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index bf11adb..2bdef40 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -8,6 +8,7 @@ from copy import deepcopy import numpy as np +from physXAI.models.ann.keras_models.keras_models import NonNegPartial from physXAI.models.modular.modular_expression import ModularExpression from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel from physXAI.models.models import LinearRegressionModel, register_model @@ -188,6 +189,46 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l) ModularExpression.models[self.name] = l return l + + +class ModularMonotoneLinear(ModularExpression): + i = 0 + + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None, monotonicities: Optional[dict[str, int]] = None, nominal_range: tuple[float, float] = None): + if name is None: + name = f"ModularMonotoneLinear_{ModularLinear.i}" + ModularLinear.i += 1 + super().__init__(name) + self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + + if monotonicities is None: + monotonicities = [0] * len(self.inputs) + else: + monotonicities = [0 if inp.name not in monotonicities.keys() else monotonicities[inp.name] for inp in self.inputs] + self.monotonicities = monotonicities + + if nominal_range is None: + self.rescale_output = False + elif nominal_range is not None and len(nominal_range) != 2: + raise ValueError(f"Modular Model: nominal_range must be a tuple of (min, max), but was {nominal_range}") + else: + self.rescale_output = True + self.nominal_mean = (nominal_range[1] + nominal_range[0]) / 2.0 + self.nominal_sigma = (nominal_range[1] - nominal_range[0]) / 4.0 # Assuming 4 sigma covers the range + + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + if self.name in ModularExpression.models.keys(): + return ModularExpression.models[self.name] + else: + inps = list() + for x in self.inputs: + y = x.construct(input_layer, td) + inps.append(y) + l = keras.layers.Dense(units=1, activation='linear', 
kernel_constraint=NonNegPartial(self.monotonicities))(keras.layers.Concatenate()(inps)) + if self.rescale_output: + l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l) + ModularExpression.models[self.name] = l + return l class ModularPolynomial(ModularExpression): From 49090c3510f4b655dde03a88f7256bf8dfe92386 Mon Sep 17 00:00:00 2001 From: "ross.simon" Date: Sun, 28 Dec 2025 14:47:03 +0100 Subject: [PATCH 26/36] Added get_config methods for modular expressions --- physXAI/models/modular/modular_ann.py | 79 ++++++++++++++++++-- physXAI/models/modular/modular_expression.py | 38 ++++++++++ 2 files changed, 110 insertions(+), 7 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 2bdef40..dcd780e 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -49,7 +49,9 @@ def __init__(self, architecture: ModularExpression, batch_size: int = 32, epochs self.rescale_output = rescale_output - self.model_config.update({}) + self.model_config.update({ + 'rescale_output': rescale_output, # the rest of the parameters are passed on to super + }) def generate_model(self, **kwargs): """ @@ -72,7 +74,7 @@ def generate_model(self, **kwargs): def get_config(self) -> dict: config = super().get_config() - config.update({}) + config.update({}) # TODO: save architecture and rescale_output? warning("ModularANN currently does not save architecture config.") return config @@ -97,6 +99,7 @@ def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase] "rescale_output": False }) self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + self._nominal_range = nominal_range if nominal_range is None: self.rescale_output = False @@ -126,7 +129,16 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> if self.rescale_output: l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l) ModularExpression.models[self.name] = l - return l + return l + + def get_config(self) -> dict: + c = super().get_config() + c.update({ + # TODO: 'model': self.model??? + 'inputs': [inp.name for inp in self.inputs], + 'nominal_range': self._nominal_range, + }) + return c class ModularExistingModel(ModularExpression): @@ -156,6 +168,15 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l + def get_config(self) -> dict: + c = super().get_config() + c.update({ + # TODO: model ??? 
+            'original_inputs': [inp.name for inp in self.inputs],
+            'trainable': self.model.trainable
+        })
+        return c
+
 
 class ModularLinear(ModularExpression):
     i = 0
@@ -166,6 +187,7 @@ def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = Non
             ModularLinear.i += 1
         super().__init__(name)
         self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs]
+        self._nominal_range = nominal_range
 
         if nominal_range is None:
             self.rescale_output = False
@@ -189,7 +211,15 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
             l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l)
         ModularExpression.models[self.name] = l
         return l
-
+
+    def get_config(self) -> dict:
+        c = super().get_config()
+        c.update({
+            'inputs': [inp.name for inp in self.inputs],
+            'nominal_range': self._nominal_range,
+        })
+        return c
+
 
 class ModularMonotoneLinear(ModularExpression):
     i = 0
@@ -200,6 +230,7 @@ def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: st
             ModularLinear.i += 1
         super().__init__(name)
         self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs]
+        self._nominal_range = nominal_range
 
         if monotonicities is None:
             monotonicities = [0] * len(self.inputs)
@@ -230,6 +261,15 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
         ModularExpression.models[self.name] = l
         return l
 
+    def get_config(self) -> dict:
+        c = super().get_config()
+        c.update({
+            'inputs': [inp.name for inp in self.inputs],
+            'nominal_range': self._nominal_range,
+            'monotonicities': self.monotonicities,
+        })
+        return c
+
 
 class ModularPolynomial(ModularExpression):
     i = 0
@@ -244,6 +284,7 @@ def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2
         self.degree = degree
         self.interaction_degree = interaction_degree
         self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs]
+        self._nominal_range = nominal_range
 
         if nominal_range is None:
             self.rescale_output = False
@@ -277,7 +318,17 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
             l = keras.layers.Rescaling(scale=self.nominal_sigma, offset=self.nominal_mean)(l)
         ModularExpression.models[self.name] = l
         return l
-
+
+    def get_config(self) -> dict:
+        c = super().get_config()
+        c.update({
+            'inputs': [inp.name for inp in self.inputs],
+            'degree': self.degree,
+            'interaction_degree': self.interaction_degree,
+            'nominal_range': self._nominal_range,
+        })
+        return c
+
 
 class ModularAverage(ModularExpression):
     i = 0
@@ -300,7 +351,14 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
         l = keras.layers.Average()(inps)
         ModularExpression.models[self.name] = l
         return l
-
+
+    def get_config(self) -> dict:
+        c = super().get_config()
+        c.update({
+            'inputs': [inp.name for inp in self.inputs],
+        })
+        return c
+
 
 class ModularNormalization(ModularExpression):
     i = 0
@@ -316,4 +374,11 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) ->
         inp = self.inputs.construct(input_layer, td)
         normalization = keras.layers.BatchNormalization()
         l = normalization(inp)
-        return l
\ No newline at end of file
+        return l
+
+    def get_config(self) -> dict:
+        c = super().get_config()
+        c.update({
+            'input': self.inputs.name,
+        })
+        return c
diff --git 
a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -59,6 +59,13 @@ def __pow__(self, other): def rename(self, name: str): self.name = name + def get_config(self) -> dict: + c = { + 'class_name': self.__class__.__name__, + 'name': self.name, + } + return c + def get_name(feature: Union[ModularExpression, int, float]) -> str: if isinstance(feature, ModularExpression): @@ -90,6 +97,13 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> return x + def get_config(self) -> dict: + c = super().get_config() + c.update({ + 'normalize': self.normalize, + }) + return c + class ModularTrainable(ModularExpression): @@ -111,6 +125,14 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.trainable_parameters[self.name] = l return l + def get_config(self) -> dict: + c = super().get_config() + c.update({ + 'initial_value': self.initial_value, + 'trainable': self.trainable, + }) + return c + class ModularTwo(ModularExpression, ABC): @@ -136,6 +158,22 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: pass + def get_config(self) -> dict: + c = super().get_config() + if isinstance(self.feature1, ModularExpression): + f1n = self.feature1.name + else: + f1n = self.feature1 + if isinstance(self.feature2, ModularExpression): + f2n = self.feature2.name + else: + f2n = self.feature2 + c.update({ + 'feature1': f1n, + 'feature2': f2n, + }) + return c + class ModularAdd(ModularTwo): From cb9d2498f56539d344e6f303853230960bccb09d Mon Sep 17 00:00:00 2001 From: "ross.simon" Date: Sun, 28 Dec 2025 14:47:39 +0100 Subject: [PATCH 27/36] Fixed error --- unittests/modular/test_modular.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py index 5785167..a4bf18b 100644 --- a/unittests/modular/test_modular.py +++ b/unittests/modular/test_modular.py @@ -13,7 +13,7 @@ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' -def generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows: int = 1200, num_features: int = 4, seed: int = 42, value_range: tuple = (-100, 100)): +def test_generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows: int = 1200, num_features: int = 4, seed: int = 42, value_range: tuple = (-100, 100)): np.random.seed(seed) columns = [f"x{i}" for i in range(1, num_features + 1)] @@ -37,7 +37,7 @@ def generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows: int print(f"Sample CSV file generated at: {output_path}") -def generate_sample_model(random_seed: int = 42, training_data_path: str = "data/sample_data.csv"): +def test_generate_sample_model(random_seed: int = 42, training_data_path: str = "data/sample_data.csv"): Logger.setup_logger(base_path=os.path.abspath('models'), folder_name='001', override=True) inputs = [f"x{i}" for i in range(1, 4)] @@ -52,7 +52,7 @@ def generate_sample_model(random_seed: int = 42, training_data_path: str = "data # TODO: Flatten, BatchNorm, Cropping1D, Reshape, RBF - m1 = ModularModel(ClassicalANNModel(random_seed=random_seed), inputs=features, rescale_output=True) + m1 = ModularModel(ClassicalANNModel(random_seed=random_seed), inputs=features) m2 = ModularTrainable(initial_value=0.5) mX = ModularTrainable(initial_value=5) mY = ModularTrainable(initial_value=0.5) @@ -81,5 +81,5 @@ def 
generate_sample_model(random_seed: int = 42, training_data_path: str = "data
 if __name__ == "__main__":
-    generate_sample_csv()
-    generate_sample_model()
\ No newline at end of file
+    test_generate_sample_csv()
+    test_generate_sample_model()
\ No newline at end of file

From 822f0c277169624b9e4c794595a30640781721ad Mon Sep 17 00:00:00 2001
From: "ross.simon" 
Date: Sun, 28 Dec 2025 14:59:40 +0100
Subject: [PATCH 28/36] Fixed testing error

---
 unittests/modular/test_modular.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py
index a4bf18b..a87544c 100644
--- a/unittests/modular/test_modular.py
+++ b/unittests/modular/test_modular.py
@@ -6,7 +6,7 @@ from physXAI.models.ann.ann_design import ClassicalANNModel
 from physXAI.models.modular.modular_ann import ModularANN, ModularAverage, ModularLinear, ModularModel
 from physXAI.utils.logging import Logger
-from physXAI.preprocessing.constructed import Feature
+from physXAI.preprocessing.constructed import Feature, FeatureConstruction
 from physXAI.preprocessing.preprocessing import PreprocessingSingleStep
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 import keras
@@ -79,6 +79,8 @@ def test_generate_sample_model(random_seed: int = 42, training_data_path: str =
     os.makedirs('models', exist_ok=True)
     model.save('models/model.keras')
 
+    FeatureConstruction.reset()
+
 
 if __name__ == "__main__":
     test_generate_sample_model()

From 940dfd75ebcf507c0811b497fead3a7afda87d1e Mon Sep 17 00:00:00 2001
From: "ross.simon" 
Date: Sun, 28 Dec 2025 21:12:58 +0100
Subject: [PATCH 29/36] Updated save path

---
 unittests/modular/test_modular.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py
index a87544c..ac9d2ab 100644
--- a/unittests/modular/test_modular.py
+++ b/unittests/modular/test_modular.py
@@ -12,6 +12,8 @@ import keras
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
 
+base_path = os.path.join(Path(__file__).resolve().parent.parent.parent, 'stored_data')
+
 
 def test_generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows: int = 1200, num_features: int = 4, seed: int = 42, value_range: tuple = (-100, 100)):
     np.random.seed(seed)
@@ -38,7 +40,7 @@ def test_generate_sample_csv(output_path: str = "data/sample_data.csv", num_rows
 
 
 def test_generate_sample_model(random_seed: int = 42, training_data_path: str = "data/sample_data.csv"):
-    Logger.setup_logger(base_path=os.path.abspath('models'), folder_name='001', override=True)
+    Logger.setup_logger(base_path=base_path, folder_name='unittests\\test_modular', override=True)
 
     inputs = [f"x{i}" for i in range(1, 4)]
     output = "x4"
@@ -74,10 +76,7 @@ def test_generate_sample_model(random_seed: int = 42, training_data_path: str =
         m8
     ])
     m = ModularANN(architecture=out, epochs=1000, random_seed=random_seed)
-    model = m.pipeline(td, plot=False, save_model=False)
-
-    os.makedirs('models', exist_ok=True)
-    model.save('models/model.keras')
+    model = m.pipeline(td, plot=False, save_model=True)
 
     FeatureConstruction.reset()

From 780ee126e3b558a6b96525e0e3edba0f1298da76 Mon Sep 17 00:00:00 2001
From: "ross.simon" 
Date: Sun, 28 Dec 2025 21:13:51 +0100
Subject: [PATCH 30/36] partially added from_config

---
 physXAI/models/modular/modular_ann.py        |  85 ++++++++---
 physXAI/models/modular/modular_expression.py | 140 +++++++++++++++++--
 physXAI/utils/logging.py                     |  13 +-
 3 files changed, 204 insertions(+), 34 deletions(-)

diff --git 
a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index dcd780e..54e16e5 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -9,11 +9,12 @@ import numpy as np from physXAI.models.ann.keras_models.keras_models import NonNegPartial -from physXAI.models.modular.modular_expression import ModularExpression -from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel +from physXAI.models.modular.modular_expression import ModularExpression, register_modular_expression +from physXAI.models.ann.ann_design import SingleStepModel, ANNModel, CMNNModel, ClassicalANNModel from physXAI.models.models import LinearRegressionModel, register_model from physXAI.preprocessing.training_data import TrainingDataGeneric from physXAI.preprocessing.constructed import FeatureBase +from physXAI.utils.logging import Logger os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import keras from keras import Sequential @@ -74,11 +75,14 @@ def generate_model(self, **kwargs): def get_config(self) -> dict: config = super().get_config() - config.update({}) # TODO: save architecture and rescale_output? - warning("ModularANN currently does not save architecture config.") + config.update({ + 'architecture': self.architecture.name, + 'rescale_output': self.rescale_output, + }) return config +@register_modular_expression class ModularModel(ModularExpression): allowed_models = [ClassicalANNModel, CMNNModel, LinearRegressionModel] @@ -131,25 +135,49 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ - # TODO: 'model': self.model??? + 'model': self.model.get_config(), 'inputs': [inp.name for inp in self.inputs], 'nominal_range': self._nominal_range, }) return c + @classmethod + def _from_config(cls, config: dict) -> 'ModularModel': + """ + Creates a ModularModel instance from a configuration dictionary. + Handles reconstruction of model (ANNModel). + + Args: + config (dict): Configuration dictionary. Must contain configuration for model as well. + + Returns: + ModularModel: An instance of the specific ModularModel subclass. 
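+
+        Example (illustrative sketch; assumes `f1` is an already-built ModularExpression and `ann` an allowed ANNModel, since input names are not yet resolved in this first version):
+            cfg = {'name': 'ModularModel_0', 'model': ann.get_config(), 'inputs': [f1], 'nominal_range': None}
+            mm = ModularModel._from_config(cfg)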
+ """ + assert isinstance(config['model'], dict), (f"config must contain the configuration (dict) for the model but #" + f"config['model'] is {config['model']}]") + m = SingleStepModel.from_config(config['model']) + config['model'] = m + + return cls(**config) + + +@register_modular_expression class ModularExistingModel(ModularExpression): def __init__(self, model: Union[Sequential, Functional, str, Path], original_inputs: list[ModularExpression, FeatureBase], trainable: bool, name: str = None): - if name is None: - name = model.name + '_existing' - super().__init__(name) if isinstance(model, str) or isinstance(model, Path): + self.model_path = model model = keras.models.load_model(model) self.model = model + + if name is None: + name = model.name + '_existing' + super().__init__(name) + self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in original_inputs] self.model.trainable = trainable if not trainable: @@ -168,16 +196,23 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() + + # if model wasn't loaded from path originally, save it and store path + if not hasattr(self, 'model_path'): + self.model_path = Logger.get_model_savepath(save_name_model=self.model.name) + self.model.save(self.model_path) + c.update({ - # TODO: model ??? + 'model': self.model_path, 'original_inputs': [inp.name for inp in self.inputs], 'trainable': self.model.trainable }) return c +@register_modular_expression class ModularLinear(ModularExpression): i = 0 @@ -212,8 +247,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'inputs': [inp.name for inp in self.inputs], 'nominal_range': self._nominal_range, @@ -221,6 +256,7 @@ def get_config(self) -> dict: return c +@register_modular_expression class ModularMonotoneLinear(ModularExpression): i = 0 @@ -261,8 +297,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'inputs': [inp.name for inp in self.inputs], 'nominal_range': self._nominal_range, @@ -271,6 +307,7 @@ def get_config(self) -> dict: return c +@register_modular_expression class ModularPolynomial(ModularExpression): i = 0 @@ -319,8 +356,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'inputs': [inp.name for inp in self.inputs], 'degree': self.degree, @@ -330,6 +367,7 @@ def get_config(self) -> dict: return c +@register_modular_expression class ModularAverage(ModularExpression): i = 0 @@ -352,14 +390,15 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'inputs': [inp.name for inp in self.inputs], }) return c +@register_modular_expression 
class ModularNormalization(ModularExpression): i = 0 @@ -376,8 +415,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> l = normalization(inp) return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'input': self.inputs.name, }) diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index f6c3e6d..3b081da 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod import os -from typing import Union +from typing import Union, Type from physXAI.models.ann.keras_models.keras_models import ConstantLayer, DivideLayer, InputSliceLayer, PowerLayer from physXAI.preprocessing.training_data import TrainingDataGeneric os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' @@ -14,9 +14,11 @@ class ModularExpression(ABC): feature_list_normalized = dict() trainable_parameters = dict() models = dict() + modular_expression_list = list['ModularExpression']() def __init__(self, name: str): self.name = name + ModularExpression.modular_expression_list.append(self) @staticmethod def reset(): @@ -24,6 +26,7 @@ def reset(): ModularExpression.feature_list_normalized = dict() ModularExpression.trainable_parameters = dict() ModularExpression.models = dict() + ModularExpression.modular_expression_list = list() @abstractmethod def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: @@ -59,13 +62,47 @@ def __pow__(self, other): def rename(self, name: str): self.name = name - def get_config(self) -> dict: + def _get_config(self) -> dict: c = { 'class_name': self.__class__.__name__, 'name': self.name, } return c + @classmethod + def _from_config(cls, config: dict) -> 'ModularExpression': + return cls(**config) + + @staticmethod + def get_config() -> list: + """ + Returns a list of configuration dictionaries for all managed modular expressions. + This list can be serialized (e.g., to JSON) to save the modular expression pipeline. + """ + + item_configs = [item._get_config() for item in ModularExpression.modular_expression_list] + return item_configs + + @staticmethod + def from_config(): + pass # TODO + + @staticmethod + def get_modular_expression(name: str) -> Union['ModularExpression', None]: + """ + Retrieves a modular expression object by its name from the managed list. + + Args: + name (str): The name of the modular expression to retrieve. + + Returns: + ModularExpression or None: The found modular expression object, or None if not found. + """ + for f in ModularExpression.modular_expression_list: + if f.name == name: + return f + return None + def get_name(feature: Union[ModularExpression, int, float]) -> str: if isinstance(feature, ModularExpression): @@ -74,6 +111,44 @@ def get_name(feature: Union[ModularExpression, int, float]) -> str: return str(feature) +# --- Registry for ModularExpression Classes --- +# This registry maps class names (strings) to the actual class types (Type[ModularExpression]). +# It's used by `modular_expression_from_config` to dynamically create instances of the correct modular expression class. +CONSTRUCTED_CLASS_REGISTRY: dict[str, Type['ModularExpression']] = dict() + + +def modular_expression_from_config(item_conf: dict) -> 'ModularExpression': + """ + Factory function to create a modular expression object from its configuration dictionary. 
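+    Looks up 'class_name' in CONSTRUCTED_CLASS_REGISTRY and delegates construction to the registered class.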
+ + Args: + item_conf (dict): The configuration dictionary for a single modular expression. + Must contain 'class_name' and other necessary parameters. + + Returns: + ModularExpression: An instance of the appropriate modular expression subclass. + + Raises: + KeyError: If 'class_name' is not in `item_conf` or if the class_name is not in `CONSTRUCTED_CLASS_REGISTRY`. + """ + class_name = item_conf['class_name'] + modular_expression_class = CONSTRUCTED_CLASS_REGISTRY[class_name] + f1f = modular_expression_class.from_config(item_conf) + return f1f + + +def register_modular_expression(cls): + """ + A class decorator that registers the decorated class in the CONSTRUCTED_CLASS_REGISTRY. + The class is registered using its __name__. + """ + if cls.__name__ in CONSTRUCTED_CLASS_REGISTRY: # pragma: no cover + print(f"Warning: Class '{cls.__name__}' is already registered. Overwriting.") # pragma: no cover + CONSTRUCTED_CLASS_REGISTRY[cls.__name__] = cls + return cls # Decorators must return the class (or a replacement) + + +@register_modular_expression class ModularFeature(ModularExpression): def __init__(self, name: str, normalize: bool = True): @@ -97,14 +172,15 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> return x - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'normalize': self.normalize, }) return c +@register_modular_expression class ModularTrainable(ModularExpression): i = 0 @@ -125,8 +201,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.trainable_parameters[self.name] = l return l - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() c.update({ 'initial_value': self.initial_value, 'trainable': self.trainable, @@ -158,8 +234,8 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> keras.layers.Layer: pass - def get_config(self) -> dict: - c = super().get_config() + def _get_config(self) -> dict: + c = super()._get_config() if isinstance(self.feature1, ModularExpression): f1n = self.feature1.name else: @@ -174,7 +250,49 @@ def get_config(self) -> dict: }) return c - + @classmethod + def _from_config(cls, config: dict) -> 'ModularTwo': + """ + Creates a ModularTwo instance (or its subclass) from a configuration dictionary. + Handles reconstruction of operand modular expressions if they were ModularExpression objects. + + Args: + config (dict): Configuration dictionary. Must contain 'feature1' and 'feature2'. + + Returns: + ModularTwo: An instance of the specific ModularTwo subclass. 
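+
+        Example (illustrative; assumes an expression was registered earlier under the name 'a'):
+            expr = ModularPow._from_config({'feature1': 'a', 'feature2': 2.0, 'name': 'a_squared'})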
+ """ + + # Reconstruct feature 1 + if isinstance(config['feature1'], dict): + item_conf = config['feature1'] + # Check if modular expression already exists + f1n = ModularExpression.get_modular_expression(item_conf['name']) + if f1n is None: + f1n = modular_expression_from_config(item_conf) + elif isinstance(config['feature1'], str): + f1n = ModularExpression.get_modular_expression(config['feature1']) + else: # feature is int or float + f1n = config['feature1'] + config['feature1'] = f1n + + # Reconstruct feature 2 + if isinstance(config['feature2'], dict): + item_conf = config['feature2'] + # Check if modular expression already exists + f2n = ModularExpression.get_modular_expression(item_conf['name']) + if f2n is None: + f2n = modular_expression_from_config(item_conf) + elif isinstance(config['feature2'], str): + f2n = ModularExpression.get_modular_expression(config['feature2']) + else: # feature is int or float + f2n = config['feature2'] + config['feature2'] = f2n + + return cls(**config) + + +@register_modular_expression class ModularAdd(ModularTwo): def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): @@ -186,6 +304,7 @@ def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> return keras.layers.Add()([layer1, layer2]) +@register_modular_expression class ModularSub(ModularTwo): def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): @@ -197,6 +316,7 @@ def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> return keras.layers.Subtract()([layer1, layer2]) +@register_modular_expression class ModularMul(ModularTwo): def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): @@ -208,6 +328,7 @@ def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> return keras.layers.Multiply()([layer1, layer2]) +@register_modular_expression class ModularTrueDiv(ModularTwo): def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): @@ -219,6 +340,7 @@ def _construct(self, layer1: keras.layers.Layer, layer2: keras.layers.Layer) -> return DivideLayer()([layer1, layer2]) +@register_modular_expression class ModularPow(ModularTwo): def __init__(self, feature1: Union[ModularExpression, int, float], feature2: Union[ModularExpression, int, float], name: str = None): diff --git a/physXAI/utils/logging.py b/physXAI/utils/logging.py index 7f873d7..efeca23 100644 --- a/physXAI/utils/logging.py +++ b/physXAI/utils/logging.py @@ -232,10 +232,19 @@ def save_training_data(training_data, path: str = None): pickle.dump(training_data, f) @staticmethod - def get_model_savepath(): + def get_model_savepath(save_name_model: str = None) -> str: + """ + returns the path the model is saved to + + Args: + save_name_model (str): optional name the model is saved with (string without .keras), + default: Logger.save_name_model + """ if Logger._logger is None: Logger.setup_logger() + if save_name_model is None: + save_name_model = Logger.save_name_model - p = os.path.join(Logger._logger, Logger.save_name_model) + p = os.path.join(Logger._logger, save_name_model) return p From 4abda06baaa07245fdad28af4bfdc266298b6431 Mon Sep 17 00:00:00 2001 From: Patrick Henkel Date: Mon, 29 Dec 2025 16:14:01 +0100 Subject: [PATCH 31/36] Added reversed pow --- 
physXAI/models/modular/modular_expression.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index 3b081da..03b7146 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -59,6 +59,9 @@ def __rtruediv__(self, other): def __pow__(self, other): return ModularPow(self, other) + def __rpow__(self, other): + return ModularPow(other, self) + def rename(self, name: str): self.name = name From 313a8cc60bbf2b12f064e70d7850cd374fc4e7fc Mon Sep 17 00:00:00 2001 From: "ross.simon" Date: Mon, 29 Dec 2025 16:43:17 +0100 Subject: [PATCH 32/36] added from_config for modular_ann.py --- physXAI/models/modular/modular_ann.py | 181 +++++++++++++------ physXAI/models/modular/modular_expression.py | 99 +++++++--- physXAI/utils/logging.py | 16 +- 3 files changed, 217 insertions(+), 79 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 54e16e5..d3e7860 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -1,6 +1,6 @@ import functools from itertools import combinations -from logging import warning +from abc import ABC, abstractmethod import operator import os from pathlib import Path @@ -9,7 +9,8 @@ import numpy as np from physXAI.models.ann.keras_models.keras_models import NonNegPartial -from physXAI.models.modular.modular_expression import ModularExpression, register_modular_expression +from physXAI.models.modular.modular_expression import (ModularExpression, register_modular_expression, + get_modular_expressions_by_name) from physXAI.models.ann.ann_design import SingleStepModel, ANNModel, CMNNModel, ClassicalANNModel from physXAI.models.models import LinearRegressionModel, register_model from physXAI.preprocessing.training_data import TrainingDataGeneric @@ -81,28 +82,76 @@ def get_config(self) -> dict: }) return config + @classmethod + def from_config(cls, config: dict) -> 'ModularANN': + + a = ModularExpression.get_existing_modular_expression(config['architecture']) + assert a is not None, (f"ModularExpression {config['architecture']} not found, make sure to construct required " + f"modular expressions before constructing {cls.__class__.__name__}.") + config['architecture'] = a + + return cls(**config) + + +class ModularAbstractModel(ModularExpression, ABC): + """ + Abstract Base Class for ModularExpressions having other ModularExpressions as inputs + Examples: ModularModel, ModularExistingModel, ModularLinear, ... + """ + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str): + super().__init__(name) + self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + + @abstractmethod + def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: + pass + + def _get_config(self) -> dict: + c = super()._get_config() + c.update({ + 'inputs': [inp.name for inp in self.inputs], + }) + return c + + @classmethod + def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularAbstractModel': + """ + Creates a ModularAbstractModel instance (or its subclass) from a configuration dictionary. + Handles reconstruction of inputs. + + Args: + item_config (dict): Configuration dictionary. 
Must contain key 'inputs' with list of input names + config (list[dict]): The list with the configuration dictionaries of all modular expressions + + Returns: + ModularAbstractModel: An instance of the specific ModularAbstractModel subclass. + """ + + item_config['inputs'] = get_modular_expressions_by_name(item_config['inputs'], config) + return cls(**item_config) + @register_modular_expression -class ModularModel(ModularExpression): +class ModularModel(ModularAbstractModel): allowed_models = [ClassicalANNModel, CMNNModel, LinearRegressionModel] i = 0 - def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_range: tuple[float, float] = None): + def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], name: str = None, + nominal_range: tuple[float, float] = None): if not any(isinstance(model, allowed) for allowed in self.allowed_models): raise NotImplementedError(f"Currently {type(model)} is not supported. Allowed models are: {self.allowed_models}") if name is None: name = f"ModularModel_{ModularModel.i}" ModularModel.i += 1 + super().__init__(inputs, name) - super().__init__(name) self.model = model self.model.model_config.update({ "normalize": False, "rescale_output": False }) - self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] self._nominal_range = nominal_range if nominal_range is None: @@ -136,39 +185,42 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> return l def _get_config(self) -> dict: - c = super()._get_config() + c = ModularExpression._get_config(self) c.update({ 'model': self.model.get_config(), - 'inputs': [inp.name for inp in self.inputs], 'nominal_range': self._nominal_range, }) return c @classmethod - def _from_config(cls, config: dict) -> 'ModularModel': + def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularModel': """ Creates a ModularModel instance from a configuration dictionary. - Handles reconstruction of model (ANNModel). + Handles reconstruction of model (ANNModel) and inputs. Args: - config (dict): Configuration dictionary. Must contain configuration for model as well. + item_config (dict): Configuration dictionary. Must contain configuration for model as well. + config (list[dict]): The list with the configuration dictionaries of all modular expressions Returns: - ModularModel: An instance of the specific ModularModel subclass. + ModularModel: An instance of the specific ModularModel. 
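+
+        Example (illustrative; `ann` is an allowed ANNModel, `all_configs` the full serialized pipeline list, and 'x1' assumed present there):
+            item = {'name': 'ModularModel_0', 'model': ann.get_config(),
+                    'inputs': ['x1'], 'nominal_range': None}
+            mm = ModularModel._from_config(item, all_configs)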
""" - assert isinstance(config['model'], dict), (f"config must contain the configuration (dict) for the model but #" - f"config['model'] is {config['model']}]") - m = SingleStepModel.from_config(config['model']) - config['model'] = m - - return cls(**config) + assert isinstance(item_config['model'], dict), (f"config must contain the configuration (dict) for the model " + f"but config['model'] is {item_config['model']}]") + m = SingleStepModel.from_config(item_config['model']) + item_config['model'] = m + + item_config['inputs'] = get_modular_expressions_by_name(item_config['inputs'], config) + + return cls(**item_config) @register_modular_expression -class ModularExistingModel(ModularExpression): +class ModularExistingModel(ModularAbstractModel): - def __init__(self, model: Union[Sequential, Functional, str, Path], original_inputs: list[ModularExpression, FeatureBase], trainable: bool, name: str = None): + def __init__(self, model: Union[Sequential, Functional, str, Path], + original_inputs: list[ModularExpression, FeatureBase], trainable: bool, name: str = None): if isinstance(model, str) or isinstance(model, Path): self.model_path = model model = keras.models.load_model(model) @@ -176,9 +228,8 @@ def __init__(self, model: Union[Sequential, Functional, str, Path], original_inp if name is None: name = model.name + '_existing' - super().__init__(name) + super().__init__(original_inputs, name) - self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in original_inputs] self.model.trainable = trainable if not trainable: for layer in self.model.layers: @@ -206,22 +257,41 @@ def _get_config(self) -> dict: c.update({ 'model': self.model_path, - 'original_inputs': [inp.name for inp in self.inputs], + 'original_inputs': c['inputs'], 'trainable': self.model.trainable }) + c.__delitem__('inputs') # super config contains key 'inputs', here key must be original_inputs return c + @classmethod + def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularExistingModel': + """ + Creates a ModularExistingModel instance from a configuration dictionary. + Handles reconstruction of original_inputs. + + Args: + item_config (dict): Configuration dictionary + config (list[dict]): The list with the configuration dictionaries of all modular expressions + + Returns: + ModularExistingModel: An instance of the specific ModularExistingModel. 
+ """ + + item_config['original_inputs'] = get_modular_expressions_by_name(item_config['original_inputs'], config) + + return cls(**item_config) + @register_modular_expression -class ModularLinear(ModularExpression): +class ModularLinear(ModularAbstractModel): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None, nominal_range: tuple[float, float] = None): + def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None, + nominal_range: tuple[float, float] = None): if name is None: name = f"ModularLinear_{ModularLinear.i}" ModularLinear.i += 1 - super().__init__(name) - self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + super().__init__(inputs, name) self._nominal_range = nominal_range if nominal_range is None: @@ -250,22 +320,21 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> def _get_config(self) -> dict: c = super()._get_config() c.update({ - 'inputs': [inp.name for inp in self.inputs], 'nominal_range': self._nominal_range, }) return c @register_modular_expression -class ModularMonotoneLinear(ModularExpression): +class ModularMonotoneLinear(ModularAbstractModel): i = 0 - def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None, monotonicities: Optional[dict[str, int]] = None, nominal_range: tuple[float, float] = None): + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None, + monotonicities: Optional[dict[str, int]] = None, nominal_range: tuple[float, float] = None): if name is None: name = f"ModularMonotoneLinear_{ModularLinear.i}" ModularLinear.i += 1 - super().__init__(name) - self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + super().__init__(inputs, name) self._nominal_range = nominal_range if monotonicities is None: @@ -300,7 +369,6 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> def _get_config(self) -> dict: c = super()._get_config() c.update({ - 'inputs': [inp.name for inp in self.inputs], 'nominal_range': self._nominal_range, 'monotonicities': self.monotonicities, }) @@ -308,19 +376,19 @@ def _get_config(self) -> dict: @register_modular_expression -class ModularPolynomial(ModularExpression): +class ModularPolynomial(ModularAbstractModel): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, interaction_degree: int = 1, name: str = None, nominal_range: tuple[float, float] = None): + def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, interaction_degree: int = 1, + name: str = None, nominal_range: tuple[float, float] = None): if name is None: name = f"ModularPolynomial_{ModularPolynomial.i}" ModularPolynomial.i += 1 - super().__init__(name) + super().__init__(inputs, name) assert degree >= 1, "Degree must be at least 1." assert interaction_degree >= 1, "Interaction degree must be at least 1." 
self.degree = degree self.interaction_degree = interaction_degree - self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] self._nominal_range = nominal_range if nominal_range is None: @@ -359,7 +427,6 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> def _get_config(self) -> dict: c = super()._get_config() c.update({ - 'inputs': [inp.name for inp in self.inputs], 'degree': self.degree, 'interaction:degree': self.interaction_degree, 'nominal_range': self._nominal_range, @@ -368,15 +435,14 @@ def _get_config(self) -> dict: @register_modular_expression -class ModularAverage(ModularExpression): +class ModularAverage(ModularAbstractModel): i = 0 def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None): if name is None: name = f"ModularAverage_{ModularAverage.i}" ModularAverage.i += 1 - super().__init__(name) - self.inputs = [inp if isinstance(inp, ModularExpression) else inp.input() for inp in inputs] + super().__init__(inputs, name) def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: if self.name in ModularExpression.models.keys(): @@ -390,24 +456,16 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> ModularExpression.models[self.name] = l return l - def _get_config(self) -> dict: - c = super()._get_config() - c.update({ - 'inputs': [inp.name for inp in self.inputs], - }) - return c - @register_modular_expression -class ModularNormalization(ModularExpression): +class ModularNormalization(ModularAbstractModel): i = 0 def __init__(self, input: ModularExpression, name: str = None): if name is None: name = f"ModularNormalization_{ModularNormalization.i}" ModularNormalization.i += 1 - super().__init__(name) - self.inputs = input + super().__init__([input], name) def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: inp = self.inputs.construct(input_layer, td) @@ -418,6 +476,25 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> def _get_config(self) -> dict: c = super()._get_config() c.update({ - 'input': self.inputs.name, + 'input': c['inputs'][0], }) + c.__delitem__('inputs') # super config contains key 'inputs', here only single input return c + + @classmethod + def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularNormalization': + """ + Creates a ModularNormalization instance from a configuration dictionary. + Handles reconstruction of single input. + + Args: + item_config (dict): Configuration dictionary + config (list[dict]): The list with the configuration dictionaries of all modular expressions + + Returns: + ModularNormalization: An instance of the specific ModularNormalization. 
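+
+        Example (illustrative): _from_config({'name': 'ModularNormalization_0', 'input': 'x1'}, all_configs)
+        resolves 'x1' against the stored configs and, on construct(), wraps it in a BatchNormalization layer.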
+ """ + + item_config['input'] = get_modular_expressions_by_name(item_config['input'], config)[0] + + return cls(**item_config) diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index 3b081da..636495e 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -70,8 +70,8 @@ def _get_config(self) -> dict: return c @classmethod - def _from_config(cls, config: dict) -> 'ModularExpression': - return cls(**config) + def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularExpression': + return cls(**item_config) @staticmethod def get_config() -> list: @@ -84,11 +84,25 @@ def get_config() -> list: return item_configs @staticmethod - def from_config(): - pass # TODO + def from_config(config: list): + """ + Reconstructs the modular expression engineering pipeline from a list of configuration dictionaries. + Clears any existing modular expressions and populates `ModularExpression.modular_expression_list` with + newly created modular expression objects based on the provided configurations. + + Args: + config (List[dict]): A list where each dictionary is the configuration + for a single modular expression object. + """ + + ModularExpression.reset() + for item_conf in config: + f = ModularExpression.get_existing_modular_expression(item_conf['name']) + if f is None: + modular_expression_from_config(item_conf, config) @staticmethod - def get_modular_expression(name: str) -> Union['ModularExpression', None]: + def get_existing_modular_expression(name: str) -> Union['ModularExpression', None]: """ Retrieves a modular expression object by its name from the managed list. @@ -117,13 +131,14 @@ def get_name(feature: Union[ModularExpression, int, float]) -> str: CONSTRUCTED_CLASS_REGISTRY: dict[str, Type['ModularExpression']] = dict() -def modular_expression_from_config(item_conf: dict) -> 'ModularExpression': +def modular_expression_from_config(item_conf: dict, config: list[dict]) -> 'ModularExpression': """ Factory function to create a modular expression object from its configuration dictionary. Args: item_conf (dict): The configuration dictionary for a single modular expression. Must contain 'class_name' and other necessary parameters. + config (list[dict]): The list with the configuration dictionaries of all modular expressions Returns: ModularExpression: An instance of the appropriate modular expression subclass. @@ -133,10 +148,41 @@ def modular_expression_from_config(item_conf: dict) -> 'ModularExpression': """ class_name = item_conf['class_name'] modular_expression_class = CONSTRUCTED_CLASS_REGISTRY[class_name] - f1f = modular_expression_class.from_config(item_conf) + f1f = modular_expression_class._from_config(item_conf, config) return f1f +def get_modular_expressions_by_name(names: Union[str, list[str]], config: list[dict]) -> list[ModularExpression]: + """ + Retrieves modular expressions by their names if they have already been constructed, + otherwise constructs the modular expression objects based on the given configuration. + + Args: + names (Union[str, list[str]]): single name (str) or list of names of the modular expressions to retrieve + config (list[dict]): The list with the configuration dictionaries of all modular expressions + + Returns: + ModularExpression: An instance of the specific ModularExpression subclass. 
+ """ + + if isinstance(names, str): # convert str to list + names = [names] + + l = list[ModularExpression]() + for name in names: + me = ModularExpression.get_existing_modular_expression( + name) # if modular expression already constructed, retrieve it + + if me is None: # modular expression yet unconstructed + item_config = dict() + for item in config: # find config of modular expression to construct it + if item['name'] == name: + item_config = item + me = modular_expression_from_config(item_config, config) # construct modular expression + l.append(me) + return l + + def register_modular_expression(cls): """ A class decorator that registers the decorated class in the CONSTRUCTED_CLASS_REGISTRY. @@ -251,45 +297,46 @@ def _get_config(self) -> dict: return c @classmethod - def _from_config(cls, config: dict) -> 'ModularTwo': + def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularTwo': """ Creates a ModularTwo instance (or its subclass) from a configuration dictionary. Handles reconstruction of operand modular expressions if they were ModularExpression objects. Args: - config (dict): Configuration dictionary. Must contain 'feature1' and 'feature2'. + item_config (dict): Configuration dictionary. Must contain 'feature1' and 'feature2'. + config (list[dict]): The list with the configuration dictionaries of all modular expressions Returns: ModularTwo: An instance of the specific ModularTwo subclass. """ # Reconstruct feature 1 - if isinstance(config['feature1'], dict): - item_conf = config['feature1'] + if isinstance(item_config['feature1'], dict): + feature_conf = item_config['feature1'] # Check if modular expression already exists - f1n = ModularExpression.get_modular_expression(item_conf['name']) + f1n = ModularExpression.get_existing_modular_expression(feature_conf['name']) if f1n is None: - f1n = modular_expression_from_config(item_conf) - elif isinstance(config['feature1'], str): - f1n = ModularExpression.get_modular_expression(config['feature1']) + f1n = modular_expression_from_config(feature_conf, config) + elif isinstance(item_config['feature1'], str): + f1n = ModularExpression.get_existing_modular_expression(item_config['feature1']) else: # feature is int or float - f1n = config['feature1'] - config['feature1'] = f1n + f1n = item_config['feature1'] + item_config['feature1'] = f1n # Reconstruct feature 2 - if isinstance(config['feature2'], dict): - item_conf = config['feature2'] + if isinstance(item_config['feature2'], dict): + feature_conf = item_config['feature2'] # Check if modular expression already exists - f2n = ModularExpression.get_modular_expression(item_conf['name']) + f2n = ModularExpression.get_existing_modular_expression(feature_conf['name']) if f2n is None: - f2n = modular_expression_from_config(item_conf) - elif isinstance(config['feature2'], str): - f2n = ModularExpression.get_modular_expression(config['feature2']) + f2n = modular_expression_from_config(feature_conf, config) + elif isinstance(item_config['feature2'], str): + f2n = ModularExpression.get_existing_modular_expression(item_config['feature2']) else: # feature is int or float - f2n = config['feature2'] - config['feature2'] = f2n + f2n = item_config['feature2'] + item_config['feature2'] = f2n - return cls(**config) + return cls(**item_config) @register_modular_expression diff --git a/physXAI/utils/logging.py b/physXAI/utils/logging.py index efeca23..aee4cbe 100644 --- a/physXAI/utils/logging.py +++ b/physXAI/utils/logging.py @@ -109,6 +109,7 @@ class Logger: base_path = 'stored_data' 
save_name_model: str = 'model' save_name_model_online_learning: str = 'model_ol' + save_name_modular_expression_config: str = 'modular_expression_config.json' _logger = None _override = False @@ -156,7 +157,7 @@ def setup_logger(folder_name: str = None, override: bool = False, base_path: str @staticmethod def log_setup(preprocessing=None, model=None, save_name_preprocessing=None, save_name_model=None, - save_name_constructed=None): + save_name_constructed=None, save_name_modular_expression=None): if Logger._logger is None: Logger.setup_logger() @@ -198,6 +199,19 @@ def log_setup(preprocessing=None, model=None, save_name_preprocessing=None, save with open(path, "w") as f: json.dump(model_dict, f, indent=4) + from physXAI.models.modular.modular_expression import ModularExpression + modular_expression_config = ModularExpression.get_config() + if len(modular_expression_config) > 0: + if save_name_modular_expression is None: + save_name_modular_expression = Logger.save_name_modular_expression_config + path = os.path.join(Logger._logger, save_name_modular_expression) + path = create_full_path(path) + Logger.override_question(path) + with open(path, "w") as f: + json.dump(modular_expression_config, f, indent=4) + + ModularExpression.reset() + @staticmethod def save_training_data(training_data, path: str = None): if Logger._logger is None: From 1a53cd0db3754255dc330989b359380e1ef21c3d Mon Sep 17 00:00:00 2001 From: "ross.simon" Date: Mon, 29 Dec 2025 19:00:52 +0100 Subject: [PATCH 33/36] added testing of from_config methods, fixed small errors --- physXAI/models/modular/modular_ann.py | 11 ++-- physXAI/models/modular/modular_expression.py | 1 + unittests/modular/test_modular.py | 67 ++++++++++++++++++-- 3 files changed, 66 insertions(+), 13 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index d3e7860..79523b3 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -11,8 +11,8 @@ from physXAI.models.ann.keras_models.keras_models import NonNegPartial from physXAI.models.modular.modular_expression import (ModularExpression, register_modular_expression, get_modular_expressions_by_name) -from physXAI.models.ann.ann_design import SingleStepModel, ANNModel, CMNNModel, ClassicalANNModel -from physXAI.models.models import LinearRegressionModel, register_model +from physXAI.models.ann.ann_design import ANNModel, CMNNModel, ClassicalANNModel +from physXAI.models.models import AbstractModel, LinearRegressionModel, register_model from physXAI.preprocessing.training_data import TrainingDataGeneric from physXAI.preprocessing.constructed import FeatureBase from physXAI.utils.logging import Logger @@ -71,7 +71,6 @@ def generate_model(self, **kwargs): model = keras.models.Model(inputs=input_layer, outputs=x) model.summary() - ModularExpression.reset() return model def get_config(self) -> dict: @@ -87,7 +86,7 @@ def from_config(cls, config: dict) -> 'ModularANN': a = ModularExpression.get_existing_modular_expression(config['architecture']) assert a is not None, (f"ModularExpression {config['architecture']} not found, make sure to construct required " - f"modular expressions before constructing {cls.__class__.__name__}.") + f"modular expressions before constructing {cls.__name__}.") config['architecture'] = a return cls(**config) @@ -185,7 +184,7 @@ def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> return l def _get_config(self) -> dict: - c = ModularExpression._get_config(self) + c = 
super()._get_config() c.update({ 'model': self.model.get_config(), 'nominal_range': self._nominal_range, @@ -208,7 +207,7 @@ def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularModel': assert isinstance(item_config['model'], dict), (f"config must contain the configuration (dict) for the model " f"but config['model'] is {item_config['model']}]") - m = SingleStepModel.from_config(item_config['model']) + m = AbstractModel.model_from_config(item_config['model']) item_config['model'] = m item_config['inputs'] = get_modular_expressions_by_name(item_config['inputs'], config) diff --git a/physXAI/models/modular/modular_expression.py b/physXAI/models/modular/modular_expression.py index 3a9d96d..351ac96 100644 --- a/physXAI/models/modular/modular_expression.py +++ b/physXAI/models/modular/modular_expression.py @@ -151,6 +151,7 @@ def modular_expression_from_config(item_conf: dict, config: list[dict]) -> 'Modu """ class_name = item_conf['class_name'] modular_expression_class = CONSTRUCTED_CLASS_REGISTRY[class_name] + item_conf.__delitem__('class_name') f1f = modular_expression_class._from_config(item_conf, config) return f1f diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py index ac9d2ab..27f2a31 100644 --- a/unittests/modular/test_modular.py +++ b/unittests/modular/test_modular.py @@ -1,15 +1,17 @@ import os import numpy as np import pandas as pd +import json +import copy from pathlib import Path -from physXAI.models.modular.modular_expression import ModularTrainable +from physXAI.models.modular.modular_expression import ModularTrainable, ModularExpression from physXAI.models.ann.ann_design import ClassicalANNModel from physXAI.models.modular.modular_ann import ModularANN, ModularAverage, ModularLinear, ModularModel from physXAI.utils.logging import Logger -from physXAI.preprocessing.constructed import Feature, FeatureConstruction +from physXAI.preprocessing.constructed import Feature +from physXAI.models.models import AbstractModel from physXAI.preprocessing.preprocessing import PreprocessingSingleStep os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -import keras os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' base_path = os.path.join(Path(__file__).resolve().parent.parent.parent, 'stored_data') @@ -75,12 +77,63 @@ def test_generate_sample_model(random_seed: int = 42, training_data_path: str = m7, m8 ]) - m = ModularANN(architecture=out, epochs=1000, random_seed=random_seed) - model = m.pipeline(td, plot=False, save_model=True) + m = ModularANN(architecture=out, epochs=50, random_seed=random_seed) + model = m.pipeline(td, plot=True, save_model=True) - FeatureConstruction.reset() + Logger.log_setup(preprocessing=prep, model=m) + + +def test_read_setup(training_data_path: str = "data/sample_data.csv"): + Logger.setup_logger(base_path=base_path, folder_name='unittests\\test_modular', override=True) + + # Read setup + save_name_preprocessing = Logger.save_name_preprocessing + path = os.path.join(Logger._logger, save_name_preprocessing) + with open(path, "r") as f: + config_prep = json.load(f) + prep = PreprocessingSingleStep.from_config(config_prep) + + save_name_modular_expression = Logger.save_name_modular_expression_config + path = os.path.join(Logger._logger, save_name_modular_expression) + with open(path, "r") as f: + modular_expression_config = json.load(f) + stored_config = copy.deepcopy(modular_expression_config) + ModularExpression.from_config(modular_expression_config) + assert check_lists_equal(stored_config, ModularExpression.get_config()) + + 
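+    # Round-trip the model config the same way: load the JSON, rebuild the model
+    # via the registry, and check that the regenerated config matches the stored one.
+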
save_name_model = Logger.save_name_model_config + path = os.path.join(Logger._logger, save_name_model) + with open(path, "r") as f: + config_model = json.load(f) + stored_config = copy.deepcopy(config_model) + m = AbstractModel.model_from_config(config_model) + assert check_lists_equal(stored_config, m.get_config()) + + td = prep.pipeline(training_data_path) + model = m.pipeline(td, plot=True, save_model=True) + + +def check_lists_equal(list1, list2): + """Check if all elements in list1 exist and are equal to those in list2.""" + + def make_hashable(d): + """Convert dictionary values to hashable types.""" + if isinstance(d, dict): + return frozenset((k, make_hashable(v)) for k, v in d.items()) + elif isinstance(d, list): + return tuple(make_hashable(i) for i in d) + elif hasattr(d, '__dict__'): # Check if it's an object with attributes + return frozenset((key, make_hashable(value)) for key, value in d.__dict__.items()) + else: + return d # Return as is if it's already hashable + + set1 = {make_hashable(d) for d in list1} + set2 = {make_hashable(d) for d in list2} + + return set1 == set2 if __name__ == "__main__": test_generate_sample_model() - test_generate_sample_model() \ No newline at end of file + test_generate_sample_model() + test_read_setup() \ No newline at end of file From 347e2f03d41e0a840e793d0e96fef59d1f401c23 Mon Sep 17 00:00:00 2001 From: "ross.simon" Date: Wed, 31 Dec 2025 15:22:53 +0100 Subject: [PATCH 34/36] corrected type hint --- physXAI/models/modular/modular_ann.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 79523b3..feda538 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -136,7 +136,7 @@ class ModularModel(ModularAbstractModel): allowed_models = [ClassicalANNModel, CMNNModel, LinearRegressionModel] i = 0 - def __init__(self, model: ANNModel, inputs: list[ModularExpression, FeatureBase], name: str = None, + def __init__(self, model: ANNModel, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None, nominal_range: tuple[float, float] = None): if not any(isinstance(model, allowed) for allowed in self.allowed_models): raise NotImplementedError(f"Currently {type(model)} is not supported. 
Allowed models are: {self.allowed_models}") @@ -219,7 +219,7 @@ def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularModel': class ModularExistingModel(ModularAbstractModel): def __init__(self, model: Union[Sequential, Functional, str, Path], - original_inputs: list[ModularExpression, FeatureBase], trainable: bool, name: str = None): + original_inputs: list[Union[ModularExpression, FeatureBase]], trainable: bool, name: str = None): if isinstance(model, str) or isinstance(model, Path): self.model_path = model model = keras.models.load_model(model) @@ -285,7 +285,7 @@ def _from_config(cls, item_config: dict, config: list[dict]) -> 'ModularExisting class ModularLinear(ModularAbstractModel): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None, + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None, nominal_range: tuple[float, float] = None): if name is None: name = f"ModularLinear_{ModularLinear.i}" @@ -378,7 +378,7 @@ def _get_config(self) -> dict: class ModularPolynomial(ModularAbstractModel): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], degree: int = 2, interaction_degree: int = 1, + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], degree: int = 2, interaction_degree: int = 1, name: str = None, nominal_range: tuple[float, float] = None): if name is None: name = f"ModularPolynomial_{ModularPolynomial.i}" @@ -437,7 +437,7 @@ def _get_config(self) -> dict: class ModularAverage(ModularAbstractModel): i = 0 - def __init__(self, inputs: list[ModularExpression, FeatureBase], name: str = None): + def __init__(self, inputs: list[Union[ModularExpression, FeatureBase]], name: str = None): if name is None: name = f"ModularAverage_{ModularAverage.i}" ModularAverage.i += 1 From e51614fc94751c378decaf7e7191ea63d52126e2 Mon Sep 17 00:00:00 2001 From: "ross.simon" Date: Wed, 31 Dec 2025 16:10:35 +0100 Subject: [PATCH 35/36] updated unittests -> testing of all modular modules, fixed small errors --- physXAI/models/modular/modular_ann.py | 8 ++++---- unittests/modular/test_modular.py | 29 ++++++++++++++++++--------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index feda538..b8b7a42 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -251,7 +251,7 @@ def _get_config(self) -> dict: # if model wasn't loaded from path originally, save it and store path if not hasattr(self, 'model_path'): - self.model_path = Logger.get_model_savepath(save_name_model=self.model.name) + self.model_path = Logger.get_model_savepath(save_name_model=self.model.name + '.keras') self.model.save(self.model_path) c.update({ @@ -369,7 +369,7 @@ def _get_config(self) -> dict: c = super()._get_config() c.update({ 'nominal_range': self._nominal_range, - 'monotonicities': self.monotonicities, + 'monotonicities': {self.inputs[n].name: self.monotonicities[n] for n in range(len(self.inputs))}, }) return c @@ -427,7 +427,7 @@ def _get_config(self) -> dict: c = super()._get_config() c.update({ 'degree': self.degree, - 'interaction:degree': self.interaction_degree, + 'interaction_degree': self.interaction_degree, 'nominal_range': self._nominal_range, }) return c @@ -467,7 +467,7 @@ def __init__(self, input: ModularExpression, name: str = None): super().__init__([input], name) def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> 
keras.layers.Layer: - inp = self.inputs.construct(input_layer, td) + inp = self.inputs[0].construct(input_layer, td) normalization = keras.layers.BatchNormalization() l = normalization(inp) return l diff --git a/unittests/modular/test_modular.py b/unittests/modular/test_modular.py index 27f2a31..536a467 100644 --- a/unittests/modular/test_modular.py +++ b/unittests/modular/test_modular.py @@ -5,8 +5,10 @@ import copy from pathlib import Path from physXAI.models.modular.modular_expression import ModularTrainable, ModularExpression -from physXAI.models.ann.ann_design import ClassicalANNModel -from physXAI.models.modular.modular_ann import ModularANN, ModularAverage, ModularLinear, ModularModel +from physXAI.models.ann.ann_design import ClassicalANNModel, CMNNModel +from physXAI.models.modular.modular_ann import (ModularANN, ModularAverage, ModularLinear, ModularModel, + ModularExistingModel, ModularMonotoneLinear, ModularPolynomial, + ModularNormalization) from physXAI.utils.logging import Logger from physXAI.preprocessing.constructed import Feature from physXAI.models.models import AbstractModel @@ -67,16 +69,24 @@ def test_generate_sample_model(random_seed: int = 42, training_data_path: str = m7 = mX ** mY m8 = ModularAverage([mX, mY]) + # Existing model + cmnn = CMNNModel(monotonies={'x1': 1, 'x2': -1, 'x3': 0}, activation_split=[1, 1, 1], epochs=50) + cmnn_model = cmnn.pipeline(td, save_model=False, plot=False) + me = ModularExistingModel(model=cmnn_model, original_inputs=features, trainable=False) + + mml = ModularMonotoneLinear(inputs=[m3, m4], monotonicities={m3.name: 1, m4.name: -1}) + mp = ModularPolynomial(inputs=[m5, m7, m8], degree=3) + mn = ModularNormalization(input=m2) + out = ModularLinear([ m1, - m2, - m3, - m4, - m5, m6, - m7, - m8 + me, + mml, + mp, + mn, ]) + m = ModularANN(architecture=out, epochs=50, random_seed=random_seed) model = m.pipeline(td, plot=True, save_model=True) @@ -136,4 +146,5 @@ def make_hashable(d): if __name__ == "__main__": test_generate_sample_model() test_generate_sample_model() - test_read_setup() \ No newline at end of file + test_read_setup() + \ No newline at end of file From 5d3fef04e23478aa45e26d8590c0cdbb5f88ea82 Mon Sep 17 00:00:00 2001 From: "patrick.henkel" Date: Fri, 2 Jan 2026 14:02:47 +0100 Subject: [PATCH 36/36] Bug fix --- physXAI/models/modular/modular_ann.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physXAI/models/modular/modular_ann.py b/physXAI/models/modular/modular_ann.py index 79523b3..99d128c 100644 --- a/physXAI/models/modular/modular_ann.py +++ b/physXAI/models/modular/modular_ann.py @@ -467,7 +467,7 @@ def __init__(self, input: ModularExpression, name: str = None): super().__init__([input], name) def construct(self, input_layer: keras.layers.Input, td: TrainingDataGeneric) -> keras.layers.Layer: - inp = self.inputs.construct(input_layer, td) + inp = self.inputs[0].construct(input_layer, td) normalization = keras.layers.BatchNormalization() l = normalization(inp) return l
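Usage sketch of the save/load round trip completed by this series (folder and file names are illustrative; see Logger for the actual defaults, and `prep`/`td`/`m` are assumed to come from a preprocessing and training setup as in the unittests):

    import json
    from physXAI.models.models import AbstractModel
    from physXAI.models.modular.modular_expression import ModularExpression

    # save side: Logger.log_setup(preprocessing=prep, model=m) also serializes
    # the ModularExpression configs to modular_expression_config.json

    # load side: rebuild the expressions first, then the model, then retrain
    with open('stored_data/run/modular_expression_config.json') as f:
        ModularExpression.from_config(json.load(f))
    with open('stored_data/run/model_config.json') as f:
        m2 = AbstractModel.model_from_config(json.load(f))
    model = m2.pipeline(td, plot=False, save_model=False)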