Changes from all commits
Commits (56)
037e816
Working progress
PatrickHenkel Oct 28, 2025
1daa46a
Updated inits
PatrickHenkel Oct 29, 2025
14ed7cb
Updated Modular Networks, Added Keywords for PreprocessingData init
PatrickHenkel Oct 29, 2025
0d5120c
Added new Modular Features
PatrickHenkel Oct 29, 2025
8f1797d
Added test for modular ANN and bug fix
PatrickHenkel Oct 29, 2025
df60c01
Updated
PatrickHenkel Oct 31, 2025
e06bbbf
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Oct 31, 2025
488eab5
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 3, 2025
f5aec51
Merge branch '26-integrate-modular-anns' of https://github.com/RWTH-E…
PatrickHenkel Nov 3, 2025
6f9266f
Removed non backwards compatible changes in init
PatrickHenkel Nov 3, 2025
fd9ea25
Update coverage badge [skip ci]
actions-user Nov 3, 2025
fe765c5
Updated
PatrickHenkel Nov 8, 2025
a14e651
Update coverage badge [skip ci]
actions-user Nov 8, 2025
67140fc
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 10, 2025
dc0bd9a
Merge branch 'main' into 26-integrate-modular-anns
PatrickHenkel1 Nov 10, 2025
42a8cda
Bug fix
PatrickHenkel Nov 10, 2025
350ac20
Merge branch '26-integrate-modular-anns' of https://github.com/RWTH-E…
PatrickHenkel Nov 10, 2025
c92ae5e
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 11, 2025
f8dca2c
Update coverage badge [skip ci]
actions-user Nov 11, 2025
99b80b9
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 12, 2025
095208a
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 13, 2025
d4111ab
Merge branch '26-integrate-modular-anns' of https://github.com/RWTH-E…
PatrickHenkel Nov 13, 2025
5060b9e
Small bug fix
PatrickHenkel Nov 20, 2025
ecb9e23
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 21, 2025
d4305e4
Update coverage badge [skip ci]
actions-user Nov 21, 2025
2866780
Small bug fixes
PatrickHenkel Nov 21, 2025
262b3dc
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Nov 21, 2025
5a7d0dc
Added scaling
PatrickHenkel Nov 21, 2025
e36b0e3
Update coverage badge [skip ci]
actions-user Nov 21, 2025
648e7ad
Better normalization
PatrickHenkel Nov 21, 2025
bec6ed7
Merge branch '26-integrate-modular-anns' of https://github.com/RWTH-E…
PatrickHenkel Nov 21, 2025
0124cab
Refactoring
PatrickHenkel Nov 26, 2025
6922e79
Merge branch '54-bug-fix-global-variable-saving-could-mess-up-scenari…
PatrickHenkel Nov 28, 2025
dca2318
Merge
PatrickHenkel Nov 28, 2025
72937e7
Bug fix
PatrickHenkel Nov 28, 2025
293dd28
Updated API
PatrickHenkel Nov 28, 2025
b29ebcd
Merge branch 'main' into 26-integrate-modular-anns
PatrickHenkel1 Nov 28, 2025
0394a7c
Update coverage badge [skip ci]
actions-user Nov 28, 2025
9b8cf80
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Dec 17, 2025
d3d14d1
Updated
PatrickHenkel Dec 18, 2025
74dac95
Merge remote-tracking branch 'remotes/origin/59-fix-residual-rbf-mode…
PatrickHenkel Dec 18, 2025
920742e
Merge remote-tracking branch 'remotes/origin/main' into 26-integrate-…
PatrickHenkel Dec 24, 2025
7bc8512
Added Montone Linear
PatrickHenkel Dec 24, 2025
49090c3
Added get_config methods for modular expressions
Dec 28, 2025
cb9d249
Fixed error
Dec 28, 2025
822f0c2
Fixed testing error
Dec 28, 2025
940dfd7
Updated save path
Dec 28, 2025
780ee12
partially added from_config
Dec 28, 2025
4abda06
Added reversed pow
PatrickHenkel Dec 29, 2025
313a8cc
added from_config for modular_ann.py
Dec 29, 2025
134a923
Merge branch '26-integrate-modular-anns' of https://github.com/RWTH-E…
Dec 29, 2025
1a53cd0
added testing of from_config methods, fixed small errors
Dec 29, 2025
347e2f0
corrected type hint
Dec 31, 2025
e51614f
updated unittests -> testing of all modular modules, fixed small errors
Dec 31, 2025
5d3fef0
Bug fix
PatrickHenkel Jan 2, 2026
28dd559
Merge branch '26-integrate-modular-anns' of https://github.com/RWTH-E…
PatrickHenkel Jan 2, 2026
4 changes: 2 additions & 2 deletions build/reports/coverage.svg
(coverage badge SVG; diff not rendered)
2 changes: 1 addition & 1 deletion executables/bestest_hydronic_heat_pump/P_hp.py
@@ -36,7 +36,7 @@
# It is recommended to rename features, so that they can be easily added to the input list

# Create Training data
-prep = PreprocessingSingleStep(inputs, output)
+prep = PreprocessingSingleStep(inputs=inputs, output=output)
# Process Training data
td = prep.pipeline(file_path)

@@ -33,7 +33,7 @@

# Generic Preprocessing Pipeline
# Model is output model, so single step evaluation is chosen
-prep = PreprocessingSingleStep(inputs, output)
+prep = PreprocessingSingleStep(inputs=inputs, output=output)

# Generic Model
m = LinearRegressionModel()
41 changes: 41 additions & 0 deletions executables/bestest_hydronic_heat_pump/P_hp_modular.py
@@ -0,0 +1,41 @@
from physXAI.models.modular.modular_ann import ModularANN, ModularModel
from physXAI.preprocessing.preprocessing import PreprocessingSingleStep
from physXAI.preprocessing.constructed import Feature
from physXAI.models.ann.ann_design import ClassicalANNModel
from physXAI.utils.logging import Logger


"""
Creates modular models to predict the power of the heat pump using the BOPTEST data.
"""

Logger.setup_logger(folder_name='P_hp_modular', override=True)

file_path = r"data/bestest_hydronic_heat_pump/pid_data.csv"

inputs = ['oveHeaPumY_u', 'Func(logistic)', 'weaSta_reaWeaTDryBul_y', 'reaTZon_y']
output = 'reaPHeaPum_y'

oveHeaPumY_u = Feature('oveHeaPumY_u')
func_logistic = Feature('Func(logistic)')
TDryBul = Feature('weaSta_reaWeaTDryBul_y')
TZon = Feature('reaTZon_y')

prep = PreprocessingSingleStep(inputs=inputs, output=output)
td = prep.pipeline(file_path)

"""Example usages of modular models"""
y = ModularModel(
model=ClassicalANNModel(),
inputs=[oveHeaPumY_u.input() / func_logistic.input(), func_logistic.input() ** 2, TDryBul.input(), TZon.input()]
)
Comment on lines +28 to +31
Copilot AI (Dec 18, 2025):
The example shows ModularModel being used with a single submodel (ClassicalANNModel), but the comment on line 27 says "Example usages" (plural). The example could be more comprehensive by demonstrating additional modular features like ModularLinear, ModularPolynomial, ModularAverage, ModularTrainable, or combining multiple ModularModels, which would better showcase the flexibility of the modular system.
m = ModularANN(architecture=y, rescale_output=True)

# Training pipeline
model = m.pipeline(td)


# Log setup of preprocessing and model as json
Logger.log_setup(prep, m)
# Log training data as pickle
Logger.save_training_data(td)
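As the review comment above suggests, the modular system is meant to compose several submodels. A minimal sketch of such a composition, assuming ModularModel outputs combine with the same arithmetic operators as the input expressions in this file; the extra ClassicalANNModel arguments are illustrative, not part of this example:

# Sketch: combining two submodels (operator support on ModularModel outputs is assumed).
y_power = ModularModel(
    model=ClassicalANNModel(),
    inputs=[TDryBul.input(), TZon.input()]
)
y_control = ModularModel(
    model=ClassicalANNModel(n_layers=1, n_neurons=8),
    inputs=[oveHeaPumY_u.input(), func_logistic.input()]
)
m2 = ModularANN(architecture=y_power * y_control, rescale_output=True)
model2 = m2.pipeline(td)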
2 changes: 1 addition & 1 deletion executables/bestest_hydronic_heat_pump/P_hp_pinn.py
@@ -39,7 +39,7 @@
pinn.rename('pinn')

# Create Training data
-prep = PreprocessingSingleStep(inputs, output)
+prep = PreprocessingSingleStep(inputs=inputs, output=output)
# Process Training data
td = prep.pipeline(file_path)

2 changes: 1 addition & 1 deletion executables/bestest_hydronic_heat_pump/TAir.py
@@ -31,7 +31,7 @@
x3.lag(2) # oveHeaPumY_u_lag1, oveHeaPumY_u_lag2

# Create Training data
-prep = PreprocessingSingleStep(inputs, output)
+prep = PreprocessingSingleStep(inputs=inputs, output=output)
# Process Training data
td = prep.pipeline(file_path)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
# Generic Preprocessing Pipeline
# Model is state model, so multi-step evaluation is chosen
# See example TAir_evaluateMultiStep.py for more information
-prep = PreprocessingMultiStep(inputs, output, 48, 0, init_features=['reaTZon_y'],
+prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=48, warmup_width=0, init_features=['reaTZon_y'],
overlapping_sequences=False, batch_size=1)

# Generic Model
@@ -42,7 +42,7 @@
overlapping_sequences should be False to avoid duplicate labels for single step prediction
batch_size should be 1 as batches are processed differently in single step models
"""
-prep = PreprocessingMultiStep(inputs, output, 48, 0, init_features=['reaTZon_y'],
+prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=48, warmup_width=0, init_features=['reaTZon_y'],
overlapping_sequences=False, batch_size=1)
# Process Training data
td = prep.pipeline(file_path)
2 changes: 1 addition & 1 deletion executables/bestest_hydronic_heat_pump/TAir_rnn.py
@@ -29,7 +29,7 @@
warmup_width = 48

# Create Training data. For RNNs MultiStep training data is required
-prep = PreprocessingMultiStep(inputs, output, label_width, warmup_width, init_features=inits)
+prep = PreprocessingMultiStep(inputs=inputs, output=output, label_width=label_width, warmup_width=warmup_width, init_features=inits)
# Process Training data
td = prep.pipeline(file_path)

1 change: 0 additions & 1 deletion physXAI/__init__.py
@@ -1 +0,0 @@

1 change: 0 additions & 1 deletion physXAI/models/ann/__init__.py
@@ -1 +0,0 @@

25 changes: 14 additions & 11 deletions physXAI/models/ann/ann_design.py
@@ -1,3 +1,4 @@
+from logging import warning
import os
import time
from abc import ABC, abstractmethod
@@ -48,6 +49,9 @@ def __init__(self, batch_size: int = 32, epochs: int = 1000, learning_rate: floa
self.random_seed: int = random_seed
keras.utils.set_random_seed(random_seed)

+self.model_config = dict()
Copilot AI (Dec 18, 2025):
Initializing model_config as an empty dictionary in the base class (ANNModel) and then using update() in subclasses (lines 241, 297, 361, 426, 501) is a potential issue. If multiple subclass instances share references, they could inadvertently share the same config dictionary. While this is unlikely in typical usage, it's safer to ensure each instance gets its own dictionary. Consider self.model_config = dict() or self.model_config = {} in each subclass __init__ before calling super().__init__().
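A minimal, self-contained illustration of the aliasing concern, using hypothetical stand-in classes rather than the physXAI hierarchy. As written in this PR, the dict is created per instance inside __init__, so instances do not share state; sharing would only arise if model_config were a class attribute:

class SharedConfig:
    model_config = {}               # class attribute: one dict shared by all instances

class PerInstanceConfig:
    def __init__(self):
        self.model_config = {}      # instance attribute: a fresh dict per instance

a, b = PerInstanceConfig(), PerInstanceConfig()
a.model_config["n_layers"] = 3
assert "n_layers" not in b.model_config    # no cross-instance leakage

c, d = SharedConfig(), SharedConfig()
c.model_config["n_layers"] = 3
assert d.model_config["n_layers"] == 3     # leakage: the dict is shared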


@abstractmethod
def generate_model(self, **kwargs):
"""
@@ -234,12 +238,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32,
self.activation_function: Union[str, list[str]] = activation_function
self.rescale_output: bool = rescale_output

-self.model_config = {
+self.model_config.update({
"n_layers": self.n_layers,
"n_neurons": self.n_neurons,
"activation_function": self.activation_function,
"rescale_output": self.rescale_output,
-}
+})

def generate_model(self, **kwargs):
"""
@@ -290,12 +294,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, res
self.n_neurons: Union[int, list[int]] = n_neurons
self.rescale_output: bool = rescale_output

-self.model_config = {
+self.model_config.update({
"n_layers": self.n_layers,
"n_neurons": self.n_neurons,
"rescale_output": self.rescale_output,
"random_state": random_seed
-}
+})

def generate_model(self, **kwargs):
"""
@@ -354,12 +358,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, res
self.n_neurons: Union[int, list[int]] = n_neurons
self.rescale_output: bool = rescale_output

-self.model_config = {
+self.model_config.update({
"n_layers": self.n_layers,
"n_neurons": self.n_neurons,
"rescale_output": self.rescale_output,
"random_state": random_seed
-}
+})

def generate_model(self, **kwargs):
"""
@@ -419,14 +423,14 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32,
self.monotonies: dict[str, int] = monotonies
self.activation_split: list[float] = activation_split

-self.model_config = {
+self.model_config.update({
"n_layers": self.n_layers,
"n_neurons": self.n_neurons,
"activation_function": self.activation_function,
"rescale_output": self.rescale_output,
"monotonicities": self.monotonies,
"activation_split": activation_split,
-}
+})

def generate_model(self, **kwargs):
"""
@@ -494,14 +498,14 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32,

self.pinn_weights: list[float] = pinn_weights

-self.model_config = {
+self.model_config.update({
"n_layers": self.n_layers,
"n_neurons": self.n_neurons,
"activation_function": self.activation_function,
"rescale_output": self.rescale_output,
"monotonicities": self.monotonies,
"activation_split": activation_split,
-}
+})

# Create pinn loss based on standard losses
self.pinn_loss = multi_y_loss(keras.losses.MeanSquaredError(name='MSE'), self.pinn_weights, 'mse')
@@ -626,7 +630,6 @@ def get_config(self) -> dict:
})
return config


@register_model
class RNNModel(MultiStepModel):
"""
5 changes: 4 additions & 1 deletion physXAI/models/ann/configs/ann_model_configs.py
@@ -4,10 +4,12 @@

class ClassicalANNConstruction_config(BaseModel):

-n_layers: int = Field(..., gt=0)
+n_layers: int = Field(..., ge=0)
Copilot AI (Dec 18, 2025):
Changing n_layers from gt=0 (greater than 0) to ge=0 (greater than or equal to 0) allows zero layers, which may not make physical sense for a neural network and could lead to runtime errors in model construction code that assumes at least one layer. This is a breaking change that loosens validation constraints. If zero layers are intentional for the modular system, this should be documented, and model construction code should handle this edge case safely.
Suggested change:
-n_layers: int = Field(..., ge=0)
+n_layers: int = Field(..., gt=0)
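Note that the construction code in ann_models.py (the for i in range(0, n_layers) loop further below) already tolerates n_layers == 0: the network then reduces to the optional normalization layer plus the single linear output layer, i.e., a linear model. A minimal sketch of the loosened constraint, using a hypothetical stand-in config class:

from pydantic import BaseModel, Field, ValidationError

class DemoConfig(BaseModel):      # hypothetical stand-in, not the real config class
    n_layers: int = Field(..., ge=0)

DemoConfig(n_layers=0)            # now valid: zero hidden layers
try:
    DemoConfig(n_layers=-1)       # still rejected by ge=0
except ValidationError:
    pass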
n_neurons: Union[int, list[int]] = 32
activation_function: Union[str, list[str]] = 'softplus'
rescale_output: bool = True
+normalize: bool = True
+n_features: Optional[int] = None
Comment on lines +11 to +12
Copilot AI (Dec 18, 2025):
Adding new required fields normalize and n_features to the configuration schema is a breaking change for existing saved model configurations. While both fields have defaults (True and None), any code that creates config dictionaries directly or loads old saved configs may fail validation if these fields are missing. Consider documenting this as a breaking change and providing a migration path for existing saved models.
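Since both fields carry defaults, pydantic validation will not fail when they are absent; the sharper risk is the direct indexing in ann_models.py (config['n_features'], config['normalize']), which raises KeyError for config dicts saved before this change. A minimal sketch of a migration shim; the helper name is hypothetical:

def migrate_config(old: dict) -> dict:
    """Backfill keys introduced in this PR on configs saved before it."""
    cfg = dict(old)                      # copy; do not mutate the caller's dict
    cfg.setdefault('normalize', True)    # pre-change behavior: always normalize
    cfg.setdefault('n_features', None)   # pre-change behavior: infer from training data
    return cfg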

@field_validator('n_neurons')
def validate_n_neurons(cls, v, info):
@@ -33,6 +35,7 @@ def validate_activation(cls, v, info):

class RBFConstruction_config(ClassicalANNConstruction_config):

+n_layers: int = Field(..., ge=1, le=1)
random_state: int = 42
rescale_mean: Optional[float] = Field(
None, description="Mean value for z-score normalization of outputs"
1 change: 0 additions & 1 deletion physXAI/models/ann/keras_models/__init__.py
@@ -1 +0,0 @@

47 changes: 27 additions & 20 deletions physXAI/models/ann/model_construction/ann_models.py
@@ -1,5 +1,4 @@
import os

import numpy as np
from physXAI.preprocessing.training_data import TrainingDataGeneric
from physXAI.models.ann.configs.ann_model_configs import (ClassicalANNConstruction_config,
@@ -35,28 +34,28 @@ def ClassicalANNConstruction(config: dict, td: TrainingDataGeneric):
n_neurons = [n_neurons] * n_layers
else:
assert len(n_neurons) == n_layers
-n_featues = td.X_train_single.shape[1]
+if config['n_features'] is not None:
+    n_features = config['n_features']
+else:
+    n_features = td.X_train_single.shape[1]
activation_function = config['activation_function']
# If activation_function is a single string, replicate it for all layers
if isinstance(activation_function, str):
activation_function = [activation_function] * n_layers
else:
assert len(activation_function) == n_layers

-# Rescaling for output layer
-rescale_mean = float(np.mean(td.y_train_single))
-rescale_sigma = float(np.std(td.y_train_single, ddof=1))

# Build artificial neural network as Sequential
model = keras.Sequential()

# Add input layer
-model.add(keras.layers.Input(shape=(n_featues,)))
+model.add(keras.layers.Input(shape=(n_features,)))

# Add normalization layer
-normalization = keras.layers.Normalization()
-normalization.adapt(td.X_train_single)
-model.add(normalization)
+if config['normalize']:
+    normalization = keras.layers.Normalization()
+    normalization.adapt(td.X_train_single)
+    model.add(normalization)

for i in range(0, n_layers):
# For each layer add dense
@@ -65,6 +64,9 @@ def ClassicalANNConstruction(config: dict, td: TrainingDataGeneric):
model.add(keras.layers.Dense(1, activation='linear'))
# Add rescaling
if config['rescale_output']:
+    # Rescaling for output layer
+    rescale_mean = float(np.mean(td.y_train_single))
+    rescale_sigma = float(np.std(td.y_train_single, ddof=1))
model.add(keras.layers.Rescaling(scale=rescale_sigma, offset=rescale_mean))

model.summary()
@@ -98,7 +100,10 @@ def CMNNModelConstruction(config: dict, td: TrainingDataGeneric):
n_neurons = [n_neurons] * n_layers
else:
assert len(n_neurons) == n_layers
-n_featues = td.X_train_single.shape[1]
+if config['n_features'] is not None:
+    n_features = config['n_features']
+else:
+    n_features = td.X_train_single.shape[1]
activation_function = config['activation_function']
# If activation_function is a single string, replicate it for all layers
if isinstance(activation_function, str):
@@ -109,21 +114,20 @@
# Get monotonicity constraints
mono = config['monotonicities']
if mono is None:
-monotonicities = [0] * n_featues
+monotonicities = [0] * n_features
else:
monotonicities = [0 if name not in mono.keys() else mono[name] for name in td.columns]

-# Rescaling for output layer
-rescale_mean = float(np.mean(td.y_train_single))
-rescale_sigma = float(np.std(td.y_train_single, ddof=1))

# Add input layer
-input_layer = keras.layers.Input(shape=(n_featues,))
+input_layer = keras.layers.Input(shape=(n_features,))

# Add normalization layer
-normalization = keras.layers.Normalization()
-normalization.adapt(td.X_train_single)
-x = normalization(input_layer)
+if config['normalize']:
+    normalization = keras.layers.Normalization()
+    normalization.adapt(td.X_train_single)
+    x = normalization(input_layer)
+else:
+    x = input_layer

# Add dense layer
activation_split = config['activation_split']
@@ -169,6 +173,9 @@

# Add rescaling
if config['rescale_output']:
+    # Rescaling for output layer
+    rescale_mean = float(np.mean(td.y_train_single))
+    rescale_sigma = float(np.std(td.y_train_single, ddof=1))
x = keras.layers.Rescaling(scale=rescale_sigma, offset=rescale_mean)(x)

# # Add min / max constraints