26 integrate modular anns #37
base: main

Changes from all commits:
037e816
1daa46a
14ed7cb
0d5120c
8f1797d
df60c01
e06bbbf
488eab5
f5aec51
6f9266f
fd9ea25
fe765c5
a14e651
67140fc
dc0bd9a
42a8cda
350ac20
c92ae5e
f8dca2c
99b80b9
095208a
d4111ab
5060b9e
ecb9e23
d4305e4
2866780
262b3dc
5a7d0dc
e36b0e3
648e7ad
bec6ed7
0124cab
6922e79
dca2318
72937e7
293dd28
b29ebcd
0394a7c
9b8cf80
d3d14d1
74dac95
920742e
7bc8512
49090c3
cb9d249
822f0c2
940dfd7
780ee12
4abda06
313a8cc
134a923
1a53cd0
347e2f0
e51614f
5d3fef0
28dd559
New file (example script) · @@ -0,0 +1,41 @@

```python
from physXAI.models.modular.modular_ann import ModularANN, ModularModel
from physXAI.preprocessing.preprocessing import PreprocessingSingleStep
from physXAI.preprocessing.constructed import Feature
from physXAI.models.ann.ann_design import ClassicalANNModel
from physXAI.utils.logging import Logger


"""
Creates modular models to predict the power of the heat pump using the Boptest data.
"""

Logger.setup_logger(folder_name='P_hp_modular', override=True)

file_path = r"data/bestest_hydronic_heat_pump/pid_data.csv"

inputs = ['oveHeaPumY_u', 'Func(logistic)', 'weaSta_reaWeaTDryBul_y', 'reaTZon_y']
output = 'reaPHeaPum_y'

oveHeaPumY_u = Feature('oveHeaPumY_u')
func_logistic = Feature('Func(logistic)')
TDryBul = Feature('weaSta_reaWeaTDryBul_y')
TZon = Feature('reaTZon_y')

prep = PreprocessingSingleStep(inputs=inputs, output=output)
td = prep.pipeline(file_path)

"""Example usages of modular models"""
y = ModularModel(
    model=ClassicalANNModel(),
    inputs=[oveHeaPumY_u.input() / func_logistic.input(), func_logistic.input() ** 2, TDryBul.input(), TZon.input()]
)
m = ModularANN(architecture=y, rescale_output=True)

# Training pipeline
model = m.pipeline(td)


# Log setup of preprocessing and model as json
Logger.log_setup(prep, m)
# Log training data as pickle
Logger.save_training_data(td)
```
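The `inputs` list above composes features with plain Python operators (`/`, `**`). As an aside, here is a minimal standalone sketch of how a `Feature.input()`-style API can support that via operator overloading — illustrative only, not physXAI's actual implementation:

```python
# Hypothetical sketch (NOT physXAI's real Feature/input implementation):
# operator overloads return composable expression nodes.
from dataclasses import dataclass
from typing import Callable


@dataclass
class Expr:
    """A node in a tiny expression tree over named input columns."""
    fn: Callable

    def __truediv__(self, other):
        # '/' builds a new node dividing the two operand expressions
        return Expr(lambda row: self.fn(row) / other.fn(row))

    def __pow__(self, exponent):
        # '**' builds a new node raising the expression to a power
        return Expr(lambda row: self.fn(row) ** exponent)


def input_of(name):
    """Stands in for Feature(name).input(): fetch a column by name."""
    return Expr(lambda row: row[name])


# usage mirroring the inputs list in the example script above
ratio = input_of('oveHeaPumY_u') / input_of('Func(logistic)')
squared = input_of('Func(logistic)') ** 2
row = {'oveHeaPumY_u': 1.0, 'Func(logistic)': 0.5}
print(ratio.fn(row), squared.fn(row))  # 2.0 0.25
```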
Deleted file · @@ -1 +0,0 @@ (single line removed; content not captured in this view)

Deleted file · @@ -1 +0,0 @@ (single line removed; content not captured in this view)
Changed file (ANN model design module):

```diff
@@ -1,3 +1,4 @@
+from logging import warning
 import os
 import time
 from abc import ABC, abstractmethod
```
```diff
@@ -48,6 +49,9 @@ def __init__(self, batch_size: int = 32, epochs: int = 1000, learning_rate: floa
         self.random_seed: int = random_seed
         keras.utils.set_random_seed(random_seed)
 
+        self.model_config = dict()
+
     @abstractmethod
     def generate_model(self, **kwargs):
         """
```
```diff
@@ -234,12 +238,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32,
         self.activation_function: Union[str, list[str]] = activation_function
         self.rescale_output: bool = rescale_output
 
-        self.model_config = {
+        self.model_config.update({
             "n_layers": self.n_layers,
             "n_neurons": self.n_neurons,
             "activation_function": self.activation_function,
             "rescale_output": self.rescale_output,
-        }
+        })
 
     def generate_model(self, **kwargs):
         """
```
```diff
@@ -290,12 +294,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, res
         self.n_neurons: Union[int, list[int]] = n_neurons
         self.rescale_output: bool = rescale_output
 
-        self.model_config = {
+        self.model_config.update({
             "n_layers": self.n_layers,
             "n_neurons": self.n_neurons,
             "rescale_output": self.rescale_output,
             "random_state": random_seed
-        }
+        })
 
     def generate_model(self, **kwargs):
         """
@@ -354,12 +358,12 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32, res
         self.n_neurons: Union[int, list[int]] = n_neurons
         self.rescale_output: bool = rescale_output
 
-        self.model_config = {
+        self.model_config.update({
             "n_layers": self.n_layers,
             "n_neurons": self.n_neurons,
             "rescale_output": self.rescale_output,
             "random_state": random_seed
-        }
+        })
 
     def generate_model(self, **kwargs):
         """
```
```diff
@@ -419,14 +423,14 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32,
         self.monotonies: dict[str, int] = monotonies
         self.activation_split: list[float] = activation_split
 
-        self.model_config = {
+        self.model_config.update({
             "n_layers": self.n_layers,
             "n_neurons": self.n_neurons,
             "activation_function": self.activation_function,
             "rescale_output": self.rescale_output,
             "monotonicities": self.monotonies,
             "activation_split": activation_split,
-        }
+        })
 
     def generate_model(self, **kwargs):
         """
```
```diff
@@ -494,14 +498,14 @@ def __init__(self, n_layers: int = 1, n_neurons: Union[int, list[int]] = 32,
 
         self.pinn_weights: list[float] = pinn_weights
 
-        self.model_config = {
+        self.model_config.update({
             "n_layers": self.n_layers,
             "n_neurons": self.n_neurons,
             "activation_function": self.activation_function,
             "rescale_output": self.rescale_output,
             "monotonicities": self.monotonies,
             "activation_split": activation_split,
-        }
+        })
 
         # Create pinn loss based on standard losses
         self.pinn_loss = multi_y_loss(keras.losses.MeanSquaredError(name='MSE'), self.pinn_weights, 'mse')
```
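For context on that last line: `multi_y_loss` builds the PINN loss by weighting a standard loss across multiple model outputs. Its real signature lives in physXAI and is not shown in this diff, so the sketch below is a rough standalone analogue; treat every name in it as an illustrative assumption:

```python
# Rough sketch of a weighted multi-output loss in the spirit of multi_y_loss.
import keras


def weighted_multi_output_loss(base_loss, weights):
    """Apply base_loss per output channel (last axis) and sum with weights."""
    def loss(y_true, y_pred):
        total = 0.0
        for i, w in enumerate(weights):
            # each output channel contributes its own weighted loss term
            total = total + w * base_loss(y_true[..., i], y_pred[..., i])
        return total
    return loss


# usage: weight the data-fit term at 1.0 and a physics residual term at 0.1
pinn_loss = weighted_multi_output_loss(keras.losses.MeanSquaredError(name='MSE'), [1.0, 0.1])
```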
```diff
@@ -626,7 +630,6 @@ def get_config(self) -> dict:
         })
         return config
 
-
 @register_model
 class RNNModel(MultiStepModel):
     """
```
Changed file (model construction config schema):

```diff
@@ -4,10 +4,12 @@
 class ClassicalANNConstruction_config(BaseModel):
 
-    n_layers: int = Field(..., gt=0)
+    n_layers: int = Field(..., ge=0)
```

Copilot AI (Dec 18, 2025) left a suggested change reverting the relaxed bound:

```suggestion
-    n_layers: int = Field(..., ge=0)
+    n_layers: int = Field(..., gt=0)
```

Copilot AI (Dec 18, 2025): Adding new required fields `normalize` and `n_features` to the configuration schema is a breaking change for existing saved model configurations. While both fields have defaults (`True` and `None`), any code that creates config dictionaries directly or loads old saved configs may fail validation if these fields are missing. Consider documenting this as a breaking change and providing a migration path for existing saved models.
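Two points the review raises can be checked with a small Pydantic sketch: `ge=0` admits `n_layers=0` where `gt=0` rejects it, and fields added with defaults stay optional on input, so older saved config dicts that omit them still validate. The schema below is a stand-in, not the module's real definition:

```python
from typing import Optional

from pydantic import BaseModel, Field, ValidationError


class StrictConfig(BaseModel):
    n_layers: int = Field(..., gt=0)   # rejects 0, as in the suggested revert


class RelaxedConfig(BaseModel):
    n_layers: int = Field(..., ge=0)   # accepts 0, as in the PR change
    # New fields with defaults remain optional on input, so an old saved
    # config without them still validates.
    normalize: bool = True
    n_features: Optional[int] = None


old_config = {"n_layers": 2}           # saved before the new fields existed
print(RelaxedConfig(**old_config))     # n_layers=2 normalize=True n_features=None
print(RelaxedConfig(n_layers=0))       # ge=0 admits the boundary value

try:
    StrictConfig(n_layers=0)
except ValidationError as err:
    print("gt=0 rejects 0:", err.errors()[0]["msg"])
```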
Deleted file · @@ -1 +0,0 @@ (single line removed; content not captured in this view)
Copilot AI: The example shows `ModularModel` being used with a single submodel (`ClassicalANNModel`), but the comment on line 27 says "Example usages" (plural). The example could be more comprehensive by demonstrating additional modular features like `ModularLinear`, `ModularPolynomial`, `ModularAverage`, `ModularTrainable`, or combining multiple `ModularModel`s, which would better showcase the flexibility of the modular system.
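In the spirit of that suggestion, a standalone analogue (no physXAI imports; all names and APIs below are hypothetical illustrations, not the library's verified interface) of the role an averaging combinator such as a `ModularAverage`-style component could play:

```python
# Hypothetical sketch: combining several submodels by averaging predictions.
import numpy as np


def linear_submodel(w, b):
    """A trivial stand-in submodel: x @ w + b."""
    return lambda x: x @ w + b


def average(*models):
    """Combine submodels by averaging their predictions elementwise."""
    return lambda x: np.mean([m(x) for m in models], axis=0)


x = np.array([[1.0, 2.0]])
combined = average(linear_submodel(np.array([0.5, 0.5]), 0.0),
                   linear_submodel(np.array([1.0, 0.0]), 1.0))
print(combined(x))  # mean of 1.5 and 2.0 -> [1.75]
```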