diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake
index 6f5ebfbd5f7..4978245ebdb 100644
--- a/CMakeLists_files.cmake
+++ b/CMakeLists_files.cmake
@@ -62,6 +62,7 @@ list (APPEND MAIN_SOURCE_FILES
opm/material/fluidsystems/blackoilpvt/SolventPvt.cpp
opm/material/fluidsystems/blackoilpvt/WetGasPvt.cpp
opm/material/fluidsystems/blackoilpvt/WetHumidGasPvt.cpp
+ opm/ml/ml_model.cpp
)
if(ENABLE_ECL_INPUT)
list(APPEND MAIN_SOURCE_FILES
@@ -474,6 +475,7 @@ list (APPEND TEST_SOURCE_FILES
tests/material/test_spline.cpp
tests/material/test_tabulation.cpp
tests/test_Visitor.cpp
+ tests/ml/ml_model_test.cpp
)
# tests that need to be linked to dune-common
@@ -646,6 +648,15 @@ list (APPEND TEST_DATA_FILES
tests/material/co2_unittest_below_sat.json
tests/material/h2o_unittest.json
tests/material/h2_unittest.json
+ tests/ml/ml_tools/models/test_dense_1x1.model
+ tests/ml/ml_tools/models/test_dense_2x2.model
+ tests/ml/ml_tools/models/test_dense_10x1.model
+ tests/ml/ml_tools/models/test_dense_10x10.model
+ tests/ml/ml_tools/models/test_dense_10x10x10.model
+ tests/ml/ml_tools/models/test_dense_relu_10.model
+ tests/ml/ml_tools/models/test_dense_tanh_10.model
+ tests/ml/ml_tools/models/test_relu_10.model
+ tests/ml/ml_tools/models/test_scalingdense_10x1.model
)
if(ENABLE_ECL_OUTPUT)
list (APPEND TEST_DATA_FILES
@@ -1052,6 +1063,7 @@ list( APPEND PUBLIC_HEADER_FILES
opm/material/thermal/SomertonThermalConductionLaw.hpp
opm/material/thermal/EclSpecrockLaw.hpp
opm/material/thermal/NullSolidEnergyLaw.hpp
+ opm/ml/ml_model.hpp
)
if(ENABLE_ECL_INPUT)
diff --git a/opm/ml/ml_model.cpp b/opm/ml/ml_model.cpp
new file mode 100644
index 00000000000..5f3e0abbd08
--- /dev/null
+++ b/opm/ml/ml_model.cpp
@@ -0,0 +1,330 @@
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <https://www.gnu.org/licenses/>.
+*/
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace Opm {
+
+
+template
+bool readFile(std::ifstream& file, T& data, size_t n=1)
+{
+ file.read(reinterpret_cast(&data), sizeof (T)* n);
+ return !file.fail();
+}
+
+template
+bool NNLayerActivation::loadLayer(std::ifstream& file) {
+
+ unsigned int activation = 0;
+ OPM_ERROR_IF(!readFile(file, activation),
+ "Failed to read activation type");
+
+ switch (activation) {
+ case kLinear:
+ activation_type_ = kLinear;
+ break;
+ case kRelu:
+ activation_type_ = kRelu;
+ break;
+ case kSoftPlus:
+ activation_type_ = kSoftPlus;
+ break;
+ case kHardSigmoid:
+ activation_type_ = kHardSigmoid;
+ break;
+ case kSigmoid:
+ activation_type_ = kSigmoid;
+ break;
+ case kTanh:
+ activation_type_ = kTanh;
+ break;
+ default:
+ OPM_ERROR_IF(true,fmt::format("\n Unsupported activation type " "{}", activation));
+
+ }
+
+ return true;
+}
+
+template
+bool NNLayerActivation::apply(const Tensor& in, Tensor& out) {
+ constexpr double sigmoid_scale = 0.2;
+ out = in;
+
+ switch (activation_type_) {
+ case kLinear:
+ break;
+ case kRelu:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ if (out.data_[i] < 0.0) {
+ out.data_[i] = 0.0;
+ }
+ }
+ break;
+ case kSoftPlus:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ out.data_[i] = log(1.0 + exp(out.data_[i]));
+ }
+ break;
+ case kHardSigmoid:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ Evaluation x = (out.data_[i] * sigmoid_scale) + 0.5;
+
+ if (x <= 0) {
+ out.data_[i] = 0.0;
+ } else if (x >= 1) {
+ out.data_[i] = 1.0;
+ } else {
+ out.data_[i] = x;
+ }
+ }
+ break;
+ case kSigmoid:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ Evaluation& x = out.data_[i];
+
+ if (x >= 0) {
+ out.data_[i] = 1.0 / (1.0 + exp(-x));
+ } else {
+ Evaluation z = exp(x);
+ out.data_[i] = z / (1.0 + z);
+ }
+ }
+ break;
+ case kTanh:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ out.data_[i] = sinh(out.data_[i])/cosh(out.data_[i]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+
+template
+bool NNLayerScaling::loadLayer(std::ifstream& file) {
+ OPM_ERROR_IF(!readFile(file, data_min), "Failed to read min");
+ OPM_ERROR_IF(!readFile(file, data_max), "Failed to read max");
+ OPM_ERROR_IF(!readFile(file, feat_inf), "Failed to read inf");
+ OPM_ERROR_IF(!readFile(file, feat_sup), "Failed to read sup");
+ return true;
+}
+
+template
+bool NNLayerScaling::apply(const Tensor& in, Tensor& out) {
+
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ auto tempscale = (in.data_[i] - data_min)/(data_max - data_min);
+ out.data_[i] = tempscale * (feat_sup - feat_inf) + feat_inf;
+ }
+
+ return true;
+}
+
+template
+bool NNLayerUnScaling::loadLayer(std::ifstream& file) {
+ OPM_ERROR_IF(!readFile(file, data_min), "Failed to read min");
+ OPM_ERROR_IF(!readFile(file, data_max), "Failed to read max");
+ OPM_ERROR_IF(!readFile(file, feat_inf), "Failed to read inf");
+ OPM_ERROR_IF(!readFile(file, feat_sup), "Failed to read sup");
+
+ return true;
+}
+
+template
+bool NNLayerUnScaling::apply(const Tensor& in, Tensor& out) {
+
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ auto tempscale = (in.data_[i] - feat_inf)/(feat_sup - feat_inf);
+
+ out.data_[i] = tempscale * (data_max - data_min) + data_min;
+ }
+
+ return true;
+}
+
+
+template
+bool NNLayerDense::loadLayer(std::ifstream& file) {
+ unsigned int weights_rows = 0;
+ OPM_ERROR_IF(!readFile(file, weights_rows), "Expected weight rows");
+ OPM_ERROR_IF(!(weights_rows > 0), "Invalid weights # rows");
+
+ unsigned int weights_cols = 0;
+ OPM_ERROR_IF(!readFile(file, weights_cols), "Expected weight cols");
+ OPM_ERROR_IF(!(weights_cols > 0), "Invalid weights shape");
+
+ unsigned int biases_shape = 0;
+ OPM_ERROR_IF(!readFile(file, biases_shape), "Expected biases shape");
+ OPM_ERROR_IF(!(biases_shape > 0), "Invalid biases shape");
+
+ weights_.resizeI({weights_rows, weights_cols});
+ OPM_ERROR_IF(!readFile(file, *weights_.data_.data(), weights_rows * weights_cols),
+ "Expected weights");
+
+ biases_.resizeI({biases_shape});
+ OPM_ERROR_IF(!readFile(file, *biases_.data_.data(), biases_shape),
+ "Expected biases");
+
+ OPM_ERROR_IF(!activation_.loadLayer(file), "Failed to load activation");
+
+ return true;
+}
+
+template
+bool NNLayerDense::apply(const Tensor& in, Tensor& out) {
+
+ Tensor tmp(weights_.dims_[1]);
+ Tensor temp_in(in);
+ for (int i = 0; i < weights_.dims_[0]; i++) {
+ for (int j = 0; j < weights_.dims_[1]; j++) {
+ tmp(j) += (temp_in)(i)*weights_(i, j);
+ }
+ }
+
+ for (int i = 0; i < biases_.dims_[0]; i++) {
+ tmp(i) += biases_(i);
+ }
+
+ OPM_ERROR_IF(!activation_.apply(tmp, out), "Failed to apply activation");
+
+ return true;
+}
+
+
+
+template
+bool NNLayerEmbedding::loadLayer(std::ifstream& file) {
+
+ unsigned int weights_rows = 0;
+ OPM_ERROR_IF(!readFile(file, weights_rows), "Expected weight rows");
+ OPM_ERROR_IF(!(weights_rows > 0), "Invalid weights # rows");
+
+ unsigned int weights_cols = 0;
+ OPM_ERROR_IF(!readFile(file, weights_cols), "Expected weight cols");
+ OPM_ERROR_IF(!(weights_cols > 0), "Invalid weights shape");
+
+ weights_.resizeI({weights_rows, weights_cols});
+ OPM_ERROR_IF(!readFile(file, *weights_.data_.data(), weights_rows * weights_cols),
+ "Expected weights");
+
+ return true;
+}
+
+template
+bool NNLayerEmbedding::apply(const Tensor& in, Tensor& out) {
+ int output_rows = in.dims_[1];
+ int output_cols = weights_.dims_[1];
+ out.dims_ = {output_rows, output_cols};
+ out.data_.reserve(output_rows * output_cols);
+
+ std::for_each(in.data_.begin(), in.data_.end(), [=](Evaluation i) {
+ typename std::vector::const_iterator first =
+ this->weights_.data_.begin() + (getValue(i) * output_cols);
+ typename std::vector::const_iterator last =
+ this->weights_.data_.begin() + (getValue(i) + 1) * output_cols;
+
+ out.data_.insert(out.data_.end(), first, last);
+ });
+
+ return true;
+}
+
+
+template
+bool NNModel::loadModel(const std::string& filename) {
+ std::ifstream file(filename.c_str(), std::ios::binary);
+ OPM_ERROR_IF(!file.is_open(),fmt::format("\n Unable to open file " "{}", filename.c_str()));
+
+ unsigned int num_layers = 0;
+ OPM_ERROR_IF(!readFile(file, num_layers), "Expected number of layers");
+
+ for (unsigned int i = 0; i < num_layers; i++) {
+ unsigned int layer_type = 0;
+ OPM_ERROR_IF(!readFile(file, layer_type), "Expected layer type");
+
+ std::unique_ptr> layer = nullptr;
+
+ switch (layer_type) {
+ case kScaling:
+ layer = std::make_unique>() ;
+ break;
+ case kUnScaling:
+ layer = std::make_unique>() ;
+ break;
+ case kDense:
+ layer = std::make_unique>() ;
+ break;
+ case kActivation:
+ layer = std::make_unique>() ;
+ break;
+ default:
+ break;
+ }
+
+ OPM_ERROR_IF(!layer,fmt::format("\n Unknown layer type " "{}", layer_type));
+ OPM_ERROR_IF(!layer->loadLayer(file),fmt::format("\n Failed to load layer " "{}", i));
+
+ layers_.emplace_back(std::move(layer));
+
+ }
+
+ return true;
+}
+
+
+template
+bool NNModel::apply(const Tensor& in, Tensor& out) {
+ Tensor temp_in(in);
+
+ for (unsigned int i = 0; i < layers_.size(); i++) {
+
+ OPM_ERROR_IF(!(layers_[i]->apply(temp_in, out)),fmt::format("\n Failed to apply layer " "{}", i));
+ temp_in = out;
+}
+ return true;
+
+}
+
+
+template class NNModel;
+
+template class NNModel;
+template class NNModel>;
+template class NNModel>;
+template class NNModel>;
+template class NNModel>;
+template class NNModel>;
+template class NNModel>;
+
+} // namespace Opm
diff --git a/opm/ml/ml_model.hpp b/opm/ml/ml_model.hpp
new file mode 100644
index 00000000000..758f4bc5642
--- /dev/null
+++ b/opm/ml/ml_model.hpp
@@ -0,0 +1,379 @@
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <https://www.gnu.org/licenses/>.
+*/
+
+#ifndef ML_MODEL_H_
+#define ML_MODEL_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+
+namespace Opm {
+
+// NN layer
+// ---------------------
+/** \class Tensor class
+ * Implements mathematical tensor (Max 4d)
+ */
+template
+class Tensor {
+ public:
+ Tensor() {}
+
+ explicit Tensor(int i) { resizeI({i}); }
+
+ Tensor(int i, int j) { resizeI({i, j}); }
+
+ Tensor(int i, int j, int k) { resizeI({i, j, k}); }
+
+ Tensor(int i, int j, int k, int l) { resizeI({i, j, k, l}); }
+
+
+ template
+ void resizeI(std::vector c) {
+ if (c.size()==1)
+ dims_ = {(int)c[0]};
+ if (c.size()==2)
+ dims_ = {(int)c[0],(int)c[1]};
+ if (c.size()==3)
+ dims_ = {(int)c[0],(int)c[1],(int)c[2]};
+ if (c.size()==4)
+ dims_ = {(int)c[0],(int)c[1],(int)c[2],(int)c[3]};
+
+ data_.resize(std::accumulate(begin(c), end(c), 1.0, std::multiplies()));
+ }
+
+ inline void flatten() {
+ OPM_ERROR_IF(dims_.size() <= 0, "Invalid tensor");
+
+ int elements = dims_[0];
+ for (unsigned int i = 1; i < dims_.size(); i++) {
+ elements *= dims_[i];
+ }
+ dims_ = {elements};
+ }
+
+ inline T& operator()(int i) {
+ OPM_ERROR_IF(dims_.size() != 1, "Invalid indexing for tensor");
+
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+
+ return data_[i];
+ }
+
+ inline T& operator()(int i, int j) {
+ OPM_ERROR_IF(dims_.size() != 2, "Invalid indexing for tensor");
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+ OPM_ERROR_IF (!(j < dims_[1] && j >= 0), fmt::format(" Invalid j: " "{}" " max: " "{}",j, dims_[1]));
+
+ return data_[dims_[1] * i + j];
+ }
+
+ const T& operator()(int i, int j) const {
+ OPM_ERROR_IF(dims_.size() != 2, "Invalid indexing for tensor");
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+ OPM_ERROR_IF (!(j < dims_[1] && j >= 0), fmt::format(" Invalid j: " "{}" " max: " "{}",j, dims_[1]));
+ return data_[dims_[1] * i + j];
+ }
+
+ inline T& operator()(int i, int j, int k) {
+ OPM_ERROR_IF(dims_.size() != 3, "Invalid indexing for tensor");
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+ OPM_ERROR_IF (!(j < dims_[1] && j >= 0), fmt::format(" Invalid j: " "{}" " max: " "{}",j, dims_[1]));
+ OPM_ERROR_IF (!(k < dims_[2] && k >= 0), fmt::format(" Invalid k: " "{}" " max: " "{}",k, dims_[2]));
+
+ return data_[dims_[2] * (dims_[1] * i + j) + k];
+ }
+ const T& operator()(int i, int j, int k) const {
+ OPM_ERROR_IF(dims_.size() != 3, "Invalid indexing for tensor");
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+ OPM_ERROR_IF (!(j < dims_[1] && j >= 0), fmt::format(" Invalid j: " "{}" " max: " "{}",j, dims_[1]));
+ OPM_ERROR_IF (!(k < dims_[2] && k >= 0), fmt::format(" Invalid k: " "{}" " max: " "{}",k, dims_[2]));
+
+ return data_[dims_[2] * (dims_[1] * i + j) + k];
+ }
+
+ inline T& operator()(int i, int j, int k, int l) {
+ OPM_ERROR_IF(dims_.size() != 4, "Invalid indexing for tensor");
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+ OPM_ERROR_IF (!(j < dims_[1] && j >= 0), fmt::format(" Invalid j: " "{}" " max: " "{}",j, dims_[1]));
+ OPM_ERROR_IF (!(k < dims_[2] && k >= 0), fmt::format(" Invalid k: " "{}" " max: " "{}",k, dims_[2]));
+ OPM_ERROR_IF (!(l < dims_[3] && l >= 0), fmt::format(" Invalid l: " "{}" " max: " "{}",l, dims_[3]));
+
+ return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
+ }
+
+ const T& operator()(int i, int j, int k, int l) const{
+ OPM_ERROR_IF(dims_.size() != 4, "Invalid indexing for tensor");
+ OPM_ERROR_IF (!(i < dims_[0] && i >= 0), fmt::format(" Invalid i: " "{}" " max: " "{}",i, dims_[0]));
+ OPM_ERROR_IF (!(j < dims_[1] && j >= 0), fmt::format(" Invalid j: " "{}" " max: " "{}",j, dims_[1]));
+ OPM_ERROR_IF (!(k < dims_[2] && k >= 0), fmt::format(" Invalid k: " "{}" " max: " "{}",k, dims_[2]));
+ OPM_ERROR_IF (!(l < dims_[3] && l >= 0), fmt::format(" Invalid l: " "{}" " max: " "{}",l, dims_[3]));
+
+ return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
+ }
+ void fill(const T & value) {
+ std::fill(data_.begin(), data_.end(), value);
+ }
+
+ // Tensor addition
+ Tensor operator+(const Tensor& other) {
+ OPM_ERROR_IF(dims_.size() != other.dims_.size() ,
+ "Cannot add tensors with different dimensions");
+ Tensor result;
+ result.dims_ = dims_;
+ result.data_.reserve(data_.size());
+
+ std::transform(data_.begin(), data_.end(), other.data_.begin(),
+ std::back_inserter(result.data_),
+ [](T x, T y) { return x + y; });
+
+ return result;
+ }
+
+ // Tensor multiplication
+ Tensor multiply(const Tensor& other) {
+ OPM_ERROR_IF(dims_.size() != other.dims_.size() ,
+ "Cannot multiply elements with different dimensions");
+
+ Tensor result;
+ result.dims_ = dims_;
+ result.data_.reserve(data_.size());
+
+ std::transform(data_.begin(), data_.end(), other.data_.begin(),
+ std::back_inserter(result.data_),
+ [](T x, T y) { return x * y; });
+
+ return result;
+ }
+
+ // Tensor dot for 2d tensor
+ Tensor dot(const Tensor& other) {
+ OPM_ERROR_IF(dims_.size() != 2, "Invalid tensor dimensions");
+ OPM_ERROR_IF(other.dims_.size() != 2, "Invalid tensor dimensions");
+
+ OPM_ERROR_IF(dims_[1] != other.dims_[0],
+ "Cannot multiply with different inner dimensions");
+
+ Tensor tmp(dims_[0], other.dims_[1]);
+
+ for (int i = 0; i < dims_[0]; i++) {
+ for (int j = 0; j < other.dims_[1]; j++) {
+ for (int k = 0; k < dims_[1]; k++) {
+ tmp(i, j) += (*this)(i, k) * other(k, j);
+ }
+ }
+ }
+
+ return tmp;
+ }
+
+ std::vector dims_;
+ std::vector data_;
+};
+
+
+
+// NN layer
+// ---------------------
+/** \class Neural Network Layer base class
+ */
+template
+class NNLayer {
+ public:
+ NNLayer() {}
+
+ virtual ~NNLayer() {}
+
+ virtual bool loadLayer(std::ifstream& file) = 0;
+
+ virtual bool apply(const Tensor& in, Tensor& out) = 0;
+};
+
+/** \class Activation Layer class
+ * Applies an activation function
+ */
+template
+class NNLayerActivation : public NNLayer {
+ public:
+ enum ActivationType {
+ kLinear = 1,
+ kRelu = 2,
+ kSoftPlus = 3,
+ kSigmoid = 4,
+ kTanh = 5,
+ kHardSigmoid = 6
+ };
+
+ NNLayerActivation() : activation_type_(ActivationType::kLinear) {}
+
+ virtual ~NNLayerActivation() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(const Tensor& in, Tensor& out);
+
+ private:
+ ActivationType activation_type_;
+};
+
+/** \class Scaling Layer class
+ * A preprocessing layer which rescales input values to a new range.
+ */
+template
+class NNLayerScaling : public NNLayer {
+ public:
+ NNLayerScaling(): data_min(1.0f), data_max(1.0f), feat_inf(1.0f), feat_sup(1.0f) {}
+
+ virtual ~NNLayerScaling() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(const Tensor& in, Tensor& out);
+
+ private:
+ Tensor weights_;
+ Tensor biases_;
+ float data_min;
+ float data_max;
+ float feat_inf;
+ float feat_sup;
+};
+
+/** \class Unscaling Layer class
+ * A postprocessing layer to undo the scaling according to feature_range.
+ */
+template
+class NNLayerUnScaling : public NNLayer {
+ public:
+ NNLayerUnScaling(): data_min(1.0f), data_max(1.0f), feat_inf(1.0f), feat_sup(1.0f) {}
+
+ virtual ~NNLayerUnScaling() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(const Tensor& in, Tensor& out);
+
+ private:
+ Tensor weights_;
+ Tensor biases_;
+ float data_min;
+ float data_max;
+ float feat_inf;
+ float feat_sup;
+};
+
+/** \class Dense Layer class
+ * Densely-connected NN layer.
+ */
+template
+class NNLayerDense : public NNLayer {
+ public:
+ NNLayerDense() {}
+
+ virtual ~NNLayerDense() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(const Tensor& in, Tensor& out);
+
+ private:
+ Tensor weights_;
+ Tensor biases_;
+
+ NNLayerActivation activation_;
+};
+
+/** \class Embedding Layer class
+ * Turns nonnegative integers (indexes) into dense vectors of fixed size.
+ */
+template
+class NNLayerEmbedding : public NNLayer {
+ public:
+ NNLayerEmbedding() {}
+
+ virtual ~NNLayerEmbedding() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(const Tensor& in, Tensor& out);
+
+ private:
+ Tensor weights_;
+};
+
+/** \class Neural Network Model class
+ * A model grouping layers into an object
+ */
+template
+class NNModel {
+ public:
+ enum LayerType {
+ kScaling = 1,
+ kUnScaling = 2,
+ kDense = 3,
+ kActivation = 4
+ };
+
+ NNModel() {}
+
+ virtual ~NNModel() {}
+ //loads models generated by Kerasify
+ virtual bool loadModel(const std::string& filename);
+
+ virtual bool apply(const Tensor& in, Tensor& out);
+
+ private:
+ std::vector>> layers_;
+};
+
+/** \class Neural Network Timer class
+ */
+class NNTimer {
+ public:
+ NNTimer() {}
+
+ void start() { start_ = std::chrono::high_resolution_clock::now(); }
+
+ float stop() {
+ std::chrono::time_point now =
+ std::chrono::high_resolution_clock::now();
+
+ std::chrono::duration diff = now - start_;
+
+ return diff.count();
+ }
+
+ private:
+ std::chrono::time_point start_;
+};
+
+} // namespace Opm
+
+#endif // ML_MODEL_H_
diff --git a/python/opm/ml/ml_tools/__init__.py b/python/opm/ml/ml_tools/__init__.py
new file mode 100644
index 00000000000..d6a2782e03a
--- /dev/null
+++ b/python/opm/ml/ml_tools/__init__.py
@@ -0,0 +1,2 @@
+from .kerasify import *
+from .scaler_layers import *
\ No newline at end of file
diff --git a/python/opm/ml/ml_tools/kerasify.py b/python/opm/ml/ml_tools/kerasify.py
new file mode 100644
index 00000000000..9c6da13feed
--- /dev/null
+++ b/python/opm/ml/ml_tools/kerasify.py
@@ -0,0 +1,151 @@
+
+# Copyright (c) 2016 Robert W. Rose
+# Copyright (c) 2018 Paul Maevskikh
+# Copyright (c) 2024 NORCE
+# This file is part of the Open Porous Media project (OPM).
+# OPM is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# OPM is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with OPM. If not, see <https://www.gnu.org/licenses/>.
+
+import numpy as np
+import struct
+
+LAYER_SCALING = 1
+LAYER_UNSCALING = 2
+LAYER_DENSE = 3
+LAYER_ACTIVATION = 4
+
+ACTIVATION_LINEAR = 1
+ACTIVATION_RELU = 2
+ACTIVATION_SOFTPLUS = 3
+ACTIVATION_SIGMOID = 4
+ACTIVATION_TANH = 5
+ACTIVATION_HARD_SIGMOID = 6
+
+def write_scaling(f):
+ f.write(struct.pack('I', LAYER_SCALING))
+
+
+def write_unscaling(f):
+ f.write(struct.pack('I', LAYER_UNSCALING))
+
+
+def write_tensor(f, data, dims=1):
+ """
+ Writes tensor as flat array of floats to file in 1024 chunks,
+ prevents memory explosion writing very large arrays to disk
+ when calling struct.pack().
+ """
+ f.write(struct.pack('I', dims))
+
+ for stride in data.shape[:dims]:
+ f.write(struct.pack('I', stride))
+
+ data = data.ravel()
+ step = 1024
+ written = 0
+
+ for i in np.arange(0, len(data), step):
+ remaining = min(len(data) - i, step)
+ written += remaining
+ f.write(struct.pack(f'={remaining}f', *data[i: i + remaining]))
+
+ assert written == len(data)
+
+
+def write_floats(file, floats):
+ '''
+ Writes floats to file in 1024 chunks; prevents memory explosion
+ writing very large arrays to disk when calling struct.pack().
+ '''
+ step = 1024
+ written = 0
+
+ for i in np.arange(0, len(floats), step):
+ remaining = min(len(floats) - i, step)
+ written += remaining
+ file.write(struct.pack('=%sf' % remaining, *floats[i:i+remaining]))
+
+ assert written == len(floats)
+
+def export_model(model, filename):
+ with open(filename, 'wb') as f:
+
+ def write_activation(activation):
+ if activation == 'linear':
+ f.write(struct.pack('I', ACTIVATION_LINEAR))
+ elif activation == 'relu':
+ f.write(struct.pack('I', ACTIVATION_RELU))
+ elif activation == 'softplus':
+ f.write(struct.pack('I', ACTIVATION_SOFTPLUS))
+ elif activation == 'tanh':
+ f.write(struct.pack('I', ACTIVATION_TANH))
+ elif activation == 'sigmoid':
+ f.write(struct.pack('I', ACTIVATION_SIGMOID))
+ elif activation == 'hard_sigmoid':
+ f.write(struct.pack('I', ACTIVATION_HARD_SIGMOID))
+ else:
+ assert False, f"Unsupported activation type:{activation}"
+
+ model_layers = [l for l in model.layers]
+
+ num_layers = len(model_layers)
+ f.write(struct.pack('I', num_layers))
+
+ for layer in model_layers:
+ layer_type = type(layer).__name__
+
+ if layer_type == 'MinMaxScalerLayer':
+ write_scaling(f)
+ feat_inf = layer.get_weights()[0]
+ feat_sup = layer.get_weights()[1]
+ f.write(struct.pack('f', layer.data_min))
+ f.write(struct.pack('f', layer.data_max))
+ f.write(struct.pack('f', feat_inf))
+ f.write(struct.pack('f', feat_sup))
+
+
+ elif layer_type == 'MinMaxUnScalerLayer':
+ write_unscaling(f)
+ feat_inf = layer.get_weights()[0]
+ feat_sup = layer.get_weights()[1]
+ f.write(struct.pack('f', layer.data_min))
+ f.write(struct.pack('f', layer.data_max))
+ f.write(struct.pack('f', feat_inf))
+ f.write(struct.pack('f', feat_sup))
+
+ elif layer_type == 'Dense':
+ weights = layer.get_weights()[0]
+ biases = layer.get_weights()[1]
+ activation = layer.get_config()['activation']
+
+ f.write(struct.pack('I', LAYER_DENSE))
+ f.write(struct.pack('I', weights.shape[0]))
+ f.write(struct.pack('I', weights.shape[1]))
+ f.write(struct.pack('I', biases.shape[0]))
+
+ weights = weights.flatten()
+ biases = biases.flatten()
+
+ write_floats(f, weights)
+ write_floats(f, biases)
+
+ write_activation(activation)
+
+
+ elif layer_type == 'Activation':
+ activation = layer.get_config()['activation']
+
+ f.write(struct.pack('I', LAYER_ACTIVATION))
+ write_activation(activation)
+
+ else:
+ assert False, f"Unsupported layer type:{layer_type}"
diff --git a/python/opm/ml/ml_tools/requirements.txt b/python/opm/ml/ml_tools/requirements.txt
new file mode 100644
index 00000000000..b0f3a63436b
--- /dev/null
+++ b/python/opm/ml/ml_tools/requirements.txt
@@ -0,0 +1,16 @@
+################################################################
+# Python v.3.8.0 and above
+
+################################################################
+# Numpy 1.23.0 and above
+numpy
+
+################################################################
+# TensorFlow v.2.1 and above include both CPU and GPU versions.
+tensorflow
+
+################################################################
+# Keras v.2.12.0 and above
+keras
+
+################################################################
diff --git a/python/opm/ml/ml_tools/scaler_layers.py b/python/opm/ml/ml_tools/scaler_layers.py
new file mode 100644
index 00000000000..293d1cb5e67
--- /dev/null
+++ b/python/opm/ml/ml_tools/scaler_layers.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2024 NORCE
+# Copyright (c) 2024 UiB
+# This file is part of the Open Porous Media project (OPM).
+# OPM is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# OPM is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# You should have received a copy of the GNU General Public License
+# along with OPM. If not, see <https://www.gnu.org/licenses/>.
+
+"""Provide MinMax scaler layers for tensorflow.keras."""
+
+from __future__ import annotations
+
+from typing import Optional, Sequence
+
+import numpy as np
+import tensorflow as tf
+from numpy.typing import ArrayLike
+from tensorflow import keras
+from tensorflow.python.keras.engine.base_preprocessing_layer import (
+ PreprocessingLayer,
+)
+
+
+class ScalerLayer(keras.layers.Layer):
+ """MixIn to provide functionality for the Scaler Layer."""
+
+ data_min: tf.Tensor
+ data_max: tf.Tensor
+ min: tf.Tensor
+ scalar: tf.Tensor
+
+ def __init__(
+ self,
+ data_min: Optional[float | ArrayLike] = None,
+ data_max: Optional[float | ArrayLike] = None,
+ feature_range: Sequence[float] | np.ndarray | tf.Tensor = (0, 1),
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ if feature_range[0] >= feature_range[1]:
+ raise ValueError("Feature range must be strictly increasing.")
+ self.feature_range: tf.Tensor = tf.convert_to_tensor(
+ feature_range, dtype=tf.float32
+ )
+ self._is_adapted: bool = False
+ if data_min is not None and data_max is not None:
+ self.data_min = tf.convert_to_tensor(data_min, dtype=tf.float32)
+ self.data_max = tf.convert_to_tensor(data_max, dtype=tf.float32)
+ self._adapt()
+
+ def build(self, input_shape: tuple[int, ...]) -> None:
+ """Initialize ``data_min`` and ``data_max`` with the default values if they have
+ not been initialized yet.
+
+ Args:
+ input_shape (tuple[int, ...]): _description_
+
+ """
+ if not self._is_adapted:
+ # ``data_min`` and ``data_max`` have the same shape as one input tensor.
+ self.data_min = tf.zeros(input_shape[1:])
+ self.data_max = tf.ones(input_shape[1:])
+ self._adapt()
+
+ def get_weights(self) -> list[ArrayLike]:
+ """Return parameters of the scaling.
+
+ Returns:
+ list[ArrayLike]: List with five elements in the following order:
+ ``self.feature_range[0]``, ``self.feature_range[1]``, ``self.data_min``, ``self.data_max``, ``self.feature_range``
+
+ """
+ return [self.feature_range[0], self.feature_range[1], self.data_min, self.data_max, self.feature_range]
+
+ def set_weights(self, weights: list[ArrayLike]) -> None:
+ """Set parameters of the scaling.
+
+ Args:
+ weights (list[ArrayLike]): List with three elements in the following order:
+ ``data_min``, ``data_max``, ``feature_range``
+
+ Raises:
+ ValueError: If ``feature_range[0] >= feature_range[1]``.
+
+ """
+ self.feature_range = tf.convert_to_tensor(weights[2], dtype=tf.float32)
+ if self.feature_range[0] >= self.feature_range[1]:
+ raise ValueError("Feature range must be strictly increasing.")
+ self.data_min = tf.convert_to_tensor(weights[0], dtype=tf.float32)
+ self.data_max = tf.convert_to_tensor(weights[1], dtype=tf.float32)
+
+ def adapt(self, data: ArrayLike) -> None:
+ """Fit the layer to the min and max of the data. This is done individually for
+ each input feature.
+
+ Note:
+ So far, this is only tested for 1 dimensional input and output. For higher
+ dimensional input and output some functionality might need to be added.
+
+ Args:
+ data: _description_
+
+ """
+ data = tf.convert_to_tensor(data, dtype=tf.float32)
+ self.data_min = tf.math.reduce_min(data, axis=0)
+ self.data_max = tf.math.reduce_max(data, axis=0)
+ self._adapt()
+
+ def _adapt(self) -> None:
+ if tf.math.reduce_any(self.data_min > self.data_max):
+ raise RuntimeError(
+ f"""self.data_min {self.data_min} cannot be larger than self.data_max
+ {self.data_max} for any element."""
+ )
+ self.scalar = tf.where(
+ self.data_max > self.data_min,
+ self.data_max - self.data_min,
+ tf.ones_like(self.data_min),
+ )
+ self.min = tf.where(
+ self.data_max > self.data_min,
+ self.data_min,
+ tf.zeros_like(self.data_min),
+ )
+ self._is_adapted = True
+
+
+class MinMaxScalerLayer(ScalerLayer, PreprocessingLayer):
+ """Scales the input according to MinMaxScaling.
+
+ See
+ https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
+ for an explanation of the transform.
+
+ """
+
+ def __init__(
+ self,
+ data_min: Optional[float | ArrayLike] = None,
+ data_max: Optional[float | ArrayLike] = None,
+ feature_range: Sequence[float] | np.ndarray | tf.Tensor = (0, 1),
+ **kwargs,
+ ) -> None:
+ super().__init__(data_min, data_max, feature_range, **kwargs)
+ self._name: str = "MinMaxScalerLayer"
+
+ def call(self, inputs: tf.Tensor) -> tf.Tensor:
+ if not self.is_adapted:
+ print(np.greater_equal(self.data_min, self.data_max))
+ raise RuntimeError(
+ """The layer has not been adapted correctly. Call ``adapt`` before using
+ the layer or set the ``data_min`` and ``data_max`` values manually.
+ """
+ )
+
+ # Ensure the dtype is correct.
+ inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
+ scaled_data = (inputs - self.min) / self.scalar
+ return (
+ scaled_data * (self.feature_range[1] - self.feature_range[0])
+ ) + self.feature_range[0]
+ # return inputs
+
+
+class MinMaxUnScalerLayer(ScalerLayer, tf.keras.layers.Layer):
+ """Unscales the input by applying the inverse transform of ``MinMaxScalerLayer``.
+
+ See
+ https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
+ for an explanation of the transformation.
+
+ """
+
+ def __init__(
+ self,
+ data_min: Optional[float | ArrayLike] = None,
+ data_max: Optional[float | ArrayLike] = None,
+ feature_range: Sequence[float] | np.ndarray | tf.Tensor = (0, 1),
+ **kwargs,
+ ) -> None:
+ super().__init__(data_min, data_max, feature_range, **kwargs)
+ self._name: str = "MinMaxUnScalerLayer"
+
+ def call(self, inputs: tf.Tensor) -> tf.Tensor:
+ if not self._is_adapted:
+ raise RuntimeError(
+ """The layer has not been adapted correctly. Call ``adapt`` before using
+ the layer or set the ``data_min`` and ``data_max`` values manually."""
+ )
+
+ # Ensure the dtype is correct.
+ inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
+ unscaled_data = (inputs - self.feature_range[0]) / (
+ self.feature_range[1] - self.feature_range[0]
+ )
+ return unscaled_data * self.scalar + self.min
+ # return inputs
diff --git a/tests/ml/ml_model_test.cpp b/tests/ml/ml_model_test.cpp
new file mode 100644
index 00000000000..3943e719afc
--- /dev/null
+++ b/tests/ml/ml_model_test.cpp
@@ -0,0 +1,198 @@
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace Opm {
+
+template
+bool tensor_test() { // exercises Tensor fill/indexing, operator+, multiply() and dot()
+ std::printf("TEST tensor_test\n");
+
+ { // 3-D tensor: operator()(i,j,k) and the flat data_ array must agree (row-major fill)
+ const int i = 3;
+ const int j = 5;
+ const int k = 10;
+ Tensor t(i, j, k);
+
+ Evaluation c = 1.f;
+ for (int ii = 0; ii < i; ii++) {
+ for (int jj = 0; jj < j; jj++) {
+ for (int kk = 0; kk < k; kk++) {
+ t(ii, jj, kk) = c;
+ c += 1.f;
+ }
+ }
+ }
+
+ c = 1.f;
+ int cc = 0;
+ for (int ii = 0; ii < i; ii++) {
+ for (int jj = 0; jj < j; jj++) {
+ for (int kk = 0; kk < k; kk++) {
+ OPM_ERROR_IF (fabs(t(ii, jj, kk).value() - c.value()) > 1e-9, fmt::format("\n Expected " "{}" " got " "{}", c.value(), t(ii, jj, kk).value()));
+ OPM_ERROR_IF (fabs(t.data_[cc].value() - c.value()) > 1e-9, fmt::format("\n Expected " "{}" " got " "{}", c.value(), t.data_[cc].value()));
+
+ c += 1.f;
+ cc++;
+ }
+ }
+ }
+ }
+
+ { // 4-D tensor: same indexed-vs-flat consistency check
+ const int i = 2;
+ const int j = 3;
+ const int k = 4;
+ const int l = 5;
+ Tensor t(i, j, k, l);
+
+ Evaluation c = 1.f;
+ for (int ii = 0; ii < i; ii++) {
+ for (int jj = 0; jj < j; jj++) {
+ for (int kk = 0; kk < k; kk++) {
+ for (int ll = 0; ll < l; ll++) {
+ t(ii, jj, kk, ll) = c;
+ c += 1.f;
+ }
+ }
+ }
+ }
+
+ c = 1.f;
+ int cc = 0;
+ for (int ii = 0; ii < i; ii++) {
+ for (int jj = 0; jj < j; jj++) {
+ for (int kk = 0; kk < k; kk++) {
+ for (int ll = 0; ll < l; ll++) {
+ OPM_ERROR_IF (fabs(t(ii, jj, kk, ll).value() - c.value()) > 1e-9, fmt::format("\n Expected " "{}" " got " "{}", c.value(), t(ii, jj, kk, ll).value()));
+ OPM_ERROR_IF (fabs(t.data_[cc].value() - c.value()) > 1e-9, fmt::format("\n Expected " "{}" " got " "{}", c.value(), t.data_[cc].value()));
+ c += 1.f;
+ cc++;
+ }
+ }
+ }
+ }
+ }
+
+ { // element-wise addition
+ Tensor a(2, 2);
+ Tensor b(2, 2);
+
+ a.data_ = {1.0, 2.0, 3.0, 5.0};
+ b.data_ = {2.0, 5.0, 4.0, 1.0};
+
+ Tensor result = a + b;
+ OPM_ERROR_IF(result.data_ != std::vector({3.0, 7.0, 7.0, 6.0}),
+ "Vector add failed");
+ }
+
+ { // element-wise (Hadamard) multiply
+ Tensor a(2, 2);
+ Tensor b(2, 2);
+
+ a.data_ = {1.0, 2.0, 3.0, 5.0};
+ b.data_ = {2.0, 5.0, 4.0, 1.0};
+
+ Tensor result = a.multiply(b);
+ OPM_ERROR_IF(result.data_ != std::vector({2.0, 10.0, 12.0, 5.0}),
+ "Vector multiply failed");
+ }
+
+ { // matrix product: (2x1) . (1x2) -> 2x2
+ Tensor a(2, 1);
+ Tensor b(1, 2);
+
+ a.data_ = {1.0, 2.0};
+ b.data_ = {2.0, 5.0};
+
+ Tensor result = a.dot(b);
+ OPM_ERROR_IF(result.data_ != std::vector({2.0, 5.0, 4.0, 10.0}),
+ "Vector dot failed");
+ }
+
+ return true;
+}
+
+} // namespace Opm
+
+
+int main() { // runs the tensor sanity test plus every generated model test; exits non-zero on first failure
+ typedef Opm::DenseAd::Evaluation Evaluation; // NOTE(review): template arguments appear stripped by this patch -- confirm against the original file
+
+ Evaluation load_time = 0.0; // overwritten by each test via pointer; not otherwise reported
+ Evaluation apply_time = 0.0;
+
+ if (!tensor_test()) {
+ return 1;
+ }
+
+ if (!test_dense_1x1(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_dense_10x1(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_dense_2x2(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_dense_10x10(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_dense_10x10x10(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_relu_10(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_dense_relu_10(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_dense_tanh_10(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ if (!test_scalingdense_10x1(&load_time, &apply_time)) {
+ return 1;
+ }
+
+ return 0; // all tests passed
+}
\ No newline at end of file
diff --git a/tests/ml/ml_tools/generateunittests.py b/tests/ml/ml_tools/generateunittests.py
new file mode 100644
index 00000000000..1b63e9d61bf
--- /dev/null
+++ b/tests/ml/ml_tools/generateunittests.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2016 Robert W. Rose
+# Copyright (c) 2018 Paul Maevskikh
+# Copyright (c) 2024 NORCE
+# This file is part of the Open Porous Media project (OPM).
+# OPM is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# OPM is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with OPM. If not, see <http://www.gnu.org/licenses/>.
+
+import numpy as np
+import os, sys
+from tensorflow import keras
+
+from keras.models import Sequential
+from keras.layers import Conv2D, Dense, Activation, ELU, Embedding
+
+sys.path.insert(0, '../../../python/opm/ml')
+
+from ml_tools import export_model
+from ml_tools import MinMaxScalerLayer, MinMaxUnScalerLayer
+
+
+np.set_printoptions(precision=25, threshold=10000000)
+
+
+def c_array(a):  # render a numpy array as C++ brace-initialiser strings -> (shape, flat data)
+ def to_cpp(ndarray):  # numpy repr with '[' ']' swapped for '{' '}' and whitespace stripped
+ text = np.array2string(ndarray, separator=',', threshold=np.inf,
+ floatmode='unique')
+ return text.replace('[', '{').replace(']', '}').replace(' ', '')
+
+ s = to_cpp(a.ravel())
+ shape = to_cpp(np.asarray(a.shape)) if a.shape else '{1}'  # 0-d input becomes a 1-element tensor
+ return shape, s
+
+
+TEST_CASE = '''
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_%s(Evaluation* load_time, Evaluation* apply_time)
+{
+ printf("TEST %s\\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in%s;
+ in.data_ = %s;
+
+ Opm::Tensor out%s;
+ out.data_ = %s;
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("%s"), "Failed to load model");
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out;
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > %s), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
+'''  # C++ test template; %s slots filled by output_testcase(): name, name, in-shape, in-data, out-shape, out-data, model path, eps
+
+directory = os.getcwd()  # output is created relative to the CWD -- run from tests/ml/ml_tools
+directory1 = "models"  # exported .model binaries consumed by the C++ tests
+directory2 = "include"  # generated C++ test headers
+
+if os.path.isdir(directory1):
+ print(f"{directory1} exists.")
+else:
+ print(f"{directory1} does not exist.")
+ path1 = os.path.join(directory, directory1)
+ os.makedirs(path1)
+
+if os.path.isdir(directory2):
+ print(f"{directory2} exists.")
+else:
+ path2 = os.path.join(directory, directory2)
+ os.makedirs(path2)
+
+
+def output_testcase(model, test_x, test_y, name, eps):
+ print("Processing %s" % name)
+ model.compile(loss='mean_squared_error', optimizer='adamax')
+ model.fit(test_x, test_y, epochs=1, verbose=False)
+ predict_y = model.predict(test_x).astype('f')
+ print(model.summary())
+
+ export_model(model, 'models/test_%s.model' % name)
+
+ path = f'./tests/ml/ml_tools/models/test_{name}.model'
+ with open('include/test_%s.hpp' % name, 'w') as f:
+ x_shape, x_data = c_array(test_x[0])
+ y_shape, y_data = c_array(predict_y[0])
+
+ f.write(TEST_CASE % (name, name, x_shape, x_data, y_shape, y_data, path, eps))
+
+
+# scaling 10x1
+data: np.ndarray = np.random.uniform(-500, 500, (5, 1))
+feature_ranges: list[tuple[float, float]] = [(0.0, 1.0), (-3.7, 0.0)]
+test_x = np.random.rand(10, 10).astype('f')
+test_y = np.random.rand(10).astype('f')
+data_min = 10.0
+model = Sequential()
+model.add(keras.layers.Input([10]))
+model.add(MinMaxScalerLayer(feature_range=(0.0, 1.0)))
+model.add(Dense(10,activation='tanh'))
+model.add(Dense(10,activation='tanh'))
+model.add(Dense(10,activation='tanh'))
+model.add(Dense(10,activation='tanh'))
+model.add(MinMaxUnScalerLayer(feature_range=(-3.7, -1.0)))
+# #
+model.get_layer(model.layers[0].name).adapt(data=data)
+model.get_layer(model.layers[-1].name).adapt(data=data)
+output_testcase(model, test_x, test_y, 'scalingdense_10x1', '1e-3')
+
+# Dense 1x1
+test_x = np.arange(10)  # 10 scalar samples (assumes Keras accepts shape (10,) for input_dim=1 -- confirm on the pinned version)
+test_y = test_x * 10 + 1  # exact linear target a single Dense(1) unit can represent
+model = Sequential()
+model.add(Dense(1, input_dim=1))  # legacy ``input_dim`` kwarg, consistent with the other sections
+output_testcase(model, test_x, test_y, 'dense_1x1', '1e-6')
+
+# Dense 10x1
+test_x = np.random.rand(10, 10).astype('f')  # 10 samples, 10 features
+test_y = np.random.rand(10).astype('f')
+model = Sequential()
+model.add(Dense(1, input_dim=10))  # single linear unit: 10 -> 1
+output_testcase(model, test_x, test_y, 'dense_10x1', '1e-6')
+
+# Dense 2x2
+test_x = np.random.rand(10, 2).astype('f')
+test_y = np.random.rand(10).astype('f')
+model = Sequential()
+model.add(Dense(2, input_dim=2))  # 2 -> 2 -> 1 linear stack
+model.add(Dense(1))
+output_testcase(model, test_x, test_y, 'dense_2x2', '1e-6')
+
+# Dense 10x10
+test_x = np.random.rand(10, 10).astype('f')
+test_y = np.random.rand(10).astype('f')
+model = Sequential()
+model.add(Dense(10, input_dim=10))  # 10 -> 10 -> 1 linear stack
+model.add(Dense(1))
+output_testcase(model, test_x, test_y, 'dense_10x10', '1e-6')
+
+# Dense 10x10x10
+test_x = np.random.rand(10, 10).astype('f')
+test_y = np.random.rand(10, 10).astype('f')  # vector-valued target: output is 10-wide
+model = Sequential()
+model.add(Dense(10, input_dim=10))  # 10 -> 10 -> 10 linear stack
+model.add(Dense(10))
+output_testcase(model, test_x, test_y, 'dense_10x10x10', '1e-6')
+
+# Activation relu
+test_x = np.random.rand(1, 10).astype('f')
+test_y = np.random.rand(1, 10).astype('f')
+model = Sequential()
+model.add(Dense(10, input_dim=10))
+model.add(Activation('relu'))  # standalone Activation layer, exercised separately from fused Dense activations
+output_testcase(model, test_x, test_y, 'relu_10', '1e-6')
+
+# Dense relu
+test_x = np.random.rand(1, 10).astype('f')
+test_y = np.random.rand(1, 10).astype('f')
+model = Sequential()
+model.add(Dense(10, input_dim=10, activation='relu'))
+model.add(Dense(10, input_dim=10, activation='relu'))  # NOTE(review): input_dim on non-first layers is presumably ignored -- harmless but redundant
+model.add(Dense(10, input_dim=10, activation='relu'))
+output_testcase(model, test_x, test_y, 'dense_relu_10', '1e-6')
+
+# Dense tanh
+test_x = np.random.rand(1, 10).astype('f')
+test_y = np.random.rand(1, 10).astype('f')
+model = Sequential()
+model.add(Dense(10, input_dim=10, activation='tanh'))
+model.add(Dense(10, input_dim=10, activation='tanh'))  # NOTE(review): input_dim on non-first layers is presumably ignored -- harmless but redundant
+model.add(Dense(10, input_dim=10, activation='tanh'))
+output_testcase(model, test_x, test_y, 'dense_tanh_10', '1e-6')
\ No newline at end of file
diff --git a/tests/ml/ml_tools/include/test_dense_10x1.hpp b/tests/ml/ml_tools/include/test_dense_10x1.hpp
new file mode 100644
index 00000000000..f4e2363f291
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_10x1.hpp
@@ -0,0 +1,67 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_10x1(Evaluation* load_time, Evaluation* apply_time) // GENERATED by tests/ml/ml_tools/generateunittests.py -- regenerate instead of editing
+{
+ printf("TEST dense_10x1\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{10};
+ in.data_ = {0.33128357,0.5507012,0.089097455,0.9930643,0.3021132,0.44296286,
+0.77584934,0.5803623,0.6916559,0.48866668};
+
+ Opm::Tensor out{1}; // expected output recorded from Keras at generation time
+ out.data_ = {-0.59224933};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_10x1.model"), "Failed to load model"); // path is relative to the test's working directory
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out; // keep the expected values; apply() overwrites ``out``
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > 1e-6), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
diff --git a/tests/ml/ml_tools/include/test_dense_10x10.hpp b/tests/ml/ml_tools/include/test_dense_10x10.hpp
new file mode 100644
index 00000000000..9d44b3bffaa
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_10x10.hpp
@@ -0,0 +1,67 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_10x10(Evaluation* load_time, Evaluation* apply_time) // GENERATED by tests/ml/ml_tools/generateunittests.py -- regenerate instead of editing
+{
+ printf("TEST dense_10x10\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{10};
+ in.data_ = {0.3796144,0.83585316,0.19076388,0.029881366,0.9120564,0.53910017,
+0.9167442,0.4047214,0.36840722,0.06841394};
+
+ Opm::Tensor out{1}; // expected output recorded from Keras at generation time
+ out.data_ = {-0.13820542};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_10x10.model"), "Failed to load model"); // path is relative to the test's working directory
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out; // keep the expected values; apply() overwrites ``out``
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > 1e-6), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
diff --git a/tests/ml/ml_tools/include/test_dense_10x10x10.hpp b/tests/ml/ml_tools/include/test_dense_10x10x10.hpp
new file mode 100644
index 00000000000..ec0666a6c5c
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_10x10x10.hpp
@@ -0,0 +1,68 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_10x10x10(Evaluation* load_time, Evaluation* apply_time) // GENERATED by tests/ml/ml_tools/generateunittests.py -- regenerate instead of editing
+{
+ printf("TEST dense_10x10x10\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{10};
+ in.data_ = {0.5663459,0.6742576,0.34646514,0.6197347,0.94704074,0.03651024,
+0.14706604,0.21582149,0.6058318,0.33997878};
+
+ Opm::Tensor out{10}; // expected 10-wide output recorded from Keras at generation time
+ out.data_ = {0.15782323,0.26536155,0.014020069,-0.12612823,0.41744298,
+-0.6301674,-0.026504815,0.36927527,-0.14409286,0.4722299};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_10x10x10.model"), "Failed to load model"); // path is relative to the test's working directory
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out; // keep the expected values; apply() overwrites ``out``
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > 1e-6), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
diff --git a/tests/ml/ml_tools/include/test_dense_1x1.hpp b/tests/ml/ml_tools/include/test_dense_1x1.hpp
new file mode 100644
index 00000000000..8afe684c24a
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_1x1.hpp
@@ -0,0 +1,66 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_1x1(Evaluation* load_time, Evaluation* apply_time) // GENERATED by tests/ml/ml_tools/generateunittests.py -- regenerate instead of editing
+{
+ printf("TEST dense_1x1\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{1};
+ in.data_ = {0};
+
+ Opm::Tensor out{1}; // expected output recorded from Keras at generation time
+ out.data_ = {0.001};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_1x1.model"), "Failed to load model"); // path is relative to the test's working directory
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out; // keep the expected values; apply() overwrites ``out``
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > 1e-6), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
diff --git a/tests/ml/ml_tools/include/test_dense_2x2.hpp b/tests/ml/ml_tools/include/test_dense_2x2.hpp
new file mode 100644
index 00000000000..c2ba712f484
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_2x2.hpp
@@ -0,0 +1,66 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_2x2(Evaluation* load_time, Evaluation* apply_time) // GENERATED by tests/ml/ml_tools/generateunittests.py -- regenerate instead of editing
+{
+ printf("TEST dense_2x2\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{2};
+ in.data_ = {0.9670195,0.3782521};
+
+ Opm::Tensor out{1}; // expected output recorded from Keras at generation time
+ out.data_ = {-0.12220664};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_2x2.model"), "Failed to load model"); // path is relative to the test's working directory
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out; // keep the expected values; apply() overwrites ``out``
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > 1e-6), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
diff --git a/tests/ml/ml_tools/include/test_dense_relu_10.hpp b/tests/ml/ml_tools/include/test_dense_relu_10.hpp
new file mode 100644
index 00000000000..7f08e477b9e
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_relu_10.hpp
@@ -0,0 +1,68 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_relu_10(Evaluation* load_time, Evaluation* apply_time) // GENERATED by tests/ml/ml_tools/generateunittests.py -- regenerate instead of editing
+{
+ printf("TEST dense_relu_10\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{10};
+ in.data_ = {0.07037717,0.961961,0.116212614,0.07929625,0.35780948,0.58790404,
+0.15457754,0.8093645,0.18359734,0.5857788};
+
+ Opm::Tensor out{10}; // expected output; the exact zeros are ReLU cut-offs
+ out.data_ = {0.041210767,0.020567391,0.13336869,0.,0.,0.07310894,
+0.,0.075157285,0.16298258,0.};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_relu_10.model"), "Failed to load model"); // path is relative to the test's working directory
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor predict = out; // keep the expected values; apply() overwrites ``out``
+ OPM_ERROR_IF(!model.apply(in, out), "Failed to apply");
+
+ *apply_time = apply_timer.stop();
+
+ for (int i = 0; i < out.dims_[0]; i++)
+ {
+ OPM_ERROR_IF ((fabs(out(i).value() - predict(i).value()) > 1e-6), fmt::format(" Expected " "{}" " got " "{}",predict(i).value(),out(i).value()));
+ }
+
+ return true;
+}
diff --git a/tests/ml/ml_tools/include/test_dense_tanh_10.hpp b/tests/ml/ml_tools/include/test_dense_tanh_10.hpp
new file mode 100644
index 00000000000..7d8306d6eff
--- /dev/null
+++ b/tests/ml/ml_tools/include/test_dense_tanh_10.hpp
@@ -0,0 +1,68 @@
+
+/*
+ Copyright (c) 2016 Robert W. Rose
+ Copyright (c) 2018 Paul Maevskikh
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see .
+*/
+
+#include
+#include
+#include
+#include
+
+namespace fs = std::filesystem;
+
+using namespace Opm;
+template
+bool test_dense_tanh_10(Evaluation* load_time, Evaluation* apply_time)
+{
+ printf("TEST dense_tanh_10\n");
+
+ OPM_ERROR_IF(!load_time, "Invalid Evaluation");
+ OPM_ERROR_IF(!apply_time, "Invalid Evaluation");
+
+ Opm::Tensor in{10};
+ in.data_ = {0.0036774194,0.84451723,0.54717904,0.3704365,0.3736782,
+0.30650887,0.49595013,0.83192986,0.9395368,0.4966029};
+
+ Opm::Tensor out{10};
+ out.data_ = {0.35694593,-0.3102012,0.5760354,0.3260835,0.09098823,-0.34132,
+-0.08773771,-0.33041546,-0.1178762,-0.22991908};
+
+ NNTimer load_timer;
+ load_timer.start();
+
+ NNModel model;
+ OPM_ERROR_IF(!model.loadModel("./tests/ml/ml_tools/models/test_dense_tanh_10.model"), "Failed to load model");
+
+ *load_time = load_timer.stop();
+
+ NNTimer apply_timer;
+ apply_timer.start();
+
+ Opm::Tensor