diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml index 9327e830aa..8b95384c79 100644 --- a/.github/workflows/core.yml +++ b/.github/workflows/core.yml @@ -4,10 +4,10 @@ on: push: branches: - main - - '*_rel' + - "*_rel" schedule: # run daily at 5:00 am UTC (12 am ET/9 pm PT) - - cron: '0 5 * * *' + - cron: "0 5 * * *" repository_dispatch: # to run this, send a POST API call at repos/IDAES/idaes-pse/dispatches with the specified event_type # e.g. `gh repos/IDAES/idaes-pse/dispatches -F event_type=ci_run_tests` @@ -38,7 +38,7 @@ concurrency: env: # default Python version to use for checks that do not require multiple versions - DEFAULT_PYTHON_VERSION: '3.10' + DEFAULT_PYTHON_VERSION: "3.10" IDAES_CONDA_ENV_NAME_DEV: idaes-pse-dev PYTEST_ADDOPTS: "--color=yes" @@ -54,7 +54,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - + - uses: actions/setup-python@v5 with: python-version: ${{ env.DEFAULT_PYTHON_VERSION }} @@ -76,10 +76,10 @@ jobs: steps: - name: Checkout source uses: actions/checkout@v4 - + - name: Run Spell Checker uses: crate-ci/typos@v1.24.5 - with: + with: config: ./.github/workflows/typos.toml pytest: @@ -90,7 +90,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] os: - linux - win64 @@ -99,15 +99,14 @@ jobs: runner-image: ubuntu-24.04 - os: win64 runner-image: windows-2022 - - python-version: '3.11' + - python-version: "3.11" # only generate coverage report for a single python version in the matrix # to avoid overloading Codecov cov-report: true - steps: - uses: actions/checkout@v5 - + - uses: ./.github/actions/display-debug-info - name: Set up Conda environment uses: conda-incubator/setup-miniconda@v3 @@ -119,7 +118,7 @@ jobs: - name: Set up idaes uses: ./.github/actions/setup-idaes with: - install-target: -r requirements-dev.txt + install-target: -r requirements-dev.txt - name: Add pytest CLI options for coverage if: matrix.cov-report run: | @@ -134,7 +133,7 @@ jobs: name: coverage-report-${{ matrix.os }} path: coverage.xml if-no-files-found: error - + upload-coverage: name: Upload coverage report (Codecov) needs: [pytest] @@ -146,7 +145,7 @@ jobs: steps: # the checkout step is needed to have access to codecov.yml - uses: actions/checkout@v4 - + - uses: actions/download-artifact@v4 with: name: coverage-report-${{ matrix.report-variant }} @@ -170,13 +169,15 @@ jobs: needs: [code-formatting, spell-check] steps: - uses: actions/checkout@v4 - + - name: Set up Conda environment uses: conda-incubator/setup-miniconda@v3 with: activate-environment: ${{ env.IDAES_CONDA_ENV_NAME_DEV }} python-version: ${{ env.DEFAULT_PYTHON_VERSION }} miniforge-version: latest + - name: Install pandoc + run: conda install -c conda-forge pandoc - name: Set up idaes uses: ./.github/actions/setup-idaes with: @@ -206,7 +207,7 @@ jobs: # NOTE: using Conda instead of actions/setup-python in this job is not strictly necessary # as it doesn't need to run on Windows or use the setup-idaes local action, # but we do it for consistency with the other jobs - + - name: Set up Conda environment uses: conda-incubator/setup-miniconda@v3 with: @@ -216,7 +217,7 @@ jobs: - name: Set up idaes uses: ./.github/actions/setup-idaes with: - install-target: -r requirements-dev.txt + install-target: -r requirements-dev.txt - name: Run pylint run: | echo "::group::Display pylint version" @@ -240,7 +241,7 @@ jobs: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 - + - name: Set 
up Conda environment uses: conda-incubator/setup-miniconda@v3 with: @@ -250,7 +251,7 @@ jobs: - name: Set up idaes uses: ./.github/actions/setup-idaes with: - install-target: -r requirements-dev.txt + install-target: -r requirements-dev.txt - name: Create empty pytest.ini file run: | echo "" > pytest.ini diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml index ad5d99ec8e..1e7b78e10c 100644 --- a/.github/workflows/typos.toml +++ b/.github/workflows/typos.toml @@ -58,3 +58,6 @@ PN = "PN" hd = "hd" Tge = "Tge" iy = "iy" + +# more chemE abbreviations +HDA = "HDA" diff --git a/docs/build.py b/docs/build.py index 004576a381..5b13340509 100644 --- a/docs/build.py +++ b/docs/build.py @@ -72,6 +72,7 @@ def run_apidoc(clean=True, dry_run=False, **kwargs): "apidoc", "../idaes", "../idaes/*tests*", + "../idaes/core/util/structfs", # handled by apidoc2 ], 60, dry_run, diff --git a/docs/conf.py b/docs/conf.py index 96a7fd6c9b..eb3555ec8a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,6 +12,7 @@ # For importing from idaes. sys.path.insert(0, os.path.abspath("..")) +sys.path.insert(0, os.path.abspath(".")) # -- General configuration ------------------------------------------------ @@ -36,8 +37,25 @@ "sphinxarg.ext", "sphinx.ext.doctest", "sphinx_copybutton", + "nbsphinx", + # MystMD extensions + "myst_parser", + "autodoc2", ] +# Myst autodoc2 (experimental) +autodoc2_packages = [ + "../idaes/core/util/structfs", +] +autodoc2_output_dir = "reference_guides/core/util" +autodoc2_render_plugin = "myst" +autodoc2_docstring_parser_regexes = [ + # render docstrings in matching files as Markdown + ("../idaes/core/util/structfs/.*", "myst"), +] +autodoc2_no_index = True +autodoc2_index_template = None # don't write index.rst + # Put type hints in the description, not signature autodoc_typehints = "description" @@ -51,8 +69,7 @@ # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - +source_suffix = [".rst", ".md"] # The encoding of source files. # # source_encoding = 'utf-8-sig' @@ -85,7 +102,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["apidoc/*tests*"] +exclude_patterns = [ + "apidoc/*tests*", + "../idaes/core/util/structfs", +] # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False diff --git a/docs/examples/index.rst b/docs/examples/index.rst new file mode 100644 index 0000000000..1a650f0eeb --- /dev/null +++ b/docs/examples/index.rst @@ -0,0 +1,7 @@ +Examples +======== + +.. toctree:: + :maxdepth: 1 + + structfs/index diff --git a/docs/examples/structfs/flash_flowsheet.py b/docs/examples/structfs/flash_flowsheet.py new file mode 100644 index 0000000000..0246bcb3f4 --- /dev/null +++ b/docs/examples/structfs/flash_flowsheet.py @@ -0,0 +1,86 @@ +############################################################################### +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2025 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. 
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +# +############################################################################### +""" +Simple Flash flowsheet for use in testing. +""" + +from pyomo.environ import ConcreteModel, SolverFactory +from idaes.core import FlowsheetBlock + +# Import idaes logger to set output levels +import idaes.logger as idaeslog +from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import ( + BTXParameterBlock, +) +from idaes.models.unit_models import Flash +from idaes.core.util.structfs.fsrunner import FlowsheetRunner + + +FS = FlowsheetRunner() + +# # Flash Unit Model +# +# Author: Jaffer Ghouse +# Maintainer: Andrew Lee +# Updated: 2023-06-01 + + +@FS.step("build") +def build_model(ctx): + """Build the model.""" + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + m.fs.properties = BTXParameterBlock( + valid_phase=("Liq", "Vap"), activity_coeff_model="Ideal", state_vars="FTPz" + ) + m.fs.flash = Flash(property_package=m.fs.properties) + # assert degrees_of_freedom(m) == 7 + ctx.model = m + + +@FS.step("set_operating_conditions") +def set_operating_conditions(ctx): + """Set operating conditions.""" + m = ctx.model + m.fs.flash.inlet.flow_mol.fix(1) + m.fs.flash.inlet.temperature.fix(368) + m.fs.flash.inlet.pressure.fix(101325) + m.fs.flash.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.flash.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.flash.heat_duty.fix(0) + m.fs.flash.deltaP.fix(0) + + +@FS.step("initialize") +def init_model(ctx): + """ "Initialize the model.""" + m = ctx.model + m.fs.flash.initialize(outlvl=idaeslog.INFO) + + +@FS.step("set_solver") +def set_solver(ctx): + """Set the solver.""" + ctx.solver = SolverFactory("ipopt") + + +@FS.step("solve_initial") +def solve(ctx): + """Perform the initial model solve.""" + ctx["results"] = ctx.solver.solve(ctx.model, tee=ctx["tee"]) + + +@FS.step("solve_optimization") +def solve_o(ctx): + ctx["results"] = ctx.solver.solve(ctx.model, tee=ctx["tee"]) diff --git a/docs/examples/structfs/flash_flowsheet_nb.ipynb b/docs/examples/structfs/flash_flowsheet_nb.ipynb new file mode 100644 index 0000000000..3162dbcc0c --- /dev/null +++ b/docs/examples/structfs/flash_flowsheet_nb.ipynb @@ -0,0 +1,323 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "22ff8940", + "metadata": {}, + "source": [ + "# Structured Flowsheet Example\n", + "The Structured Flowsheet subpackage, in idaes.core.util.structfs, provides\n", + "a way to consistently structure flowsheets and then functions to help\n", + "manipulate them interactively or from an API.\n", + "\n", + "Author: Dan Gunter, LBNL" + ] + }, + { + "cell_type": "markdown", + "id": "a5d038e8", + "metadata": {}, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "854989ae", + "metadata": {}, + "outputs": [], + "source": [ + "# Import the 'FS' structured flowsheet wrapper\n", + "from flash_flowsheet import FS" + ] + }, + { + "cell_type": "markdown", + "id": "b195f28a", + "metadata": {}, + "source": [ + "## Solve the square problem" + ] + }, + { + "cell_type": "markdown", + "id": "568d5a77", + "metadata": {}, + "source": [ + "### Run the solver" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "4f58d1b7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Run steps:\n", + "build\n", + "set_operating_conditions\n", + "initialize\n", + "set_solver\n", + 
"solve_initial\n", + "solve_optimization\n", + "========================================\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_in: Initialization Step 1 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_in: Initialization Step 2 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_in: Initialization Step 3 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_in: Initialization Step 4 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_in: Initialization Step 5 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_out: Initialization Step 1 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_out: Initialization Step 2 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_out: Initialization Step 3 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_out: Initialization Step 4 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_out: Initialization Step 5 optimal - Optimal Solution Found.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_out: State Released.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume: Initialization Complete\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash.control_volume.properties_in: State Released.\n", + "2025-12-06 05:31:08 [INFO] idaes.init.fs.flash: Initialization Complete: optimal - Optimal Solution Found\n" + ] + } + ], + "source": [ + "# solve the square problem (up to 'solve_initial')\n", + "print(\"Run steps:\")\n", + "print(\"\\n\".join(FS.list_steps()))\n", + "print(\"=\" * 40)\n", + "FS.run_steps(last=\"solve_initial\")" + ] + }, + { + "cell_type": "markdown", + "id": "b254fa86", + "metadata": {}, + "source": [ + "### Show results" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "02d7f383", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Problem: \n", + "- Lower bound: -.inf\n", + " Upper bound: .inf\n", + " Number of objectives: 1\n", + " Number of constraints: 41\n", + " Number of variables: 41\n", + " Sense: unknown\n", + "Solver: \n", + "- Status: ok\n", + " Message: Ipopt 3.13.2\\x3a Optimal Solution Found\n", + " Termination condition: optimal\n", + " Id: 0\n", + " Error rc: 0\n", + " Time: 0.005049467086791992\n", + "Solution: \n", + "- number of solutions: 0\n", + " number of solutions displayed: 0\n", + "\n" + ] + } + ], + "source": [ + "print(FS.results)" + ] + }, + { + "cell_type": "markdown", + "id": "1c669172", + "metadata": {}, + "source": [ + "### Show timings" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "997466d7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time per step:\n", + "\n", + " build : 0.022 14.5%\n", + " set_operating_conditions : 0.000 0.1%\n", + " initialize : 0.111 74.7%\n", + " set_solver : 0.000 0.0%\n", + " solve_initial : 0.016 10.7%\n", + "\n", + "Total time: 0.151 s\n", + "\n" + ] + } + ], + "source": [ + "FS.timings" + ] + }, 
+ { + "cell_type": "markdown", + "id": "dda62457", + "metadata": {}, + "source": [ + "### Show degrees of freedom" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "9dd1ecee", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Degrees of freedom: 0\n", + "\n", + "Degrees of freedom after steps:\n", + " build:\n", + " fs : 7\n", + " fs.flash : 2\n", + " fs.flash.control_volume : 7\n", + " fs.flash.split : 0\n", + " fs.properties : 0\n", + " fs.properties.Liq : 0\n", + " fs.properties.Vap : 0\n", + " fs.properties.benzene : 0\n", + " fs.properties.toluene : 0\n", + " solve_initial:\n", + " fs : 0\n", + " fs.flash : 0\n", + " fs.flash.control_volume : 0\n", + " fs.flash.split : 0\n", + " fs.properties : 0\n", + " fs.properties.Liq : 0\n", + " fs.properties.Vap : 0\n", + " fs.properties.benzene : 0\n", + " fs.properties.toluene : 0\n" + ] + } + ], + "source": [ + "FS.dof" + ] + }, + { + "cell_type": "markdown", + "id": "546345dc", + "metadata": {}, + "source": [ + "## Solve optimization" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "fcac1f17", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Before: 368\n", + "After : 368.85306111169916\n" + ] + } + ], + "source": [ + "# unfix the inlet temperature\n", + "temp = FS.model.fs.flash.inlet.temperature[0]\n", + "print(f\"Before: {temp.value}\")\n", + "temp.free()\n", + "# solve again\n", + "FS.run_steps(first=\"solve_optimization\")\n", + "# look at new value\n", + "print(f\"After : {temp.value}\")" + ] + }, + { + "cell_type": "markdown", + "id": "585afc38", + "metadata": {}, + "source": [ + "### Show new degrees of freedom\n", + "Putting the name of the step as an additional attribute will print a summary of the DoF for \n", + "that step only." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "68d8794f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Degrees of freedom: 5\n", + "\n", + "fs : 1\n", + "fs.flash : 0\n", + "fs.flash.control_volume : 5\n", + "fs.flash.split : 0\n", + "fs.properties : 0\n", + "fs.properties.Liq : 0\n", + "fs.properties.Vap : 0\n", + "fs.properties.benzene : 0\n", + "fs.properties.toluene : 0\n" + ] + } + ], + "source": [ + "FS.dof.solve_optimization" + ] + }, + { + "cell_type": "markdown", + "id": "8e77e50b", + "metadata": {}, + "source": [ + "## Done" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "idaes-py3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/examples/structfs/hda_flowsheet.py b/docs/examples/structfs/hda_flowsheet.py new file mode 100644 index 0000000000..6858425154 --- /dev/null +++ b/docs/examples/structfs/hda_flowsheet.py @@ -0,0 +1,378 @@ +############################################################################### +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). 
+# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +############################################################################### + + +# +# # HDA Flowsheet Simulation and Optimization +# +# Author: Jaffer Ghouse +# Maintainer: Brandon Paul +# Updated: 2023-06-01 +# +# ## Learning outcomes +# +# +# - Construct a steady-state flowsheet using the IDAES unit model library +# - Connecting unit models in a flowsheet using Arcs +# - Using the SequentialDecomposition tool to initialize a flowsheet with recycle +# - Formulate and solve an optimization problem +# - Defining an objective function +# - Setting variable bounds +# - Adding additional constraints +# +# +# ## Problem Statement +# +# Hydrodealkylation is a chemical reaction that often involves reacting +# an aromatic hydrocarbon in the presence of hydrogen gas to form a +# simpler aromatic hydrocarbon devoid of functional groups. In this +# example, toluene will be reacted with hydrogen gas at high temperatures +# to form benzene via the following reaction: +# +# **C6H5CH3 + H2 → C6H6 + CH4** +# +# +# This reaction is often accompanied by an equilibrium side reaction +# which forms diphenyl, which we will neglect for this example. +# +# This example is based on the 1967 AIChE Student Contest problem as +# present by Douglas, J.M., Chemical Design of Chemical Processes, 1988, +# McGraw-Hill. +# +# The flowsheet that we will be using for this module is shown below with the stream conditions. We will be processing toluene and hydrogen to produce at least 370 TPY of benzene. As shown in the flowsheet, there are two flash tanks, F101 to separate out the non-condensibles and F102 to further separate the benzene-toluene mixture to improve the benzene purity. Note that typically a distillation column is required to obtain high purity benzene but that is beyond the scope of this workshop. The non-condensibles separated out in F101 will be partially recycled back to M101 and the rest will be either purged or combusted for power generation.We will assume ideal gas for this flowsheet. The properties required for this module are available in the same directory: +# +# - hda_ideal_VLE.py +# - hda_reaction.py +# +# The state variables chosen for the property package are **flows of component by phase, temperature and pressure**. The components considered are: **toluene, hydrogen, benzene and methane**. Therefore, every stream has 8 flow variables, 1 temperature and 1 pressure variable. +# +# ![](HDA_flowsheet.png) +# +# + +# ## Importing required pyomo and idaes components +# +# +# To construct a flowsheet, we will need several components from the pyomo and idaes package. 
Let us first import the following components from Pyomo: +# - Constraint (to write constraints) +# - Var (to declare variables) +# - ConcreteModel (to create the concrete model object) +# - Expression (to evaluate values as a function of variables defined in the model) +# - Objective (to define an objective function for optimization) +# - SolverFactory (to solve the problem) +# - TransformationFactory (to apply certain transformations) +# - Arc (to connect two unit models) +# - SequentialDecomposition (to initialize the flowsheet in a sequential mode) +# +# For further details on these components, please refer to the pyomo documentation: https://pyomo.readthedocs.io/en/stable/ +# + + +from pyomo.environ import ( + Constraint, + Var, + ConcreteModel, + Expression, + Objective, + SolverFactory, + TerminationCondition, + TransformationFactory, + value, +) +from pyomo.network import Arc, SequentialDecomposition + +from idaes.core import FlowsheetBlock + +from idaes.models.unit_models import ( + PressureChanger, + Mixer, + Separator as Splitter, + Heater, + StoichiometricReactor, +) + +from idaes.models.unit_models import Flash +from idaes.models.unit_models.pressure_changer import ThermodynamicAssumption +from idaes.core.util.model_statistics import degrees_of_freedom + +import idaes.logger as idaeslog +from idaes.core.solvers import get_solver +from idaes.core.util.exceptions import InitializationError + +from idaes.core.util.structfs.fsrunner import FlowsheetRunner, Context + +import hda_ideal_VLE as thermo_props +import hda_reaction as reaction_props + + +FS = FlowsheetRunner() + + +@FS.step("build") +def build_model(ctx: Context): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + add_property_packages(m) + add_units(m) + connect_units(m) + add_expr(m) + ctx.model = m + + +# We now need to add the property packages to the flowsheet. Unlike Module 1, where we only had a thermo property package, for this flowsheet we will also need to add a reaction property package. + + +@FS.substep("build", "add_props") +def add_property_packages(m): + m.fs.thermo_params = thermo_props.HDAParameterBlock() + m.fs.reaction_params = reaction_props.HDAReactionParameterBlock( + property_package=m.fs.thermo_params + ) + + +@FS.substep("build", "add_units") +def add_units(m): + """Add the unit models we have imported to the flowsheet. + + Here, we are adding the Mixer (assigned a name M101) and a Heater (assigned a name H101). + Note that, all unit models need to be given a property package argument. 
+ """ + m.fs.M101 = Mixer( + property_package=m.fs.thermo_params, + inlet_list=["toluene_feed", "hydrogen_feed", "vapor_recycle"], + ) + + m.fs.H101 = Heater( + property_package=m.fs.thermo_params, + has_pressure_change=False, + has_phase_equilibrium=True, + ) + + # Todo: Add reactor with the specifications above + m.fs.R101 = StoichiometricReactor( + property_package=m.fs.thermo_params, + reaction_package=m.fs.reaction_params, + has_heat_of_reaction=True, + has_heat_transfer=True, + has_pressure_change=False, + ) + + m.fs.F101 = Flash( + property_package=m.fs.thermo_params, + has_heat_transfer=True, + has_pressure_change=True, + ) + + m.fs.S101 = Splitter( + property_package=m.fs.thermo_params, + ideal_separation=False, + outlet_list=["purge", "recycle"], + ) + + m.fs.C101 = PressureChanger( + property_package=m.fs.thermo_params, + compressor=True, + thermodynamic_assumption=ThermodynamicAssumption.isothermal, + ) + + m.fs.F102 = Flash( + property_package=m.fs.thermo_params, + has_heat_transfer=True, + has_pressure_change=True, + ) + + +@FS.substep("build", "create_arcs") +def connect_units(m): + """Connect Unit Models using Arcs""" + m.fs.s03 = Arc(source=m.fs.M101.outlet, destination=m.fs.H101.inlet) + m.fs.s04 = Arc(source=m.fs.H101.outlet, destination=m.fs.R101.inlet) + m.fs.s05 = Arc(source=m.fs.R101.outlet, destination=m.fs.F101.inlet) + m.fs.s06 = Arc(source=m.fs.F101.vap_outlet, destination=m.fs.S101.inlet) + m.fs.s08 = Arc(source=m.fs.S101.recycle, destination=m.fs.C101.inlet) + m.fs.s09 = Arc(source=m.fs.C101.outlet, destination=m.fs.M101.vapor_recycle) + m.fs.s10 = Arc(source=m.fs.F101.liq_outlet, destination=m.fs.F102.inlet) + + TransformationFactory("network.expand_arcs").apply_to(m) + + +@FS.substep("build", "add_expressions") +def add_expr(m): + """Add expressions to compute purity and operating costs""" + + m.fs.purity = Expression( + expr=m.fs.F102.vap_outlet.flow_mol_phase_comp[0, "Vap", "benzene"] + / ( + m.fs.F102.vap_outlet.flow_mol_phase_comp[0, "Vap", "benzene"] + + m.fs.F102.vap_outlet.flow_mol_phase_comp[0, "Vap", "toluene"] + ) + ) + m.fs.cooling_cost = Expression( + expr=0.212e-7 * (-m.fs.F101.heat_duty[0]) + 0.212e-7 * (-m.fs.R101.heat_duty[0]) + ) + m.fs.heating_cost = Expression( + expr=2.2e-7 * m.fs.H101.heat_duty[0] + 1.9e-7 * m.fs.F102.heat_duty[0] + ) + m.fs.operating_cost = Expression( + expr=(3600 * 24 * 365 * (m.fs.heating_cost + m.fs.cooling_cost)) + ) + + +@FS.step("set_operating_conditions") +def set_op_cond(ctx): + m = ctx.model + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Vap", "benzene"].fix(1e-5) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Vap", "toluene"].fix(1e-5) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Vap", "hydrogen"].fix(1e-5) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Vap", "methane"].fix(1e-5) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Liq", "benzene"].fix(1e-5) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Liq", "toluene"].fix(0.30) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Liq", "hydrogen"].fix(1e-5) + m.fs.M101.toluene_feed.flow_mol_phase_comp[0, "Liq", "methane"].fix(1e-5) + m.fs.M101.toluene_feed.temperature.fix(303.2) + m.fs.M101.toluene_feed.pressure.fix(350000) + + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Vap", "benzene"].fix(1e-5) + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Vap", "toluene"].fix(1e-5) + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Vap", "hydrogen"].fix(0.30) + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Vap", "methane"].fix(0.02) + 
m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Liq", "benzene"].fix(1e-5) + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Liq", "toluene"].fix(1e-5) + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Liq", "hydrogen"].fix(1e-5) + m.fs.M101.hydrogen_feed.flow_mol_phase_comp[0, "Liq", "methane"].fix(1e-5) + m.fs.M101.hydrogen_feed.temperature.fix(303.2) + m.fs.M101.hydrogen_feed.pressure.fix(350000) + + m.fs.H101.outlet.temperature.fix(600) + + m.fs.R101.conversion = Var(initialize=0.75, bounds=(0, 1)) + + m.fs.R101.conv_constraint = Constraint( + expr=m.fs.R101.conversion + * m.fs.R101.inlet.flow_mol_phase_comp[0, "Vap", "toluene"] + == ( + m.fs.R101.inlet.flow_mol_phase_comp[0, "Vap", "toluene"] + - m.fs.R101.outlet.flow_mol_phase_comp[0, "Vap", "toluene"] + ) + ) + + m.fs.R101.conversion.fix(0.75) + m.fs.R101.heat_duty.fix(0) + + # Flash 1 + m.fs.F101.vap_outlet.temperature.fix(325.0) + m.fs.F101.deltaP.fix(0) + + # Flash 2 + m.fs.F102.vap_outlet.temperature.fix(375) + m.fs.F102.deltaP.fix(-200000) + + # Purge split fraction + m.fs.S101.split_fraction[0, "purge"].fix(0.2) + m.fs.C101.outlet.pressure.fix(350000) + + +@FS.step("initialize") +def initialization(ctx): + m = ctx.model + seq = SequentialDecomposition() + seq.options.select_tear_method = "heuristic" + seq.options.tear_method = "Wegstein" + seq.options.iterLim = 3 + + # Using the SD tool + G = seq.create_graph(m) + heuristic_tear_set = seq.tear_set_arcs(G, method="heuristic") + order = seq.calculation_order(G) + + tear_guesses = { + "flow_mol_phase_comp": { + (0, "Vap", "benzene"): 1e-5, + (0, "Vap", "toluene"): 1e-5, + (0, "Vap", "hydrogen"): 0.30, + (0, "Vap", "methane"): 0.02, + (0, "Liq", "benzene"): 1e-5, + (0, "Liq", "toluene"): 0.30, + (0, "Liq", "hydrogen"): 1e-5, + (0, "Liq", "methane"): 1e-5, + }, + "temperature": {0: 303}, + "pressure": {0: 350000}, + } + + # Pass the tear_guess to the SD tool + seq.set_guesses_for(m.fs.H101.inlet, tear_guesses) + + def init_function(unit): + try: + initializer = unit.default_initializer() + initializer.initialize(unit, output_level=idaeslog.INFO) + except InitializationError: + solver = get_solver() + solver.solve(unit) + + seq.run(m, init_function) + + +@FS.step("set_solver") +def set_solver(ctx): + ctx.solver = SolverFactory("ipopt") + + +@FS.step("solve_initial") +def solve(ctx): + """Perform the initial model solve.""" + ctx["status"] = results = ctx.solver.solve(ctx.model, tee=ctx["tee"]) + assert results.solver.termination_condition == TerminationCondition.optimal + + +@FS.step("solve_optimization") +def solve_opt(ctx): + m = ctx.model + m.fs.objective = Objective(expr=m.fs.operating_cost) + + m.fs.H101.outlet.temperature.unfix() + m.fs.R101.heat_duty.unfix() + m.fs.F101.vap_outlet.temperature.unfix() + m.fs.F102.vap_outlet.temperature.unfix() + + m.fs.F102.deltaP.unfix() + + assert degrees_of_freedom(m) == 5 + + m.fs.H101.outlet.temperature[0].setlb(500) + m.fs.H101.outlet.temperature[0].setub(600) + + m.fs.R101.outlet.temperature[0].setlb(600) + m.fs.R101.outlet.temperature[0].setub(800) + + m.fs.F101.vap_outlet.temperature[0].setlb(298.0) + m.fs.F101.vap_outlet.temperature[0].setub(450.0) + m.fs.F102.vap_outlet.temperature[0].setlb(298.0) + m.fs.F102.vap_outlet.temperature[0].setub(450.0) + m.fs.F102.vap_outlet.pressure[0].setlb(105000) + m.fs.F102.vap_outlet.pressure[0].setub(110000) + + m.fs.overhead_loss = Constraint( + expr=m.fs.F101.vap_outlet.flow_mol_phase_comp[0, "Vap", "benzene"] + <= 0.20 * m.fs.R101.outlet.flow_mol_phase_comp[0, "Vap", "benzene"] + ) + + 
m.fs.product_flow = Constraint( + expr=m.fs.F102.vap_outlet.flow_mol_phase_comp[0, "Vap", "benzene"] >= 0.15 + ) + + m.fs.product_purity = Constraint(expr=m.fs.purity >= 0.80) + + results = ctx.solver.solve(ctx.model, tee=ctx["tee"]) + ctx["results"] = results diff --git a/docs/examples/structfs/hda_flowsheet_nb.ipynb b/docs/examples/structfs/hda_flowsheet_nb.ipynb new file mode 100644 index 0000000000..815905f92c --- /dev/null +++ b/docs/examples/structfs/hda_flowsheet_nb.ipynb @@ -0,0 +1,124 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e26e86fc", + "metadata": {}, + "source": [ + "# Running the HDA flowsheet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd678c21", + "metadata": {}, + "outputs": [], + "source": [ + "from pyomo.environ import TerminationCondition, value\n", + "from hda_flowsheet import FS\n", + "from idaes.core.solvers import get_solver\n", + "\n", + "solver = None\n", + "try:\n", + " solver = get_solver()\n", + "except:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0bfdd163", + "metadata": {}, + "outputs": [], + "source": [ + "# run up to, but not including, optimization\n", + "if solver:\n", + " FS.run_steps(before=\"solve_optimization\")\n", + " # print degrees of freedom\n", + " print(FS.dof)\n", + " # run rest of steps\n", + " FS.run_steps(first=\"solve_optimization\")\n", + " # print degrees of freedom again\n", + " print(FS.dof)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a96da4a", + "metadata": {}, + "outputs": [], + "source": [ + "# examine results\n", + "\n", + "m = FS.model\n", + "\n", + "if m:\n", + " # What is the total operating cost?\n", + " print(\"operating cost = $\", value(m.fs.operating_cost))\n", + "\n", + " # For this operating cost, what is the amount of benzene we are able to produce and what purity we are able to achieve?\n", + " m.fs.F102.report()\n", + " print()\n", + " print(\"benzene purity = \", value(m.fs.purity))\n", + "\n", + "\n", + " # How much benzene are we losing in the F101 vapor outlet stream?\n", + " from idaes.core.util.tables import (\n", + " create_stream_table_dataframe,\n", + " stream_table_dataframe_to_string,\n", + " )\n", + " st = create_stream_table_dataframe({\"Reactor\": m.fs.s05, \"Light Gases\": m.fs.s06})\n", + " print(stream_table_dataframe_to_string(st))" + ] + }, + { + "cell_type": "markdown", + "id": "ca0cf4d4", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e3ec5e4b", + "metadata": {}, + "outputs": [], + "source": [ + "# Uncomment to show diagram\n", + "# FS.show_diagram()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83297a7a", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "idaes-py3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/examples/structfs/hda_ideal_VLE.py b/docs/examples/structfs/hda_ideal_VLE.py new file mode 100644 index 0000000000..c6295a1c94 --- /dev/null +++ b/docs/examples/structfs/hda_ideal_VLE.py @@ -0,0 +1,1341 @@ +############################################################################## +# Institute for the Design of 
Advanced Energy Systems Process Systems +# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the +# software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia +# University Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and +# license information, respectively. Both files are also available online +# at the URL "https://github.com/IDAES/idaes-pse". +############################################################################## +""" +Example ideal parameter block for the VLE calculations for a +Benzene-Toluene-o-Xylene system. +""" + + +# Import Pyomo libraries +from pyomo.environ import ( + Constraint, + Expression, + log, + NonNegativeReals, + Var, + Set, + Param, + sqrt, + log10, + units as pyunits, +) +from pyomo.util.calc_var_value import calculate_variable_from_constraint +from pyomo.common.config import ConfigValue + +# Import IDAES cores +from idaes.core import ( + declare_process_block_class, + MaterialFlowBasis, + PhysicalParameterBlock, + StateBlockData, + StateBlock, + MaterialBalanceType, + EnergyBalanceType, + Component, + LiquidPhase, + VaporPhase, +) +from idaes.core.util.constants import Constants as const +from idaes.core.util.initialization import fix_state_vars, solve_indexed_blocks +from idaes.core.initialization import InitializerBase +from idaes.core.util.misc import add_object_reference +from idaes.core.util.model_statistics import number_unfixed_variables +from idaes.core.util.misc import extract_data +from idaes.core.solvers import get_solver +import idaes.core.util.scaling as iscale +import idaes.logger as idaeslog + +# Set up logger +_log = idaeslog.getLogger(__name__) + + +class HDAInitializer(InitializerBase): + """ + Initializer for HDA Property package. + + """ + + CONFIG = InitializerBase.CONFIG() + CONFIG.declare( + "solver", + ConfigValue(default=None, domain=str, description="Initialization solver"), + ) + CONFIG.declare( + "solver_options", + ConfigValue(default=None, description="Initialization solver options"), + ) + + def initialization_routine(self, blk): + init_log = idaeslog.getInitLogger( + blk.name, self.config.output_level, tag="properties" + ) + solve_log = idaeslog.getSolveLogger( + blk.name, self.config.output_level, tag="properties" + ) + + # Set solver + solver = get_solver(self.config.solver, self.config.solver_options) + + # --------------------------------------------------------------------- + # If present, initialize bubble and dew point calculations + for k in blk.keys(): + if hasattr(blk[k], "eq_temperature_dew"): + calculate_variable_from_constraint( + blk[k].temperature_dew, blk[k].eq_temperature_dew + ) + + if hasattr(blk[k], "eq_pressure_dew"): + calculate_variable_from_constraint( + blk[k].pressure_dew, blk[k].eq_pressure_dew + ) + + init_log.info_high( + "Initialization Step 1 - Dew and bubble points " "calculation completed." 
+ ) + + # --------------------------------------------------------------------- + # If flash, initialize T1 and Teq + for k in blk.keys(): + if blk[k].config.has_phase_equilibrium and not blk[k].config.defined_state: + blk[k]._t1.value = max( + blk[k].temperature.value, blk[k].temperature_bubble.value + ) + blk[k]._teq.value = min(blk[k]._t1.value, blk[k].temperature_dew.value) + + init_log.info_high( + "Initialization Step 2 - Equilibrium temperature " " calculation completed." + ) + + # --------------------------------------------------------------------- + # Initialize flow rates and compositions + free_vars = 0 + for k in blk.keys(): + free_vars += number_unfixed_variables(blk[k]) + if free_vars > 0: + try: + with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc: + res = solve_indexed_blocks(solver, [blk], tee=slc.tee) + except: + res = None + else: + res = None + + init_log.info("Initialization Complete") + + return res + + +@declare_process_block_class("HDAParameterBlock") +class HDAParameterData(PhysicalParameterBlock): + CONFIG = PhysicalParameterBlock.CONFIG() + + def build(self): + """ + Callable method for Block construction. + """ + super(HDAParameterData, self).build() + + self._state_block_class = IdealStateBlock + + self.benzene = Component() + self.toluene = Component() + self.methane = Component() + self.hydrogen = Component() + + self.Liq = LiquidPhase() + self.Vap = VaporPhase() + + # List of components in each phase (optional) + self.phase_comp = {"Liq": self.component_list, "Vap": self.component_list} + + # List of phase equilibrium index + self.phase_equilibrium_idx = Set(initialize=[1, 2, 3, 4]) + + self.phase_equilibrium_list = { + 1: ["benzene", ("Vap", "Liq")], + 2: ["toluene", ("Vap", "Liq")], + 3: ["hydrogen", ("Vap", "Liq")], + 4: ["methane", ("Vap", "Liq")], + } + + # Thermodynamic reference state + self.pressure_ref = Param( + mutable=True, default=101325, units=pyunits.Pa, doc="Reference pressure" + ) + self.temperature_ref = Param( + mutable=True, default=298.15, units=pyunits.K, doc="Reference temperature" + ) + + # Source: The Properties of Gases and Liquids (1987) + # 4th edition, Chemical Engineering Series - Robert C. Reid + pressure_crit_data = { + "benzene": 48.9e5, + "toluene": 41e5, + "hydrogen": 12.9e5, + "methane": 46e5, + } + + self.pressure_crit = Param( + self.component_list, + within=NonNegativeReals, + mutable=True, + units=pyunits.Pa, + initialize=extract_data(pressure_crit_data), + doc="Critical pressure", + ) + + # Source: The Properties of Gases and Liquids (1987) + # 4th edition, Chemical Engineering Series - Robert C. Reid + temperature_crit_data = { + "benzene": 562.2, + "toluene": 591.8, + "hydrogen": 33.0, + "methane": 190.4, + } + + self.temperature_crit = Param( + self.component_list, + within=NonNegativeReals, + mutable=True, + units=pyunits.K, + initialize=extract_data(temperature_crit_data), + doc="Critical temperature", + ) + + # Source: The Properties of Gases and Liquids (1987) + # 4th edition, Chemical Engineering Series - Robert C. Reid + mw_comp_data = { + "benzene": 78.1136e-3, + "toluene": 92.1405e-3, + "hydrogen": 2.016e-3, + "methane": 16.043e-3, + } + + self.mw_comp = Param( + self.component_list, + mutable=True, + units=pyunits.kg / pyunits.mol, + initialize=extract_data(mw_comp_data), + doc="molecular weight", + ) + + # Constants for liquid densities + # Source: Perry's Chemical Engineers Handbook + # - Robert H. 
Perry (Cp_liq) + dens_liq_data = { + ("benzene", "1"): 1.0162, + ("benzene", "2"): 0.2655, + ("benzene", "3"): 562.16, + ("benzene", "4"): 0.28212, + ("toluene", "1"): 0.8488, + ("toluene", "2"): 0.26655, + ("toluene", "3"): 591.8, + ("toluene", "4"): 0.2878, + ("hydrogen", "1"): 5.414, + ("hydrogen", "2"): 0.34893, + ("hydrogen", "3"): 33.19, + ("hydrogen", "4"): 0.2706, + ("methane", "1"): 2.9214, + ("methane", "2"): 0.28976, + ("methane", "3"): 190.56, + ("methane", "4"): 0.28881, + } + + self.dens_liq_param_1 = Param( + self.component_list, + mutable=True, + initialize={c: v for (c, j), v in dens_liq_data.items() if j == "1"}, + doc="Parameter 1 to compute liquid densities", + units=pyunits.kmol * pyunits.m**-3, + ) + + self.dens_liq_param_2 = Param( + self.component_list, + mutable=True, + initialize={c: v for (c, j), v in dens_liq_data.items() if j == "2"}, + doc="Parameter 2 to compute liquid densities", + units=pyunits.dimensionless, + ) + + self.dens_liq_param_3 = Param( + self.component_list, + mutable=True, + initialize={c: v for (c, j), v in dens_liq_data.items() if j == "3"}, + doc="Parameter 3 to compute liquid densities", + units=pyunits.K, + ) + + self.dens_liq_param_4 = Param( + self.component_list, + mutable=True, + initialize={c: v for (c, j), v in dens_liq_data.items() if j == "4"}, + doc="Parameter 4 to compute liquid densities", + units=pyunits.dimensionless, + ) + + # Boiling point at standard pressure + # Source: Perry's Chemical Engineers Handbook + # - Robert H. Perry (Cp_liq) + bp_data = { + ("benzene"): 353.25, + ("toluene"): 383.95, + ("hydrogen"): 20.45, + ("methane"): 111.75, + } + + self.temperature_boil = Param( + self.component_list, + mutable=True, + units=pyunits.K, + initialize=extract_data(bp_data), + doc="Pure component boiling points at standard pressure", + ) + + # Constants for specific heat capacity, enthalpy + # Sources: The Properties of Gases and Liquids (1987) + # 4th edition, Chemical Engineering Series - Robert C. Reid + # Perry's Chemical Engineers Handbook + # - Robert H. 
Perry (Cp_liq) + cp_ig_data = { + ("Liq", "benzene", "1"): 1.29e5, + ("Liq", "benzene", "2"): -1.7e2, + ("Liq", "benzene", "3"): 6.48e-1, + ("Liq", "benzene", "4"): 0, + ("Liq", "benzene", "5"): 0, + ("Vap", "benzene", "1"): -3.392e1, + ("Vap", "benzene", "2"): 4.739e-1, + ("Vap", "benzene", "3"): -3.017e-4, + ("Vap", "benzene", "4"): 7.130e-8, + ("Vap", "benzene", "5"): 0, + ("Liq", "toluene", "1"): 1.40e5, + ("Liq", "toluene", "2"): -1.52e2, + ("Liq", "toluene", "3"): 6.95e-1, + ("Liq", "toluene", "4"): 0, + ("Liq", "toluene", "5"): 0, + ("Vap", "toluene", "1"): -2.435e1, + ("Vap", "toluene", "2"): 5.125e-1, + ("Vap", "toluene", "3"): -2.765e-4, + ("Vap", "toluene", "4"): 4.911e-8, + ("Vap", "toluene", "5"): 0, + ("Liq", "hydrogen", "1"): 0, # 6.6653e1, + ("Liq", "hydrogen", "2"): 0, # 6.7659e3, + ("Liq", "hydrogen", "3"): 0, # -1.2363e2, + ("Liq", "hydrogen", "4"): 0, # 4.7827e2, # Eqn 2 + ("Liq", "hydrogen", "5"): 0, + ("Vap", "hydrogen", "1"): 2.714e1, + ("Vap", "hydrogen", "2"): 9.274e-3, + ("Vap", "hydrogen", "3"): -1.381e-5, + ("Vap", "hydrogen", "4"): 7.645e-9, + ("Vap", "hydrogen", "5"): 0, + ("Liq", "methane", "1"): 0, # 6.5708e1, + ("Liq", "methane", "2"): 0, # 3.8883e4, + ("Liq", "methane", "3"): 0, # -2.5795e2, + ("Liq", "methane", "4"): 0, # 6.1407e2, # Eqn 2 + ("Liq", "methane", "5"): 0, + ("Vap", "methane", "1"): 1.925e1, + ("Vap", "methane", "2"): 5.213e-2, + ("Vap", "methane", "3"): 1.197e-5, + ("Vap", "methane", "4"): -1.132e-8, + ("Vap", "methane", "5"): 0, + } + + self.cp_ig_1 = Param( + self.phase_list, + self.component_list, + mutable=True, + initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == "1"}, + doc="Parameter 1 to compute Cp_comp", + units=pyunits.J / pyunits.mol / pyunits.K, + ) + + self.cp_ig_2 = Param( + self.phase_list, + self.component_list, + mutable=True, + initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == "2"}, + doc="Parameter 2 to compute Cp_comp", + units=pyunits.J / pyunits.mol / pyunits.K**2, + ) + + self.cp_ig_3 = Param( + self.phase_list, + self.component_list, + mutable=True, + initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == "3"}, + doc="Parameter 3 to compute Cp_comp", + units=pyunits.J / pyunits.mol / pyunits.K**3, + ) + + self.cp_ig_4 = Param( + self.phase_list, + self.component_list, + mutable=True, + initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == "4"}, + doc="Parameter 4 to compute Cp_comp", + units=pyunits.J / pyunits.mol / pyunits.K**4, + ) + + self.cp_ig_5 = Param( + self.phase_list, + self.component_list, + mutable=True, + initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == "5"}, + doc="Parameter 5 to compute Cp_comp", + units=pyunits.J / pyunits.mol / pyunits.K**5, + ) + + # Source: The Properties of Gases and Liquids (1987) + # 4th edition, Chemical Engineering Series - Robert C. 
Reid + # fitted to Antoine form + # H2, Methane from NIST webbook + pressure_sat_coeff_data = { + ("benzene", "A"): 4.202, + ("benzene", "B"): 1322, + ("benzene", "C"): -38.56, + ("toluene", "A"): 4.216, + ("toluene", "B"): 1435, + ("toluene", "C"): -43.33, + ("hydrogen", "A"): 3.543, + ("hydrogen", "B"): 99.40, + ("hydrogen", "C"): 7.726, + ("methane", "A"): 3.990, + ("methane", "B"): 443.0, + ("methane", "C"): -0.49, + } + + self.pressure_sat_coeff_A = Param( + self.component_list, + mutable=True, + initialize={ + c: v for (c, j), v in pressure_sat_coeff_data.items() if j == "A" + }, + doc="Parameter A to compute saturated pressure", + units=pyunits.dimensionless, + ) + + self.pressure_sat_coeff_B = Param( + self.component_list, + mutable=True, + initialize={ + c: v for (c, j), v in pressure_sat_coeff_data.items() if j == "B" + }, + doc="Parameter B to compute saturated pressure", + units=pyunits.K, + ) + + self.pressure_sat_coeff_C = Param( + self.component_list, + mutable=True, + initialize={ + c: v for (c, j), v in pressure_sat_coeff_data.items() if j == "C" + }, + doc="Parameter C to compute saturated pressure", + units=pyunits.K, + ) + + # Source: The Properties of Gases and Liquids (1987) + # 4th edition, Chemical Engineering Series - Robert C. Reid + dh_vap = {"benzene": 3.387e4, "toluene": 3.8262e4, "hydrogen": 0, "methane": 0} + + self.dh_vap = Param( + self.component_list, + mutable=True, + units=pyunits.J / pyunits.mol, + initialize=extract_data(dh_vap), + doc="heat of vaporization", + ) + + # Set default scaling factors + self.set_default_scaling("flow_mol", 1e3) + self.set_default_scaling("flow_mol_phase_comp", 1e3) + self.set_default_scaling("flow_mol_phase", 1e3) + self.set_default_scaling("material_flow_terms", 1e3) + self.set_default_scaling("enthalpy_flow_terms", 1e-2) + self.set_default_scaling("mole_frac_comp", 1e1) + self.set_default_scaling("temperature", 1e-2) + self.set_default_scaling("temperature_dew", 1e-2) + self.set_default_scaling("temperature_bubble", 1e-2) + self.set_default_scaling("pressure", 1e-5) + self.set_default_scaling("pressure_sat", 1e-5) + self.set_default_scaling("pressure_dew", 1e-5) + self.set_default_scaling("pressure_bubble", 1e-5) + self.set_default_scaling("mole_frac_phase_comp", 1e1) + self.set_default_scaling("enth_mol_phase", 1e-3, index="Liq") + self.set_default_scaling("enth_mol_phase", 1e-4, index="Vap") + self.set_default_scaling("enth_mol", 1e-3) + self.set_default_scaling("entr_mol_phase", 1e-2) + self.set_default_scaling("entr_mol", 1e-2) + + @classmethod + def define_metadata(cls, obj): + """Define properties supported and units.""" + obj.add_properties( + { + "flow_mol": {"method": None}, + "flow_mol_phase_comp": {"method": None}, + "mole_frac_comp": {"method": None}, + "temperature": {"method": None}, + "pressure": {"method": None}, + "flow_mol_phase": {"method": None}, + "dens_mol_phase": {"method": "_dens_mol_phase"}, + "pressure_sat": {"method": "_pressure_sat"}, + "mole_frac_phase_comp": {"method": "_mole_frac_phase"}, + "energy_internal_mol_phase_comp": { + "method": "_energy_internal_mol_phase_comp" + }, + "energy_internal_mol_phase": {"method": "_energy_internal_mol_phase"}, + "enth_mol_phase_comp": {"method": "_enth_mol_phase_comp"}, + "enth_mol_phase": {"method": "_enth_mol_phase"}, + "entr_mol_phase_comp": {"method": "_entr_mol_phase_comp"}, + "entr_mol_phase": {"method": "_entr_mol_phase"}, + "temperature_bubble": {"method": "_temperature_bubble"}, + "temperature_dew": {"method": "_temperature_dew"}, + 
"pressure_bubble": {"method": "_pressure_bubble"}, + "pressure_dew": {"method": "_pressure_dew"}, + "fug_phase_comp": {"method": "_fug_phase_comp"}, + } + ) + + obj.define_custom_properties( + { + # Enthalpy of vaporization + "dh_vap": {"method": "_dh_vap", "units": obj.derived_units.ENERGY_MOLE}, + # Entropy of vaporization + "ds_vap": { + "method": "_ds_vap", + "units": obj.derived_units.ENTROPY_MOLE, + }, + } + ) + + obj.add_default_units( + { + "time": pyunits.s, + "length": pyunits.m, + "mass": pyunits.kg, + "amount": pyunits.mol, + "temperature": pyunits.K, + } + ) + + +class _IdealStateBlock(StateBlock): + """ + This Class contains methods which should be applied to Property Blocks as a + whole, rather than individual elements of indexed Property Blocks. + """ + + default_initializer = HDAInitializer + + def fix_initialization_states(blk): + """ + Fixes state variables for state blocks. + + Returns: + None + """ + + # Fix state variables + fix_state_vars(blk) + + # Also need to deactivate sum of mole fraction constraint + for k in blk.values(): + if not k.config.defined_state and k.config.has_phase_equilibrium: + k.equilibrium_constraint.deactivate() + + +@declare_process_block_class("IdealStateBlock", block_class=_IdealStateBlock) +class IdealStateBlockData(StateBlockData): + """An example property package for ideal VLE.""" + + def build(self): + """Callable method for Block construction.""" + super().build() + + # Add state variables + self.flow_mol_phase_comp = Var( + self._params.phase_list, + self._params.component_list, + initialize=0.5, + units=pyunits.mol / pyunits.s, + bounds=(1e-12, 100), + doc="Phase-component molar flow rates", + ) + + self.pressure = Var( + initialize=101325, + bounds=(100000, 1000000), + units=pyunits.Pa, + domain=NonNegativeReals, + doc="State pressure", + ) + self.temperature = Var( + initialize=298.15, + units=pyunits.K, + bounds=(298, 1000), + domain=NonNegativeReals, + doc="State temperature", + ) + + # Add supporting variables + def flow_mol_phase(b, p): + return sum(b.flow_mol_phase_comp[p, j] for j in b._params.component_list) + + self.flow_mol_phase = Expression( + self._params.phase_list, rule=flow_mol_phase, doc="Phase molar flow rates" + ) + + def flow_mol(b): + return sum( + b.flow_mol_phase_comp[p, j] + for j in b._params.component_list + for p in b._params.phase_list + ) + + self.flow_mol = Expression(rule=flow_mol, doc="Total molar flowrate") + + def mole_frac_phase_comp(b, p, j): + return b.flow_mol_phase_comp[p, j] / b.flow_mol_phase[p] + + self.mole_frac_phase_comp = Expression( + self._params.phase_list, + self._params.component_list, + rule=mole_frac_phase_comp, + doc="Phase mole fractions", + ) + + def mole_frac_comp(b, j): + return ( + sum(b.flow_mol_phase_comp[p, j] for p in b._params.phase_list) + / b.flow_mol + ) + + self.mole_frac_comp = Expression( + self._params.component_list, + rule=mole_frac_comp, + doc="Mixture mole fractions", + ) + + # Reaction Stoichiometry + add_object_reference( + self, "phase_equilibrium_list_ref", self._params.phase_equilibrium_list + ) + + if self.config.has_phase_equilibrium and self.config.defined_state is False: + # Definition of equilibrium temperature for smooth VLE + self._teq = Var( + initialize=self.temperature.value, + units=pyunits.K, + doc="Temperature for calculating phase equilibrium", + ) + self._t1 = Var( + initialize=self.temperature.value, + units=pyunits.K, + doc="Intermediate temperature for calculating Teq", + ) + + self.eps_1 = Param( + default=0.01, + units=pyunits.K, + 
mutable=True, + doc="Smoothing parameter for Teq", + ) + self.eps_2 = Param( + default=0.0005, + units=pyunits.K, + mutable=True, + doc="Smoothing parameter for Teq", + ) + + # PSE paper Eqn 13 + def rule_t1(b): + return b._t1 == 0.5 * ( + b.temperature + + b.temperature_bubble + + sqrt((b.temperature - b.temperature_bubble) ** 2 + b.eps_1**2) + ) + + self._t1_constraint = Constraint(rule=rule_t1) + + # PSE paper Eqn 14 + # TODO : Add option for supercritical extension + def rule_teq(b): + return b._teq == 0.5 * ( + b._t1 + + b.temperature_dew + - sqrt((b._t1 - b.temperature_dew) ** 2 + b.eps_2**2) + ) + + self._teq_constraint = Constraint(rule=rule_teq) + + def rule_tr_eq(b, i): + return b._teq / b._params.temperature_crit[i] + + self._tr_eq = Expression( + self._params.component_list, + rule=rule_tr_eq, + doc="Component reduced temperatures", + ) + + def rule_equilibrium(b, i): + return b.fug_phase_comp["Liq", i] == b.fug_phase_comp["Vap", i] + + self.equilibrium_constraint = Constraint( + self._params.component_list, rule=rule_equilibrium + ) + + # ----------------------------------------------------------------------------- + # Property Methods + def _dens_mol_phase(self): + self.dens_mol_phase = Var( + self._params.phase_list, + initialize=1.0, + units=pyunits.mol * pyunits.m**-3, + doc="Molar density", + ) + + def rule_dens_mol_phase(b, p): + if p == "Vap": + return b._dens_mol_vap() + else: + return b._dens_mol_liq() + + self.eq_dens_mol_phase = Constraint( + self._params.phase_list, rule=rule_dens_mol_phase + ) + + def _energy_internal_mol_phase_comp(self): + self.energy_internal_mol_phase_comp = Var( + self._params.phase_list, + self._params.component_list, + units=pyunits.J / pyunits.mol, + doc="Phase-component molar specific internal energies", + ) + + def rule_energy_internal_mol_phase_comp(b, p, j): + if p == "Vap": + return b.energy_internal_mol_phase_comp[p, j] == b.enth_mol_phase_comp[ + p, j + ] - const.gas_constant * (b.temperature - b._params.temperature_ref) + else: + return ( + b.energy_internal_mol_phase_comp[p, j] + == b.enth_mol_phase_comp[p, j] + ) + + self.eq_energy_internal_mol_phase_comp = Constraint( + self._params.phase_list, + self._params.component_list, + rule=rule_energy_internal_mol_phase_comp, + ) + + def _energy_internal_mol_phase(self): + self.energy_internal_mol_phase = Var( + self._params.phase_list, + units=pyunits.J / pyunits.mol, + doc="Phase molar specific internal energies", + ) + + def rule_energy_internal_mol_phase(b, p): + return b.energy_internal_mol_phase[p] == sum( + b.energy_internal_mol_phase_comp[p, i] * b.mole_frac_phase_comp[p, i] + for i in b._params.component_list + ) + + self.eq_energy_internal_mol_phase = Constraint( + self._params.phase_list, rule=rule_energy_internal_mol_phase + ) + + def _enth_mol_phase_comp(self): + self.enth_mol_phase_comp = Var( + self._params.phase_list, + self._params.component_list, + initialize=7e5, + units=pyunits.J / pyunits.mol, + doc="Phase-component molar specific enthalpies", + ) + + def rule_enth_mol_phase_comp(b, p, j): + if p == "Vap": + return b._enth_mol_comp_vap(j) + else: + return b._enth_mol_comp_liq(j) + + self.eq_enth_mol_phase_comp = Constraint( + self._params.phase_list, + self._params.component_list, + rule=rule_enth_mol_phase_comp, + ) + + def _enth_mol_phase(self): + self.enth_mol_phase = Var( + self._params.phase_list, + initialize=7e5, + units=pyunits.J / pyunits.mol, + doc="Phase molar specific enthalpies", + ) + + def rule_enth_mol_phase(b, p): + return b.enth_mol_phase[p] == 
sum( + b.enth_mol_phase_comp[p, i] * b.mole_frac_phase_comp[p, i] + for i in b._params.component_list + ) + + self.eq_enth_mol_phase = Constraint( + self._params.phase_list, rule=rule_enth_mol_phase + ) + + def _entr_mol_phase_comp(self): + self.entr_mol_phase_comp = Var( + self._params.phase_list, + self._params.component_list, + units=pyunits.J / pyunits.mol / pyunits.K, + doc="Phase-component molar specific entropies", + ) + + def rule_entr_mol_phase_comp(b, p, j): + if p == "Vap": + return b._entr_mol_comp_vap(j) + else: + return b._entr_mol_comp_liq(j) + + self.eq_entr_mol_phase_comp = Constraint( + self._params.phase_list, + self._params.component_list, + rule=rule_entr_mol_phase_comp, + ) + + def _entr_mol_phase(self): + self.entr_mol_phase = Var( + self._params.phase_list, + units=pyunits.J / pyunits.mol / pyunits.K, + doc="Phase molar specific enthropies", + ) + + def rule_entr_mol_phase(b, p): + return b.entr_mol_phase[p] == sum( + b.entr_mol_phase_comp[p, i] * b.mole_frac_phase_comp[p, i] + for i in b._params.component_list + ) + + self.eq_entr_mol_phase = Constraint( + self._params.phase_list, rule=rule_entr_mol_phase + ) + + # ----------------------------------------------------------------------------- + # General Methods + def get_material_flow_terms(self, p, j): + """Create material flow terms for control volume.""" + if not self.is_property_constructed("material_flow_terms"): + try: + + def rule_material_flow_terms(blk, p, j): + return blk.flow_mol_phase_comp[p, j] + + self.material_flow_terms = Expression( + self.params.phase_list, + self.params.component_list, + rule=rule_material_flow_terms, + ) + except AttributeError: + self.del_component(self.material_flow_terms) + + if j in self.params.component_list: + return self.material_flow_terms[p, j] + else: + return 0 + + def get_enthalpy_flow_terms(self, p): + """Create enthalpy flow terms.""" + if not self.is_property_constructed("enthalpy_flow_terms"): + try: + + def rule_enthalpy_flow_terms(blk, p): + return blk.flow_mol_phase[p] * blk.enth_mol_phase[p] + + self.enthalpy_flow_terms = Expression( + self.params.phase_list, rule=rule_enthalpy_flow_terms + ) + except AttributeError: + self.del_component(self.enthalpy_flow_terms) + return self.enthalpy_flow_terms[p] + + def get_material_density_terms(self, p, j): + """Create material density terms.""" + if not self.is_property_constructed("material_density_terms"): + try: + + def rule_material_density_terms(b, p, j): + return self.dens_mol_phase[p] * self.mole_frac_phase_comp[p, j] + + self.material_density_terms = Expression( + self.params.phase_list, + self.params.component_list, + rule=rule_material_density_terms, + ) + except AttributeError: + self.del_component(self.material_density_terms) + + if j in self.params.component_list: + return self.material_density_terms[p, j] + else: + return 0 + + def get_enthalpy_density_terms(self, p): + """Create energy density terms.""" + if not self.is_property_constructed("enthalpy_density_terms"): + try: + + def rule_energy_density_terms(b, p): + return self.dens_mol_phase[p] * self.energy_internal_mol_phase[p] + + self.energy_density_terms = Expression( + self.params.phase_list, rule=rule_energy_density_terms + ) + except AttributeError: + self.del_component(self.energy_density_terms) + return self.enthalpy_density_terms[p] + + def default_material_balance_type(self): + return MaterialBalanceType.componentPhase + + def default_energy_balance_type(self): + return EnergyBalanceType.enthalpyTotal + + def get_material_flow_basis(b): + 
return MaterialFlowBasis.molar + + def define_state_vars(self): + """Define state vars.""" + return { + "flow_mol_phase_comp": self.flow_mol_phase_comp, + "temperature": self.temperature, + "pressure": self.pressure, + } + + # Property package utility functions + def calculate_bubble_point_temperature(self, clear_components=True): + """ "To compute the bubble point temperature of the mixture.""" + + if hasattr(self, "eq_temperature_bubble"): + # Do not delete components if the block already has the components + clear_components = False + + calculate_variable_from_constraint( + self.temperature_bubble, self.eq_temperature_bubble + ) + + return self.temperature_bubble.value + + if clear_components is True: + self.del_component(self.eq_temperature_bubble) + self.del_component(self._p_sat_bubbleT) + self.del_component(self.temperature_bubble) + + def calculate_dew_point_temperature(self, clear_components=True): + """ "To compute the dew point temperature of the mixture.""" + + if hasattr(self, "eq_temperature_dew"): + # Do not delete components if the block already has the components + clear_components = False + + calculate_variable_from_constraint( + self.temperature_dew, self.eq_temperature_dew + ) + + return self.temperature_dew.value + + # Delete the var/constraint created in this method that are part of the + # IdealStateBlock if the user desires + if clear_components is True: + self.del_component(self.eq_temperature_dew) + self.del_component(self._p_sat_dewT) + self.del_component(self.temperature_dew) + + def calculate_bubble_point_pressure(self, clear_components=True): + """ "To compute the bubble point pressure of the mixture.""" + + if hasattr(self, "eq_pressure_bubble"): + # Do not delete components if the block already has the components + clear_components = False + + calculate_variable_from_constraint( + self.pressure_bubble, self.eq_pressure_bubble + ) + + return self.pressure_bubble.value + + # Delete the var/constraint created in this method that are part of the + # IdealStateBlock if the user desires + if clear_components is True: + self.del_component(self.eq_pressure_bubble) + self.del_component(self._p_sat_bubbleP) + self.del_component(self.pressure_bubble) + + def calculate_dew_point_pressure(self, clear_components=True): + """ "To compute the dew point pressure of the mixture.""" + + if hasattr(self, "eq_pressure_dew"): + # Do not delete components if the block already has the components + clear_components = False + + calculate_variable_from_constraint(self.pressure_dew, self.eq_pressure_dew) + + return self.pressure_dew.value + + # Delete the var/constraint created in this method that are part of the + # IdealStateBlock if the user desires + if clear_components is True: + self.del_component(self.eq_pressure_dew) + self.del_component(self._p_sat_dewP) + self.del_component(self.pressure_dew) + + # ----------------------------------------------------------------------------- + # Bubble and Dew Points + # Ideal-Ideal properties allow for the simplifications below + # Other methods require more complex equations with shadow compositions + + # For future work, propose the following: + # Core class writes a set of constraints Phi_L_i == Phi_V_i + # Phi_L_i and Phi_V_i make calls to submethods which add shadow compositions + # as needed + def _temperature_bubble(self): + self.temperature_bubble = Param( + initialize=33.0, units=pyunits.K, doc="Bubble point temperature" + ) + + def _temperature_dew(self): + + self.temperature_dew = Var( + initialize=298.15, units=pyunits.K, 
doc="Dew point temperature" + ) + + def rule_psat_dew(b, j): + return ( + 1e5 + * pyunits.Pa + * 10 + ** ( + b._params.pressure_sat_coeff_A[j] + - b._params.pressure_sat_coeff_B[j] + / (b.temperature_dew + b._params.pressure_sat_coeff_C[j]) + ) + ) + + try: + # Try to build expression + self._p_sat_dewT = Expression( + self._params.component_list, rule=rule_psat_dew + ) + + def rule_temp_dew(b): + return ( + b.pressure + * sum( + b.mole_frac_comp[i] / b._p_sat_dewT[i] + for i in ["toluene", "benzene"] + ) + - 1 + == 0 + ) + + self.eq_temperature_dew = Constraint(rule=rule_temp_dew) + except AttributeError: + # If expression fails, clean up so that DAE can try again later + # Deleting only var/expression as expression construction will fail + # first; if it passes then constraint construction will not fail. + self.del_component(self.temperature_dew) + self.del_component(self._p_sat_dewT) + + def _pressure_bubble(self): + self.pressure_bubble = Param( + initialize=1e8, units=pyunits.Pa, doc="Bubble point pressure" + ) + + def _pressure_dew(self): + self.pressure_dew = Var( + initialize=298.15, units=pyunits.Pa, doc="Dew point pressure" + ) + + def rule_psat_dew(b, j): + return ( + 1e5 + * pyunits.Pa + * 10 + ** ( + b._params.pressure_sat_coeff_A[j] + - b._params.pressure_sat_coeff_B[j] + / (b.temperature + b._params.pressure_sat_coeff_C[j]) + ) + ) + + try: + # Try to build expression + self._p_sat_dewP = Expression( + self._params.component_list, rule=rule_psat_dew + ) + + def rule_pressure_dew(b): + return ( + b.pressure_dew + * sum( + b.mole_frac_comp[i] / b._p_sat_dewP[i] + for i in ["toluene", "benzene"] + ) + - 1 + == 0 + ) + + self.eq_pressure_dew = Constraint(rule=rule_pressure_dew) + except AttributeError: + # If expression fails, clean up so that DAE can try again later + # Deleting only var/expression as expression construction will fail + # first; if it passes then constraint construction will not fail. 
+ self.del_component(self.pressure_dew) + self.del_component(self._p_sat_dewP) + + # ----------------------------------------------------------------------------- + # Liquid phase properties + def _dens_mol_liq(b): + return b.dens_mol_phase["Liq"] == 1e3 * sum( + b.mole_frac_phase_comp["Liq", j] + * b._params.dens_liq_param_1[j] + / b._params.dens_liq_param_2[j] + ** ( + 1 + + (1 - b.temperature / b._params.dens_liq_param_3[j]) + ** b._params.dens_liq_param_4[j] + ) + for j in ["benzene", "toluene"] + ) + + def _fug_phase_comp(self): + def fug_phase_comp_rule(b, p, i): + if p == "Liq": + if i in ["hydrogen", "methane"]: + return b.mole_frac_phase_comp["Liq", i] + else: + return b.pressure_sat[i] * b.mole_frac_phase_comp["Liq", i] + else: + if i in ["hydrogen", "methane"]: + return 1e-6 + else: + return b.mole_frac_phase_comp["Vap", i] * b.pressure + + self.fug_phase_comp = Expression( + self._params.phase_list, + self._params.component_list, + rule=fug_phase_comp_rule, + ) + + def _pressure_sat(self): + self.pressure_sat = Var( + self._params.component_list, + initialize=101325, + units=pyunits.Pa, + doc="Vapor pressure", + ) + + def rule_P_sat(b, j): + return ( + ( + log10(b.pressure_sat[j] / pyunits.Pa * 1e-5) + - b._params.pressure_sat_coeff_A[j] + ) + * (b._teq + b._params.pressure_sat_coeff_C[j]) + ) == -b._params.pressure_sat_coeff_B[j] + + self.eq_pressure_sat = Constraint(self._params.component_list, rule=rule_P_sat) + + def _enth_mol_comp_liq(b, j): + return b.enth_mol_phase_comp["Liq", j] * 1e3 == ( + (b._params.cp_ig_5["Liq", j] / 5) + * (b.temperature**5 - b._params.temperature_ref**5) + + (b._params.cp_ig_4["Liq", j] / 4) + * (b.temperature**4 - b._params.temperature_ref**4) + + (b._params.cp_ig_3["Liq", j] / 3) + * (b.temperature**3 - b._params.temperature_ref**3) + + (b._params.cp_ig_2["Liq", j] / 2) + * (b.temperature**2 - b._params.temperature_ref**2) + + b._params.cp_ig_1["Liq", j] * (b.temperature - b._params.temperature_ref) + ) + + def _entr_mol_comp_liq(b, j): + return b.entr_mol_phase_comp["Liq", j] * 1e3 == ( + ( + (b._params.cp_ig_5["Liq", j] / 4) + * (b.temperature**4 - b._params.temperature_ref**4) + + (b._params.cp_ig_4["Liq", j] / 3) + * (b.temperature**3 - b._params.temperature_ref**3) + + (b._params.cp_ig_3["Liq", j] / 2) + * (b.temperature**2 - b._params.temperature_ref**2) + + b._params.cp_ig_2["Liq", j] + * (b.temperature - b._params.temperature_ref) + + b._params.cp_ig_1["Liq", j] + * log(b.temperature / b._params.temperature_ref) + ) + - const.gas_constant + * log( + b.mole_frac_phase_comp["Liq", j] * b.pressure / b._params.pressure_ref + ) + ) + + # ----------------------------------------------------------------------------- + # Vapour phase properties + def _dens_mol_vap(b): + return b.pressure == ( + b.dens_mol_phase["Vap"] * const.gas_constant * b.temperature + ) + + def _dh_vap(self): + # heat of vaporization + add_object_reference(self, "dh_vap", self._params.dh_vap) + + def _ds_vap(self): + # entropy of vaporization = dh_Vap/T_boil + # TODO : something more rigorous would be nice + self.ds_vap = Var( + self._params.component_list, + initialize=86, + units=pyunits.J / pyunits.mol / pyunits.K, + doc="Entropy of vaporization", + ) + + def rule_ds_vap(b, j): + return b.dh_vap[j] == (b.ds_vap[j] * b._params.temperature_boil[j]) + + self.eq_ds_vap = Constraint(self._params.component_list, rule=rule_ds_vap) + + def _enth_mol_comp_vap(b, j): + return b.enth_mol_phase_comp["Vap", j] == b.dh_vap[j] + ( + (b._params.cp_ig_5["Vap", j] / 5) + * 
(b.temperature**5 - b._params.temperature_ref**5) + + (b._params.cp_ig_4["Vap", j] / 4) + * (b.temperature**4 - b._params.temperature_ref**4) + + (b._params.cp_ig_3["Vap", j] / 3) + * (b.temperature**3 - b._params.temperature_ref**3) + + (b._params.cp_ig_2["Vap", j] / 2) + * (b.temperature**2 - b._params.temperature_ref**2) + + b._params.cp_ig_1["Vap", j] * (b.temperature - b._params.temperature_ref) + ) + + def _entr_mol_comp_vap(b, j): + return b.entr_mol_phase_comp["Vap", j] == ( + b.ds_vap[j] + + ( + (b._params.cp_ig_5["Vap", j] / 4) + * (b.temperature**4 - b._params.temperature_ref**4) + + (b._params.cp_ig_4["Vap", j] / 3) + * (b.temperature**3 - b._params.temperature_ref**3) + + (b._params.cp_ig_3["Vap", j] / 2) + * (b.temperature**2 - b._params.temperature_ref**2) + + b._params.cp_ig_2["Vap", j] + * (b.temperature - b._params.temperature_ref) + + b._params.cp_ig_1["Vap", j] + * log(b.temperature / b._params.temperature_ref) + ) + - const.gas_constant + * log( + b.mole_frac_phase_comp["Vap", j] * b.pressure / b._params.pressure_ref + ) + ) + + def calculate_scaling_factors(self): + # Get default scale factors + super().calculate_scaling_factors() + + is_two_phase = len(self._params.phase_list) == 2 + sf_flow = iscale.get_scaling_factor(self.flow_mol, default=1, warning=True) + sf_T = iscale.get_scaling_factor(self.temperature, default=1, warning=True) + sf_P = iscale.get_scaling_factor(self.pressure, default=1, warning=True) + + if self.is_property_constructed("_teq"): + iscale.set_scaling_factor(self._teq, sf_T) + if self.is_property_constructed("_teq_constraint"): + iscale.constraint_scaling_transform( + self._teq_constraint, sf_T, overwrite=False + ) + + if self.is_property_constructed("_t1"): + iscale.set_scaling_factor(self._t1, sf_T) + if self.is_property_constructed("_t1_constraint"): + iscale.constraint_scaling_transform( + self._t1_constraint, sf_T, overwrite=False + ) + + if self.is_property_constructed("_mole_frac_pdew"): + iscale.set_scaling_factor(self._mole_frac_pdew, 1e3) + iscale.constraint_scaling_transform( + self._sum_mole_frac_pdew, 1e3, overwrite=False + ) + + if self.is_property_constructed("total_flow_balance"): + iscale.constraint_scaling_transform( + self.total_flow_balance, sf_flow, overwrite=False + ) + + if self.is_property_constructed("component_flow_balances"): + for i, c in self.component_flow_balances.items(): + if is_two_phase: + s = iscale.get_scaling_factor( + self.mole_frac_comp[i], default=1, warning=True + ) + s *= sf_flow + iscale.constraint_scaling_transform(c, s, overwrite=False) + else: + s = iscale.get_scaling_factor( + self.mole_frac_comp[i], default=1, warning=True + ) + iscale.constraint_scaling_transform(c, s, overwrite=False) + + if self.is_property_constructed("dens_mol_phase"): + for c in self.eq_dens_mol_phase.values(): + iscale.constraint_scaling_transform(c, sf_P, overwrite=False) + + if self.is_property_constructed("dens_mass_phase"): + for p, c in self.eq_dens_mass_phase.items(): + sf = iscale.get_scaling_factor( + self.dens_mass_phase[p], default=1, warning=True + ) + iscale.constraint_scaling_transform(c, sf, overwrite=False) + + if self.is_property_constructed("enth_mol_phase"): + for p, c in self.eq_enth_mol_phase.items(): + sf = iscale.get_scaling_factor( + self.enth_mol_phase[p], default=1, warning=True + ) + iscale.constraint_scaling_transform(c, sf, overwrite=False) + + if self.is_property_constructed("enth_mol"): + sf = iscale.get_scaling_factor(self.enth_mol, default=1, warning=True) + sf *= sf_flow + 
iscale.constraint_scaling_transform(self.eq_enth_mol, sf, overwrite=False) + + if self.is_property_constructed("entr_mol_phase"): + for p, c in self.eq_entr_mol_phase.items(): + sf = iscale.get_scaling_factor( + self.entr_mol_phase[p], default=1, warning=True + ) + iscale.constraint_scaling_transform(c, sf, overwrite=False) + + if self.is_property_constructed("entr_mol"): + sf = iscale.get_scaling_factor(self.entr_mol, default=1, warning=True) + sf *= sf_flow + iscale.constraint_scaling_transform(self.eq_entr_mol, sf, overwrite=False) + + if self.is_property_constructed("gibbs_mol_phase"): + for p, c in self.eq_gibbs_mol_phase.items(): + sf = iscale.get_scaling_factor( + self.gibbs_mol_phase[p], default=1, warning=True + ) + iscale.constraint_scaling_transform(c, sf, overwrite=False) diff --git a/docs/examples/structfs/hda_reaction.py b/docs/examples/structfs/hda_reaction.py new file mode 100644 index 0000000000..bade0736f7 --- /dev/null +++ b/docs/examples/structfs/hda_reaction.py @@ -0,0 +1,182 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2022 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# +""" +Property package for the hydrodealkylation of toluene to form benzene +""" + +# Import Python libraries +import logging + +# Import Pyomo libraries +from pyomo.environ import Constraint, exp, Set, Var, Param, units as pyunits + +# Import IDAES cores +from idaes.core import ( + declare_process_block_class, + MaterialFlowBasis, + ReactionParameterBlock, + ReactionBlockDataBase, + ReactionBlockBase, +) +from idaes.core.util.constants import Constants as const +from idaes.core.util.misc import add_object_reference + +# Set up logger +_log = logging.getLogger(__name__) + + +@declare_process_block_class("HDAReactionParameterBlock") +class HDAReactionParameterData(ReactionParameterBlock): + """ + Property Parameter Block Class + Contains parameters and indexing sets associated with properties for + superheated steam. + """ + + def build(self): + """ + Callable method for Block construction. 
+ """ + super(HDAReactionParameterData, self).build() + + self._reaction_block_class = HDAReactionBlock + + # List of valid phases in property package + self.phase_list = Set(initialize=["Vap"]) + + # Component list - a list of component identifiers + self.component_list = Set( + initialize=["benzene", "toluene", "hydrogen", "methane"] + ) + + # Reaction Index + self.rate_reaction_idx = Set(initialize=["R1"]) + + # Reaction Stoichiometry + self.rate_reaction_stoichiometry = { + ("R1", "Vap", "benzene"): 1, + ("R1", "Vap", "toluene"): -1, + ("R1", "Vap", "hydrogen"): -1, + ("R1", "Vap", "methane"): 1, + ("R1", "Liq", "benzene"): 0, + ("R1", "Liq", "toluene"): 0, + ("R1", "Liq", "hydrogen"): 0, + ("R1", "Liq", "methane"): 0, + } + + # Arrhenius Constant + self.arrhenius = Var( + initialize=6.3e10, + units=pyunits.mol * pyunits.m**-3 * pyunits.s**-1 * pyunits.Pa**-1, + doc="Arrhenius pre-exponential factor", + ) + self.arrhenius.fix() + + # Activation Energy + self.energy_activation = Var( + initialize=217.6e3, units=pyunits.J / pyunits.mol, doc="Activation energy" + ) + self.energy_activation.fix() + + # Heat of Reaction + dh_rxn_dict = {"R1": -1.08e5} + self.dh_rxn = Param( + self.rate_reaction_idx, + initialize=dh_rxn_dict, + units=pyunits.J / pyunits.mol, + doc="Heat of reaction", + ) + + @classmethod + def define_metadata(cls, obj): + obj.add_properties( + { + "k_rxn": {"method": None, "units": "m^3/mol.s"}, + "reaction_rate": {"method": None, "units": "mol/m^3.s"}, + } + ) + obj.add_default_units( + { + "time": pyunits.s, + "length": pyunits.m, + "mass": pyunits.kg, + "amount": pyunits.mol, + "temperature": pyunits.K, + } + ) + + +class ReactionBlock(ReactionBlockBase): + """ + This Class contains methods which should be applied to Reaction Blocks as a + whole, rather than individual elements of indexed Reaction Blocks. + """ + + def initialize(blk, outlvl=0, **kwargs): + """ + Initialization routine for reaction package. 
+ Keyword Arguments: + outlvl : sets output level of initialization routine + * 0 = no output (default) + * 1 = report after each step + Returns: + None + """ + if outlvl > 0: + _log.info("{} Initialization Complete.".format(blk.name)) + + +@declare_process_block_class("HDAReactionBlock", block_class=ReactionBlock) +class HDAReactionBlockData(ReactionBlockDataBase): + """ + An example reaction package for saponification of ethyl acetate + """ + + def build(self): + """ + Callable method for Block construction + """ + super(HDAReactionBlockData, self).build() + + # Heat of reaction - no _ref as this is the actual property + add_object_reference(self, "dh_rxn", self.config.parameters.dh_rxn) + + self.k_rxn = Var( + initialize=0.2, + units=pyunits.mol * pyunits.m**-3 * pyunits.s**-1 * pyunits.Pa**-1, + ) + + self.reaction_rate = Var( + self.params.rate_reaction_idx, + initialize=1, + units=pyunits.mol / pyunits.m**3 / pyunits.s, + ) + + self.arrhenus_equation = Constraint( + expr=self.k_rxn + == self.params.arrhenius + * exp( + -self.params.energy_activation + / (const.gas_constant * self.state_ref.temperature) + ) + ) + + self.rate_expression = Constraint( + expr=self.reaction_rate["R1"] + == self.k_rxn + * self.state_ref.pressure + * self.state_ref.mole_frac_phase_comp["Vap", "toluene"] + ) + + def get_reaction_rate_basis(b): + return MaterialFlowBasis.molar diff --git a/docs/examples/structfs/index.rst b/docs/examples/structfs/index.rst new file mode 100644 index 0000000000..ed33735a47 --- /dev/null +++ b/docs/examples/structfs/index.rst @@ -0,0 +1,8 @@ +Structured Flowsheet Examples +============================= + +.. toctree:: + :maxdepth: 1 + + flash_flowsheet_nb + hda_flowsheet_nb \ No newline at end of file diff --git a/docs/how_to_guides/index.rst b/docs/how_to_guides/index.rst index e2bd73ea7b..552e6b1f94 100644 --- a/docs/how_to_guides/index.rst +++ b/docs/how_to_guides/index.rst @@ -7,4 +7,5 @@ How-To-Guides custom_models/general_model_development workflow/index opt_dependencies - versioned_idaes_install \ No newline at end of file + versioned_idaes_install + structfs/index \ No newline at end of file diff --git a/docs/how_to_guides/structfs/index.md b/docs/how_to_guides/structfs/index.md new file mode 100644 index 0000000000..0f3f58af6a --- /dev/null +++ b/docs/how_to_guides/structfs/index.md @@ -0,0 +1,6 @@ +# Flowsheet Runner + +```{autodoc2-docstring} structfs +:parser: myst +``` + diff --git a/docs/index.rst b/docs/index.rst index 8f8f4f290b..81a2ca0d8d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -44,6 +44,7 @@ Contents | :doc:`Setting up IDAES Models ` | :doc:`Developing Custom Models ` | :doc:`Installing Specific IDAES Versions ` + | :doc:`Creating and using the FlowsheetRunner interface to a flowsheet ` * - Explanations | :doc:`Why IDAES ` | :doc:`Concepts ` @@ -75,6 +76,7 @@ Contents tutorials/index how_to_guides/index explanations/index + examples/index reference_guides/index archived_features/index explanations/faq diff --git a/docs/reference_guides/core/util/index.rst b/docs/reference_guides/core/util/index.rst index 384bbf3a50..402e60f35b 100644 --- a/docs/reference_guides/core/util/index.rst +++ b/docs/reference_guides/core/util/index.rst @@ -13,6 +13,7 @@ model_statistics phase_equilibria scaling + Structured flowsheets tables tags utility_minimization diff --git a/idaes/commands/run_flowsheet.py b/idaes/commands/run_flowsheet.py new file mode 100644 index 0000000000..b43871c862 --- /dev/null +++ b/idaes/commands/run_flowsheet.py @@ -0,0 +1,20 @@ 
+################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Command to run a given flowsheet +""" + +from idaes.core.util.structfs.runner_cli import main + +if __name__ == "__main__": + main() diff --git a/idaes/core/base/flowsheet_model.py b/idaes/core/base/flowsheet_model.py index ed60cbeba8..e5f8a1cb1b 100644 --- a/idaes/core/base/flowsheet_model.py +++ b/idaes/core/base/flowsheet_model.py @@ -75,7 +75,6 @@ def __init__(self): self.installed = False else: import idaes_ui - self.visualize = idaes_ui.fv.visualize self.installed = True diff --git a/idaes/core/io/idaes/core/io/pyomo_model_state_pb2.py b/idaes/core/io/idaes/core/io/pyomo_model_state_pb2.py new file mode 100644 index 0000000000..4cbe7aefc5 --- /dev/null +++ b/idaes/core/io/idaes/core/io/pyomo_model_state_pb2.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: idaes/core/io/pyomo_model_state.proto +# Protobuf Python Version: 4.25.6 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n%idaes/core/io/pyomo_model_state.proto"V\n\x08Metadata\x12\x16\n\x0e\x66ormat_version\x18\x01 \x01(\t\x12\x0f\n\x07\x63reated\x18\x02 \x01(\x02\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\x12\x11\n\tcoauthors\x18\x04 \x03(\t"\xdf\x02\n\x05\x42lock\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x04type\x18\x02 \x01(\x0e\x32\n.BlockType\x12\x0e\n\x06parent\x18\x03 \x01(\x05\x12\x1e\n\nindex_type\x18\x04 \x01(\x0e\x32\n.IndexType\x12\x30\n\x0eindexed_data_f\x18\x05 \x03(\x0b\x32\x18.Block.IndexedDataFEntry\x12\x30\n\x0eindexed_data_s\x18\x06 \x03(\x0b\x32\x18.Block.IndexedDataSEntry\x12\x18\n\x04\x64\x61ta\x18\x07 \x01(\x0b\x32\n.BlockData\x1a?\n\x11IndexedDataFEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.BlockData:\x02\x38\x01\x1a?\n\x11IndexedDataSEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.BlockData:\x02\x38\x01"P\n\tBlockData\x12\r\n\x05value\x18\x01 \x01(\x01\x12\r\n\x05\x66ixed\x18\x02 \x01(\x08\x12\r\n\x05stale\x18\x03 \x01(\x08\x12\n\n\x02lb\x18\x04 \x01(\x01\x12\n\n\x02ub\x18\x05 \x01(\x01"<\n\x05Model\x12\x1b\n\x08metadata\x18\x01 \x01(\x0b\x32\t.Metadata\x12\x16\n\x06\x62locks\x18\x02 \x03(\x0b\x32\x06.Block*B\n\tBlockType\x12\t\n\x05\x42LOCK\x10\x00\x12\x07\n\x03VAR\x10\x01\x12\t\n\x05PARAM\x10\x02\x12\n\n\x06SUFFIX\x10\x03\x12\n\n\x06\x43ONFIG\x10\x04*,\n\tIndexType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06STRING\x10\x01\x12\t\n\x05\x46LOAT\x10\x02\x62\x06proto3' +) + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages( + DESCRIPTOR, "idaes.core.io.pyomo_model_state_pb2", _globals +) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals["_BLOCK_INDEXEDDATAFENTRY"]._options = None + _globals["_BLOCK_INDEXEDDATAFENTRY"]._serialized_options = b"8\001" + _globals["_BLOCK_INDEXEDDATASENTRY"]._options = None + _globals["_BLOCK_INDEXEDDATASENTRY"]._serialized_options = b"8\001" + _globals["_BLOCKTYPE"]._serialized_start = 627 + _globals["_BLOCKTYPE"]._serialized_end = 693 + _globals["_INDEXTYPE"]._serialized_start = 695 + _globals["_INDEXTYPE"]._serialized_end = 739 + _globals["_METADATA"]._serialized_start = 41 + _globals["_METADATA"]._serialized_end = 127 + _globals["_BLOCK"]._serialized_start = 130 + _globals["_BLOCK"]._serialized_end = 481 + _globals["_BLOCK_INDEXEDDATAFENTRY"]._serialized_start = 353 + _globals["_BLOCK_INDEXEDDATAFENTRY"]._serialized_end = 416 + _globals["_BLOCK_INDEXEDDATASENTRY"]._serialized_start = 418 + _globals["_BLOCK_INDEXEDDATASENTRY"]._serialized_end = 481 + _globals["_BLOCKDATA"]._serialized_start = 483 + _globals["_BLOCKDATA"]._serialized_end = 563 + _globals["_MODEL"]._serialized_start = 565 + _globals["_MODEL"]._serialized_end = 625 +# @@protoc_insertion_point(module_scope) diff --git a/idaes/core/io/tests/__init__.py b/idaes/core/io/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/core/scaling/custom_scaler_base.py 
b/idaes/core/scaling/custom_scaler_base.py index 06672e1204..fddf9725e3 100644 --- a/idaes/core/scaling/custom_scaler_base.py +++ b/idaes/core/scaling/custom_scaler_base.py @@ -367,7 +367,7 @@ def _scale_component_by_default( sf = self.get_scaling_factor(component) if sf is None or overwrite: # If the user told us to overwrite scaling factors, then - # accepting a preexisiting scaling factor is not good enough. + # accepting a preexisting scaling factor is not good enough. # They need to go manually alter the default entry to # DefaultScalingRecommendation.userInputRecommended raise ValueError( diff --git a/idaes/core/util/doctesting.py b/idaes/core/util/doctesting.py new file mode 100644 index 0000000000..bdd5764bd4 --- /dev/null +++ b/idaes/core/util/doctesting.py @@ -0,0 +1,120 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Utility code for documentation-related tests. +""" + +__author__ = "Dan Gunter (LBNL)" + +import re + + +class Docstring: + """Pick the marked code sections out of a docstring. + + This is useful for (at least) writing tests that use the code + snippets in a markdown (e.g. myst and with autodoc2) docstring, + without duplicating those code snippets across the original module + and test file. + """ + + # options on 'code' directive that can provide the name + LABEL_OPTION = ":name:" + CAPTION_OPTION = ":caption:" + + def __init__(self, text: str, style: str = "markdown"): + self._code = {} + if style == "markdown": + self._init_markdown(text) + # TODO: also handle RST + else: + raise ValueError( + f"Unknown docstring style: {style}. " f"Must be one of: markdown" + ) + + def code(self, section: str, func_prefix: str = None) -> str: + """Get code section. + + Args: + section: Label for the section + func_prefix: Add this prefix to all top-level + function definitions. If not provided, + the prefix will be the section name and an + underscore. Give an empty string to + have no prefixes. + + Returns: + Text of the code section, for exec() + """ + if func_prefix is None: + func_prefix = f"{section}_" + section_lines = self._prefix_def(self._code[section], func_prefix) + return "\n".join(section_lines) + + def _prefix_def(self, lines: list[str], prefix: str): + # Note that this will only match function definitions at + # module scope (not methods in a class, which are already, + # presumably, in a unique-enough namespace). 
+        expr = re.compile(r"def\s+(?P<name>\w+)")
+        result = []
+        for line in lines:
+            m = expr.match(line)
+            if m:
+                # prepend prefix to function name
+                pos = m.start(1)
+                line = line[:pos] + prefix + line[pos:]
+            result.append(line)
+        return result
+
+    def _init_markdown(self, text: str):
+        lines = text.split("\n")
+        state = 0
+        sec_num = 1
+        for line in lines:
+            ls_line = line.lstrip()
+            if state == 0 and ls_line.startswith("```{code}"):
+                indent = len(line) - len(ls_line)  # spaces to left
+                state = 1
+                section_name = None
+                section_lines = []
+            elif state > 0:
+                if ls_line.startswith("```"):
+                    if section_name is None:
+                        pass  # silently ignore
+                    elif len(section_lines) == 0:
+                        pass  # silently ignore
+                    else:
+                        self._code[section_name] = section_lines
+                    state = 0
+                elif state == 1:
+                    if ls_line.startswith(self.LABEL_OPTION):
+                        offset = len(self.LABEL_OPTION) + 1
+                        section_name = ls_line[offset:].strip()
+                    elif section_name is None and ls_line.startswith(
+                        self.CAPTION_OPTION
+                    ):
+                        # only use caption if no label
+                        offset = len(self.CAPTION_OPTION) + 1
+                        section_name = ls_line[offset:].strip()
+                    elif ls_line == "" or ls_line.startswith(":"):
+                        pass
+                    else:
+                        state = 2  # got past metadata
+                        if section_name is None:
+                            section_name = f"section{sec_num}"
+                            sec_num += 1
+                        else:
+                            section_name = section_name.replace(" ", "-").lower()
+                        section_lines.append(line[indent:].rstrip())
+                else:
+                    section_lines.append(line[indent:].rstrip())
diff --git a/idaes/core/util/structfs/__init__.py b/idaes/core/util/structfs/__init__.py
new file mode 100644
index 0000000000..34dd2ae058
--- /dev/null
+++ b/idaes/core/util/structfs/__init__.py
@@ -0,0 +1,238 @@
+#################################################################################
+# The Institute for the Design of Advanced Energy Systems Integrated Platform
+# Framework (IDAES IP) was produced under the DOE Institute for the
+# Design of Advanced Energy Systems (IDAES).
+#
+# Copyright (c) 2018-2026 by the software owners: The Regents of the
+# University of California, through Lawrence Berkeley National Laboratory,
+# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
+# University, West Virginia University Research Corporation, et al.
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
+# for full copyright and license information.
+#################################################################################
+'''
+# Structured flowsheet runner API
+
+The *struct*ured *f*low*s*heet runner is an API in the
+{py:mod}`structfs` subpackage, and in
+particular that package's {py:mod}`runner ` and
+{py:mod}`fsrunner ` modules.
+
+## Overview
+
+The core idea of the
+{py:class}`FlowsheetRunner ` class is
+that flowsheets should follow a standard set of "steps". By standardizing the
+naming and ordering of these steps, it becomes easier to build tools that run
+and inspect flowsheets. The Python mechanics of this are to put each step in a
+function and wrap that function with a decorator. The decorator uses a string to
+indicate which standard step the function implements.
+
+Once these functions are defined, the API can be used to execute and inspect a
+wrapped flowsheet.
+
+The framework can perform arbitrary actions before and after each run,
+and before and after a given set of steps. This is implemented with
+the {py:class}`Action ` class
+and methods `add_action`, `get_action`, and `remove_action` on the base
+{py:class}`Runner ` class.
+More details are given below in the Actions section.
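+
+As a quick orientation before the full Flash example in Step 1, the sketch below
+shows just the mechanics described above: wrap each step function with the
+`step()` decorator and execute a range of steps with `run_steps()`. The step
+bodies here are placeholders rather than a real flowsheet.
+
+```{code} python
+:name: overview-sketch
+
+from idaes.core.util.structfs.fsrunner import FlowsheetRunner
+
+FS = FlowsheetRunner()
+
+@FS.step("build")
+def build(ctx):
+    # the runner pre-creates ctx.model as a ConcreteModel with an empty
+    # FlowsheetBlock (ctx.model.fs); populate it here, or assign your own
+    # model to ctx.model
+    pass
+
+@FS.step("set_operating_conditions")
+def set_operating_conditions(ctx):
+    pass  # fix feed conditions on ctx.model here
+
+# run every defined step from "build" through "set_operating_conditions";
+# steps with no definition are skipped
+FS.run_steps(first="build", last="set_operating_conditions")
+```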
+ +## Step 1: Define flowsheet + +It is assumed here that you have Python code to build, configure, and run an +IDAES flowsheet. You will first arrange this code to follow the standard "steps" +of a flowsheet workflow, which are listed in the +{py:class}`BaseFlowsheetRunner ` +class' `STEPS` attribute. Not all the steps need to be defined: the API will +skip over steps with no definition when executing a range of steps. To make the +code more structured you can also define internal sub-steps, as described later. + +The set of defined steps is: + +* build - create the flowsheet +* set_operating_conditions - set initial variable values +* set_scaling - set scaling +* initialize - initialize the flowsheet +* set_solver - choose the solver +* solve_initial - perform an initial (square problem) solve +* add_costing - add costing information (if any) +* check_model_structure - check the model for structural issues +* initialize_costing - initialize costing variables +* solve_optimization - setup and solve the optimization problem +* check_model_numerics - check the model for numerical issues + +### Example: Flash flowsheet + +This is illustrated below with a before/after of an extremely simple flowsheet +with a single Flash unit model. + +#### Before + +For now, let's assume this flowsheet uses only four of the standard steps: +"build", "set_operating_conditions", "initialize", and "solve_optimization". +Let's also assume you have four functions defined that correspond to these +steps. Below is a sample flowsheet (for a single Flash unit) that we will use as +an example: + +```{code} python +:name: before + +from pyomo.environ import ConcreteModel, SolverFactory, SolverStatus +from idaes.core import FlowsheetBlock +from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import ( + BTXParameterBlock, +) +from idaes.models.unit_models import Flash + +def build_model(): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + m.fs.properties = BTXParameterBlock( + valid_phase=("Liq", "Vap"), activity_coeff_model="Ideal", state_vars="FTPz" + ) + m.fs.flash = Flash(property_package=m.fs.properties) + return m + + +def set_operating_conditions(m): + m.fs.flash.inlet.flow_mol.fix(1) + m.fs.flash.inlet.temperature.fix(368) + m.fs.flash.inlet.pressure.fix(101325) + m.fs.flash.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.flash.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.flash.heat_duty.fix(0) + m.fs.flash.deltaP.fix(0) + + +def init_model(m): + m.fs.flash.initialize() + + +def solve(m): + solver = SolverFactory("ipopt") + return solver.solve(m, tee=True) + +``` + +#### After + +In order to make this into a +{py:class}`FlowsheetRunner `-wrapped +flowsheet, we need to do make a few changes. The modified file is shown below, +with changed lines highlighted and descriptions below. 
+ +```{code} +:name: after +:linenos: +:emphasize-lines: 7,9, 11, 25, 37, 43, 48, 12, 26, 38, 44, 49, 23, 28, 40, 44, 45, 46 + +from pyomo.environ import ConcreteModel, SolverFactory +from idaes.core import FlowsheetBlock +import idaes.logger as idaeslog +from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE \ +import ( BTXParameterBlock, ) +from idaes.models.unit_models import Flash + +from idaes.core.util.structfs.fsrunner import FlowsheetRunner + +FS = FlowsheetRunner() + +@FS.step("build") +def build_model(ctx): + """Build the model.""" + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + m.fs.properties = BTXParameterBlock( + valid_phase=("Liq", "Vap"), + activity_coeff_model="Ideal", + state_vars="FTPz" + ) + m.fs.flash = Flash(property_package=m.fs.properties) + # assert degrees_of_freedom(m) == 7 + ctx.model = m + +@FS.step("set_operating_conditions") +def set_operating_conditions(ctx): + """Set operating conditions.""" + m = ctx.model + m.fs.flash.inlet.flow_mol.fix(1) + m.fs.flash.inlet.temperature.fix(368) + m.fs.flash.inlet.pressure.fix(101325) + m.fs.flash.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.flash.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.flash.heat_duty.fix(0) + m.fs.flash.deltaP.fix(0) + +@FS.step("initialize") +def init_model(ctx): + """ "Initialize the model.""" + m = ctx.model + m.fs.flash.initialize() + +@FS.step("set_solver") +def set_solver(ctx): + """Set the solver.""" + ctx.solver = SolverFactory("ipopt") + +@FS.step("solve_optimization") +def solve_opt(ctx): + ctx["results"] = ctx.solver.solve(ctx.model, tee=ctx["tee"]) +``` + +Details on the changes: + +* **7**: Import the FlowsheetRunner class. +* **9**: Create a global {py:class}`FlowsheetRunner ` object, here called `FS`. +* **11, 25, 37, 43, 48**: Add a `@FS.step()` decorator in front of each function + with the name of the associated step. +* **12, 26, 38, 44, 49**: Make each function take a single argument which is a {py:class}`fsrunner.Context ` instance used to + pass state information between functions (here, that argument is named `ctx`). +* **23**: Assign the model created in the "build" step to `ctx.model`, a + standard attribute of the context object. +* **28, 40**: Replace the direct passing of the model object (in this case, + called `m`) with a context object that has a `.model` attribute. +* **44-46**: Add a function for the `set_solver` step, to select the solver + (here, IPOPT). +* **46**: In the "solve_optimization" step, assign the solver result to + `ctx["results"]`. + +## Step 2: Execute and inspect + +Once the flowsheet has been 'wrapped' in the flowsheet runner interface, it can +be run and manipulated via the wrapper object. The basic steps to do this are: +import the flowsheet-runner object, build and execute the flowsheet, and inspect +the flowsheet. + +For example to run all the steps and get the status of the solve, you +could do this: + +```{code} +FS.run_steps() +assert FS.results.solver.status == SolverStatus.ok +``` + +Some more examples of using the FlowsheetRunner are shown in the +example notebooks found under the `docs/examples/structfs` directory +([docs link](/examples/structfs/index)). + +## Actions + +```{autodoc2-docstring} structfs.runner.Action +``` + +## Annotation + +You can also 'annotate' variables for special +treatment in display, etc. with the +`annotate_var` function in the +{py:class}`FlowsheetRunner ` class. 
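+
+For example, in the Flash flowsheet above you could tag the flash heat duty
+inside the "set_operating_conditions" step, so that tools built on top of the
+runner can find and display it later (a minimal sketch; the key and title are
+arbitrary):
+
+```{code} python
+:name: annotate-flash-sketch
+
+@FS.step("set_operating_conditions")
+def set_operating_conditions(ctx):
+    m = ctx.model
+    # annotate_var returns the variable, so the call can be chained with fix()
+    FS.annotate_var(
+        m.fs.flash.heat_duty[0], key="Q_flash", title="Flash heat duty"
+    ).fix(0)
+
+# once the step has run, the annotations are available as a dict
+print(FS.annotated_vars)
+```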
+ +```{autodoc2-object} structfs.fsrunner.FlowsheetRunner.annotate_var +``` + +```{autodoc2-docstring} structfs.fsrunner.FlowsheetRunner.annotate_var +:parser: myst +``` + +''' diff --git a/idaes/core/util/structfs/fsrunner.py b/idaes/core/util/structfs/fsrunner.py new file mode 100644 index 0000000000..c0e541def5 --- /dev/null +++ b/idaes/core/util/structfs/fsrunner.py @@ -0,0 +1,323 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Specialize the generic `Runner` class to running a flowsheet, +in `FlowsheetRunner`. +""" +# stdlib + +# third-party +from pyomo.environ import ConcreteModel +from pyomo.environ import units as pyunits +from idaes.core import FlowsheetBlock + +try: + from idaes_connectivity import Connectivity + from idaes_connectivity.jupyter import display_connectivity +except ImportError: + Connectivity = None + +# package +from .runner import Runner + + +class Context(dict): + """Syntactic sugar for the dictionary for the 'context' passed into each + step of the `FlowsheetRunner` class. + """ + + @property + def model(self): + """The model being run.""" + return self["model"] + + @model.setter + def model(self, value): + """The model being run.""" + self["model"] = value + + @property + def solver(self): + """The solver used to solve the model.""" + return self["solver"] + + @solver.setter + def solver(self, value): + """The solver used to solve the model.""" + self["solver"] = value + + +class BaseFlowsheetRunner(Runner): + """Specialize the base `Runner` to handle IDAES flowsheets. + + This class pre-determine the name and order of steps to run + + Attributes: + STEPS: List of defined step names. + """ + + STEPS = ( + "build", + "set_operating_conditions", + "set_scaling", + "initialize", + "set_solver", + "solve_initial", + "add_costing", + "check_model_structure", + "initialize_costing", + "solve_optimization", + "check_model_numerics", + ) + + def __init__(self, solver=None, tee=True): + self.build_step = self.STEPS[0] + self._solver, self._tee = solver, tee + self._ann = {} + super().__init__(self.STEPS) # needs to be last + + def run_steps( + self, + first: str = Runner.STEP_ANY, + last: str = Runner.STEP_ANY, + before=None, + after=None, + **kwargs, + ): + """Run the steps. + + Before it calls the superclass to run the steps, checks + if the step name matches the `build_step` attribute and, + if so, creates an empty Pyomo ConcreteModel to use as + the base model for the flowsheet. 
+ """ + from_step_name = self.normalize_name(first) + if ( + from_step_name == "-" + or from_step_name == self.build_step + or self._context.model is None + ): + self._context.model = self._create_model() + + # replace first/last with before/after, if present + if before is not None: + kwargs["before"] = before + last = "" + if after is not None: + kwargs["after"] = after + first = "" + + super().run_steps(first, last, **kwargs) + + def reset(self): + self._context = Context(solver=self._solver, tee=self._tee, model=None) + + def _create_model(self): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + return m + + @property + def model(self): + """Syntactic sugar to return the model.""" + return self._context.model + + @property + def results(self): + """Syntactic sugar to return the `results` in the context.""" + return self._context["results"] + + def annotate_var( + self, + variable: object, + key: str = None, + title: str = None, + desc: str = None, + units: str = None, + rounding: int = 3, + is_input: bool = True, + is_output: bool = True, + input_category: str = "main", + output_category: str = "main", + ) -> object: + """Annotate a Pyomo variable. + + Args: + + - variable: Pyomo variable being annotated + - key: Key for this block in dict. Defaults to object name. + - title: Name / title of the block. Defaults to object name. + - desc: Description of the block. Defaults to object name. + - units: Units. Defaults to string value of native units. + - rounding: Significant digits + - is_input: Is this variable an input + - is_output: Is this variable an output + - input_category: Name of input grouping to display under + - output_category: Name of output grouping to display under + + Returns: + + - Input block (for chaining) + + Raises: + + - ValueError: if `is_input` and `is_output` are both False + + ### Example + + Here is an example of annotating a single Pyomo variable. + You can apply this same technique to any Pyomo, and thus IDAES, + object. 
+ + ```{code} + :name: annotate_vars + from idaes.core.util.structfs.fsrunner import FlowsheetRunner + from pyomo.environ import * + + def example(f: FlowsheetRunner): + v = Var() + v.construct() + f.annotate_var(v, key="example", title="Example variable").fix(1) + ``` + + To retrieve the annotated variables, use the `annotated_vars` + property: + + ```{code} + example(fr := FlowsheetRunner()) + print(fr.annotated_vars) + # prints something like this: + # {'example': {'var': , + # 'fullname': 'ScalarVar', 'title': 'Example variable', + # 'description': 'ScalarVar', 'units': 'dimensionless', + # 'rounding': 3, 'is_input': True, 'is_output': True, 'input_category': 'main', + # 'output_category': 'main'}} + ``` + + """ + if not is_input and not is_output: + raise ValueError("One of 'is_input', 'is_output' must be True") + + qual_name = variable.name + key = key or variable.name + + self._ann[key] = { + "var": variable, + "fullname": qual_name, + "title": title or qual_name, + "description": desc or qual_name, + "units": units or str(pyunits.get_units(variable)), + "rounding": rounding, + "is_input": is_input, + "is_output": is_output, + "input_category": input_category, + "output_category": output_category, + } + + return variable + + @property + def annotated_vars(self) -> dict[str,]: + """Get (a copy of) the annotated variables.""" + return self._ann.copy() + + +class FlowsheetRunner(BaseFlowsheetRunner): + """Interface for running and inspecting IDAES flowsheets.""" + + class DegreesOfFreedom: + """Wrapper for the UnitDofChecker action""" + + def __init__(self, runner): + from .runner_actions import UnitDofChecker # pylint: disable=C0415 + + # check DoF after build, initial solve, and optimization solve + self._a = runner.add_action( + "degrees_of_freedom", + UnitDofChecker, + "fs", + ["build", "solve_initial", "solve_optimization"], + ) + self._rnr = runner + + def model(self): + """Get the model.""" + return self._a.get_dof_model() + + def __getattr__(self, name): + """Naming the step prints a summary of that step.""" + if name not in set(self._rnr.list_steps()): + raise AttributeError(f"No step named '{name}'") + self._a.summary(step=name) + + def __str__(self): + return self._a.summary(stream=None) + + def _ipython_display_(self): + self._a.summary() + + class Timings: + """Wrapper for the Timer action""" + + def __init__(self, runner): + from .runner_actions import Timer # pylint: disable=C0415 + + self._a: Timer = runner.add_action("timings", Timer) + + @property + def values(self) -> list[dict]: + """Get timing values.""" + return self._a.get_history() + + @property + def history(self) -> str: + """Get a text report of the timing history""" + h = [] + for i in range(len(self._a)): + h.append(f"== Run {i + 1} ==") + h.append("") + h.append(self._a.summary(run_idx=i)) + return "\n".join(h) + + def __str__(self): + return self._a.summary() + + def _ipython_display_(self): + self._a._ipython_display_() # pylint: disable=protected-access + + def __init__(self, **kwargs): + from .runner_actions import ( # pylint: disable=C0415 + CaptureSolverOutput, + ModelVariables, + ) + + super().__init__(**kwargs) + self.dof = self.DegreesOfFreedom(self) + self.timings = self.Timings(self) + self.add_action("capture_solver_output", CaptureSolverOutput) + self.add_action("model_variables", ModelVariables) + + def build(self): + """Run just the build step""" + self.run_step("build") + + def solve_initial(self): + """Perform all steps up to 'solve_initial'""" + self.run_steps(last="solve_initial") + + 
def show_diagram(self): + """Return the diagram.""" + if Connectivity is not None: + return display_connectivity(input_model=self.model) + else: + return "" diff --git a/idaes/core/util/structfs/logutil.py b/idaes/core/util/structfs/logutil.py new file mode 100644 index 0000000000..ec0e333da2 --- /dev/null +++ b/idaes/core/util/structfs/logutil.py @@ -0,0 +1,46 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Utility functions for logging +""" + +import logging +import warnings + +g_quiet = {} + + +def quiet(roots=("idaes", "pyomo"), level=logging.CRITICAL): + """Be very quiet. I'm hunting wabbits. + + Ignore warnings and set all loggers starting with one of + the values in 'roots' to the given level (default=CRITICAL). + """ + warnings.filterwarnings("ignore") + all_loggers = [logging.getLogger()] + [ + logging.getLogger(name) for name in logging.root.manager.loggerDict + ] + for lg in all_loggers: + for root in roots: + if lg.name.startswith(root + "."): + g_quiet[lg.name] = lg.level + lg.setLevel(level) + + +def unquiet(): + """Reverse previous quiet()""" + for k in list(g_quiet.keys()): + v = g_quiet[k] + lg = logging.getLogger(k) + lg.setLevel(v) + del g_quiet[k] diff --git a/idaes/core/util/structfs/runner.py b/idaes/core/util/structfs/runner.py new file mode 100644 index 0000000000..f39c59aaee --- /dev/null +++ b/idaes/core/util/structfs/runner.py @@ -0,0 +1,532 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Run functions in a module in a defined, named, sequence. 
+""" + +# stdlib +from abc import ABC, abstractmethod +import logging +from typing import Callable, Optional, Tuple, Sequence, TypeVar + +# third party +from pydantic import BaseModel + +__author__ = "Dan Gunter (LBNL)" + +_log = logging.Logger(__name__) + + +class Step: + """Step to run by the `Runner`.""" + + SEP = "::" # when printing out step::substep + + def __init__(self, name: str, func: Callable): + """Constructor + + Args: + name: Name of the step + func: Function to call to execute the step + """ + self.name: str = name + self.func: Callable = func + self.substeps: list[Tuple[str, Callable]] = [] + + def add_substep(self, name: str, func: Callable): + """Add a sub-step to this step. + Substeps are run in the order given. + + Args: + name: The name of substep + func: Function to call to execute this substep + """ + self.substeps.append((name, func)) + + +# Python 3.9-compatible forward reference +ActionType = TypeVar("ActionType", bound="Action") + + +class Runner: + """Run a set of defined steps.""" + + STEP_ANY = "-" + + def __init__(self, steps: Sequence[str]): + """Constructor. + + Args: + steps: List of step names + """ + self._context = {} + self._actions: dict[str, ActionType] = {} + self._step_names = list(steps) + self._steps: dict[str, Step] = {} + self.reset() + + def __getitem__(self, key): + """Look for key in `context`""" + return self._context[key] + + def add_step(self, name: str, func: Callable): + """Add a step. + + Steps are executed by calling `func(context)`, + where `context` is a dict (or dict-like) object + that is used to pass state between steps. + + Args: + name: Add a step to be executed + func: Function to execute for the step. + + Raises: + KeyError: _description_ + """ + step_name = self.normalize_name(name) + + if step_name not in self._step_names: + raise KeyError(f"Unknown step: {step_name}") + self._steps[step_name] = Step(step_name, func) + + def add_substep(self, base_name, name, func): + """Add a substep for a given step. + + Substeps are all executed, in the order added, + immediately after their base step is executed. + + Args: + base_name: Step name + name: Substep name + func: Function to execute + + Raises: + KeyError: Base step or substep is not found + ValueError: Base step does not have any substeps + """ + substep_name = self.normalize_name(name) + base_step_name = self.normalize_name(base_name) + if base_step_name not in self._step_names: + raise KeyError( + f"Unknown base step {base_step_name} for substep {substep_name}" + ) + try: + step = self._steps[base_step_name] + except KeyError: + raise ValueError( + f"Empty base step {base_step_name} for substep {substep_name}" + ) + step.add_substep(substep_name, func) + + def run_step(self, name): + """Syntactic sugar for calling `run_steps` for a single step.""" + self.run_steps(first=name, last=name) + + def run_steps( + self, first: str = "", last: str = "", after: str = "", before: str = "" + ): + """Run steps from `first`/`after` to step `last`/`before`. + + Specify only one of the first/after and last/before pairs. + + Use the special value `STEP_ANY` to mean the first or last defined step. 
+ + Args: + first: First step to run (include) + after: Run first defined step after this one (exclude) + last: Last step to run (include) + before: Run last defined step before this one (exclude) + + Raises: + KeyError: Unknown or undefined step given + ValueError: Steps out of order or both first/after or before/last given + """ + if first and after: + raise ValueError("Cannot specify both 'after' and 'first'") + if last and before: + raise ValueError("Cannot specify both 'before' and 'last'") + if not self._steps: + return # nothing to do, no steps defined + args = ( + first or after, + last or before, + (bool(first) or not bool(after), bool(last) or not bool(before)), + ) + return self._run_steps(*args) + + def _run_steps( + self, + first: str, + last: str, + endpoints: tuple[bool, bool], + ): + names = (self.normalize_name(first), self.normalize_name(last)) + + self._last_run_steps = [] + + # get indexes of first/last step + step_range = [-1, -1] + for i, step_name in enumerate(names): + if step_name == self.STEP_ANY: # meaning first or last defined + # this will always find a step as long as there is at least one, + # which we checked before calling this function + idx = self._find_step(reverse=i == 1) + else: + try: + idx = self._step_names.index(step_name) + except ValueError: + raise KeyError(f"Unknown step: {step_name}") + if step_name not in self._steps: + raise KeyError(f"Empty step: {step_name}") + step_range[i] = idx + + # check that first comes before last + if step_range[0] > step_range[1]: + raise ValueError( + "Steps out of order: {names[0]}={step_range[0]} > {names[1]}={step_range[1]}" + ) + + # execute overall before-run action + for action in self._actions.values(): + action.before_run() + + # run each (defined) step + for i in range(step_range[0], step_range[1] + 1): + # check whether to skip endpoints in range + if (i == step_range[0] and not endpoints[0]) or ( + i == step_range[1] and not endpoints[1] + ): + continue + # get the step associated with the index + step = self._steps.get(self._step_names[i], None) + # if the step is defined, run it + if step: + step.func(self._context) + self._last_run_steps.append(step.name) + + # execute overall after-run action + for action in self._actions.values(): + action.after_run() + + def reset(self): + """Reset runner internal state, especially the context.""" + self._context = {} + self._last_run_steps = [] + + def list_steps(self, all_steps=False) -> list[str]: + """Get list of [runnable] steps.""" + result = [] + for n in self._step_names: + if all_steps or (n in self._steps): + result.append(n) + return result + + def add_action(self, name: str, action_class: type, *args, **kwargs) -> object: + """Add a named action. + + Args: + name: Arbitrary name for the action, used to get/remove it + action_class: Subclass of Action to use + args: Positional arguments passed to `action_class` constructor + kwargs: Keyword arguments passed to `action_class` constructor + """ + obj = action_class(self, *args, **kwargs) + self._actions[name] = obj + return obj + + def get_action(self, name: str) -> ActionType: + """Get an action object. + + Args: + name: Name of action (as provided to `add_action`) + + Returns: + ActionType: Action object + + Raises: + KeyError: If action name does not match any known action + """ + return self._actions[name] + + def remove_action(self, name: str): + """Remove an action object. 
+ + Args: + name: Name of action (as provided to `add_action`) + + Raises: + KeyError: If action name does not match any known action + """ + del self._actions[name] + + def _find_step(self, reverse=False): + start_step, end_step, incr = ( + (0, len(self._step_names), 1), + (len(self._step_names) - 1, -1, -1), + )[reverse] + for i in range(start_step, end_step, incr): + if self._step_names[i] in self._steps: + return i + return -1 + + @classmethod + def normalize_name(cls, s: Optional[str]) -> str: + """Normalize a step name. + Args: + s: Step name + + Returns: + normalized name + """ + return cls.STEP_ANY if not s else s.lower() + + def _step_begin(self, name: str): + for action in self._actions.values(): + action.before_step(name) + + def _substep_begin(self, base: str, name: str): + for action in self._actions.values(): + action.before_substep(base, name) + + def _step_end(self, name: str): + for action in self._actions.values(): + action.after_step(name) + + def _substep_end(self, base: str, name: str): + for action in self._actions.values(): + action.after_substep(base, name) + + def step(self, name: str): + """Decorator function for creating a new step. + + Args: + name: Step name + + Returns: + Decorator function. + """ + + def step_decorator(func): + + def wrapper(*args, **kwargs): + self._step_begin(name) + result = func(*args, **kwargs) + self._step_end(name) + return result + + self.add_step(name, wrapper) + + return wrapper + + return step_decorator + + def substep(self, base: str, name: str): + """Decorator function for creating a new substep. + + Substeps are not run directly, and must have an already + existing base step as their parent. + + Args: + base: Base step name + name: Substep name + + Returns: + Decorator function. + """ + + def step_decorator(func): + + def wrapper(*args, **kwargs): + self._substep_begin(base, name) + result = func(*args, **kwargs) + self._substep_end(base, name) + return result + + self.add_substep(base, name, wrapper) + + return wrapper + + return step_decorator + + def report(self) -> dict[str, dict]: + """Compile reports of each action into a combined report + + Returns: + dict: Mapping with two key-value pairs: + - `actions`: Keys are names given to actions during `add_action()`, values are the + reports returned by that action, in Python dictionary form. + - `last_run`: List of steps (names, as strings) in previous run + """ + # create a mapping of actions to report dicts + action_reports = {} + for name, action in self._actions.items(): + rpt = action.report() + rpt_dict = rpt.model_dump() if isinstance(rpt, BaseModel) else rpt + action_reports[name] = rpt_dict + # return actions and other metadata as a report + return {"actions": action_reports, "last_run": self._last_run_steps.copy()} + + +class Action(ABC): + """The Action class implements a simple framework to run arbitrary + functions before and/or after each step and/or run performed + by the `Runner` class. + + To create and use your own Action, inherit from this class + and then define one or more of the methods: + + * before_step - Called before a given step is executed + * after_step - Called after a given step is executed + * before/after_substep - Called before/after a named + substep is executed (these can have arbitrary names) + * before_run - Called before the first step is executed + * after_run - Called after the last step is executed + + Then add the action to the `Runner` class (e.g., `FlowsheetRunner`) + instance with `add_action()`. 
Note that you pass the action + *class*, not instance. Additional settings can be passed to + the created action instance with arguments to `add_action`. + Also note that the *name* argument is used to retrieve the + action instance later, as needed. + + All actions must also implement the `report()` method, + which returns the results of the action to the caller + as either a Pydantic BaseModel subclass or a Python dict. + + ### Example + + Below is a simple example that prints a message + before/after every step and prints the total number + of steps run at the end of the run. + + ```{code} + :caption: Runner HelloGoodbye class + + from idaes.core.util.structfs.runner import Action + class HelloGoodbye(Action): + "Example action, for tutorial purposes." + + def __init__(self, runner, hello="hi", goodbye="bye", **kwargs): + super().__init__(runner, **kwargs) + self._hello, self._goodbye = hello, goodbye + self.step_counter = -1 + + def before_run(self): + self.step_counter = 0 + + def before_step(self, name): + print(f">> {self._hello} from step {name}") + + def before_substep(self, name, subname): + print(f" >> {self._hello} from sub-step {subname}") + + def after_step(self, name): + print(f"<< {self._goodbye} from step {name}") + self.step_counter += 1 + + def after_substep(self, name, subname): + print(f" << {self._goodbye} from sub-step {subname}") + + def after_run(self): + print(f"Ran {self.step_counter} steps") + + def report(self): + return {"steps": self.step_counter} + ``` + + You could add the above example to a Runner subclass, + here called `my_runner`, like this: + + ```{code} + my_runner.add_action( + "hg", + HelloGoodbye, + hello="Greetings and salutations", + goodbye="Smell you later", + ) + ``` + + Then, after running steps, you could print + the value of the *step_counter* attribute with: + + ```{code} + print(my_runner.get_action("hg").step_counter) + ``` + + See the pre-defined actions in the + {py:mod}`runner_actions ` + module, and their usage in the `FlowsheetRunner` class, for more examples. + """ + + def __init__(self, runner: Runner, log: Optional[logging.Logger] = None): + """Constructor + + Args: + runner: Reference to the runner that will trigger this action. + log: Logger to use when logging informational or error messages + """ + self._runner = runner + if log is None: + log = _log + self.log = log + + def before_step(self, step_name: str): + """Perform this action before the named step. + + Args: + step_name: Name of the step + """ + return + + def before_substep(self, step_name: str, substep_name: str): + """Perform this action before the named sub-step. + + Args: + step_name: Name of the step + substep_name: Name of the sub-step + """ + return + + def after_step(self, step_name: str): + """Perform this action after the named step. + + Args: + step_name: Name of the step + """ + return + + def after_substep(self, step_name: str, substep_name: str): + """Perform this action after the named sub-step. + + Args: + step_name: Name of the step + substep_name: Name of the sub-step + """ + return + + def before_run(self): + """Perform this action before a run starts.""" + return + + def after_run(self): + """Perform this action after a run ends.""" + return + + @abstractmethod + def report(self) -> BaseModel | dict: + """Report the results of the action to the caller. 
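+
+        A minimal sketch of an override, following the `HelloGoodbye`
+        example in the class docstring (illustrative only):
+
+            def report(self) -> dict:
+                return {"steps": self.step_counter}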
+ + Returns: + Results as a Pydantic model or Python dict + """ + return diff --git a/idaes/core/util/structfs/runner_actions.py b/idaes/core/util/structfs/runner_actions.py new file mode 100644 index 0000000000..89b6003cfa --- /dev/null +++ b/idaes/core/util/structfs/runner_actions.py @@ -0,0 +1,490 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Predefined Actions for the generic Runner. +""" +# stdlib +from collections.abc import Callable +from io import StringIO +import re +import sys +import time +from typing import Union, Optional + +# third-party +from pyomo.network.port import ScalarPort +from pyomo.core.base.var import IndexedVar +from pyomo.core.base.param import IndexedParam +import pyomo.environ as pyo +from pydantic import BaseModel, Field + +# package +from idaes.core.util.model_statistics import degrees_of_freedom +from idaes.core.base.unit_model import ProcessBlockData +from .runner import Action +from .fsrunner import FlowsheetRunner + + +class Timer(Action): + """Simple step/run timer action.""" + + class Report(BaseModel): + """Report returned by report() method.""" + + # {"step_name": , ..} for each step + timings: dict[str, float] = Field(default={}) + + def __init__(self, runner, **kwargs): + """Constructor. 
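+
+        Timers are normally attached through `Runner.add_action` rather than
+        constructed directly; a usage sketch (the runner instance `rn` is
+        illustrative):
+
+            rn.add_action("timer", Timer)
+            rn.run_steps()
+            print(rn.get_action("timer").summary())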
+ + Args: + runner: Associated Runner object + kwargs: Additional optional arguments for Action constructor + + Attributes: + step_times: Dict with key step name and value a list of + timings for that step + run_times: List of timings for a run (sequence of steps) + """ + super().__init__(runner, **kwargs) + self.step_times: list[dict[str, float]] = [] + self.run_times: list[float] = [] + self._run_begin, self._step_begin = None, {} + self._step_order = runner.list_steps() + + def before_step(self, step_name): + self._step_begin[step_name] = time.time() + + def after_step(self, step_name): + t1 = time.time() + t0 = self._step_begin.get(step_name, None) + if t0 is None: + self.log.warning(f"Timer: step {step_name} end without begin") + else: + self._cur_step_times[step_name] = t1 - t0 + self._step_begin[step_name] = None + + def before_run(self): + self._run_begin = time.time() + self._cur_step_times = {} + self._step_begin = {} + + def after_run(self): + t1 = time.time() + if self._run_begin is None: + self.log.warning("Timer: run end without begin") + else: + self.run_times.append(t1 - self._run_begin) + self._run_begin = None + filled_times = {} + for step in self._runner.list_steps(): + filled_times[step] = self._cur_step_times.get(step, -1) + self.step_times.append(filled_times) + + def __len__(self): + return len(self.run_times) + + def get_history(self) -> list[dict]: + """Summarize timings + + Returns: + Summary of timings (in seconds) for each run in `run_times`: + - 'run': Time for the run + - 'steps': dict of `{: }` + - 'inclusive': total time spent in the steps + - 'exclusive': difference between run time and inclusive time + """ + return [self._get_summary(i) for i in range(0, len(self.run_times))] + + def _get_summary(self, i): + rt, st = self.run_times[i], self.step_times[i] + step_total = sum((max(t, 0) for t in st.values())) + return { + "run": rt, + "steps": st, + "inclusive": step_total, + "exclusive": rt - step_total, + } + + def summary(self, stream=None, run_idx=-1) -> str | None: + """Summary of the timings. + + Args: + stream: Output stream, with `write()` method. Return a string if None. + run_idx: Index of run, -1 meaning "last one" + + Returns: + str: If output stream was None, the text summary; otherwise None + """ + if stream is None: + stream = StringIO() + + if len(self.run_times) == 0: + return "" # nothing to summarize + + d = self._get_summary(run_idx) + + stream.write("Time per step:\n\n") + slen, ttot = -1, 0 + for s, t in d["steps"].items(): + if t >= 0: + slen = max(slen, len(s)) + ttot += t + sfmt = " {{s:{slen}s}} : {{t:8.3f}} {{p:4.1f}}%\n" + for s, t in d["steps"].items(): + if t >= 0: + fmt = sfmt.format(slen=slen) + stream.write(fmt.format(s=s, t=t, p=t / ttot * 100)) + + stream.write(f"\nTotal time: {d['run']:.3f} s\n") + + if isinstance(stream, StringIO): + return stream.getvalue() + + return None + + def _ipython_display_(self): + print(self.summary()) + + def report(self) -> Report: + """Report the timings. + + Returns: + The report object + """ + rpt = self.Report(timings=self.step_times[-1].copy()) + return rpt + + +# Hold degrees of freedom for one FlowsheetRunner 'step' +# {key=component: value=dof} +UnitDofType = dict[str, int] + + +class UnitDofChecker(Action): + """Check degrees of freedom on unit models. + + After a (caller-named) step or steps, check the degrees + of freedom on each unit model by the method of + fixing the inlet, applying the `degrees_of_freedom()` function, + and unfixing the inlet again. 
The calculated values are + saved and passed to an optional caller-provided function. + + At the end of a run, the degrees of freedom for the entire + model are checked, saved, and passed to an optional function. + """ + + class Report(BaseModel): + """Report on degrees of freedom in a model.""" + + steps: dict[str, UnitDofType] = Field(default={}) + model: int = Field(default=0) + + def __init__( + self, + runner: FlowsheetRunner, + flowsheet: str, + steps: Union[str, list[str]], + step_func: Optional[Callable[[str, UnitDofType], None]] = None, + run_func: Optional[Callable[[dict[str, UnitDofType], int], None]] = None, + **kwargs, + ): + """Constructor. + + Args: + runner: Associated Runner object (provided by `add_action`) + flowsheet: Variable name for flowsheet, e.g. "fs" + steps: Step or steps at which to run the checking action + step_func: Function to call with calculated DoF values for one step. + Takes name of step and dictionary with per-unit degrees of freedom + (see `UnitDofType` alias). + run_func: Function to call with calculated DoF values for each step, as well + as overall model DoF. + kwargs: Additional optional arguments for Action constructor + + Raises: + ValueError: if `steps` list is empty, or no callback functions provided + """ + super().__init__(runner, **kwargs) + if hasattr(steps, "lower"): # string-like + self._steps = {steps} + else: # assume it is list-like + if len(steps) == 0: + raise ValueError("At least one step name must be provided") + self._steps = set(steps) + self._steps_dof: dict[str, UnitDofType] = {} + self._model_dof = None + self._step_func, self._run_func = step_func, run_func + self._fs = flowsheet + + def after_step(self, step_name: str): + step_name = self._runner.normalize_name(step_name) + if step_name not in self._steps: + self.log.debug(f"Do not check DoF for step: {step_name}") + return + + fs = self._get_flowsheet() + + model_dof = degrees_of_freedom(self._get_flowsheet()) + units_dof = {self._fs: model_dof} + for unit in fs.component_objects(descend_into=True): + if self._is_unit_model(unit): + units_dof[unit.name] = self._get_dof(unit) + self._steps_dof[step_name] = units_dof # save + if self._step_func: + self._step_func(step_name, units_dof) + + def after_run(self): + """Actions performed after a run.""" + fs = self._get_flowsheet() + model_dof = degrees_of_freedom(fs) + self._model_dof = model_dof + if self._run_func: + self._run_func(self._steps_dof, model_dof) + + def _get_flowsheet(self): + m = self._runner.model + if self._fs: + return getattr(m, self._fs) + return m + + @staticmethod + def _is_unit_model(block): + return isinstance(block, ProcessBlockData) + + def summary(self, stream=sys.stdout, step=None): + """Readable summary of the degrees of freedom. + + Args: + stream: Output stream, with `write()` method. Return a string if None. + step: Specific step to summarize, otherwise all steps. 
+ + Returns: + The summary as a string if `stream` was None, otherwise None + """ + if stream is None: + stream = StringIO() + + def write_step(sdof, indent=4): + sdof = self._steps_dof[step] + istr = " " * indent + unit_names = list(sdof.keys()) + ulen = max((len(u) for u in unit_names)) + dfmt = f"{istr}{{u:{ulen}s}} : {{d}}\n" + unit_names.sort() + for unit in unit_names: + dof = sdof[unit] + stream.write(dfmt.format(u=unit, d=dof)) + + stream.write(f"Degrees of freedom: {self._model_dof}\n\n") + if step is None: + stream.write("Degrees of freedom after steps:\n") + for step in self._runner.list_steps(): + if step in self._steps_dof: + stream.write(f" {step}:\n") + write_step(self._steps_dof[step]) + else: + write_step(self._steps_dof[step], indent=0) + + if isinstance(stream, StringIO): + return stream.getvalue() + + def _ipython_display_(self): + self.summary() + + def get_dof(self) -> dict[str, UnitDofType]: + """Get degrees of freedom + + Returns: + dict[str, UnitDofType]: Mapping of step name to per-unit DoF when + the step completed. + """ + return self._steps_dof.copy() + + def get_dof_model(self) -> int: + """Get degrees of freedom for the model. + + Returns: + int: Last calculated DoF for the model. + """ + return self._model_dof + + def steps(self, only_with_data: bool = False) -> list[str]: + """Get list of steps for which unit degrees of freedom are calculated. + + Args: + only_with_data: If True, do not return steps with no data + + Returns: + list of step names + """ + if only_with_data: + return [s for s in self._steps if s in self._steps_dof] + return list(self._steps) + + def report(self) -> Report: + """Machine-readable report of degrees of freedom. + + Returns: + Report object + """ + return self.Report(steps=self.get_dof(), model=self.get_dof_model()) + + @staticmethod + def _get_dof(block, fix_inlets: bool = True): + if fix_inlets: + inlets = [ + c + for c in block.component_objects(descend_into=False) + if isinstance(c, ScalarPort) + and (c.name.endswith("inlet") or c.name.endswith("recycle")) + ] + free_me = [] + for inlet in inlets: + if not inlet.is_fixed(): + inlet.fix() + free_me.append(inlet) + + dof = degrees_of_freedom(block) + + if fix_inlets: + for inlet in free_me: + inlet.free() + + return dof + + +class CaptureSolverOutput(Action): + """Capture the solver output.""" + + def __init__(self, runner, **kwargs): + super().__init__(runner, **kwargs) + self._logs = {} + self._solver_out = None + + def before_step(self, step_name: str): + """Action performed before the step.""" + if self._is_solve_step(step_name): + self._solver_out = StringIO() + self._save_stdout, sys.stdout = sys.stdout, self._solver_out + + def after_step(self, step_name: str): + """Action performed after the step.""" + if self._solver_out is not None: + self._logs[step_name] = self._solver_out.getvalue() + self._solver_out = None + sys.stdout = self._save_stdout + + def _is_solve_step(self, name: str): + return name.startswith("solve") + + def report(self) -> dict: + """Machine-readable report with solver output. 
+ + Returns: + Report dict, {'solver_logs': ""} + """ + return {"solver_logs": self._logs} + + +class ModelVariables(Action): + """Extract and format model variables.""" + + VAR_TYPE, PARAM_TYPE = "V", "P" + + class Report(BaseModel): + """Report for ModelVariables.""" + + #: Tree of variables + variables: dict = Field(default={}) + + def __init__(self, runner, **kwargs): + assert isinstance(runner, FlowsheetRunner) # makes no sense otherwise + super().__init__(runner, **kwargs) + + def after_run(self): + """Actions performed after the run.""" + self._extract_vars(self._runner.model) + + def _extract_vars(self, m): + var_tree = {} + + for c in m.component_objects(): + # get component type + if self._is_var(c): + subtype = self.VAR_TYPE + elif self._is_param(c): + subtype = self.PARAM_TYPE + else: + continue # ignore other components + # start new block + b = [subtype] + # add its variables + items = [] + indexed = False + # add each value from an indexed var/param, + # this also works ok for non-indexed ones + for index in c: + v = c[index] + indexed = index is not None + if subtype == self.VAR_TYPE: + # index, value, is-fixed, is-stale, lower-bound, upper-bound + item = (index, pyo.value(v), v.fixed, v.stale, v.lb, v.ub) + else: + # index, value + item = (index, pyo.value(v)) + items.append(item) + b.append(indexed) + b.append(items) + # add block to tree + self._add_block(var_tree, c.name, b) + + self._vars = var_tree + + @staticmethod + def _is_var(c): + return c.is_variable_type() or isinstance(c, IndexedVar) + + @staticmethod + def _is_param(c): + return c.is_parameter_type() or isinstance(c, IndexedParam) + + @staticmethod + def _add_block(tree: dict, name: str, block): + # get parts of the name + # - mostly logic to handle 'foo.bar[0.0].baz' crap + p = name.split(".") + parts, i, n = [], 0, len(p) + while i < n: + cur = p[i] + # since split('.') creates ('foo[0.', '0]') from 'foo[0.0]', + # we need to rejoin them + if i < n - 1 and re.match(r".*\[\d+$", cur): + next_ = p[i + 1] + parts.append(cur + "." + next_) + i += 2 + else: + parts.append(cur) + i += 1 + # insert in tree + t, prev = tree, None + for p in parts: + prev = t + if p not in t: + t[p] = {} + t = t[p] + prev[p] = block + + def report(self) -> Report: + """Report containing model variable values.""" + return self.Report(variables=self._vars) diff --git a/idaes/core/util/structfs/runner_cli.py b/idaes/core/util/structfs/runner_cli.py new file mode 100644 index 0000000000..fe60e96449 --- /dev/null +++ b/idaes/core/util/structfs/runner_cli.py @@ -0,0 +1,174 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Command-line interface to run a module. 
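+
+Typical invocations (illustrative; the flowsheet file and step name are
+hypothetical, and the module is shown run with ``python -m``)::
+
+    python -m idaes.core.util.structfs.runner_cli my_flowsheet.py results.json
+    python -m idaes.core.util.structfs.runner_cli my_flowsheet.py results.json --to solve_initial
+    python -m idaes.core.util.structfs.runner_cli my_flowsheet.py results.json --info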
+""" +import os +import argparse +import importlib +import importlib.util +import json +import logging +import traceback +from io import FileIO +import sys + +from idaes.core.util.structfs.runner import Runner + +_log = logging.getLogger("idaes.core.util.structfs.runner_cli") + + +def _error(ofile: FileIO, msg: str, code: int = -1) -> int: + stack_trace = traceback.format_exc() + d = {"status": code, "error": msg, "error_detail": stack_trace} + json.dump(d, ofile) + _log.error(msg) + return code + + +def main(): + """Program entry point.""" + # Set up and parse commandline arguments + p = argparse.ArgumentParser() + p.add_argument("module", help="Python module name or path to .py file") + p.add_argument("output", help="Output file for result JSON") + p.add_argument("--object", help="Object in module (default=FS)", default="FS") + p.add_argument( + "--to", help="Step name to run to (default=all)", default=Runner.STEP_ANY + ) + p.add_argument( + "--info", + action="store_true", + help="Instead of running module, get information like list of steps", + default=False, + ) + args = p.parse_args() + + # open output file + try: + ofile = open(args.output, mode="w") + except IOError as err: + return _error(sys.stderr, f"Cannot open output file '{args.output}': {err}", -1) + + # find and import module + module_name = args.module + try: + mod = _load_module(module_name) + except (ModuleNotFoundError, FileNotFoundError) as e: + return _error(ofile, f"Could not find module '{module_name}': {e}", 2) + + # find flowsheet object in module + obj_name = args.object + try: + obj = getattr(mod, obj_name) + if not isinstance(obj, Runner): + return _error( + ofile, + f"Object must be an instance of the Runner class, got '{obj.__class__.__name__}'", + 3, + ) + except AttributeError: + return _error( + ofile, f"Could not find object '{obj_name}' in module '{module_name}'", 4 + ) + + # branch based on run mode + if args.info: + # get static flowsheet information only + report = {"steps": obj.list_steps(), "class_name": obj.__class__.__name__} + else: + # run the flowsheet and collect results + to_step = args.to + try: + obj.run_steps(first=Runner.STEP_ANY, last=to_step) + except Exception as e: # pylint: disable=broad-exception-caught + return _error(ofile, f"While running steps: {e}", 5) + + report = obj.report() + report["status"] = 0 + + # dump results to output file + json.dump(report, ofile) + + return 0 + + +def _load_module(module_or_path: str): + """ + Load a module - supports both module names and file paths. + + Args: + module_or_path: Can be either: + - Module name: "idaes.models.flash_flowsheet" + - File path: "/Users/user/Downloads/my_flowsheet.py" + Returns: + module: The loaded Python module object. + + Note: + For file paths, this function sets up a pseudo-package structure to + support relative imports (e.g., 'from ..sibling import something'). 
+ """ + # Check if input is a file path + if module_or_path.endswith(".py") or os.path.isfile(module_or_path): + # This is a file path + file_path = os.path.abspath(module_or_path) + + if not os.path.exists(file_path): + raise FileNotFoundError(f"File not found: {file_path}") + + # Get directory structure for package simulation + dir_path = os.path.dirname(file_path) # e.g., /Users/user/workspace/subdir + parent_dir = os.path.dirname(dir_path) # e.g., /Users/user/workspace + package_name = os.path.basename(dir_path) # e.g., "subdir" + module_basename = os.path.splitext(os.path.basename(file_path))[ + 0 + ] # e.g., "test" + full_module_name = f"{package_name}.{module_basename}" # e.g., "subdir.test" + + # Add both current directory and parent directory to sys.path + # Current dir is needed for same-directory imports (import hda_ideal_VLE) + # Parent dir is needed for sibling package imports + if dir_path not in sys.path: + sys.path.insert(0, dir_path) + if parent_dir not in sys.path: + sys.path.insert(0, parent_dir) + + # Create module spec with submodule_search_locations for package support + spec = importlib.util.spec_from_file_location( + full_module_name, file_path, submodule_search_locations=[dir_path] + ) + + if spec is None or spec.loader is None: + raise ImportError(f"Cannot create module spec for {file_path}") + + # Create the module object from spec + module = importlib.util.module_from_spec(spec) + + # KEY: Set __package__ so relative imports know the package context + module.__package__ = package_name + + # Register in sys.modules so other imports can find it + sys.modules[full_module_name] = module + + # Execute the module code (this actually loads the content) + spec.loader.exec_module(module) + return module + else: + if module_or_path.startswith("."): + raise ValueError("Relative module names not allowed!!") + # This is a module name, use the original logic + return importlib.import_module(module_or_path) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/idaes/core/util/structfs/tests/__init__.py b/idaes/core/util/structfs/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/core/util/structfs/tests/flash_flowsheet.py b/idaes/core/util/structfs/tests/flash_flowsheet.py new file mode 100644 index 0000000000..c53f1350a8 --- /dev/null +++ b/idaes/core/util/structfs/tests/flash_flowsheet.py @@ -0,0 +1,80 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Simple Flash flowsheet for use in testing. 
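+
+The steps below are registered on the module-level ``FS`` runner; the tests
+execute them in order with, for example::
+
+    FS.run_steps("build", "solve_initial")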
+""" + +from pyomo.environ import ConcreteModel, SolverFactory +from idaes.core import FlowsheetBlock + +# Import idaes logger to set output levels +import idaes.logger as idaeslog +from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import ( + BTXParameterBlock, +) +from idaes.models.unit_models import Flash +from idaes.core.util.structfs.fsrunner import FlowsheetRunner + + +FS = FlowsheetRunner() + +# # Flash Unit Model +# +# Author: Jaffer Ghouse +# Maintainer: Andrew Lee +# Updated: 2023-06-01 + + +@FS.step("build") +def build_model(ctx): + """Build the model.""" + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + m.fs.properties = BTXParameterBlock( + valid_phase=("Liq", "Vap"), activity_coeff_model="Ideal", state_vars="FTPz" + ) + m.fs.flash = Flash(property_package=m.fs.properties) + # assert degrees_of_freedom(m) == 7 + ctx.model = m + + +@FS.step("set_operating_conditions") +def set_operating_conditions(ctx): + """Set operating conditions.""" + m = ctx.model + m.fs.flash.inlet.flow_mol.fix(1) + m.fs.flash.inlet.temperature.fix(368) + m.fs.flash.inlet.pressure.fix(101325) + m.fs.flash.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.flash.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.flash.heat_duty.fix(0) + m.fs.flash.deltaP.fix(0) + + +@FS.step("initialize") +def init_model(ctx): + """ "Initialize the model.""" + m = ctx.model + m.fs.flash.initialize(outlvl=idaeslog.INFO) + + +@FS.step("set_solver") +def set_solver(ctx): + """Set the solver.""" + ctx.solver = SolverFactory("ipopt") + + +@FS.step("solve_initial") +def solve(ctx): + """Perform the initial model solve.""" + ctx["status"] = ctx.solver.solve(ctx.model, tee=ctx["tee"]) diff --git a/idaes/core/util/structfs/tests/test_fsrunner.py b/idaes/core/util/structfs/tests/test_fsrunner.py new file mode 100644 index 0000000000..904d29bd3b --- /dev/null +++ b/idaes/core/util/structfs/tests/test_fsrunner.py @@ -0,0 +1,186 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +import pytest +from pyomo.environ import ConcreteModel +from idaes.core import FlowsheetBlock +from ..fsrunner import FlowsheetRunner, BaseFlowsheetRunner +from .flash_flowsheet import FS as flash_fs +from idaes.core.util import structfs +from idaes.core.util.doctesting import Docstring + +# -- setup -- + +fsr = FlowsheetRunner() + + +@fsr.step("build") +def build_it(context): + print("flowsheet - build") + context.model = ConcreteModel() + print(f"@@ build_it: id(model)={id(context.model)}") + context.model.fs = FlowsheetBlock(dynamic=False) + add_units(context.model) + + +@fsr.substep("build", "add_units") +def add_units(m): + print("flowsheet - add units (substep)") + assert m.fs is not None + + +@fsr.step("add_costing") +def add_costing(context): + print("flowsheet - costing") + assert context.model is not None + + +@fsr.step("solve_optimization") +def solve_opt(context): + print("flowsheet - solve") + assert context.model is not None + context["results"] = 123 + + +# -- end setup -- + + +@pytest.mark.unit +def test_run_all(): + fsr.run_steps() + assert fsr.results == 123 + + +@pytest.mark.unit +def test_rerun(): + + fsr.run_steps() + first_model = fsr.model + print(f"@@ test: id(model)={id(fsr.model)}") + + print("-- rerun --") + + # model not changed + fsr.run_steps("solve_optimization") + assert fsr.model == first_model + + +@pytest.mark.unit +def test_rerun_reset(): + fsr.run_steps() + first_model = fsr.model + + print("-- rerun --") + + # reset forces new model + fsr.reset() + fsr.run_steps("solve_optimization") + assert fsr.model != first_model + second_model = fsr.model + + +@pytest.mark.unit +def test_rerun_frombuild(): + fsr.run_steps() + first_model = fsr.model + + print("-- rerun --") + + # running from build also creates new model + fsr.run_steps("build", "add_costing") + assert fsr.model != first_model + + +@pytest.mark.unit +def test_annotation(): + runner = flash_fs + runner.run_steps("build") + print(runner.timings.history) + + ann = runner.annotate_var # alias + flash = runner.model.fs.flash # alias + category = "flash" + kw = {"input_category": category, "output_category": category} + + ann( + flash.inlet.flow_mol, + key="fs.flash.inlet.flow_mol", + title="Inlet molar flow", + desc="Flash inlet molar flow rate", + **kw, + ).fix(1) + ann(flash.inlet.temperature, units="Centipedes", **kw).fix(368) + ann(flash.inlet.pressure, **kw).fix(101325) + ann(flash.inlet.mole_frac_comp[0, "benzene"], **kw).fix(0.5) + ann(flash.inlet.mole_frac_comp[0, "toluene"], **kw).fix(0.5) + ann(flash.heat_duty, **kw).fix(0) + ann(flash.deltaP, is_input=False, **kw).fix(0) + + ann = runner.annotated_vars + print("-" * 40) + print(ann) + print("-" * 40) + assert ann["fs.flash.inlet.flow_mol"]["title"] == "Inlet molar flow" + assert ( + ann["fs.flash.inlet.flow_mol"]["description"] == "Flash inlet molar flow rate" + ) + assert ann["fs.flash.inlet.flow_mol"]["input_category"] == category + assert ann["fs.flash.inlet.flow_mol"]["output_category"] == category + assert runner.model.fs.flash.inlet.flow_mol[0].value == 1 + assert ann["fs.flash._temperature_inlet_ref"]["units"] == "Centipedes" + assert ann["fs.flash.deltaP"]["is_input"] == False + + +##### +# Test the code blocks in the structfs/__init__.py +##### + +# pacify linters: +sfi_before_build_model = sfi_before_set_operating_conditions = sfi_before_init_model = ( + sfi_before_solve +) = lambda x: None +SolverStatus, FS = None, None + +# load the functions from 
the docstring +_ds1 = Docstring(structfs.__doc__) +exec(_ds1.code("before", func_prefix="sfi_before_")) +exec(_ds1.code("after", func_prefix="sfi_after_")) + + +@pytest.mark.unit +def test_sfi_before(): + m = sfi_before_build_model() + sfi_before_set_operating_conditions(m) + sfi_before_init_model(m) + result = sfi_before_solve(m) + assert result.solver.status == SolverStatus.ok + + +@pytest.mark.unit +def test_sfi_after(): + FS.run_steps() + assert FS.results.solver.status == SolverStatus.ok + + +# pacify linters +annotate_vars_example = lambda x: None +# load example function from docstring +_ds2 = Docstring(BaseFlowsheetRunner.annotate_var.__doc__) +exec(_ds2.code("annotate_vars")) + + +@pytest.mark.unit +def test_ann_docs(): + annotate_vars_example(fr := FlowsheetRunner()) + ex = fr.annotated_vars["example"] + assert ex["fullname"] == "ScalarVar" + assert ex["title"] == "Example variable" diff --git a/idaes/core/util/structfs/tests/test_logutil.py b/idaes/core/util/structfs/tests/test_logutil.py new file mode 100644 index 0000000000..17344845ef --- /dev/null +++ b/idaes/core/util/structfs/tests/test_logutil.py @@ -0,0 +1,86 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for logutil module +""" + +# standard library +import logging +import time + +# third-party +import pytest + +# package +from .. import logutil + +DEBUG = False + + +def numbytes(p): + return p.stat().st_size + + +def dump_console(p): + if DEBUG: + with open(p) as f: + print(f.read()) + + +@pytest.mark.unit +def test_quiet(tmp_path): + log = logging.getLogger("idaes.test_quiet") + ni_log = logging.getLogger("ni.test_quiet") + logfile = tmp_path / "test_quiet.log" + handler = logging.FileHandler(logfile) + log.addHandler(handler) + ni_log.addHandler(handler) + assert numbytes(logfile) == 0 + log.setLevel(logging.INFO) + ni_log.setLevel(logging.INFO) + + # messages initially logged + for i in range(2): + log.info("this is a log message 1") + time.sleep(0.1) + dump_console(logfile) + sz1 = numbytes(logfile) + assert sz1 > 0 + + logutil.quiet() + + # no more messages from idaes. logger + for i in range(2): + log.info("this is a log message 2") + time.sleep(0.1) + dump_console(logfile) + sz2 = numbytes(logfile) + assert sz2 == sz1 + + # still get messages from non-idaes. logger + for i in range(2): + ni_log.info("this is a log message 3") + time.sleep(0.1) + dump_console(logfile) + sz3 = numbytes(logfile) + assert sz3 > sz2 + + logutil.unquiet() + + # get messages from idaes. 
logger again + for i in range(2): + log.info("this is a log message 4") + time.sleep(0.1) + dump_console(logfile) + sz4 = numbytes(logfile) + assert sz4 > sz3 diff --git a/idaes/core/util/structfs/tests/test_runner.py b/idaes/core/util/structfs/tests/test_runner.py new file mode 100644 index 0000000000..ca5d1319fa --- /dev/null +++ b/idaes/core/util/structfs/tests/test_runner.py @@ -0,0 +1,156 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +import pytest +from ..runner import Runner, Action +from .. import runner_actions +from idaes.core.util.doctesting import Docstring + +## -- setup -- + +simple = Runner(("notrun-1", "hello", "hello.dude", "world", "notrun-2")) + + +@simple.step("hello") +def say_hello(context): + context["greeting"] = "Hello" + dude("yo") + + +@simple.substep("hello", "dude") +def dude(s): + print(f"{s}! this is called from hello, not directly by runner") + + +@simple.step("world") +def say_to_world(context): + msg = f"{context['greeting']}, World!" + print(msg) + context["greeting"] = msg + + +empty = Runner(("hi", "bye")) + +# -- end setup -- + + +@pytest.mark.unit +def test_simple_run_all(): + simple.run_steps() + assert simple._context["greeting"] == "Hello, World!" 
+ + +@pytest.mark.unit +def test_runner_actions(): + + rn = Runner(("a step",)) + + def do_nothing(context): + print("do nothing") + + rn.add_action("nothing", do_nothing) + rn.get_action("nothing") + with pytest.raises(KeyError): + rn.get_action("something") + rn.remove_action("nothing") + with pytest.raises(KeyError): + rn.get_action("nothing") + + +@pytest.mark.unit +def test_run_steps_order(): + with pytest.raises(ValueError): + simple.run_steps("world", "hello") + + +@pytest.mark.unit +def test_run_steps_args(): + simple.run_steps(first="hello") + simple.run_steps(last="world") + + +@pytest.mark.unit +def test_run_1step(): + simple.run_step("hello") + + +@pytest.mark.unit +def test_run_empty_steps(): + empty.run_steps() + + +@pytest.mark.unit +def test_run_bad_steps(): + with pytest.raises(KeyError): + simple.run_steps("howdy", "pardner") + + with pytest.raises(KeyError): + simple.run_step("notrun-1") + + +@pytest.mark.unit +def test_runner_context(): + simple.run_steps() + assert simple["greeting"] + + +@pytest.mark.unit +def test_add_bad_step(): + with pytest.raises(KeyError): + + @simple.step("bad") + def do_bad(ctx): + return + + with pytest.raises(KeyError): + + @simple.substep("bad", "sub") + def do_bad2(ctx): + return + + # undefined step cannot have a substep + + with pytest.raises(ValueError): + + @simple.substep("notrun-1", "sub") + def do_bad3(ctx): + return + + +# load the HelloGoodbye class from the Action docstring +HelloGoodbye = None # pacify linters +exec(Docstring(runner_actions.Action.__doc__).code("runner-hellogoodbye-class")) + + +@pytest.mark.unit +def test_hellogoodbye(): + simple.add_action( + "hg", + HelloGoodbye, + hello="Greetings and salutations", + goodbye="Smell you later", + ) + simple.run_steps(first="-", last="-") + assert simple.get_action("hg").step_counter == 2 + + +class RunActionExample(Action): + def report(self) -> dict: + return {"example": True} + + +@pytest.mark.unit +def test_runaction(): + simple.reset() + simple.add_action("foo", RunActionExample) + simple.run_steps() + assert simple.get_action("foo").report() == {"example": True} diff --git a/idaes/core/util/structfs/tests/test_runner_actions.py b/idaes/core/util/structfs/tests/test_runner_actions.py new file mode 100644 index 0000000000..f7ecfa6d93 --- /dev/null +++ b/idaes/core/util/structfs/tests/test_runner_actions.py @@ -0,0 +1,192 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2026 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +import pprint +import time + +import pytest +from pytest import approx +from .. import runner +from ..runner_actions import Timer, UnitDofChecker +from . 
import flash_flowsheet + + +@pytest.mark.unit +def test_class_timer(): + timer = Timer(runner.Runner([])) + n, m = 2, 3 + for i in range(n): + timer.before_run() + time.sleep(0.1) + for j in range(m): + name = f"step{j}" + timer.before_step(name) + time.sleep(0.1 * (j + 1)) + timer.after_step(name) + time.sleep(0.1) + timer.after_run() + + s = timer.get_history() + # [ {'run': 0.8005404472351074, + # 'steps': { + # 'step0': 0.10010385513305664, + # 'step2': 0.3000965118408203, + # 'step1': 0.20009303092956543 + # }, + # 'inclusive': 0.6002933979034424, + # 'exclusive': 0.20024704933166504 + # }, + # ... + # ] + eps = 0.1 # big variance needed for Windows + for r in s: + print(f"Timings: {r}") + assert r["run"] == approx(0.8, abs=eps) + assert r["inclusive"] + r["exclusive"] == approx(r["run"]) + for name, t in r["steps"].items(): + assert t == approx(0.1 + 0.1 * i, abs=eps) + + +@pytest.mark.component +def test_timer_runner(): + rn = runner.Runner(["step1", "step2", "step3"]) + + @rn.step("step1") + def sleepy1(context): + time.sleep(0.1) + + @rn.step("step2") + def sleepy2(context): + time.sleep(0.1) + + @rn.step("step3") + def sleepy3(context): + time.sleep(0.1) + + rn.add_action("timer", Timer) + + rn.run_steps() + + s = rn.get_action("timer").get_history() + + eps = 0.1 # big variance needed for Windows + for r in s: + print(f"Timings: {r}") + assert r["run"] == approx(0.3, abs=eps) + assert r["inclusive"] + r["exclusive"] == approx(r["run"]) + for name, t in r["steps"].items(): + # assert name == f"step{i + 1}" + assert t == approx(0.1, abs=eps) + + +@pytest.mark.unit +def test_unit_dof_action_base(): + rn = flash_flowsheet.FS + rn.reset() + + def check_step(name, data): + print(f"check_step {name} data: {data}") + assert "fs.flash" in data + if name == "solve_initial": + assert data["fs.flash"] == 0 + + def check_run(step_dof, model_dof): + assert model_dof == 0 + + rn.add_action( + "check_dof", + UnitDofChecker, + "fs", + ["build", "solve_initial"], + check_step, + check_run, + ) + + rn.run_steps("build", "solve_initial") + + pprint.pprint(rn.get_action("check_dof").get_dof()) + + +@pytest.mark.unit +def test_unit_dof_action_getters(): + rn = flash_flowsheet.FS + rn.reset() + + aname = "check_dof" + rn.add_action( + aname, + UnitDofChecker, + "fs", + ["build", "solve_initial"], + ) + rn.run_steps() + + act = rn.get_action(aname) + + steps = act.steps() + dofs = [] + for s in steps: + step_dof = act.get_dof()[s] + assert step_dof + dofs.append(step_dof) + assert dofs[0] != dofs[1] + + assert act.steps() == act.steps(only_with_data=True) + + +@pytest.mark.unit +def test_timer_report(): + rn = flash_flowsheet.FS + rn.reset() + rn.add_action("timer", Timer) + rn.run_steps() + report = rn.get_action("timer").report() + # {'build': 0.053082942962646484, + # 'set_operating_conditions': 0.0004742145538330078, + # 'initialize': 0.22397446632385254, + # 'set_solver': 7.581710815429688e-05, + # 'solve_initial': 0.03623509407043457} + expect_steps = ( + "build", + "set_operating_conditions", + "initialize", + "set_solver", + "solve_initial", + ) + assert report + print(f"Hey! 
here's the report: {report}") + for step_name in expect_steps: + assert step_name in report.timings + assert report.timings[step_name] < 1 + + +@pytest.mark.unit +def test_dof_report(): + rn = flash_flowsheet.FS + rn.reset() + check_steps = ( + "build", + "set_operating_conditions", + "initialize", + "solve_initial", + ) + rn.add_action("dof", UnitDofChecker, "fs", check_steps) + rn.run_steps() + report = rn.get_action("dof").report() + print(f"@@ REPORT:\n{report}") + assert report + report_data = report.model_dump() + assert report_data["model"] == 0 # model has DOF=0 + for step_name in check_steps: + assert step_name in report_data["steps"] + for unit, value in report_data["steps"][step_name].items(): + assert value >= 0 # DOF > 0 in all (step, unit) diff --git a/pyproject.toml b/pyproject.toml index ab13b5514b..d496914580 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,6 +117,7 @@ include = ["idaes*"] [project.scripts] idaes = "idaes.commands.base:command_base" +idaes-run = "idaes.commands.run_flowsheet:main" [project.entry-points."idaes.flowsheets"] "0D_Fixed_Bed_TSA" = "idaes.models_extra.temperature_swing_adsorption.fixed_bed_tsa0d_ui" diff --git a/requirements-dev.txt b/requirements-dev.txt index 182773d080..4d14f31772 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,6 +9,9 @@ sphinxcontrib-napoleon>=0.5.0 sphinx-argparse==0.4.0 sphinx-book-theme<=1.1.2,>=1.0.0 sphinx-copybutton==0.5.2 +nbsphinx # for Jupyter Notebooks in the docs +myst-parser # for MystMD +sphinx-autodoc2>=0.5.0 # for autodoc2 ### testing and linting # TODO/NOTE pytest is specified as a dependency in setup.py, but we might want to pin a specific version here