From 33bf7e387dabc4c6d56325be33ab5caa2f5cf272 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 26 Aug 2025 18:18:07 +0000 Subject: [PATCH 01/31] Initial plan From 6787ff91bd7d200f2502721e2c2db219c971d1c2 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 26 Aug 2025 18:33:20 +0000 Subject: [PATCH 02/31] Add comprehensive docstrings for all plotting functions Co-authored-by: moyner <454871+moyner@users.noreply.github.com> --- docs/src/mesh.md | 37 ++++++- ext/JutulGraphMakieExt/JutulGraphMakieExt.jl | 46 ++++++++ ext/JutulMakieExt/performance.jl | 106 +++++++++++++++++++ src/ext/graphmakie_ext.jl | 16 +++ src/ext/makie_ext.jl | 63 +++++++++++ 5 files changed, 266 insertions(+), 2 deletions(-) diff --git a/docs/src/mesh.md b/docs/src/mesh.md index c9f933f72..21fc31877 100644 --- a/docs/src/mesh.md +++ b/docs/src/mesh.md @@ -16,7 +16,9 @@ MRSTWrapMesh Plotting requires that a Makie backend is loaded (typically GLMakie or CairoMakie). The documentation uses `CairoMakie` to work on machines without OpenGL enabled, but if you want fast and interactive plots, `GLMakie` should be preferred. -### Non-mutating +### Mesh visualization + +#### Non-mutating ```@docs plot_mesh @@ -25,7 +27,7 @@ plot_mesh_edges plot_interactive ``` -### Mutating +#### Mutating ```@docs plot_mesh! @@ -33,6 +35,37 @@ plot_cell_data! plot_mesh_edges! ``` +### Multi-model plotting + +```@docs +plot_multimodel_interactive +``` + +### Performance analysis + +```@docs +plot_solve_breakdown +plot_cumulative_solve +plot_cumulative_solve! +plot_linear_convergence +plot_linear_convergence! +``` + +### Graph visualization + +These functions require the GraphMakie.jl package to be loaded. + +```@docs +plot_variable_graph +plot_model_graph +``` + +### Plotting utilities + +```@docs +check_plotting_availability +``` + ## Example: Cartesian meshes For example, we can make a small 2D mesh with given physical dimensions and convert it: diff --git a/ext/JutulGraphMakieExt/JutulGraphMakieExt.jl b/ext/JutulGraphMakieExt/JutulGraphMakieExt.jl index c9602e3be..40b37884b 100644 --- a/ext/JutulGraphMakieExt/JutulGraphMakieExt.jl +++ b/ext/JutulGraphMakieExt/JutulGraphMakieExt.jl @@ -3,6 +3,27 @@ module JutulGraphMakieExt using Jutul, Makie using GraphMakie, Graphs, LayeredLayouts, NetworkLayout + """ + plot_variable_graph(model) + + Plot a graph visualization of variable dependencies in a simulation model. + + # Arguments + - `model`: A Jutul simulation model + + # Returns + - `fig`: Figure object containing the variable dependency graph + + This function creates a directed graph showing the relationships between primary + variables, secondary variables, and parameters in the simulation model. Different + node types are color-coded: + - Primary variables: Blue + - Secondary variables: Orange + - Parameters: Green + + The graph layout uses a layered approach to clearly show the dependency structure + and data flow within the model. + """ function Jutul.plot_variable_graph(model) graph, nodes, = build_variable_graph(model, to_graph = true) c1 = Makie.ColorSchemes.tab10[1] @@ -76,6 +97,31 @@ module JutulGraphMakieExt ) end + """ + plot_model_graph(model; kwarg...) + + Plot a graph visualization of model structure and equation relationships. 
+ + # Arguments + - `model`: A Jutul simulation model (can be `MultiModel` or single model) + + # Keyword Arguments + - Additional keyword arguments are passed to the plotting functions + + # Returns + - Plot object containing the model structure graph + + For single models, this is equivalent to `plot_variable_graph`. For `MultiModel` + instances, this creates a more complex graph showing: + - Individual model components (colored by model) + - Equations within each model + - Cross-term relationships between models + - Edge labels indicating cross-term types + + The visualization helps understand the coupling structure in multi-physics + simulations and can be useful for debugging model setup and analyzing + computational dependencies. + """ function Jutul.plot_model_graph(model; kwarg...) Jutul.plot_variable_graph(model; kwarg...) end diff --git a/ext/JutulMakieExt/performance.jl b/ext/JutulMakieExt/performance.jl index 5f851f2a6..ce765644b 100644 --- a/ext/JutulMakieExt/performance.jl +++ b/ext/JutulMakieExt/performance.jl @@ -1,3 +1,23 @@ +""" + plot_solve_breakdown(allreports, names; per_it = false, include_local_solves = nothing, t_scale = ("s", 1.0)) + +Plot a breakdown of solver performance timing for multiple simulation reports. + +# Arguments +- `allreports`: Vector of simulation reports to analyze +- `names`: Vector of names for each report (used for labeling) + +# Keyword Arguments +- `per_it = false`: If `true`, show time per iteration instead of total time +- `include_local_solves = nothing`: Whether to include local solve timing (auto-detected if `nothing`) +- `t_scale = ("s", 1.0)`: Time scale unit and conversion factor for display + +# Returns +- `(fig, D)`: Figure object and processed timing data + +This function creates a bar chart showing the breakdown of computational time spent +in different parts of the simulation: assembly, local solves, linear solve, and total time. +""" function Jutul.plot_solve_breakdown(allreports, names; per_it = false, include_local_solves = nothing, t_scale = ("s", 1.0)) t_unit, t_num = t_scale if per_it @@ -47,6 +67,30 @@ function Jutul.plot_solve_breakdown(allreports, names; per_it = false, include_l return (fig, D) end +""" + plot_cumulative_solve(allreports, args...; kwarg...) + +Plot cumulative solver performance over time or steps for multiple simulation reports. + +# Arguments +- `allreports`: Vector of simulation reports to analyze +- `args...`: Additional arguments passed to `plot_cumulative_solve!` + +# Keyword Arguments +- `use_time = false`: Plot wall time instead of iterations +- `cumulative = true`: Show cumulative values vs individual step values +- `x_is_time = true`: Use time on x-axis instead of step numbers +- `legend = true`: Show legend for multiple datasets +- `scatter_points = true`: Add scatter points to the plot +- `linewidth = 3.5`: Line width for the plot +- Additional keyword arguments are passed to the plotting functions + +# Returns +- `(fig, ax, alldata, t)`: Figure, axis, processed data, and time vectors + +Creates a new figure and plots cumulative solver performance metrics, useful for +comparing simulation efficiency across different configurations or time periods. +""" function Jutul.plot_cumulative_solve(allreports, arg...; kwarg...) fig = Figure() ax, alldata, t = Jutul.plot_cumulative_solve!(fig[1, 1], allreports, arg...; kwarg...) @@ -54,6 +98,37 @@ function Jutul.plot_cumulative_solve(allreports, arg...; kwarg...) 
return (fig, ax, alldata, t) end +""" + plot_cumulative_solve!(f, allreports, dt = nothing, names = nothing; kwarg...) + +Mutating version of `plot_cumulative_solve` that plots into an existing figure layout. + +# Arguments +- `f`: Figure or layout position to plot into +- `allreports`: Vector of simulation reports to analyze +- `dt = nothing`: Time step sizes (auto-detected if `nothing`) +- `names = nothing`: Names for each dataset (auto-generated if `nothing`) + +# Keyword Arguments +- `use_time = false`: Plot wall time instead of iterations +- `use_title = true`: Show plot title +- `linewidth = 3.5`: Line width for the plot +- `cumulative = true`: Show cumulative values vs individual step values +- `linestyles = missing`: Custom line styles for each dataset +- `colormap = :tab20`: Colormap for multiple datasets +- `t_scale = ("s", 1.0)`: Time scale unit and conversion factor +- `x_is_time = true`: Use time on x-axis instead of step numbers +- `legend = true`: Show legend for multiple datasets +- `ministeps = false`: Include ministep information +- `title = nothing`: Custom plot title +- `scatter_points = true`: Add scatter points to the plot + +# Returns +- `(ax, alldata, t)`: Axis object, processed data, and time vectors + +This function provides fine-grained control over cumulative performance plotting +by allowing integration into custom figure layouts. +""" function Jutul.plot_cumulative_solve!(f, allreports, dt = nothing, names = nothing; use_time = false, use_title = true, @@ -156,6 +231,24 @@ function Jutul.plot_cumulative_solve!(f, allreports, dt = nothing, names = nothi end +""" + plot_linear_convergence(report; kwarg...) + +Plot the convergence history of linear solver iterations from a simulation report. + +# Arguments +- `report`: Simulation report containing linear solver information + +# Keyword Arguments +- Additional keyword arguments are passed to the `Axis` constructor + +# Returns +- `fig`: Figure object containing the convergence plot + +Creates a logarithmic plot showing how the linear solver residual decreases over +iterations. This is useful for analyzing linear solver performance and convergence +behavior during simulation. +""" function Jutul.plot_linear_convergence(report; kwarg...) fig = Figure() ax = Axis(fig[1, 1], yscale = log10; ylabel = "Residual", xlabel = "Linear iterations", kwarg...) @@ -163,6 +256,19 @@ function Jutul.plot_linear_convergence(report; kwarg...) return fig end +""" + plot_linear_convergence!(ax, report) + +Mutating version of `plot_linear_convergence` that plots into an existing axis. + +# Arguments +- `ax`: Makie Axis object to plot into +- `report`: Simulation report or vector of reports containing linear solver information + +This function extracts linear solver residual information from simulation reports +and plots the convergence history. It can handle individual reports, nested report +structures with ministeps, or vectors of multiple reports. +""" function Jutul.plot_linear_convergence!(ax, report::AbstractDict) if haskey(report, :ministeps) plot_linear_convergence!(ax, report[:ministeps]) diff --git a/src/ext/graphmakie_ext.jl b/src/ext/graphmakie_ext.jl index 31b1a8b9c..47069539b 100644 --- a/src/ext/graphmakie_ext.jl +++ b/src/ext/graphmakie_ext.jl @@ -1,9 +1,25 @@ export plot_variable_graph, plot_model_graph +""" + plot_variable_graph(model) + +Plot a graph visualization of variable dependencies in a simulation model. +This function is implemented when GraphMakie is loaded. 
+ +See the GraphMakie extension documentation for detailed arguments and usage. +""" function plot_variable_graph end +""" + plot_model_graph(model; kwarg...) + +Plot a graph visualization of model structure and equation relationships. +This function is implemented when GraphMakie is loaded. + +See the GraphMakie extension documentation for detailed arguments and usage. +""" function plot_model_graph end diff --git a/src/ext/makie_ext.jl b/src/ext/makie_ext.jl index 7f17bcb9f..dd7750b5f 100644 --- a/src/ext/makie_ext.jl +++ b/src/ext/makie_ext.jl @@ -21,6 +21,29 @@ function plot_interactive_impl end +""" + plot_multimodel_interactive(model, states, model_keys = keys(model.models); plot_type = :mesh, shift = Dict(), kwarg...) + +Launch an interactive plot for multi-model simulations with multiple coupled domains. + +# Arguments +- `model`: `MultiModel` instance containing multiple coupled simulation models +- `states`: Vector of simulation states or single state +- `model_keys = keys(model.models)`: Which models to include in the plot + +# Keyword Arguments +- `plot_type = :mesh`: Type of plot (`:mesh`, `:meshscatter`, `:lines`) +- `shift = Dict()`: Dictionary of spatial shifts to apply to each model for visualization +- Additional keyword arguments are passed to `plot_interactive` + +This function creates an interactive visualization for multi-physics simulations +where multiple models are coupled together. Each model domain can be spatially +shifted for better visualization, and the resulting plot allows interactive +exploration of the combined multi-model states over time. + +The function automatically handles data mapping between different model domains +and creates a unified visualization interface. +""" function plot_multimodel_interactive(arg...; kwarg...) check_plotting_availability(interactive = true) plot_multimodel_interactive_impl(arg...; kwarg...) @@ -175,22 +198,62 @@ function check_plotting_availability_impl end +""" + plot_solve_breakdown(allreports, names; kwarg...) + +Plot a breakdown of solver performance timing for multiple simulation reports. +This function is implemented when a Makie backend is loaded. + +See the Makie extension documentation for detailed arguments and usage. +""" function plot_solve_breakdown end +""" + plot_cumulative_solve(allreports, args...; kwarg...) + +Plot cumulative solver performance over time or steps for multiple simulation reports. +This function is implemented when a Makie backend is loaded. + +See the Makie extension documentation for detailed arguments and usage. +""" function plot_cumulative_solve end +""" + plot_cumulative_solve!(f, allreports, args...; kwarg...) + +Mutating version of `plot_cumulative_solve` that plots into an existing figure layout. +This function is implemented when a Makie backend is loaded. + +See the Makie extension documentation for detailed arguments and usage. +""" function plot_cumulative_solve! end +""" + plot_linear_convergence(report; kwarg...) + +Plot the convergence history of linear solver iterations from a simulation report. +This function is implemented when a Makie backend is loaded. + +See the Makie extension documentation for detailed arguments and usage. +""" function plot_linear_convergence end +""" + plot_linear_convergence!(ax, report) + +Mutating version of `plot_linear_convergence` that plots into an existing axis. +This function is implemented when a Makie backend is loaded. + +See the Makie extension documentation for detailed arguments and usage. +""" function plot_linear_convergence! 
end \ No newline at end of file From 49df6eefe286dd2edd49304755484c7e81d4631c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 26 Aug 2025 18:37:35 +0000 Subject: [PATCH 03/31] Create dedicated plotting documentation and update build structure Co-authored-by: moyner <454871+moyner@users.noreply.github.com> --- docs/make.jl | 1 + docs/src/mesh.md | 35 +----------- docs/src/plotting.md | 131 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 134 insertions(+), 33 deletions(-) create mode 100644 docs/src/plotting.md diff --git a/docs/make.jl b/docs/make.jl index 59715845d..9d1116f18 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -20,6 +20,7 @@ makedocs(; "Usage" => "usage.md", "optimization.md", "mesh.md", + "Plotting" => "plotting.md", "Internals" => "internals.md", "Docstrings" => "docstrings.md" ], diff --git a/docs/src/mesh.md b/docs/src/mesh.md index 21fc31877..4a6a13f93 100644 --- a/docs/src/mesh.md +++ b/docs/src/mesh.md @@ -14,9 +14,9 @@ MRSTWrapMesh ## Plotting functions -Plotting requires that a Makie backend is loaded (typically GLMakie or CairoMakie). The documentation uses `CairoMakie` to work on machines without OpenGL enabled, but if you want fast and interactive plots, `GLMakie` should be preferred. +Plotting requires that a Makie backend is loaded (typically GLMakie or CairoMakie). For comprehensive plotting documentation and examples, see the [Plotting](plotting.md) section. -### Mesh visualization +### Basic mesh plotting #### Non-mutating @@ -35,37 +35,6 @@ plot_cell_data! plot_mesh_edges! ``` -### Multi-model plotting - -```@docs -plot_multimodel_interactive -``` - -### Performance analysis - -```@docs -plot_solve_breakdown -plot_cumulative_solve -plot_cumulative_solve! -plot_linear_convergence -plot_linear_convergence! -``` - -### Graph visualization - -These functions require the GraphMakie.jl package to be loaded. - -```@docs -plot_variable_graph -plot_model_graph -``` - -### Plotting utilities - -```@docs -check_plotting_availability -``` - ## Example: Cartesian meshes For example, we can make a small 2D mesh with given physical dimensions and convert it: diff --git a/docs/src/plotting.md b/docs/src/plotting.md new file mode 100644 index 000000000..bbc491243 --- /dev/null +++ b/docs/src/plotting.md @@ -0,0 +1,131 @@ +# Plotting and Visualization + +Jutul.jl provides comprehensive plotting capabilities for mesh visualization, performance analysis, and model structure visualization. All plotting functionality requires a Makie backend to be loaded. + +## Requirements + +Plotting requires that a Makie backend is loaded (typically GLMakie or CairoMakie). The documentation uses `CairoMakie` to work on machines without OpenGL enabled, but if you want fast and interactive plots, `GLMakie` should be preferred. + +```julia +using GLMakie # For interactive plots +# or +using CairoMakie # For static plots/headless systems +``` + +## Mesh Visualization + +### Basic mesh plotting + +```@docs +plot_mesh +plot_mesh! +plot_cell_data +plot_cell_data! +plot_mesh_edges +plot_mesh_edges! +``` + +### Interactive plotting + +```@docs +plot_interactive +plot_multimodel_interactive +``` + +## Performance Analysis + +These functions help analyze simulation performance and solver behavior. + +```@docs +plot_solve_breakdown +plot_cumulative_solve +plot_cumulative_solve! +plot_linear_convergence +plot_linear_convergence! 
+``` + +## Model Structure Visualization + +These functions require the GraphMakie.jl package to be loaded in addition to a Makie backend. + +```julia +using GraphMakie +``` + +```@docs +plot_variable_graph +plot_model_graph +``` + +## Utilities + +```@docs +check_plotting_availability +``` + +## Examples + +### Basic mesh plotting + +```julia +using Jutul, CairoMakie + +# Create a simple mesh +nx, ny = 10, 5 +mesh = CartesianMesh((nx, ny), (100.0, 50.0)) + +# Plot the mesh structure +fig, ax, p = plot_mesh(mesh) + +# Plot cell data with values +cell_values = 1:number_of_cells(mesh) +fig, ax, p = plot_cell_data(mesh, cell_values) + +# Add mesh edges +plot_mesh_edges!(ax, mesh) +fig +``` + +### Performance analysis + +```julia +using Jutul, CairoMakie + +# After running simulations and collecting reports +reports = [report1, report2, report3] +names = ["Configuration A", "Configuration B", "Configuration C"] + +# Plot solver breakdown +fig, data = plot_solve_breakdown(reports, names) + +# Plot cumulative solve time +fig, ax, alldata, t = plot_cumulative_solve(reports, names = names) + +# Plot linear convergence for a single report +fig = plot_linear_convergence(reports[1]) +``` + +### Interactive visualization + +```julia +using Jutul, GLMakie + +# Create mesh and simulation states +mesh = CartesianMesh((10, 10, 5), (100.0, 100.0, 50.0)) +states = [state1, state2, state3, ...] # From simulation + +# Launch interactive plot +plot_interactive(mesh, states) +``` + +### Model structure visualization + +```julia +using Jutul, GraphMakie, GLMakie + +# Plot variable dependencies +fig = plot_variable_graph(model) + +# For multi-models, plot the full structure +fig = plot_model_graph(multimodel) +``` \ No newline at end of file From b5f99e4eab6bd2996b5942113e5ab0f9f2304c54 Mon Sep 17 00:00:00 2001 From: Alexandre Jiao Date: Wed, 27 Aug 2025 13:15:10 +0200 Subject: [PATCH 04/31] calibration --- src/LBFGS/constrained_optimizer.jl | 127 +++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/src/LBFGS/constrained_optimizer.jl b/src/LBFGS/constrained_optimizer.jl index 8a914d366..7a38e1e15 100644 --- a/src/LBFGS/constrained_optimizer.jl +++ b/src/LBFGS/constrained_optimizer.jl @@ -244,6 +244,133 @@ function box_bfgs(u0, f, bounds; kwarg...) return box_bfgs(u0, f, bounds...; kwarg...) end +function log_box_bfgs(x0, f, lb, ub; kwargs...) 
+ n = length(x0) + length(lb) == n || throw(ArgumentError("Length of lower bound ($(length(lb))) must match length of initial guess ($n)")) + length(ub) == n || throw(ArgumentError("Length of upper bound ($(length(ub))) must match length of initial guess ($n)")) + + # Check bounds and initial guess + for i in eachindex(x0, lb, ub) + if x0[i] ≤ 0 || lb[i] ≤ 0 + throw(ArgumentError("Log scaling requires positive values: x0[$i] = $(x0[i]), lb[$i] = $(lb[i]) must be > 0")) + end + if x0[i] < lb[i] || x0[i] > ub[i] + throw(ArgumentError("Initial guess x0[$i] = $(x0[i]) is outside bounds [$(lb[i]), $(ub[i])]")) + end + if lb[i] >= ub[i] + throw(ArgumentError("Lower bound must be less than upper bound for index $i: lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) + end + end + + # Log-transform bounds + log_lb = log.(lb) + log_ub = log.(ub) + δ_log = log_ub .- log_lb + + # Transformation functions + function x_to_u(x) + log_x = log.(x) + u = (log_x .- log_lb) ./ δ_log + return u + end + + function u_to_x(u) + log_x = u .* δ_log .+ log_lb + x = exp.(log_x) + return x + end + + function dx_to_du!(g, x) + g .= g .* x .* δ_log # Chain rule: df/du = df/dx * dx/du + end + + # Wrapped objective function + function F(u) + x = u_to_x(u) + obj, g = f(x) + dx_to_du!(g, x) + return (obj, g) + end + + # Initial guess in transformed space + u0 = x_to_u(x0) + + # Optimize in unit box + v, u, history = unit_box_bfgs(u0, F; kwargs...) + + # Transform back to original space + x = u_to_x(u) + return (v, x, history) +end + +function log_box_bfgs(u0, f, bounds; kwargs...) + # Unpack bounds + lb, ub = bounds + return log_box_bfgs(u0, f, lb, ub; kwargs...) +end + + + +function box_bfgs_log_scale(x0, f, lb, ub; kwarg...) + @info "log scale box BFGS optimization started" + n = length(x0) + length(lb) == n || throw(ArgumentError("Length of lower bound ($(length(lb))) must match length of initial guess ($n)")) + length(ub) == n || throw(ArgumentError("Length of upper bound ($(length(ub))) must match length of initial guess ($n)")) + # Check bounds + for i in eachindex(x0, lb, ub) + if x0[i] < lb[i] || x0[i] > ub[i] + throw(ArgumentError("Initial guess x0[$i] = $(x0[i]) is outside bounds [$(lb[i]), $(ub[i])]")) + end + if !isfinite(lb[i]) || !isfinite(ub[i]) + throw(ArgumentError("Bounds must be finite, got lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) + end + if lb[i] >= ub[i] + throw(ArgumentError("Lower bound must be less than upper bound for index $i: lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) + end + end + δ = ub .- lb + base = ub ./ lb + # Ensure scaling_base is positive and finite + for i in 1:n + if base[i] <= 0 || !isfinite(base[i]) + throw(ArgumentError("Scaling base must be positive and finite, got scaling_base[$i] = $(scaling_base[i])")) + end + end + + function dx_to_du!(g,x) + for i in 1:n + g[i] = g[i] ./ (log(base[i]) * x[i]) + end + end + + function x_to_u(x) + # Convert x to u in the unit box [0, 1]^n + u = log.(x ./ lb) ./ log.(base) + return u + end + + function u_to_x(u) + x = lb .* exp.(u .* log.(base)) + return x + end + + function F(u) + x = u_to_x(u) + obj, g = f(x) + dx_to_du!(g,x) + return (obj, g) + end + + u0 = x_to_u(x0) + v, u, history = unit_box_bfgs(u0, F; kwarg...) + x = u_to_x(u) + return (v, x, history) +end + +function box_bfgs_log_scale(u0, f, bounds; kwarg...) + return box_bfgs_log_scale(u0, f, bounds...; kwarg...) +end + function get_search_direction(u0, g0, Hi, HiPrev, c) # Find search-direction which is (sum of) the projection(s) of Hi*g0 # restricted to directions with non-active constraints. 
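The gradient rescaling in `dx_to_du!` above is the chain rule for the transform `u = (log.(x) .- log.(lb)) ./ δ_log`: since `x = exp.(u .* δ_log .+ log.(lb))`, each `∂x_i/∂u_i = x_i * δ_log[i]` and hence `∂f/∂u_i = ∂f/∂x_i * x_i * δ_log[i]`. A self-contained finite-difference check of that identity, shown as a sketch only (the toy objective, bounds and evaluation point are assumptions, not part of this patch):

```julia
# Check the chain rule used by dx_to_du!: df/du_i = df/dx_i * x_i * δ_log[i]
lb, ub = [0.1, 0.5], [10.0, 20.0]
δ_log = log.(ub) .- log.(lb)
u_to_x(u) = exp.(u .* δ_log .+ log.(lb))
f(x) = sum(x .^ 2)                          # toy objective, gradient 2 .* x
u = [0.3, 0.7]
x = u_to_x(u)
g_u = (2 .* x) .* x .* δ_log                # analytic gradient in u via the chain rule
h = 1e-7
g_fd = [(f(u_to_x(u .+ h .* ((1:2) .== i))) - f(x)) / h for i in 1:2]
@assert isapprox(g_u, g_fd; rtol = 1e-4)
```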
From 37f76658470b5fa93a74bf1b2d1783f176c900cd Mon Sep 17 00:00:00 2001 From: Xavier Raynaud Date: Mon, 1 Sep 2025 16:40:32 +0200 Subject: [PATCH 05/31] support for log scaling in BFGS --- src/LBFGS/constrained_optimizer.jl | 70 ++++-------------------------- 1 file changed, 9 insertions(+), 61 deletions(-) diff --git a/src/LBFGS/constrained_optimizer.jl b/src/LBFGS/constrained_optimizer.jl index 7a38e1e15..cf56a1b1b 100644 --- a/src/LBFGS/constrained_optimizer.jl +++ b/src/LBFGS/constrained_optimizer.jl @@ -241,11 +241,15 @@ function box_bfgs(x0, f, lb, ub; kwarg...) end function box_bfgs(u0, f, bounds; kwarg...) + return box_bfgs(u0, f, bounds...; kwarg...) + end function log_box_bfgs(x0, f, lb, ub; kwargs...) + n = length(x0) + length(lb) == n || throw(ArgumentError("Length of lower bound ($(length(lb))) must match length of initial guess ($n)")) length(ub) == n || throw(ArgumentError("Length of upper bound ($(length(ub))) must match length of initial guess ($n)")) @@ -265,6 +269,7 @@ function log_box_bfgs(x0, f, lb, ub; kwargs...) # Log-transform bounds log_lb = log.(lb) log_ub = log.(ub) + δ_log = log_ub .- log_lb # Transformation functions @@ -300,77 +305,20 @@ function log_box_bfgs(x0, f, lb, ub; kwargs...) # Transform back to original space x = u_to_x(u) + return (v, x, history) + end function log_box_bfgs(u0, f, bounds; kwargs...) # Unpack bounds + lb, ub = bounds return log_box_bfgs(u0, f, lb, ub; kwargs...) + end - -function box_bfgs_log_scale(x0, f, lb, ub; kwarg...) - @info "log scale box BFGS optimization started" - n = length(x0) - length(lb) == n || throw(ArgumentError("Length of lower bound ($(length(lb))) must match length of initial guess ($n)")) - length(ub) == n || throw(ArgumentError("Length of upper bound ($(length(ub))) must match length of initial guess ($n)")) - # Check bounds - for i in eachindex(x0, lb, ub) - if x0[i] < lb[i] || x0[i] > ub[i] - throw(ArgumentError("Initial guess x0[$i] = $(x0[i]) is outside bounds [$(lb[i]), $(ub[i])]")) - end - if !isfinite(lb[i]) || !isfinite(ub[i]) - throw(ArgumentError("Bounds must be finite, got lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) - end - if lb[i] >= ub[i] - throw(ArgumentError("Lower bound must be less than upper bound for index $i: lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) - end - end - δ = ub .- lb - base = ub ./ lb - # Ensure scaling_base is positive and finite - for i in 1:n - if base[i] <= 0 || !isfinite(base[i]) - throw(ArgumentError("Scaling base must be positive and finite, got scaling_base[$i] = $(scaling_base[i])")) - end - end - - function dx_to_du!(g,x) - for i in 1:n - g[i] = g[i] ./ (log(base[i]) * x[i]) - end - end - - function x_to_u(x) - # Convert x to u in the unit box [0, 1]^n - u = log.(x ./ lb) ./ log.(base) - return u - end - - function u_to_x(u) - x = lb .* exp.(u .* log.(base)) - return x - end - - function F(u) - x = u_to_x(u) - obj, g = f(x) - dx_to_du!(g,x) - return (obj, g) - end - - u0 = x_to_u(x0) - v, u, history = unit_box_bfgs(u0, F; kwarg...) - x = u_to_x(u) - return (v, x, history) -end - -function box_bfgs_log_scale(u0, f, bounds; kwarg...) - return box_bfgs_log_scale(u0, f, bounds...; kwarg...) -end - function get_search_direction(u0, g0, Hi, HiPrev, c) # Find search-direction which is (sum of) the projection(s) of Hi*g0 # restricted to directions with non-active constraints. 
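With `box_bfgs_log_scale` removed, `log_box_bfgs` is the single entry point for log-scaled box optimization. A minimal call sketch (the objective, bounds and starting point below are illustrative assumptions, not part of the patch): the callback returns `(value, gradient)` with the gradient taken with respect to `x`, all of `x0`, `lb` and `ub` must be strictly positive, and keyword arguments are forwarded unchanged to `unit_box_bfgs`.

```julia
# Illustrative objective: f(x) = sum(log.(x).^2) with gradient 2 .* log.(x) ./ x
f = x -> (sum(log.(x) .^ 2), 2 .* log.(x) ./ x)
x0 = [10.0, 0.5]      # strictly positive and inside the bounds
lb = [1e-3, 1e-3]     # log scaling requires positive lower bounds
ub = [1e3, 1e3]
v, x_opt, history = log_box_bfgs(x0, f, lb, ub)
```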
From ad9f30b6016e15564b5daefae5466470550ccdba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Brostr=C3=B8m?= Date: Tue, 26 Aug 2025 14:18:00 +0200 Subject: [PATCH 06/31] Change adjoint step info to use time at new timestep --- src/ad/gradients.jl | 8 ++++---- src/core_types/core_types.jl | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ad/gradients.jl b/src/ad/gradients.jl index bd9e0a0ae..dd8d67276 100644 --- a/src/ad/gradients.jl +++ b/src/ad/gradients.jl @@ -989,8 +989,8 @@ from this function will be passed to objective functions as `step_info`. This function is a `Dict` with the following fields: # Fields -- `:time` - The time at the start of the step. To get time at the end of the - step, use `step_info[:time] + step_info[:dt]` +- `:time` - The time at the end of the step. To get time at the start of the + step, use `step_info[:time] - step_info[:dt]` - `:dt` - The time step size for this step. - `:step` - The step number, starting at 1. Not that this is the report step, and multiple `step_info` entries could have the same step if substeps are @@ -1001,7 +1001,7 @@ function is a `Dict` with the following fields: - `:substep_global` - The global substep number, starting at 1 and will not reset between global steps. - `:Nsubstep_global` - The total number of substeps in the simulation. -- `:total_time` - The total time of the simulation (i.e. dt + time at the final +- `:total_time` - The total time of the simulation (i.e. time at the final step and substep) """ function optimization_step_info(step::Int, time::Real, dt::Real; @@ -1030,7 +1030,7 @@ end function optimization_step_info(step::Int, dts::Vector; kwarg...) t = 0.0 - for i in 1:(step-1) + for i in 1:(step) t += dts[i] end return optimization_step_info(step, t, dts[step]; Nstep = length(dts), total_time = sum(dts), kwarg...) 
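An illustrative reading of the updated convention, assuming the `time` argument is stored directly as the `:time` field described in the docstring above (the step sizes below are made up for the example):

```julia
dts = [1.0, 2.0, 3.0]
info = Jutul.optimization_step_info(2, dts)
info[:time]              # 3.0 = dts[1] + dts[2], time at the end of step 2
info[:time] - info[:dt]  # 1.0, time at the start of step 2
info[:total_time]        # 6.0 = sum(dts)
```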
diff --git a/src/core_types/core_types.jl b/src/core_types/core_types.jl index 88ce61aa4..7e7109236 100644 --- a/src/core_types/core_types.jl +++ b/src/core_types/core_types.jl @@ -1426,6 +1426,7 @@ function AdjointPackedResult(states, dt::Vector{Float64}, forces, step_index) prev_ministep = 0 step_infos = missing for (i, dt_i) in enumerate(dt) + time += dt_i step_ix = step_index[i] if step_ix != prev_ministep ministep_ix = 1 @@ -1445,7 +1446,6 @@ function AdjointPackedResult(states, dt::Vector{Float64}, forces, step_index) else push!(step_infos, step_info) end - time += dt_i end if !ismissing(forces) forces = map(i -> forces_for_timestep(nothing, forces, dt, i), step_index) From 60f87e4e99890b1cf2bfdab87b561000ab374050 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Brostr=C3=B8m?= Date: Fri, 29 Aug 2025 16:09:24 +0200 Subject: [PATCH 07/31] Change the progress recorder API --- src/simulator/recorder.jl | 72 +++++++++++++++++++++++--------------- src/simulator/simulator.jl | 13 ++++--- 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/src/simulator/recorder.jl b/src/simulator/recorder.jl index 3e6ae8fc0..0ba8ba53d 100644 --- a/src/simulator/recorder.jl +++ b/src/simulator/recorder.jl @@ -4,38 +4,57 @@ step(r) = r.recorder.step substep(r) = r.subrecorder.step # Progress recorder stuff -function nextstep_global!(r::ProgressRecorder, dT, prev_success = !isnan(r.recorder.dt)) - g = r.recorder - l = r.subrecorder - g.iteration = l.iterations - # A bit dicey, just need to get this working - g.failed += l.failed - nextstep!(g, dT, prev_success) - reset!(r.subrecorder) +function recorder_start_step!(rec::ProgressRecorder, dt, level::Symbol) + level == :local || level == :global || error("level must be :local or :global") + if level == :local + rec.subrecorder.dt = dt + else # global + rec.recorder.dt = dt + end end -function nextstep_local!(r::ProgressRecorder, dT, prev_success = !isnan(r.local_recorder.dt)) - nextstep!(r.subrecorder, dT, prev_success) +function recorder_log_step!(rec::ProgressRecorder, success, level::Symbol) + level == :local || level == :global || error("level must be :local or :global") + l = rec.subrecorder + g = rec.recorder + + function update!(solver_rec, success) + if success + solver_rec.step += 1 + solver_rec.time += solver_rec.dt + else + solver_rec.failed += solver_rec.iteration + end + solver_rec.iterations += solver_rec.iteration + solver_rec.iteration = 0 + end + + if level == :local + update!(l, success) + else # global + g.iteration = l.iterations + g.failed += l.failed + update!(g, success) + reset!(l) + end end -function next_iteration!(rec, report) - if haskey(report, :update_time) - rec.subrecorder.iteration += 1 +function recorder_current_time(rec::ProgressRecorder, level::Symbol) + level == :local || level == :global || error("level must be :local or :global") + if level == :local + return rec.subrecorder.time + else # global + return rec.recorder.time + rec.subrecorder.time end end -function nextstep!(l::SolveRecorder, dT, success) - # Update time - if success - l.step += 1 - l.time += l.dt - else - l.failed += l.iteration +function recorder_increment_iteration!(rec::ProgressRecorder, report, level::Symbol) + level == :local || level == :global || error("level must be :local or :global") + if level == :local + if haskey(report, :update_time) + rec.subrecorder.iteration += 1 + end end - l.dt = dT - # Update iterations - l.iterations += l.iteration - l.iteration = 0 end function reset!(r::SolveRecorder, dt = NaN; step = 1, iterations = 0, 
iteration = 0, time = 0.0) @@ -63,8 +82,3 @@ function reset!(target::ProgressRecorder, source::ProgressRecorder) reset!(target.recorder, source.recorder) reset!(target.subrecorder, source.recorder) end - -function current_time(r::ProgressRecorder) - return r.recorder.time + r.subrecorder.time -end - diff --git a/src/simulator/simulator.jl b/src/simulator/simulator.jl index 1b84fc1be..d469ada9d 100644 --- a/src/simulator/simulator.jl +++ b/src/simulator/simulator.jl @@ -201,7 +201,7 @@ function simulate!(sim::JutulSimulator, timesteps::AbstractVector; for step_no = first_step:no_steps dT = timesteps[step_no] forces_step = forces_for_timestep(sim, forces, timesteps, step_no, per_step = forces_per_step) - nextstep_global!(rec, dT) + recorder_start_step!(rec, dT, :global) new_simulation_control_step_message(info_level, p, rec, t_elapsed, step_no, no_steps, dT, t_tot, start_date) if config[:output_substates] substates = JUTUL_OUTPUT_TYPE[] @@ -219,6 +219,7 @@ function simulate!(sim::JutulSimulator, timesteps::AbstractVector; subrep = JUTUL_OUTPUT_TYPE() subrep[:ministeps] = rep subrep[:total_time] = t_step + recorder_log_step!(rec, step_done, :global) if step_done lastrep = rep[end] if get(lastrep, :stopnow, false) @@ -284,11 +285,13 @@ function solve_timestep!(sim, dT, forces, max_its, config; while !done # Make sure that we hit the endpoint in case timestep selection is too optimistic. dt = min(dt, dT - t_local) + recorder_start_step!(rec, dt, :local) # Attempt to solve current step @tic "solve" ok, s = solve_ministep(sim, dt, forces, max_its, config; kwarg...) + recorder_log_step!(rec, ok, :local) + # We store the report even if it is a failure. push!(ministep_reports, s) - nextstep_local!(rec, dt, ok) n_so_far = length(ministep_reports) if ok if 2 > info_level > 1 @@ -357,7 +360,7 @@ function perform_step!(storage, model, dt, forces, config; end # Update the properties and equations rec = storage.recorder - time = rec.recorder.time + dt + time = recorder_current_time(rec, :global) + dt prep, prep_storage = get_prepare_step_handler(storage) # Apply a pre-step if it exists if ismissing(prep) @@ -517,7 +520,7 @@ function solve_ministep(sim, dt, forces, max_iter, cfg; report = JUTUL_OUTPUT_TYPE() report[:dt] = dt step_reports = JUTUL_OUTPUT_TYPE[] - cur_time = current_time(rec) + cur_time = recorder_current_time(rec, :global) t_prepare = @elapsed if prepare update_before_step!(sim, dt, forces, time = cur_time, recorder = rec, update_explicit = update_explicit) end @@ -539,7 +542,7 @@ function solve_ministep(sim, dt, forces, max_iter, cfg; end break end - next_iteration!(rec, step_report) + recorder_increment_iteration!(rec, step_report, :local) if done break end From 179a7ff02879557664b4617d567518e3b57a8b80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Brostr=C3=B8m?= Date: Fri, 29 Aug 2025 16:22:36 +0200 Subject: [PATCH 08/31] Add tests for the recorder API --- test/utils.jl | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/test/utils.jl b/test/utils.jl index 51b77f1fa..c78c4f585 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -445,3 +445,64 @@ import Jutul.AdjointsDI: devectorize_nested, vectorize_nested, devectorize_neste @test length(x) == 2 @test sort(x) == [170.0, 200.0] end + +import Jutul: + ProgressRecorder, + recorder_start_step!, + recorder_increment_iteration!, + recorder_log_step!, + recorder_current_time + +# Mock simulation to test the global and local recorders +@testset "Progress recorder" begin + rec = 
ProgressRecorder() + report = Dict(:update_time => nothing) # Just need this to be present for the API + + # First global step + recorder_start_step!(rec, 1.0, :global) + recorder_start_step!(rec, 0.5, :local) + for i in 1:5 # some iterations to solve + recorder_increment_iteration!(rec, report, :local) + end + recorder_log_step!(rec, true, :local) + @test recorder_current_time(rec, :local) == 0.5 + + recorder_start_step!(rec, 0.5, :local) + for i in 1:3 # some iterations to solve + recorder_increment_iteration!(rec, report, :local) + end + recorder_log_step!(rec, true, :local) + @test recorder_current_time(rec, :local) == 1.0 + + recorder_log_step!(rec, true, :global) + @test recorder_current_time(rec, :global) == 1.0 + + # Second global step + recorder_start_step!(rec, 1.0, :global) + recorder_start_step!(rec, 0.75, :local) + for i in 1:10 # some iterations to solve + recorder_increment_iteration!(rec, report, :local) + end + recorder_log_step!(rec, false, :local) # "Simulation" does not converge + @test recorder_current_time(rec, :local) == 0.0 + + # Emulate cut + recorder_start_step!(rec, 0.5, :local) + for i in 1:4 # some iterations to solve + recorder_increment_iteration!(rec, report, :local) + end + recorder_log_step!(rec, true, :local) + @test recorder_current_time(rec, :local) == 0.5 + + recorder_start_step!(rec, 0.5, :local) + for i in 1:2 # some iterations to solve + recorder_increment_iteration!(rec, report, :local) + end + recorder_log_step!(rec, true, :local) + @test recorder_current_time(rec, :local) == 1.0 + + recorder_log_step!(rec, true, :global) + @test recorder_current_time(rec, :global) == 2.0 + + @test rec.recorder.iterations == 24 # Check total iterations +end From d58bee8d3559529870f5ea5cf45a15340d135f90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Brostr=C3=B8m?= Date: Mon, 1 Sep 2025 14:15:08 +0200 Subject: [PATCH 09/31] Rename recorder reset functions --- src/ad/AdjointsDI/adjoints.jl | 2 +- src/ad/gradients.jl | 2 +- src/simulator/recorder.jl | 28 +++++++++++++++++++--------- src/simulator/simulator.jl | 2 +- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/src/ad/AdjointsDI/adjoints.jl b/src/ad/AdjointsDI/adjoints.jl index 2006fe70f..c63d86903 100644 --- a/src/ad/AdjointsDI/adjoints.jl +++ b/src/ad/AdjointsDI/adjoints.jl @@ -138,7 +138,7 @@ function update_sensitivities_generic!(∇G, X, H, i, G, adjoint_storage, packed report_step = step_info[:step] for skey in [:backward, :forward] s = adjoint_storage[skey] - Jutul.reset!(Jutul.progress_recorder(s), step = report_step, time = current_time) + Jutul.recorder_reset!(Jutul.progress_recorder(s), step = report_step, time = current_time) end λ = Jutul.next_lagrange_multiplier!(adjoint_storage, i, G, state0, state, state_next, packed_steps) diff --git a/src/ad/gradients.jl b/src/ad/gradients.jl index dd8d67276..65afeb81e 100644 --- a/src/ad/gradients.jl +++ b/src/ad/gradients.jl @@ -456,7 +456,7 @@ function update_sensitivities!(∇G, i, G, adjoint_storage, state0, state, state forces = packed_step.forces for skey in [:backward, :forward, :parameter] s = adjoint_storage[skey] - reset!(progress_recorder(s), step = report_step, time = current_time) + recorder_reset!(progress_recorder(s), step = report_step, time = current_time) end λ = next_lagrange_multiplier!(adjoint_storage, i, G, state0, state, state_next, packed_steps) # λ, t, dt, forces diff --git a/src/simulator/recorder.jl b/src/simulator/recorder.jl index 0ba8ba53d..ff765c251 100644 --- a/src/simulator/recorder.jl +++ 
b/src/simulator/recorder.jl @@ -35,7 +35,7 @@ function recorder_log_step!(rec::ProgressRecorder, success, level::Symbol) g.iteration = l.iterations g.failed += l.failed update!(g, success) - reset!(l) + recorder_reset!(l) end end @@ -57,7 +57,7 @@ function recorder_increment_iteration!(rec::ProgressRecorder, report, level::Sym end end -function reset!(r::SolveRecorder, dt = NaN; step = 1, iterations = 0, iteration = 0, time = 0.0) +function recorder_reset!(r::SolveRecorder, dt = NaN; step = 1, iterations = 0, iteration = 0, time = 0.0) r.step = step r.iterations = iterations r.time = time @@ -65,7 +65,7 @@ function reset!(r::SolveRecorder, dt = NaN; step = 1, iterations = 0, iteration r.dt = dt end -function reset!(target::SolveRecorder, source::SolveRecorder) +function recorder_reset!(target::SolveRecorder, source::SolveRecorder) target.step = source.step target.iterations = source.iterations target.time = source.time @@ -73,12 +73,22 @@ function reset!(target::SolveRecorder, source::SolveRecorder) target.dt = source.dt end -function reset!(r::ProgressRecorder, dt = NaN; kwarg...) - reset!(r.recorder, dt; kwarg...) - reset!(r.subrecorder, 0.0) +function recorder_reset!(r::ProgressRecorder, dt = NaN; kwarg...) + recorder_reset!(r.recorder, dt; kwarg...) + recorder_reset!(r.subrecorder, 0.0) end -function reset!(target::ProgressRecorder, source::ProgressRecorder) - reset!(target.recorder, source.recorder) - reset!(target.subrecorder, source.recorder) +function recorder_reset!(target::ProgressRecorder, source::ProgressRecorder) + recorder_reset!(target.recorder, source.recorder) + recorder_reset!(target.subrecorder, source.recorder) end + +@deprecate reset!(r::SolveRecorder, dt = NaN; + step = 1, + iterations = 0, + iteration = 0, + time = 0.0) recorder_reset!(r, dt; step, iterations, iteration, time) + +@deprecate reset!(target::SolveRecorder, source::SolveRecorder) recorder_reset!(targe, source) +@deprecate reset!(r::ProgressRecorder, dt = NaN; kwarg...) recorder_reset!(r, dt; kwarg...) 
+@deprecate reset!(target::ProgressRecorder, source::ProgressRecorder) recorder_reset!(target, source) diff --git a/src/simulator/simulator.jl b/src/simulator/simulator.jl index d469ada9d..b33595e82 100644 --- a/src/simulator/simulator.jl +++ b/src/simulator/simulator.jl @@ -159,7 +159,7 @@ function simulate!(sim::JutulSimulator, timesteps::AbstractVector; ) rec = progress_recorder(sim) # Reset recorder just in case since we are starting a new simulation - reset!(rec) + recorder_reset!(rec) forces, forces_per_step = preprocess_forces(sim, forces) start_timestamp = now() if isnothing(config) From 094691891c28791894061e5499ca8de53546b3a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Tue, 2 Sep 2025 20:01:33 +0200 Subject: [PATCH 10/31] Fix to BoomerAMG standalone preconditioner (#181) * Update JutulHYPREExt.jl * Add AMG tests to heat equation test * Add HYPRE to test env --- Project.toml | 2 +- ext/JutulHYPREExt/JutulHYPREExt.jl | 4 ++-- test/test_systems/heat_2d.jl | 15 +++++++++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/Project.toml b/Project.toml index ab47b7f65..3c0ce9c65 100644 --- a/Project.toml +++ b/Project.toml @@ -120,4 +120,4 @@ NetworkLayout = "46757867-2c16-5918-afeb-47bfcb05e46a" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" [targets] -test = ["Meshes", "KaHyPar"] +test = ["Meshes", "KaHyPar", "HYPRE"] diff --git a/ext/JutulHYPREExt/JutulHYPREExt.jl b/ext/JutulHYPREExt/JutulHYPREExt.jl index d73cedb96..f854ee57f 100644 --- a/ext/JutulHYPREExt/JutulHYPREExt.jl +++ b/ext/JutulHYPREExt/JutulHYPREExt.jl @@ -61,10 +61,10 @@ module JutulHYPREExt J_h, r_h, x_h = D[:hypre_system] reassemble_matrix!(J_h, D, J, executor) else - r_h = HYPRE.HYPREVector(r) + r_h = HYPRE.HYPREVector(copy(r)) x_h = HYPRE.HYPREVector(copy(r)) D[:J] = J - # D[:n] = n + D[:n] = length(r) # (; ilower, iupper) = r_h # D[:vector_indices] = HYPRE.HYPRE_BigInt.(ilower:iupper) J_h = transfer_matrix_to_hypre(J, D, executor) diff --git a/test/test_systems/heat_2d.jl b/test/test_systems/heat_2d.jl index 023e434e6..701700d74 100644 --- a/test/test_systems/heat_2d.jl +++ b/test/test_systems/heat_2d.jl @@ -1,7 +1,7 @@ using Jutul using Test -function test_heat_2d(nx = 3, ny = nx) +function test_heat_2d(nx = 3, ny = nx; kwarg...) sys = SimpleHeatSystem() # Unit square g = CartesianMesh((nx, ny), (1.0, 1.0)) @@ -13,7 +13,7 @@ function test_heat_2d(nx = 3, ny = nx) T0 = rand(nc) state0 = setup_state(model, Dict(:T=>T0)) sim = Simulator(model, state0 = state0) - states, = simulate(sim, [1.0], info_level = -1) + states, = simulate(sim, [1.0]; info_level = -1, kwarg...) 
return states end @@ -23,3 +23,14 @@ end true end end + +using HYPRE +@testset "Algebraic multigrid heat" begin + lsolve = GenericKrylov(:bicgstab, preconditioner = Jutul.AMGPreconditioner(:smoothed_aggregation)) + states = test_heat_2d(4, 4, linear_solver = lsolve) + @test length(states) == 1 + + lsolve = GenericKrylov(:bicgstab, preconditioner = Jutul.BoomerAMGPreconditioner()) + states = test_heat_2d(4, 4, linear_solver = lsolve) + @test length(states) == 1 +end From 92d2c5178e84d4376019af1a69ca5917b0c633b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Thu, 4 Sep 2025 14:50:20 +0200 Subject: [PATCH 11/31] Remove old Meshes.jl code (#182) --- Project.toml | 6 +- ext/JutulMeshesExt/JutulMeshesExt.jl | 100 --------------------------- test/mesh.jl | 14 +--- 3 files changed, 2 insertions(+), 118 deletions(-) delete mode 100644 ext/JutulMeshesExt/JutulMeshesExt.jl diff --git a/Project.toml b/Project.toml index 3c0ce9c65..fcd81bfa8 100644 --- a/Project.toml +++ b/Project.toml @@ -50,7 +50,6 @@ KaHyPar = "2a6221f6-aa48-11e9-3542-2d9e0ef01880" LayeredLayouts = "f4a74d36-062a-4d48-97cd-1356bad1de4e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" -Meshes = "eacbb407-ea5a-433e-ab97-5258b1ca43fa" NetworkLayout = "46757867-2c16-5918-afeb-47bfcb05e46a" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" @@ -62,7 +61,6 @@ JutulGraphMakieExt = ["GraphMakie", "NetworkLayout", "LayeredLayouts", "Makie"] JutulHYPREExt = "HYPRE" JutulKaHyParExt = "KaHyPar" JutulMakieExt = "Makie" -JutulMeshesExt = ["Meshes"] JutulPartitionedArraysExt = ["PartitionedArrays", "MPI"] [compat] @@ -86,7 +84,6 @@ LoopVectorization = "0.12.115" MAT = "0.10.3" Makie = "0.21, 0.22, 0.23" MappedArrays = "0.4.1" -Meshes = "0.28 - 0.41" Metis = "1.1.0" NetworkLayout = "0.4.5" OrderedCollections = "1.4.1" @@ -115,9 +112,8 @@ KaHyPar = "2a6221f6-aa48-11e9-3542-2d9e0ef01880" LayeredLayouts = "f4a74d36-062a-4d48-97cd-1356bad1de4e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" -Meshes = "eacbb407-ea5a-433e-ab97-5258b1ca43fa" NetworkLayout = "46757867-2c16-5918-afeb-47bfcb05e46a" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" [targets] -test = ["Meshes", "KaHyPar", "HYPRE"] +test = ["KaHyPar", "HYPRE"] diff --git a/ext/JutulMeshesExt/JutulMeshesExt.jl b/ext/JutulMeshesExt/JutulMeshesExt.jl deleted file mode 100644 index 6f441b42f..000000000 --- a/ext/JutulMeshesExt/JutulMeshesExt.jl +++ /dev/null @@ -1,100 +0,0 @@ -module JutulMeshesExt - using Jutul, Meshes - function meshes_fv_geometry_3d(grid::Mesh) - nc = nelements(grid) - nf = nfacets(grid) - topo = topology(grid) - bnd = Boundary{3,2}(topo) - # Global neighborship - N = zeros(Int64, 2, nf) - # Normals and centroids - normals = Vector{Meshes.Vec3}(undef, nf) - cell_centroids = centroid.(grid) - face_centroids = Vector{Point3}(undef, nf) - # Measures - areas = zeros(nf) - volumes = volume.(grid) - # Mask found/not found for each face to get signed normals - found = BitArray(undef, nf) - found .= false - for cell_ix in 1:nc - hexa = grid[cell_ix] - # Next, deal with faces - boundary_faces = bnd(cell_ix) - mesh = boundary(hexa) - nhf = length(mesh) - @assert nhf == length(boundary_faces) - for j in 1:nhf - face_ix = boundary_faces[j] - if found[face_ix] - # Already processed, only need to add neighborship - lr = 2 - else - lr = 1 - face_mesh = mesh[j] - face_centroids[face_ix] = centroid(face_mesh) - areas[face_ix] = area(face_mesh) - # Simplexify to get 
normals - trimesh = simplexify(face_mesh) - normals[face_ix] = first(normal.(trimesh)) - found[face_ix] = true - end - N[lr, face_ix] = cell_ix - end - end - # Convert Point3 arrays to Jutul bare array format for geometry - d = 3 - float_normals = zeros(d, nf) - float_face_centroids = zeros(d, nf) - for f in 1:nf - for j in 1:d - float_normals[j, f] = normals[f][j] - float_face_centroids[j, f] = face_centroids[f].coords[j] - end - end - float_cell_centroids = zeros(d, nc) - for c in 1:nc - for j in 1:d - float_cell_centroids[j, c] = cell_centroids[c].coords[j] - end - end - # Jutul currently expects the interior faces - active = vec(all(x -> x > 0, N, dims=1)) - return ( - N=N[:, active], - areas=areas[active], - volumes=volumes, - normals=float_normals[:, active], - cell_centroids = float_cell_centroids, - face_centroids = float_face_centroids[:, active] - ) - end - - - function Jutul.tpfv_geometry(g::T) where T<:Meshes.Mesh{3, <:Any} - N, A, V, Nv, Cc, Fc = meshes_fv_geometry_3d(g) - geo = TwoPointFiniteVolumeGeometry(N, A, V, Nv, Cc, Fc) - return geo - end - - function Jutul.add_default_domain_data!(Ω::DataDomain, m::Meshes.Mesh; geometry = missing) - # TODO: Fix this code duplication - if ismissing(geometry) - geometry = tpfv_geometry(m) - end - fv = geometry - geom_pairs = ( - Pair(Faces(), [:neighbors, :areas, :normals, :face_centroids]), - Pair(Cells(), [:cell_centroids, :volumes]), - Pair(HalfFaces(), [:half_face_cells, :half_face_faces]), - Pair(BoundaryFaces(), [:boundary_areas, :boundary_centroids, :boundary_normals, :boundary_neighbors]) - ) - for (entity, names) in geom_pairs - if hasentity(Ω, entity) - for name in names - Ω[name, entity] = getproperty(fv, name) - end - end - end - end -end diff --git a/test/mesh.jl b/test/mesh.jl index f3b25a70a..15d53fc92 100644 --- a/test/mesh.jl +++ b/test/mesh.jl @@ -44,19 +44,7 @@ using LinearAlgebra end end end -using Meshes -@testset "Meshes.jl interop" begin - # This testing is a bit simple since it relies on the same ordering - # in both Jutul.jl's cart grid and Meshes.jl - dims = (2,2,3) - grid = CartesianGrid(dims) - geo = tpfv_geometry(grid) - jgrid = Jutul.CartesianMesh(dims, (2.0, 2.0, 3.0)) - jgeo = tpfv_geometry(jgrid) - @test isapprox(jgeo.cell_centroids, geo.cell_centroids, atol = 1e-12) - @test isapprox(jgeo.volumes, geo.volumes, atol = 1e-12) -end -## + using MAT @testset "UnstructuredMesh" begin exported = Jutul.get_mat_testgrid("pico") From c3c8b3ff92cc549eb6f48604955f099e4f390aa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Mon, 8 Sep 2025 13:02:17 +0200 Subject: [PATCH 12/31] Faster parameter optimization for models where the setup can be split in dynamic and static parts (#183) --- src/DictOptimization/interface.jl | 19 +++++++ src/ad/AdjointsDI/AdjointsDI.jl | 2 + src/ad/AdjointsDI/adjoints.jl | 92 +++++++++++++++++++++++++++---- src/ad/AdjointsDI/split.jl | 43 +++++++++++++++ src/variables/vectorization.jl | 5 ++ test/adjoints/basic_adjoint.jl | 18 +++++- 6 files changed, 166 insertions(+), 13 deletions(-) create mode 100644 src/ad/AdjointsDI/split.jl diff --git a/src/DictOptimization/interface.jl b/src/DictOptimization/interface.jl index 316798148..e1ffaf9fe 100644 --- a/src/DictOptimization/interface.jl +++ b/src/DictOptimization/interface.jl @@ -36,6 +36,8 @@ using `free_optimization_parameter!` prior to calling the optimizer. 
- `simulator`: Optional simulator object used in forward simulations - `config`: Optional configuration for the setup - `solution_history`: If `true`, stores all intermediate solutions +- `deps`: One of `:case`, `:parameters`, `:parameters_and_state0`. Defines the + dependencies for the adjoint computation. See notes for more details. - `backend_arg`: Options for the autodiff backend: - `use_sparsity`: Enable sparsity detection for the objective function - `di_sparse`: Use sparse differentiation @@ -49,6 +51,21 @@ The optimized parameters as a dictionary. - The function stores the optimization history and optimized parameters in the input `dopt` object. - If `solution_history` is `true`, intermediate solutions are stored in `dopt.history.solutions`. - The default optimization algorithm is L-BFGS with box constraints. + +## Type of dependencies in `deps` +The `deps` argument is used to set the type of dependency the `case` setup +function has on the active optimization parameters. The default, `:case`, is +fully general and allows dependencies on everything contained within the `case` +instance. This can be slow, however, as the setup function must be called for +every time-step. If you know that the model instance and forces are independent +of the active parameters, you can use `deps = :parameters_and_state0`. If there +is no dependence on `state0`, you can set `deps = :parameters`. This can +substantially speed up the optimization process, but as there is no programmatic +verification that this assumption is true, it should be used with care. + +This interface is dependent on the model supporting use of +`vectorize_variables!` and `devectorize_variables!` for `state0/parameters`, +which should be the case for most Jutul models. """ function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_function; grad_tol = 1e-6, @@ -59,11 +76,13 @@ function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_functio simulator = missing, config = missing, solution_history = false, + deps::Symbol = :case, backend_arg = ( use_sparsity = false, di_sparse = true, single_step_sparsity = false, do_prep = true, + deps = deps, ), kwarg... ) diff --git a/src/ad/AdjointsDI/AdjointsDI.jl b/src/ad/AdjointsDI/AdjointsDI.jl index c3d59cf63..f4f183bb0 100644 --- a/src/ad/AdjointsDI/AdjointsDI.jl +++ b/src/ad/AdjointsDI/AdjointsDI.jl @@ -10,4 +10,6 @@ module AdjointsDI include("adjoints.jl") include("utils.jl") + # Utilities to handle "split" generic adjoints for performance + include("split.jl") end diff --git a/src/ad/AdjointsDI/adjoints.jl b/src/ad/AdjointsDI/adjoints.jl index c63d86903..bd7a60dab 100644 --- a/src/ad/AdjointsDI/adjoints.jl +++ b/src/ad/AdjointsDI/adjoints.jl @@ -12,7 +12,6 @@ function solve_adjoint_generic(X, F, states, reports_or_timesteps, G; packed_steps = AdjointPackedResult(states, reports_or_timesteps, forces) Jutul.set_global_timer!(extra_timing) N = length(states) - n_param = length(X) # Allocation part if info_level > 1 jutul_message("Adjoints", "Setting up storage...", color = :blue) @@ -26,7 +25,10 @@ function solve_adjoint_generic(X, F, states, reports_or_timesteps, G; if info_level > 1 jutul_message("Adjoints", "Storage set up in $(Jutul.get_tstr(t_storage)).", color = :blue) end - ∇G = zeros(n_param) + n_param = storage[:n_dynamic] + n_param_static = storage[:n_static] + n_param_static == length(X) || error("Internal error: static parameter length mismatch.") + ∇G = zeros(n_param_static) # Solve! 
if info_level > 1 @@ -58,6 +60,20 @@ function solve_adjoint_generic!(∇G, X, F, storage, packed_steps::AdjointPacked ) N = length(packed_steps) case = setup_case(X, F, packed_steps, state0, :all) + if F != storage[:F_input] + @warn "The function F used in the solve must be the same as the one used in the setup." + end + F_dynamic = storage[:F_dynamic] + F_static = storage[:F_static] + is_fully_dynamic = storage[:F_fully_dynamic] + if is_fully_dynamic + Y = X + dYdX = missing + else + prep = storage[:dF_static_dX_prep] + backend = storage[:backend_di] + Y, dYdX = value_and_jacobian(F_static, prep, backend, X) + end G = Jutul.adjoint_wrap_objective(G, case.model) Jutul.adjoint_reset_parameters!(storage, case.parameters) @@ -72,17 +88,24 @@ function solve_adjoint_generic!(∇G, X, F, storage, packed_steps::AdjointPacked Jutul.update_objective_sparsity!(storage, G, packed_steps, :forward) # Set gradient to zero before solve starts - @. ∇G = 0 + dG_dynamic = storage[:dynamic_buffer] + @. dG_dynamic = 0 @tic "sensitivities" for i in N:-1:1 if info_level > 0 jutul_message("Step $i/$N", "Solving adjoint system.", color = :blue) end - update_sensitivities_generic!(∇G, X, H, i, G, storage, packed_steps) + update_sensitivities_generic!(dG_dynamic, Y, H, i, G, storage, packed_steps) end dparam = storage.dparam if !isnothing(dparam) - @. ∇G += dparam + @. dG_dynamic += dparam end + if is_fully_dynamic + copyto!(∇G, dG_dynamic) + else + mul!(∇G, dYdX', dG_dynamic) + end + all(isfinite, ∇G) || error("Adjoint solve resulted in non-finite gradient values.") return ∇G end @@ -96,6 +119,7 @@ function setup_adjoint_storage_generic(X, F, packed_steps::AdjointPackedResult, state0 = missing, do_prep = true, di_sparse = true, + deps = :case, backend = Jutul.default_di_backend(sparse = di_sparse), info_level = 0, single_step_sparsity = true, @@ -111,8 +135,8 @@ function setup_adjoint_storage_generic(X, F, packed_steps::AdjointPackedResult, n_objective = nothing, info_level = info_level, ) - storage[:dparam] = zeros(length(X)) - setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case, backend, do_prep, single_step_sparsity, di_sparse) + setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case, backend, do_prep, single_step_sparsity, di_sparse, deps) + storage[:dparam] = zeros(storage[:n_dynamic]) return storage end @@ -291,12 +315,54 @@ function (H::AdjointObjectiveHelper)(x) return v end -function setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case0, backend, do_prep, single_step_sparsity, di_sparse) +function setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case, backend, do_prep, single_step_sparsity, di_sparse, deps::Symbol) if ismissing(backend) backend = Jutul.default_di_backend(sparse = di_sparse) end + model = case.model + # Two approaches: + # 1. F_static(X) -> Y (vector of parameters) -> F_dynamic(Y) (updated case) + # 2. F(X) -> case directly and F_dynamic = F and F_static = identity + fully_dynamic = deps == :case + prep_static = nothing + if fully_dynamic + F_dynamic = F + F_static = x -> x + else + deps in (:parameters, :parameters_and_state0) || error("deps must be :case, :parameters or :parameters_and_state0. 
Got $deps.") + # cfg = optimization_config(case0, include_state0 = deps == :parameters_and_state0) + inc_state0 = deps == :parameters_and_state0 + parameters_map, = Jutul.variable_mapper(model, :parameters) + if inc_state0 + state0_map, = Jutul.variable_mapper(model, :primary) + else + state0_map = missing + end + cache_static = Dict{Type, AbstractVector}() + F_static = (X, step_info = missing) -> map_X_to_Y(F, X, case.model, parameters_map, state0_map, cache_static) + F_dynamic = (Y, step_info = missing) -> setup_from_vectorized(Y, case, parameters_map, state0_map) + if do_prep + prep_static = prepare_jacobian(F_static, backend, X) + end + end - H = AdjointObjectiveHelper(F, G, packed_steps) + # Whatever was input - for checking + storage[:F_input] = F + # Dynamic part - every timestep + storage[:F_dynamic] = F_dynamic + # Static part - once + storage[:F_static] = F_static + # Jacobian action + storage[:dF_static_dX_prep] = prep_static + storage[:F_fully_dynamic] = fully_dynamic + storage[:deps] = deps + + # Switch to Y and F_dynamic(Y) as main function + Y = F_static(X) + storage[:n_static] = length(X) + storage[:n_dynamic] = length(Y) + storage[:dynamic_buffer] = similar(Y) + H = AdjointObjectiveHelper(F_dynamic, G, packed_steps) storage[:adjoint_objective_helper] = H # Note: strict = false is needed because we create another function on the fly # that essentially calls the same function. @@ -306,8 +372,8 @@ function setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case0, backe else step_index = missing end - set_objective_helper_step_index!(H, case0.model, step_index) - prep = prepare_jacobian(H, backend, X) + set_objective_helper_step_index!(H, case.model, step_index) + prep = prepare_jacobian(H, backend, Y) else prep = nothing end @@ -316,6 +382,10 @@ function setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case0, backe return storage end +function setup_outer_chain_rule(F, case, deps::Symbol) + return (F_dynamic, F_static, dF_static_dX) +end + function evaluate_residual_and_jacobian_for_state_pair(x, state, state0, F, objective_eval::Function, packed_steps::AdjointPackedResult, substep_index::Int, cache = missing; is_sum = true) step_info = packed_steps[substep_index].step_info dt = step_info[:dt] diff --git a/src/ad/AdjointsDI/split.jl b/src/ad/AdjointsDI/split.jl new file mode 100644 index 000000000..5168b5807 --- /dev/null +++ b/src/ad/AdjointsDI/split.jl @@ -0,0 +1,43 @@ +function map_X_to_Y(F, X, model, parameters_map, state0_map, cache) + has_state0 = !ismissing(state0_map) + case = F(X, missing) + N_prm = Jutul.vectorized_length(model, parameters_map) + if has_state0 + N_s0 = Jutul.vectorized_length(model, state0_map) + else + N_s0 = 0 + end + N = N_prm + N_s0 + T = eltype(X) + if !haskey(cache, T) + cache[T] = zeros(T, N) + end + Y = cache[T] + resize!(Y, N) + Y_prm = view(Y, 1:N_prm) + vectorize_variables!(Y_prm, model, case.parameters, parameters_map) + if has_state0 + Y_s0 = view(Y, (N_prm+1):(N_prm+N_s0)) + vectorize_variables!(Y_s0, model, case.state0, state0_map) + end + return Y +end + +function setup_from_vectorized(Y, case, parameters_map, state0_map) + has_state0 = !ismissing(state0_map) + N_prm = Jutul.vectorized_length(case.model, parameters_map) + if has_state0 + N_s0 = Jutul.vectorized_length(case.model, state0_map) + else + N_s0 = 0 + end + N = N_prm + N_s0 + @assert length(Y) == N "Length of Y ($(length(Y))) does not match expected length ($N)." 
+ Y_prm = view(Y, 1:N_prm) + devectorize_variables!(case.parameters, case.model, Y_prm, parameters_map) + if has_state0 + Y_s0 = view(Y, (N_prm+1):(N_prm+N_s0)) + devectorize_variables!(case.state0, case.model, Y_s0, state0_map) + end + return case +end diff --git a/src/variables/vectorization.jl b/src/variables/vectorization.jl index 0fb0055eb..003ea094a 100644 --- a/src/variables/vectorization.jl +++ b/src/variables/vectorization.jl @@ -87,6 +87,11 @@ end function devectorize_variable!(state, V, k, info, F_inv; config = nothing) (; n_full, n_x, offset_full, offset_x) = info state_val = state[k] + T = eltype(V) + if eltype(state_val) != T + state_val = similar(state_val, T) + state[k] = state_val + end lumping = get_lumping(config) if isnothing(lumping) @assert n_full == n_x diff --git a/test/adjoints/basic_adjoint.jl b/test/adjoints/basic_adjoint.jl index 4f2bd4fba..ed7a8d7fa 100644 --- a/test/adjoints/basic_adjoint.jl +++ b/test/adjoints/basic_adjoint.jl @@ -209,7 +209,7 @@ function num_grad_generic(F, G, x0) return out end -function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objective = false, kwarg...) +function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objective = false, deps = :case, kwarg...) # dx, dy, U0, k_val, srcval x = ones(5) case = setup_poisson_test_case_from_vector(x, dt = timesteps) @@ -238,6 +238,7 @@ function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objectiv dGdx_adj = solve_adjoint_generic(x, F, states, reports, G; state0 = case.state0, forces = case.forces, + deps = deps, kwarg... ) @@ -245,6 +246,15 @@ function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objectiv dGdx_adj = dGdx_adj[1:4] dGdx_num = dGdx_num[1:4] end + if deps == :parameters + ix = [1, 2, 4] + dGdx_adj = dGdx_adj[ix] + dGdx_num = dGdx_num[ix] + elseif deps == :parameters_and_state0 + ix = 1:4 + dGdx_adj = dGdx_adj[ix] + dGdx_num = dGdx_num[ix] + end @test dGdx_adj ≈ dGdx_num atol = atol end @@ -262,10 +272,14 @@ end for fmt in [:case, :onecase, :model_and_prm, :model_and_prm_and_forces, :model_and_prm_and_forces_and_state0] test_for_timesteps([100.0], fmt = fmt, global_objective = global_obj) end + @testset "parameters" begin + test_for_timesteps([100.0], fmt = :case, global_objective = global_obj, deps = :parameters) + test_for_timesteps([100.0], fmt = :case, global_objective = global_obj, deps = :parameters_and_state0) + end end end end -## + import Jutul.DictOptimization as DictOptimization @testset "DictOptimization" begin testdata = Dict( From 46def5bc1d70a84baaa004eee30cadd1239d84ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Mon, 8 Sep 2025 16:19:09 +0200 Subject: [PATCH 13/31] Enable objective sparsity in optimize by default, perf improvements to adjoint (#184) --- src/DictOptimization/interface.jl | 2 +- src/ad/AdjointsDI/split.jl | 12 ++++++++---- src/ad/gradients.jl | 6 +----- src/ad/local_ad.jl | 4 ++++ src/core_types/core_types.jl | 9 ++++----- src/variables/vectorization.jl | 5 +++++ test/adjoints/basic_adjoint.jl | 2 +- 7 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/DictOptimization/interface.jl b/src/DictOptimization/interface.jl index e1ffaf9fe..4b7eabcd7 100644 --- a/src/DictOptimization/interface.jl +++ b/src/DictOptimization/interface.jl @@ -78,7 +78,7 @@ function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_functio solution_history = false, deps::Symbol = :case, backend_arg = ( - use_sparsity = false, + use_sparsity = true, di_sparse = 
true, single_step_sparsity = false, do_prep = true, diff --git a/src/ad/AdjointsDI/split.jl b/src/ad/AdjointsDI/split.jl index 5168b5807..5e27fb9da 100644 --- a/src/ad/AdjointsDI/split.jl +++ b/src/ad/AdjointsDI/split.jl @@ -14,12 +14,14 @@ function map_X_to_Y(F, X, model, parameters_map, state0_map, cache) end Y = cache[T] resize!(Y, N) - Y_prm = view(Y, 1:N_prm) - vectorize_variables!(Y_prm, model, case.parameters, parameters_map) if has_state0 + Y_prm = view(Y, 1:N_prm) Y_s0 = view(Y, (N_prm+1):(N_prm+N_s0)) vectorize_variables!(Y_s0, model, case.state0, state0_map) + else + Y_prm = Y end + vectorize_variables!(Y_prm, model, case.parameters, parameters_map) return Y end @@ -33,11 +35,13 @@ function setup_from_vectorized(Y, case, parameters_map, state0_map) end N = N_prm + N_s0 @assert length(Y) == N "Length of Y ($(length(Y))) does not match expected length ($N)." - Y_prm = view(Y, 1:N_prm) - devectorize_variables!(case.parameters, case.model, Y_prm, parameters_map) if has_state0 + Y_prm = view(Y, 1:N_prm) Y_s0 = view(Y, (N_prm+1):(N_prm+N_s0)) devectorize_variables!(case.state0, case.model, Y_s0, state0_map) + else + Y_prm = Y end + devectorize_variables!(case.parameters, case.model, Y_prm, parameters_map) return case end diff --git a/src/ad/gradients.jl b/src/ad/gradients.jl index 65afeb81e..2928d76d1 100644 --- a/src/ad/gradients.jl +++ b/src/ad/gradients.jl @@ -416,11 +416,7 @@ function state_gradient_inner!(∂F∂x, F, model, state, tag, eval_model = mode end function diff_entity!(∂F∂x, state, i, S, ne, np, offset, symbol) - if !isnothing(symbol) - state_i = local_ad(state, i, S, symbol) - else - state_i = local_ad(state, i, S) - end + state_i = local_ad(state, i, S, symbol) v = F(eval_model, state_i) store_partials!(∂F∂x, v, i, ne, np, offset) end diff --git a/src/ad/local_ad.jl b/src/ad/local_ad.jl index eefeb100d..785bc2b35 100644 --- a/src/ad/local_ad.jl +++ b/src/ad/local_ad.jl @@ -190,6 +190,10 @@ end local_state_ad(state, index, ad_tag, symbol) end +@inline function local_ad(state, index, ad_tag, ::Nothing) + local_state_ad(state, index, ad_tag) +end + @inline function local_state_ad(state::T, index::I, ad_tag::∂T) where {T, I<:Integer, ∂T} return LocalStateAD{T, I, ad_tag}(index, state) end diff --git a/src/core_types/core_types.jl b/src/core_types/core_types.jl index 7e7109236..ffba1ece9 100644 --- a/src/core_types/core_types.jl +++ b/src/core_types/core_types.jl @@ -929,13 +929,12 @@ end function Base.getindex(case::JutulCase, ix) (; model, dt, forces, state0, parameters, input_data) = case - f = deepcopy(forces) - if f isa AbstractVector - @assert length(f) == length(dt) - f = f[ix] + if forces isa AbstractVector + @assert length(forces) == length(dt) + forces = forces[ix] end dt = dt[ix] - return JutulCase(model, dt, f, deepcopy(state0), deepcopy(parameters), deepcopy(input_data)) + return JutulCase(model, dt, forces, state0, parameters, input_data) end export NoRelaxation, SimpleRelaxation diff --git a/src/variables/vectorization.jl b/src/variables/vectorization.jl index 003ea094a..ba59f6142 100644 --- a/src/variables/vectorization.jl +++ b/src/variables/vectorization.jl @@ -93,6 +93,11 @@ function devectorize_variable!(state, V, k, info, F_inv; config = nothing) state[k] = state_val end lumping = get_lumping(config) + devectorize_variable_inner!(state_val, V, k, F_inv, lumping, n_full, n_x, offset_full, offset_x) + return state +end + +function devectorize_variable_inner!(state_val, V, k, F_inv, lumping, n_full, n_x, offset_full, offset_x) if isnothing(lumping) @assert 
n_full == n_x @assert length(state_val) == n_full "Expected field $k to have length $n_full, was $(length(state_val))" diff --git a/test/adjoints/basic_adjoint.jl b/test/adjoints/basic_adjoint.jl index ed7a8d7fa..59e140f17 100644 --- a/test/adjoints/basic_adjoint.jl +++ b/test/adjoints/basic_adjoint.jl @@ -367,7 +367,7 @@ import Jutul.DictOptimization as DictOptimization step = step_info[:step] U = s[:U] U_ref = states[step][:U] - v = sum(i -> (U[i] - U_ref[i]).^2, eachindex(U, U_ref)) + v = sum(i -> (U[i] - U_ref[i]).^2, eachindex(U)) return dt*v end # Perturb a parameter From ef79516def537dd0e2780fdc4b754017785e634c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Tue, 9 Sep 2025 09:49:36 +0200 Subject: [PATCH 14/31] Fallback for MVector sparsity tracing issues on 1.11 (#185) --- src/conservation/conservation.jl | 37 +++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/src/conservation/conservation.jl b/src/conservation/conservation.jl index d23b1f979..c111a974e 100644 --- a/src/conservation/conservation.jl +++ b/src/conservation/conservation.jl @@ -4,7 +4,42 @@ number_of_equations_per_entity(model::SimulationModel, ::ConservationLaw{<:Any, flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T<:ForwardDiff.Dual} = SVector{N, T} flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T<:AbstractFloat} = SVector{N, T} -flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T} = MVector{N, T} + +@static if VERSION ≥ v"1.11" + struct TempVector{N, T} + v::Vector{T} + end + + function TempVector(N, T) + return TempVector{N, T}(zeros(T, N)) + end + + Base.length(::TempVector{N, T}) where {N, T} = N + Base.zero(::Type{TempVector{N, T}}) where {N, T} = TempVector(N, T) + Base.zero(::TempVector{N, T}) where {N, T} = TempVector(N, T) + Base.getindex(x::TempVector, i) = x.v[i] + + import Base.+ + function (+)(x::TempVector{N, T}, y::TempVector{N, T}) where {N, T} + return TempVector{N, T}(x.v .+ y.v) + end + + Base.eachindex(x::TempVector{N, T}) where {N, T} = 1:N + + function Base.setindex!(x::TempVector, v, i::Integer) + x.v[i] = v + return x + end + + function StaticArrays.setindex(x::TempVector, v, i::Integer) + x[i] = v + return x + end + + flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T} = TempVector{N, T} +else + flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T} = MVector{N, T} +end conserved_symbol(::ConservationLaw{C, <:Any}) where C = C From 737a0fd3926f19d1863e314f191e6556c341f4d9 Mon Sep 17 00:00:00 2001 From: andreas-brostrom <83207523+andreas-brostrom@users.noreply.github.com> Date: Fri, 12 Sep 2025 13:27:53 +0200 Subject: [PATCH 15/31] Fix bug in format of time stamp for printing simulation result (#186) --- src/simulator/utils.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simulator/utils.jl b/src/simulator/utils.jl index 18bcb689f..84c80df9f 100644 --- a/src/simulator/utils.jl +++ b/src/simulator/utils.jl @@ -126,7 +126,7 @@ function Base.show(io::IO, ::MIME"text/plain", sr::SimResult) end function print_sim_result_timing(io, sr::SimResult) - fmt = raw"u. dd Y H:mm" + fmt = raw"u. 
dd Y HH:MM" if length(sr.reports) == 0 t = 0.0 else From 5fd93134068b746f47318707bec8718a671bd442 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Tue, 23 Sep 2025 13:49:34 +0200 Subject: [PATCH 16/31] Update backend warning for interactive plotting (#188) --- ext/JutulMakieExt/interactive_3d.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ext/JutulMakieExt/interactive_3d.jl b/ext/JutulMakieExt/interactive_3d.jl index edc12a6a6..2b4f0cfcd 100644 --- a/ext/JutulMakieExt/interactive_3d.jl +++ b/ext/JutulMakieExt/interactive_3d.jl @@ -900,9 +900,9 @@ end function Jutul.plotting_check_interactive(; warn = true) backend_name = "$(Makie.current_backend())" - if backend_name != "GLMakie" + if !(backend_name in ("GLMakie", "WGLMakie")) if warn - msg = "Currently active Makie backend $backend_name may not be interactive or fully supported.\nGLMakie is recommended for Jutul's interactive plots. To install:\n\tusing Pkg; Pkg.add(\"GLMakie\")\nTo use:\n\tusing GLMakie\n\tGLMakie.activate!()\nYou can then retry your plotting command." + msg = "Currently active Makie backend $backend_name may not be interactive or fully supported.\nGLMakie (or WGLMakie) is recommended for Jutul's interactive plots. To install:\n\tusing Pkg; Pkg.add(\"GLMakie\")\nTo use:\n\tusing GLMakie\n\tGLMakie.activate!()\nYou can then retry your plotting command." @warn msg end return false From 8be3965dbb3903004d4305ecc1d98c2e385f1f6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Tue, 23 Sep 2025 17:47:11 +0200 Subject: [PATCH 17/31] Improve fix for Julia GC crash (#189) * Update conservation.jl * Update unstructured.jl --- src/conservation/conservation.jl | 3 ++- src/meshes/unstructured/unstructured.jl | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/conservation/conservation.jl b/src/conservation/conservation.jl index c111a974e..1be62d234 100644 --- a/src/conservation/conservation.jl +++ b/src/conservation/conservation.jl @@ -5,7 +5,7 @@ number_of_equations_per_entity(model::SimulationModel, ::ConservationLaw{<:Any, flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T<:ForwardDiff.Dual} = SVector{N, T} flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T<:AbstractFloat} = SVector{N, T} -@static if VERSION ≥ v"1.11" +@static if VERSION ≥ v"1.11" && VERSION < v"1.12" struct TempVector{N, T} v::Vector{T} end @@ -40,6 +40,7 @@ flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, else flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T} = MVector{N, T} end +flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, T<:ST.ADval} = MVector{N, T} conserved_symbol(::ConservationLaw{C, <:Any}) where C = C diff --git a/src/meshes/unstructured/unstructured.jl b/src/meshes/unstructured/unstructured.jl index 91425649d..bf614b002 100644 --- a/src/meshes/unstructured/unstructured.jl +++ b/src/meshes/unstructured/unstructured.jl @@ -35,6 +35,10 @@ function grid_dims_ijk(g::UnstructuredMesh{D, CartesianIndex{D}}) where D return (nx, ny, nz) end +function cell_ijk(g) + return i -> cell_ijk(g, i) +end + function cell_ijk(g::UnstructuredMesh{D, CartesianIndex{D}}, index::Integer) where D nx, ny, nz = grid_dims_ijk(g) if isnothing(g.cell_map) From c5502ff6b17aba82e3beb78fb9da872661c0e641 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Wed, 24 Sep 2025 10:02:53 +0200 Subject: [PATCH 18/31] More missing 
methods for type for Julia 1.11 crash (#190) --- src/conservation/conservation.jl | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/src/conservation/conservation.jl b/src/conservation/conservation.jl index 1be62d234..4deb5efad 100644 --- a/src/conservation/conservation.jl +++ b/src/conservation/conservation.jl @@ -8,10 +8,22 @@ flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, @static if VERSION ≥ v"1.11" && VERSION < v"1.12" struct TempVector{N, T} v::Vector{T} + function TempVector(x::T) where {T<:AbstractVector} + return new{length(x), eltype(x)}(x) + end + end + + function TempVector{N, T}(x::T) where {N, T} + N == 1 || error("This constructor only makes sense for scalars") + return TempVector([x]) + end + + function TempVector(x::T) where {T} + return TempVector{1, T}([x]) end - function TempVector(N, T) - return TempVector{N, T}(zeros(T, N)) + function TempVector(N::Int, T) + return TempVector(zeros(T, N)) end Base.length(::TempVector{N, T}) where {N, T} = N @@ -21,7 +33,17 @@ flux_vector_type(::ConservationLaw{<:Any, <:Any, <:Any, N}, ::Val{T}) where {N, import Base.+ function (+)(x::TempVector{N, T}, y::TempVector{N, T}) where {N, T} - return TempVector{N, T}(x.v .+ y.v) + return TempVector(x.v .+ y.v) + end + + + import Base.* + function (*)(x::TempVector{N, T}, y::Number) where {N, T} + return TempVector(x.v * y) + end + + function (*)(y::Number, x::TempVector{N, T}) where {N, T} + return x*y end Base.eachindex(x::TempVector{N, T}) where {N, T} = 1:N From 1634c9d7d2997a438e6ef4f5d137e3c54abcbb7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Wed, 24 Sep 2025 12:02:44 +0200 Subject: [PATCH 19/31] Refactor ForwardDiff usage to make use of ForwardDiff.Tag (#191) * Start to use ForwardDiff tags properly * More tag fixes * Additional type fixes/cleanup for AD * Update ad.jl --- src/ad/ad.jl | 27 ++++++++++++++++++++++----- src/ad/sparsity.jl | 36 +++++++++++++++++++++++++----------- test/sparsity.jl | 8 ++++++++ 3 files changed, 55 insertions(+), 16 deletions(-) diff --git a/src/ad/ad.jl b/src/ad/ad.jl index 8ddbbfc9f..043bbff9e 100644 --- a/src/ad/ad.jl +++ b/src/ad/ad.jl @@ -341,6 +341,7 @@ function allocate_array_ad(v::AbstractVector; kwarg...) # create a copy of a vector as AD v_AD = allocate_array_ad(length(v); kwarg...) update_values!(v_AD, v) + v_AD end """ @@ -371,7 +372,15 @@ function get_ad_entity_scalar(v::T, npartials, diag_pos = nothing; diag_value = if npartials > 0 D = diag_value.*ntuple(x -> T.(x == diag_pos), npartials) partials = ForwardDiff.Partials{npartials, T}(D) - v = ForwardDiff.Dual{tag, T, npartials}(v, partials) + if isnothing(tag) + fd_tag = ForwardDiff.Tag(nothing, NoEntity) + else + if tag isa JutulEntity + tag = typeof(tag) + end + fd_tag = ForwardDiff.Tag(simulate, tag) + end + v = ForwardDiff.Dual{fd_tag, T, npartials}(v, partials) end return v end @@ -382,7 +391,7 @@ end Replace values of `x` in-place by `y`, leaving `x` with the values of y and the partials of `x`. """ @inline function update_values!(v::AbstractArray{<:ForwardDiff.Dual{Tag}}, next::AbstractArray{<:Real}) where Tag - if Tag isa JutulEntity + if unpack_tag(v) isa JutulEntity # The ForwardDiff type is immutable, so to preserve the derivatives we # do this little trick if we are working with a Jutul entity tag. This # signifies that we are currently working with a Jutul AD variable. 
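A minimal sketch of the tag behaviour introduced here, mirroring the `ad_tags` test added later in this series (assumes `using Jutul`):

```julia
using Jutul
v = allocate_array_ad(1, diag_pos = 1, tag = Cells())
Jutul.value(v[1])    # unwraps the Jutul-tagged dual, giving a plain Float64
Jutul.unpack_tag(v)  # recovers the entity tag, Cells()
```

Duals carrying a non-Jutul tag are left untouched by `value`, which is relevant when Jutul AD is nested inside other ForwardDiff-based code.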
@@ -408,7 +417,7 @@ Replace values (for non-Real types, direct assignment) end @inline function update_values!(v::AbstractArray{<:AbstractFloat}, next::AbstractArray{<:ForwardDiff.Dual{Tag}}) where Tag - Tag::JutulEntity + unpack_tag(next)::JutulEntity @inbounds for i in eachindex(v, next) next_val = next[i] v[i] = value(next_val) @@ -427,8 +436,8 @@ end """ Take value of AD. """ -@inline function value(x::ForwardDiff.Dual{Tag}) where Tag - if Tag isa JutulEntity +@inline function value(x::ForwardDiff.Dual{T, V, N}) where {T, V, N} + if is_jutul_ad_tag(T) # If the tag is a Jutul entity, we know that the AD came from Jutul, so we can # use the ForwardDiff value function to get the value. Otherwise we leave it be. x = ForwardDiff.value(x) @@ -436,6 +445,14 @@ Take value of AD. return x end +function is_jutul_ad_tag(::ForwardDiff.Tag{F, V}) where {F, V} + return V<:JutulEntity +end + +function is_jutul_ad_tag(::Any) + return false +end + @inline function value(x) return x end diff --git a/src/ad/sparsity.jl b/src/ad/sparsity.jl index aa497b4b9..2b6c6c547 100644 --- a/src/ad/sparsity.jl +++ b/src/ad/sparsity.jl @@ -1,17 +1,31 @@ -function unpack_tag(v::Type{ForwardDiff.Dual{T, F, N}}, t::Symbol = :entity) where {T, F, N} - if v isa Tuple - if t == :entity - out = T[2] - elseif t == :model - out = T[1] - else - out = T - end +# function unpack_tag(v::Type{ForwardDiff.Dual{T, F, N}}, t::Symbol = :entity) where {T, F, N} +# @info "???" +# if v isa Tuple +# if t == :entity +# out = T[2] +# elseif t == :model +# out = T[1] +# else +# out = T +# end +# else +# out = T +# end +# return unpack_tag(out) +# end + +function unpack_tag(v::Type{ForwardDiff.Dual{T, F, N}}) where {T, F, N} + return unpack_tag(T) +end + +function unpack_tag(v::ForwardDiff.Tag{F, T}) where {F, T} + if T <: JutulEntity + return T() else - out = T + return nothing end - return out end + unpack_tag(A::AbstractArray, arg...) = unpack_tag(eltype(A), arg...) unpack_tag(::Any, arg...) = nothing diff --git a/test/sparsity.jl b/test/sparsity.jl index a029be727..da379fb93 100644 --- a/test/sparsity.jl +++ b/test/sparsity.jl @@ -49,3 +49,11 @@ using Jutul, Test end end end + +@testset "ad_tags" begin + v = allocate_array_ad(1, diag_pos = 1, tag = Cells()) + @test Jutul.value(v[1]) isa Float64 + @test Jutul.value(v) isa Vector{Float64} + v = allocate_array_ad(1, diag_pos = 1, tag = Cells()) + @test Jutul.unpack_tag(v) == Cells() +end From b1d32bcf5ba04508d9de7bae557b5616a465315d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Fri, 26 Sep 2025 09:42:36 +0200 Subject: [PATCH 20/31] Update plot_secondary_variables to work with latest everything (#192) --- ext/JutulGLMakieExt/JutulGLMakieExt.jl | 3 +-- ext/JutulGLMakieExt/variables.jl | 13 ++++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/ext/JutulGLMakieExt/JutulGLMakieExt.jl b/ext/JutulGLMakieExt/JutulGLMakieExt.jl index 3e97dc301..41f8faaff 100644 --- a/ext/JutulGLMakieExt/JutulGLMakieExt.jl +++ b/ext/JutulGLMakieExt/JutulGLMakieExt.jl @@ -1,6 +1,5 @@ module JutulGLMakieExt - -using Jutul, GLMakie + using Jutul, GLMakie include("variables.jl") function Jutul.independent_figure(fig::Figure) diff --git a/ext/JutulGLMakieExt/variables.jl b/ext/JutulGLMakieExt/variables.jl index 1a7df3e31..2a2bc2d55 100644 --- a/ext/JutulGLMakieExt/variables.jl +++ b/ext/JutulGLMakieExt/variables.jl @@ -2,7 +2,7 @@ function Jutul.plot_secondary_variables(model::SimulationModel; kwarg...) Jutul.plot_secondary_variables(MultiModel((model = model, )); kwarg...) 
end -function Jutul.plot_secondary_variables(model::MultiModel; linewidth = 4, kwarg...) +function Jutul.plot_secondary_variables(model::MultiModel; linewidth = 2, kwarg...) data = Dict{String, Any}() nregmax = 1 count = 0 @@ -51,7 +51,7 @@ function Jutul.plot_secondary_variables(model::MultiModel; linewidth = 4, kwarg. reg = m2.selection[] begin function plot_by_reg(regions) - plot_jutul_line_data(d; regions = regions, linewidth = s.value[]) + Jutul.plot_jutul_line_data(d; regions = regions, linewidth = s.value[]) end if reg == "All" plot_by_reg(axes(d, 2)) @@ -72,21 +72,24 @@ function Jutul.plot_jutul_line_data(data::JutulLinePlotData; kwarg...) plot_jutul_line_data([data]; kwarg...) end -function Jutul.plot_jutul_line_data(data; resolution = (1600, 900), linewidth = 4, regions = axes(data, 2), kwarg...) - fig = Figure(resolution = resolution) +function Jutul.plot_jutul_line_data(data; size = (1600, 900), linewidth = 2, regions = axes(data, 2), kwarg...) + fig = Figure(size = size) colors = Makie.wong_colors() + all_axes = [] for (col, regix) in enumerate(regions) for i in axes(data, 1) d = data[i, regix] ax = Axis(fig[i, col], xlabel = d.xlabel, ylabel = d.ylabel, title = "$(d.title) region $regix") + push!(all_axes, ax) ix = 1 for (x, y, lbl) in zip(d.xdata, d.ydata, d.datalabels) c = colors[mod(ix, 7) + 1] lines!(ax, x, y; color = c, linewidth = linewidth, label = lbl, kwarg...) ix += 1 end - axislegend() + axislegend(position = :rt) end end + linkaxes!(all_axes...) display(GLMakie.Screen(), fig) end From 36522f134a742ceb49b2a3a9eb88dd2ec1c3ac9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Fri, 26 Sep 2025 10:00:26 +0200 Subject: [PATCH 21/31] Bump to version 0.4.6 (#193) --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index fcd81bfa8..02ff5381f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Jutul" uuid = "2b460a1a-8a2b-45b2-b125-b5c536396eb9" authors = ["Olav Møyner "] -version = "0.4.5" +version = "0.4.6" [deps] AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" From e25cdabfff09d69aa5afb30b5bd1bfcf25d7f816 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Fri, 26 Sep 2025 20:28:48 +0200 Subject: [PATCH 22/31] Add tag ordering to Jutul tags for ForwardDiff (#194) --- Project.toml | 2 +- src/Jutul.jl | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 02ff5381f..c28c72982 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Jutul" uuid = "2b460a1a-8a2b-45b2-b125-b5c536396eb9" authors = ["Olav Møyner "] -version = "0.4.6" +version = "0.4.7" [deps] AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" diff --git a/src/Jutul.jl b/src/Jutul.jl index 740418dc8..d18f75cf0 100644 --- a/src/Jutul.jl +++ b/src/Jutul.jl @@ -148,4 +148,9 @@ module Jutul import Jutul.ConvergenceMonitors: set_convergence_monitor_cutting_criterion! import Jutul.ConvergenceMonitors: set_convergence_monitor_relaxation! + # This is to make Jutul simulators work nicely with nested ForwardDiff. 
+ JutulSimulateTag = ForwardDiff.Tag{typeof(simulate), <:JutulEntity} + ForwardDiff.:≺(::JutulSimulateTag, ::Type{<:ForwardDiff.Tag}) = true + ForwardDiff.:≺(::Type{<:ForwardDiff.Tag}, ::JutulSimulateTag) = false + end # module From e33e6f6cf68ddabfa5be78c6f43c41666ad0e4a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Klemetsdal?= Date: Sun, 28 Sep 2025 20:22:30 +0200 Subject: [PATCH 23/31] Find enclosing cell improvement (#195) * Add optional kwarg `cells` to limit seach to a subset of the mesh cells * Remove println-statement * Bump version --- Project.toml | 2 +- src/meshes/trajectories.jl | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/Project.toml b/Project.toml index c28c72982..5a5dad271 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Jutul" uuid = "2b460a1a-8a2b-45b2-b125-b5c536396eb9" authors = ["Olav Møyner "] -version = "0.4.7" +version = "0.4.8" [deps] AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" diff --git a/src/meshes/trajectories.jl b/src/meshes/trajectories.jl index cad4dc9fd..fd10c8b26 100644 --- a/src/meshes/trajectories.jl +++ b/src/meshes/trajectories.jl @@ -27,10 +27,18 @@ of cells are treated more rigorously when picking exactly what cells are cut by a trajectory, but this requires that the boundary normals are oriented outwards, which is currently not the case for all meshes from downstream packages. +`atol` is the tolerance used when checking if points are inside the bounding +box. + `limit_box` speeds up the search by limiting the search to the minimal bounding box that contains both the trajectory and the mesh. This can be turned off by passing `false`. There should be no difference in the cells tagged by changing this option. + +`cells` can be used to limit the search to a subset of cells in the mesh. By +default all cells are used. If `limit_box` is true, the function searches among +the cells in intersect(cells, cells inside bounding box). 
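For example (an illustrative sketch, not part of this patch; `g`, `traj`, and `candidate_cells` stand for an existing mesh, a trajectory in the format accepted by this function, and a precomputed subset of cell indices):

```julia
cells_along_traj = find_enclosing_cells(g, traj, cells = candidate_cells)
```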
+ """ function find_enclosing_cells(G, traj; geometry = missing, @@ -38,6 +46,7 @@ function find_enclosing_cells(G, traj; use_boundary = false, atol = 0.01, limit_box = true, + cells = 1:number_of_cells(G), extra_out = false ) G = UnstructuredMesh(G) @@ -96,9 +105,8 @@ function find_enclosing_cells(G, traj; inside_bb(x) = point_in_bounding_box(x, low_bb, high_bb, atol = atol) pts = filter(inside_bb, pts) # Find cells via their nodes - if any node is inside BB we consider the cell - cells = cells_inside_bounding_box(G, low_bb, high_bb, atol = atol) - else - cells = 1:number_of_cells(G) + cells_bb = cells_inside_bounding_box(G, low_bb, high_bb, atol = atol) + cells = intersect(cells_bb, cells) end # Start search near middle of trajectory mean_pt = mean(pts) From e7e61e2e901f9d86dbaa86e6631b189775c30363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Wed, 8 Oct 2025 11:30:05 +0200 Subject: [PATCH 24/31] Add IJK fallback for unstructured meshes (#196) --- src/meshes/unstructured/unstructured.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/meshes/unstructured/unstructured.jl b/src/meshes/unstructured/unstructured.jl index bf614b002..f711a8b47 100644 --- a/src/meshes/unstructured/unstructured.jl +++ b/src/meshes/unstructured/unstructured.jl @@ -20,6 +20,12 @@ function get_neighborship(G::UnstructuredMesh; internal = true) return N end +function grid_dims_ijk(g::UnstructuredMesh) + # For unstructured mesh, we just return number of cells in x, and 1 in y and z + ncells = number_of_cells(g) + return (ncells, 1, 1) +end + function grid_dims_ijk(g::UnstructuredMesh{D, CartesianIndex{D}}) where D dims = Tuple(g.structure) if D == 1 From 9973c10c67cef79cb3d6e534ca62e2712585fc3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Wed, 8 Oct 2025 11:30:33 +0200 Subject: [PATCH 25/31] Add 3 specialization for plotter (#197) --- src/meshes/unstructured/plotting.jl | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/meshes/unstructured/plotting.jl b/src/meshes/unstructured/plotting.jl index 3a4fe9c66..b2bf4efd5 100644 --- a/src/meshes/unstructured/plotting.jl +++ b/src/meshes/unstructured/plotting.jl @@ -94,8 +94,19 @@ end function triangulate_and_add_faces!(dest, face, neighbors, C, nodes, node_pts::Vector{SVector{3, T}}, cell_centroids, n; offset = 0) where {T} cell_index, face_index, pts, tri = dest - if n == 4 - # TODO: Could add a 3 mesh specialization here + if n == 3 + # Just add the triangle + for cell in neighbors + for i in 1:n + push!(cell_index, cell) + push!(face_index, face) + push!(pts, node_pts[nodes[i]]) + end + push!(tri, SVector{3, Int}(offset+1, offset + 2, offset + 3)) + offset += n + end + elseif n == 4 + # Tesselation is known for cell in neighbors for i in 1:n push!(cell_index, cell) From 614167629626927ebfba1c97b5387ed41734b093 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Wed, 8 Oct 2025 11:30:43 +0200 Subject: [PATCH 26/31] Improve coarsening performance substantially (#198) * Improve performance of coarsening * Update coarsening.jl --- ext/JutulGLMakieExt/variables.jl | 2 +- src/coarsening.jl | 15 ++++++++++----- src/partitioning.jl | 1 - 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/ext/JutulGLMakieExt/variables.jl b/ext/JutulGLMakieExt/variables.jl index 2a2bc2d55..ce5ba22ef 100644 --- a/ext/JutulGLMakieExt/variables.jl +++ b/ext/JutulGLMakieExt/variables.jl @@ -69,7 +69,7 @@ function Jutul.plot_secondary_variables(model::MultiModel; linewidth = 2, kwarg. 
end function Jutul.plot_jutul_line_data(data::JutulLinePlotData; kwarg...) - plot_jutul_line_data([data]; kwarg...) + Jutul.plot_jutul_line_data([data]; kwarg...) end function Jutul.plot_jutul_line_data(data; size = (1600, 900), linewidth = 2, regions = axes(data, 2), kwarg...) diff --git a/src/coarsening.jl b/src/coarsening.jl index fb7928a64..db90d650b 100644 --- a/src/coarsening.jl +++ b/src/coarsening.jl @@ -26,10 +26,14 @@ function inner_apply_coarsening_function(finevals, fine_indices, op::CoarsenByFi return finevals[1] end -function apply_coarsening_function!(coarsevals, finevals, op, coarse::DataDomain, fine::DataDomain, name, entity::Union{Cells, Faces}) +function apply_coarsening_function!(coarsevals, finevals, op, coarse::DataDomain, fine::DataDomain, name, entity::Union{Cells, Faces}; coarse_to_cells = missing) CG = physical_representation(coarse) function block_indices(CG, block, ::Cells) - return findall(isequal(block), CG.partition) + if ismissing(coarse_to_cells) + return findall(isequal(block), CG.partition) + else + return coarse_to_cells[block] + end end function block_indices(CG, block, ::Faces) return CG.coarse_faces_to_fine[block] @@ -66,16 +70,17 @@ function coarsen_data_domain(D::DataDomain, partition; g = physical_representation(D) cg = CoarseMesh(g, partition) cD = DataDomain(cg) + coarse_to_cells = map(i -> findall(isequal(i), cg.partition), 1:maximum(cg.partition)) for name in keys(D) if !haskey(cD, name) val = D[name] e = Jutul.associated_entity(D, name) Te = eltype(val) - if !(e in (Cells(), Faces(), BoundaryFaces(), nothing)) + if !(e in (Cells(), Faces(), BoundaryFaces(), NoEntity(), nothing)) # Other entities are not supported yet. continue end - if isnothing(e) + if isnothing(e) || e isa NoEntity # No idea about coarse dims, just copy coarseval = deepcopy(val) elseif val isa AbstractVecOrMat @@ -90,7 +95,7 @@ function coarsen_data_domain(D::DataDomain, partition; else f = get(functions, name, default_other) end - coarseval = apply_coarsening_function!(coarseval, val, f, cD, D, name, e) + coarseval = apply_coarsening_function!(coarseval, val, f, cD, D, name, e, coarse_to_cells = coarse_to_cells) else # Don't know what's going on coarseval = deepcopy(val) diff --git a/src/partitioning.jl b/src/partitioning.jl index 51a577d15..81092f1c8 100644 --- a/src/partitioning.jl +++ b/src/partitioning.jl @@ -129,7 +129,6 @@ function process_partition(neighbors, partition; weights = missing) new_p = copy(partition) neighbors = Jutul.convert_neighborship(neighbors) max_p = maximum(partition) - nfine = length(partition) for coarse in 1:max_p cells = findall(isequal(coarse), partition) if length(cells) > 0 From b6cee7356be7a0e979fb0e635700164d5a0e23ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Thu, 9 Oct 2025 11:43:39 +0200 Subject: [PATCH 27/31] Add cross-term sparsity to objective function (#199) --- src/multimodel/gradients.jl | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/multimodel/gradients.jl b/src/multimodel/gradients.jl index 005bf2ff0..d9e369517 100644 --- a/src/multimodel/gradients.jl +++ b/src/multimodel/gradients.jl @@ -213,6 +213,35 @@ function determine_sparsity_simple(F, model::MultiModel, state, state0 = nothing end outer_sparsity[mod_k] = sparsity end + # We also add sparsity from cross terms (they can couple variables between models) + for ctp in model.cross_terms + for is_normal in (true, false) + ct = ctp.cross_term + if !is_normal && !has_symmetry(ct) + continue + end + if is_normal + 
modname = ctp.target + eqname = ctp.target_equation + else + modname = ctp.source + eqname = ctp.source_equation + end + m = model[modname] + eq = m.equations[eqname] + if is_normal + ix = cross_term_entities(ct, eq, m) + else + ix = cross_term_entities_source(ct, eq, m) + end + entityname = associated_entity(eq) + dest = outer_sparsity[modname][entityname] + for i in ix + push!(dest, i) + end + unique!(dest) + end + end return outer_sparsity end From d05296f9396429854cbf418b8cb1a1e2353372ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Fri, 10 Oct 2025 11:35:33 +0200 Subject: [PATCH 28/31] Guard against missing entry in objective sparsity (#200) --- src/multimodel/gradients.jl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/multimodel/gradients.jl b/src/multimodel/gradients.jl index d9e369517..949349ca0 100644 --- a/src/multimodel/gradients.jl +++ b/src/multimodel/gradients.jl @@ -235,7 +235,11 @@ function determine_sparsity_simple(F, model::MultiModel, state, state0 = nothing ix = cross_term_entities_source(ct, eq, m) end entityname = associated_entity(eq) - dest = outer_sparsity[modname][entityname] + msparsity = outer_sparsity[modname] + if !haskey(msparsity, entityname) + msparsity[entityname] = Int64[] + end + dest = msparsity[entityname] for i in ix push!(dest, i) end From 9143b27d06fa4dd6e5658722a4a482b688789d41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olav=20M=C3=B8yner?= Date: Tue, 28 Oct 2025 16:58:51 +0100 Subject: [PATCH 29/31] Improve performance and code for generic adjoint (#201) * Put delumping/unscale in function * Single step sparsity is ok for "reduced" varsets * Replace a broadcast by loop due to allocations * Add more output to DI * Improve info_level in opt * Add type check * Guard against missing field in vectorization * Update optimization.jl * Use ordered dict in variable mapper * Internal improvements to sparsity * Update interface.jl * Update interface.jl * Refactor out the main optimization setup * Add a finite difference utility * Update adjoints.jl * Add deps_ad argument (non-functional) * Refactor * Split adjoint setup * More setup work on mixed adjoint * More coding on split adjoint * Mostly working version * Work on getting mixed state0 working * Cleanup * Add new tests to adjoints --- src/DictOptimization/interface.jl | 54 ++--- src/DictOptimization/optimization.jl | 60 +++-- src/DictOptimization/types.jl | 86 +++++++ src/LBFGS/constrained_optimizer.jl | 12 +- src/ad/AdjointsDI/adjoints.jl | 335 ++++++++++++++++----------- src/ad/AdjointsDI/split.jl | 3 +- src/ad/gradients.jl | 34 ++- src/ad/objectives.jl | 4 +- src/core_types/core_types.jl | 2 +- src/equations.jl | 5 +- src/multimodel/gradients.jl | 2 +- test/adjoints/basic_adjoint.jl | 9 +- 12 files changed, 402 insertions(+), 204 deletions(-) diff --git a/src/DictOptimization/interface.jl b/src/DictOptimization/interface.jl index 4b7eabcd7..4dc5c4862 100644 --- a/src/DictOptimization/interface.jl +++ b/src/DictOptimization/interface.jl @@ -41,7 +41,8 @@ using `free_optimization_parameter!` prior to calling the optimizer. 
- `backend_arg`: Options for the autodiff backend: - `use_sparsity`: Enable sparsity detection for the objective function - `di_sparse`: Use sparse differentiation - - `single_step_sparsity`: Enable single step sparsity detection (if sparsity does not change during timesteps) + - `single_step_sparsity`: Enable single step sparsity detection (if sparsity + does not change during timesteps) - `do_prep`: Perform preparation step # Returns @@ -73,43 +74,34 @@ function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_functio max_it = 25, opt_fun = missing, maximize = false, + backend_arg = missing, + info_level = 0, + deps::Symbol = :case, + deps_ad = :jutul, simulator = missing, config = missing, solution_history = false, - deps::Symbol = :case, - backend_arg = ( - use_sparsity = true, - di_sparse = true, - single_step_sparsity = false, - do_prep = true, - deps = deps, - ), kwarg... ) if ismissing(setup_fn) error("Setup function was not found in DictParameters struct or as last positional argument.") end - x0, x_setup, limits = optimization_setup(dopt) - - ub = limits.max - lb = limits.min - # Set up a cache for forward/backward sim - adj_cache = setup_optimization_cache(dopt, simulator = simulator, config = config) - - if solution_history - sols = [] - else - sols = missing - end - solve_and_differentiate(x) = solve_and_differentiate_for_optimization(x, dopt, setup_fn, objective, x_setup, adj_cache; - backend_arg, - solution_history = sols + problem = JutulOptimizationProblem(dopt, objective, setup_fn; + simulator = simulator, + config = config, + info_level = info_level, + backend_arg = backend_arg, + solution_history = solution_history, + deps = deps, + deps_ad = deps_ad ) + if dopt.verbose - jutul_message("Optimization", "Starting calibration of $(length(x0)) parameters.", color = :green) + jutul_message("Optimization", "Starting calibration of $(length(problem.x0)) parameters.", color = :green) end + t_opt = @elapsed if ismissing(opt_fun) - v, x, history = Jutul.LBFGS.box_bfgs(x0, solve_and_differentiate, lb, ub; + v, x, history = Jutul.LBFGS.box_bfgs(problem; print = Int(dopt.verbose), max_it = max_it, grad_tol = grad_tol, @@ -120,7 +112,7 @@ function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_functio else self_cache = Dict() function f!(x) - f, g = solve_and_differentiate(x) + f, g = opt_cache(x) self_cache[:f] = f self_cache[:g] = g self_cache[:x] = x @@ -140,12 +132,11 @@ function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_functio jutul_message("Optimization", "Finished in $t_opt seconds.", color = :green) end # Also remove AD from the internal ones and update them - prm_out = deepcopy(dopt.parameters) - optimizer_devectorize!(prm_out, x, x_setup) + prm_out = optimizer_devectorize(problem, x) dopt.parameters_optimized = prm_out dopt.history = history if solution_history - dopt.history = (history = history, solutions = sols) + dopt.history = (history = history, solutions = problem.solution_history) else dopt.history = history end @@ -167,10 +158,11 @@ function parameters_gradient(dopt::DictParameters, objective, setup_fn = dopt.se cache = missing, raw_output = false, output_cache = false, + deps = :case, backend_arg = ( use_sparsity = false, di_sparse = true, - single_step_sparsity = false, + single_step_sparsity = deps != :case, do_prep = true, ) ) diff --git a/src/DictOptimization/optimization.jl b/src/DictOptimization/optimization.jl index b4779fee2..d468eeeec 100644 --- a/src/DictOptimization/optimization.jl +++ 
b/src/DictOptimization/optimization.jl @@ -6,14 +6,7 @@ function solve_and_differentiate_for_optimization(x, dopt::DictParameters, setup ) prm = adj_cache[:parameters] - function setup_from_vector(X, step_info) - optimizer_devectorize!(prm, X, x_setup) - # Return the case setup function This is a function that sets up the - # case from the parameters - F() = setup_fn(prm, step_info) - return redirect_stdout(F, devnull) - end - + setup_from_vector = (X, step_info = missing) -> setup_from_vector_optimizer(X, step_info, setup_fn, prm, x_setup) case = setup_from_vector(x, missing) objective = Jutul.adjoint_wrap_objective(objective, case.model) result = forward_simulate_for_optimization(case, adj_cache) @@ -35,7 +28,7 @@ function solve_and_differentiate_for_optimization(x, dopt::DictParameters, setup t_setup = @elapsed S = Jutul.AdjointsDI.setup_adjoint_storage_generic( x, setup_from_vector, packed_steps, objective; backend_arg..., - info_level = adj_cache[:config][:info_level] + info_level = adj_cache[:info_level] ) jutul_message("Optimization", "Finished setup in $t_setup seconds.", color = :green) adj_cache[:storage] = S @@ -54,6 +47,14 @@ function solve_and_differentiate_for_optimization(x, dopt::DictParameters, setup return (f, g) end +function setup_from_vector_optimizer(X, step_info, setup_fn, prm, x_setup) + optimizer_devectorize!(prm, X, x_setup) + # Return the case setup function This is a function that sets up the + # case from the parameters + F() = setup_fn(prm, step_info) + return redirect_stdout(F, devnull) +end + function forward_simulate_for_optimization(case, adj_cache) sim = get(adj_cache, :simulator, missing) if ismissing(sim) @@ -62,7 +63,10 @@ function forward_simulate_for_optimization(case, adj_cache) end config = get(adj_cache, :config, missing) if ismissing(config) - config = simulator_config(sim, info_level = -1, output_substates = true) + config = simulator_config(sim, + info_level = adj_cache[:info_level], + output_substates = true + ) adj_cache[:config] = config end return simulate!(sim, case.dt, @@ -76,21 +80,15 @@ end function optimizer_devectorize!(prm, X, x_setup) if haskey(x_setup, :lumping) || haskey(x_setup, :scalers) X_new = similar(X, 0) + sizehint!(X_new, length(X)) pos = 0 for (i, k) in enumerate(x_setup.names) scaler = get(x_setup.scalers, k, missing) if haskey(x_setup.lumping, k) L = x_setup.lumping[k] - first_index = L.first_index - N = length(first_index) - for v in L.lumping - push!(X_new, undo_scaler(X[pos + v], scaler)) - end + N = optimizer_devectorize_lumping!(X_new, X, pos, L, scaler) else - N = x_setup.offsets[i+1]-x_setup.offsets[i] - for i in pos+1:pos+N - push!(X_new, undo_scaler(X[i], scaler)) - end + N = optimizer_devectorize_scaler!(X_new, X, i, pos, x_setup.offsets, scaler) end pos += N end @@ -101,6 +99,23 @@ function optimizer_devectorize!(prm, X, x_setup) return Jutul.AdjointsDI.devectorize_nested!(prm, X, x_setup) end +function optimizer_devectorize_lumping!(X_new, X, pos, L, scaler) + first_index = L.first_index + N = length(first_index) + for v in L.lumping + push!(X_new, undo_scaler(X[pos + v], scaler)) + end + return N +end + +function optimizer_devectorize_scaler!(X_new, X, i, pos, offsets, scaler) + N = offsets[i+1]-offsets[i] + for ix in pos+1:pos+N + push!(X_new, undo_scaler(X[ix], scaler)) + end + return N +end + function optimization_setup(dopt::DictParameters; include_limits = true) x0, x_setup = Jutul.AdjointsDI.vectorize_nested(dopt.parameters, active = active_keys(dopt), @@ -155,9 +170,14 @@ function 
optimization_setup(dopt::DictParameters; include_limits = true) return (x0 = x0, x_setup = x_setup, limits = lims) end -function setup_optimization_cache(dopt::DictParameters; simulator = missing, config = missing) +function setup_optimization_cache(dopt::DictParameters; + simulator = missing, + config = missing, + info_level = 0 + ) # Set up a cache for forward/backward sim adj_cache = Dict() + adj_cache[:info_level] = info_level # Internal copy - to be used for adjoints etc adj_cache[:parameters] = widen_dict_copy(dopt.parameters) if !ismissing(simulator) diff --git a/src/DictOptimization/types.jl b/src/DictOptimization/types.jl index dcd86581b..e21b9d25a 100644 --- a/src/DictOptimization/types.jl +++ b/src/DictOptimization/types.jl @@ -100,3 +100,89 @@ function DictParametersSampler(dopt::DictParameters, output_function = (case, re end return DictParametersSampler(parameters, dopt.setup_function, output_function, objective, simulator, config, setup) end + +struct JutulOptimizationProblem + dict_parameters::DictParameters + setup_function + objective + x0 + x_setup + limits + backend_arg::NamedTuple + cache + solution_history + function JutulOptimizationProblem(dopt::DictParameters, objective, setup_fn = dopt.setup_function; + backend_arg = missing, + info_level = 0, + deps::Symbol = :case, + deps_ad::Symbol = :jutul, + simulator = missing, + config = missing, + solution_history::Bool = false + ) + if ismissing(backend_arg) + deps_ad in (:di, :jutul) || error("deps_ad must be :di or :jutul. Got $deps_ad.") + backend_arg = ( + use_sparsity = true, + di_sparse = true, + single_step_sparsity = deps == :parameters || deps == :parameters_and_state0, + do_prep = true, + deps = deps, + deps_ad = deps_ad + ) + end + x0, x_setup, limits = optimization_setup(dopt) + + # Set up a cache for forward/backward sim + adj_cache = setup_optimization_cache(dopt, simulator = simulator, config = config, info_level = info_level) + + if solution_history + sols = [] + else + sols = missing + end + return new( + dopt, + setup_fn, + objective, + x0, + x_setup, + limits, + backend_arg, + adj_cache, + sols + ) + end +end + +function evaluate(opt::JutulOptimizationProblem, x = opt.x0; gradient = true) + dopt = opt.dict_parameters + setup_fn = opt.setup_function + objective = opt.objective + x_setup = opt.x_setup + adj_cache = opt.cache + backend_arg = opt.backend_arg + obj, dobj_dx = solve_and_differentiate_for_optimization(x, dopt, setup_fn, objective, x_setup, adj_cache; + backend_arg = backend_arg, + gradient = gradient + ) + return (obj, dobj_dx) +end + +function (I::JutulOptimizationProblem)(x = I.x0; gradient = true) + evaluate(I, x; gradient = gradient) +end + +function finite_difference_gradient_entry(I::JutulOptimizationProblem, x = I.x0; index = 1, eps = 1e-6) + f0, _ = I(x; gradient = false) + xd = copy(x) + xd[index] += eps + fd, _ = I(xd; gradient = false) + return (fd - f0)/eps +end + +function optimizer_devectorize(P::JutulOptimizationProblem, x) + prm_out = deepcopy(P.dict_parameters.parameters) + optimizer_devectorize!(prm_out, x, P.x_setup) + return prm_out +end diff --git a/src/LBFGS/constrained_optimizer.jl b/src/LBFGS/constrained_optimizer.jl index cf56a1b1b..5fc5ea535 100644 --- a/src/LBFGS/constrained_optimizer.jl +++ b/src/LBFGS/constrained_optimizer.jl @@ -193,21 +193,27 @@ function unit_box_bfgs( end +function box_bfgs(problem; kwarg...) + ub = problem.limits.max + lb = problem.limits.min + return box_bfgs(problem.x0, problem, lb, ub; kwarg...) 
+end + function box_bfgs(x0, f, lb, ub; kwarg...) n = length(x0) length(lb) == n || throw(ArgumentError("Length of lower bound ($(length(lb))) must match length of initial guess ($n)")) length(ub) == n || throw(ArgumentError("Length of upper bound ($(length(ub))) must match length of initial guess ($n)")) # Check bounds for i in eachindex(x0, lb, ub) + if lb[i] >= ub[i] + throw(ArgumentError("Lower bound must be less than upper bound for index $i: lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) + end if x0[i] < lb[i] || x0[i] > ub[i] throw(ArgumentError("Initial guess x0[$i] = $(x0[i]) is outside bounds [$(lb[i]), $(ub[i])]")) end if !isfinite(lb[i]) || !isfinite(ub[i]) throw(ArgumentError("Bounds must be finite, got lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) end - if lb[i] >= ub[i] - throw(ArgumentError("Lower bound must be less than upper bound for index $i: lb[$i] = $(lb[i]), ub[$i] = $(ub[i])")) - end end δ = ub .- lb diff --git a/src/ad/AdjointsDI/adjoints.jl b/src/ad/AdjointsDI/adjoints.jl index bd7a60dab..e9b97df4d 100644 --- a/src/ad/AdjointsDI/adjoints.jl +++ b/src/ad/AdjointsDI/adjoints.jl @@ -25,7 +25,6 @@ function solve_adjoint_generic(X, F, states, reports_or_timesteps, G; if info_level > 1 jutul_message("Adjoints", "Storage set up in $(Jutul.get_tstr(t_storage)).", color = :blue) end - n_param = storage[:n_dynamic] n_param_static = storage[:n_static] n_param_static == length(X) || error("Internal error: static parameter length mismatch.") ∇G = zeros(n_param_static) @@ -34,13 +33,10 @@ function solve_adjoint_generic(X, F, states, reports_or_timesteps, G; if info_level > 1 jutul_message("Adjoints", "Solving $N adjoint steps...", color = :blue) end - t_solve = @elapsed solve_adjoint_generic!(∇G, X, F, storage, packed_steps, G, + solve_adjoint_generic!(∇G, X, F, storage, packed_steps, G, info_level = info_level, state0 = state0, ) - if info_level > 1 - jutul_message("Adjoints", "Adjoints solved in $(Jutul.get_tstr(t_solve)).", color = :blue) - end Jutul.print_global_timer(extra_timing; text = "Adjoint solve detailed timing") if extra_output return (∇G, storage) @@ -58,54 +54,74 @@ function solve_adjoint_generic!(∇G, X, F, storage, packed_steps::AdjointPacked info_level = 0, state0 = missing ) - N = length(packed_steps) - case = setup_case(X, F, packed_steps, state0, :all) - if F != storage[:F_input] - @warn "The function F used in the solve must be the same as the one used in the setup." - end - F_dynamic = storage[:F_dynamic] - F_static = storage[:F_static] - is_fully_dynamic = storage[:F_fully_dynamic] - if is_fully_dynamic - Y = X - dYdX = missing - else - prep = storage[:dF_static_dX_prep] - backend = storage[:backend_di] - Y, dYdX = value_and_jacobian(F_static, prep, backend, X) - end - G = Jutul.adjoint_wrap_objective(G, case.model) - Jutul.adjoint_reset_parameters!(storage, case.parameters) - - packed_steps = set_packed_result_dynamic_values!(packed_steps, case) - H = storage[:adjoint_objective_helper] - H.packed_steps = packed_steps - - # Do sparsity detection if not already done. - if info_level > 1 - jutul_message("Adjoints", "Updating sparsity patterns.", color = :blue) - end + t_solve = @elapsed begin + N = length(packed_steps) + case = setup_case(X, F, packed_steps, state0, :all) + if F != storage[:F_input] + @warn "The function F used in the solve must be the same as the one used in the setup." 
+ end + # F_dynamic = storage[:F_dynamic] + F_static = storage[:F_static] + is_fully_dynamic = storage[:F_fully_dynamic] + if is_fully_dynamic + Y = X + dYdX = missing + else + prep = storage[:dF_static_dX_prep] + backend = storage[:backend_di] + # Y = F_static(X) + # dYdX = jacobian(F_static, prep, backend, X) + Y, dYdX = value_and_jacobian(F_static, prep, backend, X) + # Y, dYdX = value_and_jacobian(F_static, AutoForwardDiff(), X) + end + G = Jutul.adjoint_wrap_objective(G, case.model) + Jutul.adjoint_reset_parameters!(storage, case.parameters) + + packed_steps = set_packed_result_dynamic_values!(packed_steps, case) + H = storage[:adjoint_objective_helper] + H.num_evals = 0 + H.packed_steps = packed_steps + dG_dynamic = storage[:dynamic_buffer] + + if storage[:deps_ad] == :jutul + @assert !is_fully_dynamic "Fully dynamic dependencies must use :di adjoints." + dG_dynamic_prm = storage[:dynamic_buffer_parameters] + Jutul.solve_adjoint_sensitivities!(dG_dynamic_prm, storage, packed_steps, G; info_level = 0) + dstate0 = storage[:dstate0] + if !ismissing(dstate0) + dG_dynamic_state0 = storage[:dynamic_buffer_state0] + dG_dynamic_state0 .= dstate0 + end + else + # Do sparsity detection if not already done. + if info_level > 1 + jutul_message("Adjoints", "Updating sparsity patterns.", color = :blue) + end - Jutul.update_objective_sparsity!(storage, G, packed_steps, :forward) - # Set gradient to zero before solve starts - dG_dynamic = storage[:dynamic_buffer] - @. dG_dynamic = 0 - @tic "sensitivities" for i in N:-1:1 - if info_level > 0 - jutul_message("Step $i/$N", "Solving adjoint system.", color = :blue) + Jutul.update_objective_sparsity!(storage, G, packed_steps, :forward) + # Set gradient to zero before solve starts + @. dG_dynamic = 0 + @tic "sensitivities" for i in N:-1:1 + if info_level > 0 + jutul_message("Step $i/$N", "Solving adjoint system.", color = :blue) + end + update_sensitivities_generic!(dG_dynamic, Y, H, i, G, storage, packed_steps) + end + dparam = storage.dparam + if !isnothing(dparam) + @. dG_dynamic += dparam + end + end + if is_fully_dynamic + copyto!(∇G, dG_dynamic) + else + mul!(∇G, dYdX', dG_dynamic) end - update_sensitivities_generic!(dG_dynamic, Y, H, i, G, storage, packed_steps) - end - dparam = storage.dparam - if !isnothing(dparam) - @. 
dG_dynamic += dparam end - if is_fully_dynamic - copyto!(∇G, dG_dynamic) - else - mul!(∇G, dYdX', dG_dynamic) + if info_level > 1 + num_evals = storage[:adjoint_objective_helper].num_evals + jutul_message("Adjoints", "Adjoints solved in $(Jutul.get_tstr(t_solve)) ($num_evals residual evaluations).", color = :blue) end - all(isfinite, ∇G) || error("Adjoint solve resulted in non-finite gradient values.") return ∇G end @@ -120,6 +136,8 @@ function setup_adjoint_storage_generic(X, F, packed_steps::AdjointPackedResult, do_prep = true, di_sparse = true, deps = :case, + deps_ad = :jutul, + deps_targets = nothing, backend = Jutul.default_di_backend(sparse = di_sparse), info_level = 0, single_step_sparsity = true, @@ -128,15 +146,118 @@ function setup_adjoint_storage_generic(X, F, packed_steps::AdjointPackedResult, case = setup_case(X, F, packed_steps, state0, :all) G = Jutul.adjoint_wrap_objective(G, case.model) packed_steps = set_packed_result_dynamic_values!(packed_steps, case) - storage = Jutul.setup_adjoint_storage_base( - case.model, case.state0, case.parameters, - use_sparsity = use_sparsity, - linear_solver = Jutul.select_linear_solver(case.model, mode = :adjoint, rtol = 1e-6), - n_objective = nothing, - info_level = info_level, + adj_kwarg = ( + use_sparsity = use_sparsity, + linear_solver = Jutul.select_linear_solver(case.model, mode = :adjoint, rtol = 1e-6), + n_objective = nothing, + info_level = info_level ) - setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case, backend, do_prep, single_step_sparsity, di_sparse, deps) - storage[:dparam] = zeros(storage[:n_dynamic]) + if ismissing(backend) + backend = Jutul.default_di_backend(sparse = di_sparse) + end + model = case.model + # Two approaches: + # 1. F_static(X) -> Y (vector of parameters) -> F_dynamic(Y) (updated case) + # 2. F(X) -> case directly and F_dynamic = F and F_static = identity + deps in (:case, :parameters, :parameters_and_state0) || error("deps must be :case, :parameters or :parameters_and_state0. Got $deps.") + prep_static = nothing + adj_kwarg = ( + use_sparsity = use_sparsity, + linear_solver = Jutul.select_linear_solver(case.model, mode = :adjoint, rtol = 1e-6), + n_objective = nothing, + info_level = info_level, + ) + use_di = deps_ad == :di + fully_dynamic = deps == :case + inc_state0 = deps == :parameters_and_state0 + if fully_dynamic + deps_ad = :di + F_dynamic = F + F_static = x -> x + parameter_map = state0_map = missing + N_prm = length(X) + N_state0 = 0 + else + parameter_map, = Jutul.variable_mapper(model, :parameters, + targets = deps_targets, + config = nothing + ) + N_prm = Jutul.vectorized_length(case.model, parameter_map) + if inc_state0 + state0_map, = Jutul.variable_mapper(model, :primary) + N_state0 = Jutul.number_of_degrees_of_freedom(case.model) + else + state0_map = missing + N_state0 = 0 + end + cache_static = Dict{Type, AbstractVector}() + F_static = (X, step_info = missing) -> map_X_to_Y(F, X, case.model, parameter_map, state0_map, cache_static) + F_dynamic = (Y, step_info = missing) -> setup_from_vectorized(Y, case, parameter_map, state0_map; step_info = step_info) + if do_prep + prep_static = prepare_jacobian(F_static, backend, X) + end + end + if fully_dynamic || use_di + storage = Jutul.setup_adjoint_storage_base( + case.model, case.state0, case.parameters; + adj_kwarg... 
+ ) + else + storage = Jutul.setup_adjoint_storage(model; + state0 = case.state0, + parameters = case.parameters, + include_state0 = inc_state0, + parameter_map = parameter_map, + state0_map = state0_map, + adj_kwarg... + ) + end + # Whatever was input - for checking + storage[:F_input] = F + # Dynamic part - every timestep + storage[:F_dynamic] = F_dynamic + # Static part - once + storage[:F_static] = F_static + # Jacobian action + storage[:dF_static_dX_prep] = prep_static + storage[:F_fully_dynamic] = fully_dynamic + # Hints about what type of dependencies are used + storage[:deps] = deps + storage[:deps_ad] = deps_ad + + # Switch to Y and F_dynamic(Y) as main function + Y = F_static(X) + storage[:n_static] = length(X) + storage[:n_dynamic] = length(Y) + # Buffer used for dynamic gradient storage + dG_dyn = similar(Y) + storage[:dynamic_buffer] = dG_dyn + storage[:dynamic_buffer_parameters] = view(dG_dyn, 1:N_prm) + storage[:dynamic_buffer_state0] = view(dG_dyn, (N_prm+1):(N_prm+N_state0)) + H = AdjointObjectiveHelper(F_dynamic, G, packed_steps) + storage[:adjoint_objective_helper] = H + # Note: strict = false is needed because we create another function on the fly + # that essentially calls the same function. + if do_prep + if single_step_sparsity + step_index = :firstlast + else + step_index = :all + end + set_objective_helper_step_index!(H, case.model, step_index) + prep = prepare_jacobian(H, backend, Y) + else + prep = nothing + end + storage[:prep_di] = prep + storage[:backend_di] = backend + if use_di + storage[:dparam] = zeros(storage[:n_dynamic]) + else + # state0 is a bit strange + storage[:dparam] = zeros(N_prm) + end + return storage end @@ -181,7 +302,6 @@ function update_sensitivities_generic!(∇G, X, H, i, G, adjoint_storage, packed # Add zero entry (corresponding to objective values) to avoid resizing matrix. N = length(λ) push!(λ, 0.0) - # tmp = jac'*λ mul!(∇G, jac', λ, 1.0, 1.0) resize!(λ, N) # Gradient of partial objective to parameters @@ -272,11 +392,12 @@ mutable struct AdjointObjectiveHelper F G objective_evaluator - step_index::Union{Int, Missing} + step_index::Union{Symbol, Int} packed_steps::AdjointPackedResult cache::Dict{Tuple{DataType, Int64}, Any} + num_evals::Int64 function AdjointObjectiveHelper(F, G, packed_steps::AdjointPackedResult) - new(F, G, missing, missing, packed_steps, Dict()) + new(F, G, missing, :all, packed_steps, Dict(), 0) end end @@ -285,7 +406,8 @@ function set_objective_helper_step_index!(H::AdjointObjectiveHelper, model, step step_index > 0 || error("Step index must be positive. Got $step_index.") step_index <= length(H.packed_steps) || error("Step index $step_index is larger than the number of steps $(length(H.packed_steps)).") else - ismissing(step_index) || error("Step index must be an integer or missing. Got $step_index.") + step_index in (:firstlast, :all) || error("If step_index is a Symbol it must be :firstlast or :all.") + step_index isa Symbol || error("Step index must be an integer or symbol (:firstlast, :all). 
Got $step_index.") end H.step_index = step_index H.objective_evaluator = Jutul.objective_evaluator_from_model_and_state(H.G, model, H.packed_steps, step_index) @@ -297,16 +419,26 @@ function (H::AdjointObjectiveHelper)(x) s0, s, = Jutul.adjoint_step_state_triplet(packed, ix) is_sum = H.G isa Jutul.AbstractSumObjective H.G::Jutul.AbstractJutulObjective + H.num_evals += 1 evaluate_residual_and_jacobian_for_state_pair(x, s, s0, H.F, H.objective_evaluator, packed, ix, H.cache; is_sum = is_sum) end - if ismissing(H.step_index) - # Loop over all to get the "extended sparsity". This is a bit of a hack, - # but it covers the case where there is some change in dynamics/controls - # at a later step. - v = evaluate(x, 1) + if H.step_index isa Symbol + # Loop over multiple steps to get the "extended sparsity". This is a bit + # of a hack, but it covers the case where there is some change in + # dynamics/controls at a later step. + if H.step_index == :all + indices_to_eval = eachindex(packed.states) + else + H.step_index == :firstlast || error("Unknown step index symbol: $(indices_to_eval).") + indices_to_eval = [1, length(packed.states)] + end + v = evaluate(x, indices_to_eval[1]) @. v = abs.(v) - for i in 2:length(packed) - tmp = evaluate(x, i) + for (i, step) in enumerate(indices_to_eval) + if i == 1 + continue + end + tmp = evaluate(x, step) @. v += abs.(tmp) end else @@ -315,73 +447,6 @@ function (H::AdjointObjectiveHelper)(x) return v end -function setup_jacobian_evaluation!(storage, X, F, G, packed_steps, case, backend, do_prep, single_step_sparsity, di_sparse, deps::Symbol) - if ismissing(backend) - backend = Jutul.default_di_backend(sparse = di_sparse) - end - model = case.model - # Two approaches: - # 1. F_static(X) -> Y (vector of parameters) -> F_dynamic(Y) (updated case) - # 2. F(X) -> case directly and F_dynamic = F and F_static = identity - fully_dynamic = deps == :case - prep_static = nothing - if fully_dynamic - F_dynamic = F - F_static = x -> x - else - deps in (:parameters, :parameters_and_state0) || error("deps must be :case, :parameters or :parameters_and_state0. Got $deps.") - # cfg = optimization_config(case0, include_state0 = deps == :parameters_and_state0) - inc_state0 = deps == :parameters_and_state0 - parameters_map, = Jutul.variable_mapper(model, :parameters) - if inc_state0 - state0_map, = Jutul.variable_mapper(model, :primary) - else - state0_map = missing - end - cache_static = Dict{Type, AbstractVector}() - F_static = (X, step_info = missing) -> map_X_to_Y(F, X, case.model, parameters_map, state0_map, cache_static) - F_dynamic = (Y, step_info = missing) -> setup_from_vectorized(Y, case, parameters_map, state0_map) - if do_prep - prep_static = prepare_jacobian(F_static, backend, X) - end - end - - # Whatever was input - for checking - storage[:F_input] = F - # Dynamic part - every timestep - storage[:F_dynamic] = F_dynamic - # Static part - once - storage[:F_static] = F_static - # Jacobian action - storage[:dF_static_dX_prep] = prep_static - storage[:F_fully_dynamic] = fully_dynamic - storage[:deps] = deps - - # Switch to Y and F_dynamic(Y) as main function - Y = F_static(X) - storage[:n_static] = length(X) - storage[:n_dynamic] = length(Y) - storage[:dynamic_buffer] = similar(Y) - H = AdjointObjectiveHelper(F_dynamic, G, packed_steps) - storage[:adjoint_objective_helper] = H - # Note: strict = false is needed because we create another function on the fly - # that essentially calls the same function. 
- if do_prep - if single_step_sparsity - step_index = 1 - else - step_index = missing - end - set_objective_helper_step_index!(H, case.model, step_index) - prep = prepare_jacobian(H, backend, Y) - else - prep = nothing - end - storage[:prep_di] = prep - storage[:backend_di] = backend - return storage -end - function setup_outer_chain_rule(F, case, deps::Symbol) return (F_dynamic, F_static, dF_static_dX) end diff --git a/src/ad/AdjointsDI/split.jl b/src/ad/AdjointsDI/split.jl index 5e27fb9da..d008adce6 100644 --- a/src/ad/AdjointsDI/split.jl +++ b/src/ad/AdjointsDI/split.jl @@ -25,7 +25,7 @@ function map_X_to_Y(F, X, model, parameters_map, state0_map, cache) return Y end -function setup_from_vectorized(Y, case, parameters_map, state0_map) +function setup_from_vectorized(Y, case, parameters_map, state0_map; step_info = missing) has_state0 = !ismissing(state0_map) N_prm = Jutul.vectorized_length(case.model, parameters_map) if has_state0 @@ -40,6 +40,7 @@ function setup_from_vectorized(Y, case, parameters_map, state0_map) Y_s0 = view(Y, (N_prm+1):(N_prm+N_s0)) devectorize_variables!(case.state0, case.model, Y_s0, state0_map) else + @assert length(Y) == N_prm Y_prm = Y end devectorize_variables!(case.parameters, case.model, Y_prm, parameters_map) diff --git a/src/ad/gradients.jl b/src/ad/gradients.jl index 2928d76d1..438ef509c 100644 --- a/src/ad/gradients.jl +++ b/src/ad/gradients.jl @@ -108,6 +108,8 @@ Set up storage for use with `solve_adjoint_sensitivities!`. function setup_adjoint_storage(model; state0 = setup_state(model), parameters = setup_parameters(model), + state0_map = missing, + parameter_map = missing, n_objective = nothing, targets = parameter_targets(model), include_state0 = false, @@ -121,9 +123,13 @@ function setup_adjoint_storage(model; parameter_model = adjoint_parameter_model(model, targets) n_prm = number_of_degrees_of_freedom(parameter_model) # Note that primary is here because the target parameters are now the primaries for the parameter_model - parameter_map, = variable_mapper(parameter_model, :primary, targets = targets; kwarg...) + if ismissing(parameter_map) + parameter_map, = variable_mapper(parameter_model, :primary, targets = targets; kwarg...) + end if include_state0 - state0_map, = variable_mapper(model, :primary) + if ismissing(state0_map) + state0_map, = variable_mapper(model, :primary) + end n_state0 = number_of_degrees_of_freedom(model) state0_vec = zeros(n_state0) else @@ -226,8 +232,18 @@ Non-allocating version of `solve_adjoint_sensitivities`. """ function solve_adjoint_sensitivities!(∇G, storage, states, state0, timesteps, G; forces, info_level = 0) G = adjoint_wrap_objective(G, storage.forward.model) - packed_steps = AdjointPackedResult(states, timesteps, forces) - packed_steps.state0 = state0 + if states isa AdjointPackedResult + packed_steps = states + else + packed_steps = AdjointPackedResult(states, timesteps, forces) + packed_steps.state0 = state0 + end + return solve_adjoint_sensitivities!(∇G, storage, packed_steps::AdjointPackedResult, G; info_level = info_level) +end + +function solve_adjoint_sensitivities!(∇G, storage, packed_steps::AdjointPackedResult, G; info_level = 0) + state0 = packed_steps.state0 + G = adjoint_wrap_objective(G, storage.forward.model) # Do sparsity detection if not already done. 
if info_level > 1 jutul_message("Adjoints", "Updating sparsity patterns.", color = :blue) @@ -261,6 +277,7 @@ function solve_adjoint_sensitivities!(∇G, storage, states, state0, timesteps, return ∇G end + function adjoint_reset_parameters!(storage, parameters) if haskey(storage, :parameter) sims = (storage.forward, storage.backward, storage.parameter) @@ -828,9 +845,14 @@ end gradient_vec_or_mat(n, ::Nothing) = zeros(n) gradient_vec_or_mat(n, m) = zeros(n, m) -function variable_mapper(model::SimulationModel, type = :primary; targets = nothing, config = nothing, offset_x = 0, offset_full = offset_x) +function variable_mapper(model::SimulationModel, type = :primary; + targets = nothing, + config = nothing, + offset_x = 0, + offset_full = offset_x + ) vars = get_variables_by_type(model, type) - out = Dict{Symbol, Any}() + out = OrderedDict{Symbol, Any}() for (t, var) in vars if !isnothing(targets) && !(t in targets) continue diff --git a/src/ad/objectives.jl b/src/ad/objectives.jl index 6090f6c1d..7b603fbd5 100644 --- a/src/ad/objectives.jl +++ b/src/ad/objectives.jl @@ -32,7 +32,7 @@ end function objective_evaluator_from_model_and_state(G::AbstractSumObjective, model, packed_steps, i) # (model, state; kwarg...) -> obj - if ismissing(i) + if i isa Symbol # This is hack for the case where we want to evaluate the objective for # all steps for sparsity detection but the objective is a sum. So we # fake it by taking the first step. @@ -44,7 +44,7 @@ function objective_evaluator_from_model_and_state(G::AbstractSumObjective, model end function objective_evaluator_from_model_and_state(G::AbstractGlobalObjective, model, packed_steps, current_step) - if ismissing(current_step) + if current_step isa Symbol # Same hack as for AbstractSumObjective current_step = 1 end diff --git a/src/core_types/core_types.jl b/src/core_types/core_types.jl index ffba1ece9..7eae67630 100644 --- a/src/core_types/core_types.jl +++ b/src/core_types/core_types.jl @@ -872,7 +872,7 @@ end Set up a structure that holds the complete specification of a simulation case. """ -function JutulCase(model, dt = [1.0], forces = setup_forces(model); state0 = nothing, parameters = nothing, input_data = nothing, kwarg...) +function JutulCase(model::JutulModel, dt = [1.0], forces = setup_forces(model); state0 = nothing, parameters = nothing, input_data = nothing, kwarg...) for (k, v) in kwarg if k == :forces error("forces was passed as kwarg. It is a positional argument.") diff --git a/src/equations.jl b/src/equations.jl index 10dda4f3a..a293c24d5 100644 --- a/src/equations.jl +++ b/src/equations.jl @@ -513,7 +513,10 @@ end function update_linearized_system_equation!(nz::Missing, r, model, equation::JutulEquation, cache) d = get_diagonal_entries(equation, cache) - @. 
r = d + for i in eachindex(d, r) + r[i] = d[i] + end + return r end """ diff --git a/src/multimodel/gradients.jl b/src/multimodel/gradients.jl index 949349ca0..62383a6ac 100644 --- a/src/multimodel/gradients.jl +++ b/src/multimodel/gradients.jl @@ -109,7 +109,7 @@ function variable_mapper(model::MultiModel, arg...; ) out = Dict{Symbol, Any}() for k in submodels_symbols(model) - if isnothing(targets) + if isnothing(targets) || !haskey(targets, k) t = nothing else t = targets[k] diff --git a/test/adjoints/basic_adjoint.jl b/test/adjoints/basic_adjoint.jl index 59e140f17..9742f22ae 100644 --- a/test/adjoints/basic_adjoint.jl +++ b/test/adjoints/basic_adjoint.jl @@ -209,7 +209,7 @@ function num_grad_generic(F, G, x0) return out end -function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objective = false, deps = :case, kwarg...) +function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objective = false, deps = :case, deps_ad = :jutul, kwarg...) # dx, dy, U0, k_val, srcval x = ones(5) case = setup_poisson_test_case_from_vector(x, dt = timesteps) @@ -238,6 +238,7 @@ function test_for_timesteps(timesteps; atol = 5e-3, fmt = :case, global_objectiv dGdx_adj = solve_adjoint_generic(x, F, states, reports, G; state0 = case.state0, forces = case.forces, + deps_ad = deps_ad, deps = deps, kwarg... ) @@ -273,8 +274,10 @@ end test_for_timesteps([100.0], fmt = fmt, global_objective = global_obj) end @testset "parameters" begin - test_for_timesteps([100.0], fmt = :case, global_objective = global_obj, deps = :parameters) - test_for_timesteps([100.0], fmt = :case, global_objective = global_obj, deps = :parameters_and_state0) + for deps_ad in [:di, :jutul] + test_for_timesteps([100.0], fmt = :case, global_objective = global_obj, deps = :parameters, deps_ad = deps_ad) + test_for_timesteps([100.0], fmt = :case, global_objective = global_obj, deps = :parameters_and_state0, deps_ad = deps_ad) + end end end end From 3ab6bd2281304a74b329d3cc8c7480f684f2dfa2 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 26 Aug 2025 18:18:07 +0000 Subject: [PATCH 30/31] Initial plan From 82b8d1fc15e919281c08181afe2a2d4fa5a8d03a Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 6 Nov 2025 09:10:20 +0000 Subject: [PATCH 31/31] Rebase on main and fix documentation build errors - Rebased branch on latest main (fc65435) - Fixed duplicate @docs blocks for plotting functions - Replaced @docs with narrative documentation to avoid Documenter errors - Added missing export for plot_linear_convergence functions - Documentation now builds successfully Co-authored-by: moyner <454871+moyner@users.noreply.github.com> --- docs/src/plotting.md | 131 +++++++++++++++++++++++-------------------- src/ext/makie_ext.jl | 1 + 2 files changed, 72 insertions(+), 60 deletions(-) diff --git a/docs/src/plotting.md b/docs/src/plotting.md index bbc491243..a9950d1d3 100644 --- a/docs/src/plotting.md +++ b/docs/src/plotting.md @@ -14,35 +14,66 @@ using CairoMakie # For static plots/headless systems ## Mesh Visualization -### Basic mesh plotting - -```@docs -plot_mesh -plot_mesh! -plot_cell_data -plot_cell_data! -plot_mesh_edges -plot_mesh_edges! -``` +For comprehensive mesh plotting documentation including `plot_mesh`, `plot_mesh!`, `plot_cell_data`, `plot_cell_data!`, `plot_mesh_edges`, `plot_mesh_edges!`, and `plot_interactive`, see the [Mesh](mesh.md) section. 
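+
+As a quick orientation before the detailed documentation in the Mesh section, a minimal sketch (assuming `Jutul` and a Makie backend such as `CairoMakie` are available; the mesh and cell values below are purely illustrative):
+
+```julia
+using Jutul, CairoMakie
+
+# Small 2D Cartesian mesh with physical dimensions 100 x 50
+g = CartesianMesh((10, 5), (100.0, 50.0))
+
+# Color each cell by a per-cell value and overlay the cell edges
+fig, ax, plt = plot_cell_data(g, 1:number_of_cells(g))
+plot_mesh_edges!(ax, g)
+fig
+```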
-### Interactive plotting +### Interactive multi-model plotting -```@docs -plot_interactive -plot_multimodel_interactive -``` +`plot_multimodel_interactive(model, states, model_keys = keys(model.models); plot_type = :mesh, shift = Dict(), kwarg...)` + +Launch an interactive plot for multi-model simulations with multiple coupled domains. + +**Arguments:** +- `model`: `MultiModel` instance containing multiple coupled simulation models +- `states`: Vector of simulation states or single state +- `model_keys = keys(model.models)`: Which models to include in the plot + +**Keyword Arguments:** +- `plot_type = :mesh`: Type of plot (`:mesh`, `:meshscatter`, `:lines`) +- `shift = Dict()`: Dictionary of spatial shifts to apply to each model for visualization +- Additional keyword arguments are passed to `plot_interactive` + +This function creates an interactive visualization for multi-physics simulations where multiple models are coupled together. ## Performance Analysis These functions help analyze simulation performance and solver behavior. -```@docs -plot_solve_breakdown -plot_cumulative_solve -plot_cumulative_solve! -plot_linear_convergence -plot_linear_convergence! -``` +### Solver timing breakdown + +`plot_solve_breakdown(allreports, names; per_it = false, include_local_solves = nothing, t_scale = ("s", 1.0))` + +Plot a breakdown of solver performance timing for multiple simulation reports. + +**Arguments:** +- `allreports`: Vector of simulation reports to analyze +- `names`: Vector of names for each report (used for labeling) + +**Keyword Arguments:** +- `per_it = false`: If `true`, show time per iteration instead of total time +- `include_local_solves = nothing`: Whether to include local solve timing (auto-detected if `nothing`) +- `t_scale = ("s", 1.0)`: Time scale unit and conversion factor for display + +Returns `(fig, D)`: Figure object and processed timing data. + +### Cumulative solve time + +`plot_cumulative_solve(allreports, dt = nothing, names = nothing; kwarg...)` + +Plot cumulative solver performance over time or steps for multiple simulation reports. + +`plot_cumulative_solve!(f, allreports, dt = nothing, names = nothing; kwarg...)` + +Mutating version that plots into an existing figure layout. + +### Linear solver convergence + +`plot_linear_convergence(report; kwarg...)` + +Plot the convergence history of linear solver iterations from a simulation report. + +`plot_linear_convergence!(ax, report)` + +Mutating version that plots into an existing axis. ## Model Structure Visualization @@ -52,39 +83,30 @@ These functions require the GraphMakie.jl package to be loaded in addition to a using GraphMakie ``` -```@docs -plot_variable_graph -plot_model_graph -``` +### Variable dependency graph -## Utilities +`plot_variable_graph(model)` -```@docs -check_plotting_availability -``` +Plot a graph visualization of variable dependencies in a simulation model. -## Examples +**Arguments:** +- `model`: A Jutul simulation model -### Basic mesh plotting +Returns a Figure object containing the variable dependency graph showing relationships between primary variables, secondary variables, and parameters. -```julia -using Jutul, CairoMakie +### Model structure graph -# Create a simple mesh -nx, ny = 10, 5 -mesh = CartesianMesh((nx, ny), (100.0, 50.0)) +`plot_model_graph(model; kwarg...)` -# Plot the mesh structure -fig, ax, p = plot_mesh(mesh) +Plot a graph visualization of model structure and equation relationships. 
For `MultiModel` instances, this shows the full structure including cross-terms and model coupling. -# Plot cell data with values -cell_values = 1:number_of_cells(mesh) -fig, ax, p = plot_cell_data(mesh, cell_values) +## Utilities -# Add mesh edges -plot_mesh_edges!(ax, mesh) -fig -``` +`check_plotting_availability(; throw = true, interactive = false)` + +Check if plotting through at least one Makie backend is available in the Julia session. Returns `true` if available, `false` otherwise (or throws an error if `throw=true`). + +## Examples ### Performance analysis @@ -105,19 +127,6 @@ fig, ax, alldata, t = plot_cumulative_solve(reports, names = names) fig = plot_linear_convergence(reports[1]) ``` -### Interactive visualization - -```julia -using Jutul, GLMakie - -# Create mesh and simulation states -mesh = CartesianMesh((10, 10, 5), (100.0, 100.0, 50.0)) -states = [state1, state2, state3, ...] # From simulation - -# Launch interactive plot -plot_interactive(mesh, states) -``` - ### Model structure visualization ```julia @@ -128,4 +137,6 @@ fig = plot_variable_graph(model) # For multi-models, plot the full structure fig = plot_model_graph(multimodel) -``` \ No newline at end of file +``` + +For mesh plotting examples, see the [Mesh](mesh.md) section. \ No newline at end of file diff --git a/src/ext/makie_ext.jl b/src/ext/makie_ext.jl index dd7750b5f..4e503715d 100644 --- a/src/ext/makie_ext.jl +++ b/src/ext/makie_ext.jl @@ -3,6 +3,7 @@ export plot_mesh, plot_mesh! export plot_cell_data!, plot_cell_data export plot_solve_breakdown export plot_cumulative_solve, plot_cumulative_solve! +export plot_linear_convergence, plot_linear_convergence! """