9 changes: 8 additions & 1 deletion src/NLPModels.jl
@@ -7,7 +7,7 @@ using FastClosures
# JSO
using LinearOperators

export AbstractNLPModel, AbstractNLSModel
export AbstractNLPModel, AbstractNLSModel, AbstractDenseNLPModel

# For documentation purposes
const OBJECTIVE_HESSIAN = raw"""
@@ -30,6 +30,13 @@ Base type for an optimization model.
"""
abstract type AbstractNLPModel{T, S} end

"""
AbstractDenseNLPModel <: AbstractNLPModel

Base type for a dense optimization model (Jacobian/Hessian stored as dense matrices).
"""
abstract type AbstractDenseNLPModel{T, S} <: AbstractNLPModel{T, S} end
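
# The new abstract type gives downstream code a dispatch target for dense
# linear algebra. A minimal sketch of what that enables, using only the
# standard coordinate API; dense_lower_hessian is a hypothetical helper,
# not part of this PR or of NLPModels.
function dense_lower_hessian(nlp::AbstractDenseNLPModel{T, S}, x, y; obj_weight = one(T)) where {T, S}
  H = zeros(T, nlp.meta.nvar, nlp.meta.nvar)
  rows, cols = hess_structure(nlp)
  vals = hess_coord(nlp, x, y, obj_weight = obj_weight)
  for (i, j, v) in zip(rows, cols, vals)
    H[i, j] += v  # hess_coord returns the lower triangle only
  end
  return H
end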

"""
AbstractNLSModel <: AbstractNLPModel

12 changes: 9 additions & 3 deletions src/nlp/api.jl
@@ -1387,15 +1387,19 @@ This is typically used to normalize variables for numerical stability in solvers
The scaling is model-dependent: if the model does not override this function, a vector of ones
is returned. Inspired by the AMPL scaling conventions.
"""
function varscale end
function varscale(model::AbstractNLPModel{T, S}) where {T, S}
return ones(T, model.meta.nvar)
end

"""
lagscale(model::AbstractNLPModel)

Return a vector of scaling factors for the Lagrange multipliers associated with constraints.
This can be used to improve numerical stability or condition number when solving KKT systems.
"""
function lagscale end
function lagscale(model::AbstractNLPModel{T, S}) where {T, S}
return ones(T, model.meta.ncon)
end

"""
conscale(model::AbstractNLPModel)
Expand All @@ -1404,4 +1408,6 @@ Return a vector of constraint scaling factors for the model.
These are typically used to normalize constraints to have similar magnitudes and improve
convergence behavior in nonlinear solvers.
"""
function conscale end
function conscale(model::AbstractNLPModel{T, S}) where {T, S}
return ones(T, model.meta.ncon)
end
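
# With these fallbacks, a model that does not specialize the scaling functions
# gets unit scaling instead of a MethodError. Hypothetical usage, assuming a
# model such as the ManualDenseNLPModel added in this PR's tests:
#
#   nlp = ManualDenseNLPModel()
#   varscale(nlp)  # [1.0, 1.0]: one factor per variable (nvar = 2)
#   lagscale(nlp)  # [1.0, 1.0]: one factor per multiplier (ncon = 2)
#   conscale(nlp)  # [1.0, 1.0]: one factor per constraint (ncon = 2)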
250 changes: 250 additions & 0 deletions test/nlp/dense-model.jl
@@ -0,0 +1,250 @@
"""
ManualDenseNLPModel <: AbstractDenseNLPModel

Concrete dense NLP model for demonstration and testing.
This model stores Jacobian and Hessian as dense matrices.

Example problem:
min x₁² + x₂²
s.t. x₁ + x₂ = 1
x₁² + x₂² ≤ 2

x₀ = [0.5, 0.5]
"""
mutable struct ManualDenseNLPModel{T, S} <: AbstractDenseNLPModel{T, S}
meta::NLPModelMeta{T, S}
counters::Counters
end

function ManualDenseNLPModel(::Type{T}) where {T}
meta = NLPModelMeta(
2, # nvar
ncon = 2, # number of constraints
nnzj = 4, # 2x2 dense Jacobian = 4 entries
nnzh = 3, # lower triangle of 2x2 Hessian = 3 entries
x0 = T[0.5, 0.5],
lcon = T[0, -Inf],
ucon = T[0, 0], # c₁: x₁ + x₂ - 1 = 0 (equality), c₂: x₁² + x₂² - 2 ≤ 0
name = "Manual Dense NLP Model",
lin = [1], # first constraint is linear
lin_nnzj = 2,
nln_nnzj = 2,
)

return ManualDenseNLPModel(meta, Counters())
end

ManualDenseNLPModel() = ManualDenseNLPModel(Float64)
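
# A quick sanity check (hypothetical usage; assumes NLPModels is loaded and
# this file has been included):
#
#   nlp = ManualDenseNLPModel()
#   obj(nlp, nlp.meta.x0)   # 0.5
#   cons(nlp, nlp.meta.x0)  # [0.0, -1.5]: equality active, inequality inactive
#
# Note that x0 = [0.5, 0.5] is also the solution of the example problem.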

# Objective: f(x) = x₁² + x₂²
function NLPModels.obj(nlp::ManualDenseNLPModel, x::AbstractVector)
@lencheck 2 x
increment!(nlp, :neval_obj)
return x[1]^2 + x[2]^2
end

function NLPModels.grad!(nlp::ManualDenseNLPModel, x::AbstractVector, gx::AbstractVector)
@lencheck 2 x gx
increment!(nlp, :neval_grad)
gx[1] = 2 * x[1]
gx[2] = 2 * x[2]
return gx
end

# Constraints: c₁(x) = x₁ + x₂ - 1, c₂(x) = x₁² + x₂² - 2
function NLPModels.cons_lin!(nlp::ManualDenseNLPModel, x::AbstractVector, cx::AbstractVector)
@lencheck 2 x
@lencheck 1 cx
increment!(nlp, :neval_cons_lin)
cx[1] = x[1] + x[2] - 1
return cx
end

function NLPModels.cons_nln!(nlp::ManualDenseNLPModel, x::AbstractVector, cx::AbstractVector)
@lencheck 2 x
@lencheck 1 cx
increment!(nlp, :neval_cons_nln)
cx[1] = x[1]^2 + x[2]^2 - 2
return cx
end

# Jacobian structure for dense model (all entries)
function NLPModels.jac_structure!(
nlp::ManualDenseNLPModel,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
)
@lencheck 4 rows cols
# For 2 constraints × 2 variables = 4 entries
# Row-major order: (1,1), (1,2), (2,1), (2,2)
rows .= [1, 1, 2, 2]
cols .= [1, 2, 1, 2]
return rows, cols
end

function NLPModels.jac_lin_structure!(
nlp::ManualDenseNLPModel,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
)
@lencheck 2 rows cols
# Linear constraint: c₁ = x₁ + x₂ - 1
# ∇c₁ = [1, 1]
rows .= [1, 1]
cols .= [1, 2]
return rows, cols
end

function NLPModels.jac_nln_structure!(
nlp::ManualDenseNLPModel,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
)
@lencheck 2 rows cols
# Nonlinear constraint: c₂ = x₁² + x₂² - 2
# ∇c₂ = [2x₁, 2x₂]
rows .= [1, 1]
cols .= [1, 2]
return rows, cols
end
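
# Note: the combined jac_structure! and the split lin/nln structures must stay
# consistent. In the full Jacobian the linear constraint is row 1 and the
# nonlinear constraint is row 2, while each split structure is indexed within
# its own subset of constraints; hence rows .= [1, 1] in both functions above.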

# Jacobian coordinate values
function NLPModels.jac_coord!(
nlp::ManualDenseNLPModel,
x::AbstractVector,
vals::AbstractVector,
)
@lencheck 2 x
@lencheck 4 vals
increment!(nlp, :neval_jac)
# c₁: [1, 1]
vals[1] = 1.0
vals[2] = 1.0
# c₂: [2x₁, 2x₂]
vals[3] = 2 * x[1]
vals[4] = 2 * x[2]
return vals
end

function NLPModels.jac_lin_coord!(nlp::ManualDenseNLPModel, vals::AbstractVector)
@lencheck 2 vals
increment!(nlp, :neval_jac_lin)
vals .= 1.0
return vals
end

function NLPModels.jac_nln_coord!(
nlp::ManualDenseNLPModel,
x::AbstractVector,
vals::AbstractVector,
)
@lencheck 2 x vals
increment!(nlp, :neval_jac_nln)
vals[1] = 2 * x[1]
vals[2] = 2 * x[2]
return vals
end

# Jacobian-vector products
function NLPModels.jprod_lin!(
nlp::ManualDenseNLPModel,
v::AbstractVector,
Jv::AbstractVector,
)
@lencheck 2 v
@lencheck 1 Jv
increment!(nlp, :neval_jprod_lin)
Jv[1] = v[1] + v[2]
return Jv
end

function NLPModels.jprod_nln!(
nlp::ManualDenseNLPModel,
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
)
@lencheck 2 x v
@lencheck 1 Jv
increment!(nlp, :neval_jprod_nln)
Jv[1] = 2 * x[1] * v[1] + 2 * x[2] * v[2]
return Jv
end

function NLPModels.jtprod_lin!(
nlp::ManualDenseNLPModel,
v::AbstractVector,
Jtv::AbstractVector,
)
@lencheck 1 v
@lencheck 2 Jtv
increment!(nlp, :neval_jtprod_lin)
Jtv[1] = v[1]
Jtv[2] = v[1]
return Jtv
end

function NLPModels.jtprod_nln!(
nlp::ManualDenseNLPModel,
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
)
@lencheck 2 x Jtv
@lencheck 1 v
increment!(nlp, :neval_jtprod_nln)
Jtv[1] = 2 * x[1] * v[1]
Jtv[2] = 2 * x[2] * v[1]
return Jtv
end

# Hessian structure (lower triangle)
function NLPModels.hess_structure!(
nlp::ManualDenseNLPModel,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
)
@lencheck 3 rows cols
# Lower triangle of 2×2: (1,1), (2,1), (2,2)
rows .= [1, 2, 2]
cols .= [1, 1, 2]
return rows, cols
end

# Lagrangian Hessian: ∇²L = obj_weight * ∇²f + y₁ * ∇²c₁ + y₂ * ∇²c₂
# ∇²f = [2, 0; 0, 2]
# ∇²c₁ = [0, 0; 0, 0] (linear)
# ∇²c₂ = [2, 0; 0, 2]
function NLPModels.hess_coord!(
nlp::ManualDenseNLPModel,
x::AbstractVector{T},
y::AbstractVector{T},
vals::AbstractVector{T};
obj_weight = one(T),
) where {T}
@lencheck 2 x y
@lencheck 3 vals
increment!(nlp, :neval_hess)
# Lower triangle: (1,1), (2,1), (2,2)
vals[1] = 2 * obj_weight + 2 * y[2] # (1,1)
vals[2] = 0 # (2,1)
vals[3] = 2 * obj_weight + 2 * y[2] # (2,2)
return vals
end

function NLPModels.hprod!(
nlp::ManualDenseNLPModel,
x::AbstractVector{T},
y::AbstractVector{T},
v::AbstractVector{T},
Hv::AbstractVector{T};
obj_weight = one(T),
) where {T}
@lencheck 2 x y v Hv
increment!(nlp, :neval_hprod)
# H = diag([2*obj_weight + 2*y[2], 2*obj_weight + 2*y[2]])
d = 2 * obj_weight + 2 * y[2]
Hv[1] = d * v[1]
Hv[2] = d * v[2]
return Hv
end
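
To see the coordinate API above in action, here is a hypothetical snippet (not part of this PR) that assembles the dense Jacobian it describes:

nlp = ManualDenseNLPModel()
rows, cols = jac_structure(nlp)
vals = jac_coord(nlp, nlp.meta.x0)
J = zeros(nlp.meta.ncon, nlp.meta.nvar)
for (i, j, v) in zip(rows, cols, vals)
  J[i, j] += v  # scatter coordinate values into the dense matrix
end
J  # [1.0 1.0; 1.0 1.0] at x0 = [0.5, 0.5]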
53 changes: 53 additions & 0 deletions test/nlp/dense.jl
@@ -0,0 +1,53 @@
using Test
using NLPModels

@testset "ManualDenseNLPModel dense API" begin
model = ManualDenseNLPModel()
x = [0.5, 0.5]

# Test objective and gradient
@test obj(model, x) ≈ 0.5
g = similar(x)
grad!(model, x, g)
@test g ≈ [1.0, 1.0]

# Test constraints
c = zeros(2)
cons!(model, x, c)
@test c[1] ≈ 0.0 # x₁ + x₂ - 1 = 0.5 + 0.5 - 1 = 0
@test c[2] ≈ -1.5 # x₁² + x₂² - 2 = 0.5 - 2 = -1.5

# Test Jacobian structure
rows, cols = jac_structure(model)
@test length(rows) == 4
@test length(cols) == 4
@test rows == [1, 1, 2, 2]
@test cols == [1, 2, 1, 2]

# Test Jacobian values
vals = zeros(4)
jac_coord!(model, x, vals)
@test vals[1] ≈ 1.0 # ∂c₁/∂x₁
@test vals[2] ≈ 1.0 # ∂c₁/∂x₂
@test vals[3] ≈ 1.0 # ∂c₂/∂x₁ = 2*0.5
@test vals[4] ≈ 1.0 # ∂c₂/∂x₂ = 2*0.5

# Test Hessian structure
rows_h, cols_h = hess_structure(model)
@test length(rows_h) == 3
@test length(cols_h) == 3
@test rows_h == [1, 2, 2]
@test cols_h == [1, 1, 2]

# Test Hessian values
y = [1.0, 1.0]
vals_h = zeros(3)
hess_coord!(model, x, y, vals_h)
@test vals_h[1] ≈ 4.0 # ∇²L₁₁ = 2 (obj) + 2 (y₂)
@test vals_h[2] ≈ 0.0 # ∇²L₂₁ = 0
@test vals_h[3] ≈ 4.0 # ∇²L₂₂ = 2 (obj) + 2 (y₂)

# Test that model is correctly typed as AbstractDenseNLPModel
@test model isa AbstractDenseNLPModel
@test model isa AbstractNLPModel
end
8 changes: 4 additions & 4 deletions test/nlp/dummy-model.jl
@@ -4,16 +4,16 @@ end

@testset "Default methods throw MethodError on DummyModel since they're not defined" begin
model = DummyModel(NLPModelMeta(1))
@test_throws(MethodError, lagscale(model, 1.0))
@test lagscale(model) == ones(Float64, model.meta.ncon)
@test varscale(model) == ones(Float64, model.meta.nvar)
@test conscale(model) == ones(Float64, model.meta.ncon)
@test_throws(MethodError, obj(model, [0.0]))
@test_throws(MethodError, varscale(model, [0.0]))
@test_throws(MethodError, conscale(model, [0.0]))
@test_throws(MethodError, jac_structure(model, [0], [1]))
@test_throws(MethodError, hess_structure(model, [0], [1]))
@test_throws(MethodError, grad!(model, [0.0], [1.0]))
@test_throws(MethodError, cons_lin!(model, [0.0], [1.0]))
@test_throws(MethodError, cons_nln!(model, [0.0], [1.0]))
@test_throws(MethodError, jac_lin_coord!(model, [0.0], [1.0]))
@test_throws(MethodError, jac_lin_coord!(model, [1.0]))
@test_throws(MethodError, jac_nln_coord!(model, [0.0], [1.0]))
@test_throws(MethodError, jth_con(model, [0.0], 1))
@test_throws(MethodError, jth_congrad(model, [0.0], 1))
2 changes: 2 additions & 0 deletions test/runtests.jl
@@ -2,6 +2,8 @@ using LinearAlgebra, LinearOperators, NLPModels, SparseArrays, Test

include("nlp/simple-model.jl")
include("nlp/dummy-model.jl")
include("nlp/dense-model.jl")

include("nlp/api.jl")
include("nlp/counters.jl")