From db7c813c7cb9969264f95c449f7e4e3394669a55 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 6 Sep 2024 20:52:28 -0400
Subject: [PATCH] :robot: Format .jl files (#343)

Co-authored-by: tmigot
---
 src/ADNLPProblems/allinit.jl  |   4 +-
 src/ADNLPProblems/allinitc.jl |   4 +-
 src/ADNLPProblems/alsotame.jl |   2 +-
 src/ADNLPProblems/avion2.jl   | 154 +++++++++++++++++++++++++++++++++++++++++++++-
 src/ADNLPProblems/booth.jl    |   2 +-
 src/ADNLPProblems/camshape.jl |  22 +++--
 src/ADNLPProblems/catenary.jl |   9 +-
 src/ADNLPProblems/hs100.jl    |   4 +-
 src/ADNLPProblems/hs108.jl    |  16 ++--
 src/ADNLPProblems/hs109.jl    |   6 +-
 src/ADNLPProblems/hs116.jl    |   2 +-
 src/ADNLPProblems/hs225.jl    |   2 +-
 src/ADNLPProblems/hs226.jl    |   2 +-
 src/ADNLPProblems/hs23.jl     |   2 +-
 src/ADNLPProblems/hs47.jl     |   2 +-
 src/ADNLPProblems/hs72.jl     |   4 +-
 src/ADNLPProblems/hs95.jl     |   3 +-
 src/ADNLPProblems/hs96.jl     |   3 +-
 src/ADNLPProblems/hs97.jl     |   3 +-
 src/ADNLPProblems/hs98.jl     |   3 +-
 src/ADNLPProblems/marine.jl   |  25 +++++-
 src/ADNLPProblems/robotarm.jl |  15 +++-
 src/Meta/marine.jl            |   3 +-
 src/PureJuMP/argauss.jl       |   6 +-
 src/PureJuMP/arglinc.jl       |   6 +-
 src/PureJuMP/brownbs.jl       |   6 +-
 src/PureJuMP/hs104.jl         |  10 +--
 src/PureJuMP/hs77.jl          |   6 +-
 src/PureJuMP/hs83.jl          |   6 +-
 src/PureJuMP/palmer1c.jl      |   6 +-
 src/PureJuMP/palmer1d.jl      |   6 +-
 src/PureJuMP/palmer2c.jl      |   6 +-
 src/PureJuMP/palmer3c.jl      |   6 +-
 src/PureJuMP/palmer4c.jl      |   6 +-
 src/PureJuMP/palmer5d.jl      |   6 +-
 src/PureJuMP/palmer6c.jl      |   6 +-
 src/PureJuMP/palmer7c.jl      |   6 +-
 src/PureJuMP/palmer8c.jl      |   6 +-
 src/PureJuMP/rat42.jl         |   6 +-
 src/PureJuMP/tquartic.jl      |   6 +-
 40 files changed, 262 insertions(+), 136 deletions(-)

diff --git a/src/ADNLPProblems/allinit.jl b/src/ADNLPProblems/allinit.jl
index d2b2469e..a2590ed5 100644
--- a/src/ADNLPProblems/allinit.jl
+++ b/src/ADNLPProblems/allinit.jl
@@ -21,8 +21,8 @@ function allinit(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wh
   # return cx
   #end
   A = T[
-    0 0 0 1;
-    0 1 0 0;
+    0 0 0 1
+    0 1 0 0
     0 0 1 0
   ]

diff --git a/src/ADNLPProblems/allinitc.jl b/src/ADNLPProblems/allinitc.jl
index c01dd1ef..942ee6d8 100644
--- a/src/ADNLPProblems/allinitc.jl
+++ b/src/ADNLPProblems/allinitc.jl
@@ -19,8 +19,8 @@ function allinitc(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) w
     return cx
   end
   A = T[
-    0 0 0 1;
-    0 1 0 0;
+    0 0 0 1
+    0 1 0 0
     0 0 1 0
   ]

diff --git a/src/ADNLPProblems/alsotame.jl b/src/ADNLPProblems/alsotame.jl
index 23535916..fb1e980e 100644
--- a/src/ADNLPProblems/alsotame.jl
+++ b/src/ADNLPProblems/alsotame.jl
@@ -15,7 +15,7 @@ function alsotame(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) w
     return cx
   end
   A = T[
-    1 0 0;
+    1 0 0
     0 1 0
   ]

diff --git a/src/ADNLPProblems/avion2.jl b/src/ADNLPProblems/avion2.jl
index 36f218c0..47c60164 100644
--- a/src/ADNLPProblems/avion2.jl
+++ b/src/ADNLPProblems/avion2.jl
@@ -179,8 +179,145 @@ function avion2(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa
     1,
     1,
   ]
-  A = sparse([1, 2, 3, 2, 4, 3, 1, 4, 4, 4, 4, 5, 5, 7, 10, 14, 6, 8, 14, 14, 6, 13, 15, 7, 7, 8, 15, 9, 15, 10, 15, 11, 15, 12, 15, 13, 15, 9, 14, 11, 14, 12, 14], [1, 1, 2, 5, 5, 6, 7, 7, 8, 9, 10, 10,
-  19, 20, 20, 20, 22, 22, 22, 23, 24, 26, 31, 33, 34, 35, 35, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 47, 47, 48, 48, 49, 49], T[-0.13, -0.7, -1.0, 1.0, -2.0, 1.0, 1.0, -2.0, -2.0, -1.0, 1.0, -20.0, 1.0, -1.0, -0.043, 0.5, -2.0, -0.137, -1.0, 1.0, 1.0, -300.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -35.0, 660.0, -200.0, 95.0, -120.0, 70.0], 15, 49)
+  A = sparse(
+    [
+      1,
+      2,
+      3,
+      2,
+      4,
+      3,
+      1,
+      4,
+      4,
+      4,
+      4,
+      5,
+      5,
+      7,
+      10,
+      14,
+      6,
+      8,
+      14,
+      14,
+      6,
+      13,
+      15,
+      7,
+      7,
+      8,
+      15,
+      9,
+      15,
+      10,
+      15,
+      11,
+      15,
+      12,
+      15,
+      13,
+      15,
+      9,
+      14,
+      11,
+      14,
+      12,
+      14,
+    ],
+    [
+      1,
+      1,
+      2,
+      5,
+      5,
+      6,
+      7,
+      7,
+      8,
+      9,
+      10,
+      10,
+      19,
+      20,
+      20,
+      20,
+      22,
+      22,
+      22,
+      23,
+      24,
+      26,
+      31,
+      33,
+      34,
+      35,
+      35,
+      37,
+      37,
+      38,
+      38,
+      39,
+      39,
+      40,
+      40,
+      41,
+      41,
+      47,
+      47,
+      48,
+      48,
+      49,
+      49,
+    ],
+    T[
+      -0.13,
+      -0.7,
+      -1.0,
+      1.0,
+      -2.0,
+      1.0,
+      1.0,
+      -2.0,
+      -2.0,
+      -1.0,
+      1.0,
+      -20.0,
+      1.0,
+      -1.0,
+      -0.043,
+      0.5,
+      -2.0,
+      -0.137,
+      -1.0,
+      1.0,
+      1.0,
+      -300.0,
+      1.0,
+      -1.0,
+      1.0,
+      1.0,
+      -1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      1.0,
+      -35.0,
+      660.0,
+      -200.0,
+      95.0,
+      -120.0,
+      70.0,
+    ],
+    15,
+    49,
+  )
   function c!(cx, x)
     return cx
   end
@@ -289,7 +426,18 @@ function avion2(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa
     2,
   ]

-  return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(A)..., c!, lcon, ucon, name = "avion2"; kwargs...)
+  return ADNLPModels.ADNLPModel!(
+    f,
+    x0,
+    lvar,
+    uvar,
+    findnz(A)...,
+    c!,
+    lcon,
+    ucon,
+    name = "avion2";
+    kwargs...,
+  )
 end

 function avion2(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
diff --git a/src/ADNLPProblems/booth.jl b/src/ADNLPProblems/booth.jl
index aed920e9..f9f4bd50 100644
--- a/src/ADNLPProblems/booth.jl
+++ b/src/ADNLPProblems/booth.jl
@@ -7,7 +7,7 @@ function booth(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher

   x0 = zeros(T, 2)
   A = T[
-    1 2;
+    1 2
     2 1
   ]
   function c!(cx, x)
diff --git a/src/ADNLPProblems/camshape.jl b/src/ADNLPProblems/camshape.jl
index 35b16126..3478bf75 100644
--- a/src/ADNLPProblems/camshape.jl
+++ b/src/ADNLPProblems/camshape.jl
@@ -24,10 +24,10 @@ function camshape(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg
   ucon = vcat(T(0), T(α * θ) * ones(T, n + 1), zeros(T, n + 1))

   A = zeros(T, n + 2, n)
-  A[2,n] = -1
+  A[2, n] = -1
   lcon[2] -= R_max
   ucon[2] -= R_max
-  A[3,1] = 1
+  A[3, 1] = 1
   lcon[3] += R_min
   ucon[3] += R_min
   for i = 1:(n - 1)
@@ -35,13 +35,25 @@ function camshape(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg
     A[3 + i, i] = -1
   end
   # cx[n + 3] = -R_min^2 - R_min * y[1] + 2 * R_min * y[1] * cos(θ)
-  A[1, 1] = -R_min + 2 * R_min * cos(θ) 
+  A[1, 1] = -R_min + 2 * R_min * cos(θ)
   lcon[1] += R_min^2
   ucon[1] += R_min^2
   lvar = T(R_min) * ones(T, n)
   uvar = T(R_max) * ones(T, n)
-  
+
   x0 = T((R_min + R_max) / 2) * ones(T, n)

-  return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "camshape", ; kwargs...)
+  return ADNLPModels.ADNLPModel!(
+    f,
+    x0,
+    lvar,
+    uvar,
+    findnz(sparse(A))...,
+    c!,
+    lcon,
+    ucon,
+    name = "camshape",
+    ;
+    kwargs...,
+  )
 end
diff --git a/src/ADNLPProblems/catenary.jl b/src/ADNLPProblems/catenary.jl
index 34d1c4f7..0c4756f6 100644
--- a/src/ADNLPProblems/catenary.jl
+++ b/src/ADNLPProblems/catenary.jl
@@ -1,6 +1,13 @@
 export catenary

-function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, Bl = 1, FRACT = 0.6, kwargs...) where {T}
+function catenary(
+  args...;
+  n::Int = default_nvar,
+  type::Type{T} = Float64,
+  Bl = 1,
+  FRACT = 0.6,
+  kwargs...,
+) where {T}
   (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3")
   n = 3 * max(1, div(n, 3))
   (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6")
diff --git a/src/ADNLPProblems/hs100.jl b/src/ADNLPProblems/hs100.jl
index a8e4d2e4..8365cacc 100644
--- a/src/ADNLPProblems/hs100.jl
+++ b/src/ADNLPProblems/hs100.jl
@@ -12,8 +12,8 @@ function hs100(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
   end
   x0 = T[1, 2, 0, 4, 0, 1, 1]
   function c!(cx, x)
-    cx[1] = - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5]
-    cx[2] = - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7]
+    cx[1] = -7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5]
+    cx[2] = -23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7]
     cx[4] = 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5]
     cx[3] = -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7]
     return cx
diff --git a/src/ADNLPProblems/hs108.jl b/src/ADNLPProblems/hs108.jl
index 170ad9fb..ed7951a2 100644
--- a/src/ADNLPProblems/hs108.jl
+++ b/src/ADNLPProblems/hs108.jl
@@ -10,16 +10,16 @@ function hs108(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
   lvar = T[-Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, 0]
   uvar = T(Inf) * ones(T, 9)
   function c!(cx, x)
-    cx[1] = - x[3]^2 - x[4]^2
-    cx[2] = - x[5]^2 - x[6]^2
-    cx[3] = - (x[1] - x[5])^2 - (x[2] - x[6])^2
-    cx[4] = - (x[1] - x[7])^2 - (x[2] - x[8])^2
-    cx[5] = - (x[3] - x[5])^2 - (x[4] - x[6])^2
-    cx[6] = - (x[3] - x[7])^2 - (x[4] - x[8])^2
+    cx[1] = -x[3]^2 - x[4]^2
+    cx[2] = -x[5]^2 - x[6]^2
+    cx[3] = -(x[1] - x[5])^2 - (x[2] - x[6])^2
+    cx[4] = -(x[1] - x[7])^2 - (x[2] - x[8])^2
+    cx[5] = -(x[3] - x[5])^2 - (x[4] - x[6])^2
+    cx[6] = -(x[3] - x[7])^2 - (x[4] - x[8])^2
     cx[7] = x[3] * x[9]
     cx[8] = x[5] * x[8] - x[6] * x[7]
-    cx[9] = - x[9]^2
-    cx[10] = - x[1]^2 - (x[2] - x[9])^2
+    cx[9] = -x[9]^2
+    cx[10] = -x[1]^2 - (x[2] - x[9])^2
     cx[11] = x[1] * x[4] - x[2] * x[3]
     cx[12] = -x[5] * x[9]
     return cx
diff --git a/src/ADNLPProblems/hs109.jl b/src/ADNLPProblems/hs109.jl
index b694e13e..eb1e9611 100644
--- a/src/ADNLPProblems/hs109.jl
+++ b/src/ADNLPProblems/hs109.jl
@@ -12,8 +12,8 @@ function hs109(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
     a = 50176 // 1000
     b = sin(eltype(x)(25 // 100))
     ci = cos(eltype(x)(25 // 100))
-    cx[1] = - x[1]^2 - x[8]^2
-    cx[2] = - x[2]^2 - x[9]^2
+    cx[1] = -x[1]^2 - x[8]^2
+    cx[2] = -x[2]^2 - x[9]^2
     cx[3] =
       x[5] * x[6] * sin(-x[3] - 1 / 4) + x[5] * x[7] * sin(-x[4] - 1 / 4) + 2 * b * x[5]^2 -
       a * x[1] + 400 * a
@@ -36,7 +36,7 @@ function hs109(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
       2 * ci * x[7]^2 + 0.7533e-3 * a * x[7]^2
     return cx
   end
-  lcon = vcat(-T(0.55), - 2250000, -2250000, zeros(T, 6))
+  lcon = vcat(-T(0.55), -2250000, -2250000, zeros(T, 6))
   ucon = vcat(T(0.55), T(Inf), T(Inf), zeros(T, 6))
   return ADNLPModels.ADNLPModel!(
     f,
diff --git a/src/ADNLPProblems/hs116.jl b/src/ADNLPProblems/hs116.jl
index 1912757c..f5db168e 100644
--- a/src/ADNLPProblems/hs116.jl
+++ b/src/ADNLPProblems/hs116.jl
@@ -22,7 +22,7 @@ function hs116(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
     cx[5] = x[12] - b * x[9] + ci * x[2] * x[9]
     cx[6] = x[11] - b * x[8] + ci * x[1] * x[8]
     cx[7] = x[5] * x[7] - x[1] * x[8] - x[4] * x[7] + x[4] * x[8]
-    cx[8] = - a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6]
+    cx[8] = -a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6]
     cx[9] = x[2] * x[9] - x[3] * x[10] - x[6] * x[9] - 500 * x[2] + 500 * x[6] + x[2] * x[10]
     cx[10] = x[2] - a * (x[2] * x[10] - x[3] * x[10])
     return cx
diff --git a/src/ADNLPProblems/hs225.jl b/src/ADNLPProblems/hs225.jl
index 18b0758c..7826b370 100644
--- a/src/ADNLPProblems/hs225.jl
+++ b/src/ADNLPProblems/hs225.jl
@@ -7,7 +7,7 @@ function hs225(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
   x0 = T[3, 1]
   function c!(cx, x)
     cx[1] = x[1]^2 + x[2]^2
-    cx[2] = 9 * x[1]^2 + x[2]^2 
+    cx[2] = 9 * x[1]^2 + x[2]^2
     cx[3] = x[1]^2 - x[2]
     cx[4] = x[2]^2 - x[1]
     return cx
diff --git a/src/ADNLPProblems/hs226.jl b/src/ADNLPProblems/hs226.jl
index 2ec29fc6..82258632 100644
--- a/src/ADNLPProblems/hs226.jl
+++ b/src/ADNLPProblems/hs226.jl
@@ -10,7 +10,7 @@ function hs226(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
   uvar = T[Inf, Inf]
   function c!(cx, x)
     cx[1] = x[1]^2 + x[2]^2
-    cx[2] = - x[1]^2 - x[2]^2
+    cx[2] = -x[1]^2 - x[2]^2
     return cx
   end
   lcon = T[0; -1]
diff --git a/src/ADNLPProblems/hs23.jl b/src/ADNLPProblems/hs23.jl
index 0d6cb050..ccf35324 100644
--- a/src/ADNLPProblems/hs23.jl
+++ b/src/ADNLPProblems/hs23.jl
@@ -15,7 +15,7 @@ function hs23(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg
   uvar = 50 * ones(T, 2)
   function c!(cx, x)
     cx[1] = x[1]^2 + x[2]^2
-    cx[2] = 9 * x[1]^2 + x[2]^2 
+    cx[2] = 9 * x[1]^2 + x[2]^2
     cx[3] = x[1]^2 - x[2]
     cx[4] = x[2]^2 - x[1]
     return cx
diff --git a/src/ADNLPProblems/hs47.jl b/src/ADNLPProblems/hs47.jl
index 62ccda1b..7cd1db6d 100644
--- a/src/ADNLPProblems/hs47.jl
+++ b/src/ADNLPProblems/hs47.jl
@@ -1,7 +1,7 @@
 export hs47

 function hs47(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
-  function c!(cx, x) 
+  function c!(cx, x)
     cx[3] = x[1] + x[2]^2 + x[3]^3 - 3
     cx[1] = x[2] - x[3]^2 + x[4]
     cx[2] = x[1] * x[5]
diff --git a/src/ADNLPProblems/hs72.jl b/src/ADNLPProblems/hs72.jl
index 9bd48a24..0769fde0 100644
--- a/src/ADNLPProblems/hs72.jl
+++ b/src/ADNLPProblems/hs72.jl
@@ -9,8 +9,8 @@ function hs72(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
   lvar = T(0.001) * ones(T, 4)
   uvar = T[(5 - i) * 1e5 for i = 1:4]
   function c!(cx, x)
-    cx[1] = + 4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] - 0.0401
-    cx[2] = + 0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] - 0.010085
+    cx[1] = +4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] - 0.0401
+    cx[2] = +0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] - 0.010085
     return cx
   end
   lcon = -T(Inf) * ones(T, 2)
diff --git a/src/ADNLPProblems/hs95.jl b/src/ADNLPProblems/hs95.jl
index e38ef216..80bd5323 100644
--- a/src/ADNLPProblems/hs95.jl
+++ b/src/ADNLPProblems/hs95.jl
@@ -22,8 +22,7 @@ function hs95(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
       17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] -
       139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6]
     cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5]
-    cx[4] =
-      159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
+    cx[4] = 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
     return cx
   end
   lcon = T[4.97, -1.88, -29.08, -78.02]
diff --git a/src/ADNLPProblems/hs96.jl b/src/ADNLPProblems/hs96.jl
index f73a1010..dd41eb2e 100644
--- a/src/ADNLPProblems/hs96.jl
+++ b/src/ADNLPProblems/hs96.jl
@@ -22,8 +22,7 @@ function hs96(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
       17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] -
       139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6]
     cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5]
-    cx[4] =
-      159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
+    cx[4] = 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
     return cx
   end
   lcon = T[4.97, -1.88, -69.08, -118.02]
diff --git a/src/ADNLPProblems/hs97.jl b/src/ADNLPProblems/hs97.jl
index 1aba173f..9a247dc4 100644
--- a/src/ADNLPProblems/hs97.jl
+++ b/src/ADNLPProblems/hs97.jl
@@ -22,8 +22,7 @@ function hs97(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
       17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] -
       139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6]
     cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5]
-    cx[4] =
-      159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
+    cx[4] = 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
     return cx
   end
   lcon = T[32.97, 25.12, -29.08, -78.02]
diff --git a/src/ADNLPProblems/hs98.jl b/src/ADNLPProblems/hs98.jl
index 7f5d90fb..c819d35e 100644
--- a/src/ADNLPProblems/hs98.jl
+++ b/src/ADNLPProblems/hs98.jl
@@ -22,8 +22,7 @@ function hs98(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
       17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] -
       139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6]
     cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5]
-    cx[4] =
-      159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
+    cx[4] = 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
     return cx
   end
   lcon = T[32.97, 25.12, -124.08, -173.02]
diff --git a/src/ADNLPProblems/marine.jl b/src/ADNLPProblems/marine.jl
index b3b90c7a..aa96414d 100644
--- a/src/ADNLPProblems/marine.jl
+++ b/src/ADNLPProblems/marine.jl
@@ -151,7 +151,17 @@ function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, k
     end
   end

-  function c!(cx, x; ne::Int = ne, nh::Int = nh, nc::Int = nc, h::Rational{Int} = h, fact::Vector{Int} = fact, ngm::Int = ngm, ngmv::Int = ngmv)
+  function c!(
+    cx,
+    x;
+    ne::Int = ne,
+    nh::Int = nh,
+    nc::Int = nc,
+    h::Rational{Int} = h,
+    fact::Vector{Int} = fact,
+    ngm::Int = ngm,
+    ngmv::Int = ngmv,
+  )
     g = view(x, 1:(ne - 1))
     m = view(x, ne:ngm)
     ngmw = ngmv + nh * nc * ne
@@ -182,5 +192,16 @@ function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, k
     return cx
   end

-  return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "marine"; kwargs...)
+  return ADNLPModels.ADNLPModel!(
+    f,
+    x0,
+    lvar,
+    uvar,
+    findnz(sparse(A))...,
+    c!,
+    lcon,
+    ucon,
+    name = "marine";
+    kwargs...,
+  )
 end
diff --git a/src/ADNLPProblems/robotarm.jl b/src/ADNLPProblems/robotarm.jl
index 55fc063c..a1264eeb 100644
--- a/src/ADNLPProblems/robotarm.jl
+++ b/src/ADNLPProblems/robotarm.jl
@@ -25,7 +25,7 @@ function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwa

   A = zeros(T, n, 9n + 1)
   for i = 1:n
-    A[i, 6n + i] = L 
+    A[i, 6n + i] = L
   end

   # constraints function
@@ -95,5 +95,16 @@ function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwa
   uvar[8n] = lvar[8n + 1] = uvar[8n + 1] = lvar[9n] = uvar[9n] = T(0)

-  return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "robotarm"; kwargs...)
+  return ADNLPModels.ADNLPModel!(
+    f,
+    x0,
+    lvar,
+    uvar,
+    findnz(sparse(A))...,
+    c!,
+    lcon,
+    ucon,
+    name = "robotarm";
+    kwargs...,
+  )
 end
diff --git a/src/Meta/marine.jl b/src/Meta/marine.jl
index c32e85d7..f978ad44 100644
--- a/src/Meta/marine.jl
+++ b/src/Meta/marine.jl
@@ -21,7 +21,8 @@ get_marine_nvar(; n::Integer = default_nvar, nc::Int = 1, kwargs...) =
   8 + 7 + Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (8 + 3 * 8 * nc)
 get_marine_ncon(; n::Integer = default_nvar, nc::Int = 1, kwargs...) =
   Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (8 + 2 * nc + nc * (8 - 2)) - 8
-get_marine_nlin(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = 8 * (Int(round((n - 2 * 8 + 1) / (3 * 8 * 1 + 8))) - 1) # (nh - 1) * ne
+get_marine_nlin(; n::Integer = default_nvar, nc::Int = 1, kwargs...) =
+  8 * (Int(round((n - 2 * 8 + 1) / (3 * 8 * 1 + 8))) - 1) # (nh - 1) * ne
 get_marine_nnln(; n::Integer = default_nvar, nc::Int = 1, kwargs...) =
   Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (2 * nc + nc * (8 - 2))
 get_marine_nequ(; n::Integer = default_nvar, nc::Int = 1, kwargs...) =
diff --git a/src/PureJuMP/argauss.jl b/src/PureJuMP/argauss.jl
index 2223f88d..8db68534 100644
--- a/src/PureJuMP/argauss.jl
+++ b/src/PureJuMP/argauss.jl
@@ -49,11 +49,7 @@ function argauss(; n::Int = default_nvar, kwargs...)
   @variable(nlp, -Inf <= x[i = 1:3] <= Inf, start = xinit[i])

   @objective(nlp, Min, 0)
-  @constraint(
-    nlp,
-    cons[i = 1:15],
-    x[1] * exp(-0.5 * x[2] * (0.5 * (8 - i) - x[3])^2) - rhs[i] == 0
-  )
+  @constraint(nlp, cons[i = 1:15], x[1] * exp(-0.5 * x[2] * (0.5 * (8 - i) - x[3])^2) - rhs[i] == 0)

   return nlp
 end
diff --git a/src/PureJuMP/arglinc.jl b/src/PureJuMP/arglinc.jl
index a7436f0a..4bdee4b9 100644
--- a/src/PureJuMP/arglinc.jl
+++ b/src/PureJuMP/arglinc.jl
@@ -20,11 +20,7 @@ function arglinc(args...; n::Int = default_nvar, m::Int = 2n, kwargs...)

   @variable(nlp, x[j = 1:n], start = 1.0)

-  @objective(
-    nlp,
-    Min,
-    2 + sum(((i - 1) * sum(j * x[j] for j = 2:(n - 1)) - 1)^2 for i = 2:(m - 1))
-  )
+  @objective(nlp, Min, 2 + sum(((i - 1) * sum(j * x[j] for j = 2:(n - 1)) - 1)^2 for i = 2:(m - 1)))

   return nlp
 end
diff --git a/src/PureJuMP/brownbs.jl b/src/PureJuMP/brownbs.jl
index 6eca4d17..9b726858 100644
--- a/src/PureJuMP/brownbs.jl
+++ b/src/PureJuMP/brownbs.jl
@@ -18,11 +18,7 @@ function brownbs(args...; kwargs...)

   @variable(nlp, x[i = 1:2], start = 1.0)

-  @objective(
-    nlp,
-    Min,
-    0.5 * (x[1] - 1e6)^2 + 0.5 * (x[2] - 2 * 1e-6)^2 + 0.5 * (x[1] * x[2] - 2)^2
-  )
+  @objective(nlp, Min, 0.5 * (x[1] - 1e6)^2 + 0.5 * (x[2] - 2 * 1e-6)^2 + 0.5 * (x[1] * x[2] - 2)^2)

   return nlp
 end
diff --git a/src/PureJuMP/hs104.jl b/src/PureJuMP/hs104.jl
index 2e1cfc97..98bd7a5a 100644
--- a/src/PureJuMP/hs104.jl
+++ b/src/PureJuMP/hs104.jl
@@ -23,14 +23,8 @@ function hs104(args...; kwargs...)

   @constraint(nlp, 1 - 0.0588 * x[5] * x[7] - 0.1 * x[1] ≥ 0)
   @constraint(nlp, 1 - 0.0588 * x[6] * x[8] - 0.1 * x[1] - 0.1 * x[2] ≥ 0)
-  @constraint(
-    nlp,
-    1 - 4 * x[3] / x[5] - 2 * x[3]^(-0.71) / x[5] - 0.0588 * x[3]^(-1.3) * x[7] ≥ 0
-  )
-  @constraint(
-    nlp,
-    1 - 4 * x[4] / x[6] - 2 * x[4]^(-0.71) / x[6] - 0.0588 * x[4]^(-1.3) * x[8] ≥ 0
-  )
+  @constraint(nlp, 1 - 4 * x[3] / x[5] - 2 * x[3]^(-0.71) / x[5] - 0.0588 * x[3]^(-1.3) * x[7] ≥ 0)
+  @constraint(nlp, 1 - 4 * x[4] / x[6] - 2 * x[4]^(-0.71) / x[6] - 0.0588 * x[4]^(-1.3) * x[8] ≥ 0)
   @constraint(nlp, 1 ≤ f ≤ 4.2)

   @objective(nlp, Min, f)
diff --git a/src/PureJuMP/hs77.jl b/src/PureJuMP/hs77.jl
index 79f733d2..9bb36374 100644
--- a/src/PureJuMP/hs77.jl
+++ b/src/PureJuMP/hs77.jl
@@ -21,11 +21,7 @@ function hs77(args...; kwargs...)
   @constraint(nlp, x[1]^2 * x[4] + sin(x[4] - x[5]) - 2 * sqrt(2) == 0)
   @constraint(nlp, x[2] + x[3]^4 * x[4]^2 - 8 - sqrt(2) == 0)

-  @objective(
-    nlp,
-    Min,
-    (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6
-  )
+  @objective(nlp, Min, (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6)

   return nlp
 end
diff --git a/src/PureJuMP/hs83.jl b/src/PureJuMP/hs83.jl
index 2add7ce1..94216608 100644
--- a/src/PureJuMP/hs83.jl
+++ b/src/PureJuMP/hs83.jl
@@ -35,11 +35,7 @@ function hs83(args...; kwargs...)
     0 ≤ a[9] + a[10] * x[3] * x[5] + a[11] * x[1] * x[3] - a[12] * x[3] * x[4] - 20 ≤ 5
   )

-  @objective(
-    nlp,
-    Min,
-    5.3578547 * x[3]^2 + 0.8356891 * x[1] * x[5] + 37.293239 * x[1] - 40792.141
-  )
+  @objective(nlp, Min, 5.3578547 * x[3]^2 + 0.8356891 * x[1] * x[5] + 37.293239 * x[1] - 40792.141)

   return nlp
 end
diff --git a/src/PureJuMP/palmer1c.jl b/src/PureJuMP/palmer1c.jl
index 02860c2e..30c39953 100644
--- a/src/PureJuMP/palmer1c.jl
+++ b/src/PureJuMP/palmer1c.jl
@@ -94,11 +94,7 @@ function palmer1c(args...; kwargs...)
     92.733676,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:35)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:35))

   return nlp
 end
diff --git a/src/PureJuMP/palmer1d.jl b/src/PureJuMP/palmer1d.jl
index 3ec56c13..fca48609 100644
--- a/src/PureJuMP/palmer1d.jl
+++ b/src/PureJuMP/palmer1d.jl
@@ -94,11 +94,7 @@ function palmer1d(args...; kwargs...)
     92.733676,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:7))^2 for i = 1:35)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:7))^2 for i = 1:35))

   return nlp
 end
diff --git a/src/PureJuMP/palmer2c.jl b/src/PureJuMP/palmer2c.jl
index 2f89b08d..22f2b659 100644
--- a/src/PureJuMP/palmer2c.jl
+++ b/src/PureJuMP/palmer2c.jl
@@ -70,11 +70,7 @@ function palmer2c(args...; kwargs...)
     72.676767,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23))

   return nlp
 end
diff --git a/src/PureJuMP/palmer3c.jl b/src/PureJuMP/palmer3c.jl
index 785fedf4..20889010 100644
--- a/src/PureJuMP/palmer3c.jl
+++ b/src/PureJuMP/palmer3c.jl
@@ -70,11 +70,7 @@ function palmer3c(args...; kwargs...)
     64.87939,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23))

   return nlp
 end
diff --git a/src/PureJuMP/palmer4c.jl b/src/PureJuMP/palmer4c.jl
index 06e44811..78172a4f 100644
--- a/src/PureJuMP/palmer4c.jl
+++ b/src/PureJuMP/palmer4c.jl
@@ -70,11 +70,7 @@ function palmer4c(args...; kwargs...)
     67.27625,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23))

   return nlp
 end
diff --git a/src/PureJuMP/palmer5d.jl b/src/PureJuMP/palmer5d.jl
index 8cd3896b..f4674554 100644
--- a/src/PureJuMP/palmer5d.jl
+++ b/src/PureJuMP/palmer5d.jl
@@ -47,11 +47,7 @@ function palmer5d(args...; kwargs...)
     77.719674,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:4))^2 for i = 1:12)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:4))^2 for i = 1:12))

   return nlp
 end
diff --git a/src/PureJuMP/palmer6c.jl b/src/PureJuMP/palmer6c.jl
index bc38e0e5..ea677ceb 100644
--- a/src/PureJuMP/palmer6c.jl
+++ b/src/PureJuMP/palmer6c.jl
@@ -50,11 +50,7 @@ function palmer6c(args...; kwargs...)
     9.026378,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:13)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:13))

   return nlp
 end
diff --git a/src/PureJuMP/palmer7c.jl b/src/PureJuMP/palmer7c.jl
index f8b48aec..2151e01d 100644
--- a/src/PureJuMP/palmer7c.jl
+++ b/src/PureJuMP/palmer7c.jl
@@ -50,11 +50,7 @@ function palmer7c(args...; kwargs...)
     117.630959,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:13)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:13))

   return nlp
 end
diff --git a/src/PureJuMP/palmer8c.jl b/src/PureJuMP/palmer8c.jl
index ab12100c..c905b524 100644
--- a/src/PureJuMP/palmer8c.jl
+++ b/src/PureJuMP/palmer8c.jl
@@ -48,11 +48,7 @@ function palmer8c(args...; kwargs...)
     97.874528,
   ]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:12)
-  )
+  @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:12))

   return nlp
 end
diff --git a/src/PureJuMP/rat42.jl b/src/PureJuMP/rat42.jl
index 822a25b3..afd6cbf6 100644
--- a/src/PureJuMP/rat42.jl
+++ b/src/PureJuMP/rat42.jl
@@ -54,11 +54,7 @@ function rat42(args...; kwargs...)
   @variable(nlp, x[j = 1:3])
   set_start_value.(x, [100, 1, 0.1]) # other: [75, 2.5, 0.07]

-  @objective(
-    nlp,
-    Min,
-    0.5 * sum((y[i, 1] - x[1] / (1 + exp(x[2] - x[3] * y[i, 2])))^2 for i = 1:9)
-  )
+  @objective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] / (1 + exp(x[2] - x[3] * y[i, 2])))^2 for i = 1:9))

   return nlp
 end
diff --git a/src/PureJuMP/tquartic.jl b/src/PureJuMP/tquartic.jl
index d2acf2dd..cfe94a58 100644
--- a/src/PureJuMP/tquartic.jl
+++ b/src/PureJuMP/tquartic.jl
@@ -27,11 +27,7 @@ function tquartic(args...; n::Int = default_nvar, kwargs...)

   @variable(nlp, x[i = 1:n], start = 0.1)

-  @objective(
-    nlp,
-    Min,
-    0.5 * (x[1] - 1.0)^2 + 0.5 * sum((x[1]^2 - x[i + 1]^2)^2 for i = 1:(n - 2))
-  )
+  @objective(nlp, Min, 0.5 * (x[1] - 1.0)^2 + 0.5 * sum((x[1]^2 - x[i + 1]^2)^2 for i = 1:(n - 2)))

   return nlp
 end