diff --git a/src/IPM/IPM.jl b/src/IPM/IPM.jl
index cc73e2ce..b88ba911 100644
--- a/src/IPM/IPM.jl
+++ b/src/IPM/IPM.jl
@@ -17,7 +17,7 @@ mutable struct MadNLPSolver{
     IC <: AbstractInertiaCorrector,
     KKTVec <: AbstractKKTVector{T, VT}
 } <: AbstractMadNLPSolver{T}
-    
+
     nlp::Model
     cb::CB
     kkt::KKTSystem
@@ -103,29 +103,34 @@ mutable struct MadNLPSolver{
 end

 function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
-    
+
     opt, opt_linear_solver, logger = load_options(nlp; kwargs...)
     @assert is_supported(opt.linear_solver, T)

     cnt = MadNLPCounters(start_time=time())
-    cb = create_callback(opt.callback, nlp, opt)
-    
+    cb = create_callback(
+        opt.callback,
+        nlp;
+        fixed_variable_treatment=opt.fixed_variable_treatment,
+        equality_treatment=opt.equality_treatment,
+    )
+
     # generic options
     opt.disable_garbage_collector && (GC.enable(false); @warn(logger,"Julia garbage collector is temporarily disabled"))
     set_blas_num_threads(opt.blas_num_threads; permanent=true)
     @trace(logger,"Initializing variables.")
-    
+
     ind_cons = get_index_constraints(
         get_lvar(nlp), get_uvar(nlp),
-        get_lcon(nlp), get_ucon(nlp),
-        opt.fixed_variable_treatment,
-        opt.equality_treatment
+        get_lcon(nlp), get_ucon(nlp);
+        fixed_variable_treatment=opt.fixed_variable_treatment,
+        equality_treatment=opt.equality_treatment
     )

     ind_lb = ind_cons.ind_lb
     ind_ub = ind_cons.ind_ub
-    
+
     ns = length(ind_cons.ind_ineq)
     nx = get_nvar(nlp)
     n = nx+ns
@@ -148,12 +153,12 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
     x = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
     xl = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
-    xu = PrimalVector(VT, nx, ns, ind_lb, ind_ub) 
+    xu = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
     zl = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
     zu = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
     f = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
     x_trial = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
-    
+
     d = UnreducedKKTVector(VT, n, m, nlb, nub, ind_lb, ind_ub)
     p = UnreducedKKTVector(VT, n, m, nlb, nub, ind_lb, ind_ub)
     _w1 = UnreducedKKTVector(VT, n, m, nlb, nub, ind_lb, ind_ub)
@@ -161,7 +166,7 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
     _w3 = UnreducedKKTVector(VT, n, m, nlb, nub, ind_lb, ind_ub)
     _w4 = UnreducedKKTVector(VT, n, m, nlb, nub, ind_lb, ind_ub)

-    jacl = VT(undef,n) 
+    jacl = VT(undef,n)
     c_trial = VT(undef, m)
     y = VT(undef, m)
     c = VT(undef, m)
@@ -190,28 +195,28 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
         VT, n, m, nlb, nub, ind_lb, ind_ub
     )
-    
+
     cnt.init_time = time() - cnt.start_time
     return MadNLPSolver(
         nlp, cb, kkt,
-        opt, cnt, logger, 
+        opt, cnt, logger,
         n, m, nlb, nub,
         x, y, zl, zu, xl, xu,
-        zero(T), f, c, 
-        jacl, 
-        d, p, 
-        _w1, _w2, _w3, _w4, 
-        x_trial, c_trial, zero(T), c_slk, rhs, 
-        ind_cons.ind_ineq, ind_cons.ind_fixed, ind_cons.ind_llb, ind_cons.ind_uub, 
-        x_lr, x_ur, xl_r, xu_r, zl_r, zu_r, dx_lr, dx_ur, x_trial_lr, x_trial_ur, 
-        iterator, 
+        zero(T), f, c,
+        jacl,
+        d, p,
+        _w1, _w2, _w3, _w4,
+        x_trial, c_trial, zero(T), c_slk, rhs,
+        ind_cons.ind_ineq, ind_cons.ind_fixed, ind_cons.ind_llb, ind_cons.ind_uub,
+        x_lr, x_ur, xl_r, xu_r, zl_r, zu_r, dx_lr, dx_ur, x_trial_lr, x_trial_ur,
+        iterator,
         zero(T), zero(T), zero(T), zero(T), zero(T), zero(T), zero(T), zero(T),
         zero(T), " ", zero(T), zero(T), zero(T),
         Tuple{T, T}[], inertia_corrector, nothing,
-        INITIAL, Dict(), 
+        INITIAL, Dict(),
     )
 end
diff --git a/src/IPM/solver.jl b/src/IPM/solver.jl
index 0b80f905..197bd634 100644
--- a/src/IPM/solver.jl
+++ b/src/IPM/solver.jl
@@ -15,8 +15,8 @@ function initialize!(solver::AbstractMadNLPSolver{T}) where T

     nlp = solver.nlp
     opt = solver.opt
-    
-    # Initializing variables 
+
+    # Initializing variables
     @trace(solver.logger,"Initializing variables.")
     initialize!(
         solver.cb,
@@ -25,13 +25,15 @@ function initialize!(solver::AbstractMadNLPSolver{T}) where T
         solver.xu,
         solver.y,
         solver.rhs,
-        solver.ind_ineq,
-        opt
+        solver.ind_ineq;
+        tol=opt.tol,
+        bound_push=opt.bound_push,
+        bound_fac=opt.bound_fac,
     )
     fill!(solver.jacl, zero(T))
     fill!(solver.zl_r, one(T))
     fill!(solver.zu_r, one(T))
-    
+
     # Initializing scaling factors
     set_scaling!(
         solver.cb,
@@ -50,7 +52,7 @@ function initialize!(solver::AbstractMadNLPSolver{T}) where T
     # Initializing jacobian and gradient
     eval_jac_wrapper!(solver, solver.kkt, solver.x)
     eval_grad_f_wrapper!(solver, solver.f,solver.x)
-    
+
     @trace(solver.logger,"Initializing constraint duals.")

     if !solver.opt.dual_initialized
@@ -65,7 +67,7 @@ function initialize!(solver::AbstractMadNLPSolver{T}) where T
             copyto!(solver.y, dual(solver.d))
         end
     end
-    
+
     # Initializing
     solver.obj_val = eval_f_wrapper(solver, solver.x)
     eval_cons_wrapper!(solver, solver.c, solver.x)
@@ -207,7 +209,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T
     )
     solver.inf_compl = get_inf_compl(solver.x_lr,solver.xl_r,solver.zl_r,solver.xu_r,solver.x_ur,solver.zu_r,zero(T),sc)
     inf_compl_mu = get_inf_compl(solver.x_lr,solver.xl_r,solver.zl_r,solver.xu_r,solver.x_ur,solver.zu_r,solver.mu,sc)
-    
+
     print_iter(solver)

     # evaluate termination criteria
@@ -244,7 +246,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T
         dual_inf_perturbation!(primal(solver.p),solver.ind_llb,solver.ind_uub,solver.mu,solver.opt.kappa_d)

         inertia_correction!(solver.inertia_corrector, solver) || return ROBUST
-        
+
         # filter start
         @trace(solver.logger,"Backtracking line search initiated.")
         theta = get_theta(solver.c)
@@ -278,7 +280,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T
         unsuccessful_iterate = false

         while true
-            
+
             copyto!(full(solver.x_trial), full(solver.x))
             axpy!(solver.alpha, primal(solver.d), primal(solver.x_trial))
             solver.obj_val_trial = eval_f_wrapper(solver, solver.x_trial)
@@ -294,7 +296,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T
                 solver.filter,theta,theta_trial,varphi,varphi_trial,switching_condition,armijo_condition,
                 solver.theta_min,solver.opt.obj_max_inc,solver.opt.gamma_theta,solver.opt.gamma_phi,
                 has_constraints(solver))
-            
+
             if solver.ftype in ["f","h"]
@trace(solver.logger,"Step accepted with type $(solver.ftype)") break @@ -308,7 +310,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T end end - unsuccessful_iterate = true + unsuccessful_iterate = true solver.alpha /= 2 solver.cnt.l += 1 if solver.alpha < alpha_min @@ -333,7 +335,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T empty!(solver.filter) push!(solver.filter,(solver.theta_max,-Inf)) solver.cnt.k+=1 - + return REGULAR end end @@ -378,7 +380,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T primal(solver.x), solver.mu,solver.opt.kappa_sigma, ) - + eval_grad_f_wrapper!(solver, solver.f,solver.x) if !switching_condition || !armijo_condition @@ -462,7 +464,7 @@ function restore!(solver::AbstractMadNLPSolver{T}) where T end adjust_boundary!(solver.x_lr,solver.xl_r,solver.x_ur,solver.xu_r,solver.mu) - + F = F_trial theta = get_theta(solver.c) @@ -561,20 +563,20 @@ function robust!(solver::MadNLPSolver{T}) where T eval_lag_hess_wrapper!(solver, solver.kkt, solver.x, solver.y; is_resto=true) end set_aug_RR!(solver.kkt, solver, RR) - + # without inertia correction, @trace(solver.logger,"Solving restoration phase primal-dual system.") set_aug_rhs_RR!(solver, solver.kkt, RR, solver.opt.rho) - + inertia_correction!(solver.inertia_corrector, solver) || return RESTORATION_FAILED - + finish_aug_solve_RR!( RR.dpp,RR.dnn,RR.dzp,RR.dzn,solver.y,dual(solver.d), RR.pp,RR.nn,RR.zp,RR.zn,RR.mu_R,solver.opt.rho ) - - + + theta_R = get_theta_R(solver.c,RR.pp,RR.nn) varphi_R = get_varphi_R(RR.obj_val_R,solver.x_lr,solver.xl_r,solver.xu_r,solver.x_ur,RR.pp,RR.nn,RR.mu_R) varphi_d_R = get_varphi_d_R( @@ -623,7 +625,7 @@ function robust!(solver::MadNLPSolver{T}) where T varphi_R_trial = get_varphi_R( RR.obj_val_R_trial,solver.x_trial_lr,solver.xl_r,solver.xu_r,solver.x_trial_ur,RR.pp_trial,RR.nn_trial,RR.mu_R) - armijo_condition = is_armijo(varphi_R_trial,varphi_R,solver.opt.eta_phi,solver.alpha,varphi_d_R) + armijo_condition = is_armijo(varphi_R_trial,varphi_R,solver.opt.eta_phi,solver.alpha,varphi_d_R) small_search_norm && break solver.ftype = get_ftype( @@ -643,7 +645,7 @@ function robust!(solver::MadNLPSolver{T}) where T # (experimental) while giving up directly # we give MadNLP.jl second chance to explore # some possibility at the current iterate - + fill!(solver.y, zero(T)) fill!(solver.zl_r, one(T)) fill!(solver.zu_r, one(T)) @@ -722,7 +724,7 @@ function robust!(solver::MadNLPSolver{T}) where T else copyto!(solver.y, dual(solver.d)) end - + solver.cnt.k+=1 solver.cnt.t+=1 @@ -806,7 +808,7 @@ function inertia_correction!( inertia_corrector::InertiaBased, solver::MadNLPSolver{T} ) where {T} - + n_trial = 0 solver.del_w = del_w_prev = zero(T) @@ -815,14 +817,14 @@ function inertia_correction!( factorize_wrapper!(solver) num_pos,num_zero,num_neg = inertia(solver.kkt.linear_solver) - - + + solve_status = !is_inertia_correct(solver.kkt, num_pos, num_zero, num_neg) ? false : solve_refine_wrapper!( solver.d, solver, solver.p, solver._w4, ) - - + + while !solve_status @debug(solver.logger,"Primal-dual perturbed.") @@ -837,7 +839,7 @@ function inertia_correction!( return false end end - solver.del_c = num_neg == 0 ? zero(T) : solver.opt.jacobian_regularization_value * solver.mu^(solver.opt.jacobian_regularization_exponent) + solver.del_c = num_neg == 0 ? 
+        solver.del_c = num_neg == 0 ? zero(T) : solver.opt.jacobian_regularization_value * solver.mu^(solver.opt.jacobian_regularization_exponent)
         regularize_diagonal!(solver.kkt, solver.del_w - del_w_prev, solver.del_c)
         del_w_prev = solver.del_w
@@ -850,7 +852,7 @@ function inertia_correction!(
         )
         n_trial += 1
     end
-    
+
     solver.del_w != 0 && (solver.del_w_last = solver.del_w)
     return true
 end
@@ -858,7 +860,7 @@ end
 function inertia_correction!(
     inertia_corrector::InertiaFree,
     solver::MadNLPSolver{T}
-    ) where T 
+    ) where T

     n_trial = 0
     solver.del_w = del_w_prev = zero(T)
@@ -922,7 +924,7 @@ function inertia_correction!(
     inertia_corrector::InertiaIgnore,
     solver::MadNLPSolver{T}
     ) where T
-    
+
     n_trial = 0
     solver.del_w = del_w_prev = zero(T)
@@ -946,7 +948,7 @@ function inertia_correction!(
                return false
            end
        end
-        solver.del_c = solver.opt.jacobian_regularization_value * solver.mu^(solver.opt.jacobian_regularization_exponent) 
+        solver.del_c = solver.opt.jacobian_regularization_value * solver.mu^(solver.opt.jacobian_regularization_exponent)
        regularize_diagonal!(solver.kkt, solver.del_w - del_w_prev, solver.del_c)
        del_w_prev = solver.del_w
diff --git a/src/nlpmodels.jl b/src/nlpmodels.jl
index 5616edab..1d757200 100644
--- a/src/nlpmodels.jl
+++ b/src/nlpmodels.jl
@@ -1,21 +1,95 @@
+"""
+    AbstractFixedVariableTreatment
+
+Abstract type to define the reformulation of the fixed variables inside MadNLP.
+"""
+abstract type AbstractFixedVariableTreatment end
+
+"""
+    MakeParameter{VT, VI} <: AbstractFixedVariableTreatment
+
+Remove the fixed variables from the optimization variables and
+define them as parameters of the problem.
+"""
+struct MakeParameter{VT,VI} <: AbstractFixedVariableTreatment
+    fixed::VI
+    fixedj::VI
+    fixedh::VI
+    grad_storage::VT
+end
+
+"""
+    RelaxBound <: AbstractFixedVariableTreatment
+
+Relax the fixed variables ``x = x_{fixed}`` as bounded
+variables ``x_{fixed} - ϵ ≤ x ≤ x_{fixed} + ϵ``, with
+``ϵ`` a small-enough parameter.
+"""
+struct RelaxBound <: AbstractFixedVariableTreatment end
+
+
+"""
+    AbstractEqualityTreatment
+
+Abstract type to define the reformulation of the equality
+constraints inside MadNLP.
+"""
+abstract type AbstractEqualityTreatment end
+
+"""
+    EnforceEquality <: AbstractEqualityTreatment
+
+Keep the equality constraints intact.
+
+The solution returned by MadNLP will respect the equality constraints.
+"""
+struct EnforceEquality <: AbstractEqualityTreatment end
+
+"""
+    RelaxEquality <: AbstractEqualityTreatment
+
+Relax the equality constraints ``g(x) = 0`` with two
+inequality constraints, as ``-ϵ ≤ g(x) ≤ ϵ``. The parameter
+``ϵ`` is usually small.
+
+The solution returned by MadNLP will satisfy the equality
+constraints only up to a tolerance ``ϵ``.
+
+"""
+struct RelaxEquality <: AbstractEqualityTreatment end
+
+
+"""
+    get_index_constraints(nlp::AbstractNLPModel)
+
+Analyze the bounds of the variables and the constraints in the `AbstractNLPModel` `nlp`.
+Return a named tuple with the following keys:
+
+* `ind_eq`: indices of equality constraints.
+* `ind_ineq`: indices of inequality constraints.
+* `ind_fixed`: indices of fixed variables.
+* `ind_lb`: indices of variables with a lower-bound.
+* `ind_ub`: indices of variables with an upper-bound.
+* `ind_llb`: indices of variables with *only* a lower-bound.
+* `ind_uub`: indices of variables with *only* an upper-bound.
+
+"""
 function get_index_constraints(
-    nlp::AbstractNLPModel,
-    fixed_variable_treatment,
-    equality_treatment,
+    nlp::AbstractNLPModel;
+    options...
 )
     get_index_constraints(
         get_lvar(nlp), get_uvar(nlp),
-        get_lcon(nlp), get_ucon(nlp),
-        fixed_variable_treatment, equality_treatment,
+        get_lcon(nlp), get_ucon(nlp);
+        options...
     )
 end

 function get_index_constraints(
     lvar, uvar,
-    lcon, ucon,
-    fixed_variable_treatment,
-    equality_treatment,
+    lcon, ucon;
+    fixed_variable_treatment=MakeParameter,
+    equality_treatment=EnforceEquality,
 )
     ncon = length(lcon)
@@ -61,21 +135,23 @@ function get_index_constraints(
     )
 end

+"""
+    AbstractCallback{T, VT}
+
+Wrap the `AbstractNLPModel` passed by the user in a form amenable to MadNLP.
+
+An `AbstractCallback` handles the scaling of the problem and the
+reformulations of the equality constraints and fixed variables.
+"""
 abstract type AbstractCallback{T,VT} end

-abstract type AbstractFixedVariableTreatment end
-abstract type AbstractEqualityTreatment end
-struct EnforceEquality <: AbstractEqualityTreatment end
-struct RelaxEquality <: AbstractEqualityTreatment end
-struct MakeParameter{VT,VI} <: AbstractFixedVariableTreatment
-    fixed::VI
-    fixedj::VI
-    fixedh::VI
-    grad_storage::VT
-end
-struct RelaxBound <: AbstractFixedVariableTreatment end

+"""
+    SparseCallback{T, VT} <: AbstractCallback{T, VT}
+
+Wrap an `AbstractNLPModel` using sparse structures.
+
+"""
 struct SparseCallback{
     T,
     VT <: AbstractVector{T},
@@ -109,6 +185,12 @@ struct SparseCallback{
     equality_handler::EH
 end

+"""
+    DenseCallback{T, VT} <: AbstractCallback{T, VT}
+
+Wrap an `AbstractNLPModel` using dense structures.
+
+"""
 struct DenseCallback{
     T,
     VT <: AbstractVector{T},
@@ -136,9 +218,10 @@ end

 create_array(cb::AbstractCallback, args...) = similar(get_x0(cb.nlp), args...)

-function set_obj_scale!(obj_scale, f::VT,max_gradient) where {T, VT <: AbstractVector{T}}
-    obj_scale[] = min(one(T), max_gradient / norm(f,Inf))
+function set_obj_scale!(obj_scale, f::VT, max_gradient) where {T, VT <: AbstractVector{T}}
+    obj_scale[] = min(one(T), max_gradient / norm(f, Inf))
 end
+
 function set_con_scale_sparse!(con_scale::VT, jac_I,jac_buffer, max_gradient) where {T, VT <: AbstractVector{T}}
     fill!(con_scale, one(T))
     _set_con_scale_sparse!(con_scale, jac_I, jac_buffer)
@@ -150,9 +233,11 @@ function _set_con_scale_sparse!(con_scale, jac_I, jac_buffer)
         con_scale[row] = max(con_scale[row], abs(jac_buffer[i]))
     end
 end
+
 function set_jac_scale_sparse!(jac_scale::VT, con_scale, jac_I) where {T, VT <: AbstractVector{T}}
     copyto!(jac_scale, @view(con_scale[jac_I]))
 end
+
 function set_con_scale_dense!(con_scale::VT, jac_buffer, max_gradient) where {T, VT <: AbstractVector{T}}
     con_scale .= min.(one(T), max_gradient ./ mapreduce(abs, max, jac_buffer, dims=2, init=one(T)))
 end
@@ -161,14 +246,11 @@ end
 function create_dense_fixed_handler(
     fixed_variable_treatment::Type{MakeParameter},
     nlp,
-    opt
-    )
+)
     lvar = get_lvar(nlp)
     uvar = get_uvar(nlp)
-
     isfixed = (lvar .== uvar)
     fixed = findall(isfixed)
-
     return MakeParameter(
         fixed,
         similar(fixed,0),
@@ -184,9 +266,8 @@ function create_sparse_fixed_handler(
     jac_J,
     hess_I,
     hess_J,
-    hess_buffer;
-    opt
-    )
+    hess_buffer,
+)
     lvar = get_lvar(nlp)
     uvar = get_uvar(nlp)
     nnzj = get_nnzj(nlp.meta)
@@ -223,20 +304,17 @@ function create_sparse_fixed_handler(
     jac_J,
     hess_I,
     hess_J,
-    hess_buffer;
-    opt
-    )
-
+    hess_buffer,
+)
     fixed_handler = RelaxBound()
-
-
     return fixed_handler, get_nnzj(nlp.meta), get_nnzh(nlp.meta)
 end

 function create_callback(
     ::Type{SparseCallback},
-    nlp::AbstractNLPModel{T, VT},
-    opt,
+    nlp::AbstractNLPModel{T, VT};
+    fixed_variable_treatment=MakeParameter,
+    equality_treatment=EnforceEquality,
 ) where {T, VT}
     n = get_nvar(nlp)
@@ -244,7 +322,6 @@ function create_callback(
     nnzj = get_nnzj(nlp.meta)
     nnzh = get_nnzh(nlp.meta)
-
     x0 = get_x0(nlp)

     con_buffer = similar(x0, m) ; fill!(con_buffer, zero(T))
@@ -261,18 +338,16 @@ function create_callback(
     con_scale = similar(jac_buffer, m) ; fill!(con_scale, one(T))
     jac_scale = similar(jac_buffer, nnzj) ; fill!(jac_scale, one(T))

-    NLPModels.jac_structure!(nlp,jac_I,jac_J)
-    NLPModels.hess_structure!(nlp,hess_I,hess_J)
+    NLPModels.jac_structure!(nlp, jac_I, jac_J)
+    NLPModels.hess_structure!(nlp, hess_I, hess_J)

     fixed_handler, nnzj, nnzh = create_sparse_fixed_handler(
-        opt.fixed_variable_treatment,
+        fixed_variable_treatment,
         nlp,
         jac_I, jac_J, hess_I, hess_J,
-        hess_buffer;
-        opt = opt
+        hess_buffer,
     )
-
-    equality_handler = opt.equality_treatment()
+    equality_handler = equality_treatment()

     return SparseCallback(
         nlp,
@@ -295,8 +370,9 @@ end

 function create_callback(
     ::Type{DenseCallback},
-    nlp::AbstractNLPModel{T, VT},
-    opt,
+    nlp::AbstractNLPModel{T, VT};
+    fixed_variable_treatment=MakeParameter,
+    equality_treatment=EnforceEquality,
 ) where {T, VT}

     n = get_nvar(nlp)
@@ -310,16 +386,14 @@ function create_callback(
     con_scale = similar(x0, m) ; fill!(con_scale, one(T))

     fixed_handler = create_dense_fixed_handler(
-        opt.fixed_variable_treatment,
+        fixed_variable_treatment,
         nlp,
-        opt
     )
-
-    equality_handler = opt.equality_treatment()
+    equality_handler = equality_treatment()

     return DenseCallback(
         nlp,
-        n,m,
+        n, m,
         con_buffer,
         jac_buffer,
         grad_buffer,
@@ -350,17 +424,17 @@ end

 function initialize!(
     cb::AbstractCallback,
     x, xl, xu, y0, rhs,
-    ind_ineq,
-    opt
-    )
+    ind_ineq;
+    tol=1e-8,
+    bound_push=1e-2,
+    bound_fac=1e-2,
+)
     x0= variable(x)
     lvar= variable(xl)
     uvar= variable(xu)
-
     fixed_handler = cb.fixed_handler
     nlp = cb.nlp
-
     con_buffer =cb.con_buffer
     grad_buffer =cb.grad_buffer
@@ -372,20 +446,20 @@ function initialize!(
     lcon = copy(get_lcon(nlp))
     ucon = copy(get_ucon(nlp))

-    _treat_fixed_variable_initialize!(fixed_handler, x0, lvar, uvar)
-    _treat_equality_initialize!(cb.equality_handler, lcon, ucon, opt.tol)
+    _treat_fixed_variable_initialize!(cb.fixed_handler, x0, lvar, uvar)
+    _treat_equality_initialize!(cb.equality_handler, lcon, ucon, tol)

     set_initial_bounds!(
         lvar,
         uvar,
-        opt.tol
+        tol
     )
     initialize_variables!(
         x0,
         lvar,
         uvar,
-        opt.bound_push,
-        opt.bound_fac
+        bound_push,
+        bound_fac
     )

     NLPModels.cons!(nlp,x0,con_buffer)
@@ -398,16 +472,15 @@ function initialize!(
     set_initial_bounds!(
         slack(xl),
         slack(xu),
-        opt.tol
+        tol
     )
     initialize_variables!(
         slack(x),
         slack(xl),
         slack(xu),
-        opt.bound_push,
-        opt.bound_fac
+        bound_push,
+        bound_fac
     )
-
 end

 function set_scaling!(
@@ -441,6 +514,7 @@ function set_scaling!(
     slack(x) .*= con_scale_slk
     slack(xl) .*= con_scale_slk
     slack(xu) .*= con_scale_slk
+    return
 end

 function set_scaling!(
@@ -450,7 +524,7 @@ function set_scaling!(
     nlp_scaling_max_gradient
     )

-    x0= variable(x)
+    x0 = variable(x)
     nlp = cb.nlp

     obj_scale = cb.obj_scale
@@ -472,6 +546,7 @@ function set_scaling!(
     slack(x) .*= con_scale_slk
     slack(xl) .*= con_scale_slk
     slack(xu) .*= con_scale_slk
+    return
 end

 function _jac_sparsity_wrapper!(
@@ -481,6 +556,7 @@ function _jac_sparsity_wrapper!(

     copyto!(I, cb.jac_I)
     copyto!(J, cb.jac_J)
+    return
 end

 function _hess_sparsity_wrapper!(
@@ -489,6 +565,7 @@ function _hess_sparsity_wrapper!(
     )
     copyto!(I, cb.hess_I)
     copyto!(J, cb.hess_J)
+    return
 end

@@ -606,7 +683,6 @@ function _treat_fixed_variable_hess_dense!(fixed_handler::MakeParameter, cb::Spa
     _set_diag!(hess, fixed, one(T))
 end

-
 function _eval_jac_wrapper!(
     cb::DenseCallback{T},
     x::AbstractVector,
@@ -622,7 +698,6 @@ function _treat_fixed_variable_jac_dense!(fixed_handler::MakeParameter, cb::Dens
     jac[:,fixed_handler.fixed] .= zero(T)
 end

-
 function _eval_lag_hess_wrapper!(
     cb::DenseCallback{T},
     x::AbstractVector,
@@ -646,8 +721,6 @@ function _treat_fixed_variable_lag_hess_dense!(fixed_handler::MakeParameter, cb:
     _set_diag!(hess, fixed, one(T))
 end

-
-
 function update_z!(cb, zl, zu, jacl)
     _update_z!(cb.fixed_handler, zl, zu, jacl, get_minimize(cb.nlp) ? 1 : -1)
 end
diff --git a/test/kkt_test.jl b/test/kkt_test.jl
index 46cc53ae..ad72da34 100644
--- a/test/kkt_test.jl
+++ b/test/kkt_test.jl
@@ -38,16 +38,10 @@ end
     cnt = MadNLP.MadNLPCounters(; start_time=time())

     nlp = MadNLPTests.HS15Model()
-    ind_cons = MadNLP.get_index_constraints(
-        nlp,
-        options.fixed_variable_treatment,
-        options.equality_treatment,
-    )
+    ind_cons = MadNLP.get_index_constraints(nlp)

     cb = MadNLP.create_callback(
-        Callback,
-        nlp,
-        options,
+        Callback, nlp,
     )
     kkt = MadNLP.create_kkt_system(
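---

For reference, a minimal sketch of how the refactored keyword-based API fits together, using the `HS15Model` test problem that `test/kkt_test.jl` above already relies on. The function signatures and treatment types are the ones introduced by this diff; the surrounding script is illustrative only and assumes the `MadNLPTests` package is available:

```julia
using MadNLP, MadNLPTests

nlp = MadNLPTests.HS15Model()

# The index analysis no longer receives an option struct: the two
# reformulation treatments are plain keyword arguments, defaulting to
# MakeParameter (fixed variables) and EnforceEquality (equalities).
ind_cons = MadNLP.get_index_constraints(
    nlp;
    fixed_variable_treatment=MadNLP.MakeParameter,
    equality_treatment=MadNLP.EnforceEquality,
)

# Callbacks are built the same way, from explicit keywords instead of
# the full MadNLPOptions object.
cb = MadNLP.create_callback(
    MadNLP.SparseCallback,
    nlp;
    fixed_variable_treatment=MadNLP.MakeParameter,
    equality_treatment=MadNLP.EnforceEquality,
)
```

`MadNLPSolver(nlp; kwargs...)` still accepts the user-facing options and forwards `opt.fixed_variable_treatment` and `opt.equality_treatment` into these keywords, as the `src/IPM/IPM.jl` hunk above shows, so the solver entry point is unchanged.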