Revisit fixed variables treatment
* Change the behavior of `MakeParameter` depending on the callback:
    - `SparseCallback`: remove the fixed variables and reduce the problem's dimension in MadNLP
    - `DenseCallback`: keep the fixed variables frozen (as done previously)
* Add a new `fixed_handler` for problems without fixed variables: `NoFixedVariable`.
* Ensure we always interact with the nonlinear program `nlp` through the
  callback wrapper `cb`, to avoid side effects.
* Remove the function `get_index_constraints` and store the indices directly
  in the callback wrapper `cb`.
* Fix the computation of the bound multipliers for fixed variables.
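
In pseudocode, the new dispatch looks roughly like this (a minimal sketch only; the types and field names below are illustrative stand-ins, not MadNLP's actual implementation):

```julia
# Sketch: dispatch the fixed-variable treatment on the callback type.
abstract type AbstractCallback end
struct SparseCallback <: AbstractCallback end
struct DenseCallback <: AbstractCallback end

# Sparse path: drop the fixed variables, so the solver works on a reduced problem.
function make_parameter(::SparseCallback, lvar, uvar)
    ind_free = findall(lvar .< uvar)   # strictly free variables
    return (; ind_free, nvar = length(ind_free))
end

# Dense path: keep the full dimension and freeze fixed variables at their bound.
function make_parameter(::DenseCallback, lvar, uvar)
    ind_fixed = findall(lvar .== uvar) # variables with lvar == uvar
    return (; ind_fixed, nvar = length(lvar))
end
```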
frapac committed Jul 25, 2024
1 parent ff30db6 commit 39e1424
Showing 17 changed files with 850 additions and 526 deletions.
2 changes: 1 addition & 1 deletion ext/MadNLPMOI/MadNLPMOI.jl
@@ -1142,7 +1142,7 @@ function MOI.get(
MOI.check_result_index_bounds(model, attr)
MOI.throw_if_not_valid(model, ci)
rc = model.result.multipliers_L[ci.value] - model.result.multipliers_U[ci.value]
-    return rc
+    return -rc
end

function MOI.get(
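The one-line change above flips the sign of the reduced cost reported through MOI. Schematically (a sketch of the convention the patch assumes, with both bound multipliers stored as non-negative values):

```julia
# Sketch of the sign fix: the reduced cost handed back to MOI is now
# multipliers_U - multipliers_L instead of multipliers_L - multipliers_U.
zl, zu = 0.5, 0.0      # example bound multipliers (assumed >= 0)
rc = zl - zu           # internal quantity computed above
reduced_cost = -rc     # value returned to MOI after this commit
```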
52 changes: 32 additions & 20 deletions lib/MadNLPTests/src/wrapper.jl
@@ -2,38 +2,38 @@ abstract type AbstractWrapperModel{T,VT} <: NLPModels.AbstractNLPModel{T,VT} end

struct DenseWrapperModel{T,VT,T2,VT2,MT2, I <: NLPModels.AbstractNLPModel{T2,VT2}} <: AbstractWrapperModel{T,VT}
inner::I

x::VT2
y::VT2

con::VT2
grad::VT2
jac::MT2
hess::MT2

meta::NLPModels.NLPModelMeta{T, VT}
counters::NLPModels.Counters
end


struct SparseWrapperModel{T,VT,T2,VI2,VT2,I <: NLPModels.AbstractNLPModel{T2,VT2}} <: AbstractWrapperModel{T,VT}
inner::I

jrows::VI2
jcols::VI2
hrows::VI2
hcols::VI2

x::VT2
y::VT2

con::VT2
grad::VT2
jac::VT2
hess::VT2

meta::NLPModels.NLPModelMeta{T, VT}
counters::NLPModels.Counters
end


@@ -121,10 +121,10 @@ function NLPModels.cons!(
g::V
) where {M <: AbstractWrapperModel, V <: AbstractVector}

copyto!(m.x, x)
NLPModels.cons!(m.inner, m.x, m.con)
copyto!(g, m.con)
return
end
function NLPModels.grad!(
m::M,
@@ -143,7 +143,7 @@ function NLPModels.jac_structure!(
rows::V,
cols::V
) where {M <: SparseWrapperModel, V <: AbstractVector}

NLPModels.jac_structure!(m.inner, m.jrows, m.jcols)
copyto!(rows, m.jrows)
copyto!(cols, m.jcols)
@@ -159,13 +159,26 @@ function NLPModels.hess_structure!(
copyto!(rows, m.hrows)
copyto!(cols, m.hcols)
end
+function NLPModels.jtprod!(
+    m::SparseWrapperModel,
+    x::AbstractVector,
+    v::AbstractVector,
+    jtv::AbstractVector,
+)
+    copyto!(m.x, x)
+    copyto!(m.grad, jtv)
+    copyto!(m.con, v)
+    NLPModels.jtprod!(m.inner, m.x, m.con, m.grad)
+    copyto!(jtv, m.grad)
+    return
+end
function NLPModels.jac_coord!(
-    m::M,
-    x::V,
-    jac::V
-) where {M <: SparseWrapperModel, V <: AbstractVector}
+    m::SparseWrapperModel,
+    x::AbstractVector,
+    jac::AbstractVector,
+)

copyto!(m.x, x)
NLPModels.jac_coord!(m.inner, m.x, m.jac)
copyto!(jac, m.jac)
return
@@ -185,19 +198,18 @@ function NLPModels.hess_coord!(
return
end



function MadNLP.jac_dense!(
m::Model,
x::V,
jac::M
) where {Model <: DenseWrapperModel, V <: AbstractVector, M <: AbstractMatrix}

copyto!(m.x, x)
MadNLP.jac_dense!(m.inner, m.x, m.jac)
copyto!(jac, m.jac)
return
end

function MadNLP.hess_dense!(
m::Model,
x::V,
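All wrapper methods in this file, including the new `jtprod!`, follow the same copy-in/evaluate/copy-out pattern: the caller's arrays are copied into buffers stored in the inner model's array type, the inner model is evaluated on those buffers, and the result is copied back. A minimal generic sketch of the idea (illustrative only, not part of MadNLPTests):

```julia
# Illustrative buffer pattern: `buf_in`/`buf_out` live in the inner model's
# array type (e.g. GPU arrays), while `x`/`out` use the caller's array type.
function eval_through_buffers!(out, x, buf_in, buf_out, eval!)
    copyto!(buf_in, x)      # caller storage -> inner storage
    eval!(buf_out, buf_in)  # evaluate entirely on inner storage
    copyto!(out, buf_out)   # inner storage -> caller storage
    return out
end
```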
42 changes: 17 additions & 25 deletions src/IPM/IPM.jl
@@ -134,28 +134,20 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
set_blas_num_threads(ipm_opt.blas_num_threads; permanent=true)
@trace(logger,"Initializing variables.")

-    ind_cons = get_index_constraints(
-        get_lvar(nlp), get_uvar(nlp),
-        get_lcon(nlp), get_ucon(nlp);
-        fixed_variable_treatment=ipm_opt.fixed_variable_treatment,
-        equality_treatment=ipm_opt.equality_treatment
-    )
-
-    ind_lb = ind_cons.ind_lb
-    ind_ub = ind_cons.ind_ub
+    ind_lb = cb.ind_lb
+    ind_ub = cb.ind_ub

-    ns = length(ind_cons.ind_ineq)
-    nx = get_nvar(nlp)
+    ns = length(cb.ind_ineq)
+    nx = n_variables(cb)
    n = nx+ns
-    m = get_ncon(nlp)
+    m = n_constraints(cb)
nlb = length(ind_lb)
nub = length(ind_ub)

@trace(logger,"Initializing KKT system.")
kkt = create_kkt_system(
ipm_opt.kkt_system,
cb,
-        ind_cons,
ipm_opt.linear_solver;
hessian_approximation=ipm_opt.hessian_approximation,
opt_linear_solver=options.linear_solver,
@@ -185,17 +177,17 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
c = VT(undef, m)
rhs = VT(undef, m)

-    c_slk = view(c,ind_cons.ind_ineq)
-    x_lr = view(full(x), ind_cons.ind_lb)
-    x_ur = view(full(x), ind_cons.ind_ub)
-    xl_r = view(full(xl), ind_cons.ind_lb)
-    xu_r = view(full(xu), ind_cons.ind_ub)
-    zl_r = view(full(zl), ind_cons.ind_lb)
-    zu_r = view(full(zu), ind_cons.ind_ub)
-    x_trial_lr = view(full(x_trial), ind_cons.ind_lb)
-    x_trial_ur = view(full(x_trial), ind_cons.ind_ub)
-    dx_lr = view(d.xp, ind_cons.ind_lb) # TODO
-    dx_ur = view(d.xp, ind_cons.ind_ub) # TODO
+    c_slk = view(c,cb.ind_ineq)
+    x_lr = view(full(x), cb.ind_lb)
+    x_ur = view(full(x), cb.ind_ub)
+    xl_r = view(full(xl), cb.ind_lb)
+    xu_r = view(full(xu), cb.ind_ub)
+    zl_r = view(full(zl), cb.ind_lb)
+    zu_r = view(full(zu), cb.ind_ub)
+    x_trial_lr = view(full(x_trial), cb.ind_lb)
+    x_trial_ur = view(full(x_trial), cb.ind_ub)
+    dx_lr = view(d.xp, cb.ind_lb) # TODO
+    dx_ur = view(d.xp, cb.ind_ub) # TODO

inertia_correction_method = if ipm_opt.inertia_correction_method == InertiaAuto
is_inertia(kkt.linear_solver)::Bool ? InertiaBased : InertiaFree
@@ -221,7 +213,7 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
d, p,
_w1, _w2, _w3, _w4,
x_trial, c_trial, zero(T), c_slk, rhs,
-        ind_cons.ind_ineq, ind_cons.ind_fixed, ind_cons.ind_llb, ind_cons.ind_uub,
+        cb.ind_ineq, cb.ind_fixed, cb.ind_llb, cb.ind_uub,
x_lr, x_ur, xl_r, xu_r, zl_r, zu_r, dx_lr, dx_ur, x_trial_lr, x_trial_ur,
iterator,
zero(T), zero(T), zero(T), zero(T), zero(T), zero(T), zero(T), zero(T), zero(T),
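Taken together, these hunks assume the callback wrapper `cb` now carries the index sets that `get_index_constraints` used to compute, along with `n_variables`/`n_constraints` accessors. A hypothetical shape, with field names read off the diff (the concrete struct in MadNLP may differ):

```julia
# Hypothetical sketch of the index fields the solver constructor reads from `cb`.
struct CallbackIndices
    ind_lb::Vector{Int}     # variables with a lower bound
    ind_ub::Vector{Int}     # variables with an upper bound
    ind_ineq::Vector{Int}   # inequality constraints (each gets a slack)
    ind_fixed::Vector{Int}  # fixed variables (lvar == uvar)
    ind_llb::Vector{Int}    # presumably: variables bounded from below only
    ind_uub::Vector{Int}    # presumably: variables bounded from above only
end
```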
13 changes: 0 additions & 13 deletions src/IPM/kernels.jl
@@ -814,19 +814,6 @@ function get_ftype(filter,theta,theta_trial,varphi,varphi_trial,switching_condit
return " "
end

-# fixed variable treatment ----------------------------------------------------
-function _get_fixed_variable_index(
-    mat::SparseMatrixCSC{Tv,Ti1}, ind_fixed::Vector{Ti2}
-) where {Tv,Ti1,Ti2}
-    fixed_aug_index = Int[]
-    for i in ind_fixed
-        append!(fixed_aug_index,append!(collect(mat.colptr[i]+1:mat.colptr[i+1]-1)))
-    end
-    append!(fixed_aug_index,setdiff!(Base._findin(mat.rowval,ind_fixed),mat.colptr))
-
-    return fixed_aug_index
-end

function dual_inf_perturbation!(px, ind_llb, ind_uub, mu, kappa_d)
px[ind_llb] .-= mu*kappa_d
px[ind_uub] .+= mu*kappa_d
8 changes: 7 additions & 1 deletion src/IPM/solver.jl
@@ -152,6 +152,12 @@ function solve!(
set_options!(solver.opt, kwargs)
end

+    # If the problem has no free variable, do nothing
+    if solver.n == 0
+        update!(stats, solver)
+        return stats
+    end

try
if solver.status == INITIAL
@notice(solver.logger,"This is $(introduce()), running with $(introduce(solver.kkt.linear_solver))\n")
@@ -216,7 +222,7 @@ function regular!(solver::AbstractMadNLPSolver{T}) where T
if (solver.cnt.k!=0 && !solver.opt.jacobian_constant)
eval_jac_wrapper!(solver, solver.kkt, solver.x)
end

+    jtprod!(solver.jacl, solver.kkt, solver.y)
sd = get_sd(solver.y,solver.zl_r,solver.zu_r,T(solver.opt.s_max))
sc = get_sc(solver.zl_r,solver.zu_r,T(solver.opt.s_max))
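The early return added to `solve!` covers the degenerate case where the sparse path has eliminated every variable: the reduced problem has dimension zero, so there is nothing to iterate on. A small end-to-end example of that case (assuming ADNLPModels for the model definition; exact status reporting may differ):

```julia
using ADNLPModels, MadNLP

# Every variable is fixed (lvar == uvar), so the sparse path removes them all
# and solve! returns right after packing the trivial solution.
nlp = ADNLPModel(x -> (x[1] - 1.0)^2, [0.5], [0.5], [0.5])
stats = madnlp(nlp; print_level=MadNLP.ERROR)
# Expected: stats.solution ≈ [0.5], with multipliers filled in by the
# fixed-variable handler.
```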
52 changes: 32 additions & 20 deletions src/IPM/utils.jl
@@ -20,40 +20,52 @@ mutable struct MadNLPExecutionStats{T, VT} <: AbstractExecutionStats
counters::MadNLPCounters
end

-MadNLPExecutionStats(solver::MadNLPSolver) =MadNLPExecutionStats(
-    solver.opt,
-    solver.status,
-    primal(solver.x)[1:get_nvar(solver.nlp)],
-    solver.obj_val / solver.cb.obj_scale[],
-    solver.c ./ solver.cb.con_scale,
-    solver.inf_du,
-    solver.inf_pr,
-    copy(solver.y),
-    primal(solver.zl)[1:get_nvar(solver.nlp)],
-    primal(solver.zu)[1:get_nvar(solver.nlp)],
-    0,
-    solver.cnt,
-)
+function MadNLPExecutionStats(solver::MadNLPSolver{T, VT}) where {T, VT}
+    n, m = get_nvar(solver.nlp), get_ncon(solver.nlp)
+    x = similar(VT, n)
+    zl = similar(VT, n)
+    zu = similar(VT, n)
+    c = similar(VT, m)
+    unpack_cons!(c, solver.cb, solver.c)
+    unpack_x!(x, solver.cb, variable(solver.x))
+    unpack_z!(zl, solver.cb, variable(solver.zl))
+    unpack_z!(zu, solver.cb, variable(solver.zu))
+    return MadNLPExecutionStats(
+        solver.opt,
+        solver.status,
+        x,
+        unpack_obj(solver.cb, solver.obj_val),
+        c,
+        solver.inf_du,
+        solver.inf_pr,
+        copy(solver.y),
+        zl,
+        zu,
+        0,
+        solver.cnt,
+    )
+end

function update!(stats::MadNLPExecutionStats, solver::MadNLPSolver)
stats.status = solver.status
-    stats.solution .= @view(primal(solver.x)[1:get_nvar(solver.nlp)])
+    unpack_x!(stats.solution, solver.cb, variable(solver.x))
+    unpack_z!(stats.multipliers_L, solver.cb, variable(solver.zl))
+    unpack_z!(stats.multipliers_U, solver.cb, variable(solver.zu))
    stats.multipliers .= solver.y
-    stats.multipliers_L .= @view(primal(solver.zl)[1:get_nvar(solver.nlp)])
-    stats.multipliers_U .= @view(primal(solver.zu)[1:get_nvar(solver.nlp)])
-    # stats.solution .= min.(
-    #     max.(
-    #         @view(primal(solver.x)[1:get_nvar(solver.nlp)]),
-    #         get_lvar(solver.nlp)
-    #     ),
-    #     get_uvar(solver.nlp)
-    # )
-    stats.objective = solver.obj_val / solver.cb.obj_scale[]
-    stats.constraints .= solver.c ./ solver.cb.con_scale .+ solver.rhs
+    stats.objective = unpack_obj(solver.cb, solver.obj_val)
+    unpack_cons!(stats.constraints, solver.cb, solver.c)
+    stats.constraints .+= solver.rhs
stats.constraints[solver.ind_ineq] .+= slack(solver.x)
stats.dual_feas = solver.inf_du
stats.primal_feas = solver.inf_pr
-    update_z!(solver.cb, stats.multipliers_L, stats.multipliers_U, solver.jacl)
+    update_z!(solver.cb, stats.solution, stats.multipliers, stats.multipliers_L, stats.multipliers_U, solver.jacl)
stats.iter = solver.cnt.k
return stats
end
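The `unpack_*` helpers used here invert the packing performed by the callback: quantities computed on the reduced/scaled space are scattered back to the user's original space. A hedged sketch of what `unpack_x!` could do on the sparse path (illustrative only; the actual implementation lives in the callback layer):

```julia
# Illustrative sketch: scatter the reduced primal vector back to full space,
# writing fixed variables at their (equal) bound.
function unpack_x_sketch!(x_full, x_reduced, ind_free, ind_fixed, lvar)
    x_full[ind_free] .= x_reduced         # free variables from the solver
    x_full[ind_fixed] .= lvar[ind_fixed]  # fixed variables stay at the bound
    return x_full
end
```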
13 changes: 6 additions & 7 deletions src/KKT/Dense/augmented.jl
@@ -42,21 +42,20 @@ end
function create_kkt_system(
::Type{DenseKKTSystem},
cb::AbstractCallback{T,VT},
-    ind_cons,
linear_solver::Type;
opt_linear_solver=default_options(linear_solver),
hessian_approximation=ExactHessian,
) where {T, VT}

-    ind_ineq = ind_cons.ind_ineq
-    ind_lb = ind_cons.ind_lb
-    ind_ub = ind_cons.ind_ub
+    ind_ineq = cb.ind_ineq
+    ind_lb = cb.ind_lb
+    ind_ub = cb.ind_ub

n = cb.nvar
m = cb.ncon
ns = length(ind_ineq)
-    nlb = length(ind_cons.ind_lb)
-    nub = length(ind_cons.ind_ub)
+    nlb = length(cb.ind_lb)
+    nub = length(cb.ind_ub)

hess = create_array(cb, n, n)
jac = create_array(cb, m, n)
@@ -87,7 +86,7 @@ function create_kkt_system(
hess, jac, quasi_newton,
reg, pr_diag, du_diag, l_diag, u_diag, l_lower, u_lower,
diag_hess, aug_com,
-        ind_ineq, ind_cons.ind_lb, ind_cons.ind_ub,
+        ind_ineq, cb.ind_lb, cb.ind_ub,
_linear_solver,
Dict{Symbol, Any}(),
)
(Diff truncated: the remaining 10 changed files are not shown.)
