diff --git a/Project.toml b/Project.toml
index 01a1c35..8c2cee7 100644
--- a/Project.toml
+++ b/Project.toml
@@ -10,7 +10,7 @@ LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125"
 PartitionedStructures = "a6683cb1-4e99-4d03-b5b5-b79ed0acabcf"
 
 [compat]
-Krylov = "0.9.0"
+Krylov = "=0.9.0"
 LinearOperators = "2.4.0"
 PartitionedStructures = "0.1.4, 0.1.5"
 julia = "^1.6.0"
diff --git a/docs/Project.toml b/docs/Project.toml
index dc20bd1..089ea9e 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -7,3 +7,7 @@ PartitionedVectors = "9d5e22db-ddc8-46c9-8309-91b035e7f733"
 
 [compat]
 Documenter = "~0.27"
+Krylov = "=0.9.0"
+LinearOperators = "2.4.0"
+PartitionedStructures = "0.1.4, 0.1.5"
+julia = "^1.6.0"
\ No newline at end of file
diff --git a/docs/src/tutorial.md b/docs/src/tutorial.md
index bfda06b..4f50a01 100644
--- a/docs/src/tutorial.md
+++ b/docs/src/tutorial.md
@@ -1,6 +1,6 @@
 # PartitionedVectors.jl Tutorial
 
-A `PartitionedVector <: DenseVector <: AbstractVector` wraps a `[PartitionedStructures](https://github.com/JuliaSmoothOptimizers/PartitionedStructures.jl).Elemental_pv`, to make [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) modules able to exploit the partially separable structure.
-For now, `PartitionedVector` is the keystone to define [PartiallySeparableNLPModel](https://github.com/JuliaSmoothOptimizers/PartiallySeparableNLPModels.jl)s, and then to consequently:
+A `PartitionedVector <: DenseVector <: AbstractVector` wraps a [`PartitionedStructures`](https://github.com/JuliaSmoothOptimizers/PartitionedStructures.jl)`.Elemental_pv`, so that [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) modules can exploit the partially separable structure.
+For now, `PartitionedVector` is the keystone to define [PartiallySeparableNLPModel](https://github.com/JuliaSmoothOptimizers/PartiallySeparableNLPModels.jl)s, and consequently to:
 - replace `Vector` in [JSOSolvers.jl](https://github.com/JuliaSmoothOptimizers/JSOSolvers.jl);
-- replace `Vector` in [KrylovSolvers.jl](https://github.com/JuliaSmoothOptimizers/Krylov.jl);
+- replace `Vector` in [Krylov.jl](https://github.com/JuliaSmoothOptimizers/Krylov.jl);
@@ -35,8 +35,8 @@ In optimization methods, you use it to store $\nabla f_i (U_i x)$, $y_i = \nabla
 ```math
 \nabla^2 f(x_k) s \approx B_k s = (\sum_{i=1}^N U_i^\top B_{k,i} U_i ) s = \sum_{i=1}^N U_i^\top (B_{k,i} U_i s) .
 ```
-- usage 2: represent simultaneously a vector $x \in \mathbb{R}^n$ and the application of every $U_i$ on to $x$: $U_i x, \forall i$.
-By construction, the elements parametrized the same variables (for exemple `U1` and `U3` are parametrized by the third variable) share the same values.
-In optimization methods, it allows to store the current point $x_k$ or step $s_k$, which always comes in handy to evaluate $f_i(U_i x), \nabla f_i(U_i x)$ or $B_{k,i} U_i s$.
+- usage 2: represent simultaneously a vector $x \in \mathbb{R}^n$ and the application of every $U_i$ onto $x$: $U_i x, \forall i$.
+By construction, the elements parametrized by the same variables (for example, `U1` and `U3` are both parametrized by the third variable) share the same values.
+In optimization methods, it is used to store the current point $x_k$ or the step $s_k$, which comes in handy to evaluate $f_i(U_i x), \nabla f_i(U_i x)$ or $B_{k,i} U_i s$.
 
-Any methods exploiting partially separable concepts will have to manipulate both usages at the same time, in particular the solvers from JSOSolvers.jl and Krylov.jl.
+Any method exploiting partially separable concepts has to manipulate both usages at the same time, in particular the solvers from JSOSolvers.jl and Krylov.jl.
@@ -66,15 +66,19 @@ You can set a `PartitionedVector` of usage 2 from a `Vector` with
 ```@example PV
 set!(pv_vec, rand(length(pv_vec)))
 ```
-Warning: `set!()` applied on `PartitionedVector` of usage 1 doesn't have sense, and produces an error.
+**Warning**: applying `set!()` to a `PartitionedVector` of usage 1 makes no sense, and produces an error.
 
-PartitionedVectors.jl specify several methods from various modules.
+PartitionedVectors.jl specifies several methods from various modules.
 For operations that are not in place, the result will take usage 1.
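+For instance, a minimal sketch reusing `pv_vec` from above (`pv_res` is just an illustrative name): the sum below is not in place, so its result takes usage 1 even though both operands have usage 2.
+```@example PV
+pv_res = pv_vec + pv_vec # out-of-place operation: `pv_res` takes usage 1
+```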
-Warning: you have to be careful when you mix both usages in a single operation, because it could have not any sense.
-Keep in mind what result of `Vector(pv)` do you want.
+**Warning**: be careful when you mix both usages in a single operation: the result may be meaningless.
+Keep in mind which result you expect from `Vector(pv)`.
 
-Base.jl:
+**Base.jl**:
 - elementary operations `+, -, *, ==` for PartitionedVectors.
 ```@example PV
 pv + pv == 2 * pv
 ```
@@ -116,7 +116,7 @@ Vector(pv) == Vector(pv_vec)
 pvsimilar = similar(pv)
 ```
 
-LinearAlgebra: both `dot, norm` rely on `build!` before applying `dot, norm` on the resulting `Vector`
+**LinearAlgebra**: both `dot` and `norm` rely on `build!(::PartitionedVector)` before applying `dot` or `norm` to the resulting `Vector`.
 ```@example PV
 using LinearAlgebra
 dot(pv,pv) ≈ norm(pv)^2
 ```
@@ -133,7 +133,7 @@ lo * pv
 Note: `Matrix(lo)` will produce an error, since the default implementation assumes a complete `Vector`-like behaviour.
-- dedicated `CGSolver` from Krylov.jl to solve a partitioned linear system (from a partitioned `LinearOperator`).
+- dedicated `CgSolver` from Krylov.jl to solve a partitioned linear system (from a partitioned `LinearOperator`).
 ```@example PV
-using Krylov
+using Krylov # 0.9.0
 solver = Krylov.CgSolver(pv)
 
 pv_gradient = similar(pv)
diff --git a/src/base.jl b/src/base.jl
index 6592501..9534db1 100644
--- a/src/base.jl
+++ b/src/base.jl
@@ -26,7 +26,7 @@ getindex(pv::PartitionedVector, inds...) = PS.get_eev_set(pv.epv)[inds...]
     setindex!(pv::PartitionedVector{T}, val::T, index::Int) where T<:Number
     setindex!(pv::PartitionedVector{T}, val::Vector{T}, index::Int) where T<:Number
 
-Set `pv[index]` (e.g. the `index`-th element vector) to `val`.
+Set `pv[index]` (i.e. the `index`-th element vector) to `val`.
 """
 function setindex!(pv::PartitionedVector, eev::Elemental_elt_vec, index::Int)
   get_eev_value(pv.epv, index) .= PS.get_vec(eev)
diff --git a/src/struct.jl b/src/struct.jl
index 58577ad..cd193b0 100644
--- a/src/struct.jl
+++ b/src/struct.jl
@@ -9,7 +9,7 @@ abstract type AbstractPartitionedVector{T} <: DenseVector{T} end # for Krylov
 """
     PartitionedVector{T} <: AbstractPartitionedVector{T}
 
-Wrap `PartitionedStructures.Elemental_pv` to behave almost as an `AbstractVector`.
+Wrap `PartitionedStructures.Elemental_pv` to behave as an `AbstractVector`.
 """
 mutable struct PartitionedVector{T} <: AbstractPartitionedVector{T}
   epv::Elemental_pv{T}