Skip to content

Commit 20a5ed0

Browse files
authored
Merge pull request #212 from JuliaControl/sparse_conv_mat
Changed: store conversion matrices as `SparseMatrixCSC`
2 parents 537591d + 760ecbd commit 20a5ed0

File tree

9 files changed

+44
-32
lines changed

9 files changed

+44
-32
lines changed

Project.toml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
name = "ModelPredictiveControl"
22
uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c"
33
authors = ["Francis Gagnon"]
4-
version = "1.8.0"
4+
version = "1.8.1"
55

66
[deps]
77
ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e"
@@ -16,6 +16,7 @@ PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
1616
ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
1717
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
1818
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
19+
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
1920
SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
2021
SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35"
2122

@@ -36,6 +37,7 @@ PrecompileTools = "1"
3637
ProgressLogging = "0.1"
3738
Random = "1.10"
3839
RecipesBase = "1"
40+
SparseArrays = "1.10"
3941
SparseConnectivityTracer = "0.6.13"
4042
SparseMatrixColorings = "0.4.14"
4143
TestItemRunner = "1"

src/ModelPredictiveControl.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
module ModelPredictiveControl
22

33
using PrecompileTools
4-
using LinearAlgebra
4+
using LinearAlgebra, SparseArrays
55
using Random: randn
66

77
using RecipesBase

src/controller/construct.jl

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -122,10 +122,10 @@ struct ControllerConstraint{NT<:Real, GCfunc<:Union{Nothing, Function}}
122122
x̂0min ::Vector{NT}
123123
x̂0max ::Vector{NT}
124124
# A matrices for the linear inequality constraints:
125-
A_Umin ::Matrix{NT}
126-
A_Umax ::Matrix{NT}
127-
A_ΔŨmin ::Matrix{NT}
128-
A_ΔŨmax ::Matrix{NT}
125+
A_Umin ::SparseMatrixCSC{NT, Int}
126+
A_Umax ::SparseMatrixCSC{NT, Int}
127+
A_ΔŨmin ::SparseMatrixCSC{NT, Int}
128+
A_ΔŨmax ::SparseMatrixCSC{NT, Int}
129129
A_Ymin ::Matrix{NT}
130130
A_Ymax ::Matrix{NT}
131131
A_x̂min ::Matrix{NT}
@@ -675,7 +675,7 @@ constraints:
675675
in which ``\mathbf{U_{min}}`` and ``\mathbf{U_{max}}`` vectors respectively contains
676676
``\mathbf{u_{min}}`` and ``\mathbf{u_{max}}`` repeated ``H_p`` times.
677677
"""
678-
function relaxU(Pu::Matrix{NT}, C_umin, C_umax, nϵ) where NT<:Real
678+
function relaxU(Pu::AbstractMatrix{NT}, C_umin, C_umax, nϵ) where NT<:Real
679679
if== 1 # Z̃ = [Z; ϵ]
680680
# ϵ impacts Z → U conversion for constraint calculations:
681681
A_Umin, A_Umax = -[Pu C_umin], [Pu -C_umax]
@@ -717,7 +717,7 @@ bound, which is more precise than a linear inequality constraint. However, it is
717717
convenient to treat it as a linear inequality constraint since the optimizer `OSQP.jl` does
718718
not support pure bounds on the decision variables.
719719
"""
720-
function relaxΔU(PΔu::Matrix{NT}, C_Δumin, C_Δumax, ΔUmin, ΔUmax, nϵ) where NT<:Real
720+
function relaxΔU(PΔu::AbstractMatrix{NT}, C_Δumin, C_Δumax, ΔUmin, ΔUmax, nϵ) where NT<:Real
721721
nZ = size(PΔu, 2)
722722
if== 1 # Z̃ = [Z; ϵ]
723723
ΔŨmin, ΔŨmax = [ΔUmin; NT[0.0]], [ΔUmax; NT[Inf]] # 0 ≤ ϵ ≤ ∞
@@ -754,7 +754,7 @@ Denoting the decision variables augmented with the slack variable
754754
in which ``\mathbf{Y_{min}, Y_{max}}`` and ``\mathbf{Y_{op}}`` vectors respectively contains
755755
``\mathbf{y_{min}, y_{max}}`` and ``\mathbf{y_{op}}`` repeated ``H_p`` times.
756756
"""
757-
function relaxŶ(E::Matrix{NT}, C_ymin, C_ymax, nϵ) where NT<:Real
757+
function relaxŶ(E::AbstractMatrix{NT}, C_ymin, C_ymax, nϵ) where NT<:Real
758758
if== 1 # Z̃ = [Z; ϵ]
759759
if iszero(size(E, 1))
760760
# model is not a LinModel, thus Ŷ constraints are not linear:
@@ -792,7 +792,7 @@ the inequality constraints:
792792
\end{bmatrix}
793793
```
794794
"""
795-
function relaxterminal(ex̂::Matrix{NT}, c_x̂min, c_x̂max, nϵ) where {NT<:Real}
795+
function relaxterminal(ex̂::AbstractMatrix{NT}, c_x̂min, c_x̂max, nϵ) where {NT<:Real}
796796
if== 1 # Z̃ = [Z; ϵ]
797797
if iszero(size(ex̂, 1))
798798
# model is not a LinModel and transcription is a SingleShooting, thus terminal
@@ -821,7 +821,7 @@ It returns the ``\mathbf{Ẽŝ}`` matrix that appears in the defect equation
821821
\mathbf{A_ŝ Z̃} = - \mathbf{F_ŝ}
822822
```
823823
"""
824-
function augmentdefect(Eŝ::Matrix{NT}, nϵ) where NT<:Real
824+
function augmentdefect(Eŝ::AbstractMatrix{NT}, nϵ) where NT<:Real
825825
if== 1 # Z̃ = [Z; ϵ]
826826
Ẽŝ = [Eŝ zeros(NT, size(Eŝ, 1), 1)]
827827
else # Z̃ = Z (only hard constraints)

src/controller/explicitmpc.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@ struct ExplicitMPC{
1515
R̂u::Vector{NT}
1616
R̂y::Vector{NT}
1717
lastu0::Vector{NT}
18-
P̃Δu::Matrix{NT}
19-
P̃u ::Matrix{NT}
20-
Tu ::Matrix{NT}
18+
P̃Δu::SparseMatrixCSC{NT, Int}
19+
P̃u ::SparseMatrixCSC{NT, Int}
20+
Tu ::SparseMatrixCSC{NT, Int}
2121
Tu_lastu0::Vector{NT}
2222
::Matrix{NT}
2323
F::Vector{NT}

src/controller/linmpc.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,9 @@ struct LinMPC{
2424
R̂u::Vector{NT}
2525
R̂y::Vector{NT}
2626
lastu0::Vector{NT}
27-
P̃Δu::Matrix{NT}
28-
P̃u ::Matrix{NT}
29-
Tu ::Matrix{NT}
27+
P̃Δu::SparseMatrixCSC{NT, Int}
28+
P̃u ::SparseMatrixCSC{NT, Int}
29+
Tu ::SparseMatrixCSC{NT, Int}
3030
Tu_lastu0::Vector{NT}
3131
::Matrix{NT}
3232
F::Vector{NT}

src/controller/nonlinmpc.jl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -40,9 +40,9 @@ struct NonLinMPC{
4040
R̂u::Vector{NT}
4141
R̂y::Vector{NT}
4242
lastu0::Vector{NT}
43-
P̃Δu::Matrix{NT}
44-
P̃u ::Matrix{NT}
45-
Tu ::Matrix{NT}
43+
P̃Δu::SparseMatrixCSC{NT, Int}
44+
P̃u ::SparseMatrixCSC{NT, Int}
45+
Tu ::SparseMatrixCSC{NT, Int}
4646
Tu_lastu0::Vector{NT}
4747
::Matrix{NT}
4848
F::Vector{NT}
@@ -550,7 +550,7 @@ This method is really intricate and I'm not proud of it. That's because of 3 ele
550550
and as efficient as possible. All the function outputs and derivatives are cached and
551551
updated in-place if required to use the efficient [`value_and_jacobian!`](@extref DifferentiationInterface DifferentiationInterface.value_and_jacobian!).
552552
- The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
553-
of `Vararg{T,N}` (see the [performance tip][@extref Julia Be-aware-of-when-Julia-avoids-specializing]
553+
of `Vararg{T,N}` (see the [performance tip](@extref Julia Be-aware-of-when-Julia-avoids-specializing)
554554
) and memoization to avoid redundant computations. This is already complex, but it's even
555555
worse knowing that most automatic differentiation tools do not support splatting.
556556
- The signature of gradient and hessian functions is not the same for univariate (`nZ̃ == 1`)

src/controller/transcription.jl

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -81,21 +81,22 @@ in which ``\mathbf{P_{Δu}}`` is defined in the Extended Help section.
8181
- ``\mathbf{P_{Δu}} = \mathbf{I}`` if `transcription` is a [`SingleShooting`](@ref)
8282
- ``\mathbf{P_{Δu}} = [\begin{smallmatrix}\mathbf{I} & \mathbf{0} \end{smallmatrix}]``
8383
if `transcription` is a [`MultipleShooting`](@ref)
84+
The matrix is stored as a `SparseMatrixCSC` to support both cases efficiently.
8485
"""
8586
function init_ZtoΔU end
8687

8788
function init_ZtoΔU(
8889
estim::StateEstimator{NT}, ::SingleShooting, _ , Hc
8990
) where {NT<:Real}
90-
PΔu = Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc)
91+
PΔu = sparse(Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc))
9192
return PΔu
9293
end
9394

9495
function init_ZtoΔU(
9596
estim::StateEstimator{NT}, ::MultipleShooting, Hp, Hc
9697
) where {NT<:Real}
97-
I_nu_Hc = Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc)
98-
PΔu = [I_nu_Hc zeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)]
98+
I_nu_Hc = sparse(Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc))
99+
PΔu = [I_nu_Hc spzeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)]
99100
return PΔu
100101
end
101102

@@ -144,30 +145,33 @@ The ``\mathbf{P_u}`` and ``\mathbf{T_u}`` matrices are defined in the Extended H
144145
- ``\mathbf{P_u} = \mathbf{P_u^†}`` if `transcription` is a [`SingleShooting`](@ref)
145146
- ``\mathbf{P_u} = [\begin{smallmatrix}\mathbf{P_u^†} & \mathbf{0} \end{smallmatrix}]``
146147
if `transcription` is a [`MultipleShooting`](@ref)
148+
The conversion matrices are stored as `SparseMatrixCSC` since it was benchmarked that it
149+
is generally more performant than normal dense matrices, even for small `nu`, `Hp` and
150+
`Hc` values. Using `Bool` element type and `BitMatrix` is also slower.
147151
"""
148152
function init_ZtoU(
149153
estim::StateEstimator{NT}, transcription::TranscriptionMethod, Hp, Hc, nb
150154
) where {NT<:Real}
151155
nu = estim.model.nu
152-
# Pu and Tu are `Matrix{NT}`, conversion is faster than `Matrix{Bool}` or `BitMatrix`
153-
I_nu = Matrix{NT}(I, nu, nu)
156+
I_nu = sparse(Matrix{NT}(I, nu, nu))
154157
PuDagger = Matrix{NT}(undef, nu*Hp, nu*Hc)
155158
for i=1:Hc
156159
ni = nb[i]
157160
Q_ni = repeat(I_nu, ni, 1)
158161
iRows = (1:nu*ni) .+ @views nu*sum(nb[1:i-1])
159-
PuDagger[iRows, :] = [repeat(Q_ni, 1, i) zeros(nu*ni, nu*(Hc-i))]
162+
PuDagger[iRows, :] = [repeat(Q_ni, 1, i) spzeros(nu*ni, nu*(Hc-i))]
160163
end
164+
PuDagger = sparse(PuDagger)
161165
Pu = init_PUmat(estim, transcription, Hp, Hc, PuDagger)
162166
Tu = repeat(I_nu, Hp)
163167
return Pu, Tu
164168
end
165169

166-
167-
168170
init_PUmat( _ , ::SingleShooting, _ , _ , PuDagger) = PuDagger
169-
function init_PUmat(estim, ::MultipleShooting, Hp, _ , PuDagger)
170-
return [PuDagger zeros(eltype(PuDagger), estim.model.nu*Hp, estim.nx̂*Hp)]
171+
function init_PUmat(
172+
estim, ::MultipleShooting, Hp, _ , PuDagger::AbstractMatrix{NT}
173+
) where NT<:Real
174+
return [PuDagger spzeros(NT, estim.model.nu*Hp, estim.nx̂*Hp)]
171175
end
172176

173177
@doc raw"""

src/estimator/kalman.jl

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,12 @@ SteadyKalmanFilter estimator with a sample time Ts = 0.5 s, LinModel and:
158158
!!! tip
159159
Increasing `σQint_u` and `σQint_ym` values increases the integral action "gain".
160160
161+
Custom stochastic model for the unmeasured disturbances (different from integrated white
162+
Gaussian noise) can be specified by constructing a [`LinModel`](@ref) object with the
163+
augmented state-space matrices directly, and by setting `nint_u=0` and `nint_ym=0`. See
164+
[Disturbance-gallery](@extref LowLevelParticleFilters) for examples of other
165+
disturbance models.
166+
161167
The constructor pre-computes the steady-state Kalman gain `K̂` with the [`kalman`](@extref ControlSystemsBase.kalman)
162168
function. It can sometimes fail, for example when `model` matrices are ill-conditioned.
163169
In such a case, you can try the alternative time-varying [`KalmanFilter`](@ref).

src/estimator/manual.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ ManualEstimator estimator with a sample time Ts = 0.5 s, LinModel and:
130130
A custom stochastic model for the unmeasured disturbances (other than integrated white
131131
noise) can be specified by constructing a [`SimModel`](@ref) object with the augmented
132132
state-space matrices/functions directly, and by setting `nint_u=0` and `nint_ym=0`. See
133-
[`Disturbance-gallery`](@extref LowLevelParticleFilters) for examples of other
133+
[Disturbance-gallery](@extref LowLevelParticleFilters) for examples of other
134134
disturbance models.
135135
"""
136136
function ManualEstimator(

0 commit comments

Comments
 (0)