From d7f0fbc800d121daf2480ab4d4408a76c0977d77 Mon Sep 17 00:00:00 2001 From: franckgaga Date: Sun, 11 May 2025 11:27:24 -0400 Subject: [PATCH 1/8] =?UTF-8?q?added:=20efficient=20Z=CC=83=20to=20=CE=94U?= =?UTF-8?q?=CC=83=20conversion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/controller/transcription.jl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/controller/transcription.jl b/src/controller/transcription.jl index 7b82dff19..756cd6f38 100644 --- a/src/controller/transcription.jl +++ b/src/controller/transcription.jl @@ -1022,8 +1022,14 @@ function set_warmstart!(mpc::PredictiveController, transcription::MultipleShooti return Z̃s end -getΔŨ!(ΔŨ, mpc::PredictiveController, ::SingleShooting, Z̃) = (ΔŨ .= Z̃) # since mpc.P̃Δu = I -getΔŨ!(ΔŨ, mpc::PredictiveController, ::TranscriptionMethod, Z̃) = mul!(ΔŨ, mpc.P̃Δu, Z̃) +getΔŨ!(ΔŨ, mpc::PredictiveController, ::SingleShooting, Z̃) = (ΔŨ .= Z̃) +function getΔŨ!(ΔŨ, mpc::PredictiveController, ::TranscriptionMethod, Z̃) + # avoid explicit matrix multiplication with mpc.P̃Δu for performance: + nΔU = mpc.Hc*mpc.estim.model.nu + ΔŨ[1:nΔU] .= @views Z̃[1:nΔU] + mpc.nϵ == 1 && (ΔŨ[end] = Z̃[end]) + return ΔŨ +end getU0!(U0, mpc::PredictiveController, Z̃) = (mul!(U0, mpc.P̃u, Z̃) .+ mpc.Tu_lastu0) @doc raw""" From f7d729384d90f08bd50baf574d1f2eaaeae882a3 Mon Sep 17 00:00:00 2001 From: Guillaume Dalle <22795598+gdalle@users.noreply.github.com> Date: Thu, 1 May 2025 12:09:59 +0200 Subject: [PATCH 2/8] Make Hessian sparsity detection work with SCT (prototype) --- Project.toml | 2 ++ src/ModelPredictiveControl.jl | 3 +++ src/controller/construct.jl | 41 ++++++++++++++++++--------------- src/controller/execute.jl | 7 +++++- src/controller/nonlinmpc.jl | 27 +++++++++++----------- src/controller/transcription.jl | 9 ++++---- 6 files changed, 53 insertions(+), 36 deletions(-) diff --git a/Project.toml b/Project.toml index 3c72934f2..73b90731a 100644 --- a/Project.toml +++ b/Project.toml @@ -16,6 +16,7 @@ PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" @@ -36,6 +37,7 @@ PrecompileTools = "1" ProgressLogging = "0.1" Random = "1.10" RecipesBase = "1" +SparseArrays = "1.11.0" SparseConnectivityTracer = "0.6.13" SparseMatrixColorings = "0.4.14" TestItemRunner = "1" diff --git a/src/ModelPredictiveControl.jl b/src/ModelPredictiveControl.jl index adefdfef2..da71c038f 100644 --- a/src/ModelPredictiveControl.jl +++ b/src/ModelPredictiveControl.jl @@ -3,6 +3,7 @@ module ModelPredictiveControl using PrecompileTools using LinearAlgebra using Random: randn +using SparseArrays using RecipesBase using ProgressLogging @@ -47,6 +48,7 @@ include("state_estim.jl") include("predictive_control.jl") include("plot_sim.jl") +#= @setup_workload begin # Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the # size of the precompile file and potentially make loading faster. 
@@ -56,5 +58,6 @@ include("plot_sim.jl") include("precompile.jl") end end +=# end \ No newline at end of file diff --git a/src/controller/construct.jl b/src/controller/construct.jl index 16b7b745e..9f53adf87 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -1,11 +1,11 @@ -struct PredictiveControllerBuffer{NT<:Real} +struct PredictiveControllerBuffer{NT<:Real,M<:AbstractMatrix{NT}} u ::Vector{NT} Z̃ ::Vector{NT} D̂ ::Vector{NT} Ŷ ::Vector{NT} U ::Vector{NT} Ẽ ::Matrix{NT} - P̃u::Matrix{NT} + P̃u::M empty::Vector{NT} end @@ -29,14 +29,19 @@ function PredictiveControllerBuffer( Ẽ = Matrix{NT}(undef, ny*Hp, nZ̃) P̃u = Matrix{NT}(undef, nu*Hp, nZ̃) empty = Vector{NT}(undef, 0) - return PredictiveControllerBuffer{NT}(u, Z̃, D̂, Ŷ, U, Ẽ, P̃u, empty) + return PredictiveControllerBuffer{NT,typeof(P̃u)}(u, Z̃, D̂, Ŷ, U, Ẽ, P̃u, empty) end "Include all the objective function weights of [`PredictiveController`](@ref)" -struct ControllerWeights{NT<:Real} - M_Hp::Hermitian{NT, Matrix{NT}} - Ñ_Hc::Hermitian{NT, Matrix{NT}} - L_Hp::Hermitian{NT, Matrix{NT}} +struct ControllerWeights{ + NT<:Real, + H1<:Hermitian{NT, <:AbstractMatrix{NT}}, + H2<:Hermitian{NT, <:AbstractMatrix{NT}}, + H3<:Hermitian{NT, <:AbstractMatrix{NT}}, +} + M_Hp::H1 + Ñ_Hc::H2 + L_Hp::H3 E ::NT iszero_M_Hp::Vector{Bool} iszero_Ñ_Hc::Vector{Bool} @@ -46,15 +51,15 @@ struct ControllerWeights{NT<:Real} model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt=Inf, Ewt=0 ) where NT<:Real validate_weights(model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt) - # convert `Diagonal` to normal `Matrix` if required: - M_Hp = Hermitian(convert(Matrix{NT}, M_Hp), :L) - N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) - L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) + M_Hp = Hermitian(convert.(NT, M_Hp), :L) + N_Hc = Hermitian(convert.(NT, N_Hc), :L) + L_Hp = Hermitian(convert.(NT, L_Hp), :L) nΔU = size(N_Hc, 1) C = Cwt if !isinf(Cwt) # ΔŨ = [ΔU; ϵ] (ϵ is the slack variable) - Ñ_Hc = Hermitian([N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C], :L) + # Ñ_Hc = Hermitian([N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C], :L) + Ñ_Hc = Hermitian(blockdiag(sparse(N_Hc), sparse(Diagonal([C]))), :L) else # ΔŨ = ΔU (only hard constraints) Ñ_Hc = N_Hc @@ -64,7 +69,7 @@ struct ControllerWeights{NT<:Real} iszero_Ñ_Hc = [iszero(Ñ_Hc)] iszero_L_Hp = [iszero(L_Hp)] iszero_E = iszero(E) - return new{NT}(M_Hp, Ñ_Hc, L_Hp, E, iszero_M_Hp, iszero_Ñ_Hc, iszero_L_Hp, iszero_E) + return new{NT,typeof(M_Hp),typeof(Ñ_Hc),typeof(L_Hp)}(M_Hp, Ñ_Hc, L_Hp, E, iszero_M_Hp, iszero_Ñ_Hc, iszero_L_Hp, iszero_E) end end @@ -584,10 +589,10 @@ function relaxU(Pu::Matrix{NT}, C_umin, C_umax, nϵ) where NT<:Real # ϵ impacts Z → U conversion for constraint calculations: A_Umin, A_Umax = -[Pu C_umin], [Pu -C_umax] # ϵ has no impact on Z → U conversion for prediction calculations: - P̃u = [Pu zeros(NT, size(Pu, 1))] + P̃u = sparse_hcat(sparse(Pu), spzeros(NT, size(Pu, 1))) else # Z̃ = Z (only hard constraints) A_Umin, A_Umax = -Pu, Pu - P̃u = Pu + P̃u = sparse(Pu) end return A_Umin, A_Umax, P̃u end @@ -621,17 +626,17 @@ bound, which is more precise than a linear inequality constraint. However, it is convenient to treat it as a linear inequality constraint since the optimizer `OSQP.jl` does not support pure bounds on the decision variables. 
""" -function relaxΔU(PΔu::Matrix{NT}, C_Δumin, C_Δumax, ΔUmin, ΔUmax, nϵ) where NT<:Real +function relaxΔU(PΔu::AbstractMatrix{NT}, C_Δumin, C_Δumax, ΔUmin, ΔUmax, nϵ) where NT<:Real nZ = size(PΔu, 2) if nϵ == 1 # Z̃ = [Z; ϵ] ΔŨmin, ΔŨmax = [ΔUmin; NT[0.0]], [ΔUmax; NT[Inf]] # 0 ≤ ϵ ≤ ∞ A_ϵ = [zeros(NT, 1, nZ) NT[1.0]] A_ΔŨmin, A_ΔŨmax = -[PΔu C_Δumin; A_ϵ], [PΔu -C_Δumax; A_ϵ] - P̃Δu = [PΔu zeros(NT, size(PΔu, 1), 1); zeros(NT, 1, size(PΔu, 2)) NT[1.0]] + P̃Δu = blockdiag(sparse(PΔu), spdiagm([one(NT)])) else # Z̃ = Z (only hard constraints) ΔŨmin, ΔŨmax = ΔUmin, ΔUmax A_ΔŨmin, A_ΔŨmax = -PΔu, PΔu - P̃Δu = PΔu + P̃Δu = sparse(PΔu) end return A_ΔŨmin, A_ΔŨmax, ΔŨmin, ΔŨmax, P̃Δu end diff --git a/src/controller/execute.jl b/src/controller/execute.jl index 33fe59b0b..75877e112 100644 --- a/src/controller/execute.jl +++ b/src/controller/execute.jl @@ -370,7 +370,12 @@ function obj_nonlinprog!( end # --- economic term --- E_JE = obj_econ(mpc, model, Ue, Ŷe) - return JR̂y + JΔŨ + JR̂u + E_JE + return ( + JR̂y + + JΔŨ + + JR̂u + + E_JE + ) end "No custom nonlinear constraints `gc` by default, return `gc` unchanged." diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index 0fc281023..b647b078e 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -17,7 +17,8 @@ struct NonLinMPC{ JB<:AbstractADType, PT<:Any, JEfunc<:Function, - GCfunc<:Function + GCfunc<:Function, + M<:AbstractMatrix{NT} } <: PredictiveController{NT} estim::SE transcription::TM @@ -37,8 +38,8 @@ struct NonLinMPC{ p::PT R̂u::Vector{NT} R̂y::Vector{NT} - P̃Δu::Matrix{NT} - P̃u ::Matrix{NT} + P̃Δu::M + P̃u ::M Tu ::Matrix{NT} Tu_lastu0::Vector{NT} Ẽ::Matrix{NT} @@ -107,7 +108,7 @@ struct NonLinMPC{ nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ Z̃ = zeros(NT, nZ̃) buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ) - mpc = new{NT, SE, TM, JM, GB, JB, PT, JEfunc, GCfunc}( + mpc = new{NT, SE, TM, JM, GB, HB, JB, PT, JEfunc, GCfunc, typeof(P̃u)}( estim, transcription, optim, con, gradient, jacobian, Z̃, ŷ, @@ -279,9 +280,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, @@ -310,9 +311,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, @@ -365,9 +366,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, estim.model.ny), Nwt = fill(DEFAULT_NWT, estim.model.nu), Lwt = fill(DEFAULT_LWT, estim.model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, diff --git a/src/controller/transcription.jl b/src/controller/transcription.jl index 756cd6f38..2a930ffb5 100644 --- a/src/controller/transcription.jl +++ b/src/controller/transcription.jl @@ -85,15 +85,15 
@@ function init_ZtoΔU end function init_ZtoΔU( estim::StateEstimator{NT}, transcription::SingleShooting, _ , Hc ) where {NT<:Real} - PΔu = Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc) + PΔu = Diagonal(fill(one(NT), estim.model.nu*Hc)) return PΔu end function init_ZtoΔU( estim::StateEstimator{NT}, transcription::MultipleShooting, Hp, Hc ) where {NT<:Real} - I_nu_Hc = Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc) - PΔu = [I_nu_Hc zeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)] + I_nu_Hc = Diagonal(fill(one(NT), estim.model.nu*Hc)) + PΔu = sparse_hcat(I_nu_Hc , spzeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)) return PΔu end @@ -145,7 +145,8 @@ function init_ZtoU( ) where {NT<:Real} model = estim.model # Pu and Tu are `Matrix{NT}`, conversion is faster than `Matrix{Bool}` or `BitMatrix` - I_nu = Matrix{NT}(I, model.nu, model.nu) + I_nu = Diagonal(fill(one(NT), model.nu)) + # TODO: make PU and friends sparse PU_Hc = LowerTriangular(repeat(I_nu, Hc, Hc)) PUdagger = [PU_Hc; repeat(I_nu, Hp - Hc, Hc)] Pu = init_PUmat(estim, transcription, Hp, Hc, PUdagger) From b818d7bdae8f039fb873b8007c9dd57673f33ffa Mon Sep 17 00:00:00 2001 From: franckgaga Date: Sun, 11 May 2025 13:42:19 -0400 Subject: [PATCH 3/8] Revert "Make Hessian sparsity detection work with SCT (prototype)" This reverts commit f7d729384d90f08bd50baf574d1f2eaaeae882a3. --- Project.toml | 2 -- src/ModelPredictiveControl.jl | 3 --- src/controller/construct.jl | 41 +++++++++++++++------------------ src/controller/execute.jl | 7 +----- src/controller/nonlinmpc.jl | 27 +++++++++++----------- src/controller/transcription.jl | 9 ++++---- 6 files changed, 36 insertions(+), 53 deletions(-) diff --git a/Project.toml b/Project.toml index 73b90731a..3c72934f2 100644 --- a/Project.toml +++ b/Project.toml @@ -16,7 +16,6 @@ PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" @@ -37,7 +36,6 @@ PrecompileTools = "1" ProgressLogging = "0.1" Random = "1.10" RecipesBase = "1" -SparseArrays = "1.11.0" SparseConnectivityTracer = "0.6.13" SparseMatrixColorings = "0.4.14" TestItemRunner = "1" diff --git a/src/ModelPredictiveControl.jl b/src/ModelPredictiveControl.jl index da71c038f..adefdfef2 100644 --- a/src/ModelPredictiveControl.jl +++ b/src/ModelPredictiveControl.jl @@ -3,7 +3,6 @@ module ModelPredictiveControl using PrecompileTools using LinearAlgebra using Random: randn -using SparseArrays using RecipesBase using ProgressLogging @@ -48,7 +47,6 @@ include("state_estim.jl") include("predictive_control.jl") include("plot_sim.jl") -#= @setup_workload begin # Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the # size of the precompile file and potentially make loading faster. 
@@ -58,6 +56,5 @@ include("plot_sim.jl") include("precompile.jl") end end -=# end \ No newline at end of file diff --git a/src/controller/construct.jl b/src/controller/construct.jl index 9f53adf87..16b7b745e 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -1,11 +1,11 @@ -struct PredictiveControllerBuffer{NT<:Real,M<:AbstractMatrix{NT}} +struct PredictiveControllerBuffer{NT<:Real} u ::Vector{NT} Z̃ ::Vector{NT} D̂ ::Vector{NT} Ŷ ::Vector{NT} U ::Vector{NT} Ẽ ::Matrix{NT} - P̃u::M + P̃u::Matrix{NT} empty::Vector{NT} end @@ -29,19 +29,14 @@ function PredictiveControllerBuffer( Ẽ = Matrix{NT}(undef, ny*Hp, nZ̃) P̃u = Matrix{NT}(undef, nu*Hp, nZ̃) empty = Vector{NT}(undef, 0) - return PredictiveControllerBuffer{NT,typeof(P̃u)}(u, Z̃, D̂, Ŷ, U, Ẽ, P̃u, empty) + return PredictiveControllerBuffer{NT}(u, Z̃, D̂, Ŷ, U, Ẽ, P̃u, empty) end "Include all the objective function weights of [`PredictiveController`](@ref)" -struct ControllerWeights{ - NT<:Real, - H1<:Hermitian{NT, <:AbstractMatrix{NT}}, - H2<:Hermitian{NT, <:AbstractMatrix{NT}}, - H3<:Hermitian{NT, <:AbstractMatrix{NT}}, -} - M_Hp::H1 - Ñ_Hc::H2 - L_Hp::H3 +struct ControllerWeights{NT<:Real} + M_Hp::Hermitian{NT, Matrix{NT}} + Ñ_Hc::Hermitian{NT, Matrix{NT}} + L_Hp::Hermitian{NT, Matrix{NT}} E ::NT iszero_M_Hp::Vector{Bool} iszero_Ñ_Hc::Vector{Bool} @@ -51,15 +46,15 @@ struct ControllerWeights{ model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt=Inf, Ewt=0 ) where NT<:Real validate_weights(model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt) - M_Hp = Hermitian(convert.(NT, M_Hp), :L) - N_Hc = Hermitian(convert.(NT, N_Hc), :L) - L_Hp = Hermitian(convert.(NT, L_Hp), :L) + # convert `Diagonal` to normal `Matrix` if required: + M_Hp = Hermitian(convert(Matrix{NT}, M_Hp), :L) + N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) + L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) nΔU = size(N_Hc, 1) C = Cwt if !isinf(Cwt) # ΔŨ = [ΔU; ϵ] (ϵ is the slack variable) - # Ñ_Hc = Hermitian([N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C], :L) - Ñ_Hc = Hermitian(blockdiag(sparse(N_Hc), sparse(Diagonal([C]))), :L) + Ñ_Hc = Hermitian([N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C], :L) else # ΔŨ = ΔU (only hard constraints) Ñ_Hc = N_Hc @@ -69,7 +64,7 @@ struct ControllerWeights{ iszero_Ñ_Hc = [iszero(Ñ_Hc)] iszero_L_Hp = [iszero(L_Hp)] iszero_E = iszero(E) - return new{NT,typeof(M_Hp),typeof(Ñ_Hc),typeof(L_Hp)}(M_Hp, Ñ_Hc, L_Hp, E, iszero_M_Hp, iszero_Ñ_Hc, iszero_L_Hp, iszero_E) + return new{NT}(M_Hp, Ñ_Hc, L_Hp, E, iszero_M_Hp, iszero_Ñ_Hc, iszero_L_Hp, iszero_E) end end @@ -589,10 +584,10 @@ function relaxU(Pu::Matrix{NT}, C_umin, C_umax, nϵ) where NT<:Real # ϵ impacts Z → U conversion for constraint calculations: A_Umin, A_Umax = -[Pu C_umin], [Pu -C_umax] # ϵ has no impact on Z → U conversion for prediction calculations: - P̃u = sparse_hcat(sparse(Pu), spzeros(NT, size(Pu, 1))) + P̃u = [Pu zeros(NT, size(Pu, 1))] else # Z̃ = Z (only hard constraints) A_Umin, A_Umax = -Pu, Pu - P̃u = sparse(Pu) + P̃u = Pu end return A_Umin, A_Umax, P̃u end @@ -626,17 +621,17 @@ bound, which is more precise than a linear inequality constraint. However, it is convenient to treat it as a linear inequality constraint since the optimizer `OSQP.jl` does not support pure bounds on the decision variables. 
""" -function relaxΔU(PΔu::AbstractMatrix{NT}, C_Δumin, C_Δumax, ΔUmin, ΔUmax, nϵ) where NT<:Real +function relaxΔU(PΔu::Matrix{NT}, C_Δumin, C_Δumax, ΔUmin, ΔUmax, nϵ) where NT<:Real nZ = size(PΔu, 2) if nϵ == 1 # Z̃ = [Z; ϵ] ΔŨmin, ΔŨmax = [ΔUmin; NT[0.0]], [ΔUmax; NT[Inf]] # 0 ≤ ϵ ≤ ∞ A_ϵ = [zeros(NT, 1, nZ) NT[1.0]] A_ΔŨmin, A_ΔŨmax = -[PΔu C_Δumin; A_ϵ], [PΔu -C_Δumax; A_ϵ] - P̃Δu = blockdiag(sparse(PΔu), spdiagm([one(NT)])) + P̃Δu = [PΔu zeros(NT, size(PΔu, 1), 1); zeros(NT, 1, size(PΔu, 2)) NT[1.0]] else # Z̃ = Z (only hard constraints) ΔŨmin, ΔŨmax = ΔUmin, ΔUmax A_ΔŨmin, A_ΔŨmax = -PΔu, PΔu - P̃Δu = sparse(PΔu) + P̃Δu = PΔu end return A_ΔŨmin, A_ΔŨmax, ΔŨmin, ΔŨmax, P̃Δu end diff --git a/src/controller/execute.jl b/src/controller/execute.jl index 75877e112..33fe59b0b 100644 --- a/src/controller/execute.jl +++ b/src/controller/execute.jl @@ -370,12 +370,7 @@ function obj_nonlinprog!( end # --- economic term --- E_JE = obj_econ(mpc, model, Ue, Ŷe) - return ( - JR̂y + - JΔŨ + - JR̂u + - E_JE - ) + return JR̂y + JΔŨ + JR̂u + E_JE end "No custom nonlinear constraints `gc` by default, return `gc` unchanged." diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index b647b078e..0fc281023 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -17,8 +17,7 @@ struct NonLinMPC{ JB<:AbstractADType, PT<:Any, JEfunc<:Function, - GCfunc<:Function, - M<:AbstractMatrix{NT} + GCfunc<:Function } <: PredictiveController{NT} estim::SE transcription::TM @@ -38,8 +37,8 @@ struct NonLinMPC{ p::PT R̂u::Vector{NT} R̂y::Vector{NT} - P̃Δu::M - P̃u ::M + P̃Δu::Matrix{NT} + P̃u ::Matrix{NT} Tu ::Matrix{NT} Tu_lastu0::Vector{NT} Ẽ::Matrix{NT} @@ -108,7 +107,7 @@ struct NonLinMPC{ nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ Z̃ = zeros(NT, nZ̃) buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ) - mpc = new{NT, SE, TM, JM, GB, HB, JB, PT, JEfunc, GCfunc, typeof(P̃u)}( + mpc = new{NT, SE, TM, JM, GB, JB, PT, JEfunc, GCfunc}( estim, transcription, optim, con, gradient, jacobian, Z̃, ŷ, @@ -280,9 +279,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = Diagonal(repeat(Mwt, Hp)), - N_Hc = Diagonal(repeat(Nwt, Hc)), - L_Hp = Diagonal(repeat(Lwt, Hp)), + M_Hp = diagm(repeat(Mwt, Hp)), + N_Hc = diagm(repeat(Nwt, Hc)), + L_Hp = diagm(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, @@ -311,9 +310,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = Diagonal(repeat(Mwt, Hp)), - N_Hc = Diagonal(repeat(Nwt, Hc)), - L_Hp = Diagonal(repeat(Lwt, Hp)), + M_Hp = diagm(repeat(Mwt, Hp)), + N_Hc = diagm(repeat(Nwt, Hc)), + L_Hp = diagm(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, @@ -366,9 +365,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, estim.model.ny), Nwt = fill(DEFAULT_NWT, estim.model.nu), Lwt = fill(DEFAULT_LWT, estim.model.nu), - M_Hp = Diagonal(repeat(Mwt, Hp)), - N_Hc = Diagonal(repeat(Nwt, Hc)), - L_Hp = Diagonal(repeat(Lwt, Hp)), + M_Hp = diagm(repeat(Mwt, Hp)), + N_Hc = diagm(repeat(Nwt, Hc)), + L_Hp = diagm(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, diff --git a/src/controller/transcription.jl b/src/controller/transcription.jl index 2a930ffb5..756cd6f38 100644 --- a/src/controller/transcription.jl +++ b/src/controller/transcription.jl @@ -85,15 +85,15 
@@ function init_ZtoΔU end function init_ZtoΔU( estim::StateEstimator{NT}, transcription::SingleShooting, _ , Hc ) where {NT<:Real} - PΔu = Diagonal(fill(one(NT), estim.model.nu*Hc)) + PΔu = Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc) return PΔu end function init_ZtoΔU( estim::StateEstimator{NT}, transcription::MultipleShooting, Hp, Hc ) where {NT<:Real} - I_nu_Hc = Diagonal(fill(one(NT), estim.model.nu*Hc)) - PΔu = sparse_hcat(I_nu_Hc , spzeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)) + I_nu_Hc = Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc) + PΔu = [I_nu_Hc zeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)] return PΔu end @@ -145,8 +145,7 @@ function init_ZtoU( ) where {NT<:Real} model = estim.model # Pu and Tu are `Matrix{NT}`, conversion is faster than `Matrix{Bool}` or `BitMatrix` - I_nu = Diagonal(fill(one(NT), model.nu)) - # TODO: make PU and friends sparse + I_nu = Matrix{NT}(I, model.nu, model.nu) PU_Hc = LowerTriangular(repeat(I_nu, Hc, Hc)) PUdagger = [PU_Hc; repeat(I_nu, Hp - Hc, Hc)] Pu = init_PUmat(estim, transcription, Hp, Hc, PUdagger) From a191134f8970904bc83c7a19faecfee4821ab10d Mon Sep 17 00:00:00 2001 From: franckgaga Date: Sun, 11 May 2025 14:23:15 -0400 Subject: [PATCH 4/8] changed: lower case u in variable for consistency --- src/controller/execute.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/controller/execute.jl b/src/controller/execute.jl index 33fe59b0b..c4a303918 100644 --- a/src/controller/execute.jl +++ b/src/controller/execute.jl @@ -212,7 +212,7 @@ function initpred!(mpc::PredictiveController, model::LinModel, d, D̂, R̂y, R̂ mul!(F, mpc.G, mpc.d0, 1, 1) # F = F + G*d0 mul!(F, mpc.J, mpc.D̂0, 1, 1) # F = F + J*D̂0 end - Cy, Cu, M_Hp_Ẽ, L_Hp_P̃U = mpc.buffer.Ŷ, mpc.buffer.U, mpc.buffer.Ẽ, mpc.buffer.P̃u + Cy, Cu, M_Hp_Ẽ, L_Hp_P̃u = mpc.buffer.Ŷ, mpc.buffer.U, mpc.buffer.Ẽ, mpc.buffer.P̃u q̃, r = mpc.q̃, mpc.r q̃ .= 0 r .= 0 @@ -226,8 +226,8 @@ function initpred!(mpc::PredictiveController, model::LinModel, d, D̂, R̂y, R̂ # --- input setpoint tracking term --- if !mpc.weights.iszero_L_Hp[] Cu .= mpc.Tu_lastu0 .+ mpc.Uop .- R̂u - mul!(L_Hp_P̃U, mpc.weights.L_Hp, mpc.P̃u) - mul!(q̃, L_Hp_P̃U', Cu, 1, 1) # q̃ = q̃ + L_Hp*P̃u'*Cu + mul!(L_Hp_P̃u, mpc.weights.L_Hp, mpc.P̃u) + mul!(q̃, L_Hp_P̃u', Cu, 1, 1) # q̃ = q̃ + L_Hp*P̃u'*Cu r .+= dot(Cu, mpc.weights.L_Hp, Cu) # r = r + Cu'*L_Hp*Cu end # --- finalize --- From e4d6826f8d7745fc4bc22a099b5d764de99d2fa8 Mon Sep 17 00:00:00 2001 From: franckgaga Date: Mon, 12 May 2025 11:03:34 -0400 Subject: [PATCH 5/8] added: preserve weight matrix types in `PredictiveController`s --- src/controller/construct.jl | 65 +++++++++++++++++-------- src/controller/explicitmpc.jl | 36 +++++++------- src/controller/linmpc.jl | 42 +++++++++------- src/controller/nonlinmpc.jl | 80 +++++++++++-------------------- test/3_test_predictive_control.jl | 8 +++- 5 files changed, 123 insertions(+), 108 deletions(-) diff --git a/src/controller/construct.jl b/src/controller/construct.jl index 16b7b745e..2e7dc1d01 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -33,41 +33,64 @@ function PredictiveControllerBuffer( end "Include all the objective function weights of [`PredictiveController`](@ref)" -struct ControllerWeights{NT<:Real} - M_Hp::Hermitian{NT, Matrix{NT}} - Ñ_Hc::Hermitian{NT, Matrix{NT}} - L_Hp::Hermitian{NT, Matrix{NT}} +struct ControllerWeights{ + NT<:Real, + # parameters to support both dense and Diagonal matrices (with specialization): + MW<:AbstractMatrix{NT}, + 
NW<:AbstractMatrix{NT}, + LW<:AbstractMatrix{NT}, +} + M_Hp::Hermitian{NT, MW} + Ñ_Hc::Hermitian{NT, NW} + L_Hp::Hermitian{NT, LW} E ::NT iszero_M_Hp::Vector{Bool} iszero_Ñ_Hc::Vector{Bool} iszero_L_Hp::Vector{Bool} iszero_E::Bool + isinf_C ::Bool function ControllerWeights{NT}( - model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt=Inf, Ewt=0 - ) where NT<:Real + model, Hp, Hc, M_Hp::MW, N_Hc::NW, L_Hp::LW, Cwt=Inf, Ewt=0 + ) where { + NT<:Real, + MW<:AbstractMatrix{NT}, + NW<:AbstractMatrix{NT}, + LW<:AbstractMatrix{NT} + } validate_weights(model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt) - # convert `Diagonal` to normal `Matrix` if required: - M_Hp = Hermitian(convert(Matrix{NT}, M_Hp), :L) - N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) - L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) nΔU = size(N_Hc, 1) C = Cwt - if !isinf(Cwt) + isinf_C = isinf(C) + if !isinf_C # ΔŨ = [ΔU; ϵ] (ϵ is the slack variable) - Ñ_Hc = Hermitian([N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C], :L) + Ñ_Hc = [N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C] + Ñ_Hc = NW(Ñ_Hc) # preserve special type e.g. Diagonal else # ΔŨ = ΔU (only hard constraints) Ñ_Hc = N_Hc - end + end + M_Hp = Hermitian(M_Hp, :L) + Ñ_Hc = Hermitian(Ñ_Hc, :L) + L_Hp = Hermitian(L_Hp, :L) E = Ewt iszero_M_Hp = [iszero(M_Hp)] iszero_Ñ_Hc = [iszero(Ñ_Hc)] iszero_L_Hp = [iszero(L_Hp)] iszero_E = iszero(E) - return new{NT}(M_Hp, Ñ_Hc, L_Hp, E, iszero_M_Hp, iszero_Ñ_Hc, iszero_L_Hp, iszero_E) + return new{NT, MW, NW, LW}( + M_Hp, Ñ_Hc, L_Hp, E, + iszero_M_Hp, iszero_Ñ_Hc, iszero_L_Hp, iszero_E, isinf_C + ) end end +"Outer constructor to convert weight matrix number type to `NT` if necessary." +function ControllerWeights{NT}( + model, Hp, Hc, M_Hp::MW, N_Hc::NW, L_Hp::LW, Cwt=Inf, Ewt=0 + ) where {NT<:Real, MW<:AbstractMatrix, NW<:AbstractMatrix, LW<:AbstractMatrix} + return ControllerWeights{NT}(model, Hp, Hc, NT.(M_Hp), NT.(N_Hc), NT.(L_Hp), Cwt, Ewt) +end + "Include all the data for the constraints of [`PredictiveController`](@ref)" struct ControllerConstraint{NT<:Real, GCfunc<:Union{Nothing, Function}} # matrices for the terminal constraints: @@ -478,8 +501,10 @@ end """ init_defaultcon_mpc( - estim::StateEstimator, transcription::TranscriptionMethod, - Hp, Hc, C, + estim::StateEstimator, + weights::ControllerWeights + transcription::TranscriptionMethod, + Hp, Hc, PΔu, Pu, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, Eŝ, Fŝ, Gŝ, Jŝ, Kŝ, Vŝ, Bŝ, @@ -491,8 +516,10 @@ Init `ControllerConstraint` struct with default parameters based on estimator `e Also return `P̃Δu`, `P̃u`, `Ẽ` and `Ẽŝ` matrices for the the augmented decision vector `Z̃`. """ function init_defaultcon_mpc( - estim::StateEstimator{NT}, transcription::TranscriptionMethod, - Hp, Hc, C, + estim::StateEstimator{NT}, + weights::ControllerWeights, + transcription::TranscriptionMethod, + Hp, Hc, PΔu, Pu, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, Eŝ, Fŝ, Gŝ, Jŝ, Kŝ, Vŝ, Bŝ, @@ -500,7 +527,7 @@ function init_defaultcon_mpc( ) where {NT<:Real, GCfunc<:Union{Nothing, Function}} model = estim.model nu, ny, nx̂ = model.nu, model.ny, estim.nx̂ - nϵ = isinf(C) ? 0 : 1 + nϵ = weights.isinf_C ? 
0 : 1 u0min, u0max = fill(convert(NT,-Inf), nu), fill(convert(NT,+Inf), nu) Δumin, Δumax = fill(convert(NT,-Inf), nu), fill(convert(NT,+Inf), nu) y0min, y0max = fill(convert(NT,-Inf), ny), fill(convert(NT,+Inf), ny) diff --git a/src/controller/explicitmpc.jl b/src/controller/explicitmpc.jl index d8729d3d3..f6f646792 100644 --- a/src/controller/explicitmpc.jl +++ b/src/controller/explicitmpc.jl @@ -1,4 +1,8 @@ -struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} +struct ExplicitMPC{ + NT<:Real, + SE<:StateEstimator, + CW<:ControllerWeights +} <: PredictiveController{NT} estim::SE transcription::SingleShooting Z̃::Vector{NT} @@ -6,7 +10,7 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} Hp::Int Hc::Int nϵ::Int - weights::ControllerWeights{NT} + weights::CW R̂u::Vector{NT} R̂y::Vector{NT} P̃Δu::Matrix{NT} @@ -34,17 +38,12 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} Dop::Vector{NT} buffer::PredictiveControllerBuffer{NT} function ExplicitMPC{NT}( - estim::SE, Hp, Hc, M_Hp, N_Hc, L_Hp - ) where {NT<:Real, SE<:StateEstimator} + estim::SE, Hp, Hc, weights::CW + ) where {NT<:Real, SE<:StateEstimator, CW<:ControllerWeights} model = estim.model nu, ny, nd, nx̂ = model.nu, model.ny, model.nd, estim.nx̂ ŷ = copy(model.yop) # dummy vals (updated just before optimization) nϵ = 0 # no slack variable ϵ for ExplicitMPC - weights = ControllerWeights{NT}(model, Hp, Hc, M_Hp, N_Hc, L_Hp) - # convert `Diagonal` to normal `Matrix` if required: - M_Hp = Hermitian(convert(Matrix{NT}, M_Hp), :L) - N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) - L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) # dummy vals (updated just before optimization): R̂y, R̂u, Tu_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) transcription = SingleShooting() # explicit MPC only supports SingleShooting @@ -53,7 +52,7 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} E, G, J, K, V, B = init_predmat(model, estim, transcription, Hp, Hc) # dummy val (updated just before optimization): F, fx̂ = zeros(NT, ny*Hp), zeros(NT, nx̂) - P̃Δu, P̃u, Ñ_Hc, Ẽ = PΔu, Pu, N_Hc, E # no slack variable ϵ for ExplicitMPC + P̃Δu, P̃u, Ẽ = PΔu, Pu, E # no slack variable ϵ for ExplicitMPC H̃ = init_quadprog(model, weights, Ẽ, P̃Δu, P̃u) # dummy vals (updated just before optimization): q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1) @@ -65,7 +64,7 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ Z̃ = zeros(NT, nZ̃) buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ) - mpc = new{NT, SE}( + mpc = new{NT, SE, CW}( estim, transcription, Z̃, ŷ, @@ -131,9 +130,9 @@ function ExplicitMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), kwargs... ) estim = SteadyKalmanFilter(model; kwargs...) 
@@ -154,9 +153,9 @@ function ExplicitMPC( Mwt = fill(DEFAULT_MWT, estim.model.ny), Nwt = fill(DEFAULT_NWT, estim.model.nu), Lwt = fill(DEFAULT_LWT, estim.model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), ) where {NT<:Real, SE<:StateEstimator{NT}} isa(estim.model, LinModel) || error("estim.model type must be a LinModel") nk = estimate_delays(estim.model) @@ -164,7 +163,8 @@ function ExplicitMPC( @warn("prediction horizon Hp ($Hp) ≤ estimated number of delays in model "* "($nk), the closed-loop system may be unstable or zero-gain (unresponsive)") end - return ExplicitMPC{NT}(estim, Hp, Hc, M_Hp, N_Hc, L_Hp) + weights = ControllerWeights{NT}(estim.model, Hp, Hc, M_Hp, N_Hc, L_Hp) + return ExplicitMPC{NT}(estim, Hp, Hc, weights) end setconstraint!(::ExplicitMPC; kwargs...) = error("ExplicitMPC does not support constraints.") diff --git a/src/controller/linmpc.jl b/src/controller/linmpc.jl index d13cab407..03122b651 100644 --- a/src/controller/linmpc.jl +++ b/src/controller/linmpc.jl @@ -4,6 +4,7 @@ const DEFAULT_LINMPC_TRANSCRIPTION = SingleShooting() struct LinMPC{ NT<:Real, SE<:StateEstimator, + CW<:ControllerWeights, TM<:TranscriptionMethod, JM<:JuMP.GenericModel } <: PredictiveController{NT} @@ -18,7 +19,7 @@ struct LinMPC{ Hp::Int Hc::Int nϵ::Int - weights::ControllerWeights{NT} + weights::CW R̂u::Vector{NT} R̂y::Vector{NT} P̃Δu::Matrix{NT} @@ -45,13 +46,18 @@ struct LinMPC{ Dop::Vector{NT} buffer::PredictiveControllerBuffer{NT} function LinMPC{NT}( - estim::SE, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, + estim::SE, Hp, Hc, weights::CW, transcription::TM, optim::JM - ) where {NT<:Real, SE<:StateEstimator, TM<:TranscriptionMethod, JM<:JuMP.GenericModel} + ) where { + NT<:Real, + SE<:StateEstimator, + CW<:ControllerWeights, + TM<:TranscriptionMethod, + JM<:JuMP.GenericModel + } model = estim.model nu, ny, nd, nx̂ = model.nu, model.ny, model.nd, estim.nx̂ ŷ = copy(model.yop) # dummy vals (updated just before optimization) - weights = ControllerWeights{NT}(model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt) # dummy vals (updated just before optimization): R̂y, R̂u, Tu_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) PΔu = init_ZtoΔU(estim, transcription, Hp, Hc) @@ -63,8 +69,9 @@ struct LinMPC{ # dummy vals (updated just before optimization): F, fx̂, Fŝ = zeros(NT, ny*Hp), zeros(NT, nx̂), zeros(NT, nx̂*Hp) con, nϵ, P̃Δu, P̃u, Ẽ, Ẽŝ = init_defaultcon_mpc( - estim, transcription, - Hp, Hc, Cwt, PΔu, Pu, E, + estim, weights, transcription, + Hp, Hc, + PΔu, Pu, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, Eŝ, Fŝ, Gŝ, Jŝ, Kŝ, Vŝ, Bŝ ) @@ -78,7 +85,7 @@ struct LinMPC{ nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ Z̃ = zeros(NT, nZ̃) buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ) - mpc = new{NT, SE, TM, JM}( + mpc = new{NT, SE, CW, TM, JM}( estim, transcription, optim, con, Z̃, ŷ, Hp, Hc, nϵ, @@ -140,9 +147,9 @@ arguments. This controller allocates memory at each time step for the optimizati - `Mwt=fill(1.0,model.ny)` : main diagonal of ``\mathbf{M}`` weight matrix (vector). - `Nwt=fill(0.1,model.nu)` : main diagonal of ``\mathbf{N}`` weight matrix (vector). - `Lwt=fill(0.0,model.nu)` : main diagonal of ``\mathbf{L}`` weight matrix (vector). -- `M_Hp=diagm(repeat(Mwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{M}_{H_p}``. -- `N_Hc=diagm(repeat(Nwt,Hc))` : positive semidefinite symmetric matrix ``\mathbf{N}_{H_c}``. 
-- `L_Hp=diagm(repeat(Lwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{L}_{H_p}``. +- `M_Hp=Diagonal(repeat(Mwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{M}_{H_p}``. +- `N_Hc=Diagonal(repeat(Nwt,Hc))` : positive semidefinite symmetric matrix ``\mathbf{N}_{H_c}``. +- `L_Hp=Diagonal(repeat(Lwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{L}_{H_p}``. - `Cwt=1e5` : slack variable weight ``C`` (scalar), use `Cwt=Inf` for hard constraints only. - `transcription=SingleShooting()` : a [`TranscriptionMethod`](@ref) for the optimization. - `optim=JuMP.Model(OSQP.MathOptInterfaceOSQP.Optimizer)` : quadratic optimizer used in @@ -199,9 +206,9 @@ function LinMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, transcription::TranscriptionMethod = DEFAULT_LINMPC_TRANSCRIPTION, optim::JuMP.GenericModel = JuMP.Model(DEFAULT_LINMPC_OPTIMIZER, add_bridges=false), @@ -242,9 +249,9 @@ function LinMPC( Mwt = fill(DEFAULT_MWT, estim.model.ny), Nwt = fill(DEFAULT_NWT, estim.model.nu), Lwt = fill(DEFAULT_LWT, estim.model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, transcription::TranscriptionMethod = DEFAULT_LINMPC_TRANSCRIPTION, optim::JM = JuMP.Model(DEFAULT_LINMPC_OPTIMIZER, add_bridges=false), @@ -255,7 +262,8 @@ function LinMPC( @warn("prediction horizon Hp ($Hp) ≤ estimated number of delays in model "* "($nk), the closed-loop system may be unstable or zero-gain (unresponsive)") end - return LinMPC{NT}(estim, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, transcription, optim) + weights = ControllerWeights{NT}(estim.model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt) + return LinMPC{NT}(estim, Hp, Hc, weights, transcription, optim) end """ diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index 0fc281023..b02dc65db 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -9,8 +9,9 @@ const DEFAULT_NONLINMPC_JACSPARSE = AutoSparse( ) struct NonLinMPC{ - NT<:Real, + NT<:Real, SE<:StateEstimator, + CW<:ControllerWeights, TM<:TranscriptionMethod, JM<:JuMP.GenericModel, GB<:AbstractADType, @@ -32,7 +33,7 @@ struct NonLinMPC{ Hp::Int Hc::Int nϵ::Int - weights::ControllerWeights{NT} + weights::CW JE::JEfunc p::PT R̂u::Vector{NT} @@ -61,12 +62,14 @@ struct NonLinMPC{ Dop::Vector{NT} buffer::PredictiveControllerBuffer{NT} function NonLinMPC{NT}( - estim::SE, - Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt, JE::JEfunc, gc!::GCfunc, nc, p::PT, - transcription::TM, optim::JM, gradient::GB, jacobian::JB + estim::SE, Hp, Hc, weights::CW, + JE::JEfunc, gc!::GCfunc, nc, p::PT, + transcription::TM, optim::JM, + gradient::GB, jacobian::JB ) where { NT<:Real, - SE<:StateEstimator, + SE<:StateEstimator, + CW<:ControllerWeights, TM<:TranscriptionMethod, JM<:JuMP.GenericModel, GB<:AbstractADType, @@ -78,7 +81,6 @@ struct NonLinMPC{ model = estim.model nu, ny, nd, nx̂ = model.nu, model.ny, model.nd, estim.nx̂ ŷ = copy(model.yop) # dummy vals (updated just before optimization) - weights = ControllerWeights{NT}(model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt) # dummy vals (updated just before optimization): R̂y, R̂u, Tu_lastu0 = zeros(NT, 
ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) PΔu = init_ZtoΔU(estim, transcription, Hp, Hc) @@ -90,8 +92,9 @@ struct NonLinMPC{ # dummy vals (updated just before optimization): F, fx̂, Fŝ = zeros(NT, ny*Hp), zeros(NT, nx̂), zeros(NT, nx̂*Hp) con, nϵ, P̃Δu, P̃u, Ẽ, Ẽŝ = init_defaultcon_mpc( - estim, transcription, - Hp, Hc, Cwt, PΔu, Pu, E, + estim, weights, transcription, + Hp, Hc, + PΔu, Pu, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, Eŝ, Fŝ, Gŝ, Jŝ, Kŝ, Vŝ, Bŝ, gc!, nc @@ -107,7 +110,7 @@ struct NonLinMPC{ nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ Z̃ = zeros(NT, nZ̃) buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ) - mpc = new{NT, SE, TM, JM, GB, JB, PT, JEfunc, GCfunc}( + mpc = new{NT, SE, CW, TM, JM, GB, JB, PT, JEfunc, GCfunc}( estim, transcription, optim, con, gradient, jacobian, Z̃, ŷ, @@ -187,9 +190,9 @@ This controller allocates memory at each time step for the optimization. - `Mwt=fill(1.0,model.ny)` : main diagonal of ``\mathbf{M}`` weight matrix (vector). - `Nwt=fill(0.1,model.nu)` : main diagonal of ``\mathbf{N}`` weight matrix (vector). - `Lwt=fill(0.0,model.nu)` : main diagonal of ``\mathbf{L}`` weight matrix (vector). -- `M_Hp=diagm(repeat(Mwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{M}_{H_p}``. -- `N_Hc=diagm(repeat(Nwt,Hc))` : positive semidefinite symmetric matrix ``\mathbf{N}_{H_c}``. -- `L_Hp=diagm(repeat(Lwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{L}_{H_p}``. +- `M_Hp=Diagonal(repeat(Mwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{M}_{H_p}``. +- `N_Hc=Diagonal(repeat(Nwt,Hc))` : positive semidefinite symmetric matrix ``\mathbf{N}_{H_c}``. +- `L_Hp=Diagonal(repeat(Lwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{L}_{H_p}``. - `Cwt=1e5` : slack variable weight ``C`` (scalar), use `Cwt=Inf` for hard constraints only. - `Ewt=0.0` : economic costs weight ``E`` (scalar). - `JE=(_,_,_,_)->0.0` : economic or custom cost function ``J_E(\mathbf{U_e}, \mathbf{Ŷ_e}, @@ -279,40 +282,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, model.ny), Nwt = fill(DEFAULT_NWT, model.nu), Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), - Cwt = DEFAULT_CWT, - Ewt = DEFAULT_EWT, - JE ::Function = (_,_,_,_) -> 0.0, - gc!::Function = (_,_,_,_,_,_) -> nothing, - gc ::Function = gc!, - nc::Int = 0, - p = model.p, - transcription::TranscriptionMethod = DEFAULT_NONLINMPC_TRANSCRIPTION, - optim::JuMP.GenericModel = JuMP.Model(DEFAULT_NONLINMPC_OPTIMIZER, add_bridges=false), - gradient::AbstractADType = DEFAULT_NONLINMPC_GRADIENT, - jacobian::AbstractADType = default_jacobian(transcription), - kwargs... -) - estim = UnscentedKalmanFilter(model; kwargs...) - return NonLinMPC( - estim; - Hp, Hc, Mwt, Nwt, Lwt, Cwt, Ewt, JE, gc, nc, p, M_Hp, N_Hc, L_Hp, - transcription, optim, gradient, jacobian - ) -end - -function NonLinMPC( - model::LinModel; - Hp::Int = default_Hp(model), - Hc::Int = DEFAULT_HC, - Mwt = fill(DEFAULT_MWT, model.ny), - Nwt = fill(DEFAULT_NWT, model.nu), - Lwt = fill(DEFAULT_LWT, model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, @@ -326,7 +298,7 @@ function NonLinMPC( jacobian::AbstractADType = default_jacobian(transcription), kwargs... ) - estim = SteadyKalmanFilter(model; kwargs...) 
+ estim = default_estimator(model; kwargs...) return NonLinMPC( estim; Hp, Hc, Mwt, Nwt, Lwt, Cwt, Ewt, JE, gc, nc, p, M_Hp, N_Hc, L_Hp, @@ -334,6 +306,8 @@ function NonLinMPC( ) end +default_estimator(model::SimModel; kwargs...) = UnscentedKalmanFilter(model, kwargs...) +default_estimator(model::LinModel; kwargs...) = SteadyKalmanFilter(model, kwargs...) """ NonLinMPC(estim::StateEstimator; ) @@ -365,9 +339,9 @@ function NonLinMPC( Mwt = fill(DEFAULT_MWT, estim.model.ny), Nwt = fill(DEFAULT_NWT, estim.model.nu), Lwt = fill(DEFAULT_LWT, estim.model.nu), - M_Hp = diagm(repeat(Mwt, Hp)), - N_Hc = diagm(repeat(Nwt, Hc)), - L_Hp = diagm(repeat(Lwt, Hp)), + M_Hp = Diagonal(repeat(Mwt, Hp)), + N_Hc = Diagonal(repeat(Nwt, Hc)), + L_Hp = Diagonal(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, JE ::Function = (_,_,_,_) -> 0.0, @@ -390,9 +364,9 @@ function NonLinMPC( end validate_JE(NT, JE) gc! = get_mutating_gc(NT, gc) + weights = ControllerWeights{NT}(estim.model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt) return NonLinMPC{NT}( - estim, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt, JE, gc!, nc, p, - transcription, optim, gradient, jacobian + estim, Hp, Hc, weights, JE, gc!, nc, p, transcription, optim, gradient, jacobian ) end diff --git a/test/3_test_predictive_control.jl b/test/3_test_predictive_control.jl index 65209becf..dc2149e4b 100644 --- a/test/3_test_predictive_control.jl +++ b/test/3_test_predictive_control.jl @@ -420,7 +420,13 @@ end mpc13 = ExplicitMPC(model2) @test isa(mpc13, ExplicitMPC{Float32}) - @test_throws ArgumentError LinMPC(model, Hp=0) + @test_logs( + (:warn, + "prediction horizon Hp (0) ≤ estimated number of delays in model (0), the "* + "closed-loop system may be unstable or zero-gain (unresponsive)"), + @test_throws ArgumentError ExplicitMPC(model, Hp=0) + ) + end @testitem "ExplicitMPC moves and getinfo" setup=[SetupMPCtests] begin From 996436cd1da0544521a37ba99c2bfda5d42bbad4 Mon Sep 17 00:00:00 2001 From: franckgaga Date: Mon, 12 May 2025 11:46:05 -0400 Subject: [PATCH 6/8] debug: `NonLinMPC` construction --- src/controller/nonlinmpc.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index b02dc65db..94aba9b4e 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -306,8 +306,8 @@ function NonLinMPC( ) end -default_estimator(model::SimModel; kwargs...) = UnscentedKalmanFilter(model, kwargs...) -default_estimator(model::LinModel; kwargs...) = SteadyKalmanFilter(model, kwargs...) +default_estimator(model::SimModel; kwargs...) = UnscentedKalmanFilter(model; kwargs...) +default_estimator(model::LinModel; kwargs...) = SteadyKalmanFilter(model; kwargs...) """ NonLinMPC(estim::StateEstimator; ) From 14b871aebff654d17a5e44e9c4004cad74f96393 Mon Sep 17 00:00:00 2001 From: franckgaga Date: Mon, 12 May 2025 13:26:11 -0400 Subject: [PATCH 7/8] debug: Julia 1.10 now works --- src/controller/construct.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/controller/construct.jl b/src/controller/construct.jl index 2e7dc1d01..2e6d11eea 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -64,7 +64,7 @@ struct ControllerWeights{ if !isinf_C # ΔŨ = [ΔU; ϵ] (ϵ is the slack variable) Ñ_Hc = [N_Hc zeros(NT, nΔU, 1); zeros(NT, 1, nΔU) C] - Ñ_Hc = NW(Ñ_Hc) # preserve special type e.g. 
Diagonal + isdiag(N_Hc) && (Ñ_Hc = Diagonal(Ñ_Hc)) # NW(Ñ_Hc) does not work on Julia 1.10 else # ΔŨ = ΔU (only hard constraints) Ñ_Hc = N_Hc From 52fe019606c134460052951f3059120eba86c207 Mon Sep 17 00:00:00 2001 From: franckgaga Date: Mon, 12 May 2025 14:36:03 -0400 Subject: [PATCH 8/8] test: new test to validate the type of weight matrices in MPC objects --- Project.toml | 2 +- test/3_test_predictive_control.jl | 57 +++++++++++++++++++++---------- 2 files changed, 40 insertions(+), 19 deletions(-) diff --git a/Project.toml b/Project.toml index 3c72934f2..bbe3e2519 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ModelPredictiveControl" uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c" authors = ["Francis Gagnon"] -version = "1.6.0" +version = "1.6.1" [deps] ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e" diff --git a/test/3_test_predictive_control.jl b/test/3_test_predictive_control.jl index dc2149e4b..fc06dd513 100644 --- a/test/3_test_predictive_control.jl +++ b/test/3_test_predictive_control.jl @@ -11,10 +11,13 @@ @test mpc3.weights.Ñ_Hc[end] ≈ 1e6 mpc4 = LinMPC(model, Mwt=[1,2], Hp=15) @test mpc4.weights.M_Hp ≈ Diagonal(diagm(repeat(Float64[1, 2], 15))) + @test mpc4.weights.M_Hp isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} mpc5 = LinMPC(model, Nwt=[3,4], Cwt=1e3, Hc=5) @test mpc5.weights.Ñ_Hc ≈ Diagonal(diagm([repeat(Float64[3, 4], 5); [1e3]])) + @test mpc5.weights.Ñ_Hc isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} mpc6 = LinMPC(model, Lwt=[0,1], Hp=15) @test mpc6.weights.L_Hp ≈ Diagonal(diagm(repeat(Float64[0, 1], 15))) + @test mpc6.weights.L_Hp isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} mpc7 = @test_logs( (:warn, "Solving time limit is not supported by the optimizer."), LinMPC(model, optim=JuMP.Model(DAQP.Optimizer)) @@ -26,15 +29,19 @@ mpc9 = LinMPC(model, nint_u=[1, 1], nint_ym=[0, 0]) @test mpc9.estim.nint_u == [1, 1] @test mpc9.estim.nint_ym == [0, 0] - mpc10 = LinMPC(model, M_Hp=Diagonal(collect(1.01:0.01:1.2))) - @test mpc10.weights.M_Hp ≈ Diagonal(collect(1.01:0.01:1.2)) - mpc11 = LinMPC(model, N_Hc=Diagonal([0.1,0.11,0.12,0.13]), Cwt=Inf) - @test mpc11.weights.Ñ_Hc ≈ Diagonal([0.1,0.11,0.12,0.13]) - mcp12 = LinMPC(model, L_Hp=Diagonal(collect(0.001:0.001:0.02))) - @test mcp12.weights.L_Hp ≈ Diagonal(collect(0.001:0.001:0.02)) + mpc10 = LinMPC(model, M_Hp=diagm(collect(1.01:0.01:1.2))) + @test mpc10.weights.M_Hp ≈ diagm(collect(1.01:0.01:1.2)) + @test mpc10.weights.M_Hp isa Hermitian{Float64, Matrix{Float64}} + mpc11 = LinMPC(model, N_Hc=diagm([0.1,0.11,0.12,0.13]), Cwt=Inf) + @test mpc11.weights.Ñ_Hc ≈ diagm([0.1,0.11,0.12,0.13]) + @test mpc11.weights.Ñ_Hc isa Hermitian{Float64, Matrix{Float64}} + mcp12 = LinMPC(model, L_Hp=diagm(collect(0.001:0.001:0.02))) + @test mcp12.weights.L_Hp ≈ diagm(collect(0.001:0.001:0.02)) + @test mcp12.weights.L_Hp isa Hermitian{Float64, Matrix{Float64}} model2 = LinModel{Float32}(0.5*ones(1,1), ones(1,1), ones(1,1), zeros(1,0), zeros(1,0), 1.0) mpc13 = LinMPC(model2) @test isa(mpc13, LinMPC{Float32}) + @test isa(mpc13.estim, SteadyKalmanFilter{Float32}) @test isa(mpc13.optim, JuMP.GenericModel{Float64}) # OSQP does not support Float32 mpc14 = LinMPC(model2, transcription=MultipleShooting()) @test mpc14.transcription == MultipleShooting() @@ -400,25 +407,32 @@ end @test size(mpc1.Ẽ,1) == 15*mpc1.estim.model.ny mpc4 = ExplicitMPC(model, Mwt=[1,2], Hp=15) @test mpc4.weights.M_Hp ≈ Diagonal(diagm(repeat(Float64[1, 2], 15))) + @test mpc4.weights.M_Hp isa Hermitian{Float64, 
Diagonal{Float64, Vector{Float64}}} mpc5 = ExplicitMPC(model, Nwt=[3,4], Hc=5) @test mpc5.weights.Ñ_Hc ≈ Diagonal(diagm(repeat(Float64[3, 4], 5))) + @test mpc5.weights.Ñ_Hc isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} mpc6 = ExplicitMPC(model, Lwt=[0,1], Hp=15) @test mpc6.weights.L_Hp ≈ Diagonal(diagm(repeat(Float64[0, 1], 15))) + @test mpc6.weights.L_Hp isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} kf = KalmanFilter(model) mpc8 = ExplicitMPC(kf) @test isa(mpc8.estim, KalmanFilter) mpc9 = ExplicitMPC(model, nint_u=[1, 1], nint_ym=[0, 0]) @test mpc9.estim.nint_u == [1, 1] @test mpc9.estim.nint_ym == [0, 0] - mpc10 = ExplicitMPC(model, M_Hp=Diagonal(collect(1.01:0.01:1.2))) - @test mpc10.weights.M_Hp ≈ Diagonal(collect(1.01:0.01:1.2)) - mpc11 = ExplicitMPC(model, N_Hc=Diagonal([0.1,0.11,0.12,0.13])) - @test mpc11.weights.Ñ_Hc ≈ Diagonal([0.1,0.11,0.12,0.13]) - mcp12 = ExplicitMPC(model, L_Hp=Diagonal(collect(0.001:0.001:0.02))) - @test mcp12.weights.L_Hp ≈ Diagonal(collect(0.001:0.001:0.02)) + mpc10 = ExplicitMPC(model, M_Hp=diagm(collect(1.01:0.01:1.2))) + @test mpc10.weights.M_Hp ≈ diagm(collect(1.01:0.01:1.2)) + @test mpc10.weights.M_Hp isa Hermitian{Float64, Matrix{Float64}} + mpc11 = ExplicitMPC(model, N_Hc=diagm([0.1,0.11,0.12,0.13])) + @test mpc11.weights.Ñ_Hc ≈ diagm([0.1,0.11,0.12,0.13]) + @test mpc11.weights.Ñ_Hc isa Hermitian{Float64, Matrix{Float64}} + mcp12 = ExplicitMPC(model, L_Hp=diagm(collect(0.001:0.001:0.02))) + @test mcp12.weights.L_Hp ≈ diagm(collect(0.001:0.001:0.02)) + @test mcp12.weights.L_Hp isa Hermitian{Float64, Matrix{Float64}} model2 = LinModel{Float32}(0.5*ones(1,1), ones(1,1), ones(1,1), zeros(1,0), zeros(1,0), 1.0) mpc13 = ExplicitMPC(model2) @test isa(mpc13, ExplicitMPC{Float32}) + @test isa(mpc13.estim, SteadyKalmanFilter{Float32}) @test_logs( (:warn, @@ -582,10 +596,13 @@ end @test nmpc3.weights.Ñ_Hc[end] == 1e6 nmpc4 = NonLinMPC(nonlinmodel, Hp=15, Mwt=[1,2]) @test nmpc4.weights.M_Hp ≈ Diagonal(diagm(repeat(Float64[1, 2], 15))) + @test nmpc4.weights.M_Hp isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} nmpc5 = NonLinMPC(nonlinmodel, Hp=15 ,Nwt=[3,4], Cwt=1e3, Hc=5) @test nmpc5.weights.Ñ_Hc ≈ Diagonal(diagm([repeat(Float64[3, 4], 5); [1e3]])) + @test nmpc5.weights.Ñ_Hc isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} nmpc6 = NonLinMPC(nonlinmodel, Hp=15, Lwt=[0,1]) @test nmpc6.weights.L_Hp ≈ Diagonal(diagm(repeat(Float64[0, 1], 15))) + @test nmpc6.weights.L_Hp isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} nmpc7 = NonLinMPC(nonlinmodel, Hp=15, Ewt=1e-3, JE=(Ue,Ŷe,D̂e,p) -> p*dot(Ue,Ŷe)+sum(D̂e), p=10) @test nmpc7.weights.E == 1e-3 @test nmpc7.JE([1,2],[3,4],[4,6],10) == 10*dot([1,2],[3,4])+sum([4,6]) @@ -602,12 +619,15 @@ end nmpc11 = NonLinMPC(nonlinmodel, Hp=15, nint_u=[1, 1], nint_ym=[0, 0]) @test nmpc11.estim.nint_u == [1, 1] @test nmpc11.estim.nint_ym == [0, 0] - nmpc12 = NonLinMPC(nonlinmodel, Hp=10, M_Hp=Diagonal(collect(1.01:0.01:1.2))) - @test nmpc12.weights.M_Hp ≈ Diagonal(collect(1.01:0.01:1.2)) - nmpc13 = NonLinMPC(nonlinmodel, Hp=10, N_Hc=Diagonal([0.1,0.11,0.12,0.13]), Cwt=Inf) - @test nmpc13.weights.Ñ_Hc ≈ Diagonal([0.1,0.11,0.12,0.13]) - nmcp14 = NonLinMPC(nonlinmodel, Hp=10, L_Hp=Diagonal(collect(0.001:0.001:0.02))) - @test nmcp14.weights.L_Hp ≈ Diagonal(collect(0.001:0.001:0.02)) + nmpc12 = NonLinMPC(nonlinmodel, Hp=10, M_Hp=diagm(collect(1.01:0.01:1.2))) + @test nmpc12.weights.M_Hp ≈ diagm(collect(1.01:0.01:1.2)) + @test nmpc12.weights.M_Hp isa Hermitian{Float64, Matrix{Float64}} 
+ nmpc13 = NonLinMPC(nonlinmodel, Hp=10, N_Hc=diagm([0.1,0.11,0.12,0.13]), Cwt=Inf) + @test nmpc13.weights.Ñ_Hc ≈ diagm([0.1,0.11,0.12,0.13]) + @test nmpc13.weights.Ñ_Hc isa Hermitian{Float64, Matrix{Float64}} + nmcp14 = NonLinMPC(nonlinmodel, Hp=10, L_Hp=diagm(collect(0.001:0.001:0.02))) + @test nmcp14.weights.L_Hp ≈ diagm(collect(0.001:0.001:0.02)) + @test nmcp14.weights.L_Hp isa Hermitian{Float64, Matrix{Float64}} nmpc15 = NonLinMPC(nonlinmodel, Hp=10, gc=(Ue,Ŷe,D̂e,p,ϵ)-> [p*dot(Ue,Ŷe)+sum(D̂e)+ϵ], nc=1, p=10) LHS = zeros(1) nmpc15.con.gc!(LHS,[1,2],[3,4],[4,6],10,0.1) @@ -632,6 +652,7 @@ end nonlinmodel2 = NonLinModel{Float32}(f, h, Ts, 2, 4, 2, 1, solver=nothing) nmpc15 = NonLinMPC(nonlinmodel2, Hp=15) @test isa(nmpc15, NonLinMPC{Float32}) + @test isa(nmpc15.estim, UnscentedKalmanFilter{Float32}) @test isa(nmpc15.optim, JuMP.GenericModel{Float64}) # Ipopt does not support Float32 @test_throws ArgumentError NonLinMPC(nonlinmodel, Hp=15, Ewt=[1, 1])
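
A rough usage sketch of the behaviour introduced in PATCH 5/8 and exercised by the new tests in PATCH 8/8: weight matrices supplied as `Diagonal` (now the default, built from `Mwt`, `Nwt` and `Lwt`) keep their diagonal storage inside the controller, while dense matrices built with `diagm` stay dense. The plant below reuses the small SISO `LinModel` constructor call from the test suite; the horizons and weight values are arbitrary and purely illustrative.

    using ModelPredictiveControl, LinearAlgebra

    # same small SISO model as in test/3_test_predictive_control.jl:
    model = LinModel(0.5*ones(1,1), ones(1,1), ones(1,1), zeros(1,0), zeros(1,0), 1.0)

    # the default keyword arguments now build Diagonal weights, and ControllerWeights
    # preserves that storage (wrapped in Hermitian):
    mpc1 = LinMPC(model, Hp=10, Hc=2, Mwt=[1.0], Nwt=[0.1])
    mpc1.weights.M_Hp isa Hermitian{Float64, Diagonal{Float64, Vector{Float64}}} # true

    # a dense weight matrix built with diagm keeps its dense Matrix representation:
    mpc2 = LinMPC(model, Hp=10, Hc=2, M_Hp=diagm(fill(1.0, 10)))
    mpc2.weights.M_Hp isa Hermitian{Float64, Matrix{Float64}}                    # true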