@@ -531,8 +531,8 @@ This method is really intricate and I'm not proud of it. That's because of 3 ele
and as efficient as possible.
- The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
of `Vararg{T,N}` (see the [performance tip](https://docs.julialang.org/en/v1/manual/performance-tips/#Be-aware-of-when-Julia-avoids-specializing))
- and memoization to avoid redundant computations. This is already complex,
- but it's even worse knowing that AD tools for gradients do not support splatting.
+ and memoization to avoid redundant computations. This is already complex, but it's even
+ worse knowing that most automatic differentiation tools do not support splatting.
- The signature of gradient and hessian functions is not the same for univariate (`nZ̃ == 1`)
and multivariate (`nZ̃ > 1`) operators in `JuMP`. Both must be defined.
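To make those three points concrete, here is a minimal, self-contained sketch of the splatting-plus-memoization pattern and of the two gradient signatures. All names (`simulate!`, `x_last`, `results`, `J`, `g`) are illustrative, not the package's, and the sketch assumes the functions are only ever called with `Float64` arguments:

```julia
# Sketch only: illustrative names, not the package internals.
const x_last  = fill(NaN, 2)    # last point at which the simulation was run
const results = zeros(2)        # shared buffer: results[1] = J, results[2] = g

function simulate!(x)           # the expensive part, run once per new point
    results[1] = sum(abs2, x)   # stand-in for the objective J
    results[2] = x[1] + x[2]    # stand-in for a constraint g
    return nothing
end

function J(x::Vararg{T, N}) where {N, T<:Real}
    any(x_last .!= x) && (x_last .= x; simulate!(x_last))   # memoization
    return results[1]
end
function g(x::Vararg{T, N}) where {N, T<:Real}
    any(x_last .!= x) && (x_last .= x; simulate!(x_last))   # reuses J's work
    return results[2]
end

# The gradient signature differs between univariate and multivariate operators:
∇J_uni(x::Real) = 2x                                       # returns a scalar
function ∇J_multi(∇J, x::Vararg{T, N}) where {N, T<:Real}  # fills ∇J in place
    ∇J .= 2 .* x
    return nothing
end
```

With this layout, evaluating `J(1.0, 2.0)` and then `g(1.0, 2.0)` runs `simulate!` only once, which is the point of the memoization.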
@@ -546,6 +546,7 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
nΔŨ, nUe, nŶe = nu*Hc + nϵ, nU + nu, nŶ + ny
Ncache = nZ̃ + 3
myNaN = convert(JNT, NaN) # fill Z̃ with NaNs to force update_simulations! at 1st call:
+ # ---------------------- differentiation cache ---------------------------------------
Z̃_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(fill(myNaN, nZ̃), Ncache)
ΔŨ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nΔŨ), Ncache)
x̂0end_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Ncache)
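For readers who have not used them: `DiffCache` and `get_tmp` are the PreallocationTools.jl utilities. The cache owns a plain buffer plus a larger one that can be reinterpreted as `ForwardDiff.Dual` storage, and `get_tmp` picks between them from the type of its second argument, so the same in-place code serves both plain evaluations and AD sweeps. A standalone example, unrelated to the controller code:

```julia
using PreallocationTools, ForwardDiff

cache = DiffCache(zeros(3), 12)      # 2nd argument plays the role of Ncache above

function f(x)
    tmp = get_tmp(cache, x)          # Float64 buffer here, Dual buffer during AD
    tmp .= 2 .* x                    # reuse the preallocated storage
    return sum(abs2, tmp)
end

f([1.0, 2.0, 3.0])                        # plain evaluation
ForwardDiff.gradient(f, [1.0, 2.0, 3.0])  # AD sweep: tmp transparently holds Duals
```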
@@ -586,14 +587,14 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
end
return nothing
end
- # --------------------- cache for the AD functions ------------------------------------
+ # --------------------- normal cache for the AD functions ----------------------------
Z̃arg_vec = Vector{JNT}(undef, nZ̃)
- ∇J = Vector{JNT}(undef, nZ̃) # gradient of J
+ ∇J = Vector{JNT}(undef, nZ̃) # gradient of objective J
g_vec = Vector{JNT}(undef, ng)
- ∇g = Matrix{JNT}(undef, ng, nZ̃) # Jacobian of g
+ ∇g = Matrix{JNT}(undef, ng, nZ̃) # Jacobian of inequality constraints g
geq_vec = Vector{JNT}(undef, neq)
- ∇geq = Matrix{JNT}(undef, neq, nZ̃) # Jacobian of geq
- # --------------------- objective function --------------------------------------------
+ ∇geq = Matrix{JNT}(undef, neq, nZ̃) # Jacobian of equality constraints geq
+ # --------------------- objective functions -------------------------------------------
function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_simulations!(Z̃arg, get_tmp(Z̃_cache, T))
ΔŨ = get_tmp(ΔŨ_cache, T)
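A stripped-down version of what `Jfunc` does with the splatted tuple, using a throwaway cache (`Z̃_cache_demo` and the toy objective are illustrative, not the package's): the element type `T` of the splatted arguments is what routes `get_tmp` to the matching buffer, so one body serves both the plain and the dual-number paths.

```julia
using PreallocationTools, ForwardDiff

const Z̃_cache_demo = DiffCache(fill(NaN, 2), 5)   # toy stand-in for Z̃_cache

function Jfunc_demo(Z̃arg::Vararg{T, N}) where {N, T<:Real}
    Z̃ = get_tmp(Z̃_cache_demo, T)   # buffer whose element type matches T
    Z̃ .= Z̃arg                      # copy the splatted tuple into the cache
    return sum(abs2, Z̃)            # toy stand-in for the real objective
end

Jfunc_demo(1.0, 2.0)                                      # Float64 path
ForwardDiff.gradient(z -> Jfunc_demo(z...), [1.0, 2.0])   # Dual path, via splatting
```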
@@ -674,7 +675,7 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
∇geqfuncs! = Vector{Function}(undef, neq)
for i in eachindex(∇geqfuncs!)
# only multivariate syntax, univariate is impossible since nonlinear equality
- # constraints imply MultipleShooting thus input AND state in Z̃:
+ # constraints imply MultipleShooting, thus input increment ΔU and state X̂0 in Z̃:
∇geqfuncs![i] =
function (∇geq_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
Z̃arg_vec .= Z̃arg
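These closures follow `JuMP`'s convention for multivariate operator gradients: the first argument is a pre-allocated output vector to fill in place, followed by the splatted decision variables. A standalone sketch of that convention (toy `f`/`∇f`, not the package's registration code):

```julia
using JuMP

# Toy operator and its in-place multivariate gradient, mirroring the signature
# of the ∇geqfuncs![i] closures above:
f(x::Vararg{T, N}) where {N, T<:Real} = x[1]^2 + x[2]^2
function ∇f(∇f_vec, x::Vararg{T, N}) where {N, T<:Real}
    ∇f_vec .= 2 .* x
    return nothing
end

model = Model()
@variable(model, z[1:2])
@operator(model, op_f, 2, f, ∇f)        # 2 = number of splatted arguments
@constraint(model, op_f(z...) == 1)     # a nonlinear equality constraint
```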