diff --git a/docs/Project.toml b/docs/Project.toml
index aefa3caf2..7db9bd79d 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -58,8 +58,8 @@ IterTools = "1"
 Juniper = "0.9"
 Lux = "1"
 MLUtils = "0.4.4"
-Manifolds = "0.9"
-Manopt = "0.4"
+Manifolds = "0.10"
+Manopt = "0.5"
 ModelingToolkit = "10"
 NLPModels = "0.21"
 NLPModelsTest = "0.10"
@@ -70,9 +70,9 @@ OptimizationBase = "2"
 OptimizationCMAEvolutionStrategy = "0.3"
 OptimizationEvolutionary = "0.4"
 OptimizationGCMAES = "0.3"
-OptimizationIpopt = "0.1"
+OptimizationIpopt = "0.2"
 OptimizationMOI = "0.5"
-OptimizationManopt = "0.0.4"
+OptimizationManopt = "0.1.0"
 OptimizationMetaheuristics = "0.3"
 OptimizationNLPModels = "0.0.2"
 OptimizationNLopt = "0.3"
diff --git a/docs/src/optimization_packages/manopt.md b/docs/src/optimization_packages/manopt.md
index 64cc69880..422337e15 100644
--- a/docs/src/optimization_packages/manopt.md
+++ b/docs/src/optimization_packages/manopt.md
@@ -1,7 +1,9 @@
 # Manopt.jl
 
-[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimization solvers on manifolds supported by
-[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl).
+[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package providing solvers
+for optimization problems defined on Riemannian manifolds.
+The implementation is based on the [ManifoldsBase.jl](https://github.com/JuliaManifolds/ManifoldsBase.jl) interface and can hence be used for all manifolds defined in
+[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl) or any other manifold implemented using the interface.
 
 ## Installation: OptimizationManopt.jl
 
@@ -29,7 +31,7 @@ The common kwargs `maxiters`, `maxtime` and `abstol` are supported by all the op
 function or `OptimizationProblem`.
 
 !!! note
-
+    
     The `OptimizationProblem` has to be passed the manifold as the `manifold` keyword argument.
 
 ## Examples
diff --git a/lib/OptimizationManopt/Project.toml b/lib/OptimizationManopt/Project.toml
index c19e7123c..12e765d1c 100644
--- a/lib/OptimizationManopt/Project.toml
+++ b/lib/OptimizationManopt/Project.toml
@@ -1,7 +1,7 @@
 name = "OptimizationManopt"
 uuid = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6"
-authors = ["Mateusz Baran "]
-version = "0.0.5"
+authors = ["Mateusz Baran ", "Ronny Bergmann "]
+version = "0.1.0"
 
 [deps]
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -14,10 +14,10 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 
 [compat]
 LinearAlgebra = "1.10"
-ManifoldDiff = "0.3.10"
-Manifolds = "0.9.18"
-ManifoldsBase = "0.15.10"
-Manopt = "0.4.63"
+ManifoldDiff = "0.4"
+Manifolds = "0.10"
+ManifoldsBase = "1"
+Manopt = "0.5"
 Optimization = "4.4"
 Reexport = "1.2"
 julia = "1.10"
diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl
index cd93d6ce3..9e9a092de 100644
--- a/lib/OptimizationManopt/src/OptimizationManopt.jl
+++ b/lib/OptimizationManopt/src/OptimizationManopt.jl
@@ -70,20 +70,15 @@ function call_manopt_optimizer(
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = Manopt.AllocatingEvaluation(),
-        stepsize::Stepsize = ArmijoLinesearch(M),
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
-    opts = gradient_descent(M,
+    opts = Manopt.gradient_descent(M,
         loss,
         gradF,
         x0;
-        return_state = true,
-        evaluation,
-        stepsize,
-        stopping_criterion,
-        kwargs...)
-    # we unwrap DebugOptions here
+        return_state = true, # return the (full, decorated) solver state
+        kwargs...
+    )
     minimizer = Manopt.get_solver_result(opts)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
 end
@@ -95,13 +90,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, opt::NelderMea
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        kwargs...)
-    opts = NelderMead(M,
-        loss;
-        return_state = true,
-        stopping_criterion,
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
+    opts = NelderMead(M, loss; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opts)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
 end
@@ -114,20 +105,15 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        stepsize::Stepsize = ArmijoLinesearch(M),
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
-    opts = conjugate_gradient_descent(M,
+    opts = Manopt.conjugate_gradient_descent(M,
         loss,
         gradF,
         x0;
         return_state = true,
-        evaluation,
-        stepsize,
-        stopping_criterion,
-        kwargs...)
-    # we unwrap DebugOptions here
+        kwargs...
+    )
     minimizer = Manopt.get_solver_result(opts)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
 end
@@ -140,25 +126,11 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
+        hessF=nothing, # ignore that keyword for this solver
         population_size::Int = 100,
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
-        inverse_retraction_method::AbstractInverseRetractionMethod = default_inverse_retraction_method(M),
-        vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
         kwargs...)
-    initial_population = vcat([x0], [rand(M) for _ in 1:(population_size - 1)])
-    opts = particle_swarm(M,
-        loss;
-        x0 = initial_population,
-        n = population_size,
-        return_state = true,
-        retraction_method,
-        inverse_retraction_method,
-        vector_transport_method,
-        stopping_criterion,
-        kwargs...)
-    # we unwrap DebugOptions here
+    swarm = [x0, [rand(M) for _ in 1:(population_size - 1)]...]
+    opts = particle_swarm(M, loss, swarm; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opts)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
 end
@@ -172,28 +144,10 @@ function call_manopt_optimizer(M::Manopt.AbstractManifold,
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
-        vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
-        stepsize = WolfePowellLinesearch(M;
-            retraction_method = retraction_method,
-            vector_transport_method = vector_transport_method,
-            linesearch_stopsize = 1e-12),
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...
 )
-    opts = quasi_Newton(M,
-        loss,
-        gradF,
-        x0;
-        return_state = true,
-        evaluation,
-        retraction_method,
-        vector_transport_method,
-        stepsize,
-        stopping_criterion,
-        kwargs...)
-    # we unwrap DebugOptions here
+    opts = quasi_Newton(M, loss, gradF, x0; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opts)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
 end
@@ -205,19 +159,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
-        vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
-        basis = Manopt.DefaultOrthonormalBasis(),
-        kwargs...)
-    opt = cma_es(M,
-        loss,
-        x0;
-        return_state = true,
-        stopping_criterion,
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
-    # we unwrap DebugOptions here
+    opt = cma_es(M, loss, x0; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opt)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
 end
@@ -229,22 +173,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
-        vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
-    opt = convex_bundle_method!(M,
-        loss,
-        gradF,
-        x0;
-        return_state = true,
-        evaluation,
-        retraction_method,
-        vector_transport_method,
-        stopping_criterion,
-        kwargs...)
-    # we unwrap DebugOptions here
+    opt = convex_bundle_method(M, loss, gradF, x0; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opt)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
 end
@@ -257,21 +188,13 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         gradF,
         x0;
         hessF = nothing,
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
-        kwargs...)
-    opt = adaptive_regularization_with_cubics(M,
-        loss,
-        gradF,
-        hessF,
-        x0;
-        return_state = true,
-        evaluation,
-        retraction_method,
-        stopping_criterion,
         kwargs...)
-    # we unwrap DebugOptions here
+
+    opt = if isnothing(hessF)
+        adaptive_regularization_with_cubics(M, loss, gradF, x0; return_state = true, kwargs...)
+    else
+        adaptive_regularization_with_cubics(M, loss, gradF, hessF, x0; return_state = true, kwargs...)
+    end
     minimizer = Manopt.get_solver_result(opt)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
 end
@@ -284,21 +207,12 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         gradF,
         x0;
         hessF = nothing,
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
         kwargs...)
-    opt = trust_regions(M,
-        loss,
-        gradF,
-        hessF,
-        x0;
-        return_state = true,
-        evaluation,
-        retraction = retraction_method,
-        stopping_criterion,
-        kwargs...)
-    # we unwrap DebugOptions here
+    opt = if isnothing(hessF)
+        trust_regions(M, loss, gradF, x0; return_state = true, kwargs...)
+    else
+        trust_regions(M, loss, gradF, hessF, x0; return_state = true, kwargs...)
+    end
     minimizer = Manopt.get_solver_result(opt)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
 end
@@ -310,22 +224,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         loss,
         gradF,
         x0;
-        stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
-        evaluation::AbstractEvaluationType = InplaceEvaluation(),
-        retraction_method::AbstractRetractionMethod = default_retraction_method(M),
-        stepsize::Stepsize = DecreasingStepsize(; length = 2.0, shift = 2),
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
-    opt = Frank_Wolfe_method(M,
-        loss,
-        gradF,
-        x0;
-        return_state = true,
-        evaluation,
-        retraction_method,
-        stopping_criterion,
-        stepsize,
-        kwargs...)
-    # we unwrap DebugOptions here
+    opt = Frank_Wolfe_method(M, loss, gradF, x0; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opt)
     return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
 end
@@ -337,13 +238,17 @@ function SciMLBase.requiresgradient(opt::Union{
         AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer})
     true
 end
+# TODO: WHY? they both still accept not passing it
 function SciMLBase.requireshessian(opt::Union{
         AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer})
     true
 end
 
 function build_loss(f::OptimizationFunction, prob, cb)
-    function (::AbstractManifold, θ)
+    # TODO: I do not understand this. Why is the manifold not used?
+    # Either this is a Euclidean cost, in which case we should probably still call `embed`,
+    # or it is not, in which case we need M.
+    return function (::AbstractManifold, θ)
         x = f.f(θ, prob.p)
         cb(x, θ)
         __x = first(x)
@@ -351,6 +256,9 @@ function build_loss(f::OptimizationFunction, prob, cb)
     end
 end
 
+# cf. https://github.com/SciML/SciMLBase.jl/blob/master/src/problems/optimization_problems.jl
+# {iip} is the type parameter here; it is not explained there, but it very likely means "is in place",
+# so this refers to whether the gradient/Hessian is computed in place or not
 function build_gradF(f::OptimizationFunction{true})
     function g(M::AbstractManifold, G, θ)
         f.grad(G, θ)
@@ -361,23 +269,27 @@ function build_gradF(f::OptimizationFunction{true})
         f.grad(G, θ)
         return riemannian_gradient(M, θ, G)
     end
+    return g
 end
 
 function build_hessF(f::OptimizationFunction{true})
     function h(M::AbstractManifold, H1, θ, X)
         H = zeros(eltype(θ), length(θ))
+        # an OptimizationFunction has both hess (the matrix) and hv (the Hessian-vector product);
+        # we need hv here
         f.hv(H, θ, X)
         G = zeros(eltype(θ), length(θ))
         f.grad(G, θ)
         riemannian_Hessian!(M, H1, θ, G, H, X)
     end
     function h(M::AbstractManifold, θ, X)
-        H = zeros(eltype(θ), length(θ), length(θ))
-        f.hess(H, θ)
+        H = zeros(eltype(θ), length(θ))
+        f.hv(H, θ, X)
        G = zeros(eltype(θ), length(θ))
         f.grad(G, θ)
         return riemannian_Hessian(M, θ, G, H, X)
     end
+    return h
 end
 
 function SciMLBase.__solve(cache::OptimizationCache{
@@ -400,8 +312,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
         LC,
         UC,
         S,
-        O <:
-        AbstractManoptOptimizer,
+        O <: AbstractManoptOptimizer,
         D,
         P,
         C
@@ -423,6 +334,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
             u = θ,
             p = cache.p,
             objective = x[1])
+        #TODO: What is this callback for?
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
@@ -457,7 +369,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
         solver_kwarg..., stopping_criterion = stopping_criterion, hessF)
 
     asc = get_stopping_criterion(opt_res.options)
-    opt_ret = Manopt.indicates_convergence(asc) ? ReturnCode.Success : ReturnCode.Failure
+    opt_ret = Manopt.has_converged(asc) ? ReturnCode.Success : ReturnCode.Failure
 
     return SciMLBase.build_solution(cache,
         cache.opt,
diff --git a/lib/OptimizationManopt/test/runtests.jl b/lib/OptimizationManopt/test/runtests.jl
index 2c84d8623..09074e602 100644
--- a/lib/OptimizationManopt/test/runtests.jl
+++ b/lib/OptimizationManopt/test/runtests.jl
@@ -15,233 +15,176 @@ function rosenbrock_grad!(storage, x, p)
 end
 
 R2 = Euclidean(2)
+@testset "OptimizationManopt.jl" begin
+    @testset "Error on no or mismatching manifolds" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
+
+        stepsize = Manopt.ArmijoLinesearch(R2)
+        opt = OptimizationManopt.GradientDescentOptimizer()
+
+        optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+        prob_forwarddiff = OptimizationProblem(optprob_forwarddiff, x0, p)
+        @test_throws ArgumentError("Manifold not specified in the problem for e.g. `OptimizationProblem(f, x, p; manifold = SymmetricPositiveDefinite(5))`.") Optimization.solve(
+            prob_forwarddiff, opt)
+    end
 
-@testset "Error on no or mismatching manifolds" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
-
-    stepsize = Manopt.ArmijoLinesearch(R2)
-    opt = OptimizationManopt.GradientDescentOptimizer()
-
-    optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
-    prob_forwarddiff = OptimizationProblem(optprob_forwarddiff, x0, p)
-    @test_throws ArgumentError("Manifold not specified in the problem for e.g. `OptimizationProblem(f, x, p; manifold = SymmetricPositiveDefinite(5))`.") Optimization.solve(
-        prob_forwarddiff, opt)
-end
-
-@testset "Gradient descent" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
-
-    stepsize = Manopt.ArmijoLinesearch(R2)
-    opt = OptimizationManopt.GradientDescentOptimizer()
-
-    optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoEnzyme())
-    prob_forwarddiff = OptimizationProblem(
-        optprob_forwarddiff, x0, p; manifold = R2, stepsize = stepsize)
-    sol = Optimization.solve(prob_forwarddiff, opt)
-    @test sol.minimum < 0.2
-
-    optprob_grad = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
-    prob_grad = OptimizationProblem(optprob_grad, x0, p; manifold = R2, stepsize = stepsize)
-    sol = Optimization.solve(prob_grad, opt)
-    @test sol.minimum < 0.2
-end
-
-@testset "Nelder-Mead" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
-
-    opt = OptimizationManopt.NelderMeadOptimizer()
-
-    optprob = OptimizationFunction(rosenbrock)
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+    @testset "Gradient descent" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-    sol = Optimization.solve(prob, opt)
-    @test sol.minimum < 0.7
-end
+        stepsize = Manopt.ArmijoLinesearch(R2)
+        opt = OptimizationManopt.GradientDescentOptimizer()
 
-@testset "Conjugate gradient descent" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+        optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoEnzyme())
+        prob_forwarddiff = OptimizationProblem(
+            optprob_forwarddiff, x0, p; manifold = R2, stepsize = stepsize)
+        sol = Optimization.solve(prob_forwarddiff, opt)
+        @test sol.minimum < 0.2
 
-    stepsize = Manopt.ArmijoLinesearch(R2)
-    opt = OptimizationManopt.ConjugateGradientDescentOptimizer()
+        optprob_grad = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
+        prob_grad = OptimizationProblem(optprob_grad, x0, p; manifold = R2, stepsize = stepsize)
+        sol = Optimization.solve(prob_grad, opt)
+        @test sol.minimum < 0.2
+    end
 
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+    @testset "Nelder-Mead" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-    sol = Optimization.solve(prob, opt, stepsize = stepsize)
-    @test sol.minimum < 0.5
-end
+        opt = OptimizationManopt.NelderMeadOptimizer()
 
-@testset "Quasi Newton" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+        optprob = OptimizationFunction(rosenbrock)
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-    opt = OptimizationManopt.QuasiNewtonOptimizer()
-    function callback(state, l)
-        println(state.u)
-        println(l)
-        return false
+        sol = Optimization.solve(prob, opt)
+        @test sol.minimum < 0.7
     end
 
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
-
-    sol = Optimization.solve(prob, opt, callback = callback, maxiters = 30)
-    @test sol.minimum < 1e-14
-end
-
-@testset "Particle swarm" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+    @testset "Conjugate gradient descent" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-    opt = OptimizationManopt.ParticleSwarmOptimizer()
+        stepsize = Manopt.ArmijoLinesearch(R2)
+        opt = OptimizationManopt.ConjugateGradientDescentOptimizer()
 
-    optprob = OptimizationFunction(rosenbrock)
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+        optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-    sol = Optimization.solve(prob, opt)
-    @test sol.minimum < 0.1
-end
+        sol = Optimization.solve(prob, opt, stepsize = stepsize)
+        @test sol.minimum < 0.5
+    end
 
-@testset "CMA-ES" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+    @testset "Quasi Newton" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
+
+        opt = OptimizationManopt.QuasiNewtonOptimizer()
+        function callback(state, l)
+            println(state.u)
+            println(l)
+            return false
+        end
+        optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+
+        sol = Optimization.solve(prob, opt, callback = callback, maxiters = 30)
+        @test sol.minimum < 1e-14
+    end
 
-    opt = OptimizationManopt.CMAESOptimizer()
+    @testset "Particle swarm" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-    optprob = OptimizationFunction(rosenbrock)
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+        opt = OptimizationManopt.ParticleSwarmOptimizer()
 
-    sol = Optimization.solve(prob, opt)
-    @test sol.minimum < 0.1
-end
+        optprob = OptimizationFunction(rosenbrock)
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-@testset "ConvexBundle" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+        sol = Optimization.solve(prob, opt)
+        @test sol.minimum < 0.1
+    end
 
-    opt = OptimizationManopt.ConvexBundleOptimizer()
+    @testset "CMA-ES" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-    optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+        opt = OptimizationManopt.CMAESOptimizer()
 
-    sol = Optimization.solve(
-        prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver!)
-    @test sol.minimum < 0.1
-end
+        optprob = OptimizationFunction(rosenbrock)
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-# @testset "TruncatedConjugateGradientDescent" begin
-# x0 = zeros(2)
-# p = [1.0, 100.0]
+        sol = Optimization.solve(prob, opt)
+        @test sol.minimum < 0.1
+    end
 
-# opt = OptimizationManopt.TruncatedConjugateGradientDescentOptimizer()
+    @testset "ConvexBundle" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-# optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
-# prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+        opt = OptimizationManopt.ConvexBundleOptimizer()
 
-# sol = Optimization.solve(prob, opt)
-# @test_broken sol.minimum < 0.1
-# end
+        optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-@testset "AdaptiveRegularizationCubic" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+        sol = Optimization.solve(
+            prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver)
+        @test sol.minimum < 0.1
+    end
 
-    opt = OptimizationManopt.AdaptiveRegularizationCubicOptimizer()
+    # @testset "TruncatedConjugateGradientDescent" begin
+    #     x0 = zeros(2)
+    #     p = [1.0, 100.0]
 
-    optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+    #     opt = OptimizationManopt.TruncatedConjugateGradientDescentOptimizer()
 
-    sol = Optimization.solve(prob, opt)
-    @test sol.minimum < 0.1
-end
+    #     optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
+    #     prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-@testset "TrustRegions" begin
-    x0 = zeros(2)
-    p = [1.0, 100.0]
+    #     sol = Optimization.solve(prob, opt)
+    #     @test_broken sol.minimum < 0.1
+    # end
 
-    opt = OptimizationManopt.TrustRegionsOptimizer()
+    @testset "AdaptiveRegularizationCubic" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-    optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
-    prob = OptimizationProblem(optprob, x0, p; manifold = R2)
+        opt = OptimizationManopt.AdaptiveRegularizationCubicOptimizer()
 
-    sol = Optimization.solve(prob, opt)
-    @test sol.minimum < 0.1
-end
-
-# @testset "Circle example from Manopt" begin
-# Mc = Circle()
-# pc = 0.0
-# data = [-π / 4, 0.0, π / 4]
-# fc(y, _) = 1 / 2 * sum([distance(M, y, x)^2 for x in data])
-# sgrad_fc(G, y, _) = G .= -log(Mc, y, rand(data))
+        #TODO: This autodiff currently provides a Hessian that does not seem to be a proper Hessian
+        # ARC fails, but AD already warns before that.
+        # So it passes _some_ Hessian, but a wrong one, even in format.
+        optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-# opt = OptimizationManopt.StochasticGradientDescentOptimizer()
+        sol = Optimization.solve(prob, opt)
+        @test sol.minimum < 0.1
+    end
 
-# optprob = OptimizationFunction(fc, grad = sgrad_fc)
-# prob = OptimizationProblem(optprob, pc; manifold = Mc)
+    @testset "TrustRegions" begin
+        x0 = zeros(2)
+        p = [1.0, 100.0]
 
-# sol = Optimization.solve(prob, opt)
+        opt = OptimizationManopt.TrustRegionsOptimizer()
 
-# @test all([is_point(Mc, q, true) for q in [q1, q2, q3, q4, q5]])
-# end
+        #TODO: This autodiff currently provides a Hessian that does not seem to be a proper Hessian
+        # TR fails, but AD already warns before that. So it passes _some_ Hessian, but a wrong one, even in format.
+        optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
+        prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
-@testset "Custom constraints" begin
-    cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]])
+        sol = Optimization.solve(prob, opt)
+        @test sol.minimum < 0.1
+    end
 
-    x0 = zeros(2)
-    p = [1.0, 100.0]
-    opt = OptimizationManopt.GradientDescentOptimizer()
+    @testset "Custom constraints" begin
+        cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]])
 
-    optprob_cons = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!, cons = cons)
-    prob_cons = OptimizationProblem(optprob_cons, x0, p)
-    @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt)
-end
+        x0 = zeros(2)
+        p = [1.0, 100.0]
+        opt = OptimizationManopt.GradientDescentOptimizer()
 
-@testset "SPD Manifold" begin
-    M = SymmetricPositiveDefinite(5)
-    m = 100
-    σ = 0.005
-    q = Matrix{Float64}(I, 5, 5) .+ 2.0
-    data2 = [exp(M, q, σ * rand(M; vector_at = q)) for i in 1:m]
-
-    f(x, p = nothing) = sum(distance(M, x, data2[i])^2 for i in 1:m)
-
-    optf = OptimizationFunction(f, Optimization.AutoFiniteDiff())
-    prob = OptimizationProblem(optf, data2[1]; manifold = M, maxiters = 1000)
-
-    opt = OptimizationManopt.GradientDescentOptimizer()
-    @time sol = Optimization.solve(prob, opt)
-
-    @test sol.u≈q rtol=1e-2
-
-    function closed_form_solution!(M::SymmetricPositiveDefinite, q, L, U, p, X)
-        # extract p^1/2 and p^{-1/2}
-        (p_sqrt_inv, p_sqrt) = Manifolds.spd_sqrt_and_sqrt_inv(p)
-        # Compute D & Q
-        e2 = eigen(p_sqrt_inv * X * p_sqrt_inv) # decompose Sk = QDQ'
-        D = Diagonal(1.0 .* (e2.values .< 0))
-        Q = e2.vectors
-        Uprime = Q' * p_sqrt_inv * U * p_sqrt_inv * Q
-        Lprime = Q' * p_sqrt_inv * L * p_sqrt_inv * Q
-        P = cholesky(Hermitian(Uprime - Lprime))
-
-        z = P.U' * D * P.U + Lprime
-        copyto!(M, q, p_sqrt * Q * z * Q' * p_sqrt)
-        return q
+        optprob_cons = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!, cons = cons)
+        prob_cons = OptimizationProblem(optprob_cons, x0, p)
+        #TODO: What is this?
+        @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt)
     end
-    N = m
-    U = mean(data2)
-    L = inv(sum(1 / N * inv(matrix) for matrix in data2))
-
-    opt = OptimizationManopt.FrankWolfeOptimizer()
-    optf = OptimizationFunction(f, Optimization.AutoFiniteDiff())
-    prob = OptimizationProblem(optf, data2[1]; manifold = M)
-
-    @time sol = Optimization.solve(
-        prob, opt, sub_problem = (M, q, p, X) -> closed_form_solution!(M, q, L, U, p, X),
-        maxiters = 1000)
-    @test sol.u≈q rtol=1e-2
-end
+end
\ No newline at end of file
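
For reference, a minimal usage sketch of the updated API, mirroring the test suite above: the manifold is passed to the `OptimizationProblem` via the `manifold` keyword and a Manopt-backed optimizer is handed to `solve`. The `rosenbrock` definition below is assumed (it is defined near the top of `test/runtests.jl`, outside the hunks shown here):

```julia
using Optimization, OptimizationManopt, Manopt, Manifolds

# Standard Rosenbrock cost (assumed to match the definition used in the tests).
rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2

R2 = Euclidean(2)      # manifold the optimization runs on
x0 = zeros(2)          # starting point
p = [1.0, 100.0]       # cost parameters

optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
# The manifold has to be passed as the `manifold` keyword of the problem.
prob = OptimizationProblem(optf, x0, p; manifold = R2)

opt = OptimizationManopt.GradientDescentOptimizer()
sol = Optimization.solve(prob, opt, maxiters = 100)
sol.u, sol.minimum
```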