diff --git a/.travis.yml b/.travis.yml
index b5e927e..7c0c6e9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,6 +6,7 @@ os:
 julia:
   - 1.0
   - 1.1
+  - 1.2
   - nightly
 matrix:
   allow_failures:
diff --git a/Project.toml b/Project.toml
new file mode 100644
index 0000000..d02d76a
--- /dev/null
+++ b/Project.toml
@@ -0,0 +1,29 @@
+name = "StructuredOptimization"
+uuid = "46cd3e9d-64ff-517d-a929-236bc1a1fc9d"
+version = "0.2.0"
+
+[deps]
+AbstractOperators = "d9c5613a-d543-52d8-9afd-8f241a8c3f1c"
+DSP = "717857b8-e6f2-59f4-9121-6e50c889abd2"
+FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+ProximalAlgorithms = "140ffc9f-1907-541a-a177-7475e0a401e9"
+ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537"
+RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd"
+
+[compat]
+AbstractOperators = "≥ 0.1.0"
+DSP = "≥ 0.5.1"
+FFTW = "≥ 0.2.4"
+ProximalAlgorithms = "≥ 0.3.0"
+ProximalOperators = "≥ 0.8.0"
+RecursiveArrayTools = "≥ 0.18.0"
+julia = "1.0.0"
+
+[extras]
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[targets]
+test = ["LinearAlgebra", "Test", "Random"]
diff --git a/REQUIRE b/REQUIRE
deleted file mode 100644
index 178140c..0000000
--- a/REQUIRE
+++ /dev/null
@@ -1,7 +0,0 @@
-julia 1.0
-FFTW 0.2.4
-DSP 0.5.1
-AbstractOperators 0.1.0
-ProximalOperators 0.8.0
-ProximalAlgorithms 0.1.0
-RecursiveArrayTools 0.18.0
diff --git a/docs/src/solvers.md b/docs/src/solvers.md
index 35df816..ad0f238 100644
--- a/docs/src/solvers.md
+++ b/docs/src/solvers.md
@@ -9,7 +9,7 @@
 !!! note "Problem warm-starting"
 
     By default *warm-starting* is always enabled.
-    For example, if two problems that utilize the same variables are solved consecutively,
+    For example, if two problems that involve the same variables are solved consecutively,
     the second one will be automatically warm-started by the solution of the first one.
     That is because the variables are always linked to their respective data vectors.
     If one wants to avoid this, the optimization variables need to be manually re-initialized
@@ -18,22 +18,21 @@
 
 ## Specifying solver and options
 
-As shown above it is possible to choose the type of algorithm and specify its options by creating a `Solver` object.
-Currently, the following algorithms are supported:
+You can pick the algorithm to use as a `Solver` object from the
+[`ProximalAlgorithms.jl`](https://github.com/kul-forbes/ProximalAlgorithms.jl)
+package. Currently, the following algorithms are supported:
 
-* *Proximal Gradient (PG)* [[1]](http://www.mit.edu/~dimitrib/PTseng/papers/apgm.pdf), [[2]](http://epubs.siam.org/doi/abs/10.1137/080716542)
-* *Fast Proximal Gradient (FPG)* [[1]](http://www.mit.edu/~dimitrib/PTseng/papers/apgm.pdf), [[2]](http://epubs.siam.org/doi/abs/10.1137/080716542)
-* *ZeroFPR* [[3]](https://arxiv.org/abs/1606.06256)
-* *PANOC* [[4]](https://doi.org/10.1109/CDC.2017.8263933)
+* `ProximalAlgorithms.ForwardBackward`, also known as the *proximal gradient*
+method [[1]](http://www.mit.edu/~dimitrib/PTseng/papers/apgm.pdf), [[2]](http://epubs.siam.org/doi/abs/10.1137/080716542). Nesterov acceleration can be enabled, which significantly
+improves its performance on convex problems.
+* `ProximalAlgorithms.ZeroFPR`, a Newton-type forward-backward algorithm,
+proposed in [[3]](https://arxiv.org/abs/1606.06256), using L-BFGS
+directions to accelerate convergence.
+* `ProximalAlgorithms.PANOC`, another Newton-type forward-backward algorithm,
+proposed in [[4]](https://doi.org/10.1109/CDC.2017.8263933), also using
+L-BFGS directions.
 
-```@docs
-PG
-FPG
-ZeroFPR
-PANOC
-```
-
-## Build and solve
+## Parse and solve
 
 The macro [`@minimize`](@ref) automatically parses and solves the problem.
 An alternative syntax is given by the functions [`problem`](@ref) and [`solve`](@ref).
@@ -43,18 +42,8 @@ problem
 solve
 ```
 
-It is important to stress out that the `Solver` objects created using
-the functions above ([`PG`](@ref), [`FPG`](@ref), etc.)
-specify only the type of algorithm to be used together with its options.
-The actual solver
-(namely the one of [`ProximalAlgorithms.jl`](https://github.com/kul-forbes/ProximalAlgorithms.jl))
-is constructed altogether with the problem formulation.
-The problem parsing procedure can be separated from the solver application using the functions [`build`](@ref) and [`solve!`](@ref).
-
-```@docs
-build
-solve!
-```
+Once again, the `Solver` object is to be picked from
+[`ProximalAlgorithms.jl`](https://github.com/kul-forbes/ProximalAlgorithms.jl).
 
 ## References
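For illustration, a minimal usage sketch matching the updated docs (it follows the patterns in the test suite further down; `Variable`, `ls`, and `@minimize` are StructuredOptimization's own API, and `norm` is the LinearAlgebra function the package extends):

```julia
using LinearAlgebra
using StructuredOptimization, ProximalAlgorithms

x = Variable(4)                 # optimization variable
A, b = randn(10, 4), randn(10)  # least-squares data

# any of the three ProximalAlgorithms solvers can follow `with`
slv, = @minimize ls(A*x - b) + norm(x, 1) with ProximalAlgorithms.PANOC(tol = 1e-6)
```

As the warm-starting note explains, `~x` stays linked to the solution afterwards; the tests below reset it with `~x .= 0.` whenever a cold start is wanted.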
diff --git a/src/StructuredOptimization.jl b/src/StructuredOptimization.jl
index 42650cd..ce95bf7 100644
--- a/src/StructuredOptimization.jl
+++ b/src/StructuredOptimization.jl
@@ -9,7 +9,18 @@ using ProximalOperators
 using ProximalAlgorithms
 
 include("syntax/syntax.jl")
-include("calculus/precomposeNonlinear.jl") #TODO move to ProximalOperators?
-include("solvers/solvers.jl")
+include("calculus/precomposeNonlinear.jl") # TODO move to ProximalOperators?
+include("arraypartition.jl") # TODO move to ProximalOperators?
+
+# problem parsing
+include("solvers/terms_extract.jl")
+include("solvers/terms_properties.jl")
+include("solvers/terms_splitting.jl")
+
+# solver calls
+include("solvers/solvers_options.jl")
+include("solvers/build_solve.jl")
+include("solvers/minimize.jl")
+
 end
diff --git a/src/arraypartition.jl b/src/arraypartition.jl
new file mode 100644
index 0000000..d95bfe1
--- /dev/null
+++ b/src/arraypartition.jl
@@ -0,0 +1,36 @@
+import ProximalOperators
+import RecursiveArrayTools
+
+@inline function ProximalOperators.prox(
+    h::ProximalOperators.ProximableFunction,
+    x::RecursiveArrayTools.ArrayPartition,
+    gamma...
+)
+    # unwrap the partition and apply prox to the underlying tuple of arrays
+    y, fy = ProximalOperators.prox(h, x.x, gamma...)
+    # wrap the result back into a partition
+    return RecursiveArrayTools.ArrayPartition(y), fy
+end
+
+@inline function ProximalOperators.gradient(
+    h::ProximalOperators.ProximableFunction,
+    x::RecursiveArrayTools.ArrayPartition
+)
+    # unwrap the partition and differentiate through the underlying arrays
+    grad, fx = ProximalOperators.gradient(h, x.x)
+    # wrap the gradient back into a partition
+    return RecursiveArrayTools.ArrayPartition(grad), fx
+end
+
+@inline ProximalOperators.prox!(
+    y::RecursiveArrayTools.ArrayPartition,
+    h::ProximalOperators.ProximableFunction,
+    x::RecursiveArrayTools.ArrayPartition,
+    gamma...
+) = ProximalOperators.prox!(y.x, h, x.x, gamma...)
+
+@inline ProximalOperators.gradient!(
+    y::RecursiveArrayTools.ArrayPartition,
+    h::ProximalOperators.ProximableFunction,
+    x::RecursiveArrayTools.ArrayPartition
+) = ProximalOperators.gradient!(y.x, h, x.x)
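These wrappers let partitioned variables pass through ProximalOperators transparently. A small sketch of what they enable (assuming `SeparableSum` from ProximalOperators, which acts blockwise on tuples of arrays — the same tuple that `ArrayPartition` stores in its `x` field):

```julia
using ProximalOperators, RecursiveArrayTools

# two variable blocks packed into a single partitioned vector
x = ArrayPartition(randn(3), randn(2))

# a separable function: l1 norm on the first block, l2 norm on the second
h = SeparableSum(NormL1(1.0), NormL2(1.0))

# prox unwraps the partition, operates on the underlying tuple of
# arrays, and wraps the result back into an ArrayPartition
y, hy = prox(h, x, 1.0)
@assert y isa ArrayPartition
```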
diff --git a/src/solvers/build_solve.jl b/src/solvers/build_solve.jl
index ac7a0ae..25baea0 100644
--- a/src/solvers/build_solve.jl
+++ b/src/solvers/build_solve.jl
@@ -1,11 +1,12 @@
-export build
+export parse_problem
 
 """
-`build(terms::Tuple, solver_opt::ForwardBackwardSolver)`
+    parse_problem(terms::Tuple, solver::ForwardBackwardSolver)
 
-Takes as input a tuple containing the terms defining the problem and the solver options.
+Takes as input a tuple containing the terms defining the problem and the solver.
 
-Returns a tuple containing the optimization variables and the built solver.
+Returns a tuple containing the optimization variables and the problem terms
+to be fed into the solver.
 
 # Example
 
@@ -18,82 +19,37 @@ julia> A, b = randn(10,4), randn(10);
 
 julia> p = problem( ls(A*x - b ) , norm(x) <= 1 );
 
-julia> build(p, PG());
-
+julia> parse_problem(p, ProximalAlgorithms.ForwardBackward());
 ```
-
 """
-function build(terms::Tuple, solver::ForwardBackwardSolver)
+function parse_problem(terms::Tuple, solver::T) where T <: ForwardBackwardSolver
     x = extract_variables(terms)
     # Separate smooth and nonsmooth
     smooth, nonsmooth = split_smooth(terms)
-    # Separate quadratic and nonquadratic
-    quadratic, smooth = split_quadratic(smooth)
-    kwargs = Array{Any, 1}()
     if is_proximable(nonsmooth)
         g = extract_proximable(x, nonsmooth)
-        append!(kwargs, [(:g, g)])
-        if !isempty(quadratic)
-            fq = extract_functions(quadratic)
-            Aq = extract_operators(x, quadratic)
-            append!(kwargs, [(:fq, fq)])
-            append!(kwargs, [(:Aq, Aq)])
-        end
+        kwargs = Dict{Symbol, Any}(:g => g)
         if !isempty(smooth)
             if is_linear(smooth)
-                fs = extract_functions(smooth)
-                As = extract_operators(x, smooth)
-                append!(kwargs, [(:As, As)])
-            else
-                fs = extract_functions_nodisp(smooth)
-                As = extract_affines(x, smooth)
-                fs = PrecomposeNonlinear(fs, As)
+                f = extract_functions(smooth)
+                A = extract_operators(x, smooth)
+                kwargs[:A] = A
+            else # smooth term composed with a nonlinear operator
+                f = extract_functions_nodisp(smooth)
+                A = extract_affines(x, smooth)
+                f = PrecomposeNonlinear(f, A)
             end
-            append!(kwargs, [(:fs, fs)])
+            kwargs[:f] = f
         end
-        return build_iterator(x, solver; kwargs...)
+        return (x, kwargs)
     end
-    error("Sorry, I cannot solve this problem")
-end
-
-################################################################################
-export solve!
-
-"""
-`solve!( x_solver )`
-
-Takes as input a tuple containing the optimization variables and the built solver.
-
-Solves the problem returning a tuple containing the iterations taken and the build solver.
-
-# Example
-
-```julia
-julia> x = Variable(4)
-Variable(Float64, (4,))
-
-julia> A, b = randn(10,4), randn(10);
-
-julia> p = problem( ls(A*x - b ) , norm(x) <= 1 );
-
-julia> x_solver = build(p, PG(verbose = 0));
-
-julia> solve!(x_solver);
-
-```
-
-"""
-function solve!(x_and_iter::Tuple{Tuple{Vararg{Variable}}, ProximalAlgorithms.ProximalAlgorithm})
-    x, iterator = x_and_iter
-    it, x_star = ProximalAlgorithms.run!(iterator)
-    ~x .= x_star
-    return it, iterator
+    error("Sorry, I cannot parse this problem for solver of type $(T)")
 end
 
 export solve
 
 """
-`solve(terms::Tuple, solver_opt::ForwardBackwardSolver)`
+    solve(terms::Tuple, solver::ForwardBackwardSolver)
 
-Takes as input a tuple containing the terms defining the problem and the solver options.
+Takes as input a tuple containing the terms defining the problem and the solver.
@@ -102,22 +58,26 @@ Solves the problem returning a tuple containing the iterations taken and the built solver.
 
 # Example
 
 ```julia
-
 julia> x = Variable(4)
 Variable(Float64, (4,))
 
 julia> A, b = randn(10,4), randn(10);
 
-julia> solve(p,PG());
-it |      gamma |        fpr |
-------|------------|------------|
-    1 | 7.6375e-02 | 1.8690e+00 |
-   12 | 7.6375e-02 | 9.7599e-05 |
+julia> p = problem(ls(A*x - b ), norm(x) <= 1);
 
-```
+julia> solve(p, ProximalAlgorithms.ForwardBackward());
 
+julia> ~x
+4-element Array{Float64,1}:
+ -0.6427139974173074
+ -0.29043653211431103
+ -0.6090539651510192
+  0.36279278640995494
+```
 """
 function solve(terms::Tuple, solver::ForwardBackwardSolver)
-    built_slv = build(terms, solver)
-    return solve!(built_slv)
+    x, kwargs = parse_problem(terms, solver)
+    x_star, it = solver(~x; kwargs...)
+    ~x .= x_star
+    return x, it
 end
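With `build`/`solve!` gone, problem parsing and solving are decoupled through `parse_problem`, which `solve` calls internally. A sketch mirroring the docstring examples above:

```julia
using LinearAlgebra
using StructuredOptimization, ProximalAlgorithms

x = Variable(4)
A, b = randn(10, 4), randn(10)
p = problem(ls(A*x - b), norm(x) <= 1)

# one-shot: parse the terms and run the chosen algorithm
vars, it = solve(p, ProximalAlgorithms.ForwardBackward())

# or only parse, e.g. to inspect what gets handed to the solver
vars, kwargs = parse_problem(p, ProximalAlgorithms.ZeroFPR())
keys(kwargs)  # :g, plus :f / :A depending on the problem structure
```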
diff --git a/src/solvers/solvers.jl b/src/solvers/solvers.jl
deleted file mode 100644
index bf2a2b6..0000000
--- a/src/solvers/solvers.jl
+++ /dev/null
@@ -1,9 +0,0 @@
-# problem parsing
-include("terms_extract.jl")
-include("terms_properties.jl")
-include("terms_splitting.jl")
-
-# solver calls
-include("solvers_options.jl")
-include("build_solve.jl")
-include("minimize.jl")
diff --git a/src/solvers/solvers_options.jl b/src/solvers/solvers_options.jl
index 9762863..b104602 100644
--- a/src/solvers/solvers_options.jl
+++ b/src/solvers/solvers_options.jl
@@ -1,105 +1,9 @@
-abstract type Solver end
+using ProximalAlgorithms
 
-abstract type ForwardBackwardSolver <: Solver end
+const ForwardBackwardSolver = Union{
+    ProximalAlgorithms.ForwardBackward,
+    ProximalAlgorithms.ZeroFPR,
+    ProximalAlgorithms.PANOC,
+}
 
-export Solver, ForwardBackwardSolver
-
-################################################################################
-export PG, FPG
-
-"""
-`PG(;kwargs...)`
-
-Creates an object `PG` containing the options of the Proximal Gradient solvers:
-
-  * `gamma`, stepsize (default: unspecified, determined automatically)
-  * `maxit`, maximum number of iteration (default: `10000`)
-  * `tol`, halting tolerance on the fixed-point residual (default: `1e-4`)
-  * `adaptive`, adaptively adjust `gamma` (default: `false` if `gamma` is provided)
-  * `fast`, enables accelerated method (default: `false`)
-  * `verbose`, verbosity level (default: `1`)
-  * `verbose_freq`, verbosity frequency for `verbose = 1` (default: `100`)
-
-"""
-struct PG <: ForwardBackwardSolver
-    kwargs::Iterators.Pairs
-    function PG(; kwargs...)
-        new(kwargs)
-    end
-end
-
-"""
-`FPG(;kwargs...)`
-
-Same as `PG`, creates the options of the Fast Proximal Gradient solver.
-
-"""
-function FPG(; kwargs...)
-    return PG(; kwargs..., fast=true)
-end
-
-function build_iterator(x, solver::PG; kwargs...)
-    x, ProximalAlgorithms.FBSIterator(~x; solver.kwargs..., kwargs...)
-end
-
-################################################################################
-export ZeroFPR
-
-"""
-`ZeroFPR(;kwargs...)`
-
-Creates an object `ZeroFPR` containing the options of the ZeroFPR solver:
-
-  * `gamma`, stepsize (default: unspecified, determined automatically)
-  * `maxit`, maximum number of iteration (default: `10000`)
-  * `tol`, halting tolerance on the fixed-point residual (default: `1e-4`)
-  * `adaptive`, adaptively adjust `gamma` (default: `false` if `gamma` is provided)
-  * `fast`, enables accelerated method (default: `false`)
-  * `verbose`, verbosity level (default: `1`)
-  * `verbose_freq`, verbosity frequency for `verbose = 1` (default: `100`)
-  * `memory`, memory of the `LBFGS` operator (default: `10` )
-
-"""
-struct ZeroFPR <: ForwardBackwardSolver
-    kwargs::Iterators.Pairs
-    function ZeroFPR(; kwargs...)
-        new(kwargs)
-    end
-end
-
-function build_iterator(x, solver::ZeroFPR; kwargs...)
-    x, ProximalAlgorithms.ZeroFPRIterator(~x; solver.kwargs..., kwargs...)
-end
-
-################################################################################
-export PANOC
-
-"""
-`ZeroFPR(;kwargs...)`
-
-Creates an object `PANOC` containing the options of the PANOC solver:
-
-  * `gamma`, stepsize (default: unspecified, determined automatically)
-  * `maxit`, maximum number of iteration (default: `10000`)
-  * `tol`, halting tolerance on the fixed-point residual (default: `1e-4`)
-  * `adaptive`, adaptively adjust `gamma` (default: `false` if `gamma` is provided)
-  * `fast`, enables accelerated method (default: `false`)
-  * `verbose`, verbosity level (default: `1`)
-  * `verbose_freq`, verbosity frequency for `verbose = 1` (default: `100`)
-  * `memory`, memory of the `LBFGS` operator (default: `10` )
-
-"""
-struct PANOC <: ForwardBackwardSolver
-    kwargs::Iterators.Pairs
-    function PANOC(; kwargs...)
-        new(kwargs)
-    end
-end
-
-function build_iterator(x, solver::PANOC; kwargs...)
-    x, ProximalAlgorithms.PANOCIterator(~x; solver.kwargs..., kwargs...)
-end
-
-default_solver = PANOC
-
-################################################################################
+const default_solver = ProximalAlgorithms.PANOC
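Since `ForwardBackwardSolver` is now just a `Union` of the three ProximalAlgorithms types, solver objects are constructed directly and a single method signature covers them all. A quick sketch (the union and `default_solver` are internal constants, hence the qualified names; how `@minimize` invokes the default is an assumption about `minimize.jl`):

```julia
using StructuredOptimization, ProximalAlgorithms

for solver in (ProximalAlgorithms.ForwardBackward(fast = true),
               ProximalAlgorithms.ZeroFPR(),
               ProximalAlgorithms.PANOC())
    # every supported algorithm matches the union used for dispatch
    @assert solver isa StructuredOptimization.ForwardBackwardSolver
end

# the default picked when no `with ...` clause is given
StructuredOptimization.default_solver()  # constructs a PANOC solver
```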
diff --git a/test/runtests.jl b/test/runtests.jl
index 04ebeae..b6731bd 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -27,7 +27,7 @@ end
   include("test_build_minimize.jl")
 end
 
-@testset "Integration tests" begin
+@testset "End-to-end tests" begin
   include("test_usage_small.jl")
   include("test_usage.jl")
 end
diff --git a/test/test_build_minimize.jl b/test/test_build_minimize.jl
index 0e8f717..83bacf4 100644
--- a/test/test_build_minimize.jl
+++ b/test/test_build_minimize.jl
@@ -1,4 +1,4 @@
-println("\nTesting solver build \n")
+using ProximalAlgorithms
 
 x = Variable(10)
 A = randn(5, 10)
@@ -6,25 +6,16 @@ y = Variable(7)
 B = randn(5, 7)
 b = randn(5)
 
-prob = problem(ls(A*x + b), norm(x, 2) <= 1.0)
-built_slv = build(prob, StructuredOptimization.PG())
-solve!(built_slv)
-
-~x .= 0.
-prob = problem(ls(A*x - B*y + b) + norm(y, 1), norm(x, 2) <= 1.0)
-built_slv = build(prob, FPG())
-solve!(built_slv)
-
 println("\nTesting @minimize \n")
 ~x .= 0.
 ~y .= 0.
-slv, = @minimize ls(A*x - B*y + b) st norm(x, 2) <= 1e4, norm(y, 1) <= 1.0 with PG()
+slv, = @minimize ls(A*x - B*y + b) st norm(x, 2) <= 1e4, norm(y, 1) <= 1.0 with ProximalAlgorithms.ForwardBackward()
 ~x .= 0.
-slv, = @minimize ls(A*x - b) st norm(x, 1) <= 1.0 with PG()
+slv, = @minimize ls(A*x - b) st norm(x, 1) <= 1.0 with ProximalAlgorithms.ForwardBackward()
 ~x .= 0.
 slv, = @minimize ls(A*x - b) st norm(x, 1) <= 1.0
 ~x .= 0.
-slv, = @minimize ls(A*x - b) + norm(x, 1) with PG()
+slv, = @minimize ls(A*x - b) + norm(x, 1) with ProximalAlgorithms.ForwardBackward()
 ~x .= 0.
 slv, = @minimize ls(A*x - b) + norm(x, 1)
 ~x .= 0.
@@ -36,23 +27,22 @@ A = randn(10, 5)
 b = randn(10)
 
 println("\nTesting @minimize nonlinear \n")
-slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with PG()
+slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with ProximalAlgorithms.ForwardBackward(tol = 1e-6)
 xpg = copy(~x)
 ~x .= 0.
-slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with ZeroFPR()
+slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with ProximalAlgorithms.ZeroFPR(tol = 1e-6)
 xz = copy(~x)
 ~x .= 0.
-slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with PANOC()
+slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with ProximalAlgorithms.PANOC(tol = 1e-6)
 xp = copy(~x)
 ~x .= 0.
 
-@test norm(xz-xpg) <1e-4
-@test norm(xp-xpg) <1e-4
+@test norm(xz-xpg) <= 1e-4
+@test norm(xp-xpg) <= 1e-4
 
 # test nonconvex Rosenbrock function with known minimum
-solvers = ["ZeroFPR(tol = 1e-6)","PANOC(tol = 1e-6)"]
-for slv in solvers
-    solver = eval(Meta.parse(slv))
+solvers = [ProximalAlgorithms.ZeroFPR(tol = 1e-6), ProximalAlgorithms.PANOC(tol = 1e-6)]
+for solver in solvers
     x = Variable(1)
     y = Variable(1)
     a,b = 2.0, 100.0
diff --git a/test/test_usage.jl b/test/test_usage.jl
index 06fa709..262f615 100644
--- a/test/test_usage.jl
+++ b/test/test_usage.jl
@@ -21,8 +21,7 @@ x1_fpg = Variable(n1)
 x2_fpg = Variable(n2)
 expr = ls(A1*x1_fpg + A2*x2_fpg - b) + lam1*norm(x1_fpg, 1) + lam2*norm(x2_fpg, 2)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.FPG(tol=1e-10,verbose=0,maxit=20000))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(fast=true, tol=1e-10, verbose=false, maxit=20000))
 
 
 # Solve with ZeroFPR
@@ -30,8 +29,7 @@ x1_zerofpr = Variable(n1)
 x2_zerofpr = Variable(n2)
 expr = ls(A1*x1_zerofpr + A2*x2_zerofpr - b) + lam1*norm(x1_zerofpr, 1) + lam2*norm(x2_zerofpr, 2)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.ZeroFPR(tol=1e-10,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ZeroFPR(tol=1e-10, verbose=false))
 
 
 # Solve with PANOC
@@ -39,15 +37,13 @@ x1_panoc = Variable(n1)
 x2_panoc = Variable(n2)
 expr = ls(A1*x1_panoc + A2*x2_panoc - b) + lam1*norm(x1_panoc, 1) + lam2*norm(x2_panoc, 2)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.PANOC(tol=1e-10,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.PANOC(tol=1e-10, verbose=false))
 
 # Solve with minimize, use default solver/options
 
 x1 = Variable(n1)
 x2 = Variable(n2)
 @time sol = @minimize ls(A1*x1 + A2*x2 - b) + lam1*norm(x1, 1) + lam2*norm(x2, 2)
-println(sol)
 
 @test norm(~x1_fpg - ~x1_zerofpr, Inf)/(1+norm(~x1_zerofpr, Inf)) <= 1e-6
 @test norm(~x2_fpg - ~x2_zerofpr, Inf)/(1+norm(~x2_zerofpr, Inf)) <= 1e-6
@@ -90,8 +86,7 @@ b = A*x_star + A'\y_star
 x_pg = Variable(n)
 expr = ls(A*x_pg - b) + lam*norm(x_pg, 1)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.PG(tol=1e-10,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(tol=1e-10, verbose=false))
 
 @test norm(~x_pg - x_star, Inf) <= 1e-8
 @test norm(A'*(A*~x_pg - b) + lam*sign.(~x_pg)) <= 1e-6
@@ -101,8 +96,7 @@ println(sol)
 x_fpg = Variable(n)
 expr = ls(A*x_fpg - b) + lam*norm(x_fpg, 1)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.FPG(tol=1e-10,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(fast=true, tol=1e-10, verbose=false))
 
 @test norm(~x_fpg - x_star, Inf) <= 1e-8
 @test norm(A'*(A*~x_fpg - b) + lam*sign.(~x_fpg)) <= 1e-6
@@ -112,8 +106,7 @@ println(sol)
 x_zerofpr = Variable(n)
 expr = ls(A*x_zerofpr - b) + lam*norm(x_zerofpr, 1)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.ZeroFPR(tol=1e-10,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ZeroFPR(tol=1e-10, verbose=false))
 
 @test norm(~x_zerofpr - x_star, Inf) <= 1e-8
 @test norm(A'*(A*~x_zerofpr - b) + lam*sign.(~x_zerofpr)) <= 1e-5
@@ -123,8 +116,7 @@ println(sol)
 x_panoc = Variable(n)
 expr = ls(A*x_panoc - b) + lam*norm(x_panoc, 1)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.PANOC(tol=1e-10,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.PANOC(tol=1e-10, verbose=false))
 
 @test norm(~x_panoc - x_star, Inf) <= 1e-8
 @test norm(A'*(A*~x_panoc - b) + lam*sign.(~x_panoc)) <= 1e-5
@@ -147,42 +139,38 @@ b = A*x_orig + randn(m)
 x_pg = Variable(n)
 expr = smooth(norm(A*x_pg - b, 2)) + lam*norm(x_pg, 1)
 prob = problem(expr)
-@time sol = solve(prob, PG(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(tol=1e-6, verbose=false))
 
 # Solve with FPG
 
 x_fpg = Variable(n)
 expr = smooth(norm(A*x_fpg - b, 2)) + lam*norm(x_fpg, 1)
 prob = problem(expr)
-@time sol = solve(prob, FPG(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(fast=true, tol=1e-6, verbose=false))
 
 # Solve with ZeroFPR
 
 x_zerofpr = Variable(n)
 expr = smooth(norm(A*x_zerofpr - b, 2)) + lam*norm(x_zerofpr, 1)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.ZeroFPR(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ZeroFPR(tol=1e-6, verbose=false))
 
 # Solve with PANOC
 
 x_panoc = Variable(n)
 expr = smooth(norm(A*x_panoc - b, 2)) + lam*norm(x_panoc, 1)
 prob = problem(expr)
-@time sol = solve(prob, StructuredOptimization.PANOC(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.PANOC(tol=1e-6, verbose=false))
 
 # Solve with minimize, default solver/options
 
 x = Variable(n)
 @time sol = @minimize smooth(norm(A*x - b, 2)) + lam*norm(x, 1)
-println(sol)
 
-@test norm(~x_fpg - ~x_zerofpr, Inf)/(1+norm(~x_zerofpr, Inf)) <= 1e-6
-@test norm(~x_fpg - ~x_panoc, Inf)/(1+norm(~x_panoc, Inf)) <= 1e-6
-@test norm(~x - ~x_zerofpr, Inf)/(1+norm(~x_zerofpr, Inf)) <= 1e-3
+@test norm(~x_pg - ~x_fpg, Inf)/(1+norm(~x_pg, Inf)) <= 1e-4
+@test norm(~x_pg - ~x_zerofpr, Inf)/(1+norm(~x_pg, Inf)) <= 1e-4
+@test norm(~x_pg - ~x_panoc, Inf)/(1+norm(~x_pg, Inf)) <= 1e-4
+@test norm(~x_pg - ~x, Inf)/(1+norm(~x_pg, Inf)) <= 1e-3
 
 ################################################################################
 ### Box-constrained least-squares
@@ -202,50 +190,45 @@ b = A*x_orig + randn(m)
 x_pg = Variable(n)
 expr = ls(A*x_pg - b)
 prob = problem(expr, x_pg in [lb, ub])
-@time sol = solve(prob, StructuredOptimization.PG(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(tol=1e-6, verbose=false))
 
 @test norm(~x_pg - max.(lb, min.(ub, ~x_pg)), Inf) <= 1e-12
-@test norm(~x_pg - max.(lb, min.(ub, ~x_pg - A'*(A*~x_pg - b))), Inf)/(1+norm(~x_pg, Inf)) <= 1e-8
+@test norm(~x_pg - max.(lb, min.(ub, ~x_pg - A'*(A*~x_pg - b))), Inf)/(1+norm(~x_pg, Inf)) <= 1e-6
 
 # Solve with FPG
 
 x_fpg = Variable(n)
 expr = ls(A*x_fpg - b)
 prob = problem(expr, x_fpg in [lb, ub])
-@time sol = solve(prob, StructuredOptimization.FPG(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(fast=true, tol=1e-6, verbose=false))
 
 @test norm(~x_fpg - max.(lb, min.(ub, ~x_fpg)), Inf) <= 1e-12
-@test norm(~x_fpg - max.(lb, min.(ub, ~x_fpg - A'*(A*~x_fpg - b))), Inf)/(1+norm(~x_fpg, Inf)) <= 1e-8
+@test norm(~x_fpg - max.(lb, min.(ub, ~x_fpg - A'*(A*~x_fpg - b))), Inf)/(1+norm(~x_fpg, Inf)) <= 1e-6
 
 # Solve with ZeroFPR
 
 x_zerofpr = Variable(n)
 expr = ls(A*x_zerofpr - b)
 prob = problem(expr, x_zerofpr in [lb, ub])
-@time sol = solve(prob, StructuredOptimization.ZeroFPR(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ZeroFPR(tol=1e-6, verbose=false))
 
 @test norm(~x_zerofpr - max.(lb, min.(ub, ~x_zerofpr)), Inf) <= 1e-12
-@test norm(~x_zerofpr - max.(lb, min.(ub, ~x_zerofpr - A'*(A*~x_zerofpr - b))), Inf)/(1+norm(~x_zerofpr, Inf)) <= 1e-8
+@test norm(~x_zerofpr - max.(lb, min.(ub, ~x_zerofpr - A'*(A*~x_zerofpr - b))), Inf)/(1+norm(~x_zerofpr, Inf)) <= 1e-6
 
 # Solve with PANOC
 
 x_panoc = Variable(n)
 expr = ls(A*x_panoc - b)
 prob = problem(expr, x_panoc in [lb, ub])
-@time sol = solve(prob, StructuredOptimization.PANOC(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.PANOC(tol=1e-6, verbose=false))
 
 @test norm(~x_panoc - max.(lb, min.(ub, ~x_panoc)), Inf) <= 1e-12
-@test norm(~x_panoc - max.(lb, min.(ub, ~x_panoc - A'*(A*~x_panoc - b))), Inf)/(1+norm(~x_panoc, Inf)) <= 1e-8
+@test norm(~x_panoc - max.(lb, min.(ub, ~x_panoc - A'*(A*~x_panoc - b))), Inf)/(1+norm(~x_panoc, Inf)) <= 1e-6
 
 # Solve with minimize, default solver/options
 
 x = Variable(n)
 @time sol = @minimize ls(A*x - b) st x in [lb, ub]
-println(sol)
 
 @test norm(~x - max.(lb, min.(ub, ~x)), Inf) <= 1e-12
 @test norm(~x - max.(lb, min.(ub, ~x - A'*(A*~x - b))), Inf)/(1+norm(~x, Inf)) <= 1e-4
@@ -281,8 +264,7 @@ b = A*x_star + A'\y_star
 x_pg = Variable(n)
 expr = ls(A*x_pg - b)
 prob = problem(expr, x_pg >= 0.0)
-@time sol = solve(prob, StructuredOptimization.PG(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(tol=1e-8, verbose=false))
 
 @test all(~x_pg .>= 0.0)
 @test norm(~x_pg - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8
@@ -292,8 +274,7 @@ println(sol)
 x_fpg = Variable(n)
 expr = ls(A*x_fpg - b)
 prob = problem(expr, x_fpg >= 0.0)
-@time sol = solve(prob, StructuredOptimization.FPG(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ForwardBackward(fast=true, tol=1e-8, verbose=false))
 
 @test all(~x_fpg .>= 0.0)
 @test norm(~x_fpg - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8
@@ -303,8 +284,7 @@ println(sol)
 x_zerofpr = Variable(n)
 expr = ls(A*x_zerofpr - b)
 prob = problem(expr, x_zerofpr >= 0.0)
-@time sol = solve(prob, StructuredOptimization.ZeroFPR(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.ZeroFPR(tol=1e-8, verbose=false))
 
 @test all(~x_zerofpr .>= 0.0)
 @test norm(~x_zerofpr - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8
@@ -314,17 +294,15 @@ println(sol)
 x_panoc = Variable(n)
 expr = ls(A*x_panoc - b)
 prob = problem(expr, x_panoc >= 0.0)
-@time sol = solve(prob, StructuredOptimization.PANOC(tol=1e-8,verbose=0))
-println(sol)
+@time sol = solve(prob, ProximalAlgorithms.PANOC(tol=1e-8, verbose=false))
 
-@test all(~x_zerofpr .>= 0.0)
-@test norm(~x_zerofpr - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8
+@test all(~x_panoc .>= 0.0)
+@test norm(~x_panoc - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8
 
 # Solve with minimize, default solver/options
 
 x = Variable(n)
 @time sol = @minimize ls(A*x - b) st x >= 0.0
-println(sol)
 
 @test all(~x .>= 0.0)
 @test norm(~x - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-6
diff --git a/test/test_usage_small.jl b/test/test_usage_small.jl
index 1e3582e..d8d881b 100644
--- a/test/test_usage_small.jl
+++ b/test/test_usage_small.jl
@@ -3,12 +3,12 @@ b = randn(3)
 
 x_pg = Variable(5)
 prob_pg = problem(ls(A*x_pg - b) + 1e-3*norm(x_pg, 1))
-sol_pg = solve(prob_pg, StructuredOptimization.PG())
+sol_pg = solve(prob_pg, ProximalAlgorithms.ForwardBackward())
 
 x_zfpr = Variable(5)
 prob_zfpr = problem(ls(A*x_zfpr - b) + 1e-3*norm(x_zfpr, 1))
-sol_zfpr = solve(prob_zfpr, StructuredOptimization.ZeroFPR())
+sol_zfpr = solve(prob_zfpr, ProximalAlgorithms.ZeroFPR())
 
 x_pnc = Variable(5)
 prob_pnc = problem(ls(A*x_pnc - b) + 1e-3*norm(x_pnc, 1))
-sol_pnc = solve(prob_pnc, StructuredOptimization.PANOC())
+sol_pnc = solve(prob_pnc, ProximalAlgorithms.PANOC())