diff --git a/benchmark/Manifest.toml b/benchmark/Manifest.toml
index fa3aef6e..ad9a0b7b 100644
--- a/benchmark/Manifest.toml
+++ b/benchmark/Manifest.toml
@@ -2,11 +2,11 @@
 
 julia_version = "1.9.1"
 manifest_format = "2.0"
-project_hash = "14fe4b55e0aa680d5c90f646c1a87c8fc8737479"
+project_hash = "0cb0756144aac73ae8e2d06d9a0f6567a7a2f964"
 
 [[deps.ADNLPModels]]
 deps = ["ADTypes", "ForwardDiff", "LinearAlgebra", "NLPModels", "Requires", "ReverseDiff", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings"]
-git-tree-sha1 = "ad4682ad3f6da4246a5a5408593e5824d949e5a0"
+git-tree-sha1 = "2b582670fb51216d8d000c6de72934d1f68c4e7c"
 repo-rev = "main"
 repo-url = "https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl"
 uuid = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
index d561c1ab..a3e2c200 100644
--- a/benchmark/Project.toml
+++ b/benchmark/Project.toml
@@ -1,6 +1,5 @@
 [deps]
 ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
-BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
@@ -13,7 +12,6 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
 OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
-Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"
diff --git a/benchmark/benchmark_analyzer/Project.toml b/benchmark/benchmark_analyzer/Project.toml
new file mode 100644
index 00000000..ecbdd021
--- /dev/null
+++ b/benchmark/benchmark_analyzer/Project.toml
@@ -0,0 +1,9 @@
+[deps]
+BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
+JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
+JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
+SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"
+StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
index 90a575f6..225fae26 100644
--- a/benchmark/benchmarks.jl
+++ b/benchmark/benchmarks.jl
@@ -1,13 +1,14 @@
 # Include useful packages
 using ADNLPModels
 using Dates, DelimitedFiles, JLD2, LinearAlgebra, Printf, SparseArrays
-using BenchmarkTools, DataFrames, Plots
+using BenchmarkTools, DataFrames
 #JSO packages
-using NLPModels, BenchmarkProfiles, OptimizationProblems, SolverBenchmark
+using NLPModels, OptimizationProblems, SolverBenchmark
 # Most likely benchmark with JuMP as well
 using JuMP, NLPModelsJuMP
 
 include("problems_sets.jl")
+verbose_subbenchmark = false
 
 # Run locally with `tune!(SUITE)` and then `run(SUITE)`
 const SUITE = BenchmarkGroup()
diff --git a/benchmark/gradient/benchmarks_gradient.jl b/benchmark/gradient/benchmarks_gradient.jl
index e8d907e5..56caf700 100644
--- a/benchmark/gradient/benchmarks_gradient.jl
+++ b/benchmark/gradient/benchmarks_gradient.jl
@@ -51,7 +51,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
       g = zeros(T, n)
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $g) setup =
         (nlp = set_adnlp($pb, $(name_backend), $(backend), $nscal, $T))
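[Editor's note — illustrative example, not part of the patch.] The hunks above and below all apply the same gating idiom: `verbose_subbenchmark` is a plain Bool defined once in benchmarks.jl, and short-circuit `&&` only evaluates the `@info` call when the flag is true, so flipping one variable silences the per-problem logging. A minimal standalone sketch, with made-up problem data:

    verbose_subbenchmark = false
    pb, T, n, m = "woods", Float64, 100, 0   # illustrative values, not from the patch
    verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"  # no-op: flag is false

    verbose_subbenchmark = true
    verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"  # now logs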
diff --git a/benchmark/hessian/benchmarks_coloring.jl b/benchmark/hessian/benchmarks_coloring.jl
index 3ff43148..de953c0c 100644
--- a/benchmark/hessian/benchmarks_coloring.jl
+++ b/benchmark/hessian/benchmarks_coloring.jl
@@ -49,7 +49,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
       SUITE["$(fun)"][f][T][s][b][pb] =
         @benchmarkable set_adnlp($pb, $(name_backend), $backend, $nscal, $T)
     end
diff --git a/benchmark/hessian/benchmarks_hessian.jl b/benchmark/hessian/benchmarks_hessian.jl
index 53be8982..dc2368e7 100644
--- a/benchmark/hessian/benchmarks_hessian.jl
+++ b/benchmark/hessian/benchmarks_hessian.jl
@@ -40,7 +40,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars"
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp)) setup =
         (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
     end
diff --git a/benchmark/hessian/benchmarks_hessian_lagrangian.jl b/benchmark/hessian/benchmarks_hessian_lagrangian.jl
index 7801657c..daa742c6 100644
--- a/benchmark/hessian/benchmarks_hessian_lagrangian.jl
+++ b/benchmark/hessian/benchmarks_hessian_lagrangian.jl
@@ -40,7 +40,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
       y = 10 * T[-(-1.0)^i for i = 1:m]
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y) setup =
         (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
diff --git a/benchmark/hessian/benchmarks_hessian_residual.jl b/benchmark/hessian/benchmarks_hessian_residual.jl
index 9fc8467a..7058ad50 100644
--- a/benchmark/hessian/benchmarks_hessian_residual.jl
+++ b/benchmark/hessian/benchmarks_hessian_residual.jl
@@ -41,7 +41,7 @@ for f in benchmark_list
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
       nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
-      @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
       v = 10 * T[-(-1.0)^i for i = 1:nequ]
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls), $v) setup =
         (nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
diff --git a/benchmark/hessian/benchmarks_hprod.jl b/benchmark/hessian/benchmarks_hprod.jl
index 99f7cac9..38049ed2 100644
--- a/benchmark/hessian/benchmarks_hprod.jl
+++ b/benchmark/hessian/benchmarks_hprod.jl
@@ -38,7 +38,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
      m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars"
       v = [sin(T(i) / 10) for i = 1:n]
       Hv = Vector{T}(undef, n)
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Hv) setup =
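[Editor's note — illustrative example, not part of the patch.] Every entry above follows the same BenchmarkTools pattern: the model is rebuilt inside `setup`, so its construction cost is excluded from the timing, and `$`-interpolation freezes values into the benchmark expression at definition time. A self-contained sketch of that pattern — the quartic objective and the sizes are invented here; only the shape of the call mirrors the patch:

    using BenchmarkTools, ADNLPModels, NLPModels

    T, n = Float64, 100
    v = [sin(T(i) / 10) for i = 1:n]   # same direction-vector recipe as the hunk above
    Hv = Vector{T}(undef, n)           # preallocated output for the in-place product
    bench = @benchmarkable hprod!(nlp, get_x0(nlp), $v, $Hv) setup =
      (nlp = ADNLPModel(x -> sum(x .^ 4), ones($T, $n)))
    run(bench)                         # nlp is rebuilt per sample, outside the timing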
eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))")) m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))")) - @info " $(pb): $T with $n vars" + verbose_subbenchmark && @info " $(pb): $T with $n vars" v = [sin(T(i) / 10) for i = 1:n] Hv = Vector{T}(undef, n) SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Hv) setup = diff --git a/benchmark/hessian/benchmarks_hprod_lagrangian.jl b/benchmark/hessian/benchmarks_hprod_lagrangian.jl index daee07f1..3f9dd730 100644 --- a/benchmark/hessian/benchmarks_hprod_lagrangian.jl +++ b/benchmark/hessian/benchmarks_hprod_lagrangian.jl @@ -38,7 +38,7 @@ for f in benchmark_list for pb in problem_sets[s] n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))")) m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))")) - @info " $(pb): $T with $n vars" + verbose_subbenchmark && @info " $(pb): $T with $n vars" y = 10 * T[-(-1.0)^i for i = 1:m] v = [sin(T(i) / 10) for i = 1:n] Hv = Vector{T}(undef, n) diff --git a/benchmark/jacobian/benchmarks_coloring.jl b/benchmark/jacobian/benchmarks_coloring.jl index 69974255..bf0e7023 100644 --- a/benchmark/jacobian/benchmarks_coloring.jl +++ b/benchmark/jacobian/benchmarks_coloring.jl @@ -49,7 +49,7 @@ for f in benchmark_list for pb in problem_sets[s] n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))")) m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))")) - @info " $(pb): $T with $n vars and $m cons" + verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons" SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable set_adnlp($pb, $(name_backend), $backend, $nscal, $T) end diff --git a/benchmark/jacobian/benchmarks_jacobian.jl b/benchmark/jacobian/benchmarks_jacobian.jl index 16f9b5a1..402653a0 100644 --- a/benchmark/jacobian/benchmarks_jacobian.jl +++ b/benchmark/jacobian/benchmarks_jacobian.jl @@ -36,7 +36,7 @@ for f in benchmark_list for pb in problem_sets[s] n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))")) m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))")) - @info " $(pb): $T with $n vars and $m cons" + verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons" SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp)) setup = (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T)) end diff --git a/benchmark/jacobian/benchmarks_jacobian_residual.jl b/benchmark/jacobian/benchmarks_jacobian_residual.jl index 24e11e02..a2fb6f89 100644 --- a/benchmark/jacobian/benchmarks_jacobian_residual.jl +++ b/benchmark/jacobian/benchmarks_jacobian_residual.jl @@ -37,7 +37,7 @@ for f in benchmark_list n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))")) m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))")) nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))")) - @info " $(pb): $T with $n vars, $nequ residuals and $m cons" + verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons" SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls)) setup = (nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T)) end diff --git a/benchmark/jacobian/benchmarks_jprod.jl b/benchmark/jacobian/benchmarks_jprod.jl index b5b6e315..08f20b56 100644 --- a/benchmark/jacobian/benchmarks_jprod.jl +++ b/benchmark/jacobian/benchmarks_jprod.jl @@ -38,7 +38,7 @@ for f in benchmark_list for 
diff --git a/benchmark/jacobian/benchmarks_jprod.jl b/benchmark/jacobian/benchmarks_jprod.jl
index b5b6e315..08f20b56 100644
--- a/benchmark/jacobian/benchmarks_jprod.jl
+++ b/benchmark/jacobian/benchmarks_jprod.jl
@@ -38,7 +38,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
       Jv = Vector{T}(undef, m)
       v = 10 * T[-(-1.0)^i for i = 1:n]
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
diff --git a/benchmark/jacobian/benchmarks_jprod_residual.jl b/benchmark/jacobian/benchmarks_jprod_residual.jl
index 810fc400..77208055 100644
--- a/benchmark/jacobian/benchmarks_jprod_residual.jl
+++ b/benchmark/jacobian/benchmarks_jprod_residual.jl
@@ -39,7 +39,7 @@ for f in benchmark_list
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
       nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
-      @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
       Jv = Vector{T}(undef, nequ)
       v = 10 * T[-(-1.0)^i for i = 1:n]
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
diff --git a/benchmark/jacobian/benchmarks_jtprod.jl b/benchmark/jacobian/benchmarks_jtprod.jl
index aa4eb9b0..927b3eb0 100644
--- a/benchmark/jacobian/benchmarks_jtprod.jl
+++ b/benchmark/jacobian/benchmarks_jtprod.jl
@@ -38,7 +38,7 @@ for f in benchmark_list
     for pb in problem_sets[s]
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-      @info " $(pb): $T with $n vars and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
       Jtv = Vector{T}(undef, n)
       v = 10 * T[-(-1.0)^i for i = 1:m]
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jtv) setup =
diff --git a/benchmark/jacobian/benchmarks_jtprod_residual.jl b/benchmark/jacobian/benchmarks_jtprod_residual.jl
index aa6e64a4..144455bd 100644
--- a/benchmark/jacobian/benchmarks_jtprod_residual.jl
+++ b/benchmark/jacobian/benchmarks_jtprod_residual.jl
@@ -39,7 +39,7 @@ for f in benchmark_list
       n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
       m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
       nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
-      @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
+      verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
       Jtv = Vector{T}(undef, n)
       v = 10 * T[-(-1.0)^i for i = 1:nequ]
       SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jtv) setup =
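[Editor's note — illustrative example, not part of the patch.] run_analyzer.jl below consumes two artifacts per run: a JLD2 file holding the `result` BenchmarkGroup and a JSON file written by BenchmarkTools. The round-trip it relies on looks roughly like this; the file names are invented, and note that `BenchmarkTools.load` returns a one-element vector:

    using BenchmarkTools, JLD2

    suite = BenchmarkGroup()
    suite["demo"] = @benchmarkable sum(rand(100))
    tune!(suite)
    result = run(suite)

    @save "demo.jld2" result                  # JLD2: preserves the Julia objects
    BenchmarkTools.save("demo.json", result)  # JSON: portable across Julia versions

    @load "demo.jld2" result                  # rebinds `result` in the current scope
    t = BenchmarkTools.load("demo.json")[1]   # note the trailing [1]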
diff --git a/benchmark/run_analyzer.jl b/benchmark/run_analyzer.jl
new file mode 100644
index 00000000..adbe5328
--- /dev/null
+++ b/benchmark/run_analyzer.jl
@@ -0,0 +1,66 @@
+using Pkg
+Pkg.activate("benchmark/benchmark_analyzer")
+Pkg.instantiate()
+# StatsPlots re-exports Plots; DataFrames is needed by bg_to_df below
+using BenchmarkTools, DataFrames, Dates, JLD2, JSON, StatsPlots
+
+# name of the result file (leave empty to pick the most recent file in results/):
+name = ""
+resultpath = joinpath(dirname(@__FILE__), "results")
+if name == ""
+  name = replace(readdir(resultpath)[end], ".jld2" => "", ".json" => "")
+end
+
+@load joinpath(dirname(@__FILE__), "results", "$name.jld2") result
+t = BenchmarkTools.load(joinpath(dirname(@__FILE__), "results", "$name.json"))
+
+# plots
+plot(t) # You can use all the keyword arguments from Plots.jl, for instance st=:box or yaxis=:log10.
+
+# Turn one BenchmarkGroup (solver => problem => Trial) into one DataFrame per solver.
+function bg_to_df(bench::BenchmarkGroup)
+  solvers = collect(keys(bench)) # "jump", ...
+  nsolvers = length(solvers)
+  problems = collect(keys(bench[solvers[1]]))
+  nprob = length(problems)
+  dfT = Dict{Symbol, DataFrame}()
+  for solver in solvers
+    dfT[Symbol(solver)] = DataFrame(
+      [
+        [median(bench[solver][pb]).time for pb in problems],
+        [median(bench[solver][pb]).memory for pb in problems],
+      ],
+      [:median_time, :median_memory],
+    )
+  end
+  return dfT
+end
+
+@info "Available benchmarks"
+df_results = Dict{String, Dict{Symbol, DataFrame}}()
+for benchmark in keys(result)
+  result_bench = result[benchmark] # one NLPModel API function
+  for benchmark_list in keys(result_bench)
+    for type_bench in keys(result_bench[benchmark_list])
+      for set_bench in keys(result_bench[benchmark_list][type_bench])
+        @info "$benchmark/$benchmark_list for type $type_bench on problem set $(set_bench)"
+        bench = result_bench[benchmark_list][type_bench][set_bench]
+        df_results["$(benchmark)_$(benchmark_list)_$(type_bench)_$(set_bench)"] = bg_to_df(bench)
+      end
+    end
+  end
+end
+
+using SolverBenchmark, BenchmarkProfiles
+
+# b::BenchmarkProfiles.AbstractBackend = PlotsBackend()
+costs = [
+  df -> df.median_time,
+  df -> df.median_memory,
+]
+costnames = ["median time", "median memory"]
+for key_benchmark in keys(df_results)
+  stats = df_results[key_benchmark]
+  p = profile_solvers(stats, costs, costnames)
+  savefig(p, "$(name)_$(key_benchmark).png")
+end
diff --git a/benchmark/run_local.jl b/benchmark/run_local.jl
index f700ba1e..b62a7404 100644
--- a/benchmark/run_local.jl
+++ b/benchmark/run_local.jl
@@ -1,24 +1,46 @@
-using Pkg, Logging, JLD2, Dates
+using Pkg
 Pkg.activate("benchmark")
-# instantiate
-# up ADNLPModels
+Pkg.instantiate()
+Pkg.update("ADNLPModels")
+using Logging, JLD2, Dates
+path = dirname(@__FILE__)
+skip_tune = true
+
+@info "INITIALIZE"
 include("benchmarks.jl")
+list_of_benchmark = keys(SUITE)
+# gradient: SUITE[@tagged "grad!"]
+# Coloring benchmark: SUITE[@tagged "hessian_backend" || "hessian_residual_backend" || "jacobian_backend" || "jacobian_residual_backend"]
+# Matrix benchmark: SUITE[@tagged "hessian_backend" || "hessian_residual_backend" || "jacobian_backend" || "jacobian_residual_backend" || "hess_coord!" || "hess_coord_residual!" || "jac_coord!" || "jac_coord_residual!"]
+# Matrix-vector products: SUITE[@tagged "hprod!" || "hprod_residual!" || "jprod!" || "jprod_residual!" || "jtprod!" || "jtprod_residual!"]
+
+for benchmark_in_suite in list_of_benchmark
+  @info "$(benchmark_in_suite)"
+end
 
 @info "TUNE"
-@time with_logger(ConsoleLogger(Error)) do # remove warnings
-  tune!(SUITE)
+if !skip_tune
+  @time with_logger(ConsoleLogger(Error)) do
+    tune!(SUITE)
+    BenchmarkTools.save("params.json", params(SUITE))
+  end
+else
+  @info "Skip tuning"
+  # https://juliaci.github.io/BenchmarkTools.jl/dev/manual/
+  BenchmarkTools.DEFAULT_PARAMETERS.evals = 1
 end
 
 @info "RUN"
 @time result = with_logger(ConsoleLogger(Error)) do # remove warnings
-  run(SUITE)
+  if "params.json" in (path == "" ? readdir() : readdir(path))
+    loadparams!(SUITE, BenchmarkTools.load("params.json")[1], :evals, :samples)
+  end
+  run(SUITE, verbose = true)
 end
 
 @info "SAVE BENCHMARK RESULT"
 name = "$(today())_adnlpmodels_benchmark"
 @save "$name.jld2" result
-
-@info "ANALYZE"
-# save the result in a jld2 file?
-# plots
+BenchmarkTools.save("$name.json", result)
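[Editor's note — suggested usage, not part of the patch.] The run and the analysis are two separate steps with separate environments, both started from the repository root. Beware of one path asymmetry in the scripts above: run_local.jl writes its .jld2/.json pair to the current working directory, while run_analyzer.jl reads from benchmark/results/, so the files must be moved there in between (assuming that directory exists):

    # From a Julia session started at the repository root:
    include("benchmark/run_local.jl")          # runs SUITE, writes the .jld2/.json pair to the cwd
    name = "$(today())_adnlpmodels_benchmark"  # same name run_local.jl used; Dates is already loaded
    for ext in (".jld2", ".json")
      mv(name * ext, joinpath("benchmark", "results", name * ext))
    end
    include("benchmark/run_analyzer.jl")       # picks the newest result, saves one PNG profile per group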