Add jprod benchmark (#256)
* Add product benchmarks

save result file

fixes (tested locally)

add hessian residual

* uncomment
tmigot authored Jun 24, 2024
1 parent 5f72299 commit cbeff59
Showing 15 changed files with 551 additions and 277 deletions.
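To reproduce these runs locally, here is a minimal sketch, assuming the `benchmark/` environment is instantiated and that `benchmark/benchmarks.jl` defines the top-level `SUITE`, as the diffs below indicate:

```julia
using BenchmarkTools

# Load the suite definition extended by this commit.
include("benchmark/benchmarks.jl")  # defines SUITE::BenchmarkGroup

# Run only the new Jacobian-vector product group.
results = run(SUITE["jprod!"]; verbose = true)
```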
426 changes: 160 additions & 266 deletions benchmark/Manifest.toml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion benchmark/Project.toml
@@ -25,4 +25,4 @@ Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
-OptimizationProblems = "0.8"
+OptimizationProblems = "0.8"
7 changes: 6 additions & 1 deletion benchmark/benchmarks.jl
@@ -21,4 +21,9 @@ include("jacobian/benchmarks_jacobian_residual.jl")
include("hessian/benchmarks_coloring.jl")
include("hessian/benchmarks_hessian.jl")
include("hessian/benchmarks_hessian_lagrangian.jl")
-# include("hessian/benchmarks_hessian_residual.jl")
+include("hessian/benchmarks_hessian_residual.jl")
+
+include("jacobian/benchmarks_jprod.jl")
+include("jacobian/benchmarks_jprod_residual.jl")
+include("jacobian/benchmarks_jtprod.jl")
+include("jacobian/benchmarks_jtprod_residual.jl")
6 changes: 5 additions & 1 deletion benchmark/hessian/benchmarks_hessian.jl
@@ -3,6 +3,7 @@ INTRODUCTION OF THIS BENCHMARK:
We test here the function `hess_coord!` for ADNLPModels with different backends:
- ADNLPModels.SparseADHessian
+- ADNLPModels.SparseReverseADHessian
=#
using ForwardDiff, SparseConnectivityTracer, SparseMatrixColorings

@@ -12,7 +13,10 @@ data_types = [Float32, Float64]

benchmark_list = [:optimized]

-benchmarked_hessian_backend = Dict("sparse" => ADNLPModels.SparseADHessian)
+benchmarked_hessian_backend = Dict(
+  "sparse" => ADNLPModels.SparseADHessian,
+  "sparse-reverse" => ADNLPModels.SparseReverseADHessian,
+)
get_backend_list(::Val{:optimized}) = keys(benchmarked_hessian_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_hessian_backend[b]

10 changes: 7 additions & 3 deletions benchmark/hessian/benchmarks_hessian_lagrangian.jl
@@ -3,6 +3,7 @@ INTRODUCTION OF THIS BENCHMARK:
We test here the function `hess_coord!` for ADNLPModels with different backends:
- ADNLPModels.SparseADHessian
+- ADNLPModels.SparseReverseADHessian
=#
using ForwardDiff, SparseConnectivityTracer, SparseMatrixColorings

@@ -12,7 +13,10 @@ data_types = [Float32, Float64]

benchmark_list = [:optimized]

-benchmarked_hessian_backend = Dict("sparse" => ADNLPModels.SparseADHessian)
+benchmarked_hessian_backend = Dict(
+  "sparse" => ADNLPModels.SparseADHessian,
+  "sparse-reverse" => ADNLPModels.SparseReverseADHessian,
+)
get_backend_list(::Val{:optimized}) = keys(benchmarked_hessian_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_hessian_backend[b]

@@ -37,8 +41,8 @@ for f in benchmark_list
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          @info " $(pb): $T with $n vars and $m cons"
-          y0 = 10 * T[-(-1.0)^i for i = 1:m]
-          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y0) setup =
+          y = 10 * T[-(-1.0)^i for i = 1:m]
+          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y) setup =
            (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
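For context on the call timed above, here is a hedged toy example of the Lagrangian-Hessian evaluation; the model, multipliers, and values are illustrative and not part of the commit:

```julia
using ADNLPModels, NLPModels

# min x₁² + x₂²  s.t.  x₁x₂ = 0 — a toy constrained model.
nlp = ADNLPModel(x -> x[1]^2 + x[2]^2, ones(2), x -> [x[1] * x[2]], [0.0], [0.0])
x, y = get_x0(nlp), [1.0]

rows, cols = hess_structure(nlp)  # sparsity pattern (lower triangle)
vals = similar(x, length(rows))
hess_coord!(nlp, x, y, vals)      # in-place values of ∇²L(x, y)
```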
52 changes: 52 additions & 0 deletions benchmark/hessian/benchmarks_hessian_residual.jl
@@ -0,0 +1,52 @@
#=
INTRODUCTION OF THIS BENCHMARK:
We test here the function `hess_coord_residual` for ADNLPModels with different backends:
- ADNLPModels.SparseADHessian
- ADNLPModels.SparseReverseADHessian
=#
using ForwardDiff, SparseConnectivityTracer, SparseMatrixColorings

include("additional_backends.jl")

data_types = [Float32, Float64]

benchmark_list = [:optimized]

benchmarked_hessian_backend = Dict(
  "sparse" => ADNLPModels.SparseADHessian,
  # "sparse-reverse" => ADNLPModels.SparseReverseADHessian, # failed
)
get_backend_list(::Val{:optimized}) = keys(benchmarked_hessian_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_hessian_backend[b]

problem_sets = Dict("scalable_nls" => scalable_nls_problems)
nscal = 1000

name_backend = "hessian_residual_backend"
fun = hess_coord_residual
@info "Initialize $(fun) benchmark"
SUITE["$(fun)"] = BenchmarkGroup()

for f in benchmark_list
  SUITE["$(fun)"][f] = BenchmarkGroup()
  for T in data_types
    SUITE["$(fun)"][f][T] = BenchmarkGroup()
    for s in keys(problem_sets)
      SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
      for b in get_backend_list(Val(f))
        SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
        backend = get_backend(Val(f), b)
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
          @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
          v = 10 * T[-(-1.0)^i for i = 1:nequ]
          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls), $v) setup =
            (nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
    end
  end
end
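A hedged sketch of what this new file times: the coordinate values of the weighted residual Hessian Σᵢ vᵢ∇²Fᵢ(x) on a toy least-squares model. The model and weights below are illustrative; only the call signature is taken from the benchmark above:

```julia
using ADNLPModels, NLPModels

F(x) = [x[1] - 1; 10 * (x[2] - x[1]^2)]  # Rosenbrock residuals
nls = ADNLPSModel(F, [-1.2; 1.0], 2)     # 2 residual equations

x = get_x0(nls)
v = ones(2)                              # one weight per residual
vals = hess_coord_residual(nls, x, v)    # values of Σᵢ vᵢ ∇²Fᵢ(x)
```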
50 changes: 50 additions & 0 deletions benchmark/hessian/benchmarks_hprod.jl
@@ -0,0 +1,50 @@
#=
INTRODUCTION OF THIS BENCHMARK:
We test here the function `hprod!` for ADNLPModels with different backends:
- ADNLPModels.ForwardDiffADHvprod
- ADNLPModels.ReverseDiffADHvprod
=#
using ForwardDiff, ReverseDiff

include("additional_backends.jl")

data_types = [Float32, Float64]

benchmark_list = [:optimized]

benchmarked_hprod_backend =
  Dict("forward" => ADNLPModels.ForwardDiffADHvprod, "reverse" => ADNLPModels.ReverseDiffADHvprod)
get_backend_list(::Val{:optimized}) = keys(benchmarked_hprod_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_hprod_backend[b]

problem_sets = Dict("scalable" => scalable_problems)
nscal = 1000

name_backend = "hprod_backend"
fun = hprod!
@info "Initialize $(fun) benchmark"
SUITE["$(fun)"] = BenchmarkGroup()

for f in benchmark_list
  SUITE["$(fun)"][f] = BenchmarkGroup()
  for T in data_types
    SUITE["$(fun)"][f][T] = BenchmarkGroup()
    for s in keys(problem_sets)
      SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
      for b in get_backend_list(Val(f))
        SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
        backend = get_backend(Val(f), b)
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          @info " $(pb): $T with $n vars"
          v = [sin(T(i) / 10) for i = 1:n]
          Hv = Vector{T}(undef, n)
          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Hv) setup =
            (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
    end
  end
end
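For context, a minimal hedged sketch of the benchmarked operation — the in-place Hessian-vector product on an unconstrained toy model (the objective is illustrative):

```julia
using ADNLPModels, NLPModels

nlp = ADNLPModel(x -> sum((x .- 1) .^ 4), zeros(10))  # toy objective
x = get_x0(nlp)
v = ones(10)
Hv = similar(v)
hprod!(nlp, x, v, Hv)  # Hv ← ∇²f(x) v, without forming the Hessian
```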
52 changes: 52 additions & 0 deletions benchmark/hessian/benchmarks_hprod_lagrangian.jl
@@ -0,0 +1,52 @@
#=
INTRODUCTION OF THIS BENCHMARK:
We test here the function `hprod!` for ADNLPModels with different backends:
- ADNLPModels.ForwardDiffADHvprod
- ADNLPModels.ReverseDiffADHvprod
=#
using ForwardDiff, ReverseDiff

include("additional_backends.jl")

data_types = [Float32, Float64]

benchmark_list = [:optimized]

benchmarked_hprod_backend =
  Dict("forward" => ADNLPModels.ForwardDiffADHvprod, "reverse" => ADNLPModels.ReverseDiffADHvprod)
get_backend_list(::Val{:optimized}) = keys(benchmarked_hprod_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_hprod_backend[b]

problem_sets = Dict("scalable_cons" => scalable_cons_problems)
nscal = 1000

name_backend = "hprod_backend"
fun = hprod!
@info "Initialize $(fun) benchmark"
SUITE["$(fun)"] = BenchmarkGroup()

for f in benchmark_list
  SUITE["$(fun)"][f] = BenchmarkGroup()
  for T in data_types
    SUITE["$(fun)"][f][T] = BenchmarkGroup()
    for s in keys(problem_sets)
      SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
      for b in get_backend_list(Val(f))
        SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
        backend = get_backend(Val(f), b)
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          @info " $(pb): $T with $n vars"
          y = 10 * T[-(-1.0)^i for i = 1:m]
          v = [sin(T(i) / 10) for i = 1:n]
          Hv = Vector{T}(undef, n)
          SUITE["$(fun)"][f][T][s][b][pb] =
            @benchmarkable $fun(nlp, get_x0(nlp), $y, $v, $Hv) setup =
              (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
    end
  end
end
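The constrained variant above passes multipliers, timing the five-argument method. A hedged toy call, with an illustrative model and multipliers:

```julia
using ADNLPModels, NLPModels

nlp = ADNLPModel(x -> x[1]^4 + x[2]^4, ones(2), x -> [x[1] + x[2]], [1.0], [1.0])
x, y = get_x0(nlp), [2.0]
v, Hv = ones(2), zeros(2)
hprod!(nlp, x, y, v, Hv)  # Hv ← ∇²L(x, y) v
```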
3 changes: 2 additions & 1 deletion benchmark/jacobian/benchmarks_jacobian_residual.jl
@@ -36,7 +36,8 @@ for f in benchmark_list
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-          @info " $(pb): $T with $n vars and $m cons"
+          nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
+          @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls)) setup =
            (nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
end
50 changes: 50 additions & 0 deletions benchmark/jacobian/benchmarks_jprod.jl
@@ -0,0 +1,50 @@
#=
INTRODUCTION OF THIS BENCHMARK:
We test here the function `jprod!` for ADNLPModels with different backends:
- ADNLPModels.ForwardDiffADJprod
- ADNLPModels.ReverseDiffADJprod
=#
using ForwardDiff, ReverseDiff

include("additional_backends.jl")

data_types = [Float32, Float64]

benchmark_list = [:optimized]

benchmarked_jprod_backend =
  Dict("forward" => ADNLPModels.ForwardDiffADJprod, "reverse" => ADNLPModels.ReverseDiffADJprod)
get_backend_list(::Val{:optimized}) = keys(benchmarked_jprod_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_jprod_backend[b]

problem_sets = Dict("scalable" => scalable_cons_problems)
nscal = 1000

name_backend = "jprod_backend"
fun = jprod!
@info "Initialize $(fun) benchmark"
SUITE["$(fun)"] = BenchmarkGroup()

for f in benchmark_list
  SUITE["$(fun)"][f] = BenchmarkGroup()
  for T in data_types
    SUITE["$(fun)"][f][T] = BenchmarkGroup()
    for s in keys(problem_sets)
      SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
      for b in get_backend_list(Val(f))
        SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
        backend = get_backend(Val(f), b)
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          @info " $(pb): $T with $n vars and $m cons"
          Jv = Vector{T}(undef, m)
          v = 10 * T[-(-1.0)^i for i = 1:n]
          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
            (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
    end
  end
end
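A hedged sketch of the benchmarked call on a toy constrained model — the in-place Jacobian-vector product of the constraints (the model is illustrative):

```julia
using ADNLPModels, NLPModels

c(x) = [x[1] * x[2]; x[2] + x[3]]
nlp = ADNLPModel(x -> sum(x .^ 2), ones(3), c, zeros(2), zeros(2))

v = ones(3)    # one entry per variable
Jv = zeros(2)  # one entry per constraint
jprod!(nlp, get_x0(nlp), v, Jv)  # Jv ← J(x) v
```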
51 changes: 51 additions & 0 deletions benchmark/jacobian/benchmarks_jprod_residual.jl
@@ -0,0 +1,51 @@
#=
INTRODUCTION OF THIS BENCHMARK:
We test here the function `jprod_residual!` for ADNLPModels with different backends:
- ADNLPModels.ForwardDiffADJprod
- ADNLPModels.ReverseDiffADJprod
=#
using ForwardDiff, ReverseDiff

include("additional_backends.jl")

data_types = [Float32, Float64]

benchmark_list = [:optimized]

benchmarked_jprod_residual_backend =
  Dict("forward" => ADNLPModels.ForwardDiffADJprod, "reverse" => ADNLPModels.ReverseDiffADJprod)
get_backend_list(::Val{:optimized}) = keys(benchmarked_jprod_residual_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_jprod_residual_backend[b]

problem_sets = Dict("scalable_nls" => scalable_nls_problems)
nscal = 1000

name_backend = "jprod_residual_backend"
fun = jprod_residual!
@info "Initialize $(fun) benchmark"
SUITE["$(fun)"] = BenchmarkGroup()

for f in benchmark_list
  SUITE["$(fun)"][f] = BenchmarkGroup()
  for T in data_types
    SUITE["$(fun)"][f][T] = BenchmarkGroup()
    for s in keys(problem_sets)
      SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
      for b in get_backend_list(Val(f))
        SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
        backend = get_backend(Val(f), b)
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
          @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
          Jv = Vector{T}(undef, nequ)
          v = 10 * T[-(-1.0)^i for i = 1:n]
          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
            (nlp = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
    end
  end
end
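The residual variant multiplies by the nequ × n Jacobian of the residual F rather than the constraints' Jacobian. A hedged toy call (illustrative model):

```julia
using ADNLPModels, NLPModels

F(x) = [x[1] - 1; x[2] - x[1]^2; x[2] - 2]
nls = ADNLPSModel(F, zeros(2), 3)  # 3 residuals, 2 variables

v = ones(2)    # length n
Jv = zeros(3)  # length nequ
jprod_residual!(nls, get_x0(nls), v, Jv)  # Jv ← J_F(x) v
```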
50 changes: 50 additions & 0 deletions benchmark/jacobian/benchmarks_jtprod.jl
@@ -0,0 +1,50 @@
#=
INTRODUCTION OF THIS BENCHMARK:
We test here the function `jtprod!` for ADNLPModels with different backends:
- ADNLPModels.ForwardDiffADJtprod
- ADNLPModels.ReverseDiffADJtprod
=#
using ForwardDiff, ReverseDiff

include("additional_backends.jl")

data_types = [Float32, Float64]

benchmark_list = [:optimized]

benchmarked_jtprod_backend =
  Dict("forward" => ADNLPModels.ForwardDiffADJtprod, "reverse" => ADNLPModels.ReverseDiffADJtprod)
get_backend_list(::Val{:optimized}) = keys(benchmarked_jtprod_backend)
get_backend(::Val{:optimized}, b::String) = benchmarked_jtprod_backend[b]

problem_sets = Dict("scalable" => scalable_cons_problems)
nscal = 1000

name_backend = "jtprod_backend"
fun = jtprod!
@info "Initialize $(fun) benchmark"
SUITE["$(fun)"] = BenchmarkGroup()

for f in benchmark_list
  SUITE["$(fun)"][f] = BenchmarkGroup()
  for T in data_types
    SUITE["$(fun)"][f][T] = BenchmarkGroup()
    for s in keys(problem_sets)
      SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
      for b in get_backend_list(Val(f))
        SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
        backend = get_backend(Val(f), b)
        for pb in problem_sets[s]
          n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
          m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
          @info " $(pb): $T with $n vars and $m cons"
          Jtv = Vector{T}(undef, n)
          v = 10 * T[-(-1.0)^i for i = 1:m]
          SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jtv) setup =
            (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
        end
      end
    end
  end
end
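Finally, the transposed product maps constraint space back to variable space. A hedged sketch mirroring the jprod example above (same illustrative model):

```julia
using ADNLPModels, NLPModels

c(x) = [x[1] * x[2]; x[2] + x[3]]
nlp = ADNLPModel(x -> sum(x .^ 2), ones(3), c, zeros(2), zeros(2))

v = ones(2)     # one entry per constraint
Jtv = zeros(3)  # one entry per variable
jtprod!(nlp, get_x0(nlp), v, Jtv)  # Jtv ← J(x)ᵀ v
```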