# ----------------------------------------------------------------------------
# src/ADNLPProblems/toint.jl
# ----------------------------------------------------------------------------

export toint

# Toint trigonometric function
#
# Problem 10 in
# L. Luksan, C. Matonoha and J. Vlcek
# Sparse Test Problems for Unconstrained Optimization,
# Technical Report 1064,
# Institute of Computer Science,
# Academy of Science of the Czech Republic
#
# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
function toint(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
  function f(x; n = length(x))
    # Accumulate in eltype(x), not T: under automatic differentiation x holds
    # dual numbers, and starting from zero(T) would change the type of `s`
    # mid-loop (type instability). zero(eltype(x)) is still 0, so the value
    # of the objective is unchanged.
    s = zero(eltype(x))
    for i ∈ 1:n
      ci = 1 + (i // 10)
      jmin = max(1, i - 2)
      jmax = min(n, i + 2)
      # Banded couplings: x[i] interacts with neighbours x[j], |i - j| <= 2.
      for j ∈ jmin:jmax
        aij = 5 * (1 + mod(i, 5) + mod(j, 5))
        bij = (i + j) // 10
        cj = (1 + j) // 10
        s += aij * sin(bij + ci * x[i] + cj * x[j])
      end

      # Off-band couplings (even n only): x[i] interacts with x[i ± n/2],
      # skipping any index already covered by the band above.
      if iseven(n)
        half = n ÷ 2
        j1 = i + half
        if 1 <= j1 <= n && (j1 < jmin || j1 > jmax)
          aij = 5 * (1 + mod(i, 5) + mod(j1, 5))
          bij = (i + j1) // 10
          cj = (1 + j1) // 10
          s += aij * sin(bij + ci * x[i] + cj * x[j1])
        end
        j2 = i - half
        if 1 <= j2 <= n && j2 != j1 && (j2 < jmin || j2 > jmax)
          aij = 5 * (1 + mod(i, 5) + mod(j2, 5))
          bij = (i + j2) // 10
          cj = (1 + j2) // 10
          s += aij * sin(bij + ci * x[i] + cj * x[j2])
        end
      end
    end
    return s / n
  end

  x0 = fill(one(T), n)
  return ADNLPModels.ADNLPModel(f, x0, name = "toint"; kwargs...)
end

# ----------------------------------------------------------------------------
# src/ADNLPProblems/trig.jl
# ----------------------------------------------------------------------------

export trig

# Another trigonometric function
#
# Problem 9 in
# L. Luksan, C. Matonoha and J. Vlcek
# Sparse Test Problems for Unconstrained Optimization,
# Technical Report 1064,
# Institute of Computer Science,
# Academy of Science of the Czech Republic
#
# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
function trig(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
  function f(x; n = length(x))
    # Accumulate in eltype(x) so the objective stays type-stable under AD
    # (see the note in toint above); the value is unchanged.
    s = zero(eltype(x))
    for i = 1:n
      s += i * (1 - cos(x[i]))

      jmin = max(1, i - 2)
      jmax = min(n, i + 2)
      # Banded couplings: |i - j| <= 2.
      for j in jmin:jmax
        aij = 5 * (1 + mod(i, 5) + mod(j, 5))
        bij = (i + j) // 10
        s += aij * sin(x[j]) + bij * cos(x[j])
      end

      # Off-band couplings (even n only): j = i ± n/2, skipping indices
      # already handled by the band above.
      if iseven(n)
        half = n ÷ 2
        j1 = i + half
        if 1 <= j1 <= n && (j1 < jmin || j1 > jmax)
          aij = 5 * (1 + mod(i, 5) + mod(j1, 5))
          bij = (i + j1) // 10
          s += aij * sin(x[j1]) + bij * cos(x[j1])
        end
        j2 = i - half
        if 1 <= j2 <= n && j2 != j1 && (j2 < jmin || j2 > jmax)
          aij = 5 * (1 + mod(i, 5) + mod(j2, 5))
          bij = (i + j2) // 10
          s += aij * sin(x[j2]) + bij * cos(x[j2])
        end
      end
    end
    return s / n
  end

  x0 = fill(one(T) / n, n)
  return ADNLPModels.ADNLPModel(f, x0, name = "trig"; kwargs...)
end

# ----------------------------------------------------------------------------
# src/ADNLPProblems/trigb.jl
# ----------------------------------------------------------------------------

export trigb

# Banded trigonometric problem
#
# Problem 16 in
# L. Luksan, C. Matonoha and J. Vlcek
# Sparse Test Problems for Unconstrained Optimization,
# Technical Report 1064,
# Institute of Computer Science,
# Academy of Science of the Czech Republic
#
# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
function trigb(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
  function f(x; n = length(x))
    s = zero(eltype(x))
    for i = 1:n
      # Out-of-range neighbours contribute sin(0) = 0. Use eltype(x) zeros
      # so both ternary branches share a type under AD (dual numbers);
      # the numerical value is unchanged.
      xim = (i == 1) ? zero(eltype(x)) : x[i - 1]
      xip = (i == n) ? zero(eltype(x)) : x[i + 1]
      s += i * (1 - cos(x[i]) + sin(xim) - sin(xip))
    end
    return s
  end

  x0 = fill(one(T), n)
  return ADNLPModels.ADNLPModel(f, x0, name = "trigb"; kwargs...)
end

# ----------------------------------------------------------------------------
# src/Meta/toint.jl
# ----------------------------------------------------------------------------

# Problem metadata for `toint`: unconstrained, scalable in n, minimization.
toint_meta = Dict(
  :nvar => 100,
  :variable_nvar => true,
  :ncon => 0,
  :variable_ncon => false,
  :minimize => true,
  :name => "toint",
  :has_equalities_only => false,
  :has_inequalities_only => false,
  :has_bounds => false,
  :has_fixed_variables => false,
  :objtype => :other,
  :contype => :unconstrained,
  :best_known_lower_bound => -Inf,
  :best_known_upper_bound => 0.0,
  :is_feasible => true,
  :defined_everywhere => missing,
  :origin => :unknown,
)

get_toint_nvar(; n::Integer = default_nvar, kwargs...) = n
get_toint_ncon(; n::Integer = default_nvar, kwargs...) = 0
get_toint_nlin(; n::Integer = default_nvar, kwargs...) = 0
get_toint_nnln(; n::Integer = default_nvar, kwargs...) = 0
get_toint_nequ(; n::Integer = default_nvar, kwargs...) = 0
get_toint_nineq(; n::Integer = default_nvar, kwargs...) = 0

# ----------------------------------------------------------------------------
# src/Meta/trig.jl
# ----------------------------------------------------------------------------

# Problem metadata for `trig`: unconstrained, scalable in n, minimization.
trig_meta = Dict(
  :nvar => 100,
  :variable_nvar => true,
  :ncon => 0,
  :variable_ncon => false,
  :minimize => true,
  :name => "trig",
  :has_equalities_only => false,
  :has_inequalities_only => false,
  :has_bounds => false,
  :has_fixed_variables => false,
  :objtype => :other,
  :contype => :unconstrained,
  :best_known_lower_bound => -Inf,
  :best_known_upper_bound => 0.0,
  :is_feasible => true,
  :defined_everywhere => missing,
  :origin => :unknown,
)

get_trig_nvar(; n::Integer = default_nvar, kwargs...) = n
get_trig_ncon(; n::Integer = default_nvar, kwargs...) = 0
get_trig_nlin(; n::Integer = default_nvar, kwargs...) = 0
get_trig_nnln(; n::Integer = default_nvar, kwargs...) = 0
get_trig_nequ(; n::Integer = default_nvar, kwargs...) = 0
get_trig_nineq(; n::Integer = default_nvar, kwargs...) = 0

# ----------------------------------------------------------------------------
# src/Meta/trigb.jl
# ----------------------------------------------------------------------------

# Problem metadata for `trigb`: unconstrained, scalable in n, minimization.
trigb_meta = Dict(
  :nvar => 100,
  :variable_nvar => true,
  :ncon => 0,
  :variable_ncon => false,
  :minimize => true,
  :name => "trigb",
  :has_equalities_only => false,
  :has_inequalities_only => false,
  :has_bounds => false,
  :has_fixed_variables => false,
  :objtype => :other,
  :contype => :unconstrained,
  :best_known_lower_bound => -Inf,
  :best_known_upper_bound => 0.0,
  :is_feasible => true,
  :defined_everywhere => missing,
  :origin => :unknown,
)

get_trigb_nvar(; n::Integer = default_nvar, kwargs...) = n
get_trigb_ncon(; n::Integer = default_nvar, kwargs...) = 0
get_trigb_nlin(; n::Integer = default_nvar, kwargs...) = 0
get_trigb_nnln(; n::Integer = default_nvar, kwargs...) = 0
get_trigb_nequ(; n::Integer = default_nvar, kwargs...) = 0
get_trigb_nineq(; n::Integer = default_nvar, kwargs...) = 0

# ----------------------------------------------------------------------------
# src/PureJuMP/toint.jl
# ----------------------------------------------------------------------------

# Toint trigonometric function
#
# Problem 10 in
# L. Luksan, C. Matonoha and J. Vlcek
# Sparse Test Problems for Unconstrained Optimization,
# Technical Report 1064,
# Institute of Computer Science,
# Academy of Science of the Czech Republic
#
# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
#
export toint

function toint(args...; n::Int = default_nvar, kwargs...)
  model = Model()
  @variable(model, x[i = 1:n], start = 1)

  @objective(
    model,
    Min,
    (1 / n) * sum(begin
      ci = 1 + (i // 10)
      s = zero(Float64)

      # Banded couplings: |i - j| <= 2 (mirrors the ADNLPProblems version).
      jmin = max(1, i - 2)
      jmax = min(n, i + 2)
      for j in jmin:jmax
        aij = 5 * (1 + mod(i, 5) + mod(j, 5))
        bij = (i + j) // 10
        cj = (1 + j) // 10
        s += aij * sin(bij + ci * x[i] + cj * x[j])
      end

      # Off-band couplings (even n only): j = i ± n/2, excluding indices
      # already covered by the band.
      if iseven(n)
        half = n ÷ 2
        j1 = i + half
        if 1 <= j1 <= n && (j1 < jmin || j1 > jmax)
          aij = 5 * (1 + mod(i, 5) + mod(j1, 5))
          bij = (i + j1) // 10
          cj = (1 + j1) // 10
          s += aij * sin(bij + ci * x[i] + cj * x[j1])
        end
        j2 = i - half
        if 1 <= j2 <= n && j2 != j1 && (j2 < jmin || j2 > jmax)
          aij = 5 * (1 + mod(i, 5) + mod(j2, 5))
          bij = (i + j2) // 10
          cj = (1 + j2) // 10
          s += aij * sin(bij + ci * x[i] + cj * x[j2])
        end
      end
      s
    end for i = 1:n)
  )

  return model
end

# ----------------------------------------------------------------------------
# src/PureJuMP/trig.jl
# ----------------------------------------------------------------------------

# Another trigonometric function
#
# Problem 9 in
# L. Luksan, C. Matonoha and J. Vlcek
# Sparse Test Problems for Unconstrained Optimization,
# Technical Report 1064,
# Institute of Computer Science,
# Academy of Science of the Czech Republic
#
# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
#
export trig

function trig(args...; n::Int = default_nvar, kwargs...)
  model = Model()
  @variable(model, x[i = 1:n], start = 1 / n)

  @objective(
    model,
    Min,
    (1 / n) * sum(
      i * (1 - cos(x[i])) +
      (
        begin
          # Banded couplings: |i - j| <= 2.
          jmin = max(1, i - 2)
          jmax = min(n, i + 2)
          s = zero(Float64)
          for j in jmin:jmax
            aij = 5 * (1 + mod(i, 5) + mod(j, 5))
            bij = (i + j) / 10
            s += aij * sin(x[j]) + bij * cos(x[j])
          end
          # Off-band couplings (even n only): j = i ± n/2, excluding
          # indices already covered by the band.
          if iseven(n)
            half = n ÷ 2
            j1 = i + half
            if 1 <= j1 <= n && (j1 < jmin || j1 > jmax)
              aij = 5 * (1 + mod(i, 5) + mod(j1, 5))
              bij = (i + j1) / 10
              s += aij * sin(x[j1]) + bij * cos(x[j1])
            end
            j2 = i - half
            if 1 <= j2 <= n && j2 != j1 && (j2 < jmin || j2 > jmax)
              aij = 5 * (1 + mod(i, 5) + mod(j2, 5))
              bij = (i + j2) / 10
              s += aij * sin(x[j2]) + bij * cos(x[j2])
            end
          end
          s
        end
      ) for i = 1:n
    )
  )

  return model
end

# ----------------------------------------------------------------------------
# src/PureJuMP/trigb.jl
# ----------------------------------------------------------------------------

## Banded trigonometric problem
#
# Problem 16 in
# L. Luksan, C. Matonoha and J. Vlcek
# Sparse Test Problems for Unconstrained Optimization,
# Technical Report 1064,
# Institute of Computer Science,
# Academy of Science of the Czech Republic
#
# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
#
export trigb

function trigb(args...; n::Int = default_nvar, kwargs...)
  model = Model()
  @variable(model, x[i = 1:n], start = 1)

  # Out-of-range neighbours contribute sin(0) = 0, matching the
  # ADNLPProblems version's zero padding at the boundaries.
  @objective(
    model,
    Min,
    sum(
      i *
      ((1 - cos(x[i])) + ((i == 1) ? sin(0) : sin(x[i - 1])) - ((i == n) ? sin(0) : sin(x[i + 1])))
      for i = 1:n
    )
  )

  return model
end