Skip to content

Commit

Permalink
fix formatting
Browse files Browse the repository at this point in the history
  • Loading branch information
mikerouleau committed Jan 1, 2021
1 parent 1eccf40 commit 9eacdea
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 35 deletions.
15 changes: 7 additions & 8 deletions test/derivatives.jl
Original file line number Diff line number Diff line change
Expand Up @@ -423,7 +423,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
reverse_extract(grad, reverse_storage, nd, adj, [], 1.0)
@test grad[1] == 0.0

function test_linearity(ex, testval, IJ=[], indices=[])
function test_linearity(ex, testval, IJ = [], indices = [])
nd, const_values = expr_to_nodedata(ex)
adj = adjmat(nd)
linearity = classify_linearity(nd, adj, [])
Expand Down Expand Up @@ -551,7 +551,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
@test grad ≈ true_grad

# dual forward test
function dualforward(ex, x; ignore_nan=false)
function dualforward(ex, x; ignore_nan = false)
nd, const_values = expr_to_nodedata(ex)
adj = adjmat(nd)
forward_storage = zeros(length(nd))
Expand Down Expand Up @@ -643,18 +643,17 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
ForwardDiff.Dual(2.0, 0.0),
)
for k in 1:length(nd)
@test _epsilon(forward_dual_storage[k])
forward_storage_ϵ[k][1]
@test _epsilon(forward_dual_storage[k]) ≈ forward_storage_ϵ[k][1]

if !(isnan(_epsilon(partials_dual_storage[k])) && ignore_nan)
@test _epsilon(partials_dual_storage[k])
partials_storage_ϵ[k][1]
partials_storage_ϵ[k][1]
else
@test !isnan(forward_storage_ϵ[k][1])
end
if !(isnan(_epsilon(reverse_dual_storage[k])) && ignore_nan)
@test _epsilon(reverse_dual_storage[k])
reverse_storage_ϵ[k][1] / 2
@test _epsilon(reverse_dual_storage[k])
reverse_storage_ϵ[k][1] / 2
else
@test !isnan(reverse_storage_ϵ[k][1])
end
Expand All @@ -674,6 +673,6 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
dualforward(
:(x[1] * x[2]),
[3.427139283036299e-206, 1.0],
ignore_nan=true,
ignore_nan = true,
)
end
53 changes: 26 additions & 27 deletions test/nlp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ end
model = Model()
@variable(model, x)
user_function = x -> x
JuMP.register(model, :f, 1, user_function, autodiff=true)
JuMP.register(model, :f, 1, user_function, autodiff = true)
@test expressions_equal(
JuMP.@_process_NL_expr(model, f(x)),
_NonlinearExprData(model, :(f($x))),
Expand All @@ -158,7 +158,7 @@ end
@variable(model, x)
@variable(model, y)
user_function = (x, y) -> x
JuMP.register(model, :f, 2, user_function, autodiff=true)
JuMP.register(model, :f, 2, user_function, autodiff = true)
@test expressions_equal(
JuMP.@_process_NL_expr(model, f(x, y)),
_NonlinearExprData(model, :(f($x, $y))),
Expand All @@ -169,7 +169,7 @@ end
model = Model()
@variable(model, x[1:2])
user_function = (x, y) -> x
JuMP.register(model, :f, 2, user_function, autodiff=true)
JuMP.register(model, :f, 2, user_function, autodiff = true)
@test expressions_equal(
JuMP.@_process_NL_expr(model, f(x...)),
_NonlinearExprData(model, :(f($(x[1]), $(x[2])))),
Expand Down Expand Up @@ -253,7 +253,7 @@ end
values = [1.0, 2.0, 3.0] # Values for a, b, and c, respectively.
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test dense_hessian(hessian_sparsity, V, 3) ≈
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 2.0]
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 2.0]

# make sure we don't get NaNs in this case
@NLobjective(m, Min, a * b + 3 * c^2)
Expand All @@ -263,14 +263,14 @@ end
V = zeros(length(hessian_sparsity))
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test dense_hessian(hessian_sparsity, V, 3) ≈
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0]
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0]

# Initialize again
MOI.initialize(d, [:Hess])
V = zeros(length(hessian_sparsity))
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test dense_hessian(hessian_sparsity, V, 3)
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0]
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0]
end

@testset "NaN corner case (Issue #695)" begin
Expand All @@ -288,7 +288,7 @@ end
v = [2.4, 3.5]
values = [1.0, 2.0] # For x and y.
MOI.eval_hessian_lagrangian_product(d, h, values, v, 1.0, Float64[])
correct = [0.0 -1 / (2 * 2^(3 / 2)); -1 / (2 * 2^(3 / 2)) 3 / (4 * 2^(5 / 2))] * v
correct = [0.0 -1/(2*2^(3/2)); -1/(2*2^(3/2)) 3/(4*2^(5/2))] * v
@test h ≈ correct
end

Expand Down Expand Up @@ -494,12 +494,12 @@ end

ψ(x) = 1
t(x, y) = 2
JuMP.register(m, :ψ, 1, ψ, autodiff=true)
JuMP.register(m, :t, 2, t, autodiff=true)
JuMP.register(m, :ψ, 1, ψ, autodiff = true)
JuMP.register(m, :t, 2, t, autodiff = true)

@NLobjective(m, Min, x^y)
@NLconstraint(m, sin(x) * cos(y) == 5)
@NLconstraint(m, nlconstr[i=1:2], i * x^2 == i)
@NLconstraint(m, nlconstr[i = 1:2], i * x^2 == i)
@NLconstraint(m, -0.5 <= sin(x) <= 0.5)
@NLconstraint(m, ψ(x) + t(x, y) <= 3)

Expand Down Expand Up @@ -558,7 +558,7 @@ end
@objective(model, Max, x)

@NLconstraints(model, begin
ref[i=1:3], y[i] == 0
ref[i = 1:3], y[i] == 0
x + y[1] * y[2] * y[3] <= 0.5
end)

Expand Down Expand Up @@ -592,15 +592,14 @@ end
values = ones(18)
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test dense_hessian(hessian_sparsity, V, 18) ≈
ones(18, 18) - diagm(0 => ones(18))
ones(18, 18) - diagm(0 => ones(18))

values[1] = 0.5
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test dense_hessian(hessian_sparsity, V, 18)
[
0 ones(17)'
ones(17) (ones(17, 17) - diagm(0 => ones(17))) / 2
]
@test dense_hessian(hessian_sparsity, V, 18) ≈ [
0 ones(17)'
ones(17) (ones(17, 17)-diagm(0 => ones(17)))/2
]
end

@testset "eval_objective and eval_objective_gradient" begin
Expand All @@ -611,15 +610,15 @@ end

ψ(x) = sin(x)
t(x, y) = x + 3y
JuMP.register(m, :ψ, 1, ψ, autodiff=true)
JuMP.register(m, :t, 2, t, autodiff=true)
JuMP.register(m, :ψ, 1, ψ, autodiff = true)
JuMP.register(m, :t, 2, t, autodiff = true)

@NLobjective(m, Min, ex / 2 + sin(x[2]) / ψ(x[2]) + t(x[3], x[4]))
d = JuMP.NLPEvaluator(m)
MOI.initialize(d, [:Grad])
variable_values = fill(2.0, (4,))
@test MOI.eval_objective(d, variable_values) ≈
variable_values[1] + 1 + variable_values[3] + 3variable_values[4]
variable_values[1] + 1 + variable_values[3] + 3variable_values[4]
grad = zeros(4)
MOI.eval_objective_gradient(d, grad, variable_values)
@test grad ≈ [1.0, 0.0, 1.0, 3.0]
Expand All @@ -633,8 +632,8 @@ end

ψ(x) = sin(x)
t(x, y) = x + 3y
JuMP.register(m, :ψ, 1, ψ, autodiff=true)
JuMP.register(m, :t, 2, t, autodiff=true)
JuMP.register(m, :ψ, 1, ψ, autodiff = true)
JuMP.register(m, :t, 2, t, autodiff = true)

@NLconstraint(
m,
Expand All @@ -647,7 +646,7 @@ end
constraint_value = zeros(1)
MOI.eval_constraint(d, constraint_value, variable_values)
@test constraint_value[1] ≈
variable_values[1] + 1 + variable_values[3] + 3variable_values[4]
variable_values[1] + 1 + variable_values[3] + 3variable_values[4]
jacobian_sparsity = MOI.jacobian_structure(d)
I = [i for (i, j) in jacobian_sparsity]
J = [j for (i, j) in jacobian_sparsity]
Expand Down Expand Up @@ -740,7 +739,7 @@ end
model = Model()
@variable(model, x[1:2])
f(x1) = x1 + x[2]
JuMP.register(model, :f, 1, f; autodiff=true)
JuMP.register(model, :f, 1, f; autodiff = true)
@NLobjective(model, Min, f(x[1]))
d = JuMP.NLPEvaluator(model)
MOI.initialize(d, [:Grad])
Expand All @@ -757,7 +756,7 @@ end
model = Model()
@variable(model, x)
f(x) = string(x)
JuMP.register(model, :f, 1, f; autodiff=true)
JuMP.register(model, :f, 1, f; autodiff = true)
@NLobjective(model, Min, f(x))
d = JuMP.NLPEvaluator(model)
MOI.initialize(d, [:Grad])
Expand All @@ -783,7 +782,7 @@ end
@testset "Hessians disabled with user-defined multivariate functions" begin
model = Model()
my_f(x, y) = (x - 1)^2 + (y - 2)^2
JuMP.register(model, :my_f, 2, my_f, autodiff=true)
JuMP.register(model, :my_f, 2, my_f, autodiff = true)
@variable(model, x[1:2])
@NLobjective(model, Min, my_f(x[1], x[2]))
evaluator = JuMP.NLPEvaluator(model)
Expand Down Expand Up @@ -840,7 +839,7 @@ end
err = ErrorException(
"Encountered an error parsing nonlinear expression: we don't support " *
"models of type $(typeof(model)). In general, JuMP's nonlinear features " *
"don't work with JuMP-extensions."
"don't work with JuMP-extensions.",
)
@test_throws(err, @NLexpression(model, sqrt(x)))
end
Expand Down

0 comments on commit 9eacdea

Please sign in to comment.