Skip to content

Commit

Permalink
Change isapprox(a,b) to a \approx b in tests (#2419)
Browse files Browse the repository at this point in the history
* change isapprox(bar,foo) to bar \approx foo

* fix formatting

Co-authored-by: Rouleau, Michael <mike.rouleau@gatech.edu>
  • Loading branch information
mikerouleau and mikerouleau authored Jan 4, 2021
1 parent 2b0d910 commit b562ae8
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 66 deletions.
44 changes: 19 additions & 25 deletions test/derivatives.jl
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,14 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
nd, const_values = expr_to_nodedata(ex)
adj = adjmat(nd)

#@show nd
# @show nd

storage = zeros(length(nd))
partials_storage = zeros(length(nd))
reverse_storage = zeros(length(nd))

x = [2.0, 3.0]
#@show x
# @show x
fval = forward_eval(
storage,
partials_storage,
Expand All @@ -37,14 +37,14 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
NO_USER_OPS,
)
true_val = sin(x[1]^2) + cos(x[2] * 4) / 5 - 2.0
@test isapprox(fval, true_val)
@test fval ≈ true_val

grad = zeros(2)
reverse_eval(reverse_storage, partials_storage, nd, adj)
reverse_extract(grad, reverse_storage, nd, adj, [], 1.0)

true_grad = [2 * x[1] * cos(x[1]^2), -4 * sin(x[2] * 4) / 5]
@test isapprox(grad, true_grad)
@test grad ≈ true_grad

# Testing view
xx = [1.0, 2.0, 3.0, 4.0, 5.0]
Expand Down Expand Up @@ -128,7 +128,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
[],
NO_USER_OPS,
)
@test isapprox(fval, true_val)
@test fval ≈ true_val

outer_reverse_storage = zeros(1)
fill!(grad, 0.0)
Expand All @@ -150,7 +150,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
@assert subexpr_output[1] == 1.0
reverse_eval(reverse_storage, partials_storage, nd, adj)
reverse_extract(grad, reverse_storage, nd, adj, [], subexpr_output[1])
@test isapprox(grad, true_grad)
@test grad ≈ true_grad

ex = :((1 / x[1])^x[2] - x[3])

Expand All @@ -162,7 +162,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
reverse_storage = zeros(length(nd))

x = [2.5, 3.5, 1.0]
#@show x
# @show x
fval = forward_eval(
storage,
partials_storage,
Expand All @@ -177,14 +177,14 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
NO_USER_OPS,
)
true_val = (1 / x[1])^x[2] - x[3]
@test isapprox(fval, true_val)
@test fval ≈ true_val

grad = zeros(3)
reverse_eval(reverse_storage, partials_storage, nd, adj)
reverse_extract(grad, reverse_storage, nd, adj, [], 1.0)

true_grad = [-x[2] * x[1]^(-x[2] - 1), -((1 / x[1])^x[2]) * log(x[1]), -1]
@test isapprox(grad, true_grad)
@test grad ≈ true_grad

# logical expressions
ex = :(x[1] > 0.5 && x[1] < 0.9)
Expand Down Expand Up @@ -538,7 +538,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
r,
)
true_val = Φ(x[2], x[1] - 1) * cos(x[3])
@test isapprox(fval, true_val)
@test fval ≈ true_val
grad = zeros(3)
reverse_eval(reverse_storage, partials_storage, nd, adj)
reverse_extract(grad, reverse_storage, nd, adj, [], 1.0)
Expand All @@ -548,7 +548,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
-4cos(x[3]) * x[2],
-sin(x[3]) * Φ(x[2], x[1] - 1),
]
@test isapprox(grad, true_grad)
@test grad ≈ true_grad

# dual forward test
function dualforward(ex, x; ignore_nan = false)
Expand Down Expand Up @@ -605,7 +605,7 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
2.0,
zero_ϵ,
)
@test isapprox(fval_ϵ[1], dot(grad, ones(length(x))))
@test fval_ϵ[1] ≈ dot(grad, ones(length(x)))

# compare with running dual numbers
_epsilon(x::ForwardDiff.Dual{Nothing,Float64,1}) = x.partials[1]
Expand Down Expand Up @@ -643,30 +643,24 @@ struct ΦEvaluator <: MOI.AbstractNLPEvaluator end
ForwardDiff.Dual(2.0, 0.0),
)
for k in 1:length(nd)
@test isapprox(
_epsilon(forward_dual_storage[k]),
forward_storage_ϵ[k][1],
)
@test _epsilon(forward_dual_storage[k]) ≈ forward_storage_ϵ[k][1]

if !(isnan(_epsilon(partials_dual_storage[k])) && ignore_nan)
@test isapprox(
_epsilon(partials_dual_storage[k]),
partials_storage_ϵ[k][1],
)
@test _epsilon(partials_dual_storage[k]) ≈
      partials_storage_ϵ[k][1]
else
@test !isnan(forward_storage_ϵ[k][1])
end
if !(isnan(_epsilon(reverse_dual_storage[k])) && ignore_nan)
@test isapprox(
_epsilon(reverse_dual_storage[k]),
reverse_storage_ϵ[k][1] / 2,
)
@test _epsilon(reverse_dual_storage[k]) ≈
      reverse_storage_ϵ[k][1] / 2
else
@test !isnan(reverse_storage_ϵ[k][1])
end
end
for k in 1:length(x)
if !(isnan(_epsilon(output_dual_storage[k])) && ignore_nan)
@test isapprox(_epsilon(output_dual_storage[k]), output_ϵ[k][1])
@test _epsilon(output_dual_storage[k]) ≈ output_ϵ[k][1]
else
@test !isnan(output_ϵ[k][1])
end
Expand Down
65 changes: 24 additions & 41 deletions test/nlp.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# TODO: Replace isapprox with ≈ everywhere.

using JuMP
using LinearAlgebra
using SparseArrays
Expand Down Expand Up @@ -254,10 +252,8 @@ end
V = zeros(length(hessian_sparsity))
values = [1.0, 2.0, 3.0] # Values for a, b, and c, respectively.
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(
dense_hessian(hessian_sparsity, V, 3),
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 2.0],
)
@test dense_hessian(hessian_sparsity, V, 3) ≈
      [0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 2.0]

# make sure we don't get NaNs in this case
@NLobjective(m, Min, a * b + 3 * c^2)
Expand All @@ -266,19 +262,15 @@ end
values = [1.0, 2.0, -1.0]
V = zeros(length(hessian_sparsity))
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(
dense_hessian(hessian_sparsity, V, 3),
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0],
)
@test dense_hessian(hessian_sparsity, V, 3) ≈
      [0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0]

# Initialize again
MOI.initialize(d, [:Hess])
V = zeros(length(hessian_sparsity))
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(
dense_hessian(hessian_sparsity, V, 3),
[0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0],
)
@test dense_hessian(hessian_sparsity, V, 3) ≈
      [0.0 1.0 0.0; 1.0 0.0 0.0; 0.0 0.0 6.0]
end

@testset "NaN corner case (Issue #695)" begin
Expand All @@ -297,7 +289,7 @@ end
values = [1.0, 2.0] # For x and y.
MOI.eval_hessian_lagrangian_product(d, h, values, v, 1.0, Float64[])
correct = [0.0 -1/(2*2^(3/2)); -1/(2*2^(3/2)) 3/(4*2^(5/2))] * v
@test isapprox(h, correct)
@test h ≈ correct
end

@testset "NaN corner case (Issue #1205)" begin
Expand All @@ -312,7 +304,7 @@ end
V = zeros(length(hessian_sparsity))
values = zeros(1)
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(dense_hessian(hessian_sparsity, V, 1), [0.0])
@test dense_hessian(hessian_sparsity, V, 1) ≈ [0.0]
end

@testset "NaN corner case - ifelse (Issue #1205)" begin
Expand All @@ -327,7 +319,7 @@ end
V = zeros(length(hessian_sparsity))
values = zeros(1)
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(dense_hessian(hessian_sparsity, V, 1), [0.0])
@test dense_hessian(hessian_sparsity, V, 1) ≈ [0.0]
end

@testset "Product corner case (issue #1181)" begin
Expand Down Expand Up @@ -371,12 +363,12 @@ end
V = zeros(length(hessian_sparsity))
MOI.eval_hessian_lagrangian(d, V, values, 1.0, [2.0, 3.0])
correct_hessian = [3.0 1.0 0.0; 1.0 0.0 2.0; 0.0 2.0 2.0]
@test isapprox(dense_hessian(hessian_sparsity, V, 3), correct_hessian)
@test dense_hessian(hessian_sparsity, V, 3) ≈ correct_hessian

h = ones(3) # The input values should be overwritten.
v = [2.4, 3.5, 1.2]
MOI.eval_hessian_lagrangian_product(d, h, values, v, 1.0, [2.0, 3.0])
@test isapprox(h, correct_hessian * v)
@test h ≈ correct_hessian * v
end

@testset "Hessians and Hess-vec with subexpressions" begin
Expand Down Expand Up @@ -599,20 +591,15 @@ end
V = zeros(length(hessian_sparsity))
values = ones(18)
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(
dense_hessian(hessian_sparsity, V, 18),
ones(18, 18) - diagm(0 => ones(18)),
)
@test dense_hessian(hessian_sparsity, V, 18) ≈
      ones(18, 18) - diagm(0 => ones(18))

values[1] = 0.5
MOI.eval_hessian_lagrangian(d, V, values, 1.0, Float64[])
@test isapprox(
dense_hessian(hessian_sparsity, V, 18),
[
0 ones(17)'
ones(17) (ones(17, 17)-diagm(0 => ones(17)))/2
],
)
@test dense_hessian(hessian_sparsity, V, 18) ≈ [
0 ones(17)'
ones(17) (ones(17, 17)-diagm(0 => ones(17)))/2
]
end

@testset "eval_objective and eval_objective_gradient" begin
Expand All @@ -630,13 +617,11 @@ end
d = JuMP.NLPEvaluator(m)
MOI.initialize(d, [:Grad])
variable_values = fill(2.0, (4,))
@test isapprox(
MOI.eval_objective(d, variable_values),
variable_values[1] + 1 + variable_values[3] + 3variable_values[4],
)
@test MOI.eval_objective(d, variable_values) ≈
      variable_values[1] + 1 + variable_values[3] + 3variable_values[4]
grad = zeros(4)
MOI.eval_objective_gradient(d, grad, variable_values)
@test isapprox(grad, [1.0, 0.0, 1.0, 3.0])
@test grad ≈ [1.0, 0.0, 1.0, 3.0]
end

@testset "eval_constraint and Jacobians" begin
Expand All @@ -660,10 +645,8 @@ end
variable_values = fill(2.0, (4,))
constraint_value = zeros(1)
MOI.eval_constraint(d, constraint_value, variable_values)
@test isapprox(
constraint_value[1],
variable_values[1] + 1 + variable_values[3] + 3variable_values[4],
)
@test constraint_value[1] ≈
      variable_values[1] + 1 + variable_values[3] + 3variable_values[4]
jacobian_sparsity = MOI.jacobian_structure(d)
I = [i for (i, j) in jacobian_sparsity]
J = [j for (i, j) in jacobian_sparsity]
Expand All @@ -672,7 +655,7 @@ end
MOI.eval_constraint_jacobian(d, jac_nonzeros, variable_values)
jac_values = zeros(4)
jac_values[J] = jac_nonzeros
@test isapprox(jac_values, [1.0, 0.0, 1.0, 3.0])
@test jac_values ≈ [1.0, 0.0, 1.0, 3.0]
end

@testset "set_NL_objective and add_NL_constraint" begin
Expand Down Expand Up @@ -856,7 +839,7 @@ end
err = ErrorException(
"Encountered an error parsing nonlinear expression: we don't support " *
"models of type $(typeof(model)). In general, JuMP's nonlinear features " *
"don't work with JuMP-extensions."
"don't work with JuMP-extensions.",
)
@test_throws(err, @NLexpression(model, sqrt(x)))
end
Expand Down

0 comments on commit b562ae8

Please sign in to comment.