From e3d5423afd4595df7428efeb59ae60fbdbacfb3e Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Wed, 20 Nov 2019 16:22:21 -0500 Subject: [PATCH 1/2] Remove A.tprod syntax --- src/bilq.jl | 9 ++++++--- src/cg.jl | 2 +- src/cg_lanczos.jl | 36 ++++++++++++++++++------------------ src/cgls.jl | 9 ++++++--- src/cgne.jl | 9 ++++++--- src/cgs.jl | 8 ++++---- src/cr.jl | 2 +- src/craig.jl | 7 +++++-- src/craigmr.jl | 11 +++++++---- src/crls.jl | 13 ++++++++----- src/crmr.jl | 9 ++++++--- src/diom.jl | 2 +- src/dqgmres.jl | 2 +- src/lslq.jl | 9 ++++++--- src/lsmr.jl | 9 ++++++--- src/lsqr.jl | 9 ++++++--- src/minres.jl | 2 +- src/minres_qlp.jl | 2 +- src/qmr.jl | 9 ++++++--- src/symmlq.jl | 2 +- src/usymlq.jl | 11 +++++++---- src/usymqr.jl | 9 ++++++--- test/test_alloc.jl | 13 +++++++++++++ 23 files changed, 123 insertions(+), 71 deletions(-) diff --git a/src/bilq.jl b/src/bilq.jl index a7692ce76..6b340f59b 100644 --- a/src/bilq.jl +++ b/src/bilq.jl @@ -22,7 +22,7 @@ when it exists. The transfer is based on the residual norm. This version of BiLQ works in any floating-point data type. """ -function bilq(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: AbstractVector{T}=b, +function bilq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; c :: AbstractVector{T}=b, atol :: T=√eps(T), rtol :: T=√eps(T), transfer_to_bicg :: Bool=true, itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -31,6 +31,9 @@ function bilq(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: Abstract length(b) == m || error("Inconsistent problem size") verbose && @printf("BILQ: system of size %d\n", n) + # Compute the adjoint of A + Aᵀ = A' + # Initial solution x₀ and residual norm ‖r₀‖. x = zeros(T, n) bNorm = @knrm2(n, b) # ‖r₀‖ @@ -78,8 +81,8 @@ function bilq(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: Abstract # AVₖ = VₖTₖ + βₖ₊₁vₖ₊₁(eₖ)ᵀ = Vₖ₊₁Tₖ₊₁.ₖ # AᵀUₖ = Uₖ(Tₖ)ᵀ + γₖ₊₁uₖ₊₁(eₖ)ᵀ = Uₖ₊₁(Tₖ.ₖ₊₁)ᵀ - q = A * vₖ # Forms vₖ₊₁ : q ← Avₖ - p = A.tprod(uₖ) # Forms uₖ₊₁ : p ← Aᵀuₖ + q = A * vₖ # Forms vₖ₊₁ : q ← Avₖ + p = Aᵀ * uₖ # Forms uₖ₊₁ : p ← Aᵀuₖ @kaxpy!(n, -γₖ, vₖ₋₁, q) # q ← q - γₖ * vₖ₋₁ @kaxpy!(n, -βₖ, uₖ₋₁, p) # p ← p - βₖ * uₖ₋₁ diff --git a/src/cg.jl b/src/cg.jl index 313d4b667..60cfc27d9 100644 --- a/src/cg.jl +++ b/src/cg.jl @@ -18,7 +18,7 @@ The method does _not_ abort if A is not definite. A preconditioner M may be provided in the form of a linear operator and is assumed to be symmetric and positive definite. """ -function cg(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function cg(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, radius :: T=zero(T), verbose :: Bool=false) where T <: AbstractFloat diff --git a/src/cg_lanczos.jl b/src/cg_lanczos.jl index cf3336595..52d1af992 100644 --- a/src/cg_lanczos.jl +++ b/src/cg_lanczos.jl @@ -22,7 +22,7 @@ The method does _not_ abort if A is not definite. A preconditioner M may be provided in the form of a linear operator and is assumed to be symmetric and positive definite. """ -function cg_lanczos(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function cg_lanczos(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, check_curvature :: Bool=false, verbose :: Bool=false) where T <: AbstractFloat @@ -125,10 +125,10 @@ The method does _not_ abort if A + αI is not definite. 
A preconditioner M may be provided in the form of a linear operator and is assumed to be symmetric and positive definite. """ -function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{Tb}, - shifts :: AbstractVector{Ts}; M :: AbstractLinearOperator=opEye(), - atol :: Tb=√eps(Tb), rtol :: Tb=√eps(Tb), itmax :: Int=0, - check_curvature :: Bool=false, verbose :: Bool=false) where {Tb <: AbstractFloat, Ts <: Number} +function cg_lanczos_shift_seq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}, + shifts :: AbstractVector{S}; M :: AbstractLinearOperator=opEye(), + atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, + check_curvature :: Bool=false, verbose :: Bool=false) where {T <: AbstractFloat, S <: Number} n = size(b, 1); (size(A, 1) == n & size(A, 2) == n) || error("Inconsistent problem size"); @@ -141,30 +141,30 @@ function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{T # Initial state. ## Distribute x similarly to shifts. - x = [zeros(Tb, n) for i = 1 : nshifts] # x₀ + x = [zeros(T, n) for i = 1 : nshifts] # x₀ Mv = copy(b) # Mv₁ ← b v = M * Mv # v₁ = M⁻¹ * Mv₁ β = sqrt(@kdot(n, v, Mv)) # β₁ = v₁ᵀ M v₁ - β == 0 && return x, LanczosStats(true, [zero(Tb)], false, zero(Tb), zero(Tb), "x = 0 is a zero-residual solution") + β == 0 && return x, LanczosStats(true, [zero(T)], false, zero(T), zero(T), "x = 0 is a zero-residual solution") # Initialize each p to v. p = [copy(v) for i = 1 : nshifts] # Initialize Lanczos process. # β₁Mv₁ = b - @kscal!(n, one(Tb)/β, v) # v₁ ← v₁ / β₁ - MisI || @kscal!(n, one(Tb)/β, Mv) # Mv₁ ← Mv₁ / β₁ + @kscal!(n, one(T)/β, v) # v₁ ← v₁ / β₁ + MisI || @kscal!(n, one(T)/β, Mv) # Mv₁ ← Mv₁ / β₁ Mv_prev = copy(Mv) # Initialize some constants used in recursions below. - ρ = one(Tb) - σ = β * ones(Tb, nshifts) - δhat = zeros(Tb, nshifts) - ω = zeros(Tb, nshifts) - γ = ones(Tb, nshifts) + ρ = one(T) + σ = β * ones(T, nshifts) + δhat = zeros(T, nshifts) + ω = zeros(T, nshifts) + γ = ones(T, nshifts) # Define stopping tolerance. - rNorms = β * ones(Tb, nshifts); + rNorms = β * ones(T, nshifts); rNorms_history = [rNorms;]; ε = atol + rtol * β; @@ -202,8 +202,8 @@ function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{T @. Mv = Mv_next # Mvₖ ← Mvₖ₊₁ v = M * Mv # vₖ₊₁ = M⁻¹ * Mvₖ₊₁ β = sqrt(@kdot(n, v, Mv)) # βₖ₊₁ = vₖ₊₁ᵀ M vₖ₊₁ - @kscal!(n, one(Tb)/β, v) # vₖ₊₁ ← vₖ₊₁ / βₖ₊₁ - MisI || @kscal!(n, one(Tb)/β, Mv) # Mvₖ₊₁ ← Mvₖ₊₁ / βₖ₊₁ + @kscal!(n, one(T)/β, v) # vₖ₊₁ ← vₖ₊₁ / βₖ₊₁ + MisI || @kscal!(n, one(T)/β, Mv) # Mvₖ₊₁ ← Mvₖ₊₁ / βₖ₊₁ # Check curvature: vₖᵀ(A + sᵢI)vₖ = vₖᵀAvₖ + sᵢ‖vₖ‖² = δₖ + ρₖ * sᵢ with ρₖ = ‖vₖ‖². # It is possible to show that σₖ² (δₖ + ρₖ * sᵢ - ωₖ₋₁ / γₖ₋₁) = pₖᵀ (A + sᵢ I) pₖ. @@ -242,6 +242,6 @@ function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{T end status = tired ? "maximum number of iterations exceeded" : "solution good enough given atol and rtol" - stats = LanczosStats(solved, permutedims(reshape(rNorms_history, nshifts, round(Int, sum(size(rNorms_history))/nshifts))), indefinite, zero(Tb), zero(Tb), status); # TODO: Estimate Anorm and Acond. + stats = LanczosStats(solved, permutedims(reshape(rNorms_history, nshifts, round(Int, sum(size(rNorms_history))/nshifts))), indefinite, zero(T), zero(T), status); # TODO: Estimate Anorm and Acond. 
   return (x, stats);
 end
diff --git a/src/cgls.jl b/src/cgls.jl
index a93243ab9..ebf154a6c 100644
--- a/src/cgls.jl
+++ b/src/cgls.jl
@@ -37,7 +37,7 @@ CGLS produces monotonic residuals ‖r‖₂ but not optimality residuals ‖A
 It is formally equivalent to LSQR, though can be slightly less accurate,
 but simpler to implement.
 """
-function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cgls(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
               M :: AbstractLinearOperator=opEye(), λ :: T=zero(T),
               atol :: T=√eps(T), rtol :: T=√eps(T), radius :: T=zero(T),
               itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat
@@ -46,12 +46,15 @@ function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size");
   verbose && @printf("CGLS: system of %d equations in %d variables\n", m, n);
 
+  # Compute the adjoint of A
+  Aᵀ = A'
+
   x = zeros(T, n);
   r = copy(b)
   bNorm = @knrm2(m, r)  # Marginally faster than norm(b);
   bNorm == 0 && return x, SimpleStats(true, false, [zero(T)], [zero(T)], "x = 0 is a zero-residual solution");
   Mr = M * r
-  s = A.tprod(Mr)
+  s = Aᵀ * Mr
   p = copy(s);
   γ = @kdot(n, s, s)  # Faster than γ = dot(s, s);
   iter = 0;
@@ -87,7 +90,7 @@ function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
     @kaxpy!(n, α, p, x)  # Faster than x = x + α * p;
     @kaxpy!(m, -α, q, r) # Faster than r = r - α * q;
     Mr = M * r
-    s = A.tprod(Mr);
+    s = Aᵀ * Mr
     λ > 0 && @kaxpy!(n, -λ, x, s)  # s = A' * r - λ * x;
     γ_next = @kdot(n, s, s)  # Faster than γ_next = dot(s, s);
     β = γ_next / γ;
diff --git a/src/cgne.jl b/src/cgne.jl
index 4539b3199..67c4f8519 100644
--- a/src/cgne.jl
+++ b/src/cgne.jl
@@ -53,7 +53,7 @@ but simpler to implement. Only the x-part of the solution is returned.
 
 A preconditioner M may be provided in the form of a linear operator.
 """
-function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cgne(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
              M :: AbstractLinearOperator=opEye(), λ :: T=zero(T),
              atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0,
              verbose :: Bool=false) where T <: AbstractFloat
@@ -62,6 +62,9 @@ function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size");
   verbose && @printf("CGNE: system of %d equations in %d variables\n", m, n);
 
+  # Compute the adjoint of A
+  Aᵀ = A'
+
   x = zeros(T, n);
   r = copy(b)
   z = M * r
@@ -72,7 +75,7 @@ function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
   # The following vector copy takes care of the case where A is a LinearOperator
   # with preallocation, so as to avoid overwriting vectors used later. In other
   # case, this should only add minimum overhead.
-  p = copy(A.tprod(z));
+  p = copy(Aᵀ * z)
 
   # Use ‖p‖ to detect inconsistent system.
   # An inconsistent system will necessarily have AA' singular.
@@ -107,7 +110,7 @@ function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
     z = M * r
     γ_next = @kdot(m, r, z)  # Faster than γ_next = dot(r, z);
     β = γ_next / γ;
-    Aᵀz = A.tprod(z)
+    Aᵀz = Aᵀ * z
     @kaxpby!(n, one(T), Aᵀz, β, p)  # Faster than p = Aᵀz + β * p;
     pNorm = @knrm2(n, p)
     if λ > 0
diff --git a/src/cgs.jl b/src/cgs.jl
index b0e6ae5f0..75b7ad1cf 100644
--- a/src/cgs.jl
+++ b/src/cgs.jl
@@ -32,7 +32,7 @@ TFQMR and BICGSTAB were developed to remedy this difficulty.»
 
 This implementation allows a right preconditioner M.
""" -function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function cgs(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -45,7 +45,7 @@ function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T}; # Initial solution x₀ and residual r₀. x = zeros(T, n) # x₀ r = copy(b) # r₀ - # Compute ρ₀ = < r₀,r₀ > and residual norm ‖r₀‖₂. + # Compute ρ₀ = ⟨ r₀,r₀ ⟩ and residual norm ‖r₀‖₂. ρ = @kdot(n, r, r) rNorm = sqrt(ρ) rNorm == 0 && return x, SimpleStats(true, false, [rNorm], T[], "x = 0 is a zero-residual solution") @@ -71,7 +71,7 @@ function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T}; y = M * p # yₘ = M⁻¹pₘ v = A * y # vₘ = Ayₘ - σ = @kdot(n, v, b) # σₘ = < AM⁻¹pₘ,r₀ > + σ = @kdot(n, v, b) # σₘ = ⟨ AM⁻¹pₘ,r₀ ⟩ α = ρ / σ # αₘ = ρₘ / σₘ @. q = u - α * v # qₘ = uₘ - αₘ * AM⁻¹pₘ @kaxpy!(n, one(T), q, u) # uₘ₊½ = uₘ + qₘ @@ -79,7 +79,7 @@ function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kaxpy!(n, α, z, x) # xₘ₊₁ = xₘ + αₘ * M⁻¹(uₘ + qₘ) w = A * z # wₘ = AM⁻¹(uₘ + qₘ) @kaxpy!(n, -α, w, r) # rₘ₊₁ = rₘ - αₘ * AM⁻¹(uₘ + qₘ) - ρ_next = @kdot(n, r, b) # ρₘ₊₁ = < rₘ₊₁,r₀ > + ρ_next = @kdot(n, r, b) # ρₘ₊₁ = ⟨ rₘ₊₁,r₀ ⟩ β = ρ_next / ρ # βₘ = ρₘ₊₁ / ρₘ @. u = r + β * q # uₘ₊₁ = rₘ₊₁ + βₘ * qₘ @kaxpby!(n, one(T), q, β, p) # pₘ₊₁ = uₘ₊₁ + βₘ * (qₘ + βₘ * pₘ) diff --git a/src/cr.jl b/src/cr.jl index a06c1d614..39f07f2b4 100644 --- a/src/cr.jl +++ b/src/cr.jl @@ -14,7 +14,7 @@ A preconditioner M may be provided in the form of a linear operator and is assumed to be symmetric and positive definite. In a linesearch context, 'linesearch' must be set to 'true'. """ -function cr(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function cr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), atol :: T=√eps(T), rtol :: T=√eps(T), γ :: T=√eps(T), itmax :: Int=0, radius :: T=zero(T), verbose :: Bool=false, linesearch :: Bool=false) where T <: AbstractFloat diff --git a/src/craig.jl b/src/craig.jl index 0af37e430..8bcd06e90 100644 --- a/src/craig.jl +++ b/src/craig.jl @@ -58,7 +58,7 @@ which is equivalent to applying CG to (M + AN⁻¹Aᵀ)y = b. In this implementation, both the x and y-parts of the solution are returned. """ -function craig(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function craig(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), λ :: T=zero(T), @@ -70,6 +70,9 @@ function craig(A :: AbstractLinearOperator, b :: AbstractVector{T}; size(b, 1) == m || error("Inconsistent problem size") verbose && @printf("CRAIG: system of %d equations in %d variables\n", m, n) + # Compute the adjoint of A + Aᵀ = A' + # Tests M == Iₘ and N == Iₙ MisI = isa(M, opEye) NisI = isa(N, opEye) @@ -133,7 +136,7 @@ function craig(A :: AbstractLinearOperator, b :: AbstractVector{T}; while ! (solved || inconsistent || ill_cond || tired) # Generate the next Golub-Kahan vectors # 1. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u @kaxpby!(n, one(T), Aᵀu, -β, Nv) v = N * Nv α = sqrt(@kdot(n, v, Nv)) diff --git a/src/craigmr.jl b/src/craigmr.jl index a84696c0c..1d8262320 100644 --- a/src/craigmr.jl +++ b/src/craigmr.jl @@ -60,7 +60,7 @@ It is formally equivalent to CRMR, though can be slightly more accurate, and intricate to implement. Both the x- and y-parts of the solution are returned. 
""" -function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function craigmr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), λ :: T=zero(T), atol :: T=√eps(T), rtol :: T=√eps(T), @@ -70,6 +70,9 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; size(b, 1) == m || error("Inconsistent problem size"); verbose && @printf("CRAIG-MR: system of %d equations in %d variables\n", m, n); + # Compute the adjoint of A + Aᵀ = A' + # Tests M == Iₘ and N == Iₙ MisI = isa(M, opEye) NisI = isa(N, opEye) @@ -87,7 +90,7 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kscal!(m, one(T)/β, u) MisI || @kscal!(m, one(T)/β, Mu) # α₁Nv₁ = Aᵀu₁. - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u Nv = copy(Aᵀu) v = N * Nv α = sqrt(@kdot(n, v, Nv)) @@ -166,7 +169,7 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kaxpy!(m, ζ, w, y) # y = y + ζ * w; # 2. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u @kaxpby!(n, one(T), Aᵀu, -β, Nv) v = N * Nv α = sqrt(@kdot(n, v, Nv)) @@ -190,7 +193,7 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; tired = iter >= itmax end - Aᵀy = A.tprod(y) + Aᵀy = Aᵀ * y N⁻¹Aᵀy = N * Aᵀy @. x = N⁻¹Aᵀy diff --git a/src/crls.jl b/src/crls.jl index 7a2fb4f4f..c034b593d 100644 --- a/src/crls.jl +++ b/src/crls.jl @@ -36,7 +36,7 @@ CRLS produces monotonic residuals ‖r‖₂ and optimality residuals ‖Aᵀr It is formally equivalent to LSMR, though can be substantially less accurate, but simpler to implement. """ -function crls(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function crls(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), λ :: T=zero(T), atol :: T=√eps(T), rtol :: T=√eps(T), radius :: T=zero(T), itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -45,19 +45,22 @@ function crls(A :: AbstractLinearOperator, b :: AbstractVector{T}; size(b, 1) == m || error("Inconsistent problem size"); verbose && @printf("CRLS: system of %d equations in %d variables\n", m, n); + # Compute the adjoint of A + Aᵀ = A' + x = zeros(T, n) r = copy(b) bNorm = @knrm2(m, r) # norm(b - A * x0) if x0 ≠ 0. bNorm == 0 && return x, SimpleStats(true, false, [zero(T)], [zero(T)], "x = 0 is a zero-residual solution"); Mr = M * r; - Ar = copy(A.tprod(Mr)) # - λ * x0 if x0 ≠ 0. + Ar = copy(Aᵀ * Mr) # - λ * x0 if x0 ≠ 0. s = A * Ar; Ms = M * s; p = copy(Ar); Ap = copy(s); - q = A.tprod(Ms) # Ap; + q = Aᵀ * Ms # Ap; λ > 0 && @kaxpy!(n, λ, p, q) # q = q + λ * p; γ = @kdot(m, s, Ms) # Faster than γ = dot(s, Ms); iter = 0; @@ -90,7 +93,7 @@ function crls(A :: AbstractLinearOperator, b :: AbstractVector{T}; psd = true # det(AᵀA) = 0 p = Ar # p = Aᵀr pNorm² = ArNorm * ArNorm - q = A.tprod(s) + q = Aᵀ * s α = min(ArNorm^2 / γ, maximum(to_boundary(x, p, radius, flip = false, dNorm2 = pNorm²))) # the quadratic is minimal in the direction Aᵀr for α = ‖Ar‖²/γ else pNorm² = pNorm * pNorm @@ -117,7 +120,7 @@ function crls(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kaxpby!(n, one(T), Ar, β, p) # Faster than p = Ar + β * p; @kaxpby!(m, one(T), s, β, Ap) # Faster than Ap = s + β * Ap; MAp = M * Ap - q = A.tprod(MAp) + q = Aᵀ * MAp λ > 0 && @kaxpy!(n, λ, p, q) # q = q + λ * p; γ = γ_next; diff --git a/src/crmr.jl b/src/crmr.jl index 22edb82b2..1329bd9e1 100644 --- a/src/crmr.jl +++ b/src/crmr.jl @@ -52,7 +52,7 @@ but simpler to implement. Only the x-part of the solution is returned. 
A preconditioner M may be provided in the form of a linear operator. """ -function crmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function crmr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), λ :: T=zero(T), atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -61,13 +61,16 @@ function crmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; size(b, 1) == m || error("Inconsistent problem size"); verbose && @printf("CRMR: system of %d equations in %d variables\n", m, n); + # Compute the adjoint of A + Aᵀ = A' + x = zeros(T, n) # initial estimation x = 0 r = copy(M * b) # initial residual r = M * (b - Ax) = M * b bNorm = @knrm2(m, r) # norm(b - A * x0) if x0 ≠ 0. bNorm == 0 && return x, SimpleStats(true, false, [zero(T)], [zero(T)], "x = 0 is a zero-residual solution"); rNorm = bNorm; # + λ * ‖x0‖ if x0 ≠ 0 and λ > 0. λ > 0 && (s = copy(r)); - Aᵀr = A.tprod(r) # - λ * x0 if x0 ≠ 0. + Aᵀr = Aᵀ * r # - λ * x0 if x0 ≠ 0. p = copy(Aᵀr); γ = @kdot(n, Aᵀr, Aᵀr) # Faster than γ = dot(Aᵀr, Aᵀr); λ > 0 && (γ += λ * rNorm * rNorm); @@ -95,7 +98,7 @@ function crmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kaxpy!(n, α, p, x) # Faster than x = x + α * p; @kaxpy!(m, -α, Mq, r) # Faster than r = r - α * Mq; rNorm = @knrm2(m, r) # norm(r); - Aᵀr = A.tprod(r) + Aᵀr = Aᵀ * r γ_next = @kdot(n, Aᵀr, Aᵀr) # Faster than γ_next = dot(Aᵀr, Aᵀr); λ > 0 && (γ_next += λ * rNorm * rNorm); β = γ_next / γ; diff --git a/src/diom.jl b/src/diom.jl index 88ff525a5..d381ea068 100644 --- a/src/diom.jl +++ b/src/diom.jl @@ -25,7 +25,7 @@ This implementation allows a left preconditioner M and a right preconditioner N. - Right preconditioning : AN⁻¹u = b with x = N⁻¹u - Split preconditioning : M⁻¹AN⁻¹u = M⁻¹b with x = N⁻¹u """ -function diom(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function diom(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, diff --git a/src/dqgmres.jl b/src/dqgmres.jl index bb6d10197..152566f11 100644 --- a/src/dqgmres.jl +++ b/src/dqgmres.jl @@ -23,7 +23,7 @@ This implementation allows a left preconditioner M and a right preconditioner N. - Right preconditioning : AN⁻¹u = b with x = N⁻¹u - Split preconditioning : M⁻¹AN⁻¹u = M⁻¹b with x = N⁻¹u """ -function dqgmres(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function dqgmres(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, diff --git a/src/lslq.jl b/src/lslq.jl index 125dc0e9a..9ec259cc8 100644 --- a/src/lslq.jl +++ b/src/lslq.jl @@ -92,7 +92,7 @@ The iterations stop as soon as one of the following conditions holds true: * R. Estrin, D. Orban and M. A. Saunders, *Estimates of the 2-Norm Forward Error for SYMMLQ and CG*, Cahier du GERAD G-2016-70, GERAD, Montreal, 2016. DOI http://dx.doi.org/10.13140/RG.2.2.19581.77288. * R. Estrin, D. Orban and M. A. Saunders, *LSLQ: An Iterative Method for Linear Least-Squares with an Error Minimization Property*, Cahier du GERAD G-2017-xx, GERAD, Montreal, 2017. 
""" -function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function lslq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), sqd :: Bool=false, λ :: T=zero(T), σ :: T=zero(T), @@ -108,6 +108,9 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T}; MisI = isa(M, opEye) NisI = isa(N, opEye) + # Compute the adjoint of A + Aᵀ = A' + # If solving an SQD system, set regularization to 1. sqd && (λ = one(T)) λ² = λ * λ @@ -129,7 +132,7 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kscal!(m, one(T)/β₁, u) MisI || @kscal!(m, one(T)/β₁, Mu) - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u Nv = copy(Aᵀu) v = N * Nv α = sqrt(@kdot(n, v, Nv)) # = α₁ @@ -207,7 +210,7 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T}; MisI || @kscal!(m, one(T)/β, Mu) # 2. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u @kaxpby!(n, one(T), Aᵀu, -β, Nv) v = N * Nv α = sqrt(@kdot(n, v, Nv)) diff --git a/src/lsmr.jl b/src/lsmr.jl index 833664668..66363fcfe 100644 --- a/src/lsmr.jl +++ b/src/lsmr.jl @@ -57,7 +57,7 @@ indefinite system In this case, `N` can still be specified and indicates the norm in which `x` should be measured. """ -function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function lsmr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), sqd :: Bool=false, @@ -71,6 +71,9 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; size(b, 1) == m || error("Inconsistent problem size") verbose && @printf("LSMR: system of %d equations in %d variables\n", m, n) + # Compute the adjoint of A + Aᵀ = A' + # Tests M == Iₙ and N == Iₘ MisI = isa(M, opEye) NisI = isa(N, opEye) @@ -90,7 +93,7 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kscal!(m, one(T)/β₁, u) MisI || @kscal!(m, one(T)/β₁, Mu) - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u Nv = copy(Aᵀu) v = N * Nv α = sqrt(@kdot(n, v, Nv)) @@ -165,7 +168,7 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; MisI || @kscal!(m, one(T)/β, Mu) # 2. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u @kaxpby!(n, one(T), Aᵀu, -β, Nv) v = N * Nv α = sqrt(@kdot(n, v, Nv)) diff --git a/src/lsqr.jl b/src/lsqr.jl index 1c1bc2b28..4472030c0 100644 --- a/src/lsqr.jl +++ b/src/lsqr.jl @@ -57,7 +57,7 @@ indefinite system In this case, `N` can still be specified and indicates the norm in which `x` should be measured. """ -function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function lsqr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), N :: AbstractLinearOperator=opEye(), sqd :: Bool=false, @@ -71,6 +71,9 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T}; size(b, 1) == m || error("Inconsistent problem size") verbose && @printf("LSQR: system of %d equations in %d variables\n", m, n) + # Compute the adjoint of A + Aᵀ = A' + # Tests M == Iₙ and N == Iₘ MisI = isa(M, opEye) NisI = isa(N, opEye) @@ -91,7 +94,7 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T}; @kscal!(m, one(T)/β₁, u) MisI || @kscal!(m, one(T)/β₁, Mu) - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u Nv = copy(Aᵀu) v = N * Nv Anorm² = @kdot(n, v, Nv) @@ -163,7 +166,7 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T}; λ > 0 && (Anorm² += λ²) # 2. 
αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ - Aᵀu = A.tprod(u) + Aᵀu = Aᵀ * u @kaxpby!(n, one(T), Aᵀu, -β, Nv) v = N * Nv α = sqrt(@kdot(n, v, Nv)) diff --git a/src/minres.jl b/src/minres.jl index bbe8f8069..31d7e8481 100644 --- a/src/minres.jl +++ b/src/minres.jl @@ -42,7 +42,7 @@ MINRES produces monotonic residuals ‖r‖₂ and optimality residuals ‖Aᵀr A preconditioner M may be provided in the form of a linear operator and is assumed to be symmetric and positive definite. """ -function minres(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function minres(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), λ :: T=zero(T), atol :: T=√eps(T)/100, rtol :: T=√eps(T)/100, etol :: T=√eps(T), window :: Int=5, itmax :: Int=0, conlim :: T=1/√eps(T), diff --git a/src/minres_qlp.jl b/src/minres_qlp.jl index a5a1d2e42..cffef3c65 100644 --- a/src/minres_qlp.jl +++ b/src/minres_qlp.jl @@ -23,7 +23,7 @@ It is significantly more complex but can be more reliable than MINRES when A is This version of MINRES-QLP works in any floating-point data type. """ -function minres_qlp(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function minres_qlp(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; atol :: T=√eps(T), rtol :: T=√eps(T), λ ::T=zero(T), itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat diff --git a/src/qmr.jl b/src/qmr.jl index 227038cd4..18fab12c6 100644 --- a/src/qmr.jl +++ b/src/qmr.jl @@ -26,7 +26,7 @@ When A is symmetric and b = c, QMR is equivalent to MINRES. This version of QMR works in any floating-point data type. """ -function qmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: AbstractVector{T}=b, +function qmr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; c :: AbstractVector{T}=b, atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -35,6 +35,9 @@ function qmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: AbstractV length(b) == m || error("Inconsistent problem size") verbose && @printf("QMR: system of size %d\n", n) + # Compute the adjoint of A + Aᵀ = A' + # Initial solution x₀ and residual norm ‖r₀‖. x = zeros(T, n) rNorm = @knrm2(n, b) # rNorm = ‖r₀‖ @@ -80,8 +83,8 @@ function qmr(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: AbstractV # AVₖ = VₖTₖ + βₖ₊₁vₖ₊₁(eₖ)ᵀ = Vₖ₊₁Tₖ₊₁.ₖ # AᵀUₖ = Uₖ(Tₖ)ᵀ + γₖ₊₁uₖ₊₁(eₖ)ᵀ = Uₖ₊₁(Tₖ.ₖ₊₁)ᵀ - q = A * vₖ # Forms vₖ₊₁ : q ← Avₖ - p = A.tprod(uₖ) # Forms uₖ₊₁ : p ← Aᵀuₖ + q = A * vₖ # Forms vₖ₊₁ : q ← Avₖ + p = Aᵀ * uₖ # Forms uₖ₊₁ : p ← Aᵀuₖ @kaxpy!(n, -γₖ, vₖ₋₁, q) # q ← q - γₖ * vₖ₋₁ @kaxpy!(n, -βₖ, uₖ₋₁, p) # p ← p - βₖ * uₖ₋₁ diff --git a/src/symmlq.jl b/src/symmlq.jl index b7925597d..9c4a43555 100644 --- a/src/symmlq.jl +++ b/src/symmlq.jl @@ -24,7 +24,7 @@ SYMMLQ produces monotonic errors ‖x*-x‖₂. A preconditioner M may be provided in the form of a linear operator and is assumed to be symmetric and positive definite. """ -function symmlq(A :: AbstractLinearOperator, b :: AbstractVector{T}; +function symmlq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; M :: AbstractLinearOperator=opEye(), λ :: T=zero(T), λest :: T=zero(T), atol :: T=√eps(T), rtol :: T=√eps(T), etol :: T=√eps(T), window :: Int=0, itmax :: Int=0, diff --git a/src/usymlq.jl b/src/usymlq.jl index 89ec53689..3d3133746 100644 --- a/src/usymlq.jl +++ b/src/usymlq.jl @@ -33,7 +33,7 @@ when it exists. The transfer is based on the residual norm. This version of USYMLQ works in any floating-point data type. 
""" -function usymlq(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: AbstractVector{T}; +function usymlq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}, c :: AbstractVector{T}; atol :: T=√eps(T), rtol :: T=√eps(T), transfer_to_usymcg :: Bool=true, itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -42,6 +42,9 @@ function usymlq(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: Abstra length(c) == n || error("Inconsistent problem size") verbose && @printf("USYMLQ: system of %d equations in %d variables\n", m, n) + # Compute the adjoint of A + Aᵀ = A' + # Initial solution x₀ and residual norm ‖r₀‖. x = zeros(T, n) bNorm = @knrm2(m, b) # ‖r₀‖ @@ -62,7 +65,7 @@ function usymlq(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: Abstra uₖ₋₁ = zeros(T, n) # u₀ = 0 vₖ = b / βₖ # v₁ = b / β₁ uₖ = c / γₖ # u₁ = c / γ₁ - cₖ₋₁ = cₖ = - one(T) # Givens cosines used for the LQ factorization of Tₖ + cₖ₋₁ = cₖ = -one(T) # Givens cosines used for the LQ factorization of Tₖ sₖ₋₁ = sₖ = zero(T) # Givens sines used for the LQ factorization of Tₖ d̅ = zeros(T, n) # Last column of D̅ₖ = Uₖ(Qₖ)ᵀ ζₖ₋₁ = ζbarₖ = zero(T) # ζₖ₋₁ and ζbarₖ are the last components of z̅ₖ = (L̅ₖ)⁻¹β₁e₁ @@ -83,8 +86,8 @@ function usymlq(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: Abstra # AUₖ = VₖTₖ + βₖ₊₁vₖ₊₁(eₖ)ᵀ = Vₖ₊₁Tₖ₊₁.ₖ # AᵀVₖ = Uₖ(Tₖ)ᵀ + γₖ₊₁uₖ₊₁(eₖ)ᵀ = Uₖ₊₁(Tₖ.ₖ₊₁)ᵀ - q = A * uₖ # Forms vₖ₊₁ : q ← Auₖ - p = A.tprod(vₖ) # Forms uₖ₊₁ : p ← Aᵀvₖ + q = A * uₖ # Forms vₖ₊₁ : q ← Auₖ + p = Aᵀ * vₖ # Forms uₖ₊₁ : p ← Aᵀvₖ @kaxpy!(m, -γₖ, vₖ₋₁, q) # q ← q - γₖ * vₖ₋₁ @kaxpy!(n, -βₖ, uₖ₋₁, p) # p ← p - βₖ * uₖ₋₁ diff --git a/src/usymqr.jl b/src/usymqr.jl index ff1802121..f18627e24 100644 --- a/src/usymqr.jl +++ b/src/usymqr.jl @@ -30,7 +30,7 @@ USYMQR finds the minimum-norm solution if problems are inconsistent. This version of USYMQR works in any floating-point data type. """ -function usymqr(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: AbstractVector{T}; +function usymqr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}, c :: AbstractVector{T}; atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat @@ -39,6 +39,9 @@ function usymqr(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: Abstra length(c) == n || error("Inconsistent problem size") verbose && @printf("USYMQR: system of %d equations in %d variables\n", m, n) + # Compute the adjoint of A + Aᵀ = A' + # Initial solution x₀ and residual norm ‖r₀‖. 
x = zeros(T, n) rNorm = @knrm2(m, b) @@ -81,8 +84,8 @@ function usymqr(A :: AbstractLinearOperator, b :: AbstractVector{T}, c :: Abstra # AUₖ = VₖTₖ + βₖ₊₁vₖ₊₁(eₖ)ᵀ = Vₖ₊₁Tₖ₊₁.ₖ # AᵀVₖ = Uₖ(Tₖ)ᵀ + γₖ₊₁uₖ₊₁(eₖ)ᵀ = Uₖ₊₁(Tₖ.ₖ₊₁)ᵀ - q = A * uₖ # Forms vₖ₊₁ : q ← Auₖ - p = A.tprod(vₖ) # Forms uₖ₊₁ : p ← Aᵀvₖ + q = A * uₖ # Forms vₖ₊₁ : q ← Auₖ + p = Aᵀ * vₖ # Forms uₖ₊₁ : p ← Aᵀvₖ @kaxpy!(m, -γₖ, vₖ₋₁, q) # q ← q - γₖ * vₖ₋₁ @kaxpy!(n, -βₖ, uₖ₋₁, p) # p ← p - βₖ * uₖ₋₁ diff --git a/test/test_alloc.jl b/test/test_alloc.jl index 604883bba..a998059a3 100644 --- a/test/test_alloc.jl +++ b/test/test_alloc.jl @@ -101,6 +101,7 @@ actual_cr_bytes = @allocated cr(A, b, rtol=1e-6) # - 1 m-vector: r storage_crmr(n, m) = 2 * n + m storage_crmr_bytes(n, m) = 8 * storage_crmr(n, m) + expected_crmr_bytes = storage_crmr_bytes(n, m) (x, stats) = crmr(Au, c) # warmup actual_crmr_bytes = @allocated crmr(Au, c) @@ -109,6 +110,7 @@ actual_crmr_bytes = @allocated crmr(Au, c) # without preconditioner and with Ap preallocated, CGS needs 5 n-vectors: x, r, u, p, q storage_cgs(n) = 5 * n storage_cgs_bytes(n) = 8 * storage_cgs(n) + expected_cgs_bytes = storage_cgs_bytes(n) cgs(A, b) # warmup actual_cgs_bytes = @allocated cgs(A, b) @@ -130,6 +132,7 @@ actual_craigmr_bytes = @allocated craigmr(Au, c) # - 1 m-vector: r storage_cgne(n, m) = 2 * n + m storage_cgne_bytes(n, m) = 8 * storage_cgne(n, m) + expected_cgne_bytes = storage_cgne_bytes(n, m) (x, stats) = cgne(Au, c) # warmup actual_cgne_bytes = @allocated cgne(Au, c) @@ -151,6 +154,7 @@ actual_craig_bytes = @allocated craig(Au, c) # - 1 n-vector: u storage_lslq(n, m) = 3 * m + n storage_lslq_bytes(n, m) = 8 * storage_lslq(n, m) + expected_lslq_bytes = storage_lslq_bytes(n, m) (x, stats) = lslq(Ao, b) # warmup actual_lslq_bytes = @allocated lslq(Ao, b) @@ -161,6 +165,7 @@ actual_lslq_bytes = @allocated lslq(Ao, b) # - 1 n-vector: r storage_cgls(n, m) = n + 2 * m storage_cgls_bytes(n, m) = 8 * storage_cgls(n, m) + expected_cgls_bytes = storage_cgls_bytes(n, m) (x, stats) = cgls(Ao, b) # warmup actual_cgls_bytes = @allocated cgls(Ao, b) @@ -171,6 +176,7 @@ actual_cgls_bytes = @allocated cgls(Ao, b) # - 1 n-vector: u storage_lsqr(n, m) = 3 * m + n storage_lsqr_bytes(n, m) = 8 * storage_lsqr(n, m) + expected_lsqr_bytes = storage_lsqr_bytes(n, m) (x, stats) = lsqr(Ao, b) # warmup actual_lsqr_bytes = @allocated lsqr(Ao, b) @@ -181,6 +187,7 @@ actual_lsqr_bytes = @allocated lsqr(Ao, b) # - 2 n-vector: r, Ap storage_crls(n, m) = 3 * m + 2 * n storage_crls_bytes(n, m) = 8 * storage_crls(n, m) + expected_crls_bytes = storage_crls_bytes(n, m) (x, stats) = crls(Ao, b) # warmup actual_crls_bytes = @allocated crls(Ao, b) @@ -191,6 +198,7 @@ actual_crls_bytes = @allocated crls(Ao, b) # - 1 n-vector: u storage_lsmr(n, m) = 4 * m + n storage_lsmr_bytes(n, m) = 8 * storage_lsmr(n, m) + expected_lsmr_bytes = storage_lsmr_bytes(n, m) (x, stats) = lsmr(Ao, b) # warmup actual_lsmr_bytes = @allocated lsmr(Ao, b) @@ -201,6 +209,7 @@ actual_lsmr_bytes = @allocated lsmr(Ao, b) # - 2 n-vectors: uₖ₋₁, uₖ storage_usymqr(n, m) = 5 * m + 2 * n storage_usymqr_bytes(n, m) = 8 * storage_usymqr(n, m) + expected_usymqr_bytes = storage_usymqr_bytes(n, m) (x, stats) = usymqr(Ao, b, c) # warmup actual_usymqr_bytes = @allocated usymqr(Ao, b, c) @@ -210,6 +219,7 @@ actual_usymqr_bytes = @allocated usymqr(Ao, b, c) # - 6 n-vectors: uₖ₋₁, uₖ, vₖ₋₁, vₖ, x, d̅ storage_bilq(n) = 6 * n storage_bilq_bytes(n) = 8 * storage_bilq(n) + expected_bilq_bytes = storage_bilq_bytes(n) bilq(A, b) # warmup actual_bilq_bytes = 
@allocated bilq(A, b) @@ -219,6 +229,7 @@ actual_bilq_bytes = @allocated bilq(A, b) # - 5 n-vectors: wₖ₋₁, wₖ, vₖ₋₁, vₖ, x storage_minres_qlp(n) = 5 * n storage_minres_qlp_bytes(n) = 8 * storage_minres_qlp(n) + expected_minres_qlp_bytes = storage_minres_qlp_bytes(n) minres_qlp(A, b) # warmup actual_minres_qlp_bytes = @allocated minres_qlp(A, b) @@ -228,6 +239,7 @@ actual_minres_qlp_bytes = @allocated minres_qlp(A, b) # - 7 n-vectors: uₖ₋₁, uₖ, vₖ₋₁, vₖ, x, wₖ₋₁, wₖ storage_qmr(n) = 7 * n storage_qmr_bytes(n) = 8 * storage_qmr(n) + expected_qmr_bytes = storage_qmr_bytes(n) qmr(A, b) # warmup actual_qmr_bytes = @allocated qmr(A, b) @@ -238,6 +250,7 @@ actual_qmr_bytes = @allocated qmr(A, b) # - 2 m-vectors: uₖ₋₁, uₖ storage_usymlq(n, m) = 4 * n + 2 * m storage_usymlq_bytes(n, m) = 8 * storage_usymlq(n, m) + expected_usymlq_bytes = storage_usymlq_bytes(n, m) usymlq(Au, c, b) # warmup actual_usymlq_bytes = @allocated usymlq(Au, c, b) From ace45b884788075f45ac840804228fc836740c0f Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Wed, 20 Nov 2019 16:24:38 -0500 Subject: [PATCH 2/2] =?UTF-8?q?limit=20LinearOperators=20=E2=89=A5=200.7.1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Project.toml | 2 +- docs/Project.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index 2cea8ecba..c1045f157 100644 --- a/Project.toml +++ b/Project.toml @@ -11,5 +11,5 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] -LinearOperators = "0.5.2, 0.6" +LinearOperators = "0.7.1, 1" julia = "^1.0.0" diff --git a/docs/Project.toml b/docs/Project.toml index 2492b03c2..8209e69c7 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -4,4 +4,4 @@ Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125" [compat] -LinearOperators = "0.5.2, 0.6" +LinearOperators = "0.7.1, 1"
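
For reference, a minimal sketch of the syntax this series moves to, assuming LinearOperators.jl ≥ 0.7.1 as pinned in the compat bounds above. The 3×2 matrix M, the vector u and the variable names below are illustrative only and are not part of the patches:

    using LinearOperators

    M = [1.0 2.0; 3.0 4.0; 5.0 6.0]  # illustrative rectangular matrix
    A = LinearOperator(M)            # wrap it as a linear operator
    u = ones(3)

    # Removed syntax: p = A.tprod(u)
    # New syntax: build the adjoint operator once, then apply it with *.
    Aᵀ = A'
    p = Aᵀ * u   # applies Aᵀu
    p ≈ M' * u   # true

As in the solvers patched above, Aᵀ = A' is formed once before the iteration loop, so the adjoint operator is not rebuilt at each iteration; for the real element types used here (T <: AbstractFloat), the adjoint and the transpose coincide.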