Remove A.tprod #151

Merged · 2 commits · Nov 21, 2019
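This PR stops reaching into the operator's internals via `A.tprod(u)`: each solver now forms the adjoint operator once with `Aᵀ = A'` and writes every transpose product as `Aᵀ * u`, and the `[compat]` bounds are bumped to LinearOperators 0.7.1/1 accordingly. A minimal sketch of the two styles (illustrative operator, assuming LinearOperators ≥ 0.7):

```julia
using LinearOperators

A = LinearOperator(rand(4, 3))  # any real operator stands in here
u = rand(4)

# Before this PR: p = A.tprod(u)   (direct field access, now removed)
# After this PR: build the lazy adjoint once, outside the hot loop...
Aᵀ = A'      # no matrix is materialized
# ...and reuse it for every transpose product.
p = Aᵀ * u   # p ← Aᵀu, same result as the old tprod call
```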
2 changes: 1 addition & 1 deletion Project.toml
@@ -11,5 +11,5 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

 [compat]
-LinearOperators = "0.5.2, 0.6"
+LinearOperators = "0.7.1, 1"
 julia = "^1.0.0"
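Aside on the `[compat]` change: Pkg reads each comma-separated specifier with caret semantics, so `"0.7.1, 1"` admits versions in [0.7.1, 0.8.0) or [1.0.0, 2.0.0) and excludes the 0.5/0.6 releases this package previously supported. A quick sanity check (a sketch; `semver_spec` is an internal helper of Pkg):

```julia
using Pkg

spec = Pkg.Types.semver_spec("0.7.1, 1")
@show v"0.7.5" in spec  # true:  [0.7.1, 0.8.0)
@show v"0.6.9" in spec  # false: older API no longer accepted
@show v"1.3.0" in spec  # true:  [1.0.0, 2.0.0)
```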
2 changes: 1 addition & 1 deletion docs/Project.toml
@@ -4,4 +4,4 @@ Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7"
 LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125"

 [compat]
-LinearOperators = "0.5.2, 0.6"
+LinearOperators = "0.7.1, 1"
9 changes: 6 additions & 3 deletions src/bilq.jl
@@ -22,7 +22,7 @@ when it exists. The transfer is based on the residual norm.

 This version of BiLQ works in any floating-point data type.
 """
-function bilq(A :: AbstractLinearOperator, b :: AbstractVector{T}; c :: AbstractVector{T}=b,
+function bilq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T}; c :: AbstractVector{T}=b,
               atol :: T=√eps(T), rtol :: T=√eps(T), transfer_to_bicg :: Bool=true,
               itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat
@@ -31,6 +31,9 @@ function bilq(A :: AbstractLinearOperator, b :: Abstract
   length(b) == m || error("Inconsistent problem size")
   verbose && @printf("BILQ: system of size %d\n", n)

+  # Compute the adjoint of A
+  Aᵀ = A'
+
   # Initial solution x₀ and residual norm ‖r₀‖.
   x = zeros(T, n)
   bNorm = @knrm2(n, b) # ‖r₀‖
@@ -78,8 +81,8 @@ function bilq(A :: AbstractLinearOperator, b :: Abstract
     # AVₖ = VₖTₖ + βₖ₊₁vₖ₊₁(eₖ)ᵀ = Vₖ₊₁Tₖ₊₁.ₖ
     # AᵀUₖ = Uₖ(Tₖ)ᵀ + γₖ₊₁uₖ₊₁(eₖ)ᵀ = Uₖ₊₁(Tₖ.ₖ₊₁)ᵀ

-    q = A * vₖ      # Forms vₖ₊₁ : q ← Avₖ
-    p = A.tprod(uₖ) # Forms uₖ₊₁ : p ← Aᵀuₖ
+    q = A * vₖ  # Forms vₖ₊₁ : q ← Avₖ
+    p = Aᵀ * uₖ # Forms uₖ₊₁ : p ← Aᵀuₖ

     @kaxpy!(n, -γₖ, vₖ₋₁, q) # q ← q - γₖ * vₖ₋₁
     @kaxpy!(n, -βₖ, uₖ₋₁, p) # p ← p - βₖ * uₖ₋₁
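The docstring advertises that BiLQ works in any floating-point type, and the signature now ties the operator's eltype to `T` as well. A usage sketch under the post-PR API (the 3×3 system is made up):

```julia
using Krylov, LinearOperators

# An unsymmetric Float32 system; bilq forms Aᵀ = A' internally.
A = LinearOperator(Float32[2 1 0; 0 3 1; 1 0 4])
b = Float32[1, 2, 3]
x, stats = bilq(A, b)
@show eltype(x)     # Float32
@show stats.solved
```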
2 changes: 1 addition & 1 deletion src/cg.jl
@@ -18,7 +18,7 @@ The method does _not_ abort if A is not definite.
 A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 """
-function cg(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cg(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
             M :: AbstractLinearOperator=opEye(), atol :: T=√eps(T),
             rtol :: T=√eps(T), itmax :: Int=0, radius :: T=zero(T),
             verbose :: Bool=false) where T <: AbstractFloat
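The only change in cg.jl is the tightened signature: `AbstractLinearOperator{T}` forces the operator and the right-hand side to share an eltype, turning a silent mixed-precision product into a dispatch-time error. A sketch of what that buys (hypothetical values):

```julia
using Krylov, LinearOperators, LinearAlgebra

G = rand(5, 5)
A = LinearOperator(G'G + 5I)   # SPD operator, eltype Float64
b = rand(Float32, 5)           # eltype Float32
# cg(A, b)                     # now a MethodError: T must match on both sides
x, stats = cg(A, Float64.(b))  # promote explicitly instead
```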
36 changes: 18 additions & 18 deletions src/cg_lanczos.jl
@@ -22,7 +22,7 @@ The method does _not_ abort if A is not definite.
 A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 """
-function cg_lanczos(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cg_lanczos(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
                     M :: AbstractLinearOperator=opEye(),
                     atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0,
                     check_curvature :: Bool=false, verbose :: Bool=false) where T <: AbstractFloat
@@ -125,10 +125,10 @@ The method does _not_ abort if A + αI is not definite.
 A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 """
-function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{Tb},
-                              shifts :: AbstractVector{Ts}; M :: AbstractLinearOperator=opEye(),
-                              atol :: Tb=√eps(Tb), rtol :: Tb=√eps(Tb), itmax :: Int=0,
-                              check_curvature :: Bool=false, verbose :: Bool=false) where {Tb <: AbstractFloat, Ts <: Number}
+function cg_lanczos_shift_seq(A :: AbstractLinearOperator{T}, b :: AbstractVector{T},
+                              shifts :: AbstractVector{S}; M :: AbstractLinearOperator=opEye(),
+                              atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0,
+                              check_curvature :: Bool=false, verbose :: Bool=false) where {T <: AbstractFloat, S <: Number}

   n = size(b, 1);
   (size(A, 1) == n & size(A, 2) == n) || error("Inconsistent problem size");
@@ -141,30 +141,30 @@ function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{T

   # Initial state.
   ## Distribute x similarly to shifts.
-  x = [zeros(Tb, n) for i = 1 : nshifts] # x₀
+  x = [zeros(T, n) for i = 1 : nshifts]  # x₀
   Mv = copy(b)                           # Mv₁ ← b
   v = M * Mv                             # v₁ = M⁻¹ * Mv₁
   β = sqrt(@kdot(n, v, Mv))              # β₁ = v₁ᵀ M v₁
-  β == 0 && return x, LanczosStats(true, [zero(Tb)], false, zero(Tb), zero(Tb), "x = 0 is a zero-residual solution")
+  β == 0 && return x, LanczosStats(true, [zero(T)], false, zero(T), zero(T), "x = 0 is a zero-residual solution")

   # Initialize each p to v.
   p = [copy(v) for i = 1 : nshifts]

   # Initialize Lanczos process.
   # β₁Mv₁ = b
-  @kscal!(n, one(Tb)/β, v)          # v₁ ← v₁ / β₁
-  MisI || @kscal!(n, one(Tb)/β, Mv) # Mv₁ ← Mv₁ / β₁
+  @kscal!(n, one(T)/β, v)           # v₁ ← v₁ / β₁
+  MisI || @kscal!(n, one(T)/β, Mv)  # Mv₁ ← Mv₁ / β₁
   Mv_prev = copy(Mv)

   # Initialize some constants used in recursions below.
-  ρ = one(Tb)
-  σ = β * ones(Tb, nshifts)
-  δhat = zeros(Tb, nshifts)
-  ω = zeros(Tb, nshifts)
-  γ = ones(Tb, nshifts)
+  ρ = one(T)
+  σ = β * ones(T, nshifts)
+  δhat = zeros(T, nshifts)
+  ω = zeros(T, nshifts)
+  γ = ones(T, nshifts)

   # Define stopping tolerance.
-  rNorms = β * ones(Tb, nshifts);
+  rNorms = β * ones(T, nshifts);
   rNorms_history = [rNorms;];
   ε = atol + rtol * β;

@@ -202,8 +202,8 @@ function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{T
     @. Mv = Mv_next                   # Mvₖ ← Mvₖ₊₁
     v = M * Mv                        # vₖ₊₁ = M⁻¹ * Mvₖ₊₁
     β = sqrt(@kdot(n, v, Mv))         # βₖ₊₁ = vₖ₊₁ᵀ M vₖ₊₁
-    @kscal!(n, one(Tb)/β, v)          # vₖ₊₁ ← vₖ₊₁ / βₖ₊₁
-    MisI || @kscal!(n, one(Tb)/β, Mv) # Mvₖ₊₁ ← Mvₖ₊₁ / βₖ₊₁
+    @kscal!(n, one(T)/β, v)           # vₖ₊₁ ← vₖ₊₁ / βₖ₊₁
+    MisI || @kscal!(n, one(T)/β, Mv)  # Mvₖ₊₁ ← Mvₖ₊₁ / βₖ₊₁

     # Check curvature: vₖᵀ(A + sᵢI)vₖ = vₖᵀAvₖ + sᵢ‖vₖ‖² = δₖ + ρₖ * sᵢ with ρₖ = ‖vₖ‖².
     # It is possible to show that σₖ² (δₖ + ρₖ * sᵢ - ωₖ₋₁ / γₖ₋₁) = pₖᵀ (A + sᵢ I) pₖ.
@@ -242,6 +242,6 @@ function cg_lanczos_shift_seq(A :: AbstractLinearOperator, b :: AbstractVector{T
   end

   status = tired ? "maximum number of iterations exceeded" : "solution good enough given atol and rtol"
-  stats = LanczosStats(solved, permutedims(reshape(rNorms_history, nshifts, round(Int, sum(size(rNorms_history))/nshifts))), indefinite, zero(Tb), zero(Tb), status); # TODO: Estimate Anorm and Acond.
+  stats = LanczosStats(solved, permutedims(reshape(rNorms_history, nshifts, round(Int, sum(size(rNorms_history))/nshifts))), indefinite, zero(T), zero(T), status); # TODO: Estimate Anorm and Acond.
   return (x, stats);
 end
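Besides the Aᵀ cleanup, this file renames `Tb → T` and `Ts → S` and ties the operator's eltype to `b`'s, while shifts only need `S <: Number`, so integer shifts still dispatch. A usage sketch (made-up SPD operator):

```julia
using Krylov, LinearOperators, LinearAlgebra

n = 10
G = rand(n, n)
A = LinearOperator(Symmetric(G'G + n*I))  # SPD, eltype Float64 (T)
b = rand(n)
shifts = [0, 1, 10]                       # Int works: S <: Number
x, stats = cg_lanczos_shift_seq(A, b, shifts)
@show length(x)  # one solution vector per shift
```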
9 changes: 6 additions & 3 deletions src/cgls.jl
@@ -37,7 +37,7 @@ CGLS produces monotonic residuals ‖r‖₂ but not optimality residuals ‖A
 It is formally equivalent to LSQR, though can be slightly less accurate,
 but simpler to implement.
 """
-function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cgls(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
               M :: AbstractLinearOperator=opEye(), λ :: T=zero(T),
               atol :: T=√eps(T), rtol :: T=√eps(T), radius :: T=zero(T),
               itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat
@@ -46,12 +46,15 @@ function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size");
   verbose && @printf("CGLS: system of %d equations in %d variables\n", m, n);

+  # Compute Aᵀ
+  Aᵀ = A'
+
   x = zeros(T, n);
   r = copy(b)
   bNorm = @knrm2(m, r)  # Marginally faster than norm(b);
   bNorm == 0 && return x, SimpleStats(true, false, [zero(T)], [zero(T)], "x = 0 is a zero-residual solution");
   Mr = M * r
-  s = A.tprod(Mr)
+  s = Aᵀ * Mr
   p = copy(s);
   γ = @kdot(n, s, s)  # Faster than γ = dot(s, s);
   iter = 0;
@@ -87,7 +90,7 @@ function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
     @kaxpy!(n, α, p, x)   # Faster than x = x + α * p;
     @kaxpy!(m, -α, q, r)  # Faster than r = r - α * q;
     Mr = M * r
-    s = A.tprod(Mr);
+    s = Aᵀ * Mr
     λ > 0 && @kaxpy!(n, -λ, x, s)  # s = A' * r - λ * x;
     γ_next = @kdot(n, s, s)  # Faster than γ_next = dot(s, s);
     β = γ_next / γ;
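With `Aᵀ = A'` hoisted above the loop, each CGLS iteration still costs one product with `A` and one with `Aᵀ`, implicitly solving the regularized normal equations (AᵀA + λI)x = Aᵀb. A least-squares sketch on random data:

```julia
using Krylov, LinearOperators, LinearAlgebra

m, n = 20, 5
F = rand(m, n)                # overdetermined system
A = LinearOperator(F)
b = rand(m)
x, stats = cgls(A, b)
@show norm(F' * (b - F * x))  # optimality residual ‖Aᵀr‖, should be tiny
```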
9 changes: 6 additions & 3 deletions src/cgne.jl
@@ -53,7 +53,7 @@ but simpler to implement. Only the x-part of the solution is returned.

 A preconditioner M may be provided in the form of a linear operator.
 """
-function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cgne(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
               M :: AbstractLinearOperator=opEye(), λ :: T=zero(T),
               atol :: T=√eps(T), rtol :: T=√eps(T), itmax :: Int=0,
               verbose :: Bool=false) where T <: AbstractFloat
@@ -62,6 +62,9 @@ function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size");
   verbose && @printf("CGNE: system of %d equations in %d variables\n", m, n);

+  # Compute the adjoint of A
+  Aᵀ = A'
+
   x = zeros(T, n);
   r = copy(b)
   z = M * r
@@ -72,7 +75,7 @@ function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
   # The following vector copy takes care of the case where A is a LinearOperator
   # with preallocation, so as to avoid overwriting vectors used later. In other
   # case, this should only add minimum overhead.
-  p = copy(A.tprod(z));
+  p = copy(Aᵀ * z)

   # Use ‖p‖ to detect inconsistent system.
   # An inconsistent system will necessarily have AA' singular.
@@ -107,7 +110,7 @@ function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
     z = M * r
     γ_next = @kdot(m, r, z)  # Faster than γ_next = dot(r, z);
     β = γ_next / γ;
-    Aᵀz = A.tprod(z)
+    Aᵀz = Aᵀ * z
     @kaxpby!(n, one(T), Aᵀz, β, p)  # Faster than p = Aᵀz + β * p;
     pNorm = @knrm2(n, p)
     if λ > 0
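The comment above `p = copy(Aᵀ * z)` survives the rewrite for good reason: an operator built with preallocation can return the same buffer from every product, so keeping the result without copying would let the next product silently overwrite `p`. A contrived sketch of the aliasing hazard (v1-era functional constructor; all names are made up):

```julia
using LinearOperators

buf = zeros(3)
# 3×3 operator whose product always writes into (and returns) `buf`.
op = LinearOperator(Float64, 3, 3, false, false, v -> (buf .= 2 .* v; buf))

p = op * ones(3)        # p aliases buf: [2.0, 2.0, 2.0]
q = op * [1.0, 0, 0]    # overwrites buf, so p now reads [2.0, 0.0, 0.0] too
p = copy(op * ones(3))  # the copy breaks the alias, which is what cgne does
```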
8 changes: 4 additions & 4 deletions src/cgs.jl
@@ -32,7 +32,7 @@ TFQMR and BICGSTAB were developed to remedy this difficulty.»

 This implementation allows a right preconditioner M.
 """
-function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cgs(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
              M :: AbstractLinearOperator=opEye(),
              atol :: T=√eps(T), rtol :: T=√eps(T),
              itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat
@@ -45,7 +45,7 @@ function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T};
   # Initial solution x₀ and residual r₀.
   x = zeros(T, n) # x₀
   r = copy(b)     # r₀
-  # Compute ρ₀ = < r₀,r₀ > and residual norm ‖r₀‖₂.
+  # Compute ρ₀ = ⟨r₀,r₀⟩ and residual norm ‖r₀‖₂.
   ρ = @kdot(n, r, r)
   rNorm = sqrt(ρ)
   rNorm == 0 && return x, SimpleStats(true, false, [rNorm], T[], "x = 0 is a zero-residual solution")
@@ -71,15 +71,15 @@ function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T};

     y = M * p                # yₘ = M⁻¹pₘ
     v = A * y                # vₘ = Ayₘ
-    σ = @kdot(n, v, b)       # σₘ = < AM⁻¹pₘ,r₀ >
+    σ = @kdot(n, v, b)       # σₘ = ⟨AM⁻¹pₘ,r₀⟩
     α = ρ / σ                # αₘ = ρₘ / σₘ
     @. q = u - α * v         # qₘ = uₘ - αₘ * AM⁻¹pₘ
     @kaxpy!(n, one(T), q, u) # uₘ₊½ = uₘ + qₘ
     z = M * u                # zₘ = M⁻¹uₘ₊½
     @kaxpy!(n, α, z, x)      # xₘ₊₁ = xₘ + αₘ * M⁻¹(uₘ + qₘ)
     w = A * z                # wₘ = AM⁻¹(uₘ + qₘ)
     @kaxpy!(n, -α, w, r)     # rₘ₊₁ = rₘ - αₘ * AM⁻¹(uₘ + qₘ)
-    ρ_next = @kdot(n, r, b)  # ρₘ₊₁ = < rₘ₊₁,r₀ >
+    ρ_next = @kdot(n, r, b)  # ρₘ₊₁ = ⟨rₘ₊₁,r₀⟩
     β = ρ_next / ρ           # βₘ = ρₘ₊₁ / ρₘ
     @. u = r + β * q         # uₘ₊₁ = rₘ₊₁ + βₘ * qₘ
     @kaxpby!(n, one(T), q, β, p) # pₘ₊₁ = uₘ₊₁ + βₘ * (qₘ + βₘ * pₘ)
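Since the docstring allows a right preconditioner, here is a CGS sketch with a simple diagonal scaling (illustrative operator; `opDiagonal` is from LinearOperators):

```julia
using Krylov, LinearOperators, LinearAlgebra

n = 50
G = rand(n, n) + 2n*I          # diagonally dominant, hence nonsingular
A = LinearOperator(G)
M = opDiagonal(1 ./ diag(G))   # M ≈ diag(A)⁻¹ as a right preconditioner
x, stats = cgs(A, rand(n); M=M)
@show stats.solved
```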
2 changes: 1 addition & 1 deletion src/cr.jl
@@ -14,7 +14,7 @@ A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 In a linesearch context, 'linesearch' must be set to 'true'.
 """
-function cr(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function cr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
             M :: AbstractLinearOperator=opEye(), atol :: T=√eps(T),
             rtol :: T=√eps(T), γ :: T=√eps(T), itmax :: Int=0,
             radius :: T=zero(T), verbose :: Bool=false, linesearch :: Bool=false) where T <: AbstractFloat
7 changes: 5 additions & 2 deletions src/craig.jl
@@ -58,7 +58,7 @@ which is equivalent to applying CG to (M + AN⁻¹Aᵀ)y = b.

 In this implementation, both the x and y-parts of the solution are returned.
 """
-function craig(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function craig(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
               M :: AbstractLinearOperator=opEye(),
               N :: AbstractLinearOperator=opEye(),
               λ :: T=zero(T),
@@ -70,6 +70,9 @@ function craig(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size")
   verbose && @printf("CRAIG: system of %d equations in %d variables\n", m, n)

+  # Compute the adjoint of A
+  Aᵀ = A'
+
   # Tests M == Iₘ and N == Iₙ
   MisI = isa(M, opEye)
   NisI = isa(N, opEye)
@@ -133,7 +136,7 @@ function craig(A :: AbstractLinearOperator, b :: AbstractVector{T};
   while ! (solved || inconsistent || ill_cond || tired)
     # Generate the next Golub-Kahan vectors
     # 1. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ
-    Aᵀu = A.tprod(u)
+    Aᵀu = Aᵀ * u
     @kaxpby!(n, one(T), Aᵀu, -β, Nv)
     v = N * Nv
     α = sqrt(@kdot(n, v, Nv))
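For readers following the recurrence, the changed line is step 1 of the Golub-Kahan bidiagonalization. In plain Julia with toy data (taking N = I for simplicity; a paraphrase, not the merged code):

```julia
using LinearAlgebra

F  = rand(6, 4); Aᵀ = F'   # the adjoint as a plain matrix here
u  = normalize(rand(6))
Nv = rand(4); β = 0.5

# 1. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ   (what @kaxpby! computes in place)
Nv = Aᵀ * u - β * Nv
v  = Nv                    # with N = I, vₖ₊₁ equals Nvₖ₊₁
α  = sqrt(dot(v, Nv))      # αₖ₊₁; the solver then scales vₖ₊₁ by 1/α
```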
11 changes: 7 additions & 4 deletions src/craigmr.jl
@@ -60,7 +60,7 @@ It is formally equivalent to CRMR, though can be slightly more accurate,
 and intricate to implement. Both the x- and y-parts of the solution are
 returned.
 """
-function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function craigmr(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
                 M :: AbstractLinearOperator=opEye(),
                 N :: AbstractLinearOperator=opEye(),
                 λ :: T=zero(T), atol :: T=√eps(T), rtol :: T=√eps(T),
@@ -70,6 +70,9 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size");
   verbose && @printf("CRAIG-MR: system of %d equations in %d variables\n", m, n);

+  # Compute the adjoint of A
+  Aᵀ = A'
+
   # Tests M == Iₘ and N == Iₙ
   MisI = isa(M, opEye)
   NisI = isa(N, opEye)
@@ -87,7 +90,7 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   @kscal!(m, one(T)/β, u)
   MisI || @kscal!(m, one(T)/β, Mu)
   # α₁Nv₁ = Aᵀu₁.
-  Aᵀu = A.tprod(u)
+  Aᵀu = Aᵀ * u
   Nv = copy(Aᵀu)
   v = N * Nv
   α = sqrt(@kdot(n, v, Nv))
@@ -166,7 +169,7 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
     @kaxpy!(m, ζ, w, y)  # y = y + ζ * w;

     # 2. αₖ₊₁Nvₖ₊₁ = Aᵀuₖ₊₁ - βₖ₊₁Nvₖ
-    Aᵀu = A.tprod(u)
+    Aᵀu = Aᵀ * u
     @kaxpby!(n, one(T), Aᵀu, -β, Nv)
     v = N * Nv
     α = sqrt(@kdot(n, v, Nv))
@@ -190,7 +193,7 @@ function craigmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
     tired = iter >= itmax
   end

-  Aᵀy = A.tprod(y)
+  Aᵀy = Aᵀ * y
   N⁻¹Aᵀy = N * Aᵀy
   @. x = N⁻¹Aᵀy
13 changes: 8 additions & 5 deletions src/crls.jl
@@ -36,7 +36,7 @@ CRLS produces monotonic residuals ‖r‖₂ and optimality residuals ‖Aᵀr
 It is formally equivalent to LSMR, though can be substantially less accurate,
 but simpler to implement.
 """
-function crls(A :: AbstractLinearOperator, b :: AbstractVector{T};
+function crls(A :: AbstractLinearOperator{T}, b :: AbstractVector{T};
              M :: AbstractLinearOperator=opEye(), λ :: T=zero(T),
              atol :: T=√eps(T), rtol :: T=√eps(T), radius :: T=zero(T),
              itmax :: Int=0, verbose :: Bool=false) where T <: AbstractFloat
@@ -45,19 +45,22 @@ function crls(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size");
   verbose && @printf("CRLS: system of %d equations in %d variables\n", m, n);

+  # Compute the adjoint of A
+  Aᵀ = A'
+
   x = zeros(T, n)
   r = copy(b)
   bNorm = @knrm2(m, r)  # norm(b - A * x0) if x0 ≠ 0.
   bNorm == 0 && return x, SimpleStats(true, false, [zero(T)], [zero(T)], "x = 0 is a zero-residual solution");

   Mr = M * r;
-  Ar = copy(A.tprod(Mr))  # - λ * x0 if x0 ≠ 0.
+  Ar = copy(Aᵀ * Mr)  # - λ * x0 if x0 ≠ 0.
   s = A * Ar;
   Ms = M * s;

   p = copy(Ar);
   Ap = copy(s);
-  q = A.tprod(Ms)  # Ap;
+  q = Aᵀ * Ms  # Ap;
   λ > 0 && @kaxpy!(n, λ, p, q)  # q = q + λ * p;
   γ = @kdot(m, s, Ms)  # Faster than γ = dot(s, Ms);
   iter = 0;
@@ -90,7 +93,7 @@ function crls(A :: AbstractLinearOperator, b :: AbstractVector{T};
       psd = true  # det(AᵀA) = 0
       p = Ar      # p = Aᵀr
       pNorm² = ArNorm * ArNorm
-      q = A.tprod(s)
+      q = Aᵀ * s
       α = min(ArNorm^2 / γ, maximum(to_boundary(x, p, radius, flip = false, dNorm2 = pNorm²)))  # the quadratic is minimal in the direction Aᵀr for α = ‖Ar‖²/γ
     else
       pNorm² = pNorm * pNorm
@@ -117,7 +120,7 @@ function crls(A :: AbstractLinearOperator, b :: AbstractVector{T};
     @kaxpby!(n, one(T), Ar, β, p)  # Faster than p = Ar + β * p;
     @kaxpby!(m, one(T), s, β, Ap)  # Faster than Ap = s + β * Ap;
     MAp = M * Ap
-    q = A.tprod(MAp)
+    q = Aᵀ * MAp
     λ > 0 && @kaxpy!(n, λ, p, q)  # q = q + λ * p;

     γ = γ_next;
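One closing note on correctness: `A'` is the adjoint (it dispatches to `ctprod`), whereas `A.tprod` was the transpose product. The swap is safe throughout this PR because every signature constrains `T <: AbstractFloat`, and for real operators the two coincide. A quick check (illustrative operator):

```julia
using LinearOperators

A = LinearOperator(rand(4, 3))
u = rand(4)
@show transpose(A) * u ≈ A' * u  # true for real eltypes: tprod ≡ ctprod
```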