diff --git a/Project.toml b/Project.toml index 61de76a3..c4c8c073 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ +authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. Martin ", "Javier Principe "] name = "GridapSolvers" uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534" -authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. Martin ", "Javier Principe "] version = "0.1.0" [deps] @@ -23,6 +23,7 @@ PartitionedArrays = "0.2.15" julia = "1.7" [extras] +MPIPreferences = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 9521e614..d991bed6 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -79,7 +79,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) Ωh = Triangulation(model_h) - fv_h = PVector(0.0,Uh.gids) + fv_h = zero_free_values(Uh) dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) model_H = get_model(mh,lev+1) @@ -92,10 +92,15 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: aH(u,v) = ∫(v⋅u)*dΩH lH(v,uh) = ∫(v⋅uh)*dΩhH assem = SparseMatrixAssembler(UH,VH) - - u_dir = (mode == :solution) ? interpolate(0.0,UH) : interpolate_everywhere(0.0,UH) + + fv_H = zero_free_values(UH) + dv_H = zero_dirichlet_values(UH) + u0 = FEFunction(UH,fv_H,true) # Zero at free dofs + u00 = FEFunction(UH,fv_H,dv_H,true) # Zero everywhere + + u_dir = (mode == :solution) ? u0 : u00 u,v = get_trial_fe_basis(UH), get_fe_basis(VH) - data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir) + data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,u00),u_dir) AH,bH0 = assemble_matrix_and_vector(assem,data) xH = PVector(0.0,AH.cols) bH = copy(bH0) diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index b7e163a0..56fa3264 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -12,6 +12,10 @@ function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{ return GridapDistributed.DistributedMeasure(measures) end +function GridapDistributed.get_parts(x::GridapDistributed.DistributedFESpace) + PartitionedArrays.get_part_ids(local_views(x)) +end + # change_parts function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing) diff --git a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl index dcaf0d6c..e6a03299 100644 --- a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl +++ b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl @@ -12,6 +12,8 @@ using Gridap.FESpaces using PartitionedArrays using GridapDistributed +using GridapSolvers.MultilevelTools + export PatchDecomposition export PatchFESpace export PatchBasedLinearSolver diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 609b0028..36c5dcba 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -4,30 +4,81 @@ struct DistributedPatchDecomposition{Dc,Dp,A,B} <: GridapType model::B end +GridapDistributed.local_views(a::DistributedPatchDecomposition) = 
a.patch_decompositions + function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}; Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} - patch_decompositions=map_parts(model.models) do lmodel + mark_interface_facets!(model) + patch_decompositions = map_parts(local_views(model)) do lmodel PatchDecomposition(lmodel; Dr=Dr, - patch_boundary_style=patch_boundary_style) + patch_boundary_style=patch_boundary_style, + boundary_tag_names=["boundary","interface"]) end - A=typeof(patch_decompositions) - B=typeof(model) - DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model) + A = typeof(patch_decompositions) + B = typeof(model) + return DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model) +end + +function PatchDecomposition(mh::ModelHierarchy;kwargs...) + nlevs = num_levels(mh) + decompositions = Vector{DistributedPatchDecomposition}(undef,nlevs-1) + for lev in 1:nlevs-1 + parts = get_level_parts(mh,lev) + if i_am_in(parts) + model = get_model(mh,lev) + decompositions[lev] = PatchDecomposition(model;kwargs...) + end + end + return decompositions end function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) - trians=map_parts(a.patch_decompositions) do a + trians = map_parts(a.patch_decompositions) do a Triangulation(a) end - GridapDistributed.DistributedTriangulation(trians,a.model) + return GridapDistributed.DistributedTriangulation(trians,a.model) end function get_patch_root_dim(a::DistributedPatchDecomposition) - patch_root_dim=0 + patch_root_dim = -1 map_parts(a.patch_decompositions) do patch_decomposition - patch_root_dim=patch_decomposition.Dr + patch_root_dim = patch_decomposition.Dr + end + return patch_root_dim +end + +function mark_interface_facets!(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} + face_labeling = get_face_labeling(model) + topo = get_grid_topology(model) + + map_parts(local_views(face_labeling),local_views(topo)) do face_labeling, topo + tag_to_name = face_labeling.tag_to_name + tag_to_entities = face_labeling.tag_to_entities + d_to_dface_to_entity = face_labeling.d_to_dface_to_entity + + # Create new tag & entity + interface_entity = maximum(map(maximum,tag_to_entities)) + 1 + push!(tag_to_entities,[interface_entity]) + push!(tag_to_name,"interface") + + # Interface faces should also be interior + interior_tag = findfirst(x->(x=="interior"),tag_to_name) + push!(tag_to_entities[interior_tag],interface_entity) + + # Select interface entities + boundary_tag = findfirst(x->(x=="boundary"),tag_to_name) + boundary_entities = tag_to_entities[boundary_tag] + + f2c_map = Geometry.get_faces(topo,Dc-1,Dc) + num_cells_around_facet = map(length,f2c_map) + mx = maximum(num_cells_around_facet) + for (f,nf) in enumerate(num_cells_around_facet) + is_boundary = (d_to_dface_to_entity[Dc][f] ∈ boundary_entities) + if !is_boundary && (nf != mx) + d_to_dface_to_entity[Dc][f] = interface_entity + end + end end - patch_root_dim end diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 76924b8b..1f6614e9 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -1,6 +1,6 @@ # Rationale behind distributed PatchFESpace: # 1. Patches have an owner. Only owners compute subspace correction. -# If am not owner of a patch, all dofs in my patch become -1. +# If am not owner of a patch, all dofs in my patch become -1. [DONE] # 2. 
Subspace correction on an owned patch may affect DoFs which # are non-owned. These corrections should be sent to the owner # process. I.e., NO -> O (reversed) communication. [PENDING] @@ -12,7 +12,7 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, conformity::Gridap.FESpaces.Conformity, patch_decomposition::DistributedPatchDecomposition, Vh::GridapDistributed.DistributedSingleFieldFESpace) - root_gids=get_face_gids(model,get_patch_root_dim(patch_decomposition)) + root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition)) function f(model,patch_decomposition,Vh,partition) patches_mask = fill(false,length(partition.lid_to_gid)) @@ -26,12 +26,12 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, end spaces = map_parts(f, - model.models, - patch_decomposition.patch_decompositions, - Vh.spaces, + local_views(model), + local_views(patch_decomposition), + local_views(Vh), root_gids.partition) - parts = get_part_ids(model.models) + parts = get_parts(model) nodofs = map_parts(spaces) do space num_free_dofs(space) end @@ -43,38 +43,69 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) end +function PatchFESpace(mh::ModelHierarchy, + reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}, + conformity::Gridap.FESpaces.Conformity, + patch_decompositions::AbstractArray{<:DistributedPatchDecomposition}, + sh::FESpaceHierarchy) + nlevs = num_levels(mh) + levels = Vector{MultilevelTools.FESpaceHierarchyLevel}(undef,nlevs) + for lev in 1:nlevs-1 + parts = get_level_parts(mh,lev) + if i_am_in(parts) + model = get_model(mh,lev) + space = MultilevelTools.get_fe_space(sh,lev) + decomp = patch_decompositions[lev] + patch_space = PatchFESpace(model,reffe,conformity,decomp,space) + levels[lev] = MultilevelTools.FESpaceHierarchyLevel(lev,nothing,patch_space) + end + end + return FESpaceHierarchy(mh,levels) +end + # x \in PatchFESpace # y \in SingleFESpace function prolongate!(x::PVector, Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector) - parts=get_part_ids(x.owned_values) - Gridap.Helpers.@notimplementedif num_parts(parts)!=1 - - map_parts(x.owned_values,Ph.spaces,y.owned_values) do x,Ph,y + map_parts(x.values,Ph.spaces,y.values) do x,Ph,y prolongate!(x,Ph,y) end + exchange!(x) end +# x \in SingleFESpace +# y \in PatchFESpace function inject!(x::PVector, Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector, - w::PVector) - parts = get_part_ids(x.owned_values) - Gridap.Helpers.@notimplementedif num_parts(parts)!=1 - - map_parts(x.owned_values,Ph.spaces,y.owned_values,w.owned_values) do x,Ph,y,w - inject!(x,Ph,y,w) + w::PVector, + w_sums::PVector) + + #exchange!(y) + map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums + inject!(x,Ph,y,w,w_sums) end + + # Exchange local contributions + assemble!(x) + exchange!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later? 
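  # [Editor's note, not part of the patch] Sketch of the communication pattern above,
  # under the PartitionedArrays v0.2 semantics used elsewhere in this PR: the local
  # inject! calls write patch corrections into ghost rows of x as well as owned rows;
  # assemble!(x) then adds each ghost contribution into the owning part's entry
  # (ghost -> owner reduction, as in compute_weight_operators below), and exchange!(x)
  # copies the owner values back out so the ghost rows are consistent again. If later
  # stages only read owned entries of x, that final exchange! could presumably be
  # dropped, which is what the TO CONSIDER above is asking.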
+ return x end -function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace) - parts = get_part_ids(Ph.spaces) - Gridap.Helpers.@notimplementedif num_parts(parts) != 1 - +function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace,Vh) + # Local weights and partial sums w = PVector(0.0,Ph.gids) - map_parts(w.owned_values,Ph.spaces) do w, Ph - w .= compute_weight_operators(Ph) + w_sums = PVector(0.0,Vh.gids) + map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph + _w, _w_sums = compute_weight_operators(Ph,Ph.Vh) + w .= _w + w_sums .= _w_sums end - return w + + # partial sums -> global sums + assemble!(w_sums) # ghost -> owners + exchange!(w_sums) # repopulate ghosts with owner info + + return w, w_sums end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index a9d15761..867f0f5d 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -12,9 +12,10 @@ # (not 100% sure, to investigate) -struct PatchBasedLinearSolver{A} <: Gridap.Algebra.LinearSolver +struct PatchBasedLinearSolver{A,B} <: Gridap.Algebra.LinearSolver bilinear_form :: Function Ph :: A + Vh :: B M :: Gridap.Algebra.LinearSolver end @@ -23,29 +24,52 @@ struct PatchBasedSymbolicSetup <: Gridap.Algebra.SymbolicSetup end function Gridap.Algebra.symbolic_setup(ls::PatchBasedLinearSolver,mat::AbstractMatrix) - PatchBasedSymbolicSetup(ls) + return PatchBasedSymbolicSetup(ls) end -struct PatchBasedSmootherNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSetup - solver :: PatchBasedLinearSolver - Ap :: A - nsAp :: B - rp :: C - dxp :: D - w :: E +struct PatchBasedSmootherNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup + solver :: PatchBasedLinearSolver + Ap :: A + Ap_ns :: B + weights :: C + caches :: D end function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractMatrix) - Ph = ss.solver.Ph + Ph, Vh = ss.solver.Ph, ss.solver.Vh + weights = compute_weight_operators(Ph,Vh) + + # Assemble patch system assembler = SparseMatrixAssembler(Ph,Ph) - Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) - solver = ss.solver.M - ssAp = symbolic_setup(solver,Ap) - nsAp = numerical_setup(ssAp,Ap) - rp = _allocate_row_vector(Ap) - dxp = _allocate_col_vector(Ap) - w = compute_weight_operators(Ph) - PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w) + Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) + + # Patch system solver + Ap_solver = ss.solver.M + Ap_ss = symbolic_setup(Ap_solver,Ap) + Ap_ns = numerical_setup(Ap_ss,Ap) + + # Caches + caches = _patch_based_solver_caches(Ph,Vh,Ap) + + return PatchBasedSmootherNumericalSetup(ss.solver,Ap,Ap_ns,weights,caches) +end + +function _patch_based_solver_caches(Ph::PatchFESpace,Vh::FESpace,Ap::AbstractMatrix) + rp = _allocate_row_vector(Ap) + dxp = _allocate_col_vector(Ap) + return rp, dxp +end + +function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace, + Vh::GridapDistributed.DistributedSingleFieldFESpace, + Ap::PSparseMatrix) + rp_mat = _allocate_row_vector(Ap) + dxp_mat = _allocate_col_vector(Ap) + rp = PVector(0.0,Ph.gids) + dxp = PVector(0.0,Ph.gids) + r = PVector(0.0,Vh.gids) + x = PVector(0.0,Vh.gids) + return rp_mat, dxp_mat, rp, dxp, r, x end function _allocate_col_vector(A::AbstractMatrix) @@ -69,9 +93,34 @@ function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A end 
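# [Editor's sketch, not part of the patch] Minimal serial usage of the solver defined in
# this file, mirroring test/seq/PatchLinearSolverTests.jl further down in this PR. The
# names `model`, `order`, `A`, `x`, `r` and the Laplacian bilinear form are assumptions
# for illustration; note the new signature PatchBasedLinearSolver(biform,Ph,Vh,local_solver)
# with the original space Vh as third argument.
#
#   reffe = ReferenceFE(lagrangian,Float64,order)
#   Vh = TestFESpace(model,reffe)
#   PD = PatchDecomposition(model)
#   Ph = PatchFESpace(model,reffe,H1Conformity(),PD,Vh)
#   Ωₚ = Triangulation(PD); dΩₚ = Measure(Ωₚ,2*order+1)
#   ap(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ
#   M  = PatchBasedLinearSolver(ap,Ph,Vh,LUSolver())
#   S  = RichardsonSmoother(M,10,1.0/3.0)
#   ns = numerical_setup(symbolic_setup(S,A),A)   # A assembled on Vh
#   solve!(x,ns,r)                                # one smoothing sweep on residual r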
function Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector) - Ap, nsAp, rp, dxp, w = ns.Ap, ns.nsAp, ns.rp, ns.dxp, ns.w + Ap_ns, weights, caches = ns.Ap_ns, ns.weights, ns.caches + + Ph = ns.solver.Ph + w, w_sums = weights + rp, dxp = caches + + prolongate!(rp,Ph,r) + solve!(dxp,Ap_ns,rp) + inject!(x,Ph,dxp,w,w_sums) + + return x +end + +function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSetup,r_mat::PVector) + Ap_ns, weights, caches = ns.Ap_ns, ns.weights, ns.caches + + Ph = ns.solver.Ph + w, w_sums = weights + rp_mat, dxp_mat, rp, dxp, r, x = caches + + copy!(r,r_mat) + exchange!(r) + prolongate!(rp,Ph,r) + copy!(rp_mat,rp) + solve!(dxp_mat,Ap_ns,rp_mat) + copy!(dxp,dxp_mat) + inject!(x,Ph,dxp,w,w_sums) + copy!(x_mat,x) - prolongate!(rp,ns.solver.Ph,r) - solve!(dxp,nsAp,rp) - inject!(x,ns.solver.Ph,dxp,w) + return x_mat end diff --git a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl index df093840..913f0c3d 100644 --- a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl @@ -20,7 +20,8 @@ Gridap.Geometry.num_cells(a::PatchDecomposition) = a.patch_cells_overlapped_mesh function PatchDecomposition( model::DiscreteModel{Dc,Dp}; Dr=0, - patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} + patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude(), + boundary_tag_names::AbstractArray{String}=["boundary"]) where {Dc,Dp} Gridap.Helpers.@check 0 <= Dr <= Dc-1 grid = get_grid(model) @@ -41,12 +42,13 @@ function PatchDecomposition( patch_cells, patch_cells_overlapped_mesh) - generate_patch_boundary_faces!(model, - patch_cells_faces_on_boundary, + generate_patch_boundary_faces!(patch_cells_faces_on_boundary, + model, patch_cells, patch_cells_overlapped_mesh, patch_facets, - patch_boundary_style) + patch_boundary_style, + boundary_tag_names) return PatchDecomposition{Dc,Dp}(model, Dr, patch_cells, @@ -112,47 +114,45 @@ function allocate_cell_overlapped_mesh_lface(::Type{T}, return Gridap.Arrays.Table(data,ptrs) end -function generate_patch_boundary_faces!(model, - patch_cells_faces_on_boundary, +function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, + model::DiscreteModel, patch_cells, patch_cells_overlapped_mesh, patch_facets, - patch_boundary_style) - Dc = num_cell_dims(model) - topology = get_grid_topology(model) - labeling = get_face_labeling(model) - num_patches = length(patch_cells.ptrs)-1 + patch_boundary_style, + boundary_tag_names) + num_patches = length(patch_cells.ptrs)-1 cache_patch_cells = array_cache(patch_cells) cache_patch_facets = array_cache(patch_facets) for patch = 1:num_patches current_patch_cells = getindex!(cache_patch_cells,patch_cells,patch) current_patch_facets = getindex!(cache_patch_facets,patch_facets,patch) generate_patch_boundary_faces!(patch_cells_faces_on_boundary, - Dc, - topology, - labeling, + model, patch, current_patch_cells, patch_cells_overlapped_mesh, current_patch_facets, - patch_boundary_style) + patch_boundary_style, + boundary_tag_names) end end function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, - Dc, - topology, - face_labeling, + model::DiscreteModel{Dc}, patch, patch_cells, patch_cells_overlapped_mesh, patch_facets, - patch_boundary_style) + patch_boundary_style, + boundary_tag_names) where Dc + face_labeling = get_face_labeling(model) + topology = get_grid_topology(model) - boundary_tag = 
findfirst(x->(x=="boundary"),face_labeling.tag_to_name) - Gridap.Helpers.@check !isa(boundary_tag, Nothing) - boundary_entities = face_labeling.tag_to_entities[boundary_tag] + boundary_tags = findall(x -> (x ∈ boundary_tag_names), face_labeling.tag_to_name) + Gridap.Helpers.@check !isempty(boundary_tags) + boundary_entities = vcat(face_labeling.tag_to_entities[boundary_tags]...) # Cells facets Df = Dc-1 @@ -184,7 +184,7 @@ function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, facet_at_global_boundary = (facet_entity ∈ boundary_entities) A = (facet_at_global_boundary) && (facet ∉ patch_facets) B = (patch_boundary_style isa PatchBoundaryExclude) && cell_not_in_patch_found - facet_at_patch_boundary = A || B + facet_at_patch_boundary = (A || B) if (facet_at_patch_boundary) diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index efaa1802..9758e813 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -71,13 +71,14 @@ function PatchFESpace(model::DiscreteModel, return PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition) end -Gridap.FESpaces.get_dof_value_type(a::PatchFESpace)=Gridap.FESpaces.get_dof_value_type(a.Vh) -Gridap.FESpaces.get_free_dof_ids(a::PatchFESpace)=Base.OneTo(a.num_dofs) -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace)=a.patch_cell_dofs_ids -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation)=a.patch_cell_dofs_ids -Gridap.FESpaces.get_fe_basis(a::PatchFESpace)=get_fe_basis(a.Vh) -Gridap.FESpaces.ConstraintStyle(a::PatchFESpace)=Gridap.FESpaces.UnConstrained() -Gridap.FESpaces.get_vector_type(a::PatchFESpace)=get_vector_type(a.Vh) +Gridap.FESpaces.get_dof_value_type(a::PatchFESpace) = Gridap.FESpaces.get_dof_value_type(a.Vh) +Gridap.FESpaces.get_free_dof_ids(a::PatchFESpace) = Base.OneTo(a.num_dofs) +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace) = a.patch_cell_dofs_ids +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation) = a.patch_cell_dofs_ids +Gridap.FESpaces.get_fe_basis(a::PatchFESpace) = get_fe_basis(a.Vh) +Gridap.FESpaces.ConstraintStyle(::PatchFESpace) = Gridap.FESpaces.UnConstrained() +Gridap.FESpaces.get_vector_type(a::PatchFESpace) = get_vector_type(a.Vh) +Gridap.FESpaces.get_fe_dof_basis(a::PatchFESpace) = get_fe_dof_basis(a.Vh) function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace,free_values,dirichlet_values) cell_vals = Gridap.Fields.PosNegReindex(free_values,dirichlet_values) @@ -140,7 +141,7 @@ end # TO-THINK/STRESS: # 1. MultiFieldFESpace case? -# 2. FESpaces which are directly defined on physical space? We think this cased is covered by +# 2. FESpaces which are directly defined on physical space? We think this case is covered by # the fact that we are using a CellConformity instance to rely on ownership info. # free_dofs_offset : the ID from which we start to assign free DoF IDs upwards # Note: we do not actually need to generate a global numbering for Dirichlet DoFs. 
We can @@ -255,8 +256,8 @@ end # x \in SingleFESpace # y \in PatchFESpace function inject!(x,Ph::PatchFESpace,y) - w = compute_weight_operators(Ph) - inject!(x,Ph::PatchFESpace,y,w) + w, w_sums = compute_weight_operators(Ph,Ph.Vh) + inject!(x,Ph::PatchFESpace,y,w,w_sums) end function inject!(x,Ph::PatchFESpace,y,w) @@ -288,14 +289,14 @@ function inject!(x,Ph::PatchFESpace,y,w) end end -function compute_weight_operators(Ph::PatchFESpace) - cell_dof_ids = get_cell_dof_ids(Ph.Vh) - cache_cell_dof_ids = array_cache(cell_dof_ids) - cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) - - w = zeros(num_free_dofs(Ph.Vh)) +function inject!(x,Ph::PatchFESpace,y,w,w_sums) touched = Dict{Int,Bool}() cell_mesh_overlapped = 1 + cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) + cell_dof_ids = get_cell_dof_ids(Ph.Vh) + cache_cell_dof_ids = array_cache(cell_dof_ids) + + fill!(x,0.0) for patch = 1:length(Ph.patch_decomposition.patch_cells) current_patch_cells = getindex!(cache_patch_cells, Ph.patch_decomposition.patch_cells, @@ -306,17 +307,25 @@ function compute_weight_operators(Ph::PatchFESpace) e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 current_patch_cell_dof_ids = view(Ph.patch_cell_dofs_ids.data,s:e) for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) - if pdof > 0 && !(dof in keys(touched)) + if pdof > 0 && !(dof ∈ keys(touched)) touched[dof] = true - w[dof] += 1.0 + x[dof] += y[pdof] * w[pdof] / w_sums[dof] end end - cell_mesh_overlapped+=1 + cell_mesh_overlapped += 1 end empty!(touched) end - w .= 1.0 ./ w - w_Ph = similar(w,num_free_dofs(Ph)) - prolongate!(w_Ph,Ph,w) - return w_Ph +end + +function compute_weight_operators(Ph::PatchFESpace,Vh) + w = Fill(1.0,num_free_dofs(Ph)) + w_sums = compute_partial_sums(Ph,Vh,w) + return w, w_sums +end + +function compute_partial_sums(Ph::PatchFESpace,Vh,x) + x_sums = zeros(num_free_dofs(Vh)) + inject!(x_sums,Ph,x,Fill(1.0,num_free_dofs(Ph))) + return x_sums end diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index 1a9bff6f..a982d8da 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -13,11 +13,33 @@ using GridapP4est using GridapSolvers using GridapSolvers.LinearSolvers +using GridapSolvers.MultilevelTools +using GridapSolvers.PatchBasedSmoothers u(x) = VectorValue(x[1],x[2]) f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) +function get_patch_smoothers(tests,patch_spaces,patch_decompositions,biform,qdegree) + mh = tests.mh + nlevs = num_levels(mh) + smoothers = Vector{RichardsonSmoother}(undef,nlevs-1) + for lev in 1:nlevs-1 + parts = get_level_parts(mh,lev) + if i_am_in(parts) + PD = patch_decompositions[lev] + Ph = get_fe_space(patch_spaces,lev) + Vh = get_fe_space(tests,lev) + Ω = Triangulation(PD) + dΩ = Measure(Ω,qdegree) + a(u,v) = biform(u,v,dΩ) + patch_smoother = PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) + smoothers[lev] = RichardsonSmoother(patch_smoother,1,1.0/3.0) + end + end + return smoothers +end + function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) GridapP4est.with(parts) do domain = (0,1,0,1) @@ -32,12 +54,15 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, tests = TestFESpace(mh,reffe;dirichlet_tags="boundary") trials = TrialFESpace(tests,u) + patch_decompositions = PatchDecomposition(mh) + patch_spaces = PatchFESpace(mh,reffe,DivConformity(),patch_decompositions,tests) + biform(u,v,dΩ) 
= ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ liform(v,dΩ) = ∫(v⋅f)dΩ smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + smoothers = get_patch_smoothers(tests,patch_spaces,patch_decompositions,biform,qdegree) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) gmg = GMGLinearSolver(mh, @@ -57,9 +82,10 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, x = PVector(0.0,A.cols) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), - reltol=1.0e-12, + reltol=1.0e-8, Pl=ns, - log=true) + log=true, + maxiter=10) # Error norms and print solution model = get_model(mh,1) @@ -75,7 +101,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, println("L2 error = ", e_l2) end - return history.iters, num_free_dofs(Vh) + return history.iters, num_free_dofs(Uh) end end @@ -86,14 +112,14 @@ if !MPI.Initialized() end # Parameters -order = 2 +order = 0 coarse_grid_partition = (2,2) num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -#num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 235d9e6e..06a02165 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -32,15 +32,17 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) cparts = get_level_parts(mh,lev+1) if i_am_in(cparts) + model_h = get_model_before_redist(mh,lev) Vh = get_fe_space_before_redist(tests,lev) Uh = get_fe_space_before_redist(trials,lev) - Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + Ωh = get_triangulation(model_h) dΩh = Measure(Ωh,quad_order) uh = interpolate(sol,Uh) + model_H = get_model(mh,lev+1) VH = get_fe_space(tests,lev+1) UH = get_fe_space(trials,lev+1) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) + ΩH = get_triangulation(model_H) dΩH = Measure(ΩH,quad_order) uH = interpolate(sol,UH) dΩhH = Measure(ΩH,Ωh,quad_order) diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl new file mode 100644 index 00000000..ecf2202a --- /dev/null +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -0,0 +1,223 @@ +module DistributedPatchFESpacesHDivTests + +using LinearAlgebra +using Test +using PartitionedArrays +using Gridap +using Gridap.Helpers +using Gridap.Arrays +using Gridap.Geometry +using Gridap.ReferenceFEs +using GridapDistributed +using FillArrays + +using GridapSolvers +import GridapSolvers.PatchBasedSmoothers as PBS + +#= +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) +=# + +function run(parts) + domain = (0.0,1.0,0.0,1.0) + partition = (2,4) + model = CartesianDiscreteModel(parts,domain,partition) + + order = 0 + reffe = ReferenceFE(raviart_thomas,Float64,order) + Vh = TestFESpace(model,reffe) + PD = PBS.PatchDecomposition(model) + Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) + + + # ---- Testing Prolongation and Injection ---- # + + w, w_sums = PBS.compute_weight_operators(Ph,Vh); + + xP = PVector(1.0,Ph.gids) + yP = PVector(0.0,Ph.gids) + x = PVector(1.0,Vh.gids) + y = 
PVector(0.0,Vh.gids) + + PBS.prolongate!(yP,Ph,x) + PBS.inject!(y,Ph,yP,w,w_sums) + @test x ≈ y + + PBS.inject!(x,Ph,xP,w,w_sums) + PBS.prolongate!(yP,Ph,x) + @test xP ≈ yP + + # ---- Assemble systems ---- # + + sol(x) = VectorValue(x[1],x[2]) + f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) + + α = 1.0 + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + + Ω = Triangulation(model) + dΩ = Measure(Ω,2*order+1) + a(u,v) = biform(u,v,dΩ) + l(v) = liform(v,dΩ) + + assembler = SparseMatrixAssembler(Vh,Vh) + Ah = assemble_matrix(a,assembler,Vh,Vh) + fh = assemble_vector(l,assembler,Vh) + + sol_h = solve(LUSolver(),Ah,fh) + + Ωₚ = Triangulation(PD) + dΩₚ = Measure(Ωₚ,2*order+1) + ap(u,v) = biform(u,v,dΩₚ) + lp(v) = liform(v,dΩₚ) + + assembler_P = SparseMatrixAssembler(Ph,Ph,FullyAssembledRows()) + Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) + fhp = assemble_vector(lp,assembler_P,Ph) + + sol_hp = solve(LUSolver(),Ahp,fhp) + + # ---- Define solvers ---- # + + LU = LUSolver() + LUss = symbolic_setup(LU,Ahp) + LUns = numerical_setup(LUss,Ahp) + + M = PBS.PatchBasedLinearSolver(ap,Ph,Vh,LU) + Mss = symbolic_setup(M,Ah) + Mns = numerical_setup(Mss,Ah) + + R = RichardsonSmoother(M,10,1.0/3.0) + Rss = symbolic_setup(R,Ah) + Rns = numerical_setup(Rss,Ah) + + # ---- Manual solve using LU ---- # + + x1_mat = PVector(0.5,Ah.cols) + r1_mat = fh-Ah*x1_mat + exchange!(r1_mat) + + r1 = PVector(0.0,Vh.gids) + x1 = PVector(0.0,Vh.gids) + rp = PVector(0.0,Ph.gids) + xp = PVector(0.0,Ph.gids) + rp_mat = PVector(0.0,Ahp.cols) + xp_mat = PVector(0.0,Ahp.cols) + + copy!(r1,r1_mat) + exchange!(r1) + PBS.prolongate!(rp,Ph,r1) # OK + + copy!(rp_mat,rp) + exchange!(rp_mat) + solve!(xp_mat,LUns,rp_mat) + copy!(xp,xp_mat) # Some big numbers appear here.... + + w, w_sums = PBS.compute_weight_operators(Ph,Vh); + PBS.inject!(x1,Ph,xp,w,w_sums) # Problem here!! 
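  # [Editor's note, not part of the patch] What inject! is supposed to compute here,
  # stripped of the Gridap machinery: with the weights returned by compute_weight_operators
  # (w ≡ 1 and w_sums[dof] = number of patch copies of dof), the reduction is the average
  # x[dof] = Σ_pdof y[pdof]*w[pdof]/w_sums[dof] over the patch dofs mapping to dof.
  # A self-contained toy example (the map pdof_to_dof is hypothetical):
  #
  #   pdof_to_dof = [1,2,2,3]              # patch dof -> global dof
  #   y      = [1.0,2.0,4.0,3.0]           # patch-space values
  #   w      = ones(4)
  #   w_sums = zeros(3)
  #   for (p,d) in enumerate(pdof_to_dof); w_sums[d] += w[p]; end
  #   x = zeros(3)
  #   for (p,d) in enumerate(pdof_to_dof); x[d] += y[p]*w[p]/w_sums[d]; end
  #   # x == [1.0, 3.0, 3.0]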
+ copy!(x1_mat,x1) + + # ---- Same using the PatchBasedSmoother ---- # + + x2_mat = PVector(0.5,Ah.cols) + r2_mat = fh-Ah*x2_mat + exchange!(r2_mat) + solve!(x2_mat,Mns,r2_mat) + + # ---- Smoother inside Richardson + + x3_mat = PVector(0.5,Ah.cols) + r3_mat = fh-Ah*x3_mat + exchange!(r3_mat) + solve!(x3_mat,Rns,r3_mat) + exchange!(x3_mat) + + # Outputs + res = Dict{String,Any}() + res["sol_h"] = sol_h + res["sol_hp"] = sol_hp + + res["r1"] = r1 + res["x1"] = x1 + res["r1_mat"] = r1_mat + res["x1_mat"] = x1_mat + res["rp"] = rp + res["xp"] = xp + res["rp_mat"] = rp_mat + res["xp_mat"] = xp_mat + + res["w"] = w + res["w_sums"] = w_sums + + return model,PD,Ph,Vh,res +end + +backend = SequentialBackend() + +parts = get_part_ids(backend,(1,1)) +Ms,PDs,Phs,Vhs,res_single = run(parts); + +parts = get_part_ids(backend,(1,2)) +Mm,PDm,Phm,Vhm,res_multi = run(parts); + +println(repeat('#',80)) + +map_parts(local_views(Ms)) do model + cell_ids = get_cell_node_ids(model) + cell_coords = get_cell_coordinates(model) + display(reshape(cell_ids,length(cell_ids))) + display(reshape(cell_coords,length(cell_coords))) +end; +println(repeat('-',80)) + +cell_gids = get_cell_gids(Mm) +vertex_gids = get_face_gids(Mm,0) +edge_gids = get_face_gids(Mm,1) + +println(">>> Cell gids") +map_parts(cell_gids.partition) do p + println(transpose(p.lid_to_ohid)) +end; +println(repeat('-',80)) + +println(">>> Vertex gids") +map_parts(vertex_gids.partition) do p + println(transpose(p.lid_to_ohid)) +end; +println(repeat('-',80)) + +println(">>> Edge gids") +map_parts(edge_gids.partition) do p + println(transpose(p.lid_to_ohid)) +end; + +println(repeat('#',80)) + +map_parts(local_views(Phs)) do Ph + display(Ph.patch_cell_dofs_ids) +end; + +map_parts(local_views(Phm)) do Ph + display(Ph.patch_cell_dofs_ids) +end; + +println(repeat('#',80)) + +for key in keys(res_single) + val_s = res_single[key] + val_m = res_multi[key] + + println(">>> ", key) + map_parts(val_s.values) do v + println(transpose(v)) + end; + map_parts(val_m.owned_values) do v + println(transpose(v)) + end; + println(repeat('-',80)) +end + +end \ No newline at end of file diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl new file mode 100644 index 00000000..18dc37c3 --- /dev/null +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -0,0 +1,130 @@ +module DistributedPatchFESpacesTests + +using LinearAlgebra +using Test +using PartitionedArrays +using Gridap +using Gridap.Helpers +using Gridap.Geometry +using Gridap.ReferenceFEs +using GridapDistributed +using FillArrays + +using GridapSolvers +import GridapSolvers.PatchBasedSmoothers as PBS + +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +domain = (0.0,1.0,0.0,1.0) +partition = (2,4) +model = CartesianDiscreteModel(parts,domain,partition) + +# order = 1 +# reffe = ReferenceFE(lagrangian,Float64,order) +order = 0 +reffe = ReferenceFE(raviart_thomas,Float64,order) +Vh = TestFESpace(model,reffe) +PD = PBS.PatchDecomposition(model) +Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) + +# ---- Testing Prolongation and Injection ---- # + +w, w_sums = PBS.compute_weight_operators(Ph,Vh); + +xP = PVector(1.0,Ph.gids) +yP = PVector(0.0,Ph.gids) +x = PVector(1.0,Vh.gids) +y = PVector(0.0,Vh.gids) + +PBS.prolongate!(yP,Ph,x) +PBS.inject!(y,Ph,yP,w,w_sums) +@test x ≈ y + +PBS.inject!(x,Ph,xP,w,w_sums) +PBS.prolongate!(yP,Ph,x) +@test xP ≈ yP + + +# ---- Assemble systems ---- # + +Ω = Triangulation(model) +dΩ = Measure(Ω,2*order+1) +a(u,v) 
= ∫(v⋅u)*dΩ +l(v) = ∫(1*v)*dΩ + +assembler = SparseMatrixAssembler(Vh,Vh) +Ah = assemble_matrix(a,assembler,Vh,Vh) +fh = assemble_vector(l,assembler,Vh) + +sol_h = solve(LUSolver(),Ah,fh) + +Ωₚ = Triangulation(PD) +dΩₚ = Measure(Ωₚ,2*order+1) +ap(u,v) = ∫(v⋅u)*dΩₚ +lp(v) = ∫(1*v)*dΩₚ + +assembler_P = SparseMatrixAssembler(Ph,Ph) +Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) +fhp = assemble_vector(lp,assembler_P,Ph) + + +# ---- Define solvers ---- # + +LU = LUSolver() +LUss = symbolic_setup(LU,Ahp) +LUns = numerical_setup(LUss,Ahp) + +M = PBS.PatchBasedLinearSolver(ap,Ph,Vh,LU) +Mss = symbolic_setup(M,Ah) +Mns = numerical_setup(Mss,Ah) + +R = RichardsonSmoother(M,10,1.0/3.0) +Rss = symbolic_setup(R,Ah) +Rns = numerical_setup(Rss,Ah) + + +# ---- Manual solve using LU ---- # + +x1_mat = PVector(0.5,Ah.cols) +r1_mat = fh-Ah*x1_mat +exchange!(r1_mat) + +r1 = PVector(0.0,Vh.gids) +x1 = PVector(0.0,Vh.gids) +rp = PVector(0.0,Ph.gids) +xp = PVector(0.0,Ph.gids) +rp_mat = PVector(0.0,Ahp.cols) +xp_mat = PVector(0.0,Ahp.cols) + +copy!(r1,r1_mat) +exchange!(r1) +PBS.prolongate!(rp,Ph,r1) + +copy!(rp_mat,rp) +solve!(xp_mat,LUns,rp_mat) +copy!(xp,xp_mat) + +w, w_sums = PBS.compute_weight_operators(Ph,Vh); +PBS.inject!(x1,Ph,xp,w,w_sums) +copy!(x1_mat,x1) + + +# ---- Same using the PatchBasedSmoother ---- # + +x2_mat = PVector(0.5,Ah.cols) +r2_mat = fh-Ah*x2_mat +exchange!(r2_mat) +solve!(x2_mat,Mns,r2_mat) + + +# ---- Smoother inside Richardson + +x3_mat = PVector(0.5,Ah.cols) +r3_mat = fh-Ah*x3_mat +exchange!(r3_mat) +solve!(x3_mat,Rns,r3_mat) +exchange!(x3_mat) + +end \ No newline at end of file diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index 9e38869f..5df48824 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -10,13 +10,13 @@ module PatchLinearSolverTests using GridapSolvers using GridapSolvers.PatchBasedSmoothers - const order=1 + order=1 - function returns_PD_Ph_xh_Vh(model) + function returns_PD_Ph_xh_Vh(model;style=GridapSolvers.PatchBasedSmoothers.PatchBoundaryExclude()) reffe = ReferenceFE(lagrangian,Float64,order) # reffe=ReferenceFE(lagrangian,VectorValue{2,Float64},order) @santiagobadia: For Vector Laplacian Vh = TestFESpace(model,reffe) - PD = PatchDecomposition(model) + PD = PatchDecomposition(model;patch_boundary_style=style) Ph = PatchFESpace(model,reffe,H1Conformity(),PD,Vh) assembler = SparseMatrixAssembler(Ph,Ph) Ωₚ = Triangulation(PD) @@ -51,7 +51,7 @@ module PatchLinearSolverTests dΩₚ = Measure(Ωₚ,2*order+1) a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ # α =1,0; a(u,v)=∫(v⋅u)dΩ+∫(α*∇(v)⊙∇(u))dΩ # @santiagobadia: For vector Laplacian - M = PatchBasedLinearSolver(a,Ph,LUSolver()) + M = PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) s = RichardsonSmoother(M,10,1.0/3.0) x = GridapSolvers.PatchBasedSmoothers._allocate_col_vector(A) r = b-A*x @@ -82,7 +82,7 @@ module PatchLinearSolverTests A,b = compute_matrix_vector(model,Vh) x = test_smoother(PD,Ph,Vh,A,b) - parts = get_part_ids(sequential,(1,1)) + parts = get_part_ids(SequentialBackend(),(1,1)) dmodel = CartesianDiscreteModel(parts,domain,partition) dPD,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); dA,db = compute_matrix_vector(dmodel,dVh);
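# [Editor's note, appended after the patch] The facet classification used by
# mark_interface_facets! in src/PatchBasedSmoothers/mpi/PatchDecompositions.jl above,
# reduced to its core rule: a facet is tagged "interface" when it is not on the physical
# boundary yet has fewer local cells around it than an interior facet of the local mesh,
# i.e. its missing neighbour cell lives on another processor. Toy data for illustration
# (the arrays are hypothetical, not taken from the tests):
#
#   num_cells_around_facet = [1,2,2,1,1]            # lengths of Geometry.get_faces(topo,Dc-1,Dc)
#   on_physical_boundary   = [true,false,false,false,true]
#   mx = maximum(num_cells_around_facet)
#   is_interface = [!on_physical_boundary[f] && num_cells_around_facet[f] != mx
#                   for f in eachindex(num_cells_around_facet)]
#   # is_interface == [false,false,false,true,false]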