From a34f8a985bdfb7a645310a0a70e9122b9c2c28bc Mon Sep 17 00:00:00 2001 From: Dave Grote Date: Mon, 11 May 2020 17:05:42 -0700 Subject: [PATCH 1/9] Added CheckGriddingForRZSpectral --- Python/pywarpx/_libwarpx.py | 2 + Source/Python/WarpXWrappers.cpp | 5 +++ Source/Python/WarpXWrappers.h | 2 + Source/Utils/WarpXUtil.H | 5 +++ Source/Utils/WarpXUtil.cpp | 77 +++++++++++++++++++++++++++++++++ Source/main.cpp | 2 + 6 files changed, 93 insertions(+) diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 7fb85e261b8..de2a8b83456 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -248,6 +248,8 @@ def initialize(argv=None): argv = sys.argv amrex_init(argv) libwarpx.warpx_ConvertLabParamsToBoost() + if geometry_dim == 'rz': + libwarpx.warpx_CheckGriddingForRZSpectral() libwarpx.warpx_init() diff --git a/Source/Python/WarpXWrappers.cpp b/Source/Python/WarpXWrappers.cpp index e87a20a7a1b..495e07e54a8 100644 --- a/Source/Python/WarpXWrappers.cpp +++ b/Source/Python/WarpXWrappers.cpp @@ -213,6 +213,11 @@ extern "C" ConvertLabParamsToBoost(); } + void warpx_CheckGriddingForRZSpectral() + { + CheckGriddingForRZSpectral(); + } + amrex::Real warpx_getProbLo(int dir) { WarpX& warpx = WarpX::GetInstance(); diff --git a/Source/Python/WarpXWrappers.h b/Source/Python/WarpXWrappers.h index 4a649a56412..bf1b305f466 100644 --- a/Source/Python/WarpXWrappers.h +++ b/Source/Python/WarpXWrappers.h @@ -78,6 +78,8 @@ extern "C" { void warpx_ConvertLabParamsToBoost(); + void warpx_CheckGriddingForRZSpectral(); + amrex::Real warpx_getProbLo(int dir); amrex::Real warpx_getProbHi(int dir); diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 6cdc71a0c76..7cfdcbb65ad 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -24,6 +24,11 @@ void ReadBoostedFrameParameters(amrex::Real& gamma_boost, amrex::Real& beta_boos void ConvertLabParamsToBoost(); +/** + * \brief Ensures that the blocks are setup correctly for the RZ spectral solver + */ +void CheckGriddingForRZSpectral(); + void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax); diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index e4080187a7e..4059fc0446e 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -213,6 +213,83 @@ WarpXParser makeParser (std::string const& parse_function, std::vector= the number of processors. + */ +void CheckGriddingForRZSpectral () +{ +#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD) + + int max_level; + Vector n_cell(AMREX_SPACEDIM, -1); + + ParmParse pp_amr("amr"); + + pp_amr.get("max_level",max_level); + pp_amr.getarr("n_cell",n_cell,0,AMREX_SPACEDIM); + + Vector blocking_factor_x(max_level+1); + Vector max_grid_size_x(max_level+1); + + // Set the radial block size to be equal to the radial grid size. + blocking_factor_x[0] = n_cell[0]; + max_grid_size_x[0] = n_cell[0]; + + for (int lev=1 ; lev <= max_level ; lev++) { + blocking_factor_x[lev] = blocking_factor_x[lev-1]*refRatio(lev-1); + max_grid_size_x[lev] = max_grid_size_x[lev-1]*refRatio(lev-1); + } + + pp_amr.addarr("blocking_factor_x", blocking_factor_x); + pp_amr.addarr("max_grid_size_x", max_grid_size_x); + + // Adjust the longitudinal block sizes, making sure that there are + // more blocks than processors. + // The factor of 8 is there to make some room for higher order + // shape factors and filtering. 
+ int nprocs = ParallelDescriptor::NProcs(); + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(n_cell[1] >= 8*nprocs, + "With RZ spectral, there must be at least two z-cells per processor so that there can be at least one block per processor."); + + // Get the longitudinal blocking factor in case it was set by the user. + // If not set, use the default value of 8. + Vector bf; + pp_amr.queryarr("blocking_factor",bf); + pp_amr.queryarr("blocking_factor_y",bf); + bf.resize(std::max(static_cast(bf.size()),1),8); + + // Make sure that the blocking factor is small enough so + // that there will be at least as many blocks as there + // are processors. Because of the ASSERT above, bf will + // never be less than 8. + while (n_cell[1] < nprocs*bf[0]) { + bf[0] /= 2; + } + pp_amr.addarr("blocking_factor_y", bf); + + // Get the longitudinal max grid size in case it was set by the user. + // If not set, use the default value of 128. + Vector mg; + pp_amr.queryarr("max_grid_size",mg); + pp_amr.queryarr("max_grid_size_y",mg); + mg.resize(std::max(static_cast(mg.size()),1),128); + + // Make sure that the max grid size (of the finest level) is small + // enough so that there will be at least as many blocks as there + // are processors. + while (n_cell[1] < nprocs*mg[0]) { + mg[0] /= 2; + } + pp_amr.addarr("max_grid_size_y", mg); + +#endif +} + namespace WarpXUtilMsg{ void AlwaysAssert(bool is_expression_true, const std::string& msg = "ERROR!") diff --git a/Source/main.cpp b/Source/main.cpp index 56e84dd9a88..ee0db69604e 100644 --- a/Source/main.cpp +++ b/Source/main.cpp @@ -36,6 +36,8 @@ int main(int argc, char* argv[]) ConvertLabParamsToBoost(); + CheckGriddingForRZSpectral(); + WARPX_PROFILE_VAR("main()", pmain); const Real strt_total = amrex::second(); From def5990c56120172bb4469811ccbffd1a135947d Mon Sep 17 00:00:00 2001 From: Dave Grote Date: Thu, 4 Jun 2020 10:17:14 -0700 Subject: [PATCH 2/9] Bug fix in check gridding for RZ Spectral, fixing handling of refinement ratio --- Source/Utils/WarpXUtil.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index 4059fc0446e..79840b5d805 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -241,8 +241,11 @@ void CheckGriddingForRZSpectral () max_grid_size_x[0] = n_cell[0]; for (int lev=1 ; lev <= max_level ; lev++) { - blocking_factor_x[lev] = blocking_factor_x[lev-1]*refRatio(lev-1); - max_grid_size_x[lev] = max_grid_size_x[lev-1]*refRatio(lev-1); + // For this to be correct, this needs to read in any user specified refinement ratios. + // But since that is messy and unlikely to be needed anytime soon, the ratio is + // fixed to 2 which will be the most likely value. 
+ blocking_factor_x[lev] = blocking_factor_x[lev-1]*2; // refRatio(lev-1); + max_grid_size_x[lev] = max_grid_size_x[lev-1]*2; // refRatio(lev-1); } pp_amr.addarr("blocking_factor_x", blocking_factor_x); From 61a8a386c54809e8f8dadea05531a2aee0c4b8ff Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Fri, 5 Jun 2020 09:56:13 -0700 Subject: [PATCH 3/9] Change numprocs=2 for python reg tests and reset benchmarks (#1075) * changing nprocs to 2 for Python_PlasmaAcceleration test * Resetting benchmark for Python_PlasmaAcceleration test case due to change in nprocs=2 in reg test * resetting benchmarks for Python test-cases except Python_Langmuir due to change in numprocs in WarpX-tests.ini * changing numprocs=2 for Python tests * resetting python reg test benchmarks after updating pywarpx Co-authored-by: Revathi Jambunathan Co-authored-by: Tools Co-authored-by: Tools --- .../benchmarks_json/Python_Langmuir_2d.json | 4 +-- .../Python_Langmuir_rz_multimode.json | 36 +++++++++---------- .../Python_LaserAccelerationMR.json | 6 ++-- .../Python_PlasmaAcceleration.json | 4 +-- .../Python_PlasmaAccelerationMR.json | 4 +-- .../benchmarks_json/Python_gaussian_beam.json | 16 ++++----- Regression/WarpX-tests.ini | 12 +++---- 7 files changed, 41 insertions(+), 41 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/Python_Langmuir_2d.json b/Regression/Checksum/benchmarks_json/Python_Langmuir_2d.json index 63faafc595d..bcb47cc19fe 100644 --- a/Regression/Checksum/benchmarks_json/Python_Langmuir_2d.json +++ b/Regression/Checksum/benchmarks_json/Python_Langmuir_2d.json @@ -1,8 +1,8 @@ { "electrons": { "particle_Ex": 219449752302051.72, - "particle_cpu": 0.0, - "particle_id": 52170752.0, + "particle_cpu": 4096.0, + "particle_id": 17567744.0, "particle_momentum_x": 1.0449418710231497e-19, "particle_position_x": 0.0831205701527011, "particle_position_y": 0.08192, diff --git a/Regression/Checksum/benchmarks_json/Python_Langmuir_rz_multimode.json b/Regression/Checksum/benchmarks_json/Python_Langmuir_rz_multimode.json index 87c162029a4..4ce2699ec0e 100644 --- a/Regression/Checksum/benchmarks_json/Python_Langmuir_rz_multimode.json +++ b/Regression/Checksum/benchmarks_json/Python_Langmuir_rz_multimode.json @@ -1,27 +1,27 @@ { "electrons": { - "particle_Bx": 47.22388871895397, - "particle_By": 81.29252077272655, - "particle_Bz": 44.82583319084903, - "particle_Ex": 32416882559812.15, - "particle_Ey": 36617948944833.266, - "particle_Ez": 47879188522350.445, - "particle_cpu": 0.0, - "particle_id": 315662642560.0, - "particle_momentum_x": 2.1038307987217165e-20, - "particle_momentum_y": 2.3745155589395773e-20, - "particle_momentum_z": 3.10244451025094e-20, + "particle_Bx": 47.223888719248514, + "particle_By": 81.29252077224889, + "particle_Bz": 44.825833191000626, + "particle_Ex": 32416882559812.16, + "particle_Ey": 36617948944833.2, + "particle_Ez": 47879188522350.67, + "particle_cpu": 353280.0, + "particle_id": 158538771840.0, + "particle_momentum_x": 2.1038307987216882e-20, + "particle_momentum_y": 2.3745155589395634e-20, + "particle_momentum_z": 3.102444510250924e-20, "particle_position_x": 6.61250119096399, - "particle_position_y": 14.720000000000006, + "particle_position_y": 14.719999999999999, "particle_theta": 1156106.5707136206, "particle_weight": 81147583679.15045 }, "lev=0": { - "By": 2.4960978462547296, - "Ex": 879037550423.2717, - "Ez": 1707660097260.003, - "jx": 210208695619973.06, - "jz": 407196128263308.2, + "By": 2.4960978462233174, + "Ex": 
879037550423.2676, + "Ez": 1707660097260.009, + "jx": 210208695620051.66, + "jz": 407196128263304.3, "part_per_cell": 1472000.0 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAccelerationMR.json b/Regression/Checksum/benchmarks_json/Python_LaserAccelerationMR.json index 9a31307dd4c..c8fea0d0b51 100644 --- a/Regression/Checksum/benchmarks_json/Python_LaserAccelerationMR.json +++ b/Regression/Checksum/benchmarks_json/Python_LaserAccelerationMR.json @@ -1,7 +1,7 @@ { "electrons": { - "particle_cpu": 0.0, - "particle_id": 3166334488540.0, + "particle_cpu": 695224.0, + "particle_id": 1478232160988.0, "particle_momentum_x": 2.3675577751195297e-19, "particle_momentum_y": 1.929061463191686e-21, "particle_momentum_z": 1.8938350321846786e-20, @@ -11,7 +11,7 @@ "particle_weight": 21640883789.062504 }, "lev=0": { - "Bx": 2.0570941426920037, + "Bx": 2.05709414269199, "By": 2449255.0148639255, "Bz": 71239.03584863919, "Ex": 752773924385165.6, diff --git a/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration.json b/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration.json index 77404e908bc..1165cc1d3e6 100644 --- a/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration.json +++ b/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration.json @@ -26,8 +26,8 @@ "particle_Ex": 0.0, "particle_Ey": 0.0, "particle_Ez": 0.0, - "particle_cpu": 0.0, - "particle_id": 1171797606400.0, + "particle_cpu": 589824.0, + "particle_id": 514063630336.0, "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, diff --git a/Regression/Checksum/benchmarks_json/Python_PlasmaAccelerationMR.json b/Regression/Checksum/benchmarks_json/Python_PlasmaAccelerationMR.json index dd3b8b31f9a..89e3cbdb138 100644 --- a/Regression/Checksum/benchmarks_json/Python_PlasmaAccelerationMR.json +++ b/Regression/Checksum/benchmarks_json/Python_PlasmaAccelerationMR.json @@ -35,8 +35,8 @@ "particle_Ex": 0.0, "particle_Ey": 0.0, "particle_Ez": 0.0, - "particle_cpu": 0.0, - "particle_id": 1371721824993280.0, + "particle_cpu": 8388608.0, + "particle_id": 1231405243432960.0, "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, diff --git a/Regression/Checksum/benchmarks_json/Python_gaussian_beam.json b/Regression/Checksum/benchmarks_json/Python_gaussian_beam.json index 77ef42908ac..fc66d50774b 100644 --- a/Regression/Checksum/benchmarks_json/Python_gaussian_beam.json +++ b/Regression/Checksum/benchmarks_json/Python_gaussian_beam.json @@ -1,8 +1,8 @@ { "electrons": { - "particle_Bx": 0.00022846273710997817, - "particle_By": 0.00021467707180826873, - "particle_Bz": 0.00021026443277436034, + "particle_Bx": 0.0002284627371099781, + "particle_By": 0.00021467707180826862, + "particle_Bz": 0.0002102644327743607, "particle_Ex": 16981277.687556587, "particle_Ey": 17009505.007470798, "particle_Ez": 16939221.994517952, @@ -18,8 +18,8 @@ }, "lev=0": { "Bx": 1.4237125684591286e-05, - "By": 1.5680008343654183e-05, - "Bz": 1.5239786558072315e-05, + "By": 1.568000834365418e-05, + "Bz": 1.523978655807232e-05, "Ex": 261461.57101810834, "Ey": 261273.6604194712, "Ez": 260681.87892799953, @@ -29,9 +29,9 @@ "part_per_cell": 65536.0 }, "protons": { - "particle_Bx": 0.0002269244677352299, - "particle_By": 0.00021360016225691492, - "particle_Bz": 0.00021031904641947416, + "particle_Bx": 0.00022692446773522987, + "particle_By": 0.00021360016225691481, + "particle_Bz": 0.00021031904641947448, "particle_Ex": 16527170.45904002, "particle_Ey": 
16538759.196916314, "particle_Ez": 16456286.211242234, diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 8026e2b6693..4a1c8c6a885 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -577,7 +577,7 @@ dim = 2 addToCompileString = USE_PYTHON_MAIN=TRUE USE_RZ=TRUE PYINSTALLOPTIONS=--user restartTest = 0 useMPI = 1 -numprocs = 1 +numprocs = 2 useOMP = 1 numthreads = 1 compileTest = 0 @@ -989,7 +989,7 @@ dim = 3 addToCompileString = USE_PYTHON_MAIN=TRUE PYINSTALLOPTIONS=--user restartTest = 0 useMPI = 1 -numprocs = 1 +numprocs = 2 useOMP = 1 numthreads = 1 compileTest = 0 @@ -1024,7 +1024,7 @@ dim = 3 addToCompileString = USE_PYTHON_MAIN=TRUE PYINSTALLOPTIONS=--user restartTest = 0 useMPI = 1 -numprocs = 1 +numprocs = 2 useOMP = 1 numthreads = 1 compileTest = 0 @@ -1043,7 +1043,7 @@ dim = 3 addToCompileString = USE_PYTHON_MAIN=TRUE PYINSTALLOPTIONS=--user restartTest = 0 useMPI = 1 -numprocs = 1 +numprocs = 2 useOMP = 1 numthreads = 1 compileTest = 0 @@ -1130,7 +1130,7 @@ dim = 3 addToCompileString = USE_PYTHON_MAIN=TRUE PYINSTALLOPTIONS=--user restartTest = 0 useMPI = 1 -numprocs = 1 +numprocs = 2 useOMP = 1 numthreads = 1 compileTest = 0 @@ -1149,7 +1149,7 @@ dim = 2 addToCompileString = USE_PYTHON_MAIN=TRUE PYINSTALLOPTIONS=--user restartTest = 0 useMPI = 1 -numprocs = 1 +numprocs = 2 useOMP = 1 numthreads = 1 compileTest = 0 From fa2f8785447bd586694e0f0f524280d1f62cd6ad Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 5 Jun 2020 15:17:16 -0700 Subject: [PATCH 4/9] Updates to picmi, mostly related to fields, and add psatd (#1077) --- Python/pywarpx/PSATD.py | 9 +++++++++ Python/pywarpx/WarpX.py | 2 ++ Python/pywarpx/__init__.py | 1 + Python/pywarpx/picmi.py | 33 ++++++++++++++++++++++++++++----- 4 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 Python/pywarpx/PSATD.py diff --git a/Python/pywarpx/PSATD.py b/Python/pywarpx/PSATD.py new file mode 100644 index 00000000000..0cd3038336a --- /dev/null +++ b/Python/pywarpx/PSATD.py @@ -0,0 +1,9 @@ +# Copyright 2016 David Grote +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + +from .Bucket import Bucket + +psatd = Bucket('psatd') diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 12afc4e15c7..ae48bfcf6aa 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -15,6 +15,7 @@ from .Lasers import lasers, lasers_list from . import Particles from .Particles import particles, particles_list +from .PSATD import psatd from .Diagnostics import diagnostics @@ -32,6 +33,7 @@ def create_argv_list(self): argv += algo.attrlist() argv += langmuirwave.attrlist() argv += interpolation.attrlist() + argv += psatd.attrlist() # --- Search through species_names and add any predefined particle objects in the list. 
particles_list_names = [p.instancename for p in particles_list] diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 3a44684b8b8..ed2152c1d05 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -12,5 +12,6 @@ from .Langmuirwave import langmuirwave from .Interpolation import interpolation from .Particles import particles, electrons, positrons, protons, newspecies +from .PSATD import psatd from .Lasers import lasers from .Diagnostics import diagnostics diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 680cd72acd2..6f4d7a4bab9 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -388,7 +388,7 @@ def initialize_inputs(self): pywarpx.warpx.moving_window_dir = 'x' pywarpx.warpx.moving_window_v = self.moving_window_velocity[0]/constants.c # in units of the speed of light if self.moving_window_velocity[1] != 0.: - pywarpx.warpx.moving_window_dir = 'y' + pywarpx.warpx.moving_window_dir = 'z' pywarpx.warpx.moving_window_v = self.moving_window_velocity[1]/constants.c # in units of the speed of light if self.refined_regions: @@ -445,20 +445,40 @@ def initialize_inputs(self): class ElectromagneticSolver(picmistandard.PICMI_ElectromagneticSolver): def init(self, kw): - assert self.method is None or self.method in ['Yee', 'CKC'], Exception("Only 'Yee' and 'CKC' FDTD are supported") + assert self.method is None or self.method in ['Yee', 'CKC', 'PSATD'], Exception("Only 'Yee', 'CKC', and 'PSATD' are supported") self.do_pml = kw.pop('warpx_do_pml', None) self.pml_ncell = kw.pop('warpx_pml_ncell', None) + if self.method == 'PSATD': + self.periodic_single_box_fft = kw.pop('warpx_periodic_single_box_fft', None) + self.fftw_plan_measure = kw.pop('warpx_fftw_plan_measure', None) + self.do_current_correction = kw.pop('warpx_do_current_correction', None) + def initialize_inputs(self): self.grid.initialize_inputs() pywarpx.warpx.do_pml = self.do_pml pywarpx.warpx.pml_ncell = self.pml_ncell + pywarpx.warpx.do_nodal = self.l_nodal + + if self.method == 'PSATD': + pywarpx.psatd.periodic_single_box_fft = self.periodic_single_box_fft + pywarpx.psatd.fftw_plan_measure = self.fftw_plan_measure + pywarpx.psatd.do_current_correction = self.do_current_correction - # --- Same method names are used, though mapped to lower case. - pywarpx.algo.maxwell_fdtd_solver = self.method + if self.stencil_order is not None: + pywarpx.psatd.nox = self.stencil_order[0] + pywarpx.psatd.noy = self.stencil_order[1] + pywarpx.psatd.noz = self.stencil_order[2] + + if self.galilean_velocity is not None: + pywarpx.psatd.v_galilean = np.array(self.galilean_velocity)/constants.c + + else: + # --- Same method names are used, though mapped to lower case. 
+ pywarpx.algo.maxwell_fdtd_solver = self.method if self.cfl is not None: pywarpx.warpx.cfl = self.cfl @@ -530,6 +550,7 @@ def init(self, kw): self.do_dynamic_scheduling = kw.pop('warpx_do_dynamic_scheduling', None) self.load_balance_int = kw.pop('warpx_load_balance_int', None) self.load_balance_with_sfc = kw.pop('warpx_load_balance_with_sfc', None) + self.use_fdtd_nci_corr = kw.pop('warpx_use_fdtd_nci_corr', None) self.inputs_initialized = False self.warpx_initialized = False @@ -546,7 +567,7 @@ def initialize_inputs(self): if self.gamma_boost is not None: pywarpx.warpx.gamma_boost = self.gamma_boost - pywarpx.warpx.boost_direction = None + pywarpx.warpx.boost_direction = 'z' pywarpx.algo.current_deposition = self.current_deposition_algo pywarpx.algo.charge_deposition = self.charge_deposition_algo @@ -560,6 +581,8 @@ def initialize_inputs(self): pywarpx.warpx.load_balance_int = self.load_balance_int pywarpx.warpx.load_balance_with_sfc = self.load_balance_with_sfc + pywarpx.particles.use_fdtd_nci_corr = self.use_fdtd_nci_corr + particle_shape = self.particle_shape for s in self.species: if s.particle_shape is not None: From fdcba52244c6391f3253841823dafe923542d68c Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 5 Jun 2020 16:50:18 -0700 Subject: [PATCH 5/9] Fixed bad arguments in PICMI_inputs_laser_acceleration.py (#1074) Note that polarization_angle was changed to 0 since that was the value being used because the polarization_argument was bad. --- .../laser_acceleration/PICMI_inputs_laser_acceleration.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_laser_acceleration.py b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_laser_acceleration.py index 06b3426f3a8..185a342677d 100644 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_laser_acceleration.py +++ b/Examples/Physics_applications/laser_acceleration/PICMI_inputs_laser_acceleration.py @@ -14,7 +14,7 @@ laser_wavelength = 8e-07 # Wavelength of the laser (in meters) laser_waist = 5e-06 # Waist of the laser (in meters) laser_duration = 15e-15 # Duration of the laser (in seconds) -laser_polarization = np.pi/2. # Polarization angle (in rad) +laser_polarization = 0. 
# Polarization angle (in rad) laser_injection_loc = 9.e-6 # Position of injection (in meters, along z) laser_focal_distance = 100.e-6 # Focal distance from the injection (in meters) laser_t_peak = 30.e-15 # The time at which the laser reaches its peak @@ -62,7 +62,7 @@ duration = laser_duration, focal_position = [0., 0., laser_focal_distance + laser_injection_loc], centroid_position = [0., 0., laser_injection_loc - constants.c*laser_t_peak], - polarization_angle = laser_polarization, + polarization_direction = [np.cos(laser_polarization), np.sin(laser_polarization), 0.], propagation_direction = [0,0,1], E0 = laser_a0*2.*np.pi*constants.m_e*constants.c**2/(constants.q_e*laser_wavelength)) # Maximum amplitude of the laser field (in V/m) @@ -117,7 +117,6 @@ sim = picmi.Simulation(solver = solver, max_steps = max_steps, verbose = 1, - cfl = 1.0, warpx_current_deposition_algo = 'esirkepov') sim.add_species(electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim)) From dd7ca92add4de063b51b4f5a18a8e790ab4cf35a Mon Sep 17 00:00:00 2001 From: NeilZaim <49716072+NeilZaim@users.noreply.github.com> Date: Sat, 6 Jun 2020 08:34:48 +0200 Subject: [PATCH 6/9] Fix typo for pml_ncell in parameters doc (#1071) --- Docs/source/running_cpp/parameters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/running_cpp/parameters.rst b/Docs/source/running_cpp/parameters.rst index f43068bc477..1d03dc7a811 100644 --- a/Docs/source/running_cpp/parameters.rst +++ b/Docs/source/running_cpp/parameters.rst @@ -1105,7 +1105,7 @@ Boundary conditions and around the refinement patches. See the section :doc:`../../theory/PML` for more details. -* ``warpx.pml_ncells`` (`int`; default: 10) +* ``warpx.pml_ncell`` (`int`; default: 10) The depth of the PML, in number of cells. * ``warpx.pml_delta`` (`int`; default: 10) From ef9a4ba5e0a47d3114e01ca07a0c4e3eb2873bd7 Mon Sep 17 00:00:00 2001 From: WeiqunZhang Date: Sat, 6 Jun 2020 00:50:10 -0700 Subject: [PATCH 7/9] remove #line wp_parser.l because it causes issues with AMReX make system (#1078) --- Source/Parser/wp_parser.lex.cpp | 52 --------------------------------- 1 file changed, 52 deletions(-) diff --git a/Source/Parser/wp_parser.lex.cpp b/Source/Parser/wp_parser.lex.cpp index 09a1b2db85a..9a417e926f5 100644 --- a/Source/Parser/wp_parser.lex.cpp +++ b/Source/Parser/wp_parser.lex.cpp @@ -521,12 +521,9 @@ int yy_flex_debug = 0; #define YY_MORE_ADJ 0 #define YY_RESTORE_YY_MORE_OFFSET char *yytext; -#line 1 "wp_parser.l" -#line 3 "wp_parser.l" #include "wp_parser_y.h" #include "wp_parser.tab.h" #line 529 "wp_parser.lex.c" -#line 8 "wp_parser.l" /* Tokens NUMBER, SYMBOL, F1, POW, F2, etc. are defined in wp_parser.y. */ /* Types WP_SQRT, WP_SQRT, etc. are defined in wp_parser_y.h. 
*/ /* Used leater to define NUMBER */ @@ -747,7 +744,6 @@ YY_DECL } { -#line 14 "wp_parser.l" #line 754 "wp_parser.lex.c" @@ -808,213 +804,166 @@ YY_DECL goto yy_find_action; case 1: -#line 17 "wp_parser.l" case 2: -#line 18 "wp_parser.l" case 3: -#line 19 "wp_parser.l" case 4: -#line 20 "wp_parser.l" case 5: -#line 21 "wp_parser.l" case 6: -#line 22 "wp_parser.l" case 7: -#line 23 "wp_parser.l" case 8: -#line 24 "wp_parser.l" case 9: -#line 25 "wp_parser.l" case 10: -#line 26 "wp_parser.l" case 11: YY_RULE_SETUP -#line 26 "wp_parser.l" { return yytext[0]; } /* simply pass through */ YY_BREAK /* yylval is union type defined in wp_parser.tab.h that is generated * by bison with wp_parser.y */ case 12: YY_RULE_SETUP -#line 31 "wp_parser.l" { yylval.f1 = WP_SQRT; return F1; } YY_BREAK case 13: YY_RULE_SETUP -#line 32 "wp_parser.l" { yylval.f1 = WP_EXP; return F1; } YY_BREAK case 14: YY_RULE_SETUP -#line 33 "wp_parser.l" { yylval.f1 = WP_LOG; return F1; } YY_BREAK case 15: YY_RULE_SETUP -#line 34 "wp_parser.l" { yylval.f1 = WP_LOG10; return F1; } YY_BREAK case 16: YY_RULE_SETUP -#line 35 "wp_parser.l" { yylval.f1 = WP_SIN; return F1; } YY_BREAK case 17: YY_RULE_SETUP -#line 36 "wp_parser.l" { yylval.f1 = WP_COS; return F1; } YY_BREAK case 18: YY_RULE_SETUP -#line 37 "wp_parser.l" { yylval.f1 = WP_TAN; return F1; } YY_BREAK case 19: YY_RULE_SETUP -#line 38 "wp_parser.l" { yylval.f1 = WP_ASIN; return F1; } YY_BREAK case 20: YY_RULE_SETUP -#line 39 "wp_parser.l" { yylval.f1 = WP_ACOS; return F1; } YY_BREAK case 21: YY_RULE_SETUP -#line 40 "wp_parser.l" { yylval.f1 = WP_ATAN; return F1; } YY_BREAK case 22: YY_RULE_SETUP -#line 41 "wp_parser.l" { yylval.f1 = WP_SINH; return F1; } YY_BREAK case 23: YY_RULE_SETUP -#line 42 "wp_parser.l" { yylval.f1 = WP_COSH; return F1; } YY_BREAK case 24: YY_RULE_SETUP -#line 43 "wp_parser.l" { yylval.f1 = WP_TANH; return F1; } YY_BREAK case 25: YY_RULE_SETUP -#line 44 "wp_parser.l" { yylval.f1 = WP_ABS; return F1; } YY_BREAK case 26: YY_RULE_SETUP -#line 45 "wp_parser.l" { yylval.f1 = WP_ABS; return F1; } YY_BREAK case 27: YY_RULE_SETUP -#line 46 "wp_parser.l" { yylval.f2 = WP_POW; return POW;} YY_BREAK case 28: YY_RULE_SETUP -#line 47 "wp_parser.l" { yylval.f2 = WP_POW; return POW;} YY_BREAK case 29: YY_RULE_SETUP -#line 48 "wp_parser.l" { yylval.f2 = WP_GEQ; return GEQ;} YY_BREAK case 30: YY_RULE_SETUP -#line 49 "wp_parser.l" { yylval.f2 = WP_LEQ; return LEQ;} YY_BREAK case 31: YY_RULE_SETUP -#line 50 "wp_parser.l" { yylval.f2 = WP_EQ; return EQ;} YY_BREAK case 32: YY_RULE_SETUP -#line 51 "wp_parser.l" { yylval.f2 = WP_NEQ; return NEQ;} YY_BREAK case 33: YY_RULE_SETUP -#line 52 "wp_parser.l" { yylval.f2 = WP_AND; return AND;} YY_BREAK case 34: YY_RULE_SETUP -#line 53 "wp_parser.l" { yylval.f2 = WP_OR; return OR;} YY_BREAK case 35: YY_RULE_SETUP -#line 54 "wp_parser.l" { yylval.f2 = WP_POW; return F2; } YY_BREAK case 36: YY_RULE_SETUP -#line 55 "wp_parser.l" { yylval.f2 = WP_HEAVISIDE; return F2; } YY_BREAK case 37: YY_RULE_SETUP -#line 56 "wp_parser.l" { yylval.f2 = WP_MIN; return F2; } YY_BREAK case 38: YY_RULE_SETUP -#line 57 "wp_parser.l" { yylval.f2 = WP_MAX; return F2; } YY_BREAK /* We use SYMBOL to hold variables and constants */ case 39: YY_RULE_SETUP -#line 60 "wp_parser.l" { yylval.s = wp_makesymbol(yytext); return SYMBOL; } YY_BREAK /* Number */ case 40: -#line 64 "wp_parser.l" case 41: YY_RULE_SETUP -#line 64 "wp_parser.l" { yylval.d = atof(yytext); return NUMBER; } YY_BREAK /* Special characters */ case 42: YY_RULE_SETUP -#line 67 "wp_parser.l" 
YY_BREAK case 43: YY_RULE_SETUP -#line 68 "wp_parser.l" /* ignore white space */ YY_BREAK case 44: /* rule 44 can match eol */ YY_RULE_SETUP -#line 69 "wp_parser.l" /* ignore line continuation */ YY_BREAK case 45: /* rule 45 can match eol */ YY_RULE_SETUP -#line 70 "wp_parser.l" { return EOL; } YY_BREAK /* everything else */ case 46: YY_RULE_SETUP -#line 73 "wp_parser.l" { yyerror("Unknow character %c\n", *yytext); } YY_BREAK case 47: YY_RULE_SETUP -#line 75 "wp_parser.l" YY_FATAL_ERROR( "flex scanner jammed" ); YY_BREAK #line 1021 "wp_parser.lex.c" @@ -2022,6 +1971,5 @@ void yyfree (void * ptr ) #define YYTABLES_NAME "yytables" -#line 75 "wp_parser.l" From 8b7b1404c7c72f6ba33237b1205d6ca3262a509c Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Mon, 15 Jun 2020 13:02:01 -0700 Subject: [PATCH 8/9] remove amrex namespace from diag functor cpp files and use amrex:: prefic (#1093) --- .../ComputeDiagFunctors/CellCenterFunctor.cpp | 8 +++----- .../Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp | 4 +--- .../Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp | 13 ++++++------- .../ComputeDiagFunctors/PartPerCellFunctor.cpp | 4 ++-- .../ComputeDiagFunctors/PartPerGridFunctor.cpp | 10 ++++------ 5 files changed, 16 insertions(+), 23 deletions(-) diff --git a/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp index 3ee86dd7d58..a821861bb00 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp @@ -1,8 +1,6 @@ #include "CellCenterFunctor.H" #include "Utils/CoarsenIO.H" -using namespace amrex; - CellCenterFunctor::CellCenterFunctor(amrex::MultiFab const * mf_src, int lev, amrex::IntVect crse_ratio, bool convertRZmodes2cartesian, int ncomp) @@ -21,12 +19,12 @@ CellCenterFunctor::operator()(amrex::MultiFab& mf_dst, int dcomp) const nComp()==1, "The RZ averaging over modes must write into 1 single component"); auto& warpx = WarpX::GetInstance(); - MultiFab mf_dst_stag(m_mf_src->boxArray(), warpx.DistributionMap(m_lev), 1, m_mf_src->nGrowVect()); + amrex::MultiFab mf_dst_stag(m_mf_src->boxArray(), warpx.DistributionMap(m_lev), 1, m_mf_src->nGrowVect()); // Mode 0 - MultiFab::Copy(mf_dst_stag, *m_mf_src, 0, 0, 1, m_mf_src->nGrowVect()); + amrex::MultiFab::Copy(mf_dst_stag, *m_mf_src, 0, 0, 1, m_mf_src->nGrowVect()); for (int ic=1 ; ic < m_mf_src->nComp() ; ic += 2) { // All modes > 0 - MultiFab::Add(mf_dst_stag, *m_mf_src, ic, 0, 1, m_mf_src->nGrowVect()); + amrex::MultiFab::Add(mf_dst_stag, *m_mf_src, ic, 0, 1, m_mf_src->nGrowVect()); } CoarsenIO::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); } else { diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp index b0e4c48e8f9..3b2889b83d1 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp @@ -1,8 +1,6 @@ #include "DivBFunctor.H" #include "Utils/CoarsenIO.H" -using namespace amrex; - DivBFunctor::DivBFunctor(const std::array arr_mf_src, const int lev, const amrex::IntVect crse_ratio, const int ncomp) : ComputeDiagFunctor(ncomp, crse_ratio), m_arr_mf_src(arr_mf_src), m_lev(lev) {} @@ -17,7 +15,7 @@ DivBFunctor::operator()(amrex::MultiFab& mf_dst, int dcomp) const constexpr int ng = 1; // A cell-centered divB multifab spanning the entire domain is generated // and divB is 
computed on the cell-center, with ng=1. - MultiFab divB( warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), 1, ng ); + amrex::MultiFab divB( warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), 1, ng ); warpx.ComputeDivB(divB, 0, m_arr_mf_src, WarpX::CellSize(m_lev) ); // Coarsen and Interpolate from divB to coarsened/reduced_domain mf_dst CoarsenIO::Coarsen( mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp index 418a1fa868b..adf78b3291a 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp @@ -1,8 +1,6 @@ #include "DivEFunctor.H" #include "Utils/CoarsenIO.H" -using namespace amrex; - DivEFunctor::DivEFunctor(const std::array arr_mf_src, const int lev, const amrex::IntVect crse_ratio, bool convertRZmodes2cartesian, const int ncomp) @@ -20,8 +18,9 @@ DivEFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp) const constexpr int ng = 1; // For staggered and nodal calculations, divE is computed on the nodes. // The temporary divE MultiFab is generated to comply with the location of divE. - const BoxArray& ba = amrex::convert(warpx.boxArray(m_lev),IntVect::TheUnitVector()); - MultiFab divE(ba, warpx.DistributionMap(m_lev), 2*warpx.n_rz_azimuthal_modes-1, ng ); + const amrex::BoxArray& ba = amrex::convert(warpx.boxArray(m_lev), + amrex::IntVect::TheUnitVector()); + amrex::MultiFab divE(ba, warpx.DistributionMap(m_lev), 2*warpx.n_rz_azimuthal_modes-1, ng); warpx.ComputeDivE(divE, m_lev); #ifdef WARPX_DIM_RZ @@ -31,12 +30,12 @@ DivEFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp) const AMREX_ALWAYS_ASSERT_WITH_MESSAGE( nComp()==1, "The RZ averaging over modes must write into 1 single component"); - MultiFab mf_dst_stag(divE.boxArray(), warpx.DistributionMap(m_lev), 1, divE.nGrowVect()); + amrex::MultiFab mf_dst_stag(divE.boxArray(), warpx.DistributionMap(m_lev), 1, divE.nGrowVect()); // Mode 0 - MultiFab::Copy(mf_dst_stag, divE, 0, 0, 1, divE.nGrowVect()); + amrex::MultiFab::Copy(mf_dst_stag, divE, 0, 0, 1, divE.nGrowVect()); for (int ic=1 ; ic < divE.nComp() ; ic += 2) { // Real part of all modes > 0 - MultiFab::Add(mf_dst_stag, divE, ic, 0, 1, divE.nGrowVect()); + amrex::MultiFab::Add(mf_dst_stag, divE, ic, 0, 1, divE.nGrowVect()); } CoarsenIO::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); } else { diff --git a/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp index ce41cc64ead..232a2348f2c 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp @@ -2,7 +2,7 @@ #include "WarpX.H" #include "Utils/CoarsenIO.H" -using namespace amrex; +using namespace amrex::literals; PartPerCellFunctor::PartPerCellFunctor(const amrex::MultiFab* mf_src, const int lev, amrex::IntVect crse_ratio, const int ncomp) : ComputeDiagFunctor(ncomp, crse_ratio), m_lev(lev) @@ -22,7 +22,7 @@ PartPerCellFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp) const // the operations performend in the CoarsenAndInterpolate function. constexpr int ng = 1; // Temporary cell-centered, single-component MultiFab for storing particles per cell. 
- MultiFab ppc_mf(warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), 1, ng); + amrex::MultiFab ppc_mf(warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), 1, ng); // Set value to 0, and increment the value in each cell with ppc. ppc_mf.setVal(0._rt); // Compute ppc which includes a summation over all species. diff --git a/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp index 0dc3003d119..b61748a18e6 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp @@ -1,8 +1,6 @@ #include "PartPerGridFunctor.H" #include "Utils/CoarsenIO.H" -using namespace amrex; - PartPerGridFunctor::PartPerGridFunctor(const amrex::MultiFab * const mf_src, const int lev, const amrex::IntVect crse_ratio, const int ncomp) : ComputeDiagFunctor(ncomp, crse_ratio), m_lev(lev) { @@ -16,19 +14,19 @@ void PartPerGridFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp) const { auto& warpx = WarpX::GetInstance(); - const Vector& npart_in_grid = warpx.GetPartContainer().NumberOfParticlesInGrid(m_lev); + const amrex::Vector& npart_in_grid = warpx.GetPartContainer().NumberOfParticlesInGrid(m_lev); // Guard cell is set to 1 for generality. However, for a cell-centered // output Multifab, mf_dst, the guard-cell data is not needed especially considering // the operations performend in the CoarsenAndInterpolate function. constexpr int ng = 1; // Temporary MultiFab containing number of particles per grid. // (stored as constant for all cells in each grid) - MultiFab ppg_mf(warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), 1, ng); + amrex::MultiFab ppg_mf(warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), 1, ng); #ifdef _OPENMP #pragma omp parallel #endif - for (MFIter mfi(ppg_mf); mfi.isValid(); ++mfi) { - ppg_mf[mfi].setVal(static_cast(npart_in_grid[mfi.index()])); + for (amrex::MFIter mfi(ppg_mf); mfi.isValid(); ++mfi) { + ppg_mf[mfi].setVal(static_cast(npart_in_grid[mfi.index()])); } // Coarsen and interpolate from ppg_mf to the output diagnostic MultiFab, mf_dst. From b4e003e96f57ce5bf88308f0778396887eca5b81 Mon Sep 17 00:00:00 2001 From: Dave Grote Date: Mon, 15 Jun 2020 15:25:22 -0700 Subject: [PATCH 9/9] For RZ spectral, updated documentation regarding blocking factor and max grid size --- Docs/source/running_cpp/parallelization.rst | 7 +++++++ Source/Utils/WarpXUtil.cpp | 16 ++++++++-------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/Docs/source/running_cpp/parallelization.rst b/Docs/source/running_cpp/parallelization.rst index 86d86ffc764..603276e32be 100644 --- a/Docs/source/running_cpp/parallelization.rst +++ b/Docs/source/running_cpp/parallelization.rst @@ -66,3 +66,10 @@ simulation, it can be cumbersome to calculate the number of cells and the physical size of the computational domain for a given resolution. This :download:`Python script<../../../Tools/DevUtils/compute_domain.py>` does it automatically. + +When using the RZ spectral solver, the values of ``amr.max_grid_size`` and ``amr.blocking_factor`` are constrained since the solver +requires that the full radial extent be within a each block. +For the radial values, any input is ignored and the max grid size and blocking factor are both set equal to the number of radial cells. 
+For the longitudinal values, the blocking factor has a minimum size of 8, allowing the computational domain of each block to be large enough relative to the guard cells for reasonable performance, but the max grid size and blocking factor must also be small enough so that there will be at least one block per processor.
+If the max grid size and/or blocking factor are too large, they will be silently reduced as needed.
+If there are so many processors that there are not enough blocks for the number of processors, WarpX will abort.
diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp
index 79840b5d805..7f57b9966f2 100644
--- a/Source/Utils/WarpXUtil.cpp
+++ b/Source/Utils/WarpXUtil.cpp
@@ -248,6 +248,7 @@ void CheckGriddingForRZSpectral ()
         max_grid_size_x[lev] = max_grid_size_x[lev-1]*2; // refRatio(lev-1);
     }
 
+    // Note that any user input values for these parameters are discarded.
     pp_amr.addarr("blocking_factor_x", blocking_factor_x);
     pp_amr.addarr("max_grid_size_x", max_grid_size_x);
 
@@ -257,7 +258,7 @@ void CheckGriddingForRZSpectral ()
     // shape factors and filtering.
     int nprocs = ParallelDescriptor::NProcs();
     AMREX_ALWAYS_ASSERT_WITH_MESSAGE(n_cell[1] >= 8*nprocs,
-                                     "With RZ spectral, there must be at least two z-cells per processor so that there can be at least one block per processor.");
+                                     "With RZ spectral, there must be at least eight z-cells per processor so that there can be at least one block per processor.");
 
     // Get the longitudinal blocking factor in case it was set by the user.
     // If not set, use the default value of 8.
@@ -266,10 +267,9 @@ void CheckGriddingForRZSpectral ()
     pp_amr.queryarr("blocking_factor_y",bf);
     bf.resize(std::max(static_cast<int>(bf.size()),1),8);
 
-    // Make sure that the blocking factor is small enough so
-    // that there will be at least as many blocks as there
-    // are processors. Because of the ASSERT above, bf will
-    // never be less than 8.
+    // Modify the default or any user input, making sure that the blocking factor
+    // is small enough so that there will be at least as many blocks as there are
+    // processors. Because of the ASSERT above, bf will never be less than 8.
     while (n_cell[1] < nprocs*bf[0]) {
         bf[0] /= 2;
     }
@@ -282,9 +282,9 @@ void CheckGriddingForRZSpectral ()
     pp_amr.queryarr("max_grid_size_y",mg);
     mg.resize(std::max(static_cast<int>(mg.size()),1),128);
 
-    // Make sure that the max grid size (of the finest level) is small
-    // enough so that there will be at least as many blocks as there
-    // are processors.
+    // Modify the default or any user input, making sure that the max grid size
+    // (of the coarsest level) is small enough so that there will be at least
+    // as many blocks as there are processors.
     while (n_cell[1] < nprocs*mg[0]) {
         mg[0] /= 2;
     }
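
Note on the gridding adjustment in patches 1 and 9: the snippet below is a small, self-contained Python sketch (not part of the patch series) of the longitudinal adjustment that CheckGriddingForRZSpectral() applies to ``amr.blocking_factor`` and ``amr.max_grid_size``. The function name, the defaults of 8 and 128, and the example numbers are illustrative only; the logic mirrors the C++ code shown above.

def adjust_longitudinal_gridding(n_cell_z, nprocs, blocking_factor=8, max_grid_size=128):
    """Reduce the longitudinal blocking factor and max grid size until the z
    direction can be split into at least `nprocs` blocks."""
    # Mirrors the AMREX_ALWAYS_ASSERT_WITH_MESSAGE in the patch:
    # at least eight z-cells per processor are required.
    assert n_cell_z >= 8*nprocs, "need at least eight z-cells per processor"
    # Halve the blocking factor until nprocs blocks of this size fit into n_cell_z.
    while n_cell_z < nprocs*blocking_factor:
        blocking_factor //= 2
    # Same reduction for the max grid size.
    while n_cell_z < nprocs*max_grid_size:
        max_grid_size //= 2
    return blocking_factor, max_grid_size

# Example: 512 z-cells on 16 processes. The max grid size is reduced from 128
# to 32, giving 16 grids of 32 cells (one per process); the blocking factor of 8
# already allows enough blocks, so it is left unchanged.
print(adjust_longitudinal_gridding(512, 16))  # prints (8, 32)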
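
Note on the PICMI changes in patch 4: the snippet below is a hypothetical usage sketch (not part of the patch series) showing how the new PSATD options would be requested from a PICMI input script. ``method='PSATD'`` and the ``warpx_*`` keyword arguments come from the picmi.py changes above; the grid construction and the numerical values are assumptions chosen only for illustration.

from pywarpx import picmi

# Assumed example grid; the arguments follow the PICMI standard.
grid = picmi.Cartesian3DGrid(number_of_cells=[64, 64, 64],
                             lower_bound=[-30.e-6, -30.e-6, -30.e-6],
                             upper_bound=[30.e-6, 30.e-6, 30.e-6],
                             lower_boundary_conditions=['periodic', 'periodic', 'periodic'],
                             upper_boundary_conditions=['periodic', 'periodic', 'periodic'])

# method='PSATD' routes these options into the new pywarpx.psatd bucket:
# warpx_periodic_single_box_fft -> psatd.periodic_single_box_fft,
# warpx_fftw_plan_measure       -> psatd.fftw_plan_measure,
# warpx_do_current_correction   -> psatd.do_current_correction.
solver = picmi.ElectromagneticSolver(grid=grid,
                                     method='PSATD',
                                     cfl=0.95,
                                     warpx_periodic_single_box_fft=1,
                                     warpx_do_current_correction=1)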