From 573ef6b0d80a50af235522d3ab9777ba8b4d9610 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Fri, 18 Feb 2022 11:07:08 -0800 Subject: [PATCH] Add `PHistDiag` for scraping (#153) * refactor of surface flux diagnostic handling before implementing PHistDiag * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * initial commit of `ParticleHistDiag` * updated changelog and version number * added plotting functionality specifically for ZPlane assemblies * save histogram binning details to file as well * code cleanup * changes requested during PR review * add comment about 2d plotting * remove debugging print statement * further code changes from PR review * Fix typo Co-authored-by: Peter Scherpelz <31747262+peterscherpelz@users.noreply.github.com> * fix another typo Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Peter Scherpelz <31747262+peterscherpelz@users.noreply.github.com> --- mewarpx/changelog.csv | 21 ++ mewarpx/mewarpx/_version.py | 2 +- mewarpx/mewarpx/assemblies.py | 307 ++++++++++-------- mewarpx/mewarpx/diags.py | 1 + .../mewarpx/diags_store/flux_diagnostic.py | 87 ++++- .../particle_histogram_diagnostic.py | 271 ++++++++++++++++ mewarpx/mewarpx/mwxrun.py | 7 + mewarpx/tests/test_assemblies.py | 17 +- .../test_files/histograms/Anode_electrons.npy | Bin 0 -> 368 bytes mewarpx/tests/test_phist_diagnostic.py | 78 +++++ 10 files changed, 633 insertions(+), 158 deletions(-) create mode 100644 mewarpx/mewarpx/diags_store/particle_histogram_diagnostic.py create mode 100644 mewarpx/tests/test_files/histograms/Anode_electrons.npy create mode 100644 mewarpx/tests/test_phist_diagnostic.py diff --git a/mewarpx/changelog.csv b/mewarpx/changelog.csv index 6c151e982d1..d87eaf6551a 100644 --- a/mewarpx/changelog.csv +++ b/mewarpx/changelog.csv @@ -1,4 +1,25 @@ Version, Physics version, Date, List of changes +7.1.0, 2, 2/18/2022, " + +**Features**: + +- Added + :class:`mewarpx.diags_store.particle_histogram_diagnostic.ParticleHistDiag` + to track positions of where particles are scraped. + +**Other changes**: + +- Merge ``upstream/development`` (git hash ``8e1517a``) into memaster. This + includes an update of the AMReX version which fixed a bug in restarts when + the cumulative particle count over the simulation exceeds 2^31. +- Refactored the :class:`mewarpx.assemblies.Assembly` and + :class:`mewarpx.diags_store.flux_diagnostic.SurfaceFluxDiag` with regards to + how scraped particles are processed. A ``scraped_particle_array`` is now + created to hold information about scraped particles for each assembly. This + array is then processed by flux diagnostics to obtain the absorbed particle + and energy fluxes. 
+ +" 7.0.1, 2, 2/8/2022, " **Other changes**: diff --git a/mewarpx/mewarpx/_version.py b/mewarpx/mewarpx/_version.py index dd0616da3f5..c06eca3491c 100644 --- a/mewarpx/mewarpx/_version.py +++ b/mewarpx/mewarpx/_version.py @@ -1,6 +1,6 @@ # One and only one place to store the version info # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package -__version_info__ = (7, 0, 1) +__version_info__ = (7, 1, 0) __version__ = '.'.join([str(x) for x in __version_info__]) # One and only one place to store the Physics version diff --git a/mewarpx/mewarpx/assemblies.py b/mewarpx/mewarpx/assemblies.py index b4f9b4dd5bd..6bc89d8df59 100644 --- a/mewarpx/mewarpx/assemblies.py +++ b/mewarpx/mewarpx/assemblies.py @@ -5,10 +5,9 @@ import logging import numpy as np -from pywarpx import picmi +from pywarpx import callbacks, picmi from mewarpx.mwxrun import mwxrun -from mewarpx.utils_store import parallel_util from mewarpx.utils_store.appendablearray import AppendableArray # Get module-level logger @@ -30,10 +29,7 @@ class Assembly(object): # the CSV file. It can be overridden by child classes, but is not currently # adjustable by the user. - # IF CHANGING THIS, CHANGE IN self.record_scrapedparticles() AS WELL. - fields = ['t', 'step', 'species_id', 'V_e', 'n', 'q', 'E_total'] - - def __init__(self, V, T, WF, name): + def __init__(self, V, T, WF, name, read_scraped_particles): """Basic initialization. Arguments: @@ -46,6 +42,21 @@ def __init__(self, V, T, WF, name): self.T = T self.WF = WF self.name = name + self.read_scraped_particles = read_scraped_particles + + # list of diagnostic functions to call after collecting scraped + # particles + self.scraper_diag_fnlist = [] + # the step scraped will be recorded by default + self.scraped_particle_attribs_list = ["step_scraped"] + # if needed will be an appendable array to store scraped particle + # properties for easy processing with diagnostic functions + self.scraped_particle_array = None + + # currently the beforeEsolve callback is the most immediate after + # scraping callback + if self.read_scraped_particles: + callbacks.installbeforeEsolve(self._run_scraper_diag_steps) def check_geom(self): """Throw an error if the simulation geometry is unsupported by the @@ -97,56 +108,69 @@ def _install_in_simulation(self): potential=f'"{self.V}"' ) - def init_scrapedparticles(self, fieldlist): - """Set up the scraped particles array. Call before - append_scrapedparticles. + def add_diag_fn(self, diag_fn, attrib_list): + """Add a diagnostic function to the diagnostic function list. Arguments: - fieldlist (list): List of string titles for the fields. Order is - important; it must match the order for future particle appends - that are made. - """ - self._scrapedparticles_fields = fieldlist - self._scrapedparticles_data = AppendableArray( - typecode='d', unitshape=[len(fieldlist)]) - - def record_scrapedparticles(self): - """Handles transforming raw particle information from the WarpX - scraped particle buffer to the information used to record particles as - a function of time. - - Note: - Assumes the fixed form of fields given in Assembly(). Doesn't - check since this is called many times. - - Note: - The total charge scraped and energy of the particles scraped - is multiplied by -1 since these quantities are leaving the system. + diag_fn (function): Function to be run after + _read_scraped_particles() that processes the scraped particle + dictionary for a specific diagnostic purpose. 
+ attrib_list: (list of strings): Particle attributes that should + be included in the scraped particle dictionary for the given + diagnostic function. """ + if not callable(diag_fn): + raise ValueError("Must provide a callable diagnostic function.") + + self.scraper_diag_fnlist.append(diag_fn) + + new_attribs = False + for attrib in attrib_list: + if attrib not in self.scraped_particle_attribs_list: + self.scraped_particle_attribs_list.append(attrib) + new_attribs = True + + # re-initialize the scraped_particle_array so that it has space for + # the new particle attributes + if new_attribs: + if (self.scraped_particle_array is not None + and self.scraped_particle_array._datalen != 0 + ): + raise RuntimeError( + f"The scraped particle array of {self.name} is not empty; " + "cannot add new particle attribute to track." + ) + # note the +1 in unitshape is to save species ID + self.scraped_particle_array = AppendableArray( + typecode='d', + unitshape=[len(self.scraped_particle_attribs_list)+1] + ) - # skip conductors that don't have a label to get scraped particles with - if not hasattr(self, 'scraper_label'): - logger.warning(f"Assembly {self.name} doesn't have a scraper label") + def _run_scraper_diag_steps(self): + """Perform the steps involved in diagnostics of scraped particles.""" + # return early if no particle attributes will be recorded + if self.scraped_particle_array is None: return + if not hasattr(self, 'scraper_label'): + raise AttributeError( + f"Cannot record particles scraped on {self.name} since it " + "does not have a scraper label." + ) + + # accumulate the scraped particle data + self._read_scraped_particles() + # construct dictionary of scraped particle data + lpdict = self._get_scraped_particles_dict() + # run all diagnostic functions + for func in self.scraper_diag_fnlist: + func(lpdict) + + def _read_scraped_particles(self): + """Function to read the scraped particle buffer and populate the + scraped_particle_array.""" # loop over species and get the scraped particle data from the buffer for species in mwxrun.simulation.species: - data = np.zeros(7) - data[0] = mwxrun.get_t() - data[1] = mwxrun.get_it() - data[2] = species.species_number - data[3] = self.getvoltage_e() - - # When pre-seeding a simulation with plasma we inject particles over - # embedded boundaries which causes the first scraping step to - # show very large currents. For this reason we skip that first step - # but note that we inject after the first step so we need to skip - # scraping step 2. 
- if data[1] == 2: - self.append_scrapedparticles(data) - continue - - empty = True idx_list = [] # get the number of particles in the buffer - this is primarily @@ -157,104 +181,95 @@ def record_scrapedparticles(self): ) # logger.info(f"{self.name} scraped {buffer_count} {species.name}") - if buffer_count > 0: - # get the timesteps at which particles were scraped - comp_steps = mwxrun.sim_ext.get_particle_boundary_buffer( - species.name, self.scraper_label, "step_scraped", mwxrun.lev - ) - # get the particles that were scraped in this timestep - for arr in comp_steps: - idx_list.append(np.where(arr == mwxrun.get_it())[0]) - if len(idx_list[-1]) != 0: - empty = False - - # sort the particles appropriately if this is an eb - if not empty and self.scraper_label == 'eb': - temp_idx_list = [] - structs = mwxrun.sim_ext.get_particle_boundary_buffer_structs( - species.name, self.scraper_label, mwxrun.lev - ) + # if there are no particles in the buffer return early + if buffer_count == 0: + return - if mwxrun.geom_str == 'XZ': - xpos = [struct['x'] for struct in structs] - ypos = [struct['y'] for struct in structs] - zpos = [struct['y'] for struct in structs] - elif mwxrun.geom_str == 'XYZ': - xpos = [struct['x'] for struct in structs] - ypos = [struct['y'] for struct in structs] - zpos = [struct['z'] for struct in structs] - elif mwxrun.geom_str == 'RZ': - xpos = [struct['x'] for struct in structs] - ypos = [np.zeros(len(struct['y'])) for struct in structs] - zpos = [struct['y'] for struct in structs] - else: - raise NotImplementedError( - f"Scraping not implemented for {mwxrun.geom_str}." - ) + # get the timesteps at which particles were scraped + comp_steps = mwxrun.sim_ext.get_particle_boundary_buffer( + species.name, self.scraper_label, "step_scraped", mwxrun.lev + ) + # get the particles that were scraped in this timestep + for arr in comp_steps: + idx_list.append(np.where(arr == mwxrun.get_it())[0]) - for i in range(len(idx_list)): - is_inside = self.isinside( - xpos[i][idx_list[i]], ypos[i][idx_list[i]], - zpos[i][idx_list[i]] - ) - temp_idx_list.append(idx_list[i][np.where(is_inside)]) + raw_particle_data = {} - # set the scraped timestep to -1 for particles in this - # EB so that they are not considered again - comp_steps[i][idx_list[i][np.where(is_inside)]] = -1 + # get the particle structs from the boundary buffer + structs = mwxrun.sim_ext.get_particle_boundary_buffer_structs( + species.name, self.scraper_label, mwxrun.lev + ) + if mwxrun.geom_str == 'XZ' or mwxrun.geom_str == 'RZ': + raw_particle_data['x'] = [struct['x'] for struct in structs] + raw_particle_data['y'] = [ + np.zeros(len(struct['y'])) for struct in structs] + raw_particle_data['z'] = [struct['y'] for struct in structs] + elif mwxrun.geom_str == 'XYZ': + raw_particle_data['x'] = [struct['x'] for struct in structs] + raw_particle_data['y'] = [struct['y'] for struct in structs] + raw_particle_data['z'] = [struct['z'] for struct in structs] + else: + raise NotImplementedError( + f"Scraping not implemented for {mwxrun.geom_str}." 
+ ) - idx_list = temp_idx_list + # sort the particles appropriately if this is an eb + if self.scraper_label == 'eb': + temp_idx_list = [] - if not empty: - data[4] = sum(np.size(idx) for idx in idx_list) - w_arrays = mwxrun.sim_ext.get_particle_boundary_buffer( - species.name, self.scraper_label, "w", mwxrun.lev - ) - data[5] = -species.sq * sum(np.sum(w_arrays[i][idx_list[i]]) - for i in range(len(idx_list)) - ) - E_arrays = mwxrun.sim_ext.get_particle_boundary_buffer( - species.name, self.scraper_label, "E_total", mwxrun.lev - ) - data[6] = -sum(np.sum(E_arrays[i][idx_list[i]]) - for i in range(len(idx_list)) - ) + for i in range(len(idx_list)): + is_inside = self.isinside( + raw_particle_data['x'][i][idx_list[i]], + raw_particle_data['y'][i][idx_list[i]], + raw_particle_data['z'][i][idx_list[i]] + ) + temp_idx_list.append(idx_list[i][np.where(is_inside)]) - self.append_scrapedparticles(data) + # set the scraped timestep to -1 for particles in this + # EB so that they are not considered again + comp_steps[i][idx_list[i][np.where(is_inside)]] = -1 - def append_scrapedparticles(self, data): - """Append one or more lines of scraped particles data. + idx_list = temp_idx_list - Arguments: - data (np.ndarray): Array of shape (m) or (n, m) where m is the - number of fields and n is the number of rows of data to append. - """ - self._scrapedparticles_data.append(data) + for ii, attrib in enumerate(self.scraped_particle_attribs_list): + # get the desired particle property + if attrib not in ['x', 'y', 'z']: + raw_particle_data[attrib] = ( + mwxrun.sim_ext.get_particle_boundary_buffer( + species.name, self.scraper_label, attrib, mwxrun.lev + ) + ) - def get_scrapedparticles(self, clear=False): - """Retrieve a copy of scrapedparticles data. + # loop over all tiles and append the particle data from that tile + # to scraped_particle_data + for ii, idxs in enumerate(idx_list): + if len(idxs) == 0: + continue + data = np.zeros( + (len(idxs), len(self.scraped_particle_attribs_list)+1) + ) + data[:,0] = species.species_number + for jj in range(0, len(self.scraped_particle_attribs_list)): + data[:,jj+1] = raw_particle_data[ + self.scraped_particle_attribs_list[jj]][ii][idxs] + self.scraped_particle_array.append(data) - Arguments: - clear (bool): If True, clear the particle data rows entered (field - names are still initialized as before). Default False. + def _get_scraped_particles_dict(self): + """Retrieve a dictionary containing the scraped particle data. Returns: scrapedparticles_dict (collections.OrderedDict): Keys are the originally passed field strings for lost particles. Values are an (n)-shape numpy array for each field. """ - lpdata = self._scrapedparticles_data.data() - - # Sum all except t/step/jsid/V_e from all processors - lpdata[:,4:] = parallel_util.parallelsum(np.array(lpdata[:,4:])) + lpdata = self.scraped_particle_array.data() lpdict = collections.OrderedDict( - [(fieldname, np.array(lpdata[:, ii], copy=True)) - for ii, fieldname in enumerate(self._scrapedparticles_fields)]) - - if clear: - self._scrapedparticles_data.cleardata() + [(name, np.array(lpdata[:, ii], copy=True)) for ii, name in + enumerate(["species_id"]+self.scraped_particle_attribs_list)] + ) + self.scraped_particle_array.cleardata() return lpdict @@ -263,7 +278,7 @@ class ZPlane(Assembly): """A semi-infinite plane.""" geoms = ['RZ', 'X', 'XZ', 'XYZ'] - def __init__(self, z, zsign, V, T, WF, name): + def __init__(self, z, zsign, V, T, WF, name, read_scraped_particles=True): """Basic initialization. 
Arguments: @@ -275,7 +290,10 @@ def __init__(self, z, zsign, V, T, WF, name): WF (float): Work function (eV) name (str): Assembly name """ - super(ZPlane, self).__init__(V=V, T=T, WF=WF, name=name) + super(ZPlane, self).__init__( + V=V, T=T, WF=WF, name=name, + read_scraped_particles=read_scraped_particles + ) self.z = z @@ -287,9 +305,10 @@ def __init__(self, z, zsign, V, T, WF, name): class Cathode(ZPlane): """A basic wrapper to define a semi-infinite plane for the cathode.""" - def __init__(self, V, T, WF): + def __init__(self, V, T, WF, read_scraped_particles=True): super(Cathode, self).__init__( - z=0, zsign=-1, V=V, T=T, WF=WF, name="Cathode" + z=0, zsign=-1, V=V, T=T, WF=WF, name="Cathode", + read_scraped_particles=read_scraped_particles ) # set solver boundary potential mwxrun.grid.potential_zmin = self.V @@ -300,9 +319,10 @@ def __init__(self, V, T, WF): class Anode(ZPlane): """A basic wrapper to define a semi-infinite plane for the anode.""" - def __init__(self, z, V, T, WF): + def __init__(self, z, V, T, WF, read_scraped_particles=True): super(Anode, self).__init__( - z=z, zsign=1, V=V, T=T, WF=WF, name="Anode" + z=z, zsign=1, V=V, T=T, WF=WF, name="Anode", + read_scraped_particles=read_scraped_particles ) # set solver boundary potential mwxrun.grid.potential_zmax = self.V @@ -315,7 +335,7 @@ class InfCylinderY(Assembly): geoms = ['XZ', 'XYZ'] def __init__(self, center_x, center_z, radius, V, T, WF, name, - install_in_simulation=True): + install_in_simulation=True, read_scraped_particles=True): """Basic initialization. Arguments: @@ -331,7 +351,10 @@ def __init__(self, center_x, center_z, radius, V, T, WF, name, install_in_simulation (bool): If True and the Assembly is an embedded boundary it will be included in the WarpX simulation """ - super(InfCylinderY, self).__init__(V=V, T=T, WF=WF, name=name) + super(InfCylinderY, self).__init__( + V=V, T=T, WF=WF, name=name, + read_scraped_particles=read_scraped_particles + ) self.center_x = center_x self.center_z = center_z self.radius = radius @@ -395,7 +418,7 @@ class CylinderZ(Assembly): geoms = ['RZ'] def __init__(self, V, T, WF, name, r_outer, r_inner=0.0, zmin=None, - zmax=None): + zmax=None, read_scraped_particles=True): """Basic initialization. Arguments: @@ -410,7 +433,10 @@ def __init__(self, V, T, WF, name, r_outer, r_inner=0.0, zmin=None, zmax (float): Upper z limit of the cylinder (m). Defaults to mwxrun.zmax. """ - super(CylinderZ, self).__init__(V=V, T=T, WF=WF, name=name) + super(CylinderZ, self).__init__( + V=V, T=T, WF=WF, name=name, + read_scraped_particles=read_scraped_particles + ) self.r_inner = r_inner self.r_outer = r_outer self.r_center = (self.r_inner + self.r_outer) / 2.0 @@ -543,7 +569,7 @@ class Rectangle(Assembly): geoms = ['XZ', 'XYZ'] def __init__(self, center_x, center_z, length_x, length_z, V, T, WF, name, - install_in_simulation=True): + install_in_simulation=True, read_scraped_particles=True): """Basic initialization. 
Arguments: @@ -560,7 +586,10 @@ def __init__(self, center_x, center_z, length_x, length_z, V, T, WF, name, install_in_simulation (bool): If True and the Assembly is an embedded boundary it will be included in the WarpX simulation """ - super(Rectangle, self).__init__(V=V, T=T, WF=WF, name=name) + super(Rectangle, self).__init__( + V=V, T=T, WF=WF, name=name, + read_scraped_particles=read_scraped_particles + ) self.center_x = float(center_x) self.center_z = float(center_z) self.length_x = float(length_x) diff --git a/mewarpx/mewarpx/diags.py b/mewarpx/mewarpx/diags.py index d5bd146abe5..8323a665f91 100644 --- a/mewarpx/mewarpx/diags.py +++ b/mewarpx/mewarpx/diags.py @@ -7,3 +7,4 @@ from mewarpx.diags_store.field_diagnostic import * # noqa from mewarpx.diags_store.flux_diagnostic import * # noqa from mewarpx.diags_store.particle_diagnostic import * # noqa +from mewarpx.diags_store.particle_histogram_diagnostic import * # noqa diff --git a/mewarpx/mewarpx/diags_store/flux_diagnostic.py b/mewarpx/mewarpx/diags_store/flux_diagnostic.py index 107fa022983..cd23b9e9f8f 100644 --- a/mewarpx/mewarpx/diags_store/flux_diagnostic.py +++ b/mewarpx/mewarpx/diags_store/flux_diagnostic.py @@ -13,6 +13,8 @@ from mewarpx.diags_store import diag_base, timeseries from mewarpx.mwxrun import mwxrun +from mewarpx.utils_store import parallel_util +from mewarpx.utils_store.appendablearray import AppendableArray import mewarpx.utils_store.util as mwxutil logger = logging.getLogger(__name__) @@ -170,6 +172,9 @@ class SurfaceFluxDiag(ParticleCSVDiag): This class is used by FluxDiag; rarely used directly. """ + # IF CHANGING THIS, CHANGE IN self._record_particle_flux() AS WELL. + fields = ['t', 'step', 'species_id', 'V_e', 'n', 'q', 'E_total'] + def __init__(self, diag_steps, surface, write_dir, **kwargs): """Initialize surface-specific features. @@ -189,7 +194,6 @@ def __init__(self, diag_steps, surface, write_dir, **kwargs): raise RuntimeError("Cannot attach two SurfaceFluxDiag objects " "to the same surface.") self.surface.scraper_diag = self - self.surface.init_scrapedparticles(self.surface.fields) save_name = self.surface.name + '_scraped.csv' @@ -200,16 +204,81 @@ def __init__(self, diag_steps, surface, write_dir, **kwargs): **kwargs ) - # install callback that will have the assembly object check the - # particle buffer and record the scraped particles data - # TODO change this to only process the particle buffer on a diagnostic - # step instead of after every step - callbacks.installbeforestep( - mwxrun.sim_ext.libwarpx_so.warpx_clearParticleBoundaryBuffer) - callbacks.installbeforeEsolve(self.surface.record_scrapedparticles) + # install diagnostic function to collect scraped particle data + self.surface.add_diag_fn( + diag_fn=self._record_particle_flux, attrib_list=["w", "E_total"] + ) + + # create the flux_dict to track fluxes + self.flux_array = AppendableArray( + typecode='d', unitshape=[len(self.fields)]) + + def _record_particle_flux(self, scraped_particle_dict): + """Function to process the scraped particle dict for the total + particle and energy flux. The scraped particle array is cleared after + every step so it is known that all particles in the + scraped_particle_dict was scraped on this step. + + Arguments: + scraped_particle_dict (dict): Dictionary containing scraped + particle data. + + Note: + The total charge scraped and energy of the particles scraped + is multiplied by -1 since these quantities are leaving the system. 
+ """ + # loop over species and process scraped particle data + for species in mwxrun.simulation.species: + data = np.zeros(7) + data[0] = mwxrun.get_t() + data[1] = mwxrun.get_it() + data[2] = species.species_number + data[3] = self.surface.getvoltage_e() + + # When pre-seeding a simulation with plasma we inject particles over + # embedded boundaries which causes the first scraping step to + # show very large currents. For this reason we skip that first step + # but note that we inject after the first step so we need to skip + # scraping step 2. + if data[1] == 2: + self.flux_array.append(data) + continue + + idx = np.where(scraped_particle_dict["species_id"] == data[2]) + data[4] = np.size(idx) + data[5] = -species.sq * np.sum(scraped_particle_dict["w"][idx]) + data[6] = -np.sum(scraped_particle_dict["E_total"][idx]) + + self.flux_array.append(data) + + def _get_total_particle_flux(self, clear=False): + """Get a dictionary containing the fluxes summed over processors. + + Arguments: + clear (bool): If True, clear the particle data rows entered (field + names are still initialized as before). Default False. + + Returns: + scrapedparticles_dict (collections.OrderedDict): Keys are the + originally passed field strings for lost particles. Values are + an (n)-shape numpy array for each field. + """ + lpdata = self.flux_array.data() + + # Sum all except t/step/jsid/V_e from all processors + lpdata[:,4:] = parallel_util.parallelsum(np.array(lpdata[:,4:])) + + lpdict = collections.OrderedDict( + [(fieldname, np.array(lpdata[:, ii], copy=True)) + for ii, fieldname in enumerate(self.fields)]) + + if clear: + self.flux_array.cleardata() + + return lpdict def _collect_dataframe(self, clear=True): - partdict = self.surface.get_scrapedparticles(clear=clear) + partdict = self._get_total_particle_flux(clear=clear) # Convert species_id & step to int, because they come out as floats. partdict['species_id'] = partdict['species_id'].astype(int) partdict['step'] = partdict['step'].astype(int) diff --git a/mewarpx/mewarpx/diags_store/particle_histogram_diagnostic.py b/mewarpx/mewarpx/diags_store/particle_histogram_diagnostic.py new file mode 100644 index 00000000000..5abf3ad1044 --- /dev/null +++ b/mewarpx/mewarpx/diags_store/particle_histogram_diagnostic.py @@ -0,0 +1,271 @@ +import logging +import os + +import matplotlib.pyplot as plt +import numpy as np +from pywarpx import callbacks + +from mewarpx import assemblies +from mewarpx.diags_store import diag_base +from mewarpx.mwxrun import mwxrun +from mewarpx.utils_store import parallel_util + +logger = logging.getLogger(__name__) + + +class BaseParticleHistDiag(diag_base.WarpXDiagnostic): + + """Base class for diagnostics that record a multi-dimensional histogram of + particle properties. + Child classes should define ``name`` (diagnostic name), ``linres`` (the + resolution in each dimension) and ``domain`` (the boundaries of the binned + region). + """ + + PHIST_DIAG_DIR = "histograms" + + def __init__(self, diag_steps, species_list=None, include_overflow=True, + **kwargs): + """Initialize diagnostic. + + Arguments: + diag_steps (int): Number of steps between each output. + species_list (list of species): Species for which histograms should + be recorded. If None, all simulation species will be included. + include_overflow (bool): If True overflow bins to -inf and +inf + will be added. This is useful for quantities for which definite + ranges are not known (such as velocity). 
+ """ + self.write_dir = os.path.join(self.DIAG_DIR, self.PHIST_DIAG_DIR) + self.species_list = species_list + if self.species_list is None: + self.species_list = mwxrun.simulation.species + self.include_overflow = include_overflow + + super(BaseParticleHistDiag, self).__init__( + diag_steps=diag_steps, **kwargs) + + # Check that weight is not in the quantities list. This is needed to + # get the "sample" dimensions correct since weight is not a + # histogram dimension. + if 'w' in self.quantities: + self.quantities.remove('w') + + # install the scraper diag function + self.assembly.add_diag_fn( + self._process_scraped_particles, self.quantities + ['w'] + ) + + # Create bins and data array after the simulation has been initialized + # since the bin specifications are written to file + callbacks.installafterinit(self.initialize) + + # counter to properly normalize data + self.accumulated_steps = 0 + + def initialize(self): + """Set up the histogram bins and data array.""" + self.setup_bins() + self.setup_array() + + def setup_bins(self): + """Calculate the bin edges.""" + # sanity check + if len(self.domain) != len(self.linres): + raise AttributeError( + "Number of domain ranges should match length of linres array." + ) + + self.bins = [] + for ii in range(len(self.domain)): + if self.linres[ii] <= 2: + self.bins.append([-np.inf, np.inf]) + else: + if self.include_overflow: + bins = ( + [-np.inf] + + list(np.linspace(self.domain[ii][0], + self.domain[ii][1], self.linres[ii]-1)) + + [np.inf] + ) + else: + bins = list( + np.linspace(self.domain[ii][0], self.domain[ii][1], + self.linres[ii]+1) + ) + self.bins.append(bins) + self.bins = np.array(self.bins, dtype=object) + + # save bin details to file + if mwxrun.me == 0: + fname = os.path.join( + self.write_dir, f"{self.assembly.name}_histogram_bins.npy" + ) + np.save(fname, self.bins) + + def setup_array(self): + """Calculate the necessary array size; create the array itself. + """ + shape = ((len(self.species_list),) + tuple(self.linres)) + self.Harray = np.zeros(shape) + + def get_sample(self, scraped_particle_dict, species_id): + """Function to get appropriate "sample" for binning. + + Arguments: + scraped_particle_dict (dict): Dictionary containing scraped + particle properties. + species_id (int): ID of the species for which a sample should be + collected. + """ + idxs = np.where( + scraped_particle_dict['species_id'] == species_id + ) + sample = np.zeros((np.size(idxs), len(self.quantities))) + for ii, key in enumerate(self.quantities): + sample[:,ii] = scraped_particle_dict[key][idxs] + + return sample, scraped_particle_dict['w'][idxs] + + def _process_scraped_particles(self, scraped_particle_dict): + """Function to process the scraped particle data. + + Arguments: + scraped_particle_dict (dict): Dictionary containing scraped + particle data. + """ + # metools.runtools.PHistDiag() implements very general histogram + # functionality. Take a look at that class before implementing new + # functionality since that will likely avoid the need to duplicate work. + + for ii, species in enumerate(self.species_list): + sample, weights = self.get_sample( + scraped_particle_dict, species.species_number + ) + H, _ = np.histogramdd( + sample=sample, bins=self.bins, weights=weights + ) + self.Harray[ii,...] 
+= H + self.accumulated_steps += 1 + + # if this is a diagnostic period save the data + if self.check_timestep(): + self.save_and_reset_histogram() + + def save_and_reset_histogram(self): + """Save and reset the histogram.""" + # sum the histograms from all the processors + self.Harray = parallel_util.parallelsum(self.Harray) + + if mwxrun.me == 0: + self.Harray[:] /= (self.accumulated_steps * mwxrun.dt) + for ii, species in enumerate(self.species_list): + fileprefix = self.get_fileprefix(species.name) + np.save(fileprefix + '.npy', self.Harray[ii,...]) + + if self.plot: + self.plot_histograms() + + self.Harray[:] = 0.0 + self.accumulated_steps = 0 + + def get_fileprefix(self, species): + """Return filepath except for the filetype. + + Arguments: + title (str): String for title; whitespace will become underscores + """ + return os.path.join( + self.write_dir, f"{self.name}_{species}_{mwxrun.get_it():010d}" + ) + + +class ZPlanePHistDiag(BaseParticleHistDiag): + + """Particle histogram diagnostic to track the location particles are + scraped on a ZPlane assembly. + """ + + def __init__(self, diag_steps, assembly, species_list=None, linres_x=30, + linres_y=30, plot=True, **kwargs): + """Initialize diagnostic. + + Arguments: + diag_steps (int): Number of steps between each output. + assembly (mewarpx.Assembly object): The assembly object in which + particles are scraped. + species_list (list of species): Species for which histograms should + be recorded. If None, all simulation species will be included. + linres_x (int): Number of bins to create along the x-dimension. + Default 30. + linres_y (int): Number of bins to create along the y-dimension. + Default 30 unless the simulation is in 2D in which case only + 1 y-bin will be used regardless of this input parameter. + plot (bool): If True plot the histograms at each diagnostic step. + kwargs: See :class:`mewarpx.diags_store.diag_base.WarpXDiagnostic` + for more timing options. + """ + self.assembly = assembly + self.plot = plot + + self.name = self.assembly.name + + # sanity check + if not isinstance(self.assembly, assemblies.ZPlane): + raise AttributeError( + "ZPlanePHistDiag should only be used with a ZPlane assembly." + ) + + # set sensible linres based on simulation geometry - can override + # user given arguments if they don't make sense + if mwxrun.dim == 1: + logger.warn( + "ZPlane particle scraping histograms in 1d are trivial so this " + "diagnostic will not be installed." + ) + return + elif mwxrun.dim == 2: + linres_y = 1 + + # set up domain and number of bins + self.domain = [(mwxrun.xmin, mwxrun.xmax), (mwxrun.ymin, mwxrun.ymax)] + self.linres = [linres_x, linres_y] + + # for now we hard code this diagnostic to only save weight but this + # can be extended in the future + self.quantities = ['x', 'y'] + + super(ZPlanePHistDiag, self).__init__( + diag_steps=diag_steps, species_list=species_list, + include_overflow=False, **kwargs + ) + + def plot_histograms(self): + """Function to plot histogram of scraped particle positions.""" + for ii, species in enumerate(self.species_list): + # skip plotting if no data is present + if np.all(self.Harray[ii,...] 
== 0): + continue + plt.title(f"{species.name} scraped on {self.assembly.name}") + + if mwxrun.dim == 2: + plt.xlabel("x position (mm)") + plt.ylabel("Current density (A/cm$^2$)") + + dx = (self.bins[0][1] - self.bins[0][0]) + data = ( + self.Harray[ii,:,0] * abs(species.sq) + / (dx * (mwxrun.ymax - mwxrun.ymin)) + ) + plt.bar( + np.array((self.bins[0])[:-1] + dx/2)*1e3, data*1e-4, + width=dx*1e3, alpha=0.8 + ) + else: + # 2d plotting functionality can be found in + # metools.runtools.PHistDiag() if needed in the future. + logger.warn("2D histogram plotting not yet implemented.") + return + + fileprefix = self.get_fileprefix(species.name) + plt.savefig(fileprefix + '.png', dpi=300) diff --git a/mewarpx/mewarpx/mwxrun.py b/mewarpx/mewarpx/mwxrun.py index 0a6ee243567..cc429757c7b 100644 --- a/mewarpx/mewarpx/mwxrun.py +++ b/mewarpx/mewarpx/mwxrun.py @@ -130,6 +130,13 @@ def init_grid(self, lower_bound, upper_bound, number_of_cells, use_rz=False, ) callbacks.installafterinit(self._after_init) + # install a callback to clear the particle boundary buffer before + # every step, all assemblies for which scraping is enabled will + # move particles from the particle boundary buffer to their own + # scraped_particle_array before the E-field solve + callbacks.installbeforestep( + self.sim_ext.libwarpx_so.warpx_clearParticleBoundaryBuffer) + def _set_geom_str(self, use_rz): """Function to set the geometry string and coordinate map appropriately.""" diff --git a/mewarpx/tests/test_assemblies.py b/mewarpx/tests/test_assemblies.py index 044a3f49f4a..a346480646f 100644 --- a/mewarpx/tests/test_assemblies.py +++ b/mewarpx/tests/test_assemblies.py @@ -7,6 +7,7 @@ import numpy as np from mewarpx import assemblies, emission +from mewarpx.diags_store.flux_diagnostic import SurfaceFluxDiag from mewarpx.mwxrun import mwxrun from mewarpx.setups_store import diode_setup from mewarpx.utils_store import testing_util @@ -279,6 +280,9 @@ def test_two_embedded_cylinders_scraping(): npart=4000, plasma_density=1e14, ) + cyl1_flux = SurfaceFluxDiag(run.DIAG_STEPS, cylinder1, "diags/fluxes") + cyl2_flux = SurfaceFluxDiag(run.DIAG_STEPS, cylinder2, "diags/fluxes") + # Initialize the simulation run.init_simulation() run.init_warpx() @@ -291,16 +295,11 @@ def test_two_embedded_cylinders_scraping(): # Check flux on each cylinder # ####################################################################### - cylinder1.init_scrapedparticles(cylinder1.fields) - cylinder1.record_scrapedparticles() - cyl1_scraped = cylinder1.get_scrapedparticles() - - cylinder2.init_scrapedparticles(cylinder2.fields) - cylinder2.record_scrapedparticles() - cyl2_scraped = cylinder2.get_scrapedparticles() + cyl1_scraped = cyl1_flux._collect_dataframe() + cyl2_scraped = cyl2_flux._collect_dataframe() - assert np.allclose(cyl1_scraped['n'], np.array([2, 0])) - assert np.allclose(cyl2_scraped['n'], np.array([1, 2])) + assert np.allclose(np.array(cyl1_scraped['n'][-2:]), np.array([2, 0])) + assert np.allclose(np.array(cyl2_scraped['n'][-2:]), np.array([1, 2])) ####################################################################### # Check rho results against reference data # diff --git a/mewarpx/tests/test_files/histograms/Anode_electrons.npy b/mewarpx/tests/test_files/histograms/Anode_electrons.npy new file mode 100644 index 0000000000000000000000000000000000000000..80703e4f2340b13ec441be11a84f131ffbe1e302 GIT binary patch literal 368 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+i=qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= 
zXCxM+0{I%o2099cnmP)#3giMV9Py{_;2G=~}lL&hz;;IIfhR=6v}|$6e|3 z6P-^znX+=*qaNqn=TaJ{jN6>I{m9(>?tQKEAD;KGx>`$|m(Ag1bv>KrT*IUkRH7W= zJmK_H3-wc;&hmcuFSdO(bGGGbGQVH1?0ol?u?6!|Hs|jw{j3U_KbYM1y=4|&_