Skip to content

Commit

Permalink
Merge pull request #1 from ComputationalRadiationPhysics/dev
Browse files Browse the repository at this point in the history
Dev
  • Loading branch information
Anton-Le authored Sep 23, 2020
2 parents 93a6e0e + f3e2846 commit 0280e5c
Show file tree
Hide file tree
Showing 74 changed files with 518 additions and 808 deletions.
3 changes: 2 additions & 1 deletion docs/TBG_macros.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,8 @@ TBG_stopWindow="--stopWindow 1337"
#--<species>_radiation.end Time step to stop calculating the radiation
#--<species>_radiation.radPerGPU If flag is set, each GPU stores its own spectra without summing the entire simulation area
#--<species>_radiation.folderRadPerGPU Folder where the GPU specific spectras are stored
#--e_<species>_radiation.compression If flag is set, the hdf5 output will be compressed.
#--<species>_radiation.compression If flag is set, the hdf5 output will be compressed.
#--<species>_radiation.numJobs Number of independent jobs used for the radiation calculation.
TBG_radiation="--<species>_radiation.period 1 --<species>_radiation.dump 2 --<species>_radiation.totalRadiation \
--<species>_radiation.lastRadiation --<species>_radiation.start 2800 --<species>_radiation.end 3000"

Expand Down
8 changes: 7 additions & 1 deletion docs/source/usage/plugins/radiation.rst
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,11 @@ Command line option Description
``--<species>_radiation.folderRadPerGPU`` Name of the folder, where the GPU specific spectra are stored.
Default: ``radPerGPU``
``--<species>_radiation.compression`` If set, the hdf5 output is compressed.
``--<species>_radiation.numJobs`` Number of independent jobs used for the radiation calculation.
This option is used to increase the utilization of the device by producing more independent work.
This option enables accumulating data in parallel into multiple temporary arrays, thereby increasing the utilization of
the device at the cost of an increased memory footprint.
Default: ``2``
========================================= ==============================================================================================================================

Memory Complexity
Expand All @@ -295,7 +300,8 @@ Memory Complexity
Accelerator
"""""""""""

each energy bin times each coordinate bin allocates one counter (``float_X``) permanently and on each accelerator.
Locally, ``numJobs`` times the number of frequencies ``N_omega`` times the number of directions ``N_theta`` is permanently allocated.
Each result element (amplitude) is a double precision complex number.

Host
""""
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/aris-grnet/gpu.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq
# Run CUDA memtest to check GPU's health
srun -n !TBG_tasks !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/bash/mpiexec.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ export OMPI_MCA_io=^ompio
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
mpiexec -am !TBG_dstPath/tbg/openib.conf --mca mpi_leave_pinned 0 -npernode !TBG_gpusPerNode -n !TBG_tasks !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi
if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/bash/mpirun.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ export OMPI_MCA_io=^ompio
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
mpirun -am !TBG_dstPath/tbg/openib.conf --mca mpi_leave_pinned 0 -npernode !TBG_gpusPerNode -n !TBG_tasks !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/davide-cineca/gpu.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq
# Run CUDA memtest to check GPU's health
srun --cpu-bind=sockets !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/davinci-rice/picongpu.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ export OMPI_MCA_io=^ompio
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
mpirun -n TBG_tasks --display-map -am tbg/openib.conf --mca mpi_leave_pinned 0 !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi
if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/hemera-hzdr/fwkt_v100.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq
# Run CUDA memtest to check GPU's health
mpiexec !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/hemera-hzdr/gpu.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq
# Run CUDA memtest to check GPU's health
mpiexec !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/hemera-hzdr/k20.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq
# Run CUDA memtest to check GPU's health
mpiexec !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/hemera-hzdr/k20_restart.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ export OMPI_MCA_io=^ompio
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq !TBG_gpusPerNode ] ; then
mpiexec !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/hemera-hzdr/k80.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq
# Run CUDA memtest to check GPU's health
mpiexec !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/hemera-hzdr/k80_restart.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ export OMPI_MCA_io=^ompio
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedGPUPerNode -eq !TBG_gpusPerNode ] ; then
mpiexec !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/jureca-jsc/gpus.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedDevicesPerNode
# Run CUDA memtest to check GPU's health
srun --cpu_bind=sockets !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/juwels-jsc/gpus.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] && [ !TBG_numHostedDevicesPerNode
# Run CUDA memtest to check GPU's health
srun --cpu_bind=sockets !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available or compute node is not exclusively allocated, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available or compute node is not exclusively allocated. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/lawrencium-lbnl/fermi.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
# Run CUDA memtest to check GPU's health
mpirun !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/lawrencium-lbnl/k20.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
# Run CUDA memtest to check GPU's health
mpirun !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/pizdaint-cscs/large.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ ln -s ../stdout output
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
srun -n !TBG_tasks !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/pizdaint-cscs/normal.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ export PMI_NO_PREINITIALIZE=1
if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
srun -n !TBG_tasks !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/taurus-tud/V100.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
# Run CUDA memtest to check GPU's health
srun -K1 !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/taurus-tud/V100_restart.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
# Run CUDA memtest to check GPU's health
mpiexec -hostfile ../machinefile.txt !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/taurus-tud/k20x.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
# Run CUDA memtest to check GPU's health
srun -K1 !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion etc/picongpu/taurus-tud/k80.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ if [ -f !TBG_dstPath/input/bin/cuda_memtest ] ; then
# Run CUDA memtest to check GPU's health
srun -K1 !TBG_dstPath/input/bin/cuda_memtest.sh
else
echo "no binary 'cuda_memtest' available, skip GPU memory test" >&2
echo "Note: GPU memory test was skipped as no binary 'cuda_memtest' available. This does not affect PIConGPU, starting it now" >&2
fi

if [ $? -eq 0 ] ; then
Expand Down
2 changes: 1 addition & 1 deletion include/picongpu/_defaultParam.loader
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
#pragma once

#include "picongpu/param/dimension.param"
#if( PMACC_CUDA_ENABLED == 1 )
#if(BOOST_LANG_CUDA || BOOST_COMP_HIP)
# include "picongpu/param/mallocMC.param"
#endif
#include "picongpu/param/memory.param"
Expand Down
3 changes: 1 addition & 2 deletions include/picongpu/fields/EMFieldBase.tpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@
#include <pmacc/mappings/kernel/ExchangeMapping.hpp>
#include <pmacc/math/Vector.hpp>
#include <pmacc/memory/buffers/GridBuffer.hpp>
#include <pmacc/memory/MakeUnique.hpp>
#include <pmacc/particles/traits/FilterByFlag.hpp>

#include <boost/mpl/accumulate.hpp>
Expand All @@ -58,7 +57,7 @@ namespace fields
SimulationFieldHelper< MappingDesc >( cellDescription ),
id( id )
{
buffer = pmacc::memory::makeUnique< Buffer >(
buffer = std::make_unique< Buffer >(
cellDescription.getGridLayout( )
);

Expand Down
3 changes: 1 addition & 2 deletions include/picongpu/fields/FieldJ.tpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@
#include <pmacc/mappings/kernel/AreaMapping.hpp>
#include <pmacc/fields/tasks/FieldFactory.hpp>
#include <pmacc/math/Vector.hpp>
#include <pmacc/memory/MakeUnique.hpp>
#include <pmacc/fields/operations/CopyGuardToExchange.hpp>
#include <pmacc/fields/operations/AddExchangeToBorder.hpp>
#include <pmacc/traits/Resolve.hpp>
Expand Down Expand Up @@ -127,7 +126,7 @@ FieldJ::FieldJ( MappingDesc const & cellDescription ) :
if( originRecvGuard != DataSpace<simDim>::create(0) ||
endRecvGuard != DataSpace<simDim>::create(0) )
{
fieldJrecv = pmacc::memory::makeUnique< GridBuffer<ValueType, simDim > >(
fieldJrecv = std::make_unique< GridBuffer<ValueType, simDim > >(
buffer.getDeviceBuffer(),
cellDescription.getGridLayout( )
);
Expand Down
5 changes: 2 additions & 3 deletions include/picongpu/fields/FieldTmp.tpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@
#include "picongpu/particles/traits/GetInterpolation.hpp"

#include <pmacc/memory/buffers/GridBuffer.hpp>
#include <pmacc/memory/MakeUnique.hpp>
#include <pmacc/mappings/simulation/GridController.hpp>
#include <pmacc/dataManagement/DataConnector.hpp>
#include <pmacc/mappings/kernel/AreaMapping.hpp>
Expand Down Expand Up @@ -66,10 +65,10 @@ namespace picongpu
m_commTagGather = pmacc::traits::getNextId( ) + SPECIES_FIRSTTAG;

using Buffer = GridBuffer< ValueType, simDim >;
fieldTmp = memory::makeUnique< Buffer >( cellDescription.getGridLayout( ) );
fieldTmp = std::make_unique< Buffer >( cellDescription.getGridLayout( ) );

if( fieldTmpSupportGatherCommunication )
fieldTmpRecv = memory::makeUnique< Buffer >(
fieldTmpRecv = std::make_unique< Buffer >(
fieldTmp->getDeviceBuffer(),
cellDescription.getGridLayout( )
);
Expand Down
3 changes: 2 additions & 1 deletion include/picongpu/fields/MaxwellSolver/YeePML/YeePML.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,8 @@ namespace maxwellSolver
Thickness globalThickness;
for( uint32_t axis = 0u; axis < simDim; axis++ )
for( auto direction = 0; direction < 2; direction++ )
globalThickness( axis, direction ) = absorber::numCells[ axis ][ direction ];
globalThickness( axis, direction ) =
absorber::getGlobalThickness()( axis, direction );
return globalThickness;
}

Expand Down
Loading

0 comments on commit 0280e5c

Please sign in to comment.