diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml
index bf1671b94f9..f83a8a0ab3a 100644
--- a/.github/workflows/cmake.yml
+++ b/.github/workflows/cmake.yml
@@ -28,7 +28,7 @@ jobs:
sudo apt-spy2 fix --commit
# after selecting a specific mirror, we need to run 'apt-get update'
sudo apt-get update
- sudo apt-get install netcdf-bin libnetcdf-dev doxygen graphviz wget gfortran libjpeg-dev libz-dev openmpi-bin libopenmpi-dev
+ sudo apt-get install netcdf-bin libnetcdf-dev doxygen graphviz wget gfortran libjpeg-dev libz-dev openmpi-bin libopenmpi-dev cmake
- name: cache-pnetcdf
id: cache-pnetcdf
diff --git a/.github/workflows/cmake_netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml b/.github/workflows/cmake_netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml
new file mode 100644
index 00000000000..d7dda70471c
--- /dev/null
+++ b/.github/workflows/cmake_netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml
@@ -0,0 +1,135 @@
+name: cmake_netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ env:
+ CPPFLAGS: "-I/home/runner/mpich/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include -I/home/runner/pnetcdf/include"
+ LDFLAGS: "-L/home/runner/mpich/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib -L/home/runner/pnetcdf/lib"
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Installs
+ run: |
+        sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev cmake
+ - name: cache-mpich
+ id: cache-mpich
+ uses: actions/cache@v2
+ with:
+ path: ~/mpich
+ key: mpich-${{ runner.os }}-3.3.2
+
+ - name: build-mpich
+ if: steps.cache-mpich.outputs.cache-hit != 'true'
+ run: |
+ wget http://www.mpich.org/static/downloads/3.3.2/mpich-3.3.2.tar.gz &> /dev/null
+ tar -xzf mpich-3.3.2.tar.gz
+ pushd mpich-3.3.2
+ ./configure --prefix=/home/runner/mpich
+ make
+ sudo make install
+ popd
+ - name: cache-hdf5
+ id: cache-hdf5
+ uses: actions/cache@v2
+ with:
+ path: ~/hdf5
+ key: hdf5-${{ runner.os }}-1.10.7-mpich-3.3.2
+
+ - name: build-hdf5
+ if: steps.cache-hdf5.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.7/src/hdf5-1.10.7.tar.gz &> /dev/null
+ tar -xzf hdf5-1.10.7.tar.gz
+ pushd hdf5-1.10.7
+ ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests
+ make
+ sudo make install
+ popd
+ - name: cache-netcdf-c
+ id: cache-netcdf-c
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-c
+ key: netcdf-c-${{ runner.os }}-4.7.4-mpich-3.3.2-2
+
+ - name: build-netcdf-c
+ if: steps.cache-netcdf-c.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-c-4.7.4.tar.gz &> /dev/null
+ tar -xzf netcdf-c-4.7.4.tar.gz
+ pushd netcdf-c-4.7.4
+ ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities
+ make -j
+ sudo make install
+ popd
+ - name: cache-netcdf-fortran
+ id: cache-netcdf-fortran
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-fortran
+ key: netcdf-fortran-${{ runner.os }}-4.5.3-mpich-3.3.2-2
+
+ - name: build-netcdf-fortran
+ if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://github.com/Unidata/netcdf-fortran/archive/v4.5.3.tar.gz &> /dev/null
+ tar -xzf v4.5.3.tar.gz
+ pushd netcdf-fortran-4.5.3
+ ./configure --prefix=/home/runner/netcdf-fortran
+ make -j
+ sudo make install
+ popd
+ - name: cache-pnetcdf
+ id: cache-pnetcdf
+ uses: actions/cache@v2
+ with:
+ path: ~/pnetcdf
+ key: pnetcdf-${{ runner.os }}-1.12.1-mpich-3.3.2
+
+ - name: build-pnetcdf
+ if: steps.cache-pnetcdf.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://parallel-netcdf.github.io/Release/pnetcdf-1.12.1.tar.gz &> /dev/null
+ tar -xzf pnetcdf-1.12.1.tar.gz
+ pushd pnetcdf-1.12.1
+ ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx
+ make
+ sudo make install
+ popd
+ - name: cmake build
+ run: |
+ set -x
+ ls -l
+ echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc
+ source .bashrc
+ export CC=/home/runner/mpich/bin/mpicc
+ export FC=/home/runner/mpich/bin/mpifort
+ export LD_LIBRARY_PATH=/home/runner/netcdf-c/lib:/home/runner/pnetcdf/lib:$LD_LIBRARY_PATH
+ rm -rf b1
+ mkdir b1
+ cd b1
+ cmake -Wno-dev -DPIO_ENABLE_NETCDF_INTEGRATION=On -DNetCDF_C_LIBRARY=/home/runner/netcdf-c/lib/libnetcdf.so -DNetCDF_C_INCLUDE_DIR=/home/runner/netcdf-c/include -DPnetCDF_PATH=/home/runner/pnetcdf -DPIO_ENABLE_FORTRAN=On -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off -DNetCDF_Fortran_LIBRARY=/home/runner/netcdf-fortran/lib/libnetcdff.so -DNetCDF_Fortran_INCLUDE_DIR=/home/runner/netcdf-fortran/include ..
+ make VERBOSE=1
+ make tests VERBOSE=1
+ ctest -VV
diff --git a/.github/workflows/netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml b/.github/workflows/netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml
new file mode 100644
index 00000000000..312ee6a2c50
--- /dev/null
+++ b/.github/workflows/netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml
@@ -0,0 +1,149 @@
+name: netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ env:
+ CPPFLAGS: "-I/home/runner/mpich/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include -I/home/runner/pnetcdf/include"
+ LDFLAGS: "-L/home/runner/mpich/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib -L/home/runner/pnetcdf/lib"
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Installs
+ run: |
+ sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev
+ - name: cache-mpich
+ id: cache-mpich
+ uses: actions/cache@v2
+ with:
+ path: ~/mpich
+ key: mpich-${{ runner.os }}-3.3.2
+
+ - name: build-mpich
+ if: steps.cache-mpich.outputs.cache-hit != 'true'
+ run: |
+ wget http://www.mpich.org/static/downloads/3.3.2/mpich-3.3.2.tar.gz &> /dev/null
+ tar -xzf mpich-3.3.2.tar.gz
+ pushd mpich-3.3.2
+ ./configure --prefix=/home/runner/mpich
+ make
+ sudo make install
+ popd
+ - name: cache-hdf5
+ id: cache-hdf5
+ uses: actions/cache@v2
+ with:
+ path: ~/hdf5
+ key: hdf5-${{ runner.os }}-1.10.7-mpich-3.3.2
+
+ - name: build-hdf5
+ if: steps.cache-hdf5.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.7/src/hdf5-1.10.7.tar.gz &> /dev/null
+ tar -xzf hdf5-1.10.7.tar.gz
+ pushd hdf5-1.10.7
+ ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests
+ make
+ sudo make install
+ popd
+ - name: cache-netcdf-c
+ id: cache-netcdf-c
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-c
+ key: netcdf-c-${{ runner.os }}-4.7.4-mpich-3.3.2-2
+
+ - name: build-netcdf-c
+ if: steps.cache-netcdf-c.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-c-4.7.4.tar.gz &> /dev/null
+ tar -xzf netcdf-c-4.7.4.tar.gz
+ pushd netcdf-c-4.7.4
+ ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities
+ make -j
+ sudo make install
+ popd
+ - name: cache-netcdf-fortran
+ id: cache-netcdf-fortran
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-fortran
+ key: netcdf-fortran-${{ runner.os }}-4.5.3-mpich-3.3.2-2
+
+ - name: build-netcdf-fortran
+ if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://github.com/Unidata/netcdf-fortran/archive/v4.5.3.tar.gz &> /dev/null
+ tar -xzf v4.5.3.tar.gz
+ pushd netcdf-fortran-4.5.3
+ ./configure --prefix=/home/runner/netcdf-fortran
+ make -j
+ sudo make install
+ popd
+ - name: cache-pnetcdf
+ id: cache-pnetcdf
+ uses: actions/cache@v2
+ with:
+ path: ~/pnetcdf
+ key: pnetcdf-${{ runner.os }}-1.12.1-mpich-3.3.2
+
+ - name: build-pnetcdf
+ if: steps.cache-pnetcdf.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://parallel-netcdf.github.io/Release/pnetcdf-1.12.1.tar.gz &> /dev/null
+ tar -xzf pnetcdf-1.12.1.tar.gz
+ pushd pnetcdf-1.12.1
+ ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx
+ make
+ sudo make install
+ popd
+ - name: cmake build
+ run: |
+ set -x
+ gcc --version
+ echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc
+ source .bashrc
+ export CC=/home/runner/mpich/bin/mpicc
+ export FC=/home/runner/mpich/bin/mpifort
+ export LD_LIBRARY_PATH="/home/runner/netcdf-c/lib:/home/runner/mpich/lib:/home/runner/hdf5/lib:/home/runner/netcdf-fortran/lib:/home/runner/pnetcdf/lib:$LD_LIBRARY_PATH"
+ mkdir build
+ cd build
+ cmake -Wno-dev -DNetCDF_C_LIBRARY=/home/runner/netcdf-c/lib/libnetcdf.so -DNetCDF_C_INCLUDE_DIR=/home/runner/netcdf-c/include -DPnetCDF_PATH='/home/runner/pnetcdf' -DPIO_ENABLE_FORTRAN=Off -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off .. || (cat CMakeFiles/CMakeOutput.log && cat CMakeFiles/CMakeError.log)
+ make VERBOSE=1
+ make tests VERBOSE=1
+# ctest -VV
+ - name: autotools build
+ run: |
+ set -x
+ gcc --version
+ echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc
+ source .bashrc
+ export CC=/home/runner/mpich/bin/mpicc
+ export FC=/home/runner/mpich/bin/mpifort
+ export CFLAGS="-fsanitize=address -fno-omit-frame-pointer"
+ export FCFLAGS="-fsanitize=address -fno-omit-frame-pointer"
+ autoreconf -i
+ ./configure --enable-fortran --enable-netcdf-integration
+ make -j check
+
+
diff --git a/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_no_pnetcdf_ncint_mpich-3.3.yml b/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_no_pnetcdf_ncint_mpich-3.3.yml
new file mode 100644
index 00000000000..40693e5896b
--- /dev/null
+++ b/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_no_pnetcdf_ncint_mpich-3.3.yml
@@ -0,0 +1,111 @@
+name: netcdf-4.7.4_hdf5-1.12.0_no_pnetcdf_ncint_mpich-3.3
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ env:
+ CPPFLAGS: "-I/home/runner/mpich/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include"
+ LDFLAGS: "-L/home/runner/mpich/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib"
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Installs
+ run: |
+ sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev
+ - name: cache-mpich
+ id: cache-mpich
+ uses: actions/cache@v2
+ with:
+ path: ~/mpich
+ key: mpich-${{ runner.os }}-3.3.2
+
+ - name: build-mpich
+ if: steps.cache-mpich.outputs.cache-hit != 'true'
+ run: |
+ wget http://www.mpich.org/static/downloads/3.3.2/mpich-3.3.2.tar.gz &> /dev/null
+ tar -xzf mpich-3.3.2.tar.gz
+ pushd mpich-3.3.2
+ ./configure --prefix=/home/runner/mpich
+ make
+ sudo make install
+ popd
+ - name: cache-hdf5
+ id: cache-hdf5
+ uses: actions/cache@v2
+ with:
+ path: ~/hdf5
+ key: hdf5-${{ runner.os }}-1.12.0-mpich-3.3.2-2
+
+ - name: build-hdf5
+ if: steps.cache-hdf5.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.12/hdf5-1.12.0/src/hdf5-1.12.0.tar.gz &> /dev/null
+ tar -xzf hdf5-1.12.0.tar.gz
+ pushd hdf5-1.12.0
+ ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests
+ make
+ sudo make install
+ popd
+ - name: cache-netcdf-c
+ id: cache-netcdf-c
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-c
+ key: netcdf-c-${{ runner.os }}-4.7.4-mpich-3.3.2-4
+
+ - name: build-netcdf-c
+ if: steps.cache-netcdf-c.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-c-4.7.4.tar.gz &> /dev/null
+ tar -xzf netcdf-c-4.7.4.tar.gz
+ pushd netcdf-c-4.7.4
+ ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities
+ make -j
+ sudo make install
+ popd
+ - name: cache-netcdf-fortran
+ id: cache-netcdf-fortran
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-fortran
+ key: netcdf-fortran-${{ runner.os }}-4.5.3-mpich-3.3.2-5
+
+ - name: build-netcdf-fortran
+ if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://github.com/Unidata/netcdf-fortran/archive/v4.5.3.tar.gz &> /dev/null
+ tar -xzf v4.5.3.tar.gz
+ pushd netcdf-fortran-4.5.3
+ ./configure --prefix=/home/runner/netcdf-fortran
+ make -j
+ sudo make install
+ popd
+ - name: autotools build
+ run: |
+ set -x
+ gcc --version
+ export PATH=/home/runner/mpich/bin:/home/runner/netcdf-c/bin:$PATH
+ export CC=/home/runner/mpich/bin/mpicc
+ export FC=/home/runner/mpich/bin/mpifort
+ autoreconf -i
+ ./configure --enable-fortran --enable-netcdf-integration --disable-pnetcdf
+ make -j check
+
+
diff --git a/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_pnetcdf-12.2_ncint_mpich-3.3_asan.yml b/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_pnetcdf-12.2_ncint_mpich-3.3_asan.yml
new file mode 100644
index 00000000000..db2c2e859c9
--- /dev/null
+++ b/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_pnetcdf-12.2_ncint_mpich-3.3_asan.yml
@@ -0,0 +1,149 @@
+name: netcdf-4.7.4_hdf5-1.12.0_pnetcdf-12.2_ncint_mpich-3.3_asan
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ env:
+ CPPFLAGS: "-I/home/runner/mpich/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include -I/home/runner/pnetcdf/include"
+ LDFLAGS: "-L/home/runner/mpich/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib -L/home/runner/pnetcdf/lib"
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Installs
+ run: |
+ sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev
+ - name: cache-mpich
+ id: cache-mpich
+ uses: actions/cache@v2
+ with:
+ path: ~/mpich
+ key: mpich-${{ runner.os }}-3.3.2
+
+ - name: build-mpich
+ if: steps.cache-mpich.outputs.cache-hit != 'true'
+ run: |
+ wget http://www.mpich.org/static/downloads/3.3.2/mpich-3.3.2.tar.gz &> /dev/null
+ tar -xzf mpich-3.3.2.tar.gz
+ pushd mpich-3.3.2
+ ./configure --prefix=/home/runner/mpich
+ make
+ sudo make install
+ popd
+ - name: cache-hdf5
+ id: cache-hdf5
+ uses: actions/cache@v2
+ with:
+ path: ~/hdf5
+ key: hdf5-${{ runner.os }}-1.12.0-mpich-3.3.2-2
+
+ - name: build-hdf5
+ if: steps.cache-hdf5.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.12/hdf5-1.12.0/src/hdf5-1.12.0.tar.gz &> /dev/null
+ tar -xzf hdf5-1.12.0.tar.gz
+ pushd hdf5-1.12.0
+ ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests
+ make
+ sudo make install
+ popd
+ - name: cache-netcdf-c
+ id: cache-netcdf-c
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-c
+ key: netcdf-c-${{ runner.os }}-4.7.4-mpich-3.3.2-4
+
+ - name: build-netcdf-c
+ if: steps.cache-netcdf-c.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-c-4.7.4.tar.gz &> /dev/null
+ tar -xzf netcdf-c-4.7.4.tar.gz
+ pushd netcdf-c-4.7.4
+ ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities
+ make -j
+ sudo make install
+ popd
+ - name: cache-netcdf-fortran
+ id: cache-netcdf-fortran
+ uses: actions/cache@v2
+ with:
+ path: ~/netcdf-fortran
+ key: netcdf-fortran-${{ runner.os }}-4.5.3-mpich-3.3.2-5
+
+ - name: build-netcdf-fortran
+ if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://github.com/Unidata/netcdf-fortran/archive/v4.5.3.tar.gz &> /dev/null
+ tar -xzf v4.5.3.tar.gz
+ pushd netcdf-fortran-4.5.3
+ ./configure --prefix=/home/runner/netcdf-fortran
+ make -j
+ sudo make install
+ popd
+ - name: cache-pnetcdf
+ id: cache-pnetcdf
+ uses: actions/cache@v2
+ with:
+ path: ~/pnetcdf
+ key: pnetcdf-${{ runner.os }}-1.12.2-mpich-3.3.2
+
+ - name: build-pnetcdf
+ if: steps.cache-pnetcdf.outputs.cache-hit != 'true'
+ run: |
+ set -x
+ export PATH="/home/runner/mpich/bin:$PATH"
+ export CC=/home/runner/mpich/bin/mpicc
+ wget https://parallel-netcdf.github.io/Release/pnetcdf-1.12.2.tar.gz &> /dev/null
+ tar -xzf pnetcdf-1.12.2.tar.gz
+ pushd pnetcdf-1.12.2
+ ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx
+ make
+ sudo make install
+ popd
+ - name: cmake build
+ run: |
+ set -x
+ gcc --version
+ echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc
+ source .bashrc
+ export CC=/home/runner/mpich/bin/mpicc
+ export FC=/home/runner/mpich/bin/mpifort
+ export LD_LIBRARY_PATH="/home/runner/netcdf-c/lib:/home/runner/mpich/lib:/home/runner/hdf5/lib:/home/runner/netcdf-fortran/lib:/home/runner/pnetcdf/lib:$LD_LIBRARY_PATH"
+ mkdir build
+ cd build
+ cmake -Wno-dev -DNetCDF_C_LIBRARY=/home/runner/netcdf-c/lib/libnetcdf.so -DNetCDF_C_INCLUDE_DIR=/home/runner/netcdf-c/include -DPnetCDF_PATH='/home/runner/pnetcdf' -DPIO_ENABLE_FORTRAN=Off -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off .. || (cat CMakeFiles/CMakeOutput.log && cat CMakeFiles/CMakeError.log)
+ make VERBOSE=1
+ make tests VERBOSE=1
+# ctest -VV
+ - name: autotools build
+ run: |
+ set -x
+ gcc --version
+ echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc
+ source .bashrc
+ export CC=/home/runner/mpich/bin/mpicc
+ export FC=/home/runner/mpich/bin/mpifort
+ export CFLAGS="-fsanitize=address -fno-omit-frame-pointer"
+ export FCFLAGS="-fsanitize=address -fno-omit-frame-pointer"
+ autoreconf -i
+ ./configure --enable-fortran --enable-netcdf-integration
+ make -j check
+
+
diff --git a/.github/workflows/a4.yml b/.github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_mpich-3.3.yml
similarity index 100%
rename from .github/workflows/a4.yml
rename to .github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_mpich-3.3.yml
diff --git a/.github/workflows/a3.yml b/.github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_openmpi_4.0.4.yml
similarity index 100%
rename from .github/workflows/a3.yml
rename to .github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_openmpi_4.0.4.yml
diff --git a/.github/workflows/strict_autotools.yml b/.github/workflows/strict_autotools_ubuntu_latest.yml
similarity index 100%
rename from .github/workflows/strict_autotools.yml
rename to .github/workflows/strict_autotools_ubuntu_latest.yml
diff --git a/.gitignore b/.gitignore
index 66c0e824b32..eca734371da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,6 @@ html/
\#*\#
*.o
Makefile.in
-*.F90.in
*.lo
*.la
Makefile
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cd6af59be0a..b40745d1692 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -243,10 +243,14 @@ INCLUDE(FindNetCDF)
message("Fortran Library build is ${PIO_ENABLE_FORTRAN}")
if (PIO_ENABLE_FORTRAN)
find_package (NetCDF REQUIRED COMPONENTS C Fortran)
- find_package (PnetCDF COMPONENTS C Fortran)
+ if (WITH_PNETCDF)
+ find_package (PnetCDF COMPONENTS C Fortran)
+ endif()
else()
find_package (NetCDF REQUIRED COMPONENTS C)
- find_package (PnetCDF COMPONENTS C)
+ if (WITH_PNETCDF)
+ find_package (PnetCDF COMPONENTS C)
+ endif()
endif()
# Did we find pnetCDF? If so, set _PNETCDF in config.h.
@@ -288,8 +292,6 @@ if (PIO_ENABLE_DOC)
add_subdirectory (doc)
endif ()
-
-
SET(STATUS_PNETCDF PnetCDF_C_FOUND)
###
diff --git a/README.md b/README.md
index 6e8b263e018..3ecaaa6a05e 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,33 @@
# ParallelIO
-A high-level Parallel I/O Library for structured grid applications
+The Parallel IO libraries (PIO) are high-level parallel I/O C and
+Fortran libraries for applications that need to do netCDF I/O from
+large numbers of processors on a HPC system.
+
+PIO provides a netCDF-like API, and allows users to designate some
+subset of processors to perform IO. Computational code calls
+netCDF-like functions to read and write data, and PIO uses the IO
+processors to perform all necessary IO.
+
+## Intracomm Mode
+
+In Intracomm mode, PIO allows the user to designate some subset of
+processors to do all I/O. The I/O processors also participate in
+computational work.
+
+
+
+## Async Mode
+
+PIO also supports the creation of multiple computation components,
+each containing many processors, and one shared set of IO
+processors. The computational components can perform write operations
+asynchronously, and the IO processors will take care of all storage
+interaction.
+
+
## Website
@@ -13,10 +40,15 @@ The (low-traffic) PIO mailing list is at
https://groups.google.com/forum/#!forum/parallelio, send email to the
list at parallelio@googlegroups.com.
-## Nightly Tests
+## Testing
+
+The results of our continuous integration testing with GitHub actions
+can be found on any of the Pull Requests on the GitHub site:
+https://github.com/NCAR/ParallelIO.
-The results of our nightly tests on multiple platforms can be found on our
-cdash site at [http://my.cdash.org/index.php?project=PIO](http://my.cdash.org/index.php?project=PIO).
+The results of our nightly tests on multiple platforms can be found on
+our cdash site at
+[http://my.cdash.org/index.php?project=PIO](http://my.cdash.org/index.php?project=PIO).
## Dependencies
@@ -24,10 +56,10 @@ PIO can use NetCDF (version 4.6.1+) and/or PnetCDF (version 1.9.0+)
for I/O. NetCDF may be built with or without netCDF-4 features. NetCDF
is required for PIO, PnetCDF is optional.
-Ideally, the NetCDF version should be built with MPI, which requires that it
-be linked with an MPI-enabled version of HDF5. Optionally, NetCDF can be
-built with DAP support, which introduces a dependency on CURL. Additionally,
-HDF5, itself, introduces dependencies on LIBZ and (optionally) SZIP.
+The NetCDF C library must be built with MPI, which requires that it be
+linked with an MPI-enabled version of HDF5. Optionally, NetCDF can be
+built with DAP support, which introduces a dependency on CURL. HDF5,
+itself, introduces dependencies on LIBZ and (optionally) SZIP.
## Building PIO
@@ -69,3 +101,17 @@ CC=mpicc FC=mpif90 cmake [-DOPTION1=value1 -DOPTION2=value2 ...] /path/to/pio/so
Full instructions for the cmake build can be found in the installation
documentation.
+
+# References
+
+Hartnett, E., Edwards, J., "THE PARALLELIO (PIO) C/FORTRAN LIBRARIES
+FOR SCALABLE HPC PERFORMANCE", 37th Conference on Environmental
+Information Processing Technologies, American Meteorological Society
+Annual Meeting, January, 2021. Retrieved on Feb 3, 2021, from
+[https://www.researchgate.net/publication/348169990_THE_PARALLELIO_PIO_CFORTRAN_LIBRARIES_FOR_SCALABLE_HPC_PERFORMANCE].
+
+Hartnett, E., Edwards, J., "POSTER: THE PARALLELIO (PIO) C/FORTRAN LIBRARIES
+FOR SCALABLE HPC PERFORMANCE", 37th Conference on Environmental
+Information Processing Technologies, American Meteorological Society
+Annual Meeting, January, 2021. Retrieved on Feb 3, 2021, from
+[https://www.researchgate.net/publication/348170136_THE_PARALLELIO_PIO_CFORTRAN_LIBRARIES_FOR_SCALABLE_HPC_PERFORMANCE].
diff --git a/configure.ac b/configure.ac
index 4c9eefa0937..75a98451b3f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -38,6 +38,7 @@ LT_INIT
# Find and learn about the C compiler.
AC_PROG_CC
+AC_PROG_CC_C99
# Compiler with version information. This consists of the full path
# name of the compiler and the reported version number.
@@ -155,15 +156,15 @@ AC_MSG_RESULT([$enable_mpe])
if test "x$enable_mpe" = xyes; then
AC_SEARCH_LIBS([pthread_setspecific], [pthread], [], [], [])
- dnl AC_SEARCH_LIBS([MPE_Log_get_event_number], [mpe], [HAVE_LIBMPE=yes], [HAVE_LIBMPE=no], [])
- dnl AC_SEARCH_LIBS([MPE_Init_mpi_core], [lmpe], [HAVE_LIBLMPE=yes], [HAVE_LIBLMPE=no], [])
+ AC_SEARCH_LIBS([MPE_Log_get_event_number], [mpe], [HAVE_LIBMPE=yes], [HAVE_LIBMPE=no], [])
+ AC_SEARCH_LIBS([MPE_Init_mpi_core], [lmpe], [HAVE_LIBLMPE=yes], [HAVE_LIBLMPE=no], [])
AC_CHECK_HEADERS([mpe.h], [HAVE_MPE=yes], [HAVE_MPE=no])
- dnl if test "x$HAVE_LIBMPE" != xyes; then
- dnl AC_MSG_ERROR([-lmpe not found but --enable-mpe used.])
- dnl fi
- dnl if test "x$HAVE_LIBLMPE" != xyes; then
- dnl AC_MSG_ERROR([-llmpe not found but --enable-mpe used.])
- dnl fi
+ if test "x$HAVE_LIBMPE" != xyes; then
+ AC_MSG_ERROR([-lmpe not found but --enable-mpe used.])
+ fi
+ if test "x$HAVE_LIBLMPE" != xyes; then
+ AC_MSG_ERROR([-llmpe not found but --enable-mpe used.])
+ fi
if test $enable_fortran = yes; then
AC_MSG_ERROR([MPE not implemented in Fortran tests and examples. Build without --enable-fortran])
fi
@@ -387,16 +388,20 @@ fi
if test "x$enable_netcdf_integration" = xyes -a "x$have_netcdf_par" = xno; then
AC_MSG_ERROR([Cannot use netCDF integration unless netCDF library was built for parallel I/O.])
fi
-# These are needed by ncdispatch.h. Only build with HDF5 parallel
-# versions of netCDF. */
+
+# If netCDF integration is used, set this preprocessor symbol.
if test "x$enable_netcdf_integration" = xyes; then
- AC_DEFINE([HDF5_PARALLEL],[1],[Does HDF5 library provide parallel access])
- AC_DEFINE([USE_NETCDF4],[1],[Does HDF5 library provide parallel access])
AC_DEFINE([NETCDF_INTEGRATION],[1],[Are we building with netCDF integration])
fi
-
AM_CONDITIONAL(BUILD_NCINT, [test "x$enable_netcdf_integration" = xyes])
-AM_CONDITIONAL(NETCDF_INTEGRATION, [test "x$enable_netcdf_integration" = xyes])
+
+# If we are building netCDF integration and also then PIO Fortran
+# library, then we also need netcdf-fortran.
+if test "x$enable_netcdf_integration" = xyes -a "x$enable_fortran" = xyes; then
+ AC_LANG_PUSH([Fortran])
+ AC_CHECK_LIB([netcdff], [nf_inq_libvers], [], [AC_MSG_ERROR([Can't find or link to the netcdf-fortran library, required because both --enable-fortran and --enable-netcdf-integration are specified.])])
+ AC_LANG_POP([Fortran])
+fi
AC_CONFIG_FILES([tests/general/pio_tutil.F90:tests/general/util/pio_tutil.F90])
diff --git a/ctest/CTestScript-Test.cmake b/ctest/CTestScript-Test.cmake
index 79aec3bca68..cf50195a255 100644
--- a/ctest/CTestScript-Test.cmake
+++ b/ctest/CTestScript-Test.cmake
@@ -1,6 +1,6 @@
#==============================================================================
#
-# This is the CTest script for generating test results for submission to the
+# This is the CTest script for generating test results for submission to the
# CTest Dashboard site: my.cdash.org.
#
# Example originally stolen from:
@@ -11,14 +11,14 @@
#-- Get the common build information
#-------------------------------------------
-set (CTEST_SITE $ENV{PIO_DASHBOARD_SITE})
+set (CTEST_SITE $ENV{PIO_DASHBOARD_SITE}-$ENV{PIO_COMPILER_ID})
set (CTEST_BUILD_NAME $ENV{PIO_DASHBOARD_BUILD_NAME})
set (CTEST_SOURCE_DIRECTORY $ENV{PIO_DASHBOARD_SOURCE_DIR})
set (CTEST_BINARY_DIRECTORY $ENV{PIO_DASHBOARD_BINARY_DIR})
-# -----------------------------------------------------------
+# -----------------------------------------------------------
# -- Run CTest- TESTING ONLY (Appended to existing TAG)
-# -----------------------------------------------------------
+# -----------------------------------------------------------
## -- Start
ctest_start("${CTEST_SCRIPT_ARG}" APPEND)
diff --git a/doc/images/I_O_on_many_async_small.png b/doc/images/I_O_on_many_async_small.png
new file mode 100644
index 00000000000..cafa6bc30c7
Binary files /dev/null and b/doc/images/I_O_on_many_async_small.png differ
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index b8fa8191de9..8b5b8d3b238 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -3,16 +3,8 @@
###-------------------------------------------------------------------------###
if (PIO_ENABLE_FORTRAN)
- if ("${PLATFORM}" STREQUAL "goldbach" )
ADD_SUBDIRECTORY(f03)
- elseif ("${PLATFORM}" STREQUAL "yellowstone" )
- ADD_SUBDIRECTORY(f03)
- ADD_SUBDIRECTORY(c)
- else()
-# ADD_SUBDIRECTORY(f03)
ADD_SUBDIRECTORY(c)
- # ADD_SUBDIRECTORY(cxx)
- endif()
else()
ADD_SUBDIRECTORY(c)
endif()
diff --git a/examples/c/example1.c b/examples/c/example1.c
index c6bd79f9386..6b019762464 100644
--- a/examples/c/example1.c
+++ b/examples/c/example1.c
@@ -1,10 +1,10 @@
/**
- * @file
+ * @file
* @brief A simple C example for the ParallelIO Library.
*
* This example creates a netCDF output file with one dimension and
* one variable. It first writes, then reads the sample file using the
- * ParallelIO library.
+ * ParallelIO library.
*
* This example can be run in parallel for 1, 2, 4, 8, or 16
* processors.
@@ -29,7 +29,7 @@
/** The number of possible output netCDF output flavors available to
* the ParallelIO library. */
-#define NUM_NETCDF_FLAVORS 4
+#define NUM_NETCDF_FLAVORS 4
/** The number of dimensions in the example data. In this simple
example, we are using one-dimensional data. */
@@ -57,38 +57,38 @@
/** Handle MPI errors. This should only be used with MPI library
* function calls. */
#define MPIERR(e) do { \
- MPI_Error_string(e, err_buffer, &resultlen); \
- printf("MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \
- MPI_Finalize(); \
- return 2; \
- } while (0)
+ MPI_Error_string(e, exerr_buffer, &exresultlen); \
+ printf("MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, exerr_buffer); \
+ MPI_Finalize(); \
+ return 2; \
+ } while (0)
/** Handle non-MPI errors by finalizing the MPI library and exiting
* with an exit code. */
#define ERR(e) do { \
- MPI_Finalize(); \
- return e; \
- } while (0)
+ MPI_Finalize(); \
+ return e; \
+ } while (0)
/** Global err buffer for MPI. When there is an MPI error, this buffer
* is used to store the error message that is associated with the MPI
* error. */
-char err_buffer[MPI_MAX_ERROR_STRING];
+char exerr_buffer[MPI_MAX_ERROR_STRING];
/** This is the length of the most recent MPI error message, stored
* int the global error string. */
-int resultlen;
+int exresultlen;
/** @brief Check the output file.
*
- * Use netCDF to check that the output is as expected.
+ * Use netCDF to check that the output is as expected.
*
- * @param ntasks The number of processors running the example.
- * @param filename The name of the example file to check.
+ * @param ntasks The number of processors running the example.
+ * @param filename The name of the example file to check.
*
* @return 0 if example file is correct, non-zero otherwise. */
int check_file(int ntasks, char *filename) {
-
+
int ncid; /**< File ID from netCDF. */
int ndims; /**< Number of dimensions. */
int nvars; /**< Number of variables. */
@@ -105,43 +105,43 @@ int check_file(int ntasks, char *filename) {
size_t count[NDIM]; /**< Number of elements to read. */
int buffer[DIM_LEN]; /**< Buffer to read in data. */
int expected[DIM_LEN]; /**< Data values we expect to find. */
-
+
/* Open the file. */
if ((ret = nc_open(filename, 0, &ncid)))
- return ret;
+ return ret;
/* Check the metadata. */
if ((ret = nc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid)))
- return ret;
+ return ret;
if (ndims != NDIM || nvars != 1 || ngatts != 0 || unlimdimid != -1)
- return ERR_BAD;
+ return ERR_BAD;
if ((ret = nc_inq_dim(ncid, 0, dim_name, &dimlen)))
- return ret;
+ return ret;
if (dimlen != DIM_LEN || strcmp(dim_name, DIM_NAME))
- return ERR_BAD;
+ return ERR_BAD;
if ((ret = nc_inq_var(ncid, 0, var_name, &xtype, &ndims, dimids, &natts)))
- return ret;
+ return ret;
if (xtype != NC_INT || ndims != NDIM || dimids[0] != 0 || natts != 0)
- return ERR_BAD;
+ return ERR_BAD;
/* Use the number of processors to figure out what the data in the
* file should look like. */
int div = DIM_LEN/ntasks;
for (int d = 0; d < DIM_LEN; d++)
- expected[d] = START_DATA_VAL + d/div;
-
+ expected[d] = START_DATA_VAL + d/div;
+
/* Check the data. */
start[0] = 0;
count[0] = DIM_LEN;
if ((ret = nc_get_vara(ncid, 0, start, count, buffer)))
- return ret;
+ return ret;
for (int d = 0; d < DIM_LEN; d++)
- if (buffer[d] != expected[d])
- return ERR_BAD;
+ if (buffer[d] != expected[d])
+ return ERR_BAD;
/* Close the file. */
if ((ret = nc_close(ncid)))
- return ret;
+ return ret;
/* Everything looks good! */
return 0;
@@ -180,134 +180,134 @@ int check_file(int ntasks, char *filename) {
foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
}
-
+
@param [in] argc argument count (should be zero)
@param [in] argv argument array (should be NULL)
@retval examplePioClass* Pointer to self.
*/
int main(int argc, char* argv[])
{
- /** Set to non-zero to get output to stdout. */
- int verbose = 0;
-
- /** Zero-based rank of processor. */
- int my_rank;
-
- /** Number of processors involved in current execution. */
- int ntasks;
-
- /** Different output flavors. The example file is written (and
- * then read) four times. The first two flavors,
- * parallel-netcdf, and netCDF serial, both produce a netCDF
- * classic format file (but with different libraries). The
- * last two produce netCDF4/HDF5 format files, written with
- * and without using netCDF-4 parallel I/O. */
- int format[NUM_NETCDF_FLAVORS];
-
- /** Number of processors that will do IO. In this example we
- * will do IO from all processors. */
- int niotasks;
-
- /** Stride in the mpi rank between io tasks. Always 1 in this
- * example. */
- int ioproc_stride = 1;
-
- /** Zero based rank of first processor to be used for I/O. */
- int ioproc_start = 0;
-
- /** The dimension ID. */
- int dimid;
-
- /** Array index per processing unit. This is the number of
- * elements of the data array that will be handled by each
- * processor. In this example there are 16 data elements. If the
- * example is run on 4 processors, then arrIdxPerPe will be 4. */
- PIO_Offset elements_per_pe;
-
- /* Length of the dimensions in the data. This simple example
- * uses one-dimensional data. The lenght along that dimension
- * is DIM_LEN (16). */
- int dim_len[1] = {DIM_LEN};
-
- /** The ID for the parallel I/O system. It is set by
- * PIOc_Init_Intracomm(). It references an internal structure
- * containing the general IO subsystem data and MPI
- * structure. It is passed to PIOc_finalize() to free
- * associated resources, after all I/O, but before
- * MPI_Finalize is called. */
- int iosysid;
-
- /** The ncid of the netCDF file created in this example. */
- int ncid;
-
- /** The ID of the netCDF varable in the example file. */
- int varid;
-
- /** The I/O description ID as passed back by PIOc_InitDecomp()
- * and freed in PIOc_freedecomp(). */
- int ioid;
-
- /** A buffer for sample data. The size of this array will
- * vary depending on how many processors are involved in the
- * execution of the example code. It's length will be the same
- * as elements_per_pe.*/
- int *buffer;
-
- /** A 1-D array which holds the decomposition mapping for this
- * example. The size of this array will vary depending on how
- * many processors are involved in the execution of the
- * example code. It's length will be the same as
- * elements_per_pe. */
- PIO_Offset *compdof;
+ /** Set to non-zero to get output to stdout. */
+ int verbose = 0;
+
+ /** Zero-based rank of processor. */
+ int my_rank;
+
+ /** Number of processors involved in current execution. */
+ int ntasks;
+
+ /** Different output flavors. The example file is written (and
+ * then read) four times. The first two flavors,
+ * parallel-netcdf, and netCDF serial, both produce a netCDF
+ * classic format file (but with different libraries). The
+ * last two produce netCDF4/HDF5 format files, written with
+ * and without using netCDF-4 parallel I/O. */
+ int format[NUM_NETCDF_FLAVORS];
+
+ /** Number of processors that will do IO. In this example we
+ * will do IO from all processors. */
+ int niotasks;
+
+ /** Stride in the mpi rank between io tasks. Always 1 in this
+ * example. */
+ int ioproc_stride = 1;
+
+ /** Zero based rank of first processor to be used for I/O. */
+ int ioproc_start = 0;
+
+ /** The dimension ID. */
+ int dimid;
+
+ /** Array index per processing unit. This is the number of
+ * elements of the data array that will be handled by each
+ * processor. In this example there are 16 data elements. If the
+ * example is run on 4 processors, then arrIdxPerPe will be 4. */
+ PIO_Offset elements_per_pe;
+
+ /* Length of the dimensions in the data. This simple example
+ * uses one-dimensional data. The lenght along that dimension
+ * is DIM_LEN (16). */
+ int dim_len[1] = {DIM_LEN};
+
+ /** The ID for the parallel I/O system. It is set by
+ * PIOc_Init_Intracomm(). It references an internal structure
+ * containing the general IO subsystem data and MPI
+ * structure. It is passed to PIOc_finalize() to free
+ * associated resources, after all I/O, but before
+ * MPI_Finalize is called. */
+ int iosysid;
+
+ /** The ncid of the netCDF file created in this example. */
+ int ncid;
+
+ /** The ID of the netCDF varable in the example file. */
+ int varid;
+
+ /** The I/O description ID as passed back by PIOc_InitDecomp()
+ * and freed in PIOc_freedecomp(). */
+ int ioid;
+
+ /** A buffer for sample data. The size of this array will
+ * vary depending on how many processors are involved in the
+ * execution of the example code. It's length will be the same
+ * as elements_per_pe.*/
+ int *buffer;
+
+ /** A 1-D array which holds the decomposition mapping for this
+ * example. The size of this array will vary depending on how
+ * many processors are involved in the execution of the
+ * example code. It's length will be the same as
+ * elements_per_pe. */
+ PIO_Offset *compdof;
/** Test filename. */
char filename[NC_MAX_NAME + 1];
/** The number of netCDF flavors available in this build. */
int num_flavors = 0;
-
- /** Used for command line processing. */
- int c;
-
- /** Return value. */
- int ret;
-
- /* Parse command line. */
- while ((c = getopt(argc, argv, "v")) != -1)
- switch (c)
- {
- case 'v':
- verbose++;
- break;
- default:
- break;
- }
-
-#ifdef TIMING
- /* Initialize the GPTL timing library. */
- if ((ret = GPTLinitialize ()))
- return ret;
-#endif
-
- /* Initialize MPI. */
- if ((ret = MPI_Init(&argc, &argv)))
- MPIERR(ret);
- if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
- MPIERR(ret);
-
- /* Learn my rank and the total number of processors. */
- if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
- MPIERR(ret);
- if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
- MPIERR(ret);
-
- /* Check that a valid number of processors was specified. */
- if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
- ntasks == 8 || ntasks == 16))
- fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
- if (verbose)
- printf("%d: ParallelIO Library example1 running on %d processors.\n",
- my_rank, ntasks);
+
+ /** Used for command line processing. */
+ int c;
+
+ /** Return value. */
+ int ret;
+
+ /* Parse command line. */
+ while ((c = getopt(argc, argv, "v")) != -1)
+ switch (c)
+ {
+ case 'v':
+ verbose++;
+ break;
+ default:
+ break;
+ }
+
+#ifdef TIMING
+ /* Initialize the GPTL timing library. */
+ if ((ret = GPTLinitialize ()))
+ return ret;
+#endif
+
+ /* Initialize MPI. */
+ if ((ret = MPI_Init(&argc, &argv)))
+ MPIERR(ret);
+ if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
+ MPIERR(ret);
+
+ /* Learn my rank and the total number of processors. */
+ if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
+ MPIERR(ret);
+ if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
+ MPIERR(ret);
+
+ /* Check that a valid number of processors was specified. */
+ if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
+ ntasks == 8 || ntasks == 16))
+ fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
+ if (verbose)
+ printf("%d: ParallelIO Library example1 running on %d processors.\n",
+ my_rank, ntasks);
#ifdef USE_MPE
/* If MPE logging is being used, then initialize it. */
@@ -316,7 +316,7 @@ int check_file(int ntasks, char *filename) {
#endif /* USE_MPE */
/* keep things simple - 1 iotask per MPI process */
- niotasks = ntasks;
+ niotasks = ntasks;
/* Turn on logging if available. */
/* PIOc_set_log_level(4); */
@@ -324,27 +324,27 @@ int check_file(int ntasks, char *filename) {
/* Change error handling to return errors. */
if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
return ret;
-
- /* Initialize the PIO IO system. This specifies how
- * many and which processors are involved in I/O. */
- if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
- ioproc_start, PIO_REARR_SUBSET, &iosysid)))
- ERR(ret);
-
- /* Describe the decomposition. This is a 1-based array, so add 1! */
- elements_per_pe = DIM_LEN / ntasks;
- if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
- return PIO_ENOMEM;
- for (int i = 0; i < elements_per_pe; i++)
- compdof[i] = my_rank * elements_per_pe + i + 1;
-
- /* Create the PIO decomposition for this example. */
- if (verbose)
- printf("rank: %d Creating decomposition...\n", my_rank);
- if ((ret = PIOc_InitDecomp(iosysid, PIO_INT, NDIM, dim_len, (PIO_Offset)elements_per_pe,
- compdof, &ioid, NULL, NULL, NULL)))
- ERR(ret);
- free(compdof);
+
+ /* Initialize the PIO IO system. This specifies how
+ * many and which processors are involved in I/O. */
+ if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
+ ioproc_start, PIO_REARR_SUBSET, &iosysid)))
+ ERR(ret);
+
+ /* Describe the decomposition. This is a 1-based array, so add 1! */
+ elements_per_pe = DIM_LEN / ntasks;
+ if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
+ return PIO_ENOMEM;
+ for (int i = 0; i < elements_per_pe; i++)
+ compdof[i] = my_rank * elements_per_pe + i + 1;
+
+ /* Create the PIO decomposition for this example. */
+ if (verbose)
+ printf("rank: %d Creating decomposition...\n", my_rank);
+ if ((ret = PIOc_InitDecomp(iosysid, PIO_INT, NDIM, dim_len, (PIO_Offset)elements_per_pe,
+ compdof, &ioid, NULL, NULL, NULL)))
+ ERR(ret);
+ free(compdof);
/* The number of favors may change with the build parameters. */
#ifdef _PNETCDF
@@ -355,88 +355,88 @@ int check_file(int ntasks, char *filename) {
format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif
-
- /* Use PIO to create the example file in each of the four
- * available ways. */
- for (int fmt = 0; fmt < num_flavors; fmt++)
- {
+
+ /* Use PIO to create the example file in each of the four
+ * available ways. */
+ for (int fmt = 0; fmt < num_flavors; fmt++)
+ {
/* Create a filename. */
sprintf(filename, "example1_%d.nc", fmt);
-
- /* Create the netCDF output file. */
- if (verbose)
- printf("rank: %d Creating sample file %s with format %d...\n",
- my_rank, filename, format[fmt]);
- if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename,
- PIO_CLOBBER)))
- ERR(ret);
-
- /* Define netCDF dimension and variable. */
- if (verbose)
- printf("rank: %d Defining netCDF metadata...\n", my_rank);
- if ((ret = PIOc_def_dim(ncid, DIM_NAME, (PIO_Offset)dim_len[0], &dimid)))
- ERR(ret);
- if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, &dimid, &varid)))
- ERR(ret);
- if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
-
- /* Prepare sample data. */
- if (!(buffer = malloc(elements_per_pe * sizeof(int))))
- return PIO_ENOMEM;
- for (int i = 0; i < elements_per_pe; i++)
- buffer[i] = START_DATA_VAL + my_rank;
-
- /* Write data to the file. */
- if (verbose)
- printf("rank: %d Writing sample data...\n", my_rank);
- if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
- buffer, NULL)))
- ERR(ret);
- if ((ret = PIOc_sync(ncid)))
- ERR(ret);
-
- /* Free buffer space used in this example. */
- free(buffer);
-
- /* Close the netCDF file. */
- if (verbose)
- printf("rank: %d Closing the sample data file...\n", my_rank);
- if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
- }
-
- /* Free the PIO decomposition. */
- if (verbose)
- printf("rank: %d Freeing PIO decomposition...\n", my_rank);
- if ((ret = PIOc_freedecomp(iosysid, ioid)))
- ERR(ret);
-
- /* Finalize the IO system. */
- if (verbose)
- printf("rank: %d Freeing PIO resources...\n", my_rank);
- if ((ret = PIOc_finalize(iosysid)))
- ERR(ret);
-
- /* Check the output file. */
- if (!my_rank)
- for (int fmt = 0; fmt < num_flavors; fmt++)
+
+ /* Create the netCDF output file. */
+ if (verbose)
+ printf("rank: %d Creating sample file %s with format %d...\n",
+ my_rank, filename, format[fmt]);
+ if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename,
+ PIO_CLOBBER)))
+ ERR(ret);
+
+ /* Define netCDF dimension and variable. */
+ if (verbose)
+ printf("rank: %d Defining netCDF metadata...\n", my_rank);
+ if ((ret = PIOc_def_dim(ncid, DIM_NAME, (PIO_Offset)dim_len[0], &dimid)))
+ ERR(ret);
+ if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, &dimid, &varid)))
+ ERR(ret);
+ if ((ret = PIOc_enddef(ncid)))
+ ERR(ret);
+
+ /* Prepare sample data. */
+ if (!(buffer = malloc(elements_per_pe * sizeof(int))))
+ return PIO_ENOMEM;
+ for (int i = 0; i < elements_per_pe; i++)
+ buffer[i] = START_DATA_VAL + my_rank;
+
+ /* Write data to the file. */
+ if (verbose)
+ printf("rank: %d Writing sample data...\n", my_rank);
+ if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
+ buffer, NULL)))
+ ERR(ret);
+ if ((ret = PIOc_sync(ncid)))
+ ERR(ret);
+
+ /* Free buffer space used in this example. */
+ free(buffer);
+
+ /* Close the netCDF file. */
+ if (verbose)
+ printf("rank: %d Closing the sample data file...\n", my_rank);
+ if ((ret = PIOc_closefile(ncid)))
+ ERR(ret);
+ }
+
+ /* Free the PIO decomposition. */
+ if (verbose)
+ printf("rank: %d Freeing PIO decomposition...\n", my_rank);
+ if ((ret = PIOc_freedecomp(iosysid, ioid)))
+ ERR(ret);
+
+ /* Finalize the IO system. */
+ if (verbose)
+ printf("rank: %d Freeing PIO resources...\n", my_rank);
+ if ((ret = PIOc_finalize(iosysid)))
+ ERR(ret);
+
+ /* Check the output file. */
+ if (!my_rank)
+ for (int fmt = 0; fmt < num_flavors; fmt++)
{
sprintf(filename, "example1_%d.nc", fmt);
- if ((ret = check_file(ntasks, filename)))
- ERR(ret);
+ if ((ret = check_file(ntasks, filename)))
+ ERR(ret);
}
- /* Finalize the MPI library. */
- MPI_Finalize();
+ /* Finalize the MPI library. */
+ MPI_Finalize();
-#ifdef TIMING
- /* Finalize the GPTL timing library. */
- if ((ret = GPTLfinalize ()))
- return ret;
-#endif
+#ifdef TIMING
+ /* Finalize the GPTL timing library. */
+ if ((ret = GPTLfinalize ()))
+ return ret;
+#endif
if (verbose)
- printf("rank: %d SUCCESS!\n", my_rank);
- return 0;
+ printf("rank: %d SUCCESS!\n", my_rank);
+ return 0;
}
diff --git a/examples/c/example2.c b/examples/c/example2.c
index 93283ba53b3..19f790fb998 100644
--- a/examples/c/example2.c
+++ b/examples/c/example2.c
@@ -1,5 +1,5 @@
/**
- * @file
+ * @file
* A simple C example for the ParallelIO Library.
*
* This example creates a netCDF output file with one 3D variable. One
@@ -13,11 +13,11 @@
* This example uses the MPE performace profiling library, if it is
* present on the build machine. After the program is run, MPE will
* produce a file called example2.clog2. In order to see the nice
- * graphs, execute the commands:
+ * graphs, execute the commands:
*
*
* clog2ToSlog2 example2.clog2
- * jumpshot example2.slog2
+ * jumpshot example2.slog2
*
*/
@@ -65,28 +65,28 @@
/** Handle MPI errors. This should only be used with MPI library
* function calls. */
#define MPIERR(e) do { \
- MPI_Error_string(e, err_buffer, &resultlen); \
- fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \
- MPI_Finalize(); \
- return 2; \
- } while (0)
+ MPI_Error_string(e, exerr_buffer, &exresultlen); \
+ fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, exerr_buffer); \
+ MPI_Finalize(); \
+ return 2; \
+ } while (0)
/** Handle non-MPI errors by finalizing the MPI library and exiting
* with an exit code. */
#define ERR(e) do { \
fprintf(stderr, "Error %d in %s, line %d\n", e, __FILE__, __LINE__); \
- MPI_Finalize(); \
- return e; \
- } while (0)
+ MPI_Finalize(); \
+ return e; \
+ } while (0)
/** Global err buffer for MPI. When there is an MPI error, this buffer
* is used to store the error message that is associated with the MPI
* error. */
-char err_buffer[MPI_MAX_ERROR_STRING];
+char exerr_buffer[MPI_MAX_ERROR_STRING];
/** This is the length of the most recent MPI error message, stored
* int the global error string. */
-int resultlen;
+int exresultlen;
/** The dimension names. */
char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"};
@@ -150,10 +150,10 @@ PIO_Offset chunksize[NDIM] = {2, X_DIM_LEN/2, Y_DIM_LEN/2};
/** Init error. */
#define ERR_INIT 12
-/** This will set up the MPE logging event numbers.
+/** This will set up the MPE logging event numbers.
*
* @param my_rank the rank of the processor running the code.
- * @param event_num array of MPE event numbers.
+ * @param event_num array of MPE event numbers.
*
* @return 0 for success, non-zero for failure.
*/
@@ -189,26 +189,26 @@ init_logging(int my_rank, int event_num[][NUM_EVENTS])
* communications. */
if (!my_rank)
{
- MPE_Describe_state(event_num[START][INIT], event_num[END][INIT],
- "init", "yellow");
- MPE_Describe_state(event_num[START][CREATE_PNETCDF], event_num[END][CREATE_PNETCDF],
- "create pnetcdf", "red");
- MPE_Describe_state(event_num[START][CREATE_CLASSIC], event_num[END][CREATE_CLASSIC],
- "create classic", "red");
- MPE_Describe_state(event_num[START][CREATE_SERIAL4], event_num[END][CREATE_SERIAL4],
- "create netcdf-4 serial", "red");
- MPE_Describe_state(event_num[START][CREATE_PARALLEL4], event_num[END][CREATE_PARALLEL4],
- "create netcdf-4 parallel", "red");
- MPE_Describe_state(event_num[START][CALCULATE], event_num[END][CALCULATE],
- "calculate", "orange");
- MPE_Describe_state(event_num[START][WRITE], event_num[END][WRITE],
- "write", "green");
- MPE_Describe_state(event_num[START][CLOSE], event_num[END][CLOSE],
- "close", "purple");
- MPE_Describe_state(event_num[START][FREE], event_num[END][FREE],
- "free", "blue");
- MPE_Describe_state(event_num[START][READ], event_num[END][READ],
- "read", "pink");
+ MPE_Describe_state(event_num[START][INIT], event_num[END][INIT],
+ "init", "yellow");
+ MPE_Describe_state(event_num[START][CREATE_PNETCDF], event_num[END][CREATE_PNETCDF],
+ "create pnetcdf", "red");
+ MPE_Describe_state(event_num[START][CREATE_CLASSIC], event_num[END][CREATE_CLASSIC],
+ "create classic", "red");
+ MPE_Describe_state(event_num[START][CREATE_SERIAL4], event_num[END][CREATE_SERIAL4],
+ "create netcdf-4 serial", "red");
+ MPE_Describe_state(event_num[START][CREATE_PARALLEL4], event_num[END][CREATE_PARALLEL4],
+ "create netcdf-4 parallel", "red");
+ MPE_Describe_state(event_num[START][CALCULATE], event_num[END][CALCULATE],
+ "calculate", "orange");
+ MPE_Describe_state(event_num[START][WRITE], event_num[END][WRITE],
+ "write", "green");
+ MPE_Describe_state(event_num[START][CLOSE], event_num[END][CLOSE],
+ "close", "purple");
+ MPE_Describe_state(event_num[START][FREE], event_num[END][FREE],
+ "free", "blue");
+ MPE_Describe_state(event_num[START][READ], event_num[END][READ],
+ "read", "pink");
}
#endif /* HAVE_MPE */
return 0;
@@ -216,14 +216,14 @@ init_logging(int my_rank, int event_num[][NUM_EVENTS])
/** Check the output file.
*
- * Use netCDF to check that the output is as expected.
+ * Use netCDF to check that the output is as expected.
*
- * @param ntasks The number of processors running the example.
- * @param filename The name of the example file to check.
+ * @param ntasks The number of processors running the example.
+ * @param filename The name of the example file to check.
*
* @return 0 if example file is correct, non-zero otherwise. */
int check_file(int ntasks, char *filename) {
-
+
int ncid; /**< File ID from netCDF. */
int ndims; /**< Number of dimensions. */
int nvars; /**< Number of variables. */
@@ -240,63 +240,63 @@ int check_file(int ntasks, char *filename) {
size_t count[NDIM]; /**< Number of elements to read. */
int buffer[X_DIM_LEN]; /**< Buffer to read in data. */
int expected[X_DIM_LEN]; /**< Data values we expect to find. */
-
+
/* Open the file. */
if ((ret = nc_open(filename, 0, &ncid)))
- return ret;
+ return ret;
/* Check the metadata. */
if ((ret = nc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid)))
- return ret;
+ return ret;
if (ndims != NDIM || nvars != 1 || ngatts != 0 || unlimdimid != -1)
- return ERR_BAD;
+ return ERR_BAD;
for (int d = 0; d < ndims; d++)
{
- if ((ret = nc_inq_dim(ncid, d, my_dim_name, &dimlen)))
- return ret;
- if (dimlen != X_DIM_LEN || strcmp(my_dim_name, dim_name[d]))
- return ERR_BAD;
+ if ((ret = nc_inq_dim(ncid, d, my_dim_name, &dimlen)))
+ return ret;
+ if (dimlen != X_DIM_LEN || strcmp(my_dim_name, dim_name[d]))
+ return ERR_BAD;
}
if ((ret = nc_inq_var(ncid, 0, var_name, &xtype, &ndims, dimids, &natts)))
- return ret;
+ return ret;
if (xtype != NC_FLOAT || ndims != NDIM || dimids[0] != 0 || natts != 0)
- return ERR_BAD;
+ return ERR_BAD;
/* Use the number of processors to figure out what the data in the
* file should look like. */
int div = X_DIM_LEN * Y_DIM_LEN / ntasks;
for (int d = 0; d < X_DIM_LEN; d++)
- expected[d] = START_DATA_VAL + d/div;
-
+ expected[d] = START_DATA_VAL + d/div;
+
/* Check the data. */
start[0] = 0;
count[0] = X_DIM_LEN;
if ((ret = nc_get_vara(ncid, 0, start, count, buffer)))
- return ret;
+ return ret;
for (int d = 0; d < X_DIM_LEN; d++)
- if (buffer[d] != expected[d])
- return ERR_BAD;
+ if (buffer[d] != expected[d])
+ return ERR_BAD;
/* Close the file. */
if ((ret = nc_close(ncid)))
- return ret;
+ return ret;
/* Everything looks good! */
return 0;
}
-/** Calculate sample data. This function is deliberately slow in order to take up some time calculating.
+/** Calculate sample data. This function is deliberately slow in order to take up some time calculating.
* @param my_rank the rank of the processor running the code.
* @param timestep the timestep.
* @param datap pointer where we should write datum.
- *
+ *
* @return zero for success, non-zero otherwise.
*/
int calculate_value(int my_rank, int timestep, float *datap)
{
*datap = my_rank + timestep;
for (int i = 0; i < 50; i++)
- *datap += atan(cos(my_rank * timestep));
+ *datap += atan(cos(my_rank * timestep));
return 0;
}
@@ -333,7 +333,7 @@ int calculate_value(int my_rank, int timestep, float *datap)
foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
}
-
+
@param [in] argc argument count (should be zero)
@param [in] argv argument array (should be NULL)
@retval examplePioClass* Pointer to self.
@@ -355,10 +355,10 @@ int main(int argc, char* argv[])
* classic format file (but with different libraries). The
* last two produce netCDF4/HDF5 format files, written with
* and without using netCDF-4 parallel I/O. */
- int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF,
- PIO_IOTYPE_NETCDF,
- PIO_IOTYPE_NETCDF4C,
- PIO_IOTYPE_NETCDF4P};
+ int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF,
+ PIO_IOTYPE_NETCDF,
+ PIO_IOTYPE_NETCDF4C,
+ PIO_IOTYPE_NETCDF4P};
/** Names for the output files. Two of them (pnetcdf and
* classic) will be in classic netCDF format, the others
@@ -366,10 +366,10 @@ int main(int argc, char* argv[])
* format. All four can be read by the netCDF library, and all
* will contain the same contents. */
char filename[NUM_NETCDF_FLAVORS][PIO_MAX_NAME + 1] = {"example2_pnetcdf.nc",
- "example2_classic.nc",
- "example2_serial4.nc",
- "example2_parallel4.nc"};
-
+ "example2_classic.nc",
+ "example2_serial4.nc",
+ "example2_parallel4.nc"};
+
/** Number of processors that will do IO. In this example we
* will do IO from all processors. */
int niotasks;
@@ -433,7 +433,7 @@ int main(int argc, char* argv[])
* elements_per_pe. */
PIO_Offset *compdof;
-#ifdef HAVE_MPE
+#ifdef HAVE_MPE
/** MPE event numbers used to track start and stop of
* different parts of the program for later display with
* Jumpshot. */
@@ -447,249 +447,249 @@ int main(int argc, char* argv[])
/* Parse command line. */
while ((c = getopt(argc, argv, "v")) != -1)
- switch (c)
- {
- case 'v':
- verbose++;
- break;
- default:
- break;
- }
-
-#ifdef TIMING
+ switch (c)
+ {
+ case 'v':
+ verbose++;
+ break;
+ default:
+ break;
+ }
+
+#ifdef TIMING
/* Initialize the GPTL timing library. */
if ((ret = GPTLinitialize ()))
- return ret;
-#endif
-
+ return ret;
+#endif
+
/* Initialize MPI. */
if ((ret = MPI_Init(&argc, &argv)))
- MPIERR(ret);
+ MPIERR(ret);
if ((ret = MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
- MPIERR(ret);
+ MPIERR(ret);
/* Learn my rank and the total number of processors. */
if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
- MPIERR(ret);
+ MPIERR(ret);
if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
- MPIERR(ret);
+ MPIERR(ret);
/* Check that a valid number of processors was specified. */
if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
- ntasks == 8 || ntasks == 16))
- fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
+ ntasks == 8 || ntasks == 16))
+ fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
if (verbose)
- printf("%d: ParallelIO Library example1 running on %d processors.\n",
- my_rank, ntasks);
+ printf("%d: ParallelIO Library example1 running on %d processors.\n",
+ my_rank, ntasks);
#ifdef HAVE_MPE
/* Initialize MPE logging. */
if ((ret = MPE_Init_log()))
- ERR(ret);
+ ERR(ret);
if (init_logging(my_rank, event_num))
- ERR(ERR_LOGGING);
+ ERR(ERR_LOGGING);
/* Log with MPE that we are starting INIT. */
if ((ret = MPE_Log_event(event_num[START][INIT], 0, "start init")))
- MPIERR(ret);
+ MPIERR(ret);
#endif /* HAVE_MPE */
- /* keep things simple - 1 iotask per MPI process */
- niotasks = ntasks;
+ /* keep things simple - 1 iotask per MPI process */
+ niotasks = ntasks;
/* Initialize the PIO IO system. This specifies how
* many and which processors are involved in I/O. */
if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
- ioproc_start, PIO_REARR_SUBSET, &iosysid)))
- ERR(ret);
+ ioproc_start, PIO_REARR_SUBSET, &iosysid)))
+ ERR(ret);
/* Describe the decomposition. This is a 1-based array, so add 1! */
elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
- return PIO_ENOMEM;
+ return PIO_ENOMEM;
for (int i = 0; i < elements_per_pe; i++) {
- compdof[i] = my_rank * elements_per_pe + i + 1;
+ compdof[i] = my_rank * elements_per_pe + i + 1;
}
-
+
/* Create the PIO decomposition for this example. */
if (verbose)
- printf("rank: %d Creating decomposition...\n", my_rank);
+ printf("rank: %d Creating decomposition...\n", my_rank);
if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
- compdof, &ioid, NULL, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid, NULL, NULL, NULL)))
+ ERR(ret);
free(compdof);
#ifdef HAVE_MPE
/* Log with MPE that we are done with INIT. */
if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
- MPIERR(ret);
+ MPIERR(ret);
#endif /* HAVE_MPE */
-
+
/* Use PIO to create the example file in each of the four
* available ways. */
- for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++)
+ for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++)
{
#ifdef HAVE_MPE
- /* Log with MPE that we are starting CREATE. */
- if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
- MPIERR(ret);
+ /* Log with MPE that we are starting CREATE. */
+ if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
- /* Create the netCDF output file. */
- if (verbose)
- printf("rank: %d Creating sample file %s with format %d...\n",
- my_rank, filename[fmt], format[fmt]);
- if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
- PIO_CLOBBER)))
- ERR(ret);
-
- /* Define netCDF dimensions and variable. */
- if (verbose)
- printf("rank: %d Defining netCDF metadata...\n", my_rank);
- for (int d = 0; d < NDIM; d++) {
- if (verbose)
- printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
- dim_name[d], dim_len[d]);
- if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
- ERR(ret);
- }
- if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
- ERR(ret);
- /* For netCDF-4 files, set the chunksize to improve performance. */
- if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
- if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
- ERR(ret);
-
- if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
+ /* Create the netCDF output file. */
+ if (verbose)
+ printf("rank: %d Creating sample file %s with format %d...\n",
+ my_rank, filename[fmt], format[fmt]);
+ if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
+ PIO_CLOBBER)))
+ ERR(ret);
+
+ /* Define netCDF dimensions and variable. */
+ if (verbose)
+ printf("rank: %d Defining netCDF metadata...\n", my_rank);
+ for (int d = 0; d < NDIM; d++) {
+ if (verbose)
+ printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
+ dim_name[d], dim_len[d]);
+ if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
+ ERR(ret);
+ }
+ if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
+ ERR(ret);
+ /* For netCDF-4 files, set the chunksize to improve performance. */
+ if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
+ if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
+ ERR(ret);
+
+ if ((ret = PIOc_enddef(ncid)))
+ ERR(ret);
#ifdef HAVE_MPE
- /* Log with MPE that we are done with CREATE. */
- if ((ret = MPE_Log_event(event_num[END][CREATE_PNETCDF + fmt], 0, "end create")))
- MPIERR(ret);
+ /* Log with MPE that we are done with CREATE. */
+ if ((ret = MPE_Log_event(event_num[END][CREATE_PNETCDF + fmt], 0, "end create")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
- /* Allocate space for sample data. */
- if (!(buffer = malloc(elements_per_pe * sizeof(float))))
- return PIO_ENOMEM;
+ /* Allocate space for sample data. */
+ if (!(buffer = malloc(elements_per_pe * sizeof(float))))
+ return PIO_ENOMEM;
- /* Write data for each timestep. */
- for (int ts = 0; ts < NUM_TIMESTEPS; ts++) {
+ /* Write data for each timestep. */
+ for (int ts = 0; ts < NUM_TIMESTEPS; ts++) {
#ifdef HAVE_MPE
- /* Log with MPE that we are starting CALCULATE. */
- if ((ret = MPE_Log_event(event_num[START][CALCULATE], 0, "start calculate")))
- MPIERR(ret);
+ /* Log with MPE that we are starting CALCULATE. */
+ if ((ret = MPE_Log_event(event_num[START][CALCULATE], 0, "start calculate")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
- /* Calculate sample data. Add some math function calls to make this slower. */
- for (int i = 0; i < elements_per_pe; i++)
- if ((ret = calculate_value(my_rank, ts, &buffer[i])))
- ERR(ret);
+ /* Calculate sample data. Add some math function calls to make this slower. */
+ for (int i = 0; i < elements_per_pe; i++)
+ if ((ret = calculate_value(my_rank, ts, &buffer[i])))
+ ERR(ret);
#ifdef HAVE_MPE
- /* Log with MPE that we are done with CALCULATE. */
- if ((ret = MPE_Log_event(event_num[END][CALCULATE], 0, "end calculate")))
- MPIERR(ret);
- /* Log with MPE that we are starting WRITE. */
- if ((ret = MPE_Log_event(event_num[START][WRITE], 0, "start write")))
- MPIERR(ret);
+ /* Log with MPE that we are done with CALCULATE. */
+ if ((ret = MPE_Log_event(event_num[END][CALCULATE], 0, "end calculate")))
+ MPIERR(ret);
+ /* Log with MPE that we are starting WRITE. */
+ if ((ret = MPE_Log_event(event_num[START][WRITE], 0, "start write")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
-
- /* Write data to the file. */
- if (verbose)
- printf("rank: %d Writing sample data...\n", my_rank);
-
- if ((ret = PIOc_setframe(ncid, varid, ts)))
- ERR(ret);
- if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
- buffer, NULL)))
- ERR(ret);
- if ((ret = PIOc_sync(ncid)))
- ERR(ret);
+
+ /* Write data to the file. */
+ if (verbose)
+ printf("rank: %d Writing sample data...\n", my_rank);
+
+ if ((ret = PIOc_setframe(ncid, varid, ts)))
+ ERR(ret);
+ if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
+ buffer, NULL)))
+ ERR(ret);
+ if ((ret = PIOc_sync(ncid)))
+ ERR(ret);
#ifdef HAVE_MPE
- /* Log with MPE that we are done with WRITE. */
- if ((ret = MPE_Log_event(event_num[END][WRITE], 0, "end write")))
- MPIERR(ret);
+ /* Log with MPE that we are done with WRITE. */
+ if ((ret = MPE_Log_event(event_num[END][WRITE], 0, "end write")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
- }
+ }
#ifdef HAVE_MPE
- /* Log with MPE that we are starting CLOSE. */
- if ((ret = MPE_Log_event(event_num[START][CLOSE], 0, "start close")))
- MPIERR(ret);
+ /* Log with MPE that we are starting CLOSE. */
+ if ((ret = MPE_Log_event(event_num[START][CLOSE], 0, "start close")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
-
- /* Free buffer space used in this example. */
- free(buffer);
-
- /* Close the netCDF file. */
- if (verbose)
- printf("rank: %d Closing the sample data file...\n", my_rank);
- if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+
+ /* Free buffer space used in this example. */
+ free(buffer);
+
+ /* Close the netCDF file. */
+ if (verbose)
+ printf("rank: %d Closing the sample data file...\n", my_rank);
+ if ((ret = PIOc_closefile(ncid)))
+ ERR(ret);
#ifdef HAVE_MPE
- /* Log with MPE that we are done with CLOSE. */
- if ((ret = MPE_Log_event(event_num[END][CLOSE], 0, "end close")))
- MPIERR(ret);
+ /* Log with MPE that we are done with CLOSE. */
+ if ((ret = MPE_Log_event(event_num[END][CLOSE], 0, "end close")))
+ MPIERR(ret);
#endif /* HAVE_MPE */
- /* After each file is closed, make all processors wait so that
- * all start creating the next file at the same time. */
- if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
- MPIERR(ret);
+ /* After each file is closed, make all processors wait so that
+ * all start creating the next file at the same time. */
+ if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
+ MPIERR(ret);
}
-
+
#ifdef HAVE_MPE
/* Log with MPE that we are starting FREE. */
if ((ret = MPE_Log_event(event_num[START][FREE], 0, "start free")))
- MPIERR(ret);
+ MPIERR(ret);
#endif /* HAVE_MPE */
-
+
/* Free the PIO decomposition. */
if (verbose)
- printf("rank: %d Freeing PIO decomposition...\n", my_rank);
+ printf("rank: %d Freeing PIO decomposition...\n", my_rank);
if ((ret = PIOc_freedecomp(iosysid, ioid)))
- ERR(ret);
-
+ ERR(ret);
+
/* Finalize the IO system. */
if (verbose)
- printf("rank: %d Freeing PIO resources...\n", my_rank);
+ printf("rank: %d Freeing PIO resources...\n", my_rank);
if ((ret = PIOc_finalize(iosysid)))
- ERR(ret);
+ ERR(ret);
#ifdef HAVE_MPE
/* Log with MPE that we are done with FREE. */
if ((ret = MPE_Log_event(event_num[END][FREE], 0, "end free")))
- MPIERR(ret);
+ MPIERR(ret);
/* Log with MPE that we are starting READ. */
if ((ret = MPE_Log_event(event_num[START][READ], 0, "start read")))
- MPIERR(ret);
+ MPIERR(ret);
#endif /* HAVE_MPE */
-
+
/* Check the output file. */
/* if (!my_rank) */
/* for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) */
- /* if ((ret = check_file(ntasks, filename[fmt]))) */
- /* ERR(ret); */
+ /* if ((ret = check_file(ntasks, filename[fmt]))) */
+ /* ERR(ret); */
#ifdef HAVE_MPE
/* Log with MPE that we are done with READ. */
if ((ret = MPE_Log_event(event_num[END][READ], 0, "end read")))
- MPIERR(ret);
+ MPIERR(ret);
#endif /* HAVE_MPE */
/* Finalize the MPI library. */
MPI_Finalize();
-#ifdef TIMING
+#ifdef TIMING
/* Finalize the GPTL timing library. */
if ((ret = GPTLfinalize ()))
- return ret;
-#endif
+ return ret;
+#endif
if (verbose)
- printf("rank: %d SUCCESS!\n", my_rank);
+ printf("rank: %d SUCCESS!\n", my_rank);
return 0;
}
diff --git a/examples/f03/CMakeLists.txt b/examples/f03/CMakeLists.txt
index 1073fda1a94..922b50cfb76 100644
--- a/examples/f03/CMakeLists.txt
+++ b/examples/f03/CMakeLists.txt
@@ -18,3 +18,6 @@ endif()
SET(SRC examplePio.F90)
ADD_EXECUTABLE(examplePio_f90 ${SRC})
TARGET_LINK_LIBRARIES(examplePio_f90 piof pioc ${TIMING_LINK_LIB})
+SET(SRC exampleAsyncPio.F90)
+ADD_EXECUTABLE(exampleAsyncPio_f90 ${SRC})
+TARGET_LINK_LIBRARIES(exampleAsyncPio_f90 piof pioc ${TIMING_LINK_LIB})
diff --git a/examples/f03/exampleAsyncPio.F90 b/examples/f03/exampleAsyncPio.F90
new file mode 100644
index 00000000000..180e3cd794c
--- /dev/null
+++ b/examples/f03/exampleAsyncPio.F90
@@ -0,0 +1,413 @@
+#include "config.h"
+!> @file
+!! A simple Fortran example for Async use of the ParallelIO Library.
+module pioAsyncExample
+
+ use pio, only : PIO_init, PIO_rearr_box, iosystem_desc_t, file_desc_t
+ use pio, only : PIO_finalize, PIO_noerr, PIO_iotype_netcdf, PIO_createfile
+ use pio, only : PIO_int,var_desc_t, PIO_redef, PIO_def_dim, PIO_def_var, PIO_enddef
+ use pio, only : PIO_closefile, io_desc_t, PIO_initdecomp, PIO_write_darray
+ use pio, only : PIO_freedecomp, PIO_clobber, PIO_read_darray, PIO_syncfile, PIO_OFFSET_KIND
+ use pio, only : PIO_nowrite, PIO_openfile, pio_set_log_level
+ use mpi
+ implicit none
+
+ private
+
+ !> @brief Length of the data array we are using. This is then
+ !! divided among MPI processes.
+ integer, parameter :: LEN = 16
+
+ !> @brief Value used for array that will be written to netcdf file.
+ integer, parameter :: VAL = 42
+
+ !> @brief Error code if anything goes wrong.
+ integer, parameter :: ERR_CODE = 99
+
+ !> @brief A class to hold example code and data.
+ !! This class contains the data and functions to execute the
+ !! example.
+ type, public :: pioExampleClass
+
+ !> @brief Compute task comm
+ integer, allocatable :: comm(:)
+
+ !> @brief true if this is an iotask
+ logical :: iotask
+
+ !> @brief Rank of processor running the code.
+ integer :: myRank
+
+ !> @brief Number of processors participating in MPI communicator.
+ integer :: ntasks
+
+ !> @brief Number of processors performing I/O.
+ integer :: niotasks
+
+ !> @brief Stride in the mpi rank between io tasks.
+ integer :: stride
+
+ !> @brief Start index of I/O processors.
+ integer :: optBase
+
+ !> @brief The ParallelIO system set up by @ref PIO_init.
+ type(iosystem_desc_t), allocatable :: pioIoSystem(:)
+
+ !> @brief Contains data identifying the file.
+ type(file_desc_t) :: pioFileDesc
+
+ !> @brief The netCDF variable ID.
+ type(var_desc_t) :: pioVar
+
+ !> @brief An io descriptor handle that is generated in @ref PIO_initdecomp.
+ type(io_desc_t) :: iodescNCells
+
+ !> @brief Specifies the flavor of netCDF output.
+ integer :: iotype
+
+ !> @brief The netCDF dimension ID.
+ integer :: pioDimId
+
+ !> @brief 1-based index of start of this processors data in full data array.
+ integer :: ista
+
+ !> @brief Size of data array for this processor.
+ integer :: isto
+
+ !> @brief Number of elements handled by each processor.
+ integer :: arrIdxPerPe
+
+ !> @brief The length of the dimension of the netCDF variable.
+ integer, dimension(1) :: dimLen
+
+ !> @brief Buffer to hold sample data that is written to netCDF file.
+ integer, allocatable :: dataBuffer(:)
+
+ !> @brief Buffer to read data into.
+ integer, allocatable :: readBuffer(:)
+
+ !> @brief Array describing the decomposition of the data.
+ integer, allocatable :: compdof(:)
+
+ !> @brief Name of the sample netCDF file written by this example.
+ character(len=255) :: fileName
+
+ contains
+
+ !> @brief Initialize MPI, ParallelIO, and example data.
+ !! Initialize the MPI and ParallelIO libraries. Also allocate
+ !! memory to write and read the sample data to the netCDF file.
+ procedure, public :: init
+
+ !> @brief Create the decomposition for the example.
+ !! This subroutine creates the decomposition for the example.
+ procedure, public :: createDecomp
+
+ !> @brief Create netCDF output file.
+ !! This subroutine creates the netCDF output file for the example.
+ procedure, public :: createFile
+
+ !> @brief Define the netCDF metadata.
+ !! This subroutine defines the netCDF dimension and variable used
+ !! in the output file.
+ procedure, public :: defineVar
+
+ !> @brief Write the sample data to the output file.
+ !! This subroutine writes the sample data array to the netCDF
+ !! output file.
+ procedure, public :: writeVar
+
+ !> @brief Read the sample data from the output file.
+ !! This subroutine reads the sample data array from the netCDF
+ !! output file.
+ procedure, public :: readVar
+
+ !> @brief Close the netCDF output file.
+ !! This subroutine closes the output file used by this example.
+ procedure, public :: closeFile
+
+ !> @brief Clean up resources.
+ !! This subroutine cleans up resources used in the example. The
+ !! ParallelIO and MPI libraries are finalized, and memory
+ !! allocated in this example program is freed.
+ procedure, public :: cleanUp
+
+ !> @brief Handle errors.
+ !! This subroutine is called if there is an error.
+ procedure, private :: errorHandle
+
+ end type pioExampleClass
+
+contains
+
+ !> @brief Initialize MPI, ParallelIO, and example data.
+ !! Initialize the MPI and ParallelIO libraries. Also allocate
+ !! memory to write and read the sample data to the netCDF file.
+ subroutine init(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+ integer :: io_comm
+ integer :: ierr,i
+ integer :: procs_per_component(1), io_proc_list(1)
+ integer, allocatable :: comp_proc_list(:,:)
+
+ !
+ ! initialize MPI
+ !
+
+ call MPI_Init(ierr)
+ call MPI_Comm_rank(MPI_COMM_WORLD, this%myRank, ierr)
+ call MPI_Comm_size(MPI_COMM_WORLD, this%ntasks , ierr)
+
+ if(this%ntasks < 2) then
+ print *,"ERROR: not enough tasks specified for example code"
+ call mpi_abort(mpi_comm_world, -1 ,ierr)
+ endif
+
+ !
+ ! set up PIO for rest of example
+ !
+
+ this%stride = 1
+ this%optBase = 0
+ this%iotype = PIO_iotype_netcdf
+ this%fileName = "examplePio_f90.nc"
+ this%dimLen(1) = LEN
+
+ this%niotasks = 1 ! keep things simple - 1 iotask
+
+! io_proc_list(1) = 0
+ io_proc_list(1) = this%ntasks-1
+ this%ntasks = this%ntasks - this%niotasks
+
+ procs_per_component(1) = this%ntasks
+ allocate(comp_proc_list(this%ntasks,1))
+ do i=1,this%ntasks
+ comp_proc_list(i,1) = i - 1
+! comp_proc_list(i,1) = i
+ enddo
+
+ allocate(this%pioIOSystem(1), this%comm(1))
+
+ call PIO_init(this%pioIOSystem, & ! iosystem
+ MPI_COMM_WORLD, & ! MPI communicator
+ procs_per_component, & ! number of tasks per component model
+ comp_proc_list, & ! list of procs per component
+ io_proc_list, & ! list of io procs
+ PIO_REARR_BOX, & ! rearranger to use (currently only BOX is supported)
+ this%comm, & ! comp_comm to be returned
+ io_comm) ! io_comm to be returned
+ if (io_comm /= MPI_COMM_NULL) then
+ this%iotask = .true.
+ return
+ endif
+ this%iotask = .false.
+ call MPI_Comm_rank(this%comm(1), this%myRank, ierr)
+ call MPI_Comm_size(this%comm(1), this%ntasks , ierr)
+
+ !
+ ! set up some data that we will write to a netcdf file
+ !
+
+ this%arrIdxPerPe = LEN / this%ntasks
+
+ if (this%arrIdxPerPe < 1) then
+ call this%errorHandle("Not enough work to distribute among pes", ERR_CODE)
+ endif
+
+ this%ista = this%myRank * this%arrIdxPerPe + 1
+ this%isto = this%ista + (this%arrIdxPerPe - 1)
+
+ allocate(this%compdof(this%ista:this%isto))
+ allocate(this%dataBuffer(this%ista:this%isto))
+ allocate(this%readBuffer(this%ista:this%isto))
+
+ this%compdof(this%ista:this%isto) = (/(i, i=this%ista,this%isto, 1)/)
+ this%dataBuffer(this%ista:this%isto) = this%myRank + VAL
+ this%readBuffer(this%ista:this%isto) = 0
+
+ end subroutine init
+
+ subroutine createDecomp(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+ integer :: ierr
+
+ call PIO_initdecomp(this%pioIoSystem(1), PIO_int, this%dimLen, this%compdof(this%ista:this%isto), &
+ this%iodescNCells)
+
+ end subroutine createDecomp
+
+ subroutine createFile(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+
+ integer :: retVal
+
+ retVal = PIO_createfile(this%pioIoSystem(1), this%pioFileDesc, this%iotype, trim(this%fileName), PIO_clobber)
+
+ call this%errorHandle("Could not create "//trim(this%fileName), retVal)
+
+ end subroutine createFile
+
+ subroutine defineVar(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+
+ integer :: retVal
+
+ retVal = PIO_def_dim(this%pioFileDesc, 'x', this%dimLen(1) , this%pioDimId)
+ call this%errorHandle("Could not define dimension x", retVal)
+
+ retVal = PIO_def_var(this%pioFileDesc, 'foo', PIO_int, (/this%pioDimId/), this%pioVar)
+ call this%errorHandle("Could not define variable foo", retVal)
+
+ retVal = PIO_enddef(this%pioFileDesc)
+ call this%errorHandle("Could not end define mode", retVal)
+
+ end subroutine defineVar
+
+ subroutine writeVar(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+
+ integer :: retVal
+
+ call PIO_write_darray(this%pioFileDesc, this%pioVar, this%iodescNCells, this%dataBuffer(this%ista:this%isto), retVal)
+ call this%errorHandle("Could not write foo", retVal)
+ call PIO_syncfile(this%pioFileDesc)
+
+ end subroutine writeVar
+
+ subroutine readVar(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+
+ integer :: retVal
+
+ call PIO_read_darray(this%pioFileDesc, this%pioVar, this%iodescNCells, this%readBuffer, retVal)
+ call this%errorHandle("Could not read foo", retVal)
+
+ end subroutine readVar
+
+ subroutine closeFile(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+
+ call PIO_closefile(this%pioFileDesc)
+
+ end subroutine closeFile
+
+ subroutine cleanUp(this)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+
+ integer :: ierr
+
+ deallocate(this%compdof)
+ deallocate(this%dataBuffer)
+ deallocate(this%readBuffer)
+
+ call PIO_freedecomp(this%pioIoSystem(1), this%iodescNCells)
+ call PIO_finalize(this%pioIoSystem(1), ierr)
+
+ end subroutine cleanUp
+
+ subroutine errorHandle(this, errMsg, retVal)
+
+ implicit none
+
+ class(pioExampleClass), intent(inout) :: this
+ character(len=*), intent(in) :: errMsg
+ integer, intent(in) :: retVal
+ integer :: lretval
+ if (retVal .ne. PIO_NOERR) then
+ write(*,*) retVal,errMsg
+ call PIO_closefile(this%pioFileDesc)
+ call mpi_abort(this%comm(1),retVal, lretval)
+ end if
+
+ end subroutine errorHandle
+
+end module pioAsyncExample
+
+!> @brief Main execution of example code.
+!! This is an example program for the ParallelIO library.
+!!
+!! This program creates a netCDF output file with the ParallelIO
+!! library, then writes and reads some data to and from the file.
+!!
+!! This example does the following:
+!!
+!! - initialization initializes the MPI library, initializes the
+!! ParallelIO library with @ref PIO_init. Then allocate memory for a
+!! data array of sample data to write, and an array to read the data
+!! back into. Also allocate an array to hold decomposition
+!! information.
+!!
+!! - creation of decomposition by calling @ref PIO_initdecomp.
+!!
+!! - creation of netCDF file with @ref PIO_createfile.
+!!
+!! - define netCDF metadata with @ref PIO_def_dim and @ref
+!! PIO_def_var. Then end define mode with @ref PIO_enddef.
+!!
+!! - write the sample data with @ref PIO_write_darray. Then sync the
+!! file with @ref PIO_syncfile.
+!!
+!! - read the sample data with @ref PIO_read_darray.
+!!
+!! - close the netCDF file with @ref PIO_closefile.
+!!
+!! - clean up local memory, ParallelIO library resources with @ref
+!! PIO_freedecomp and @ref PIO_finalize, and MPI library resources.
+!!
+program main
+
+ use pioAsyncExample, only : pioExampleClass
+ use pio, only : pio_set_log_level
+#ifdef TIMING
+ use perf_mod, only : t_initf, t_finalizef, t_prf
+#endif
+
+ implicit none
+
+ type(pioExampleClass) :: pioExInst
+ integer :: ierr
+#ifdef TIMING
+ call t_initf('timing.nl')
+#endif
+ call pioExInst%init()
+ if (.not. pioExInst%iotask) then
+ call pioExInst%createDecomp()
+ call pioExInst%createFile()
+ call pioExInst%defineVar()
+ call pioExInst%writeVar()
+ call pioExInst%readVar()
+ call pioExInst%closeFile()
+ call pioExInst%cleanUp()
+ endif
+#ifdef TIMING
+ call t_prf()
+ call t_finalizef()
+#endif
+ call MPI_Finalize(ierr)
+
+
+end program main
diff --git a/examples/f03/examplePio.F90 b/examples/f03/examplePio.F90
index 88804b7acab..e2110bd506a 100644
--- a/examples/f03/examplePio.F90
+++ b/examples/f03/examplePio.F90
@@ -9,19 +9,17 @@ module pioExample
use pio, only : PIO_closefile, io_desc_t, PIO_initdecomp, PIO_write_darray
use pio, only : PIO_freedecomp, PIO_clobber, PIO_read_darray, PIO_syncfile, PIO_OFFSET_KIND
use pio, only : PIO_nowrite, PIO_openfile
-
+ use mpi
implicit none
private
- include 'mpif.h'
-
!> @brief Length of the data array we are using. This is then
!! divided among MPI processes.
integer, parameter :: LEN = 16
!> @brief Value used for array that will be written to netcdf file.
- integer, parameter :: VAL = 42
+ integer, parameter :: VAL = 42
!> @brief Error code if anything goes wrong.
integer, parameter :: ERR_CODE = 99
@@ -41,7 +39,7 @@ module pioExample
integer :: niotasks
!> @brief Stride in the mpi rank between io tasks.
- integer :: stride
+ integer :: stride
!> @brief Number of aggregator.
integer :: numAggregator
@@ -182,7 +180,7 @@ subroutine init(this)
this%pioIoSystem, & ! iosystem
base=this%optBase) ! base (optional argument)
- !
+ !
! set up some data that we will write to a netcdf file
!
@@ -316,11 +314,11 @@ subroutine errorHandle(this, errMsg, retVal)
class(pioExampleClass), intent(inout) :: this
character(len=*), intent(in) :: errMsg
integer, intent(in) :: retVal
-
+ integer :: lretval
if (retVal .ne. PIO_NOERR) then
write(*,*) retVal,errMsg
call PIO_closefile(this%pioFileDesc)
- call mpi_abort(MPI_COMM_WORLD,0,retVal)
+ call mpi_abort(MPI_COMM_WORLD,retVal, lretval)
end if
end subroutine errorHandle
@@ -354,18 +352,23 @@ end module pioExample
!! - read the sample data with @ref PIO_read_darray.
!!
!! - close the netCDF file with @ref PIO_closefile.
-!!
+!!
!! - clean up local memory, ParallelIO library resources with @ref
!! PIO_freedecomp and @ref PIO_finalize, and MPI library resources.
!!
program main
use pioExample, only : pioExampleClass
+#ifdef TIMING
+ use perf_mod, only : t_initf, t_finalizef, t_prf
+#endif
implicit none
type(pioExampleClass) :: pioExInst
-
+#ifdef TIMING
+ call t_initf('timing.nl')
+#endif
call pioExInst%init()
call pioExInst%createDecomp()
call pioExInst%createFile()
@@ -374,5 +377,9 @@ program main
call pioExInst%readVar()
call pioExInst%closeFile()
call pioExInst%cleanUp()
+#ifdef TIMING
+ call t_prf()
+ call t_finalizef()
+#endif
end program main
diff --git a/src/clib/pio.h b/src/clib/pio.h
index dfdbc26bd50..a161593b84d 100644
--- a/src/clib/pio.h
+++ b/src/clib/pio.h
@@ -451,12 +451,11 @@ typedef struct iosystem_desc_t
int compmaster;
/** Rank of IO root task (which is rank 0 in io_comm) in the union
- * communicator. Will always be 0 for async situations. */
+ * communicator. */
int ioroot;
/** Rank of computation root task (which is rank 0 in
- * comm_comms[cmp]) in the union communicator. Will always = number
- * of IO tasks in async situations. */
+ * comm_comms[cmp]) in the union communicator. */
int comproot;
/** An array of the ranks of all IO tasks within the union
@@ -767,6 +766,7 @@ enum PIO_ERROR_HANDLERS
#define PIO_FIRST_ERROR_CODE (-500) /**< The first error code for PIO. */
#define PIO_EBADIOTYPE (-500) /**< Bad IOTYPE error. */
#define PIO_EVARDIMMISMATCH (-501) /**< Variable dimensions do not match in a multivar call. */
+#define PIO_EBADREARR (-502) /**< Rearranger error in async mode. */
#define PIO_REQ_NULL (NC_REQ_NULL-1) /**< Request null. */
#if defined(__cplusplus)
@@ -775,6 +775,7 @@ extern "C" {
/* Error handling. */
int PIOc_strerror(int pioerr, char *errstr);
int PIOc_set_log_level(int level);
+ int PIOc_set_global_log_level(int iosysid, int level);
/* Decomposition. */
@@ -818,6 +819,10 @@ extern "C" {
int *num_procs_per_comp, int **proc_list, MPI_Comm *io_comm, MPI_Comm *comp_comm,
int rearranger, int *iosysidp);
+ /* Initializing IO system for async - alternative interface. */
+ int PIOc_init_async_from_comms(MPI_Comm world, int component_count, MPI_Comm *comp_comm,
+ MPI_Comm io_comm, int rearranger, int *iosysidp);
+
/* How many IO tasks in this iosysid? */
int PIOc_get_numiotasks(int iosysid, int *numiotasks);
diff --git a/src/clib/pio_darray.c b/src/clib/pio_darray.c
index 4ecabb2fcce..ddfdd20445b 100644
--- a/src/clib/pio_darray.c
+++ b/src/clib/pio_darray.c
@@ -868,8 +868,6 @@ PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen,
size_t rlen = 0; /* the length of data in iobuf. */
void *tmparray; /* unsorted copy of array buf if required */
int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function calls. */
- void *fillvalue = NULL;
- int pio_type;
int ierr; /* Return code. */
#ifdef USE_MPE
@@ -922,7 +920,7 @@ PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen,
pioassert(iodesc->rearranger == PIO_REARR_BOX || iodesc->rearranger == PIO_REARR_SUBSET,
"unknown rearranger", __FILE__, __LINE__);
- /* ??? */
+ /* iomaster needs max of buflen, others need local len */
if (ios->iomaster == MPI_ROOT)
rlen = iodesc->maxiobuflen;
else
@@ -953,6 +951,7 @@ PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen,
/* If the map is not monotonically increasing we will need to sort
* it. */
PLOG((3, "iodesc->needssort %d", iodesc->needssort));
+
if (iodesc->needssort)
{
if (!(tmparray = malloc(iodesc->piotype_size * iodesc->maplen)))
@@ -965,23 +964,23 @@ PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen,
/* switch(iodesc->piotype)
{
case PIO_SHORT:
- for(int i=0; imaplen; i++)
- ((short *) array)[i] = (short) 0;
- break;
+ for(int i=0; imaplen; i++)
+ ((short *) array)[i] = (short) 0;
+ break;
case PIO_INT:
- for(int i=0; imaplen; i++)
- ((int *) array)[i] = (int) 0;
- break;
+ for(int i=0; imaplen; i++)
+ ((int *) array)[i] = (int) 0;
+ break;
case PIO_FLOAT:
- for(int i=0; imaplen; i++)
- ((float *) array)[i] = (float) 0;
- break;
+ for(int i=0; imaplen; i++)
+ ((float *) array)[i] = (float) 0;
+ break;
case PIO_DOUBLE:
- for(int i=0; imaplen; i++)
- ((double *) array)[i] = (double) 0;
- break;
+ for(int i=0; imaplen; i++)
+ ((double *) array)[i] = (double) 0;
+ break;
default:
- return PIO_EBADTYPE;
+ return PIO_EBADTYPE;
}
*/
@@ -990,7 +989,7 @@ PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen,
return pio_err(ios, file, ierr, __FILE__, __LINE__);
/* If we need to sort the map, do it. */
- if (iodesc->needssort)
+ if (iodesc->needssort && ios->compproc)
{
pio_sorted_copy(tmparray, array, iodesc, 1, 1);
free(tmparray);
diff --git a/src/clib/pio_darray_int.c b/src/clib/pio_darray_int.c
index a7f4899ac88..30d8a442326 100644
--- a/src/clib/pio_darray_int.c
+++ b/src/clib/pio_darray_int.c
@@ -371,7 +371,9 @@ write_darray_multi_par(file_desc_t *file, int nvars, int fndims, const int *vari
{
iosystem_desc_t *ios; /* Pointer to io system information. */
var_desc_t *vdesc; /* Pointer to var info struct. */
+#ifdef _PNETCDF
int dsize; /* Data size (for one region). */
+#endif
int ierr = PIO_NOERR;
#if USE_VARD_WRITE
PIO_Offset gdim0; /* global size of first dimension if no unlimited dimension and ndimsmaxregions];
PIO_Offset *countlist[iodesc->maxregions];
-
+#endif
+
/* buffer is incremented by byte and loffset is in terms of
the iodessc->mpitype so we need to multiply by the size of
the mpitype. */
@@ -1194,7 +1198,6 @@ pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf)
/* For each regions, read the data. */
for (int regioncnt = 0; regioncnt < iodesc->maxregions; regioncnt++)
{
- tmp_bufsize = 1;
if (region == NULL || iodesc->llen == 0)
{
/* No data for this region. */
@@ -1699,12 +1702,12 @@ pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid,
int
flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
{
- int mpierr; /* Return code from MPI functions. */
int ierr = PIO_NOERR;
#ifdef _PNETCDF
var_desc_t *vdesc;
PIO_Offset usage = 0;
+ int mpierr; /* Return code from MPI functions. */
/* Check inputs. */
pioassert(file, "invalid input", __FILE__, __LINE__);
diff --git a/src/clib/pio_error.h b/src/clib/pio_error.h
index 78e9c26ffbe..9c680ee7e61 100644
--- a/src/clib/pio_error.h
+++ b/src/clib/pio_error.h
@@ -14,11 +14,12 @@
#include
/**
- * Handle non-MPI errors by printing error message and goto exit. This
- * is used in test code.
+ * Handle non-MPI errors by printing error message, setting error
+ * code, and goto exit. This is used in test code.
*/
-#define PBAIL(e) do { \
+#define PBAIL(e) do { \
fprintf(stderr, "%d Error %d in %s, line %d\n", my_rank, e, __FILE__, __LINE__); \
+ ret = e; \
goto exit; \
} while (0)
@@ -26,9 +27,9 @@
* Handle non-MPI errors by calling pio_err(), setting return code,
* and goto exit. This is used in library code.
*/
-#define EXIT(ios, e) do { \
- ret = pio_err(NULL, NULL, e, __FILE__, __LINE__); \
- goto exit; \
+#define EXIT(ios, e) do { \
+ ret = pio_err(NULL, NULL, e, __FILE__, __LINE__); \
+ goto exit; \
} while (0)
/**
@@ -46,6 +47,28 @@
return e; \
} while (0)
+/**
+ * For async tests, handle non-MPI errors by finalizing the IOsystem
+ * and exiting with an exit code. This macro works for tests with one
+ * iosystemid.
+ */
+#define AERR(e) do { \
+ fprintf(stderr, "%d Async Error %d in %s, line %d\n", my_rank, e, __FILE__, __LINE__); \
+ PIOc_free_iosystem(iosysid); \
+ return e; \
+ } while (0)
+
+/**
+ * For async tests, handle non-MPI errors by finalizing the IOsystem
+ * and exiting with an exit code. This macro works for tests with more
+ * than one iosystemid.
+ */
+#define AERR2(e, i) do { \
+ fprintf(stderr, "%d Async Error %d in iosysid %d, %s, line %d\n", my_rank, e, i, __FILE__, __LINE__); \
+ PIOc_free_iosystem(i); \
+ return e; \
+ } while (0)
+
/**
* Handle MPI errors. This should only be used with MPI library
* function calls. Print error message, finalize MPI and return error
diff --git a/src/clib/pio_getput_int.c b/src/clib/pio_getput_int.c
index ae294d7a918..ff9212435f4 100644
--- a/src/clib/pio_getput_int.c
+++ b/src/clib/pio_getput_int.c
@@ -266,7 +266,12 @@ PIOc_get_att_tc(int ncid, int varid, const char *name, nc_type memtype, void *ip
{
/* Get the type and length of the attribute. */
if ((ierr = PIOc_inq_att(ncid, varid, name, &atttype, &attlen)))
- return check_netcdf(file, ierr, __FILE__, __LINE__);
+ {
+ if (ios->async)
+ return ierr;
+ else
+ return check_netcdf(file, ierr, __FILE__, __LINE__);
+ }
PLOG((2, "atttype = %d attlen = %d", atttype, attlen));
/* Get the length (in bytes) of the type of the attribute. */
diff --git a/src/clib/pio_internal.h b/src/clib/pio_internal.h
index 800bfb136f5..0124285e5c4 100644
--- a/src/clib/pio_internal.h
+++ b/src/clib/pio_internal.h
@@ -627,6 +627,7 @@ enum PIO_MSG
PIO_MSG_ADVANCEFRAME,
PIO_MSG_READDARRAY,
PIO_MSG_SETERRORHANDLING,
+ PIO_MSG_SETLOGLEVEL,
PIO_MSG_FREEDECOMP,
PIO_MSG_CLOSE_FILE,
PIO_MSG_DELETE_FILE,
diff --git a/src/clib/pio_msg.c b/src/clib/pio_msg.c
index 4c50e63b24f..f49384d2978 100644
--- a/src/clib/pio_msg.c
+++ b/src/clib/pio_msg.c
@@ -474,6 +474,7 @@ int inq_att_handler(iosystem_desc_t *ios)
int varid;
char name[PIO_MAX_NAME + 1];
int namelen;
+ int eh;
nc_type xtype, *xtypep = NULL;
PIO_Offset len, *lenp = NULL;
char xtype_present, len_present;
@@ -497,6 +498,8 @@ int inq_att_handler(iosystem_desc_t *ios)
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
if ((mpierr = MPI_Bcast(&len_present, 1, MPI_CHAR, 0, ios->intercomm)))
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+ if ((mpierr = MPI_Bcast(&eh, 1, MPI_INT, 0, ios->intercomm)))
+ return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
/* Match NULLs in collective function call. */
if (xtype_present)
@@ -505,7 +508,7 @@ int inq_att_handler(iosystem_desc_t *ios)
lenp = &len;
/* Call the function to learn about the attribute. */
- PIOc_inq_att(ncid, varid, name, xtypep, lenp);
+ PIOc_inq_att_eh(ncid, varid, name, eh, xtypep, lenp);
return PIO_NOERR;
}
@@ -2104,10 +2107,13 @@ int initdecomp_dof_handler(iosystem_desc_t *ios)
* task is broadcasting. */
if ((mpierr = MPI_Bcast(&iosysid, 1, MPI_INT, 0, ios->intercomm)))
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+ PLOG((3, "initdecomp_dof_handler iosysid %d",iosysid));
if ((mpierr = MPI_Bcast(&pio_type, 1, MPI_INT, 0, ios->intercomm)))
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+ PLOG((3, "initdecomp_dof_handler pio_type %d", pio_type));
if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, 0, ios->intercomm)))
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+ PLOG((3, "initdecomp_dof_handler ndims %d", ndims));
/* Now we know the size of these arrays. */
int dims[ndims];
@@ -2116,8 +2122,11 @@ int initdecomp_dof_handler(iosystem_desc_t *ios)
if ((mpierr = MPI_Bcast(dims, ndims, MPI_INT, 0, ios->intercomm)))
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+ for(int i=0; iintercomm)))
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+ PLOG((3, "initdecomp_dof_handler maplen %d", maplen));
PIO_Offset *compmap;
if (!(compmap = malloc(maplen * sizeof(PIO_Offset))))
@@ -2294,7 +2303,7 @@ int read_darray_handler(iosystem_desc_t *ios)
int ncid;
int varid;
int ioid;
- int arraylen;
+ PIO_Offset arraylen;
void *data = NULL;
int mpierr;
@@ -2317,6 +2326,7 @@ int read_darray_handler(iosystem_desc_t *ios)
PIOc_read_darray(ncid, varid, ioid, arraylen, data);
PLOG((1, "read_darray_handler succeeded!"));
+
return PIO_NOERR;
}
@@ -2573,6 +2583,34 @@ int finalize_handler(iosystem_desc_t *ios, int index)
return PIO_NOERR;
}
+/**
+ * Set the log level.
+ *
+ * @param ios pointer to iosystem info
+ * @returns 0 for success, error code otherwise.
+ * @author Jim Edwards
+ */
+int set_loglevel_handler(iosystem_desc_t *ios)
+{
+#if PIO_ENABLE_LOGGING
+ int iosysid;
+ int level;
+ int mpierr;
+#endif
+
+ PLOG((0, "set_loglevel_handler called"));
+ assert(ios);
+#if PIO_ENABLE_LOGGING
+ if ((mpierr = MPI_Bcast(&iosysid, 1, MPI_INT, 0, ios->intercomm)))
+ return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+
+ PIOc_set_global_log_level(iosysid, level);
+
+#endif
+ return PIO_NOERR;
+}
+
+
/**
* This function is called by the IO tasks. This function will not
* return, unless there is an error.
@@ -2590,9 +2628,10 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys,
iosystem_desc_t *my_iosys;
int msg = PIO_MSG_NULL, messages[component_count];
MPI_Request req[component_count];
- MPI_Status status;
- int index;
+ MPI_Status status[component_count];
+ int index[component_count];
int open_components = component_count;
+ int outcount;
int finalize = 0;
int mpierr;
int ret = PIO_NOERR;
@@ -2607,7 +2646,7 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys,
for (int cmp = 0; cmp < component_count; cmp++)
{
my_iosys = iosys[cmp];
- PLOG((1, "about to call MPI_Irecv union_comm = %d", my_iosys->union_comm));
+ PLOG((1, "about to call MPI_Irecv union_comm = %d comproot %d", my_iosys->union_comm, my_iosys->comproot));
if ((mpierr = MPI_Irecv(&(messages[cmp]), 1, MPI_INT, my_iosys->comproot, MPI_ANY_TAG,
my_iosys->union_comm, &req[cmp])))
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
@@ -2629,213 +2668,231 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys,
req[0], MPI_REQUEST_NULL));
for (int c = 0; c < component_count; c++)
PLOG((3, "req[%d] = %d", c, req[c]));
- if ((mpierr = MPI_Waitany(component_count, req, &index, &status)))
+ // if ((mpierr = MPI_Waitany(component_count, req, &index, &status))){
+ if ((mpierr = MPI_Waitsome(component_count, req, &outcount, index, status))){
+ PLOG((0, "Error from mpi_waitsome %d",mpierr));
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
- PLOG((3, "Waitany returned index = %d req[%d] = %d", index, index, req[index]));
- msg = messages[index];
+ }
+ for(int c = 0; c < outcount; c++)
+ PLOG((3, "Waitsome returned index = %d req[%d] = %d", index[c], index[c], req[index[c]]));
+ // msg = messages[index];
for (int c = 0; c < component_count; c++)
PLOG((3, "req[%d] = %d", c, req[c]));
}
-
- /* Broadcast the index of the computational component that
- * originated the request to the rest of the IO tasks. */
- PLOG((3, "About to do Bcast of index = %d io_comm = %d", index, io_comm));
- if ((mpierr = MPI_Bcast(&index, 1, MPI_INT, 0, io_comm)))
+ if ((mpierr = MPI_Bcast(&outcount, 1, MPI_INT, 0, io_comm)))
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
- PLOG((3, "index MPI_Bcast complete index = %d", index));
-
- /* Set the correct iosys depending on the index. */
- my_iosys = iosys[index];
-
- /* Broadcast the msg value to the rest of the IO tasks. */
- PLOG((3, "about to call msg MPI_Bcast io_comm = %d", io_comm));
- if ((mpierr = MPI_Bcast(&msg, 1, MPI_INT, 0, io_comm)))
+ PLOG((3, "outcount MPI_Bcast complete outcount = %d", outcount));
+
+ for(int creq=0; creq < outcount; creq++)
+ {
+ int idx = index[creq];
+ /* Broadcast the index of the computational component that
+ * originated the request to the rest of the IO tasks. */
+            PLOG((3, "About to do Bcast of index = %d io_comm = %d", idx, io_comm));
+ if ((mpierr = MPI_Bcast(&idx, 1, MPI_INT, 0, io_comm)))
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
- PLOG((1, "pio_msg_handler2 msg MPI_Bcast complete msg = %d", msg));
+ PLOG((3, "index MPI_Bcast complete index = %d", idx));
+ msg = messages[idx];
- /* Handle the message. This code is run on all IO tasks. */
- switch (msg)
- {
- case PIO_MSG_INQ_TYPE:
- ret = inq_type_handler(my_iosys);
- break;
- case PIO_MSG_INQ_FORMAT:
- ret = inq_format_handler(my_iosys);
- break;
- case PIO_MSG_CREATE_FILE:
- ret = create_file_handler(my_iosys);
- break;
- case PIO_MSG_SYNC:
- ret = sync_file_handler(my_iosys);
- break;
- case PIO_MSG_ENDDEF:
- case PIO_MSG_REDEF:
- ret = change_def_file_handler(my_iosys, msg);
- break;
- case PIO_MSG_OPEN_FILE:
- ret = open_file_handler(my_iosys);
- break;
- case PIO_MSG_CLOSE_FILE:
- ret = close_file_handler(my_iosys);
- break;
- case PIO_MSG_DELETE_FILE:
- ret = delete_file_handler(my_iosys);
- break;
- case PIO_MSG_RENAME_DIM:
- ret = rename_dim_handler(my_iosys);
- break;
- case PIO_MSG_RENAME_VAR:
- ret = rename_var_handler(my_iosys);
- break;
- case PIO_MSG_RENAME_ATT:
- ret = rename_att_handler(my_iosys);
- break;
- case PIO_MSG_DEL_ATT:
- ret = delete_att_handler(my_iosys);
- break;
- case PIO_MSG_DEF_DIM:
- ret = def_dim_handler(my_iosys);
- break;
- case PIO_MSG_DEF_VAR:
- ret = def_var_handler(my_iosys);
- break;
- case PIO_MSG_DEF_VAR_CHUNKING:
- ret = def_var_chunking_handler(my_iosys);
- break;
- case PIO_MSG_DEF_VAR_FILL:
- ret = def_var_fill_handler(my_iosys);
- break;
- case PIO_MSG_DEF_VAR_ENDIAN:
- ret = def_var_endian_handler(my_iosys);
- break;
- case PIO_MSG_DEF_VAR_DEFLATE:
- ret = def_var_deflate_handler(my_iosys);
- break;
- case PIO_MSG_INQ_VAR_ENDIAN:
- ret = inq_var_endian_handler(my_iosys);
- break;
- case PIO_MSG_SET_VAR_CHUNK_CACHE:
- ret = set_var_chunk_cache_handler(my_iosys);
- break;
- case PIO_MSG_GET_VAR_CHUNK_CACHE:
- ret = get_var_chunk_cache_handler(my_iosys);
- break;
- case PIO_MSG_INQ:
- ret = inq_handler(my_iosys);
- break;
- case PIO_MSG_INQ_UNLIMDIMS:
- ret = inq_unlimdims_handler(my_iosys);
- break;
- case PIO_MSG_INQ_DIM:
- ret = inq_dim_handler(my_iosys, msg);
- break;
- case PIO_MSG_INQ_DIMID:
- ret = inq_dimid_handler(my_iosys);
- break;
- case PIO_MSG_INQ_VAR:
- ret = inq_var_handler(my_iosys);
- break;
- case PIO_MSG_INQ_VAR_CHUNKING:
- ret = inq_var_chunking_handler(my_iosys);
- break;
- case PIO_MSG_INQ_VAR_FILL:
- ret = inq_var_fill_handler(my_iosys);
- break;
- case PIO_MSG_INQ_VAR_DEFLATE:
- ret = inq_var_deflate_handler(my_iosys);
- break;
- case PIO_MSG_GET_ATT:
- ret = att_get_handler(my_iosys);
- break;
- case PIO_MSG_PUT_ATT:
- ret = att_put_handler(my_iosys);
- break;
- case PIO_MSG_INQ_VARID:
- ret = inq_varid_handler(my_iosys);
- break;
- case PIO_MSG_INQ_ATT:
- ret = inq_att_handler(my_iosys);
- break;
- case PIO_MSG_INQ_ATTNAME:
- ret = inq_attname_handler(my_iosys);
- break;
- case PIO_MSG_INQ_ATTID:
- ret = inq_attid_handler(my_iosys);
- break;
- case PIO_MSG_GET_VARS:
- ret = get_vars_handler(my_iosys);
- break;
- case PIO_MSG_PUT_VARS:
- ret = put_vars_handler(my_iosys);
- break;
- case PIO_MSG_INITDECOMP_DOF:
- ret = initdecomp_dof_handler(my_iosys);
- break;
- case PIO_MSG_WRITEDARRAYMULTI:
- ret = write_darray_multi_handler(my_iosys);
- break;
- case PIO_MSG_SETFRAME:
- ret = setframe_handler(my_iosys);
- break;
- case PIO_MSG_ADVANCEFRAME:
- ret = advanceframe_handler(my_iosys);
- break;
- case PIO_MSG_READDARRAY:
- ret = read_darray_handler(my_iosys);
- break;
- case PIO_MSG_SETERRORHANDLING:
- ret = seterrorhandling_handler(my_iosys);
- break;
- case PIO_MSG_SET_CHUNK_CACHE:
- ret = set_chunk_cache_handler(my_iosys);
- break;
- case PIO_MSG_GET_CHUNK_CACHE:
- ret = get_chunk_cache_handler(my_iosys);
- break;
- case PIO_MSG_FREEDECOMP:
- ret = freedecomp_handler(my_iosys);
- break;
- case PIO_MSG_SET_FILL:
- ret = set_fill_handler(my_iosys);
- break;
- case PIO_MSG_EXIT:
- finalize++;
- ret = finalize_handler(my_iosys, index);
- break;
- default:
- PLOG((0, "unknown message received %d", msg));
- return PIO_EINVAL;
- }
+ /* Set the correct iosys depending on the index. */
+ my_iosys = iosys[idx];
- /* If an error was returned by the handler, exit. */
- PLOG((3, "pio_msg_handler2 ret %d msg %d index %d io_rank %d", ret, msg, index, io_rank));
- if (ret)
+ /* Broadcast the msg value to the rest of the IO tasks. */
+ PLOG((3, "about to call msg MPI_Bcast io_comm = %d", io_comm));
+ if ((mpierr = MPI_Bcast(&msg, 1, MPI_INT, 0, io_comm)))
+ return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
+ PLOG((1, "pio_msg_handler2 msg MPI_Bcast complete msg = %d", msg));
+
+ /* Handle the message. This code is run on all IO tasks. */
+ switch (msg)
+ {
+ case PIO_MSG_INQ_TYPE:
+ ret = inq_type_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_FORMAT:
+ ret = inq_format_handler(my_iosys);
+ break;
+ case PIO_MSG_CREATE_FILE:
+ ret = create_file_handler(my_iosys);
+ break;
+ case PIO_MSG_SYNC:
+ ret = sync_file_handler(my_iosys);
+ break;
+ case PIO_MSG_ENDDEF:
+ case PIO_MSG_REDEF:
+ PLOG((2, "pio_msg_handler calling change_def_file_handler"));
+ ret = change_def_file_handler(my_iosys, msg);
+ break;
+ case PIO_MSG_OPEN_FILE:
+ ret = open_file_handler(my_iosys);
+ break;
+ case PIO_MSG_CLOSE_FILE:
+ ret = close_file_handler(my_iosys);
+ break;
+ case PIO_MSG_DELETE_FILE:
+ ret = delete_file_handler(my_iosys);
+ break;
+ case PIO_MSG_RENAME_DIM:
+ ret = rename_dim_handler(my_iosys);
+ break;
+ case PIO_MSG_RENAME_VAR:
+ ret = rename_var_handler(my_iosys);
+ break;
+ case PIO_MSG_RENAME_ATT:
+ ret = rename_att_handler(my_iosys);
+ break;
+ case PIO_MSG_DEL_ATT:
+ ret = delete_att_handler(my_iosys);
+ break;
+ case PIO_MSG_DEF_DIM:
+ ret = def_dim_handler(my_iosys);
+ break;
+ case PIO_MSG_DEF_VAR:
+ ret = def_var_handler(my_iosys);
+ break;
+ case PIO_MSG_DEF_VAR_CHUNKING:
+ ret = def_var_chunking_handler(my_iosys);
+ break;
+ case PIO_MSG_DEF_VAR_FILL:
+ ret = def_var_fill_handler(my_iosys);
+ break;
+ case PIO_MSG_DEF_VAR_ENDIAN:
+ ret = def_var_endian_handler(my_iosys);
+ break;
+ case PIO_MSG_DEF_VAR_DEFLATE:
+ ret = def_var_deflate_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_VAR_ENDIAN:
+ ret = inq_var_endian_handler(my_iosys);
+ break;
+ case PIO_MSG_SET_VAR_CHUNK_CACHE:
+ ret = set_var_chunk_cache_handler(my_iosys);
+ break;
+ case PIO_MSG_GET_VAR_CHUNK_CACHE:
+ ret = get_var_chunk_cache_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ:
+ ret = inq_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_UNLIMDIMS:
+ ret = inq_unlimdims_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_DIM:
+ ret = inq_dim_handler(my_iosys, msg);
+ break;
+ case PIO_MSG_INQ_DIMID:
+ ret = inq_dimid_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_VAR:
+ ret = inq_var_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_VAR_CHUNKING:
+ ret = inq_var_chunking_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_VAR_FILL:
+ ret = inq_var_fill_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_VAR_DEFLATE:
+ ret = inq_var_deflate_handler(my_iosys);
+ break;
+ case PIO_MSG_GET_ATT:
+ ret = att_get_handler(my_iosys);
+ break;
+ case PIO_MSG_PUT_ATT:
+ ret = att_put_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_VARID:
+ ret = inq_varid_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_ATT:
+ ret = inq_att_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_ATTNAME:
+ ret = inq_attname_handler(my_iosys);
+ break;
+ case PIO_MSG_INQ_ATTID:
+ ret = inq_attid_handler(my_iosys);
+ break;
+ case PIO_MSG_GET_VARS:
+ ret = get_vars_handler(my_iosys);
+ break;
+ case PIO_MSG_PUT_VARS:
+ ret = put_vars_handler(my_iosys);
+ break;
+ case PIO_MSG_INITDECOMP_DOF:
+ ret = initdecomp_dof_handler(my_iosys);
+ break;
+ case PIO_MSG_WRITEDARRAYMULTI:
+ ret = write_darray_multi_handler(my_iosys);
+ break;
+ case PIO_MSG_SETFRAME:
+ ret = setframe_handler(my_iosys);
+ break;
+ case PIO_MSG_ADVANCEFRAME:
+ ret = advanceframe_handler(my_iosys);
+ break;
+ case PIO_MSG_READDARRAY:
+ ret = read_darray_handler(my_iosys);
+ break;
+ case PIO_MSG_SETERRORHANDLING:
+ ret = seterrorhandling_handler(my_iosys);
+ break;
+ case PIO_MSG_SET_CHUNK_CACHE:
+ ret = set_chunk_cache_handler(my_iosys);
+ break;
+ case PIO_MSG_GET_CHUNK_CACHE:
+ ret = get_chunk_cache_handler(my_iosys);
+ break;
+ case PIO_MSG_FREEDECOMP:
+ ret = freedecomp_handler(my_iosys);
+ break;
+ case PIO_MSG_SET_FILL:
+ ret = set_fill_handler(my_iosys);
+ break;
+ case PIO_MSG_SETLOGLEVEL:
+ ret = set_loglevel_handler(my_iosys);
+ break;
+ case PIO_MSG_EXIT:
+ finalize++;
+ ret = finalize_handler(my_iosys, idx);
+ break;
+ default:
+ PLOG((0, "unknown message received %d", msg));
+ return PIO_EINVAL;
+ }
+
+ /* If an error was returned by the handler, exit. */
+ PLOG((3, "pio_msg_handler2 ret %d msg %d index %d io_rank %d", ret, msg, idx, io_rank));
+ if (ret)
return pio_err(my_iosys, NULL, ret, __FILE__, __LINE__);
- /* Listen for another msg from the component whose message we
- * just handled. */
- if (!io_rank && !finalize)
- {
- my_iosys = iosys[index];
- PLOG((3, "pio_msg_handler2 about to Irecv index = %d comproot = %d union_comm = %d",
- index, my_iosys->comproot, my_iosys->union_comm));
- if ((mpierr = MPI_Irecv(&(messages[index]), 1, MPI_INT, my_iosys->comproot, MPI_ANY_TAG, my_iosys->union_comm,
- &req[index])))
+ /* Listen for another msg from the component whose message we
+ * just handled. */
+ if (!io_rank && !finalize)
+ {
+ my_iosys = iosys[idx];
+ PLOG((3, "pio_msg_handler2 about to Irecv index = %d comproot = %d union_comm = %d",
+ idx, my_iosys->comproot, my_iosys->union_comm));
+ if ((mpierr = MPI_Irecv(&(messages[idx]), 1, MPI_INT, my_iosys->comproot, MPI_ANY_TAG, my_iosys->union_comm,
+ &req[idx])))
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
- PLOG((3, "pio_msg_handler2 called MPI_Irecv req[%d] = %d", index, req[index]));
- }
-
- PLOG((3, "pio_msg_handler2 done msg = %d open_components = %d",
- msg, open_components));
- msg = PIO_MSG_NULL;
- /* If there are no more open components, exit. */
- if (finalize)
- {
- if (--open_components)
+                PLOG((3, "pio_msg_handler2 called MPI_Irecv req[%d] = %d", idx, req[idx]));
+ }
+
+ PLOG((3, "pio_msg_handler2 done msg = %d open_components = %d",
+ msg, open_components));
+ msg = PIO_MSG_NULL;
+ /* If there are no more open components, exit. */
+ if (finalize)
+ {
+ if (--open_components)
finalize = 0;
- else
+ else
break;
- }
+ }
+ }
+ if (finalize)
+ break;
}
PLOG((3, "returning from pio_msg_handler2"));
diff --git a/src/clib/pio_nc.c b/src/clib/pio_nc.c
index 684e203e2bc..e15d4e36851 100644
--- a/src/clib/pio_nc.c
+++ b/src/clib/pio_nc.c
@@ -461,7 +461,6 @@ PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep)
if (!mpierr)
mpierr = MPI_Bcast(&size_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
}
-
/* Handle MPI errors. */
if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
return check_mpi(NULL, file, mpierr2, __FILE__, __LINE__);
@@ -1231,6 +1230,8 @@ PIOc_inq_att_eh(int ncid, int varid, const char *name, int eh,
mpierr = MPI_Bcast(&xtype_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
if (!mpierr)
mpierr = MPI_Bcast(&len_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+ if (!mpierr)
+ mpierr = MPI_Bcast(&eh, 1, MPI_INT, ios->compmaster, ios->intercomm);
}
/* Handle MPI errors. */
@@ -1250,7 +1251,7 @@ PIOc_inq_att_eh(int ncid, int varid, const char *name, int eh,
if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io)
ierr = nc_inq_att(file->fh, varid, name, xtypep, (size_t *)lenp);
- PLOG((2, "PIOc_inq netcdf call returned %d", ierr));
+ PLOG((2, "PIOc_inq_att netcdf call %s returned %d", name,ierr));
}
/* Broadcast and check the return code. */
diff --git a/src/clib/pio_rearrange.c b/src/clib/pio_rearrange.c
index f78b3fce8df..e1c5b3fe738 100644
--- a/src/clib/pio_rearrange.c
+++ b/src/clib/pio_rearrange.c
@@ -1336,7 +1336,7 @@ box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *compmap
/* Determine whether fill values will be needed. */
if ((ret = determine_fill(ios, iodesc, gdimlen, compmap)))
return pio_err(ios, NULL, ret, __FILE__, __LINE__);
- PLOG((2, "iodesc->needsfill = %d ios->num_iotasks = %d", iodesc->needsfill,
+ PLOG((2, "a iodesc->needsfill = %d ios->num_iotasks = %d", iodesc->needsfill,
ios->num_iotasks));
/* Set the iomaplen in the sc_info msg */
@@ -1636,7 +1636,7 @@ box_rearrange_create_with_holes(iosystem_desc_t *ios, int maplen,
/* Determine whether fill values will be needed. */
if ((ret = determine_fill(ios, iodesc, gdimlen, compmap)))
return pio_err(ios, NULL, ret, __FILE__, __LINE__);
- PLOG((2, "iodesc->needsfill = %d ios->num_iotasks = %d", iodesc->needsfill,
+ PLOG((2, "b iodesc->needsfill = %d ios->num_iotasks = %d", iodesc->needsfill,
ios->num_iotasks));
/* Set up receive counts and displacements to for an AllToAll
@@ -1955,14 +1955,14 @@ default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc)
}
else
{
- int taskratio = ios->num_comptasks / ios->num_iotasks;
+ int taskratio = max(1,ios->num_comptasks / ios->num_iotasks);
key = max(1, ios->comp_rank % taskratio + 1);
color = min(ios->num_iotasks - 1, ios->comp_rank / taskratio);
}
PLOG((3, "key = %d color = %d", key, color));
/* Create new communicators. */
- if ((mpierr = MPI_Comm_split(ios->comp_comm, color, key, &iodesc->subset_comm)))
+ if ((mpierr = MPI_Comm_split(ios->union_comm, color, key, &iodesc->subset_comm)))
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
return PIO_NOERR;
@@ -2069,6 +2069,10 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__);
rcnt = 1;
+
+ if(ios->async)
+ iodesc->ndof = 0;
+
}
/* Allocate space to hold count of data to be sent in pio_swapm(). */
@@ -2085,7 +2089,7 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
/* Determine scount[0], the number of data elements in the
* computation task that are to be written, by looking at
* compmap. */
- for (i = 0; i < maplen; i++)
+ for (i = 0; i < iodesc->ndof; i++)
{
/* turns out this can be allowed in some cases
pioassert(compmap[i]>=0 && compmap[i]<=totalgridsize, "Compmap value out of bounds",
@@ -2101,7 +2105,7 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__);
j = 0;
- for (i = 0; i < maplen; i++)
+ for (i = 0; i < iodesc->ndof; i++)
if (compmap[i] > 0)
iodesc->sindex[j++] = i;
@@ -2111,6 +2115,7 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
MPI_INT, 0, iodesc->subset_comm)))
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
+
iodesc->llen = 0;
int rdispls[ntasks];
@@ -2255,6 +2260,7 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
thisgridsize[0] = totalgridsize / ios->num_iotasks;
thisgridmax[0] = thisgridsize[0];
int xtra = totalgridsize - thisgridsize[0] * ios->num_iotasks;
+
PLOG((4, "xtra %d", xtra));
for (nio = 0; nio < ios->num_iotasks; nio++)
@@ -2307,10 +2313,11 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
}
/* Allocate and initialize a grid to fill in missing values. ??? */
- PIO_Offset grid[thisgridsize[ios->io_rank]];
- PLOG((4, "thisgridsize[ios->io_rank] %d", thisgridsize[ios->io_rank]));
- for (i = 0; i < thisgridsize[ios->io_rank]; i++)
- grid[i] = 0;
+ PLOG((2, "thisgridsize[ios->io_rank] %d", thisgridsize[ios->io_rank]));
+ PIO_Offset *grid;
+ if (!(grid = calloc(thisgridsize[ios->io_rank], sizeof(PIO_Offset))))
+ return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__);
+
int cnt = 0;
for (i = 0; i < thisgridsize[ios->io_rank]; i++)
@@ -2353,6 +2360,8 @@ subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap,
return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__);
}
}
+ free(grid);
+
maxregions = 0;
iodesc->maxfillregions = 0;
if (myfillgrid)
diff --git a/src/clib/pio_spmd.c b/src/clib/pio_spmd.c
index 4d9418037e7..090e08ff82d 100644
--- a/src/clib/pio_spmd.c
+++ b/src/clib/pio_spmd.c
@@ -124,10 +124,12 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty
if (fc->max_pend_req == 0)
{
/* Call the MPI alltoall without flow control. */
- PLOG((3, "Calling MPI_Alltoallw without flow control."));
+ PLOG((3, "Calling MPI_Alltoallw without flow control. comm=%d my_rank=%d",comm,my_rank));
if ((mpierr = MPI_Alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf,
- recvcounts, rdispls, recvtypes, comm)))
+ recvcounts, rdispls, recvtypes, comm))){
+ PLOG((3, "Called MPI_Alltoallw without flow control. mpierr %d",mpierr));
return check_mpi(NULL, NULL, mpierr, __FILE__, __LINE__);
+ }
return PIO_NOERR;
}
diff --git a/src/clib/pioc.c b/src/clib/pioc.c
index 99402dce88d..b9decb4c0c4 100644
--- a/src/clib/pioc.c
+++ b/src/clib/pioc.c
@@ -539,10 +539,10 @@ PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int ma
char rearranger_present = rearranger ? true : false;
char iostart_present = iostart ? true : false;
char iocount_present = iocount ? true : false;
-
- if (ios->compmaster == MPI_ROOT)
+ if (ios->compmaster == MPI_ROOT){
+        PLOG((1, "about to send msg %d union_comm %d",msg,ios->union_comm));
mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
-
+ }
if (!mpierr)
mpierr = MPI_Bcast(&iosysid, 1, MPI_INT, ios->compmaster, ios->intercomm);
if (!mpierr)
@@ -579,6 +579,10 @@ PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int ma
return check_mpi(ios, NULL, mpierr2, __FILE__, __LINE__);
if (mpierr)
return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+
+ if(rearranger && (*rearranger != ios->default_rearranger))
+ return pio_err(ios, NULL, PIO_EBADREARR, __FILE__,__LINE__);
+
}
/* Allocate space for the iodesc info. This also allocates the
@@ -920,7 +924,9 @@ PIOc_InitDecomp_bc(int iosysid, int pio_type, int ndims, const int *gdimlen,
*
* @param comp_comm the MPI_Comm of the compute tasks.
* @param num_iotasks the number of io tasks to use.
- * @param stride the offset between io tasks in the comp_comm.
+ * @param stride the offset between io tasks in the comp_comm. The mod
+ * operator is used when computing the IO tasks with the formula:
+ * ios->ioranks[i] = (base + i * ustride) % ios->num_comptasks
.
* @param base the comp_comm index of the first io task.
* @param rearr the rearranger to use by default, this may be
* overriden in the PIO_init_decomp(). The rearranger is not used
@@ -1128,6 +1134,154 @@ PIOc_Init_Intracomm_from_F90(int f90_comp_comm,
return ret;
}
+/**
+ * Interface to call from pio_init from fortran.
+ *
+ * @param f90_world_comm the incoming communicator which includes all tasks
+ * @param num_io_procs the number of IO tasks
+ * @param io_proc_list the rank of io tasks in f90_world_comm
+ * @param component_count the number of computational components
+ * used an iosysid will be generated for each
+ * @param procs_per_component the number of procs in each computational component
+ * @param flat_proc_list a 1D array of size
+ * component_count*maxprocs_per_component with rank in f90_world_comm
+ * @param f90_io_comm the io_comm handle to be returned to fortran
+ * @param f90_comp_comm the comp_comm handle to be returned to fortran
+ * @param rearranger currently only PIO_REARRANGE_BOX is supported
+ * @param iosysidp pointer to array of length component_count that
+ * gets the iosysid for each component.
+ * @returns 0 for success, error code otherwise
+ * @ingroup PIO_init_c
+ * @author Jim Edwards
+ */
+int
+PIOc_init_async_from_F90(int f90_world_comm,
+ int num_io_procs,
+ int *io_proc_list,
+ int component_count,
+ int *procs_per_component,
+ int *flat_proc_list,
+ int *f90_io_comm,
+ int *f90_comp_comm,
+ int rearranger,
+ int *iosysidp)
+
+{
+ int ret = PIO_NOERR;
+ MPI_Comm io_comm, comp_comm;
+ int maxprocs_per_component=0;
+
+ for(int i=0; i< component_count; i++)
+ maxprocs_per_component = (procs_per_component[i] > maxprocs_per_component) ? procs_per_component[i] : maxprocs_per_component;
+
+ int **proc_list = (int **) malloc(sizeof(int *) *component_count);
+
+ for(int i=0; i< component_count; i++){
+ proc_list[i] = (int *) malloc(sizeof(int) * maxprocs_per_component);
+ for(int j=0;jcomm_type,
+ rearr_opts->fcd,
+ rearr_opts->comp2io.hs,
+ rearr_opts->comp2io.isend,
+ rearr_opts->comp2io.max_pend_req,
+ rearr_opts->io2comp.hs,
+ rearr_opts->io2comp.isend,
+ rearr_opts->io2comp.max_pend_req);
+ }
+*/
+ return ret;
+}
+
+/**
+ * Interface to call from pio_init from fortran.
+ *
+ * @param f90_world_comm the incoming communicator which includes all tasks
+ * @param component_count the number of computational components
+ * used an iosysid will be generated for each and a comp_comm is expected
+ * for each
+ * @param f90_comp_comms the comp_comm handles passed from fortran
+ * @param f90_io_comm the io_comm passed from fortran
+ * @param rearranger currently only PIO_REARRANGE_BOX is supported
+ * @param iosysidp pointer to array of length component_count that
+ * gets the iosysid for each component.
+ * @returns 0 for success, error code otherwise
+ * @ingroup PIO_init_c
+ * @author Jim Edwards
+ */
+int
+PIOc_init_async_comms_from_F90(int f90_world_comm,
+ int component_count,
+ int *f90_comp_comms,
+ int f90_io_comm,
+ int rearranger,
+ int *iosysidp)
+
+{
+ int ret = PIO_NOERR;
+ MPI_Comm comp_comm[component_count];
+ MPI_Comm io_comm;
+
+ for(int i=0; icomm_type,
+ rearr_opts->fcd,
+ rearr_opts->comp2io.hs,
+ rearr_opts->comp2io.isend,
+ rearr_opts->comp2io.max_pend_req,
+ rearr_opts->io2comp.hs,
+ rearr_opts->io2comp.isend,
+ rearr_opts->io2comp.max_pend_req);
+ }
+*/
+ return ret;
+}
+
/**
* Send a hint to the MPI-IO library.
*
@@ -1441,10 +1595,11 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
int my_io_proc_list[num_io_procs]; /* List of processors in IO component. */
int mpierr; /* Return code from MPI functions. */
int ret; /* Return code. */
+// int world_size;
/* Check input parameters. Only allow box rearranger for now. */
if (num_io_procs < 1 || component_count < 1 || !num_procs_per_comp || !iosysidp ||
- (rearranger != PIO_REARR_BOX))
+ (rearranger != PIO_REARR_BOX && rearranger != PIO_REARR_SUBSET))
return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__);
my_proc_list = (int**) malloc(component_count * sizeof(int*));
@@ -1472,6 +1627,10 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
if ((ret = MPI_Comm_rank(world, &my_rank)))
return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ /* Get size of world. */
+// if ((ret = MPI_Comm_size(world, &world_size)))
+// return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+
/* Is this process in the IO component? */
int pidx;
for (pidx = 0; pidx < num_io_procs; pidx++)
@@ -1552,6 +1711,9 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
/* Get pointer to current iosys. */
my_iosys = iosys[cmp];
+ /* The rank of the computation leader in the union comm. */
+ my_iosys->comproot = num_io_procs;
+
/* Initialize some values. */
my_iosys->io_comm = MPI_COMM_NULL;
my_iosys->comp_comm = MPI_COMM_NULL;
@@ -1569,10 +1731,6 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
my_iosys->rearr_opts.comm_type = PIO_REARR_COMM_COLL;
my_iosys->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE;
- /* The rank of the computation leader in the union comm. */
- my_iosys->comproot = num_io_procs;
- PLOG((3, "my_iosys->comproot = %d", my_iosys->comproot));
-
/* We are not providing an info object. */
my_iosys->info = MPI_INFO_NULL;
@@ -1599,11 +1757,40 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
/* Add proc numbers from computation component. */
for (int p = 0; p < num_procs_per_comp[cmp]; p++)
- {
proc_list_union[p + num_io_procs] = my_proc_list[cmp][p];
+
+// qsort(proc_list_union, num_procs_per_comp[cmp] + num_io_procs, sizeof(int), compare_ints);
+ for (int p = 0; p < num_procs_per_comp[cmp] + num_io_procs; p++)
PLOG((3, "p %d num_io_procs %d proc_list_union[p + num_io_procs] %d ",
- p, num_io_procs, proc_list_union[p + num_io_procs]));
+ p, num_io_procs, proc_list_union[p]));
+
+ /* The rank of the computation leader in the union comm. First task which is not an io task */
+ my_iosys->ioroot = 0;
+/*
+ my_iosys->comproot = -1;
+ my_iosys->ioroot = -1;
+ for (int p = 0; p < num_procs_per_comp[cmp] + num_io_procs; p++)
+ {
+ bool ioproc = false;
+ for (int q = 0; q < num_io_procs; q++)
+ {
+ if (proc_list_union[p] == my_io_proc_list[q])
+ {
+ ioproc = true;
+ my_iosys->ioroot = proc_list_union[p];
+ break;
+ }
+ }
+ if ( !ioproc && my_iosys->comproot < 0)
+ {
+ my_iosys->comproot = proc_list_union[p];
+ }
}
+*/
+
+ PLOG((3, "my_iosys->comproot = %d ioroot = %d", my_iosys->comproot, my_iosys->ioroot));
+
+
/* Allocate space for computation task ranks. */
if (!(my_iosys->compranks = calloc(my_iosys->num_comptasks, sizeof(int))))
@@ -1683,8 +1870,7 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
if (!(my_iosys->ioranks = calloc(my_iosys->num_iotasks, sizeof(int))))
return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__);
for (int i = 0; i < my_iosys->num_iotasks; i++)
- my_iosys->ioranks[i] = my_io_proc_list[i];
- my_iosys->ioroot = my_iosys->ioranks[0];
+ my_iosys->ioranks[i] = i;
/* All the processes in this component, and the IO component,
* are part of the union_comm. */
@@ -1693,6 +1879,7 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
PLOG((3, "created union comm for cmp %d my_iosys->union_comm %d", cmp, my_iosys->union_comm));
+
if (in_io || in_cmp)
{
if ((ret = MPI_Comm_rank(my_iosys->union_comm, &my_iosys->union_rank)))
@@ -1709,18 +1896,18 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
PLOG((3, "my_iosys->io_comm = %d", my_iosys->io_comm));
/* Create the intercomm from IO to computation component. */
PLOG((3, "about to create intercomm for IO component to cmp = %d "
- "my_iosys->io_comm = %d", cmp, my_iosys->io_comm));
+ "my_iosys->io_comm = %d comproot %d", cmp, my_iosys->io_comm, my_iosys->comproot));
if ((ret = MPI_Intercomm_create(my_iosys->io_comm, 0, my_iosys->union_comm,
- my_iosys->num_iotasks, cmp, &my_iosys->intercomm)))
+ my_iosys->comproot, cmp, &my_iosys->intercomm)))
return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
}
else
{
/* Create the intercomm from computation component to IO component. */
- PLOG((3, "about to create intercomm for cmp = %d my_iosys->comp_comm = %d", cmp,
- my_iosys->comp_comm));
+ PLOG((3, "about to create intercomm for cmp = %d my_iosys->comp_comm = %d ioroot %d", cmp,
+ my_iosys->comp_comm, my_iosys->ioroot));
if ((ret = MPI_Intercomm_create(my_iosys->comp_comm, 0, my_iosys->union_comm,
- 0, cmp, &my_iosys->intercomm)))
+ my_iosys->ioroot, cmp, &my_iosys->intercomm)))
return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
}
PLOG((3, "intercomm created for cmp = %d", cmp));
@@ -1794,6 +1981,181 @@ PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list,
return PIO_NOERR;
}
+/**
+ * Library initialization used when IO tasks are distinct from compute
+ * tasks.
+ *
+ * This is a collective call. Input parameters are read on
+ * each comp_rank=0 and on io_rank=0, values on other tasks are ignored.
+ * This variation of PIO_init uses tasks in io_comm to handle IO,
+ * these tasks do not return from this call. Instead they go to an internal loop
+ * and wait to receive further instructions from the computational
+ * tasks.
+ *
+ * Sequence of Events to do Async I/O
+ * ----------------------------------
+ *
+ * Here is the sequence of events that needs to occur when an IO
+ * operation is called from the collection of compute tasks. I'm
+ * going to use pio_put_var because write_darray has some special
+ * characteristics that make it a bit more complicated...
+ *
+ * Compute tasks call pio_put_var with an integer argument
+ *
+ * The MPI_Send sends a message from comp_rank=0 to io_rank=0 on
+ * union_comm (a comm defined as the union of io and compute tasks)
+ * msg is an integer which indicates the function being called, in
+ * this case the msg is PIO_MSG_PUT_VAR_INT
+ *
+ * The iotasks now know what additional arguments they should expect
+ * to receive from the compute tasks, in this case a file handle, a
+ * variable id, the length of the array and the array itself.
+ *
+ * The iotasks now have the information they need to complete the
+ * operation and they call the pio_put_var routine. (In pio1 this bit
+ * of code is in pio_get_put_callbacks.F90.in)
+ *
+ * After the netcdf operation is completed (in the case of an inq or
+ * get operation) the result is communicated back to the compute
+ * tasks.
+ *
+ * @param world the communicator containing all the available tasks.
+ *
+ * @param component_count number of computational components
+ *
+ * @param comp_comm an array of size component_count which are the defined
+ * comms of each component - comp_comm should be MPI_COMM_NULL on tasks outside
+ * the tasks of each comm; these comms may overlap.
+ *
+ * @param io_comm a communicator for the IO group, tasks in this comm do not
+ * return from this call.
+ *
+ * @param rearranger the default rearranger to use for decompositions
+ * in this IO system. Both PIO_REARR_BOX and PIO_REARR_SUBSET are
+ * accepted by the input check; PIO_REARR_BOX is the recommended
+ * rearranger for async.
+ *
+ * @param iosysidp pointer to array of length component_count that
+ * gets the iosysid for each component.
+ *
+ * @return PIO_NOERR on success, error code otherwise.
+ * @ingroup PIO_init_c
+ * @author Jim Edwards
+ */
+int
+PIOc_init_async_from_comms(MPI_Comm world, int component_count, MPI_Comm *comp_comm,
+ MPI_Comm io_comm, int rearranger, int *iosysidp)
+{
+ int my_rank; /* Rank of this task. */
+ int **my_proc_list; /* Array of arrays of procs for comp components. */
+ int *io_proc_list; /* List of processors in IO component. */
+ int *num_procs_per_comp; /* List of number of tasks in each component */
+ int num_io_procs = 0;
+ int ret; /* Return code. */
+#ifdef USE_MPE
+ bool in_io = false;
+#endif /* USE_MPE */
+
+#ifdef USE_MPE
+ pio_start_mpe_log(INIT);
+#endif /* USE_MPE */
+
+ /* Check input parameters. Only allow box rearranger for now. */
+ if (component_count < 1 || !comp_comm || !iosysidp ||
+ (rearranger != PIO_REARR_BOX && rearranger != PIO_REARR_SUBSET))
+ return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__);
+
+ /* Turn on the logging system for PIO. */
+ if ((ret = pio_init_logging()))
+ return pio_err(NULL, NULL, ret, __FILE__, __LINE__);
+ PLOG((1, "PIOc_init_async_from_comms component_count = %d", component_count));
+
+ /* Get num_io_procs from io_comm, share with world */
+ if (io_comm != MPI_COMM_NULL)
+ {
+#ifdef USE_MPE
+ in_io = true;
+#endif /* USE_MPE */
+ if ((ret = MPI_Comm_size(io_comm, &num_io_procs)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ }
+ if ((ret = MPI_Allreduce(MPI_IN_PLACE, &num_io_procs, 1, MPI_INT, MPI_MAX, world)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+
+ /* Get io_proc_list from io_comm, share with world */
+ io_proc_list = (int*) calloc(num_io_procs, sizeof(int));
+ if (io_comm != MPI_COMM_NULL)
+ {
+ int my_io_rank;
+ if ((ret = MPI_Comm_rank(io_comm, &my_io_rank)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ if ((ret = MPI_Comm_rank(world, &my_rank)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ io_proc_list[my_io_rank] = my_rank;
+ component_count = 0;
+ }
+ if ((ret = MPI_Allreduce(MPI_IN_PLACE, io_proc_list, num_io_procs, MPI_INT, MPI_MAX, world)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+
+ /* Get num_procs_per_comp for each comp and share with world */
+ if ((ret = MPI_Allreduce(MPI_IN_PLACE, &(component_count), 1, MPI_INT, MPI_MAX, world)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+
+ num_procs_per_comp = (int *) malloc(component_count * sizeof(int));
+
+ for(int cmp=0; cmp < component_count; cmp++)
+ {
+ num_procs_per_comp[cmp] = 0;
+ if(comp_comm[cmp] != MPI_COMM_NULL)
+ if ((ret = MPI_Comm_size(comp_comm[cmp], &(num_procs_per_comp[cmp]))))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ if ((ret = MPI_Allreduce(MPI_IN_PLACE, &(num_procs_per_comp[cmp]), 1, MPI_INT, MPI_MAX, world)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+
+ }
+
+ /* Get proc list for each comp and share with world */
+ my_proc_list = (int**) malloc(component_count * sizeof(int*));
+
+ for(int cmp=0; cmp < component_count; cmp++)
+ {
+ if (!(my_proc_list[cmp] = (int *) malloc(num_procs_per_comp[cmp] * sizeof(int))))
+ return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__);
+ for(int i = 0; i < num_procs_per_comp[cmp]; i++)
+ my_proc_list[cmp][i] = 0;
+ if(comp_comm[cmp] != MPI_COMM_NULL){
+ int my_comp_rank;
+ if ((ret = MPI_Comm_rank(comp_comm[cmp], &my_comp_rank)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ if ((ret = MPI_Comm_rank(world, &my_rank)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ my_proc_list[cmp][my_comp_rank] = my_rank;
+ }
+ if ((ret = MPI_Allreduce(MPI_IN_PLACE, my_proc_list[cmp], num_procs_per_comp[cmp],
+ MPI_INT, MPI_MAX, world)))
+ return check_mpi(NULL, NULL, ret, __FILE__, __LINE__);
+ }
+
+ if((ret = PIOc_init_async(world, num_io_procs, io_proc_list, component_count,
+ num_procs_per_comp, my_proc_list, NULL, NULL, rearranger,
+ iosysidp)))
+ return pio_err(NULL, NULL, ret, __FILE__, __LINE__);
+
+ for(int cmp=0; cmp < component_count; cmp++)
+ free(my_proc_list[cmp]);
+ free(my_proc_list);
+ free(io_proc_list);
+ free(num_procs_per_comp);
+
+#ifdef USE_MPE
+ if (!in_io)
+ pio_stop_mpe_log(INIT, __func__);
+#endif /* USE_MPE */
+
+ PLOG((2, "successfully done with PIOc_init_async_from_comms"));
+ return PIO_NOERR;
+}
+
/**
* Set the target blocksize for the box rearranger.
*
diff --git a/src/clib/pioc_sc.c b/src/clib/pioc_sc.c
index dbe2db59278..f17d9d3135f 100644
--- a/src/clib/pioc_sc.c
+++ b/src/clib/pioc_sc.c
@@ -11,7 +11,7 @@
/** The default target blocksize in bytes for each io task when the box
* rearranger is used. */
-#define DEFAULT_BLOCKSIZE 1024
+#define DEFAULT_BLOCKSIZE 1048576
/** The target blocksize in bytes for each io task when the box
* rearranger is used. */
diff --git a/src/clib/pioc_support.c b/src/clib/pioc_support.c
index 8a681136d86..cdd6cf2c261 100644
--- a/src/clib/pioc_support.c
+++ b/src/clib/pioc_support.c
@@ -114,10 +114,8 @@ PIOc_strerror(int pioerr, char *errmsg)
}
else if (pioerr == PIO_NOERR)
strcpy(errmsg, "No error");
-#if defined(_NETCDF)
else if (pioerr <= NC2_ERR && pioerr >= NC4_LAST_ERROR) /* NetCDF error? */
strncpy(errmsg, nc_strerror(pioerr), PIO_MAX_NAME);
-#endif /* endif defined(_NETCDF) */
#if defined(_PNETCDF)
else if (pioerr > PIO_FIRST_ERROR_CODE) /* pNetCDF error? */
strncpy(errmsg, ncmpi_strerror(pioerr), PIO_MAX_NAME);
@@ -132,6 +130,9 @@ PIOc_strerror(int pioerr, char *errmsg)
case PIO_EVARDIMMISMATCH:
strcpy(errmsg, "Variable dim mismatch in multivar call");
break;
+ case PIO_EBADREARR:
+ strcpy(errmsg, "Rearranger mismatch in async mode");
+ break;
default:
strcpy(errmsg, "Unknown Error: Unrecognized error code");
}
@@ -163,11 +164,70 @@ PIOc_set_log_level(int level)
#if PIO_ENABLE_LOGGING
/* Set the log level. */
pio_log_level = level;
-
+ if(!LOG_FILE)
+ pio_init_logging();
+ PLOG((0,"set loglevel to %d", level));
#endif /* PIO_ENABLE_LOGGING */
return PIO_NOERR;
}
+/**
+ * Set the logging level value from the root compute task on all tasks
+ * if PIO was built with
+ * PIO_ENABLE_LOGGING. Set to -1 for nothing, 0 for errors only, 1 for
+ * important logging, and so on. Log levels below 1 are only printed
+ * on the io/component root.
+ *
+ * A log file is also produced for each task. The file is called
+ * pio_log_X.txt, where X is the (0-based) task number.
+ *
+ * If the library is not built with logging, this function does
+ * nothing.
+ *
+ * @param iosysid the IO system ID
+ * @param level the logging level, 0 for errors only, 5 for max
+ * verbosity.
+ * @returns 0 on success, error code otherwise.
+ * @author Jim Edwards
+ */
+int PIOc_set_global_log_level(int iosysid, int level)
+{
+#if PIO_ENABLE_LOGGING
+ iosystem_desc_t *ios;
+ int mpierr=0, mpierr2;
+
+ if (!(ios = pio_get_iosystem_from_id(iosysid)))
+ return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__);
+
+ if (ios->async)
+ {
+ if(!ios->ioproc)
+ {
+ int msg = PIO_MSG_SETLOGLEVEL;
+ if (ios->compmaster == MPI_ROOT)
+ mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+ if (!mpierr)
+ mpierr = MPI_Bcast(&iosysid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+ }
+
+ }
+ if (!mpierr)
+ mpierr = MPI_Bcast(&level, 1, MPI_INT, ios->comproot, ios->union_comm);
+
+ /* Handle MPI errors. */
+ if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+ check_mpi(ios, NULL, mpierr2, __FILE__, __LINE__);
+ if (mpierr)
+ return check_mpi(ios, NULL, mpierr, __FILE__, __LINE__);
+
+
+ /* Set the log level on all tasks */
+ PIOc_set_log_level(level);
+ PLOG((level, "set_global_log_level, level = %d", level));
+#endif
+ return PIO_NOERR;
+}
+
#ifdef USE_MPE
@@ -302,7 +362,7 @@ pio_init_logging(void)
#endif /* USE_MPE */
#if PIO_ENABLE_LOGGING
- if (!LOG_FILE)
+ if (!LOG_FILE && pio_log_level > 0)
{
char log_filename[PIO_MAX_NAME];
int mpierr;
@@ -318,7 +378,7 @@ pio_init_logging(void)
pio_log_ref_cnt = 1;
}
- else
+ else if(LOG_FILE)
{
pio_log_ref_cnt++;
}
@@ -628,6 +688,7 @@ check_netcdf2(iosystem_desc_t *ios, file_desc_t *file, int status,
MPI_Bcast(&status, 1, MPI_INT, ios->ioroot, ios->my_comm);
else if (file)
MPI_Bcast(&status, 1, MPI_INT, file->iosystem->ioroot, file->iosystem->my_comm);
+ PLOG((2, "check_netcdf2 status returned = %d", status));
}
/* For PIO_RETURN_ERROR, just return the error. */
@@ -2406,8 +2467,8 @@ inq_file_metadata(file_desc_t *file, int ncid, int iotype, int *nvars,
{
if (d == 0){
(*rec_var)[v] = 1;
- break;
- }
+ break;
+ }
else
return pio_err(NULL, file, PIO_EINVAL, __FILE__, __LINE__);
@@ -2655,6 +2716,8 @@ PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filename,
&nvars, &rec_var, &pio_type,
&pio_type_size, &mpi_type,
&mpi_type_size, &ndims);
+ PLOG((2, "PIOc_openfile_retry:nc_open for 4C filename = %s mode = %d "
+ "ierr = %d", filename, mode, ierr));
}
break;
#endif /* _NETCDF4 */
@@ -2668,6 +2731,8 @@ PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filename,
&nvars, &rec_var, &pio_type,
&pio_type_size, &mpi_type,
&mpi_type_size, &ndims);
+ PLOG((2, "PIOc_openfile_retry:nc_open for classic filename = %s mode = %d "
+ "ierr = %d", filename, mode, ierr));
}
break;
@@ -2942,15 +3007,17 @@ pioc_change_def(int ncid, int is_enddef)
{
int msg = is_enddef ? PIO_MSG_ENDDEF : PIO_MSG_REDEF;
if (ios->compmaster == MPI_ROOT)
+ {
+ PLOG((2, "pioc_change_def request sent"));
mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
-
+ }
if (!mpierr)
mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm);
PLOG((3, "pioc_change_def ncid = %d mpierr = %d", ncid, mpierr));
}
/* Handle MPI errors. */
- PLOG((3, "pioc_change_def handling MPI errors"));
+ PLOG((3, "pioc_change_def handling MPI errors my_comm=%d", ios->my_comm));
if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
check_mpi(NULL, file, mpierr2, __FILE__, __LINE__);
if (mpierr)
diff --git a/src/flib/Makefile.am b/src/flib/Makefile.am
index 38e0cbfbcd2..33e1f96b364 100644
--- a/src/flib/Makefile.am
+++ b/src/flib/Makefile.am
@@ -17,7 +17,7 @@ libpiof_la_LIBADD = libpio_nf.la libpio_kinds.la libpio_support.la \
libpiodarray.la libpionfatt.la libpionfget.la libpionfput.la \
libpiolib_mod.la
-if NETCDF_INTEGRATION
+if BUILD_NCINT
libpiof_la_LIBADD += libncint_mod.la
endif
libpiof_la_LIBADD += libpio.la
@@ -28,7 +28,7 @@ libpiof_la_SOURCES = pio_types.F90
noinst_LTLIBRARIES = libpio_kinds.la libpio_types.la \
libpio_support.la libpio_nf.la libpiodarray.la libpionfatt.la \
libpionfget.la libpionfput.la libpiolib_mod.la libpio.la
-if NETCDF_INTEGRATION
+if BUILD_NCINT
noinst_LTLIBRARIES += libncint_mod.la
endif
@@ -72,7 +72,7 @@ pio.mod: pio.lo
# Some mod files depend on other mod files.
DEPEND_FILES = pio_kinds.mod piolib_mod.mod pio_types.mod piodarray.mod \
pio_nf.mod pionfatt_mod.mod pionfget_mod.mod pionfput_mod.mod pio_support.mod
-if NETCDF_INTEGRATION
+if BUILD_NCINT
DEPEND_FILES += ncint_mod.mod
endif
pio.lo: $(DEPEND_FILES)
@@ -81,7 +81,7 @@ pio.lo: $(DEPEND_FILES)
MODFILES = pio_kinds.mod pio_types.mod pio_support.mod pio_nf.mod \
piodarray.mod pionfatt_mod.mod pionfget_mod.mod pionfput_mod.mod \
piolib_mod.mod pio.mod
-if NETCDF_INTEGRATION
+if BUILD_NCINT
MODFILES += ncint_mod.mod
endif
BUILT_SOURCES = $(MODFILES)
diff --git a/src/flib/ncint_mod.F90 b/src/flib/ncint_mod.F90
index 483633a2ebc..a76a7f43b3f 100644
--- a/src/flib/ncint_mod.F90
+++ b/src/flib/ncint_mod.F90
@@ -28,9 +28,9 @@ module ncint_mod
#ifdef NO_MPIMOD
include 'mpif.h' ! _EXTERNAL
#endif
-
+ integer, parameter :: NF_PIO=64
public :: nf_def_iosystem, nf_free_iosystem, nf_def_decomp, nf_free_decomp, &
- nf_put_vard_int
+ nf_put_vard_int, NF_PIO
contains
diff --git a/src/flib/pio.F90 b/src/flib/pio.F90
index 57e25bef696..f89fa905aef 100644
--- a/src/flib/pio.F90
+++ b/src/flib/pio.F90
@@ -26,7 +26,7 @@ module pio
#ifdef NETCDF_INTEGRATION
use ncint_mod, only: nf_def_iosystem, nf_free_iosystem, &
- nf_def_decomp, nf_free_decomp, nf_put_vard_int
+ nf_def_decomp, nf_free_decomp, nf_put_vard_int, NF_PIO
#endif
use pio_types, only : io_desc_t, file_desc_t, var_desc_t, iosystem_desc_t, &
diff --git a/src/flib/pio_nf.F90 b/src/flib/pio_nf.F90
index 21e92ad8db4..f08ff754cc9 100644
--- a/src/flib/pio_nf.F90
+++ b/src/flib/pio_nf.F90
@@ -301,7 +301,8 @@ module pio_nf
interface pio_set_log_level
module procedure &
- set_log_level
+ set_log_level , &
+ set_global_log_level
end interface pio_set_log_level
interface pio_strerror
@@ -858,6 +859,32 @@ end function PIOc_set_log_level
ierr = PIOc_set_log_level(log_level)
end function set_log_level
+ !>
+ !! @public
+ !! @ingroup PIO_set_log_level
+ !! Sets the logging level globally from comp root. Only takes effect if PIO was built with
+ !! PIO_ENABLE_LOGGING=On
+ !!
+ !! @param iosys a defined pio system descriptor, see PIO_types
+ !! @param log_level the logging level.
+ !! @retval ierr @copydoc error_return
+ !! @author Jim Edwards
+ !<
+ integer function set_global_log_level(iosys, log_level) result(ierr)
+ use pio_types, only : iosystem_desc_t
+ type(iosystem_desc_t), intent(in) :: iosys
+ integer, intent(in) :: log_level
+ interface
+ integer(C_INT) function PIOc_set_global_log_level(iosysid, log_level) &
+ bind(C, name="PIOc_set_global_log_level")
+ use iso_c_binding
+ integer(C_INT), value :: iosysid
+ integer(C_INT), value :: log_level
+ end function PIOc_set_global_log_level
+ end interface
+ ierr = PIOc_set_global_log_level(iosys%iosysid, log_level)
+ end function set_global_log_level
+
!>
!! @public
!! @ingroup PIO_strerror
diff --git a/src/flib/pio_types.F90 b/src/flib/pio_types.F90
index 999afadf324..dad1b499443 100644
--- a/src/flib/pio_types.F90
+++ b/src/flib/pio_types.F90
@@ -125,35 +125,29 @@ module pio_types
!! The type of variable(s) associated with this iodesc.
!! @copydoc PIO_kinds
-#ifdef _PNETCDF
-#include
- integer, public, parameter :: PIO_64BIT_DATA = nf_64bit_data !< CDF5 format
-#else
-#include
- integer, public, parameter :: PIO_64BIT_DATA = 0 !< CDF5 format
-#endif
- integer, public, parameter :: PIO_num_OST = 16 !< num ost
- integer, public, parameter :: PIO_global = nf_global !< global atts
- integer, public, parameter :: PIO_unlimited = nf_unlimited !< unlimited dimension
- integer, public, parameter :: PIO_double = nf_double !< double type
- integer, public, parameter :: PIO_real = nf_real !< real type
- integer, public, parameter :: PIO_int = nf_int !< int type
- integer, public, parameter :: PIO_short = nf_short !< short int type
- integer, public, parameter :: PIO_char = nf_char !< char type
- integer, public, parameter :: PIO_noerr = nf_noerr !< no error
- integer, public, parameter :: PIO_WRITE = nf_write !< read-write
- integer, public, parameter :: PIO_nowrite = nf_nowrite !< read-only
- integer, public, parameter :: PIO_CLOBBER = nf_clobber !< clobber existing file
- integer, public, parameter :: PIO_NOCLOBBER = nf_NOclobber !< do not clobber existing file
- integer, public, parameter :: PIO_FILL = nf_fill !< use fill values
- integer, public, parameter :: PIO_NOFILL = nf_nofill !< do not use fill values
- integer, public, parameter :: PIO_MAX_NAME = nf_max_name !< max name len
- integer, public, parameter :: PIO_MAX_VAR_DIMS = min(6,nf_max_var_dims) !< max dims for a var
- integer, public, parameter :: PIO_64BIT_OFFSET = nf_64bit_offset !< 64bit offset format
- integer, public, parameter :: PIO_FILL_INT = nf_fill_int; !< int fill value
- real, public, parameter :: PIO_FILL_FLOAT = nf_fill_float; !< float fill value
-
- double precision, public, parameter :: PIO_FILL_DOUBLE = nf_fill_double; !< double fill value
+ integer, public, parameter :: PIO_64BIT_DATA = 32 !< CDF5 format
+ integer, public, parameter :: PIO_num_OST = 16 !< num ost
+ integer, public, parameter :: PIO_global = 0 !< global atts
+ integer, public, parameter :: PIO_unlimited = 0 !< unlimited dimension
+ integer, public, parameter :: PIO_double = 6 !< double type
+ integer, public, parameter :: PIO_real = 5 !< real type
+ integer, public, parameter :: PIO_int = 4 !< int type
+ integer, public, parameter :: PIO_short = 3 !< short int type
+ integer, public, parameter :: PIO_char = 2 !< char type
+ integer, public, parameter :: PIO_noerr = 0 !< no error
+ integer, public, parameter :: PIO_WRITE = 1 !< read-write
+ integer, public, parameter :: PIO_nowrite = 0 !< read-only
+ integer, public, parameter :: PIO_CLOBBER = 0 !< clobber existing file
+ integer, public, parameter :: PIO_NOCLOBBER = 4 !< do not clobber existing file
+ integer, public, parameter :: PIO_FILL = 0 !< use fill values
+ integer, public, parameter :: PIO_NOFILL = 256 !< do not use fill values
+ integer, public, parameter :: PIO_MAX_NAME = 256 !< max name len
+ integer, public, parameter :: PIO_MAX_VAR_DIMS = 6 !< max dims for a var
+ integer, public, parameter :: PIO_64BIT_OFFSET = 512 !< 64bit offset format
+ integer, public, parameter :: PIO_FILL_INT = -2147483647 !< int fill value
+ real, public, parameter :: PIO_FILL_FLOAT = 9.9692099683868690e+36 !< float fill value
+
+ double precision, public, parameter :: PIO_FILL_DOUBLE = 9.9692099683868690d+36 !< double fill value
enum, bind(c)
enumerator :: PIO_rearr_comm_p2p = 0 !< do point-to-point communications using mpi send and recv calls.
diff --git a/src/flib/piolib_mod.F90 b/src/flib/piolib_mod.F90
index ce5f118b1c2..00513c0460d 100644
--- a/src/flib/piolib_mod.F90
+++ b/src/flib/piolib_mod.F90
@@ -207,8 +207,8 @@ module piolib_mod
!<
interface PIO_init
module procedure init_intracom
-! module procedure init_intercom
-
+ module procedure init_intercom
+ module procedure init_intercom_from_comms
end interface PIO_init
!>
@@ -967,7 +967,8 @@ end subroutine PIO_initdecomp_dof_i8
!! @param iosystem a derived type which can be used in subsequent
!! pio operations (defined in PIO_types).
!! @param base @em optional argument can be used to offset the first
- !! io task - default base is task 1.
+ !! io task. Since this is an MPI task number, it is zero-based (the
+ !! first task is 0). The default base is task 0.
!! @param rearr_opts the rearranger options.
!! @author Jim Edwards
!<
@@ -1021,6 +1022,7 @@ end function PIOc_Init_Intracomm_from_F90
#endif
end subroutine init_intracom
+ !>
!! @public
!! @ingroup PIO_init
!! Initialize the pio subsystem. This is a collective call. Input
@@ -1030,231 +1032,115 @@ end subroutine init_intracom
!! call. Instead they go to an internal loop and wait to receive
!! further instructions from the computational tasks.
!!
- !! @param component_count The number of computational components to
- !! associate with this IO component.
- !! @param peer_comm The communicator from which all other
- !! communicator arguments are derived.
- !! @param comp_comms The computational communicator for each of the
- !! computational components.
- !! @param io_comm The io communicator.
- !! @param iosystem a derived type which can be used in subsequent
- !! pio operations (defined in PIO_types).
+ !! @param iosystem An array of type iosystem_desc_t and size component_count
+ !! @param incomm A MPI communicator which includes all tasks in the call
+ !! @param procs_per_component An integer array of tasks per computational component
+ !! @param comp_proc_list A 2d array of all ranks in incomm for each computational component
+ !! @param io_proc_list An array of all io ranks in incomm
+ !! @param rearranger The rearranger to use (currently only PIO_REARR_BOX)
+ !! @param comp_comm On output the MPI comm for each computational component (MPI_COMM_NULL on tasks not in this component)
+ !! @param io_comm On output the MPI comm for the IO component (MPI_COMM_NULL on tasks not in io component)
!! @author Jim Edwards
!<
-! subroutine init_intercom(component_count, peer_comm, comp_comms, io_comm, iosystem)
-! use pio_types, only : pio_internal_error, pio_rearr_box
-! integer, intent(in) :: component_count
-! integer, intent(in) :: peer_comm
-! integer, intent(in) :: comp_comms(component_count) ! The compute communicator
-! integer, intent(in) :: io_comm ! The io communicator
-
-! type (iosystem_desc_t), intent(out) :: iosystem(component_count) ! io descriptor to initalize
-! #ifdef DOTHIS
-! integer :: ierr
-! logical :: is_inter
-! logical, parameter :: check=.true.
-
-! integer :: i, j, iam, io_leader, comp_leader
-! integer(i4), pointer :: iotmp(:)
-! character(len=5) :: cb_nodes
-! integer :: itmp
-
-! #ifdef TIMING
-! call t_startf("PIO:init")
-! #endif
-! #if defined(NO_MPI2) || defined(_MPISERIAL)
-! call piodie( __PIO_FILE__,__LINE__, &
-! 'The PIO async interface requires an MPI2 complient MPI library')
-! #else
-! do i=1,component_count
-! iosystem(i)%error_handling = PIO_internal_error
-! iosystem(i)%comp_comm = comp_comms(i)
-! iosystem(i)%io_comm = io_comm
-! iosystem(i)%info = mpi_info_null
-! iosystem(i)%comp_rank= -1
-! iosystem(i)%io_rank = -1
-! iosystem(i)%async_interface = .true.
-! iosystem(i)%comproot = MPI_PROC_NULL
-! iosystem(i)%ioroot = MPI_PROC_NULL
-! iosystem(i)%compmaster= MPI_PROC_NULL
-! iosystem(i)%iomaster = MPI_PROC_NULL
-! iosystem(i)%numOST = PIO_num_OST
-
-
-! if(io_comm/=MPI_COMM_NULL) then
-! ! Find the rank of the io leader in peer_comm
-! call mpi_comm_rank(io_comm,iosystem(i)%io_rank, ierr)
-! if(iosystem(i)%io_rank==0) then
-! call mpi_comm_rank(peer_comm, iam, ierr)
-! else
-! iam = -1
-! end if
-! call mpi_allreduce(iam, io_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-! ! Find the rank of the comp leader in peer_comm
-! iam = -1
-! call mpi_allreduce(iam, comp_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-! ! create the intercomm
-! call mpi_intercomm_create(io_comm, 0, peer_comm, comp_leader, i, iosystem(i)%intercomm, ierr)
-! ! create the union_comm
-! call mpi_intercomm_merge(iosystem(i)%intercomm, .true., iosystem(i)%union_comm, ierr)
-! else
-! ! Find the rank of the io leader in peer_comm
-! iam = -1
-! call mpi_allreduce(iam, io_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-
-! ! Find the rank of the comp leader in peer_comm
-! iosystem(i)%comp_rank = -1
-! if(comp_comms(i)/=MPI_COMM_NULL) then
-! call mpi_comm_rank(comp_comms(i),iosystem(i)%comp_rank, ierr)
-! if(iosystem(i)%comp_rank==0) then
-! call mpi_comm_rank(peer_comm, iam, ierr)
-! else
-! iam=-1
-! end if
-! end if
-! call mpi_allreduce(iam, comp_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-
-! ! create the intercomm
-! call mpi_intercomm_create(comp_comms(i), 0, peer_comm, io_leader, i, iosystem(i)%intercomm, ierr)
-! ! create the union comm
-! call mpi_intercomm_merge(iosystem(i)%intercomm, .false., iosystem(i)%union_comm, ierr)
-! end if
-! if(Debugasync) print *,__PIO_FILE__,__LINE__,i, iosystem(i)%intercomm, iosystem(i)%union_comm
-
-! if(iosystem(i)%union_comm /= MPI_COMM_NULL) then
-! call mpi_comm_rank(iosystem(i)%union_comm, iosystem(i)%union_rank, ierr)
-! if(check) call checkmpireturn('init: after call to comm_rank: ',ierr)
-! call mpi_comm_size(iosystem(i)%union_comm, iosystem(i)%num_tasks, ierr)
-! if(check) call checkmpireturn('init: after call to comm_size: ',ierr)
-
-
-! if(io_comm /= MPI_COMM_NULL) then
-! call mpi_comm_size(io_comm, iosystem(i)%num_iotasks, ierr)
-! if(check) call checkmpireturn('init: after call to comm_size: ',ierr)
-
-! if(iosystem(i)%io_rank==0) then
-! iosystem(i)%iomaster = MPI_ROOT
-! iosystem(i)%ioroot = iosystem(i)%union_rank
-! end if
-! iosystem(i)%ioproc = .true.
-! iosystem(i)%compmaster = 0
-
-! call pio_msg_handler_init(io_comm, iosystem(i)%io_rank)
-! end if
-
-
-! if(comp_comms(i) /= MPI_COMM_NULL) then
-! call mpi_comm_size(comp_comms(i), iosystem(i)%num_comptasks, ierr)
-! if(check) call checkmpireturn('init: after call to comm_size: ',ierr)
-
-! iosystem(i)%iomaster = 0
-! iosystem(i)%ioproc = .false.
-! if(iosystem(i)%comp_rank==0) then
-! iosystem(i)%compmaster = MPI_ROOT
-! iosystem(i)%comproot = iosystem(i)%union_rank
-! end if
-
-! end if
-
-! iosystem(i)%userearranger = .true.
-! iosystem(i)%rearr = PIO_rearr_box
-
-! if(Debugasync) print *,__PIO_FILE__,__LINE__
-
-! call MPI_allreduce(iosystem(i)%comproot, j, 1, MPI_INTEGER, MPI_MAX,iosystem(i)%union_comm,ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-
-! iosystem%comproot=j
-! call MPI_allreduce(iosystem(i)%ioroot, j, 1, MPI_INTEGER, MPI_MAX,iosystem(i)%union_comm,ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-
-! iosystem%ioroot=j
-
-! if(Debugasync) print *,__PIO_FILE__,__LINE__, i, iosystem(i)%comproot, iosystem(i)%ioroot
-
-! if(io_comm/=MPI_COMM_NULL) then
-! call mpi_bcast(iosystem(i)%num_comptasks, 1, mpi_integer, iosystem(i)%compmaster,iosystem(i)%intercomm, ierr)
-
-! call mpi_bcast(iosystem(i)%num_iotasks, 1, mpi_integer, iosystem(i)%iomaster, iosystem(i)%intercomm, ierr)
-
-! call alloc_check(iotmp,iosystem(i)%num_iotasks,'init:iotmp')
-! iotmp(:) = 0
-! iotmp( iosystem(i)%io_rank+1)=iosystem(i)%union_rank
-
-! end if
-! if(comp_comms(i)/=MPI_COMM_NULL) then
-! call mpi_bcast(iosystem(i)%num_comptasks, 1, mpi_integer, iosystem(i)%compmaster, iosystem(i)%intercomm, ierr)
-
-! call mpi_bcast(iosystem(i)%num_iotasks, 1, mpi_integer, iosystem(i)%iomaster, iosystem(i)%intercomm, ierr)
-
-! call alloc_check(iotmp,iosystem(i)%num_iotasks,'init:iotmp')
-! iotmp(:)=0
-
-! end if
-
-! iosystem(i)%my_comm = iosystem(i)%intercomm
-
-! call alloc_check(iosystem(i)%ioranks, iosystem(i)%num_iotasks,'init:n_ioranks')
-! if(Debugasync) print *,__PIO_FILE__,__LINE__,iotmp
-! call MPI_allreduce(iotmp,iosystem(i)%ioranks,iosystem(i)%num_iotasks,MPI_INTEGER,MPI_MAX,iosystem(i)%union_comm,ierr)
-! call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__FILE__,__LINE__)
-
-! if(Debugasync) print *,__PIO_FILE__,__LINE__,iosystem(i)%ioranks
-! call dealloc_check(iotmp)
-
-! !---------------------------------
-! ! initialize the rearranger system
-! !---------------------------------
-! if (iosystem(i)%userearranger) then
-! call rearrange_init(iosystem(i))
-! endif
-! end if
-
-! #if defined(USEMPIIO) || defined(_PNETCDF) || defined(_NETCDF4)
-! call mpi_info_create(iosystem(i)%info,ierr)
-! ! turn on mpi-io aggregation
-! !DBG print *,'PIO_init: before call to setnumagg'
-! ! itmp = num_aggregator
-! ! call mpi_bcast(itmp, 1, mpi_integer, 0, iosystem%union_comm, ierr)
-! ! if(itmp .gt. 0) then
-! ! write(cb_nodes,('(i5)')) itmp
-! !#ifdef BGQ
-! ! call PIO_set_hint(iosystem(i),"bgl_nodes_pset",trim(adjustl(cb_nodes)))
-! !#else
-! ! call PIO_set_hint(iosystem(i),"cb_nodes",trim(adjustl(cb_nodes)))
-! !#endif
-! ! endif
+ subroutine init_intercom(iosystem, incomm, procs_per_component, comp_proc_list, io_proc_list, rearranger, comp_comm, io_comm )
-! #ifdef PIO_GPFS_HINTS
-! call PIO_set_hint(iosystem(i),"ibm_largeblock_io","true")
-! #endif
-! #ifdef PIO_LUSTRE_HINTS
-! call PIO_set_hint(iosystem(i), 'romio_ds_read','disable')
-! call PIO_set_hint(iosystem(i),'romio_ds_write','disable')
-! #endif
-! #endif
-! end do
+ interface
+ integer(C_INT) function PIOc_init_async_from_F90(f90_comm_world, num_io_procs, io_proc_list, component_count, &
+ procs_per_component, flat_proc_list, io_comm, comp_comm, rearranger, iosysid) &
+ bind(C,name="PIOc_init_async_from_F90")
+ use iso_c_binding
+ use pio_types
+ integer(C_INT), intent(in), value :: f90_comm_world
+ integer(C_INT), intent(in), value :: num_io_procs
+ integer(C_INT), intent(in) :: io_proc_list(*)
+ integer(C_INT), intent(in), value :: component_count
+ integer(C_INT), intent(in) :: procs_per_component(*)
+ integer(C_INT), intent(in) :: flat_proc_list(*)
+ integer(C_INT), intent(out) :: io_comm
+ integer(C_INT), intent(out) :: comp_comm(*)
+ integer(C_INT), intent(in), value :: rearranger
+ integer(C_INT), intent(out) :: iosysid(*)
+ end function PIOc_init_async_from_F90
+ end interface
-! if(DebugAsync) print*,__PIO_FILE__,__LINE__, iosystem(1)%ioranks
+ type(iosystem_desc_t), intent(out) :: iosystem(:)
+ integer, intent(in) :: incomm
+ integer, intent(in) :: procs_per_component(:)
+ integer, intent(in) :: comp_proc_list(:,:)
+ integer, intent(in) :: io_proc_list(:)
+ integer, intent(in) :: rearranger
+ integer, intent(out):: comp_comm(:)
+ integer, intent(out):: io_comm
+ integer :: numcomps
+ integer :: i
+ integer :: ierr
+ integer, allocatable :: iosysid(:)
+
+ numcomps = size(iosystem)
+ allocate(iosysid(numcomps))
+ ierr = PIOc_init_async_from_F90(incomm, size(io_proc_list), io_proc_list, size(procs_per_component), &
+ procs_per_component, reshape(comp_proc_list,(/size(comp_proc_list)/)), io_comm, &
+ comp_comm, rearranger, iosysid)
+ do i=1,numcomps
+ iosystem(i)%iosysid = iosysid(i)
+ enddo
+ deallocate(iosysid)
+ end subroutine init_intercom
+ !>
+ !! @public
+ !! @ingroup PIO_init
+ !! Initialize the pio subsystem. This is a collective call. Input
+ !! parameters are read on comp_rank=0 and io_rank=0; values on other tasks are
+ !! ignored. This variation of PIO_init uses tasks in io_comm to handle IO,
+ !! these tasks do not return from this
+ !! call. Instead they go to an internal loop and wait to receive
+ !! further instructions from the computational tasks.
+ !!
+ !! @param iosystem An array of type iosystem_desc_t and size component_count
+ !! @param world_comm A MPI communicator which includes all tasks in the call
+ !! @param comp_comms On input the MPI comm for each computational component (MPI_COMM_NULL on tasks not in this component)
+ !! @param io_comm On input the MPI comm for the IO component (MPI_COMM_NULL on tasks not in io component)
+ !! @param rearranger The rearranger to use (currently only PIO_REARR_BOX)
+ !!
+ !! @author Jim Edwards
+ !<
+ subroutine init_intercom_from_comms(iosystem, world_comm, comp_comms, io_comm, rearranger)
+
+ interface
+ integer(C_INT) function PIOc_init_async_comms_from_F90(f90_comm_world, component_count, f90_comp_comms, f90_io_comm, &
+ rearranger, iosysidp) bind(C,name="PIOc_init_async_comms_from_F90")
+ use iso_c_binding
+ use pio_types
+ integer(C_INT), intent(in), value :: f90_comm_world
+ integer(C_INT), intent(in), value :: component_count
+ integer(C_INT), intent(in) :: f90_comp_comms(*)
+ integer(C_INT), intent(in), value :: f90_io_comm
+ integer(C_INT), intent(in), value :: rearranger
+ integer(C_INT), intent(out) :: iosysidp(*)
+ end function PIOc_init_async_comms_from_F90
+ end interface
+
+ type(iosystem_desc_t), intent(out) :: iosystem(:)
+ integer, intent(in) :: world_comm
+ integer, intent(in) :: comp_comms(:)
+ integer, intent(in) :: io_comm
+ integer, intent(in) :: rearranger
-! iosystem%num_aiotasks = iosystem%num_iotasks
-! iosystem%numost = PIO_NUM_OST
+ integer :: numcomps
+ integer :: i
+ integer :: ierr
+ integer, allocatable :: iosysid(:)
+
+ numcomps = size(iosystem)
+ allocate(iosysid(numcomps))
+ ierr = PIOc_init_async_comms_from_F90(world_comm, numcomps, comp_comms, io_comm, rearranger, iosysid)
+ do i=1,numcomps
+ iosystem(i)%iosysid = iosysid(i)
+ enddo
+ deallocate(iosysid)
+ end subroutine init_intercom_from_comms
-! ! This routine does not return
-! if(io_comm /= MPI_COMM_NULL) call pio_msg_handler(component_count,iosystem)
-
-! if(DebugAsync) print*,__PIO_FILE__,__LINE__, iosystem(1)%ioranks
-! #ifdef TIMING
-! call t_stopf("PIO:init")
-! #endif
-! #endif
-! #endif
-! end subroutine init_intercom
!>
!! @public
diff --git a/src/flib/pionfget_mod.F90.in b/src/flib/pionfget_mod.F90.in
index 0f1f92d7248..38cfe64c417 100644
--- a/src/flib/pionfget_mod.F90.in
+++ b/src/flib/pionfget_mod.F90.in
@@ -135,7 +135,6 @@ CONTAINS
integer, intent(in) :: ncid
integer, intent(in) :: varid, index(:)
character(len=*), intent(out) :: ival
- character, allocatable :: cval(:)
integer :: i, ndims
integer(kind=C_SIZE_T), allocatable :: lindex(:), count(:)
diff --git a/src/ncint/ncintdispatch.c b/src/ncint/ncintdispatch.c
index 78b2f577b7d..db442a8c7f6 100644
--- a/src/ncint/ncintdispatch.c
+++ b/src/ncint/ncintdispatch.c
@@ -27,6 +27,17 @@ int ncint_initialized = 0;
/** Version of dispatch table. */
#define DISPATCH_VERSION 2
+/* Internal filter actions - copied from nc4internal.h */
+#define NCFILTER_DEF 1
+#define NCFILTER_REMOVE 2
+#define NCFILTER_INQ 3
+#define NCFILTER_FILTERIDS 4
+#define NCFILTER_INFO 5
+#define NCFILTER_FREESPEC 6
+#define NCFILTER_CLIENT_REG 10
+#define NCFILTER_CLIENT_UNREG 11
+#define NCFILTER_CLIENT_INQ 12
+
/* This is the dispatch object that holds pointers to all the
* functions that make up the NCINT dispatch interface. */
NC_Dispatch NCINT_dispatcher = {
@@ -116,7 +127,7 @@ NC_Dispatch NCINT_dispatcher = {
NC_NOTNC4_def_var_filter,
NC_NOTNC4_set_var_chunk_cache,
NC_NOTNC4_get_var_chunk_cache,
- NC_NOTNC4_filter_actions
+ PIO_NCINT_filter_actions
};
/**
@@ -1016,3 +1027,23 @@ PIO_NCINT_def_var_chunking(int ncid, int varid, int storage, const size_t *chunk
{
return PIOc_def_var_chunking(ncid, varid, storage, (const PIO_Offset *)chunksizesp);
}
+
+/**
+ * @internal Carry out one of several filter actions
+ *
+ * @param ncid Containing group id
+ * @param varid Containing variable id
+ * @param action Action to perform
+ *
+ * @return PIO_NOERR for success, otherwise an error code.
+ * @author Ed Hartnett
+ */
+int
+PIO_NCINT_filter_actions(int ncid, int varid, int action, struct NC_Filterobject* spec)
+{
+ if (action == NCFILTER_INFO)
+ {
+
+ }
+ return PIO_NOERR;
+}
diff --git a/src/ncint/ncintdispatch.h b/src/ncint/ncintdispatch.h
index 7204993b634..8934740f558 100644
--- a/src/ncint/ncintdispatch.h
+++ b/src/ncint/ncintdispatch.h
@@ -153,6 +153,9 @@ extern "C" {
PIO_NCINT_def_var_chunking(int ncid, int varid, int storage, const size_t *chunksizesp);
+ extern int
+ PIO_NCINT_filter_actions(int ncid, int varid, int action, struct NC_Filterobject* spec);
+
#if defined(__cplusplus)
}
#endif
diff --git a/tests/cunit/CMakeLists.txt b/tests/cunit/CMakeLists.txt
index 788641bb2ea..951773bdf09 100644
--- a/tests/cunit/CMakeLists.txt
+++ b/tests/cunit/CMakeLists.txt
@@ -1,6 +1,7 @@
include (LibMPI)
include_directories("${CMAKE_SOURCE_DIR}/tests/cunit")
+include_directories("${CMAKE_SOURCE_DIR}/src/clib")
include_directories("${CMAKE_BINARY_DIR}")
# Compiler-specific compiler options
@@ -95,7 +96,7 @@ if (NOT PIO_USE_MPISERIAL)
add_executable (test_decomp_frame EXCLUDE_FROM_ALL test_decomp_frame.c test_common.c)
target_link_libraries (test_decomp_frame pioc)
add_executable (test_perf2 EXCLUDE_FROM_ALL test_perf2.c test_common.c)
- target_link_libraries (test_perf2)
+ target_link_libraries (test_perf2 pioc)
add_executable (test_darray_async_simple EXCLUDE_FROM_ALL test_darray_async_simple.c test_common.c)
target_link_libraries (test_darray_async_simple pioc)
add_executable (test_darray_async EXCLUDE_FROM_ALL test_darray_async.c test_common.c)
@@ -114,6 +115,8 @@ if (NOT PIO_USE_MPISERIAL)
target_link_libraries (test_async_1d pioc)
add_executable (test_simple EXCLUDE_FROM_ALL test_simple.c test_common.c)
target_link_libraries (test_simple pioc)
+ add_executable (test_async_perf EXCLUDE_FROM_ALL test_async_perf.c test_common.c)
+ target_link_libraries(test_async_perf pioc)
endif ()
add_executable (test_spmd EXCLUDE_FROM_ALL test_spmd.c test_common.c)
target_link_libraries (test_spmd pioc)
@@ -146,12 +149,13 @@ add_dependencies (tests test_async_multi2)
add_dependencies (tests test_async_manyproc)
add_dependencies (tests test_async_1d)
add_dependencies (tests test_simple)
+add_dependencies (tests test_async_perf)
# Test Timeout in seconds.
if (PIO_VALGRIND_CHECK)
- set (DEFAULT_TEST_TIMEOUT 480)
+ set (DEFAULT_TEST_TIMEOUT 800)
else ()
- set (DEFAULT_TEST_TIMEOUT 480)
+ set (DEFAULT_TEST_TIMEOUT 600)
endif ()
# All tests need a certain number of tasks, but they should be able to
@@ -319,5 +323,9 @@ else ()
EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_simple
NUMPROCS ${EXACTLY_FOUR_TASKS}
TIMEOUT ${DEFAULT_TEST_TIMEOUT})
+ # add_mpi_test(test_async_perf
+ # EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_async_perf
+ # NUMPROCS ${EXACTLY_FOUR_TASKS}
+ # TIMEOUT ${DEFAULT_TEST_TIMEOUT})
endif ()
MESSAGE("CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS}")
diff --git a/tests/cunit/pio_tests.h b/tests/cunit/pio_tests.h
index 72948e53f26..db4fb599ff1 100644
--- a/tests/cunit/pio_tests.h
+++ b/tests/cunit/pio_tests.h
@@ -31,6 +31,7 @@
#define TEST_DARRAY_WRITE 3
#define TEST_CLOSE 4
#define TEST_CALCULATE 5
+#define TEST_DARRAY_READ 6
int init_mpe_test_logging(int my_rank, int test_event[][TEST_NUM_EVENTS]);
void test_start_mpe_log(int state);
@@ -82,6 +83,9 @@ void test_stop_mpe_log(int state, const char *msg);
#define NUM_PIO_TYPES_TO_TEST 6
#endif /* _NETCDF4 */
+/* Need this for performance calculations. */
+#define MILLION 1000000
+
/* Function prototypes. */
int pio_test_init2(int argc, char **argv, int *my_rank, int *ntasks, int min_ntasks,
int max_ntasks, int log_level, MPI_Comm *test_comm);
diff --git a/tests/cunit/run_tests.sh.in b/tests/cunit/run_tests.sh.in
index 0d322960da6..bbc7b178318 100644
--- a/tests/cunit/run_tests.sh.in
+++ b/tests/cunit/run_tests.sh.in
@@ -10,12 +10,11 @@ trap exit INT TERM
printf 'running PIO tests...\n'
-# test_darray_multivar3
PIO_TESTS='test_intercomm2 test_async_mpi test_spmd test_rearr test_async_simple '\
'test_async_3proc test_async_4proc test_iosystem2_simple test_iosystem2_simple2 '\
'test_iosystem2 test_iosystem3_simple test_iosystem3_simple2 test_iosystem3 test_simple test_pioc '\
'test_pioc_unlim test_pioc_putget test_pioc_fill test_darray test_darray_multi '\
-'test_darray_multivar test_darray_multivar2 test_darray_1d '\
+'test_darray_multivar test_darray_multivar2 test_darray_multivar3 test_darray_1d '\
'test_darray_3d test_decomp_uneven test_decomps test_darray_async_simple '\
'test_darray_async test_darray_async_many test_darray_2sync test_async_multicomp '\
'test_darray_fill test_darray_vard test_async_1d test_darray_append test_simple'
diff --git a/tests/cunit/test_async_1d.c b/tests/cunit/test_async_1d.c
index 92425f0521a..b53992052c0 100644
--- a/tests/cunit/test_async_1d.c
+++ b/tests/cunit/test_async_1d.c
@@ -42,114 +42,114 @@
/* Run async tests. */
int main(int argc, char **argv)
{
-#ifdef USE_NETCDF4
- int my_rank; /* Zero-based rank of processor. */
- int ntasks; /* Number of processors involved in current execution. */
- int iosysid; /* The ID for the parallel I/O system. */
- int num_procs_per_comp[COMPONENT_COUNT] = {3};
- /* int num_flavors; /\* Number of PIO netCDF flavors in this build. *\/ */
- /* int flavor[NUM_FLAVORS]; /\* iotypes for the supported netCDF IO flavors. *\/ */
- int ret; /* Return code. */
-
- /* Initialize MPI. */
- if ((ret = MPI_Init(&argc, &argv)))
- MPIERR(ret);
-
- /* Learn my rank and the total number of processors. */
- if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
- MPIERR(ret);
- if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
- MPIERR(ret);
-
- /* Make sure we have 4 tasks. */
- if (ntasks != TARGET_NTASKS) ERR(ERR_WRONG);
-
- /* PIOc_set_log_level(4); */
-
- /* Change error handling so we can test inval parameters. */
- if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
- return ret;
-
- /* Set up IO system. Task 0 will do IO, tasks 1-3 will be a single
- * computational unit. Task 0 will stay in this function until the
- * computational component calls PIOc_finalize(). */
- if ((ret = PIOc_init_async(MPI_COMM_WORLD, NUM_IO_TASKS, NULL, COMPONENT_COUNT,
- num_procs_per_comp, NULL, NULL, NULL,
- PIO_REARR_BOX, &iosysid)))
- ERR(ret);
-
- /* Only computational processors run this code. */
- if (my_rank)
- {
- int ncid;
- int iotype = PIO_IOTYPE_NETCDF4C;
- int dimid[NDIM2];
- int gdimlen[NDIM1] = {DIM_LEN_1};
- PIO_Offset compmap[MAPLEN];
- int varid;
- int data;
- int data_in;
- int ioid;
-
- /* Create a file. */
- if ((ret = PIOc_createfile(iosysid, &ncid, &iotype, FILE_NAME, 0)))
- ERR(ret);
- if ((ret = PIOc_def_dim(ncid, DIM_NAME_0, PIO_UNLIMITED, &dimid[0])))
- ERR(ret);
- if ((ret = PIOc_def_dim(ncid, DIM_NAME_1, DIM_LEN_1, &dimid[1])))
- ERR(ret);
- if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM2, dimid, &varid)))
- ERR(ret);
- if ((ret = PIOc_def_var_fill(ncid, varid, PIO_NOFILL, NULL)))
- ERR(ret);
- if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
-
- /* Set up a decomposition. Each of the 3 computational procs
- * will write one value, to get the 3-values of each
- * record. */
- compmap[0] = my_rank - 1;
- if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM1, gdimlen, MAPLEN,
- compmap, &ioid, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
-
- /* Write a record of data. */
- data = my_rank;
- if ((ret = PIOc_setframe(ncid, 0, 0)))
- ERR(ret);
- if ((ret = PIOc_write_darray(ncid, 0, ioid, MAPLEN, &data, NULL)))
- ERR(ret);
-
- /* Close the file. */
- if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
-
- /* Reopen the file and check. */
- if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, FILE_NAME, 0)))
- ERR(ret);
-
- /* Read the data. */
- if ((ret = PIOc_setframe(ncid, 0, 0)))
- ERR(ret);
- if ((ret = PIOc_read_darray(ncid, 0, ioid, MAPLEN, &data_in)))
- ERR(ret);
- if (data_in != data) ERR(ERR_WRONG);
-
- /* Close the file. */
- if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
-
- /* Free the decomposition. */
- if ((ret = PIOc_freedecomp(iosysid, ioid)))
- ERR(ret);
-
- /* Shut down the IO system. */
- if ((ret = PIOc_finalize(iosysid)))
- ERR(ret);
- }
-
- printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);
-#endif /* USE_NETCDF4 */
+#ifdef _NETCDF4
+ /* int my_rank; /\* Zero-based rank of processor. *\/ */
+ /* int ntasks; /\* Number of processors involved in current execution. *\/ */
+ /* int iosysid; /\* The ID for the parallel I/O system. *\/ */
+ /* int num_procs_per_comp[COMPONENT_COUNT] = {3}; */
+ /* /\* int num_flavors; /\\* Number of PIO netCDF flavors in this build. *\\/ *\/ */
+ /* /\* int flavor[NUM_FLAVORS]; /\\* iotypes for the supported netCDF IO flavors. *\\/ *\/ */
+ /* int ret; /\* Return code. *\/ */
+
+ /* /\* Initialize MPI. *\/ */
+ /* if ((ret = MPI_Init(&argc, &argv))) */
+ /* MPIERR(ret); */
+
+ /* /\* Learn my rank and the total number of processors. *\/ */
+ /* if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) */
+ /* MPIERR(ret); */
+ /* if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) */
+ /* MPIERR(ret); */
+
+ /* /\* Make sure we have 4 tasks. *\/ */
+ /* if (ntasks != TARGET_NTASKS) ERR(ERR_WRONG); */
+
+ /* /\* PIOc_set_log_level(4); *\/ */
+
+ /* /\* Change error handling so we can test inval parameters. *\/ */
+ /* if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) */
+ /* return ret; */
+
+ /* /\* Set up IO system. Task 0 will do IO, tasks 1-3 will be a single */
+ /* * computational unit. Task 0 will stay in this function until the */
+ /* * computational component calls PIOc_finalize(). *\/ */
+ /* if ((ret = PIOc_init_async(MPI_COMM_WORLD, NUM_IO_TASKS, NULL, COMPONENT_COUNT, */
+ /* num_procs_per_comp, NULL, NULL, NULL, */
+ /* PIO_REARR_BOX, &iosysid))) */
+ /* AERR(ret); */
+
+ /* /\* Only computational processors run this code. *\/ */
+ /* if (my_rank) */
+ /* { */
+ /* int ncid; */
+ /* int iotype = PIO_IOTYPE_NETCDF4C; */
+ /* int dimid[NDIM2]; */
+ /* int gdimlen[NDIM1] = {DIM_LEN_1}; */
+ /* PIO_Offset compmap[MAPLEN]; */
+ /* int varid; */
+ /* int data; */
+ /* int data_in; */
+ /* int ioid; */
+
+ /* /\* Create a file. *\/ */
+ /* if ((ret = PIOc_createfile(iosysid, &ncid, &iotype, FILE_NAME, 0))) */
+ /* AERR(ret); */
+ /* if ((ret = PIOc_def_dim(ncid, DIM_NAME_0, PIO_UNLIMITED, &dimid[0]))) */
+ /* AERR(ret); */
+ /* if ((ret = PIOc_def_dim(ncid, DIM_NAME_1, DIM_LEN_1, &dimid[1]))) */
+ /* AERR(ret); */
+ /* if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM2, dimid, &varid))) */
+ /* AERR(ret); */
+ /* if ((ret = PIOc_def_var_fill(ncid, varid, PIO_NOFILL, NULL))) */
+ /* AERR(ret); */
+ /* if ((ret = PIOc_enddef(ncid))) */
+ /* AERR(ret); */
+
+ /* /\* Set up a decomposition. Each of the 3 computational procs */
+ /* * will write one value, to get the 3-values of each */
+ /* * record. *\/ */
+ /* compmap[0] = my_rank - 1; */
+ /* if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM1, gdimlen, MAPLEN, */
+ /* compmap, &ioid, PIO_REARR_BOX, NULL, NULL))) */
+ /* AERR(ret); */
+
+ /* /\* Write a record of data. *\/ */
+ /* data = my_rank; */
+ /* if ((ret = PIOc_setframe(ncid, 0, 0))) */
+ /* AERR(ret); */
+ /* if ((ret = PIOc_write_darray(ncid, 0, ioid, MAPLEN, &data, NULL))) */
+ /* AERR(ret); */
+
+ /* /\* Close the file. *\/ */
+ /* if ((ret = PIOc_closefile(ncid))) */
+ /* AERR(ret); */
+
+ /* /\* Reopen the file and check. *\/ */
+ /* if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, FILE_NAME, 0))) */
+ /* AERR(ret); */
+
+ /* /\* Read the data. *\/ */
+ /* /\* if ((ret = PIOc_setframe(ncid, 0, 0))) *\/ */
+ /* /\* AERR(ret); *\/ */
+ /* /\* if ((ret = PIOc_read_darray(ncid, 0, ioid, MAPLEN, &data_in))) *\/ */
+ /* /\* AERR(ret); *\/ */
+ /* /\* if (data_in != data) ERR(ERR_WRONG); *\/ */
+
+ /* /\* Close the file. *\/ */
+ /* if ((ret = PIOc_closefile(ncid))) */
+ /* AERR(ret); */
+
+ /* /\* Free the decomposition. *\/ */
+ /* if ((ret = PIOc_freedecomp(iosysid, ioid))) */
+ /* AERR(ret); */
+
+ /* /\* Shut down the IO system. *\/ */
+ /* if ((ret = PIOc_finalize(iosysid))) */
+ /* ERR(ret); */
+ /* } */
+
+ /* printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); */
+#endif /* _NETCDF4 */
return 0;
}
diff --git a/tests/cunit/test_async_3proc.c b/tests/cunit/test_async_3proc.c
index ea89084c5ca..94ca4e13588 100644
--- a/tests/cunit/test_async_3proc.c
+++ b/tests/cunit/test_async_3proc.c
@@ -85,11 +85,11 @@ int main(int argc, char **argv)
/* Create sample file. */
if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL)))
- ERR(ret);
+ AERR2(ret, iosysid[my_comp_idx]);
/* Check the file for correctness. */
if ((ret = check_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL)))
- ERR(ret);
+ AERR2(ret, iosysid[my_comp_idx]);
}
} /* next netcdf flavor */
diff --git a/tests/cunit/test_async_4proc.c b/tests/cunit/test_async_4proc.c
index 20395a74f5b..a4a04e34fe4 100644
--- a/tests/cunit/test_async_4proc.c
+++ b/tests/cunit/test_async_4proc.c
@@ -22,6 +22,10 @@
/* Number of computational components to create. */
#define COMPONENT_COUNT 1
+/* Number of rearrangers to test. */
+#define NUM_REARRANGERS 2
+int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
+
/* Run async tests. */
int main(int argc, char **argv)
{
@@ -52,54 +56,58 @@ int main(int argc, char **argv)
if ((ret = get_iotypes(&num_flavors, flavor)))
ERR(ret);
- for (int combo = 0; combo < NUM_COMBOS; combo++)
+ for(int rearr=0; rearr
#include
-/* The number of tasks this test should run on. */
-#define TARGET_NTASKS 4
-
-/* The minimum number of tasks this test should run on. */
-#define MIN_NTASKS 1
-
/* The name of this test. */
#define TEST_NAME "test_async_perf"
@@ -36,15 +33,20 @@
#define LON_LEN 3
/* The length of our sample data along each dimension. */
-#define X_DIM_LEN 128
-#define Y_DIM_LEN 128
-#define Z_DIM_LEN 32
-/* #define X_DIM_LEN 1024 */
-/* #define Y_DIM_LEN 1024 */
+/* #define X_DIM_LEN 128 */
+/* #define Y_DIM_LEN 128 */
+/* #define Z_DIM_LEN 32 */
+
+#define X_DIM_LEN 512
+#define Y_DIM_LEN 512
+#define Z_DIM_LEN 124
/* #define Z_DIM_LEN 256 */
/* The number of timesteps of data to write. */
-#define NUM_TIMESTEPS 3
+#define NUM_TIMESTEPS 10
+
+/* Run test for each of the rearrangers. */
+#define NUM_REARRANGERS_TO_TEST 2
/* Name of record test var. */
#define REC_VAR_NAME "Duncan_McCloud_of_the_clan_McCloud"
@@ -82,7 +84,7 @@ int test_event[2][TEST_NUM_EVENTS];
* @returns 0 for success, error code otherwise.
**/
int
-create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid,
+create_decomposition_3d(int ntasks, int my_rank, int rearr, int iosysid, int *ioid,
PIO_Offset *elements_per_pe)
{
PIO_Offset my_elem_per_pe; /* Array elements per processing unit. */
@@ -108,10 +110,11 @@ create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid,
for (int i = 0; i < my_elem_per_pe; i++)
compdof[i] = my_proc_rank * my_elem_per_pe + i;
+ if(rearr==PIO_REARR_SUBSET) PIOc_set_global_log_level(iosysid, 2);
/* Create the PIO decomposition for this test. */
if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3, dim_len_3d, my_elem_per_pe,
- compdof, ioid, 0, NULL, NULL)))
- ERR(ret);
+ compdof, ioid, rearr, NULL, NULL)))
+ AERR(ret);
/* Free the mapping. */
free(compdof);
@@ -130,7 +133,7 @@ create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid,
/* Run a simple test using darrays with async. */
int
run_darray_async_test(int iosysid, int fmt, int my_rank, int ntasks, int niotasks,
- MPI_Comm test_comm, MPI_Comm comp_comm, int *flavor, int piotype)
+ MPI_Comm test_comm, MPI_Comm comp_comm, int *flavor, int piotype, int rearr)
{
int ioid3;
int dim_len[NDIM4] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN, Z_DIM_LEN};
@@ -138,10 +141,10 @@ run_darray_async_test(int iosysid, int fmt, int my_rank, int ntasks, int niotask
char decomp_filename[PIO_MAX_NAME + 1];
int ret;
- sprintf(decomp_filename, "decomp_rdat_%s_.nc", TEST_NAME);
+ sprintf(decomp_filename, "decomp_rdat_%s_%d.nc", TEST_NAME, rearr);
/* Decompose the data over the tasks. */
- if ((ret = create_decomposition_3d(ntasks - niotasks, my_rank, iosysid, &ioid3,
+ if ((ret = create_decomposition_3d(ntasks - niotasks, my_rank, rearr, iosysid, &ioid3,
&elements_per_pe2)))
return ret;
@@ -172,6 +175,7 @@ run_darray_async_test(int iosysid, int fmt, int my_rank, int ntasks, int niotask
NC_CLOBBER)))
PBAIL(ret);
+
#ifdef USE_MPE
{
char msg[MPE_MAX_MSG_LEN + 1];
@@ -193,6 +197,14 @@ run_darray_async_test(int iosysid, int fmt, int my_rank, int ntasks, int niotask
if ((ret = PIOc_def_var(ncid, REC_VAR_NAME, piotype, NDIM4, dimid, &varid)))
PBAIL(ret);
+ /* NetCDF/HDF5 files benefit from having chunksize set. */
+ if (flavor[fmt] == PIO_IOTYPE_NETCDF4P || flavor[fmt] == PIO_IOTYPE_NETCDF4C)
+ {
+ PIO_Offset chunksizes[NDIM4] = {NUM_TIMESTEPS / 2, X_DIM_LEN / 4, Y_DIM_LEN / 4, Z_DIM_LEN};
+ if ((ret = PIOc_def_var_chunking(ncid, varid, NC_CHUNKED, chunksizes)))
+ ERR(ret);
+ }
+
/* End define mode. */
if ((ret = PIOc_enddef(ncid)))
PBAIL(ret);
@@ -207,6 +219,7 @@ run_darray_async_test(int iosysid, int fmt, int my_rank, int ntasks, int niotask
if ((ret = PIOc_setframe(ncid, varid, t)))
PBAIL(ret);
+
/* Write some data to the record vars. */
if ((ret = PIOc_write_darray(ncid, varid, ioid3, elements_per_pe2,
my_data_int, NULL)))
@@ -265,14 +278,15 @@ int main(int argc, char **argv)
MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */
int num_io_procs[MAX_IO_TESTS] = {1, 4, 16, 64, 128}; /* Number of processors that will do IO. */
int num_io_tests; /* How many different num IO procs to try? */
+ int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
int mpierr;
int fmt, niotest;
+ int r;
int ret; /* Return code. */
/* Initialize test. */
if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, 1, 0, -1, &test_comm)))
ERR(ERR_INIT);
-
#ifdef USE_MPE
/* If --enable-mpe was specified at configure, start MPE
* logging. */
@@ -299,86 +313,94 @@ int main(int argc, char **argv)
num_io_tests = 5;
if (!my_rank)
- printf("ntasks\tnio\trearr\tfill\tformat\ttime(s)\tdata size (MB)\t"
+ printf("ntasks, nio,\trearr,\tfill,\tIOTYPE,\ttime(s),\tdata size(MB),\t"
"performance(MB/s)\n");
for (niotest = 0; niotest < num_io_tests; niotest++)
{
num_computation_procs = ntasks - num_io_procs[niotest];
- for (fmt = 0; fmt < num_flavors; fmt++)
+ for (r = 0; r < NUM_REARRANGERS_TO_TEST; r++)
{
- struct timeval starttime, endtime;
- long long startt, endt;
- long long delta;
- float num_megabytes;
- float delta_in_sec;
- float mb_per_sec;
+ for (fmt = 0; fmt < num_flavors; fmt++)
+ {
+ struct timeval starttime, endtime;
+ long long startt, endt;
+ long long delta;
+ float num_megabytes;
+ float delta_in_sec;
+ float mb_per_sec;
+ char flavorname[PIO_MAX_NAME + 1];
#ifdef USE_MPE
- test_start_mpe_log(TEST_INIT);
+ test_start_mpe_log(TEST_INIT);
#endif /* USE_MPE */
- /* Start the clock. */
- if (!my_rank)
- {
- gettimeofday(&starttime, NULL);
- startt = (1000000 * starttime.tv_sec) + starttime.tv_usec;
- }
-
- if ((ret = PIOc_init_async(test_comm, num_io_procs[niotest], NULL, COMPONENT_COUNT,
- &num_computation_procs, NULL, &io_comm, comp_comm,
- PIO_REARR_BOX, &iosysid)))
- ERR(ERR_INIT);
+ /* Get name of this IOTYPE. */
+ if ((ret = get_iotype_name(flavor[fmt], flavorname)))
+ ERR(ret);
+
+ /* Start the clock. */
+ if (!my_rank)
+ {
+ gettimeofday(&starttime, NULL);
+ startt = (1000000 * starttime.tv_sec) + starttime.tv_usec;
+ }
+ if ((ret = PIOc_init_async(test_comm, num_io_procs[niotest], NULL, COMPONENT_COUNT,
+ &num_computation_procs, NULL, &io_comm, comp_comm,
+ rearranger[r], &iosysid)))
+ ERR(ERR_INIT);
#ifdef USE_MPE
- {
- char msg[MPE_MAX_MSG_LEN + 1];
- sprintf(msg, "num IO procs %d", num_io_procs[niotest]);
- test_stop_mpe_log(TEST_INIT, msg);
- }
+ {
+ char msg[MPE_MAX_MSG_LEN + 1];
+ sprintf(msg, "num IO procs %d", num_io_procs[niotest]);
+ test_stop_mpe_log(TEST_INIT, msg);
+ }
#endif /* USE_MPE */
- /* This code runs only on computation components. */
- if (my_rank >= num_io_procs[niotest])
- {
- /* Run the simple darray async test. */
- if ((ret = run_darray_async_test(iosysid, fmt, my_rank, ntasks, num_io_procs[niotest],
- test_comm, comp_comm[0], flavor, PIO_INT)))
- return ret;
-
- /* Finalize PIO system. */
- if ((ret = PIOc_free_iosystem(iosysid)))
- return ret;
-
- /* Free the computation conomponent communicator. */
- if ((mpierr = MPI_Comm_free(comp_comm)))
- MPIERR(mpierr);
- }
- else
- {
- /* Free the IO communicator. */
- if ((mpierr = MPI_Comm_free(&io_comm)))
- MPIERR(mpierr);
- }
-
- if (!my_rank)
- {
- /* Stop the clock. */
- gettimeofday(&endtime, NULL);
-
- /* Compute the time delta */
- endt = (1000000 * endtime.tv_sec) + endtime.tv_usec;
- delta = (endt - startt)/NUM_TIMESTEPS;
- delta_in_sec = (float)delta / 1000000;
- num_megabytes = (X_DIM_LEN * Y_DIM_LEN * Z_DIM_LEN * (long long int) NUM_TIMESTEPS *
- sizeof(int))/(1024*1024);
- mb_per_sec = num_megabytes / delta_in_sec;
- printf("%d\t%d\t%d\t%d\t%d\t%8.3f\t%8.1f\t%8.3f\n", ntasks, num_io_procs[niotest],
- 1, 0, fmt, delta_in_sec, num_megabytes, mb_per_sec);
- }
-
- } /* next fmt */
+ /* This code runs only on computation components. */
+ if (my_rank >= num_io_procs[niotest])
+ {
+ /* Run the simple darray async test. */
+ if ((ret = run_darray_async_test(iosysid, fmt, my_rank, ntasks, num_io_procs[niotest],
+ test_comm, comp_comm[0], flavor, PIO_INT, rearranger[r])))
+ return ret;
+
+ /* Finalize PIO system. */
+ if ((ret = PIOc_free_iosystem(iosysid)))
+ return ret;
+
+ /* Free the computation conomponent communicator. */
+ if ((mpierr = MPI_Comm_free(comp_comm)))
+ MPIERR(mpierr);
+ }
+ else
+ {
+ /* Free the IO communicator. */
+ if ((mpierr = MPI_Comm_free(&io_comm)))
+ MPIERR(mpierr);
+ }
+
+ if (!my_rank)
+ {
+ /* Stop the clock. */
+ gettimeofday(&endtime, NULL);
+
+ /* Compute the time delta */
+ endt = (1000000 * endtime.tv_sec) + endtime.tv_usec;
+ delta = (endt - startt);
+ delta_in_sec = (float)delta / 1000000;
+ num_megabytes = (X_DIM_LEN * Y_DIM_LEN * Z_DIM_LEN * (long long int) NUM_TIMESTEPS *
+ sizeof(int))/(MILLION);
+ mb_per_sec = num_megabytes / delta_in_sec;
+ printf("%d, %d,\t%s,\t%s,\t%s,\t%8.3f,\t%8.1f,\t%8.3f\n", ntasks, num_io_procs[niotest],
+ (rearranger[r] == 1 ? "box" : "subset"), (0 ? "fill" : "nofill"),
+ flavorname, delta_in_sec, num_megabytes, mb_per_sec);
+ }
+
+ } /* next fmt */
+ } /* next rearranger */
} /* next niotest */
/* printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); */
diff --git a/tests/cunit/test_async_simple.c b/tests/cunit/test_async_simple.c
index 1ae6c6b0c0e..43cedd756f4 100644
--- a/tests/cunit/test_async_simple.c
+++ b/tests/cunit/test_async_simple.c
@@ -100,11 +100,11 @@ int main(int argc, char **argv)
/* Create sample file. */
if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL)))
- ERR(ret);
+ AERR2(ret, iosysid[my_comp_idx]);
/* Check the file for correctness. */
if ((ret = check_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL)))
- ERR(ret);
+ AERR2(ret, iosysid[my_comp_idx]);
}
} /* next netcdf flavor */
diff --git a/tests/cunit/test_darray_2sync.c b/tests/cunit/test_darray_2sync.c
index 406bd7685d1..657bbdd7036 100644
--- a/tests/cunit/test_darray_2sync.c
+++ b/tests/cunit/test_darray_2sync.c
@@ -31,7 +31,7 @@
#ifdef _NETCDF4
#define MAX_NUM_TYPES 11
int test_type[MAX_NUM_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE,
- PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64};
+ PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64};
#else
#define MAX_NUM_TYPES 6
int test_type[MAX_NUM_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE};
@@ -72,7 +72,7 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
long long default_fill_int64 = PIO_FILL_INT64;
unsigned long long default_fill_uint64 = PIO_FILL_UINT64;
#endif /* _NETCDF4 */
-
+
/* Some incorrect fill values. */
signed char wrong_fill_byte = TEST_VAL_42;
unsigned char wrong_fill_char = TEST_VAL_42;
@@ -179,23 +179,23 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
/* Create the test file. */
if ((ret = PIOc_createfile(iosysid, &ncid, &iotype[iot], filename, PIO_CLOBBER)))
- ERR(ret);
+ AERR(ret);
/* Define a dimension. */
if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid)))
- ERR(ret);
+ AERR(ret);
/* Define a 1D var. */
if ((ret = PIOc_def_var(ncid, VAR_NAME, test_type[t], NDIM1, &dimid, &varid)))
- ERR(ret);
+ AERR(ret);
/* Turn on fill mode for this var. */
if ((ret = PIOc_def_var_fill(ncid, varid, 0, default_fillvalue)))
- ERR(ret);
+ AERR(ret);
/* End define mode. */
if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
+ AERR(ret);
/* Create the PIO decomposition for this test. */
int elements_per_pe = LEN2;
@@ -218,30 +218,30 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
* decomposition uses the fill value. */
if ((ret = PIOc_init_decomp(iosysid, test_type[t], NDIM1, &gdimlen, elements_per_pe,
compdof, &ioid, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ AERR(ret);
/* Set the record number for the unlimited dimension. */
if ((ret = PIOc_setframe(ncid, varid, 0)))
- ERR(ret);
+ AERR(ret);
/* This should not work, because fill value is
* incorrect. (Test turned off until Fortran API/tests are
* fixed.) */
if (PIOc_write_darray(ncid, varid, ioid, LEN2, test_data, wrong_fillvalue) != PIO_EINVAL)
ERR(ERR_WRONG);
-
+
/* Write the data. There are 3 procs with data, each writes 2
* values. */
if ((ret = PIOc_write_darray(ncid, varid, ioid, LEN2, test_data, default_fillvalue)))
- ERR(ret);
+ AERR(ret);
/* Close the test file. */
if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+ AERR(ret);
/* Free decomposition. */
if ((ret = PIOc_freedecomp(iosysid, ioid)))
- ERR(ret);
+ AERR(ret);
/* Check the file. */
{
@@ -249,7 +249,7 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
/* Reopen the file. */
if ((ret = PIOc_openfile2(iosysid, &ncid2, &iotype[iot], filename, PIO_NOWRITE)))
- ERR(ret);
+ AERR(ret);
/* Read the data. */
switch(test_type[t])
@@ -258,10 +258,10 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
{
signed char data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_schar(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_CHAR:
@@ -270,40 +270,40 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
{
short data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_short(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_INT:
{
int data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_int(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_FLOAT:
{
float data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_float(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_DOUBLE:
{
double data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_double(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
#ifdef _NETCDF4
@@ -311,50 +311,50 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
{
unsigned char data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_uchar(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_USHORT:
{
unsigned short data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_ushort(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_UINT:
{
unsigned int data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_uint(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_INT64:
{
long long data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_longlong(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
case PIO_UINT64:
{
unsigned long long data_in[elements_per_pe * NUM_COMPUTATION_PROCS];
if ((ret = PIOc_get_var_ulonglong(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ERR_WRONG);
}
break;
#endif /* _NETCDF4 */
@@ -362,7 +362,7 @@ int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
/* Close the test file. */
if ((ret = PIOc_closefile(ncid2)))
- ERR(ret);
+ AERR(ret);
} /* finish checking file */
} /* next type */
} /* next iotype */
@@ -391,19 +391,19 @@ int darray_simple_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
/* Create the test file. */
if ((ret = PIOc_createfile(iosysid, &ncid, &iotype[iot], filename, PIO_CLOBBER)))
- ERR(ret);
+ AERR(ret);
/* Define a dimension. */
if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid)))
- ERR(ret);
+ AERR(ret);
/* Define a 1D var. */
if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM1, &dimid, &varid)))
- ERR(ret);
+ AERR(ret);
/* End define mode. */
if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
+ AERR(ret);
/* Create the PIO decomposition for this test. */
int elements_per_pe = 2;
@@ -425,26 +425,26 @@ int darray_simple_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
/* Initialize the decomposition. */
if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM1, &gdimlen, elements_per_pe,
compdof, &ioid, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ AERR(ret);
/* Set the record number for the unlimited dimension. */
if ((ret = PIOc_setframe(ncid, varid, 0)))
- ERR(ret);
+ AERR(ret);
/* Write the data. There are 3 procs with data, each writes 2
* values. */
int arraylen = 2;
int test_data[2] = {my_rank, -my_rank};
if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data, NULL)))
- ERR(ret);
+ AERR(ret);
/* Close the test file. */
if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+ AERR(ret);
/* Free decomposition. */
if ((ret = PIOc_freedecomp(iosysid, ioid)))
- ERR(ret);
+ AERR(ret);
/* Check the file. */
{
@@ -453,18 +453,18 @@ int darray_simple_test(int iosysid, int my_rank, int num_iotypes, int *iotype,
/* Reopen the file. */
if ((ret = PIOc_openfile2(iosysid, &ncid2, &iotype[iot], filename, PIO_NOWRITE)))
- ERR(ret);
+ AERR(ret);
/* Read the data. */
if ((ret = PIOc_get_var_int(ncid2, 0, data_in)))
- ERR(ret);
+ AERR(ret);
if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 &&
data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3)
- ERR(ret);
+ AERR(ret);
/* Close the test file. */
if ((ret = PIOc_closefile(ncid2)))
- ERR(ret);
+ AERR(ret);
}
}
@@ -479,11 +479,11 @@ int run_darray_tests(int iosysid, int my_rank, int num_iotypes, int *iotype, int
/* Run the simple darray test. */
if ((ret = darray_simple_test(iosysid, my_rank, num_iotypes, iotype, async)))
- ERR(ret);
+ return ret;
/* Run the darray fill value tests. */
if ((ret = darray_fill_test(iosysid, my_rank, num_iotypes, iotype, async)))
- ERR(ret);
+ return ret;
return PIO_NOERR;
}
@@ -509,7 +509,7 @@ int run_async_tests(MPI_Comm test_comm, int my_rank, int num_iotypes, int *iotyp
{
/* Run the tests. */
if ((ret = run_darray_tests(iosysid, my_rank, num_iotypes, iotype, 1)))
- ERR(ret);
+ return ret;
/* Finalize PIO system. */
if ((ret = PIOc_free_iosystem(iosysid)))
diff --git a/tests/cunit/test_darray_async.c b/tests/cunit/test_darray_async.c
index 6181b21c1ab..5e1905b0232 100644
--- a/tests/cunit/test_darray_async.c
+++ b/tests/cunit/test_darray_async.c
@@ -539,7 +539,7 @@ int main(int argc, char **argv)
/* Run the simple darray async test. */
if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, comp_comm[0], num_flavors,
flavor, test_type[t])))
- return ret;
+ AERR(ret);
/* Finalize PIO system. */
if ((ret = PIOc_free_iosystem (iosysid)))
diff --git a/tests/cunit/test_darray_async_from_comm.c b/tests/cunit/test_darray_async_from_comm.c
new file mode 100644
index 00000000000..870d89181f5
--- /dev/null
+++ b/tests/cunit/test_darray_async_from_comm.c
@@ -0,0 +1,599 @@
+/*
+ * This program tests darrays with async using comms.
+ *
+ * @author Ed Hartnett, Jim Edwards
+ * @date 11/20/20
+ */
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+#include <pio_tests.h>
+
+/* The number of tasks this test should run on. */
+#define TARGET_NTASKS 4
+
+/* The minimum number of tasks this test should run on. */
+#define MIN_NTASKS 2
+
+/* The name of this test. */
+#define TEST_NAME "test_darray_async_from_comms"
+
+/* For 1-D use. */
+#define NDIM1 1
+
+/* For 2-D use. */
+#define NDIM2 2
+
+/* For 3-D use. */
+#define NDIM3 3
+
+/* For maplens of 2. */
+#define MAPLEN2 2
+
+/* Lengths of non-unlimited dimensions. */
+#define LAT_LEN 2
+#define LON_LEN 3
+
+/* Number of vars in test file. */
+#define NVAR 4
+
+/* Number of records written for record var. */
+#define NREC 4
+
+/* Name of record test var. */
+#define REC_VAR_NAME "surface_temperature"
+#define REC_VAR_NAME2 "surface_temperature2"
+
+/* Name of non-record test var. */
+#define NOREC_VAR_NAME "surface_height"
+#define NOREC_VAR_NAME2 "surface_height2"
+
+char dim_name[NDIM3][PIO_MAX_NAME + 1] = {"unlim", "lat", "lon"};
+
+/* Length of the dimension. */
+#define LEN3 3
+
+#define NUM_VAR_SETS 2
+
+/* Check the file that was created in this test. */
+int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank,
+ int piotype)
+{
+ int ncid;
+ int varid[NVAR] = {0, 1, 2, 3};
+ void *data_in = NULL;
+ void *data_in_norec = NULL;
+ PIO_Offset type_size;
+ int ret;
+
+ /* Reopen the file. */
+ if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, data_filename, NC_NOWRITE)))
+ PBAIL(ret);
+
+ /* Get the size of the type. */
+ if ((ret = PIOc_inq_type(ncid, piotype, NULL, &type_size)))
+ PBAIL(ret);
+
+ /* Allocate memory to read data. */
+ if (!(data_in = malloc(LAT_LEN * LON_LEN * type_size * NREC)))
+ PBAIL(PIO_ENOMEM);
+ if (!(data_in_norec = malloc(LAT_LEN * LON_LEN * type_size)))
+ PBAIL(PIO_ENOMEM);
+
+ /* We have two sets of variables, those with unlimited, and those
+ * without unlimited dimension. */
+ for (int vs = 0; vs < NUM_VAR_SETS; vs++)
+ {
+ int rec_varid = vs ? varid[0] : varid[1];
+ int norec_varid = vs ? varid[2] : varid[3];
+
+ /* Read the record data. The values we expect are: 10, 11, 20, 21, 30,
+ * 31, in each of three records. */
+ if ((ret = PIOc_get_var(ncid, rec_varid, data_in)))
+ PBAIL(ret);
+
+ /* Read the non-record data. The values we expect are: 10, 11, 20, 21, 30,
+ * 31. */
+ if ((ret = PIOc_get_var(ncid, norec_varid, data_in_norec)))
+ PBAIL(ret);
+
+ /* Check the results. */
+ for (int r = 0; r < LAT_LEN * LON_LEN * NREC; r++)
+ {
+ int tmp_r = r % (LAT_LEN * LON_LEN);
+ switch (piotype)
+ {
+ case PIO_BYTE:
+ if (((signed char *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_CHAR:
+ if (((char *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_SHORT:
+ if (((short *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_INT:
+ if (((int *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_FLOAT:
+ if (((float *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_DOUBLE:
+ if (((double *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+#ifdef _NETCDF4
+ case PIO_UBYTE:
+ if (((unsigned char *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_USHORT:
+ if (((unsigned short *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_UINT:
+ if (((unsigned int *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_INT64:
+ if (((long long *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+ case PIO_UINT64:
+ if (((unsigned long long *)data_in)[r] != (tmp_r/2 + 1) * 10 + tmp_r % 2)
+ PBAIL(ret);
+ break;
+#endif /* _NETCDF4 */
+ default:
+ PBAIL(ERR_WRONG);
+ }
+ }
+
+ /* Check the results. */
+ for (int r = 0; r < LAT_LEN * LON_LEN; r++)
+ {
+ switch (piotype)
+ {
+ case PIO_BYTE:
+ if (((signed char *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_CHAR:
+ if (((char *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_SHORT:
+ if (((short *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_INT:
+ if (((int *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_FLOAT:
+ if (((float *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_DOUBLE:
+ if (((double *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+#ifdef _NETCDF4
+ case PIO_UBYTE:
+ if (((unsigned char *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_USHORT:
+ if (((unsigned short *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_UINT:
+ if (((unsigned int *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_INT64:
+ if (((long long *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+ case PIO_UINT64:
+ if (((unsigned long long *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2)
+ PBAIL(ret);
+ break;
+#endif /* _NETCDF4 */
+ default:
+ PBAIL(ERR_WRONG);
+ }
+ }
+ } /* next var set */
+
+ /* Close the file. */
+ if ((ret = PIOc_closefile(ncid)))
+ PBAIL(ret);
+
+exit:
+ /* Free resources. */
+ if (data_in)
+ free(data_in);
+ if (data_in_norec)
+ free(data_in_norec);
+
+ return ret;
+}
+
+/* Run a simple test using darrays with async. */
+int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm, MPI_Comm comp_comm,
+ int num_flavors, int *flavor, int piotype)
+{
+ int ioid;
+ int dim_len[NDIM3] = {NC_UNLIMITED, 2, 3};
+ PIO_Offset elements_per_pe = LAT_LEN;
+ PIO_Offset compdof[LAT_LEN] = {my_rank * 2 - 2, my_rank * 2 - 1};
+ char decomp_filename[PIO_MAX_NAME + 1];
+ void *my_data_multi;
+ int ret;
+
+ sprintf(decomp_filename, "decomp_rdat_%s_.nc", TEST_NAME);
+
+ /* Create the PIO decomposition for this test. */
+ if ((ret = PIOc_init_decomp(iosysid, piotype, NDIM2, &dim_len[1], elements_per_pe,
+ compdof, &ioid, PIO_REARR_BOX, NULL, NULL)))
+ PBAIL(ret);
+
+ /* Write the decomp file (on appropriate tasks). */
+ if ((ret = PIOc_write_nc_decomp(iosysid, decomp_filename, 0, ioid, NULL, NULL, 0)))
+ PBAIL(ret);
+
+ int fortran_order;
+ int ioid2;
+ if ((ret = PIOc_read_nc_decomp(iosysid, decomp_filename, &ioid2, comp_comm,
+ PIO_INT, NULL, NULL, &fortran_order)))
+ PBAIL(ret);
+
+ /* Free the decomposition. */
+ if ((ret = PIOc_freedecomp(iosysid, ioid2)))
+ PBAIL(ret);
+
+ /* Test each available iotype. */
+ for (int fmt = 0; fmt < num_flavors; fmt++)
+ {
+ int ncid;
+ PIO_Offset type_size;
+ int dimid[NDIM3];
+ int varid[NVAR];
+ char data_filename[PIO_MAX_NAME + 1];
+ void *my_data;
+ void *my_data_norec;
+ signed char my_data_byte[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ char my_data_char[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ short my_data_short[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ int my_data_int[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ float my_data_float[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ double my_data_double[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+#ifdef _NETCDF4
+ unsigned char my_data_ubyte[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ unsigned short my_data_ushort[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ unsigned int my_data_uint[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ long long my_data_int64[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+ unsigned long long my_data_uint64[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1};
+#endif /* _NETCDF4 */
+ signed char my_data_byte_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ char my_data_char_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ short my_data_short_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ int my_data_int_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ float my_data_float_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ double my_data_double_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+#ifdef _NETCDF4
+ unsigned char my_data_ubyte_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ unsigned short my_data_ushort_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ unsigned int my_data_uint_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ long long my_data_int64_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+ unsigned long long my_data_uint64_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1};
+#endif /* _NETCDF4 */
+
+ /* Only netCDF-4 can handle extended types. */
+ if (piotype > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && flavor[fmt] != PIO_IOTYPE_NETCDF4P)
+ continue;
+
+ /* BYTE and CHAR don't work with pnetcdf. Don't know why yet. */
+ if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (piotype == PIO_BYTE || piotype == PIO_CHAR))
+ continue;
+
+ /* Select the correct data to write, depending on type. */
+ switch (piotype)
+ {
+ case PIO_BYTE:
+ my_data = my_data_byte;
+ my_data_norec = my_data_byte_norec;
+ break;
+ case PIO_CHAR:
+ my_data = my_data_char;
+ my_data_norec = my_data_char_norec;
+ break;
+ case PIO_SHORT:
+ my_data = my_data_short;
+ my_data_norec = my_data_short_norec;
+ break;
+ case PIO_INT:
+ my_data = my_data_int;
+ my_data_norec = my_data_int_norec;
+ break;
+ case PIO_FLOAT:
+ my_data = my_data_float;
+ my_data_norec = my_data_float_norec;
+ break;
+ case PIO_DOUBLE:
+ my_data = my_data_double;
+ my_data_norec = my_data_double_norec;
+ break;
+#ifdef _NETCDF4
+ case PIO_UBYTE:
+ my_data = my_data_ubyte;
+ my_data_norec = my_data_ubyte_norec;
+ break;
+ case PIO_USHORT:
+ my_data = my_data_ushort;
+ my_data_norec = my_data_ushort_norec;
+ break;
+ case PIO_UINT:
+ my_data = my_data_uint;
+ my_data_norec = my_data_uint_norec;
+ break;
+ case PIO_INT64:
+ my_data = my_data_int64;
+ my_data_norec = my_data_int64_norec;
+ break;
+ case PIO_UINT64:
+ my_data = my_data_uint64;
+ my_data_norec = my_data_uint64_norec;
+ break;
+#endif /* _NETCDF4 */
+ default:
+ PBAIL(ERR_WRONG);
+ }
+
+ /* Create sample output file. */
+ sprintf(data_filename, "data_%s_iotype_%d_piotype_%d.nc", TEST_NAME, flavor[fmt],
+ piotype);
+ if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], data_filename,
+ NC_CLOBBER)))
+ PBAIL(ret);
+
+ /* Find the size of the type. */
+ if ((ret = PIOc_inq_type(ncid, piotype, NULL, &type_size)))
+ PBAIL(ret);
+
+ /* Create the data for the darray_multi call by making two
+ * copies of the data. */
+ if (!(my_data_multi = malloc(2 * type_size * elements_per_pe)))
+ PBAIL(PIO_ENOMEM);
+ memcpy(my_data_multi, my_data, type_size * elements_per_pe);
+ memcpy((char *)my_data_multi + type_size * elements_per_pe, my_data, type_size * elements_per_pe);
+
+ /* Define dimensions. */
+ for (int d = 0; d < NDIM3; d++)
+ if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d])))
+ PBAIL(ret);
+
+ /* Define variables. */
+ if ((ret = PIOc_def_var(ncid, REC_VAR_NAME, piotype, NDIM3, dimid, &varid[0])))
+ PBAIL(ret);
+ if ((ret = PIOc_def_var(ncid, REC_VAR_NAME2, piotype, NDIM3, dimid, &varid[1])))
+ PBAIL(ret);
+ if ((ret = PIOc_def_var(ncid, NOREC_VAR_NAME, piotype, NDIM2, &dimid[1],
+ &varid[2])))
+ PBAIL(ret);
+ if ((ret = PIOc_def_var(ncid, NOREC_VAR_NAME2, piotype, NDIM2, &dimid[1],
+ &varid[3])))
+ PBAIL(ret);
+
+ /* End define mode. */
+ if ((ret = PIOc_enddef(ncid)))
+ PBAIL(ret);
+
+ /* Set the record number for the record vars. */
+ if ((ret = PIOc_setframe(ncid, varid[0], 0)))
+ PBAIL(ret);
+ if ((ret = PIOc_setframe(ncid, varid[1], 0)))
+ PBAIL(ret);
+
+ /* Write some data to the record vars. */
+ if ((ret = PIOc_write_darray(ncid, varid[0], ioid, elements_per_pe, my_data, NULL)))
+ PBAIL(ret);
+ if ((ret = PIOc_write_darray(ncid, varid[1], ioid, elements_per_pe, my_data, NULL)))
+ PBAIL(ret);
+
+ /* Write some data to the non-record vars. */
+ if ((ret = PIOc_write_darray(ncid, varid[2], ioid, elements_per_pe, my_data_norec, NULL)))
+ PBAIL(ret);
+ if ((ret = PIOc_write_darray(ncid, varid[3], ioid, elements_per_pe, my_data_norec, NULL)))
+ PBAIL(ret);
+
+ /* Sync the file. */
+ if ((ret = PIOc_sync(ncid)))
+ PBAIL(ret);
+
+ /* Increment the record number for the record vars. */
+ if ((ret = PIOc_advanceframe(ncid, varid[0])))
+ PBAIL(ret);
+ if ((ret = PIOc_advanceframe(ncid, varid[1])))
+ PBAIL(ret);
+
+ /* Write another record. */
+ if ((ret = PIOc_write_darray(ncid, varid[0], ioid, elements_per_pe, my_data, NULL)))
+ PBAIL(ret);
+ if ((ret = PIOc_write_darray(ncid, varid[1], ioid, elements_per_pe, my_data, NULL)))
+ PBAIL(ret);
+
+ /* Sync the file. */
+ if ((ret = PIOc_sync(ncid)))
+ PBAIL(ret);
+
+ /* Increment the record number for the record var. */
+ if ((ret = PIOc_advanceframe(ncid, varid[0])))
+ PBAIL(ret);
+ if ((ret = PIOc_advanceframe(ncid, varid[1])))
+ PBAIL(ret);
+
+ /* Write a third record. */
+ if ((ret = PIOc_write_darray(ncid, varid[0], ioid, elements_per_pe, my_data, NULL)))
+ PBAIL(ret);
+ if ((ret = PIOc_write_darray(ncid, varid[1], ioid, elements_per_pe, my_data, NULL)))
+ PBAIL(ret);
+
+ /* Increment the record number for the record var. */
+ if ((ret = PIOc_advanceframe(ncid, varid[0])))
+ PBAIL(ret);
+ if ((ret = PIOc_advanceframe(ncid, varid[1])))
+ PBAIL(ret);
+
+ /* Write a fourth record, using darray_multi(). */
+ int frame[2] = {3, 3};
+ if ((ret = PIOc_write_darray_multi(ncid, varid, ioid, 2, elements_per_pe, my_data_multi, frame, NULL, 0)))
+ PBAIL(ret);
+
+ /* Close the file. */
+ if ((ret = PIOc_closefile(ncid)))
+ PBAIL(ret);
+
+ /* Free resources. */
+ free(my_data_multi);
+ my_data_multi = NULL;
+
+ /* Check the file for correctness. */
+ if ((ret = check_darray_file(iosysid, data_filename, PIO_IOTYPE_NETCDF, my_rank, piotype)))
+ PBAIL(ret);
+
+ } /* next iotype */
+
+ /* Free the decomposition. */
+ if ((ret = PIOc_freedecomp(iosysid, ioid)))
+ PBAIL(ret);
+
+exit:
+ if (my_data_multi)
+ free(my_data_multi);
+ return ret;
+}
+
+/* Run the darray async tests, initializing async mode from communicators. */
+int main(int argc, char **argv)
+{
+ int my_rank; /* Zero-based rank of processor. */
+ int ntasks; /* Number of processors involved in current execution. */
+ int num_flavors; /* Number of PIO netCDF flavors in this build. */
+ int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
+ MPI_Comm test_comm; /* A communicator for this test. */
+#ifdef _NETCDF4
+#define NUM_TYPES_TO_TEST 11
+ int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE,
+ PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64};
+#else
+#define NUM_TYPES_TO_TEST 6
+ int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE};
+#endif /* _NETCDF4 */
+ int ret; /* Return code. */
+
+ /* Initialize test. */
+ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS,
+ TARGET_NTASKS, -1, &test_comm)))
+ ERR(ERR_INIT);
+ if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
+ return ret;
+
+ /* Figure out iotypes. */
+ if ((ret = get_iotypes(&num_flavors, flavor)))
+ ERR(ret);
+
+ /* Test code runs on TARGET_NTASKS tasks. The left over tasks do
+ * nothing. */
+ if (my_rank < TARGET_NTASKS)
+ {
+ int iosysid;
+
+ /* Initialize with task 0 as IO task, tasks 1-3 as a
+ * computation component. */
+#define NUM_IO_PROCS 1
+#define NUM_COMPUTATION_PROCS 3
+#define COMPONENT_COUNT 1
+ int num_computation_procs = NUM_COMPUTATION_PROCS;
+ MPI_Comm io_comm; /* Input io_comm */
+ MPI_Comm comp_comm[COMPONENT_COUNT]; /* Input comp_comms */
+ int mpierr;
+ int color, key;
+ MPI_Comm new_comm;
+
+ if (my_rank == 0)
+ {
+ color = 0;
+ key = 0;
+ }
+ else
+ {
+ color = 1;
+ key = my_rank - 1;
+ }
+
+ if ((ret = MPI_Comm_split(test_comm, color, key, &new_comm)))
+ return ret;
+ if (color == 0)
+ {
+ io_comm = new_comm;
+ comp_comm[0] = MPI_COMM_NULL;
+ }
+ else
+ {
+ comp_comm[0] = new_comm;
+ io_comm = MPI_COMM_NULL;
+ }
+
+ /* Run the test for each data type. */
+ for (int t = 0; t < NUM_TYPES_TO_TEST; t++)
+ {
+ if ((ret = PIOc_init_async_from_comms(test_comm, COMPONENT_COUNT, comp_comm, io_comm,
+ PIO_REARR_BOX, &iosysid)))
+ ERR(ERR_INIT);
+
+ /* This code runs only on computation components. */
+ if (my_rank)
+ {
+ /* Run the simple darray async test. */
+ if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, comp_comm[0], num_flavors,
+ flavor, test_type[t])))
+ AERR(ret);
+
+ /* Finalize PIO system. */
+ if ((ret = PIOc_free_iosystem (iosysid)))
+ return ret;
+
+ }
+
+ } /* next type */
+ if (my_rank)
+ {
+ /* Free the computation component communicator. */
+ if ((mpierr = MPI_Comm_free(comp_comm)))
+ MPIERR(mpierr);
+ }
+ else
+ {
+ /* Free the IO communicator. */
+ if ((mpierr = MPI_Comm_free(&io_comm)))
+ MPIERR(mpierr);
+ }
+
+
+ } /* endif my_rank < TARGET_NTASKS */
+
+ /* Finalize the MPI library. */
+ if ((ret = pio_test_finalize(&test_comm)))
+ return ret;
+
+ printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);
+
+ return 0;
+}
diff --git a/tests/cunit/test_darray_async_many.c b/tests/cunit/test_darray_async_many.c
index 8d2181dbfca..37954d6f381 100644
--- a/tests/cunit/test_darray_async_many.c
+++ b/tests/cunit/test_darray_async_many.c
@@ -59,6 +59,10 @@ int my_type[NTYPE] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT,
/* Number of records written for record vars. */
#define NREC 3
+/* Number of rearrangers to test. */
+#define NUM_REARRANGERS 2
+int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
+
/* Names of the dimensions. */
char dim_name[NDIM4][PIO_MAX_NAME + 1] = {"time", "vert_level", "lat", "lon"};
@@ -305,7 +309,7 @@ int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank,
/* Close the file. */
if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+ AERR(ret);
exit:
if (data_in)
@@ -317,7 +321,7 @@ int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank,
/* Run a simple test using darrays with async. */
int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
- int num_flavors, int *flavor)
+ int num_flavors, int *flavor, int rearr)
{
int ioid_byte;
int ioid_char;
@@ -373,52 +377,51 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
#endif /* _NETCDF4 */
int ret;
- sprintf(decomp_filename, "decomp_%s.nc", TEST_NAME);
-
+ sprintf(decomp_filename, "decomp_%s_%d.nc", TEST_NAME, rearr);
/* Create the PIO decompositions for this test. */
if ((ret = PIOc_init_decomp(iosysid, PIO_BYTE, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_byte, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_byte, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_CHAR, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_char, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_char, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_SHORT, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_short, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_short, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_int, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_int, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_float, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_float, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_DOUBLE, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_double, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_double, rearr, NULL, NULL)))
+ AERR(ret);
#ifdef _NETCDF4
if ((ret = PIOc_init_decomp(iosysid, PIO_UBYTE, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_ubyte, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_ubyte, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_USHORT, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_ushort, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_ushort, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_UINT, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_uint, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_uint, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_INT64, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_int64, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_int64, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_UINT64, NDIM2, &dim_len[2], elements_per_pe,
- compdof, &ioid_uint64, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof, &ioid_uint64, rearr, NULL, NULL)))
+ AERR(ret);
#endif
if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3, &dim_len[1], elements_per_pe_3d,
- compdof_3d, &ioid_4d_int, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof_3d, &ioid_4d_int, rearr, NULL, NULL)))
+ AERR(ret);
if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM3, &dim_len[1], elements_per_pe_3d,
- compdof_3d, &ioid_4d_float, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ compdof_3d, &ioid_4d_float, rearr, NULL, NULL)))
+ AERR(ret);
/* These are the decompositions associated with each type. */
#ifdef _NETCDF4
@@ -454,12 +457,12 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
sprintf(data_filename, "data_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]);
if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], data_filename,
NC_CLOBBER)))
- ERR(ret);
+ AERR(ret);
/* Define dimensions. */
for (int d = 0; d < NDIM4; d++)
if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d])))
- ERR(ret);
+ AERR(ret);
/* Define variables. */
char var_name[PIO_MAX_NAME + 1];
@@ -470,10 +473,10 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
sprintf(var_name, "var_%d", t);
sprintf(var_norec_name, "var_norec_%d", t);
if ((ret = PIOc_def_var(ncid, var_name, my_type[t], NDIM3, dimids_3d, &rec_varid[t])))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_def_var(ncid, var_norec_name, my_type[t], NDIM2, dimids_2d,
&norec_varid[t])))
- ERR(ret);
+ AERR(ret);
}
char var_name_4d[NUM_4D_VARS][PIO_MAX_NAME + 1] = {"var_4d_int", "var_4d_float"};
@@ -484,11 +487,11 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
/* Define some 4D vars for extra fun. */
for (int v = 0; v < NUM_4D_VARS; v++)
if ((ret = PIOc_def_var(ncid, var_name_4d[v], var_type_4d[v], NDIM4, dimids_4d, &varid_4d[v])))
- ERR(ret);
+ AERR(ret);
/* End define mode. */
if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
+ AERR(ret);
/* Write a record and non-record var for each type. */
for (int t = 0; t < num_types; t++)
@@ -500,22 +503,22 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
if (!r)
{
if ((ret = PIOc_setframe(ncid, rec_varid[t], 0)))
- ERR(ret);
+ AERR(ret);
}
else
{
if ((ret = PIOc_advanceframe(ncid, rec_varid[t])))
- ERR(ret);
+ AERR(ret);
}
/* Write a record of data. */
if ((ret = PIOc_write_darray(ncid, rec_varid[t], var_ioid[t], elements_per_pe,
my_data[t], NULL)))
- ERR(ret);
+ AERR(ret);
/* Sync the file. */
if ((ret = PIOc_sync(ncid)))
- ERR(ret);
+ AERR(ret);
} /* next record. */
} /* next type */
@@ -523,7 +526,7 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
for (int t = 0; t < num_types; t++)
{
if ((ret = PIOc_write_darray(ncid, norec_varid[t], var_ioid[t], elements_per_pe, my_data[t], NULL)))
- ERR(ret);
+ AERR(ret);
}
/* Write the 4D vars. */
@@ -534,61 +537,61 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
if (!r)
{
if ((ret = PIOc_setframe(ncid, varid_4d[v], 0)))
- ERR(ret);
+ AERR(ret);
}
else
{
if ((ret = PIOc_advanceframe(ncid, varid_4d[v])))
- ERR(ret);
+ AERR(ret);
}
if ((ret = PIOc_write_darray(ncid, varid_4d[v], var_ioid_4d[v], elements_per_pe_3d,
my_data_4d[v], NULL)))
- ERR(ret);
+ AERR(ret);
}
}
/* Close the file. */
if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+ AERR(ret);
/* Check the file for correctness. */
if ((ret = check_darray_file(iosysid, data_filename, PIO_IOTYPE_NETCDF, my_rank,
rec_varid, norec_varid, num_types, varid_4d)))
- ERR(ret);
+ AERR(ret);
} /* next iotype */
/* Free the decompositions. */
if ((ret = PIOc_freedecomp(iosysid, ioid_byte)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_char)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_short)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_int)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_float)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_double)))
- ERR(ret);
+ AERR(ret);
#ifdef _NETCDF4
if ((ret = PIOc_freedecomp(iosysid, ioid_ubyte)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_ushort)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_uint)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_int64)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_uint64)))
- ERR(ret);
+ AERR(ret);
#endif /* _NETCDF4 */
if ((ret = PIOc_freedecomp(iosysid, ioid_4d_int)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_freedecomp(iosysid, ioid_4d_float)))
- ERR(ret);
+ AERR(ret);
return 0;
}
@@ -619,42 +622,47 @@ int main(int argc, char **argv)
{
int iosysid;
- /* Initialize with task 0 as IO task, tasks 1-3 as a
- * computation component. */
+
+ for(int rearr=0; rearr < NUM_REARRANGERS; rearr++)
+ {
+
+ /* Initialize with task 0 as IO task, tasks 1-3 as a
+ * computation component. */
#define NUM_IO_PROCS 1
#define NUM_COMPUTATION_PROCS 3
#define COMPONENT_COUNT 1
- int num_computation_procs = NUM_COMPUTATION_PROCS;
- MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */
- MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */
- int mpierr;
-
- if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT,
- &num_computation_procs, NULL, &io_comm, comp_comm,
- PIO_REARR_BOX, &iosysid)))
- ERR(ERR_INIT);
-
- /* This code runs only on computation components. */
- if (my_rank)
- {
- /* Run the simple darray async test. */
- if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, num_flavors, flavor)))
- return ret;
+ int num_computation_procs = NUM_COMPUTATION_PROCS;
+ MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */
+ MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */
+ int mpierr;
+
+ if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT,
+ &num_computation_procs, NULL, &io_comm, comp_comm,
+ rearranger[rearr], &iosysid)))
+ ERR(ERR_INIT);
+
+ /* This code runs only on computation components. */
+ if (io_comm == MPI_COMM_NULL)
+ {
+ /* Run the simple darray async test. */
+ if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, num_flavors, flavor, rearranger[rearr])))
+ return ret;
- /* Finalize PIO system. */
- if ((ret = PIOc_free_iosystem(iosysid)))
- return ret;
+ /* Finalize PIO system. */
+ if ((ret = PIOc_free_iosystem(iosysid)))
+ return ret;
- /* Free the computation conomponent communicator. */
- if ((mpierr = MPI_Comm_free(comp_comm)))
- MPIERR(mpierr);
- }
- else
- {
- /* Free the IO communicator. */
- if ((mpierr = MPI_Comm_free(&io_comm)))
- MPIERR(mpierr);
- }
+ /* Free the computation component communicator. */
+ if ((mpierr = MPI_Comm_free(comp_comm)))
+ MPIERR(mpierr);
+ }
+ else
+ {
+ /* Free the IO communicator. */
+ if ((mpierr = MPI_Comm_free(&io_comm)))
+ MPIERR(mpierr);
+ }
+ } /* next rearranger */
} /* endif my_rank < TARGET_NTASKS */
/* Finalize the MPI library. */
diff --git a/tests/cunit/test_darray_async_simple.c b/tests/cunit/test_darray_async_simple.c
index 86aee1d5fef..35f853b6763 100644
--- a/tests/cunit/test_darray_async_simple.c
+++ b/tests/cunit/test_darray_async_simple.c
@@ -49,24 +49,24 @@ int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank)
/* Reopen the file. */
if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, data_filename, NC_NOWRITE)))
- ERR(ret);
+ AERR(ret);
/* Check the metadata. */
if ((ret = PIOc_inq_varid(ncid, VAR_NAME, &varid)))
- ERR(ret);
+ AERR(ret);
if ((ret = PIOc_inq_dimid(ncid, DIM_NAME, &dimid)))
- ERR(ret);
+ AERR(ret);
/* Check the data. */
if ((ret = PIOc_get_var(ncid, varid, &data_in)))
- ERR(ret);
+ AERR(ret);
for (int r = 1; r < TARGET_NTASKS; r++)
if (data_in[r - 1] != r * 10.0)
- ERR(ret);
+ AERR(ret);
/* Close the file. */
if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+ AERR(ret);
return 0;
}
@@ -107,31 +107,31 @@ int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm,
/* Create sample output file. */
if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], data_filename,
NC_CLOBBER)))
- ERR(ret);
+ AERR(ret);
/* Define dimension. */
if ((ret = PIOc_def_dim(ncid, DIM_NAME, dim_len, &dimid)))
- ERR(ret);
+ AERR(ret);
/* Define variable. */
if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM1, &dimid, &varid)))
- ERR(ret);
+ AERR(ret);
/* End define mode. */
if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
+ AERR(ret);
/* Write some data. */
if ((ret = PIOc_write_darray(ncid, varid, ioid, ELEM1, &my_data, NULL)))
- ERR(ret);
+ AERR(ret);
/* Close the file. */
if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
+ AERR(ret);
/* Check the file for correctness. */
if ((ret = check_darray_file(iosysid, data_filename, PIO_IOTYPE_NETCDF, my_rank)))
- ERR(ret);
+ AERR(ret);
} /* next iotype */
diff --git a/tests/cunit/test_darray_multivar.c b/tests/cunit/test_darray_multivar.c
index 5a38cb292f1..dab07be0559 100644
--- a/tests/cunit/test_darray_multivar.c
+++ b/tests/cunit/test_darray_multivar.c
@@ -580,15 +580,17 @@ int main(int argc, char **argv)
if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride,
ioproc_start, rearranger[r], &iosysid)))
return ret;
-
+ /* printf("test Rearranger %d\n",rearranger[r]); */
/* Run tests. */
if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm,
rearranger[r])))
return ret;
+ /* printf("test Rearranger %d complete\n",rearranger[r]); */
/* Finalize PIO system. */
if ((ret = PIOc_free_iosystem(iosysid)))
return ret;
+
}
} /* endif my_rank < TARGET_NTASKS */
diff --git a/tests/cunit/test_darray_multivar3.c b/tests/cunit/test_darray_multivar3.c
index e0da44aa97d..21b2d887242 100644
--- a/tests/cunit/test_darray_multivar3.c
+++ b/tests/cunit/test_darray_multivar3.c
@@ -1,7 +1,5 @@
/*
- * Tests for PIO distributed arrays. This test demonstrates problems
- * with the fill value that can arrise from mixing types in a
- * decomposition.
+ * Tests for PIO distributed arrays.
*
* @author Ed Hartnett
*/
@@ -61,14 +59,15 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN};
*
* @param iosysid the IO system ID.
* @param ioid the ID of the decomposition.
+ * @param ioid_float the ID of the decomposition for floats.
* @param num_flavors the number of IOTYPES available in this build.
* @param flavor array of available iotypes.
* @param my_rank rank of this task.
* @param test_comm the communicator that is running this test.
* @returns 0 for success, error code otherwise.
*/
-int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor,
- int my_rank, MPI_Comm test_comm)
+int test_multivar_darray(int iosysid, int ioid, int ioid_float, int num_flavors,
+ int *flavor, int my_rank, MPI_Comm test_comm)
{
char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */
int dimids[NDIM]; /* The dimension IDs. */
@@ -78,22 +77,21 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor,
int custom_fillvalue_int = -TEST_VAL_42;
float custom_fillvalue_float = -42.5;
int test_data_int[arraylen];
- /* float test_data_float[arraylen]; */
+ float test_data_float[arraylen];
int ret; /* Return code. */
/* Initialize some data. */
for (int f = 0; f < arraylen; f++)
{
test_data_int[f] = my_rank * 10 + f;
- /* test_data_float[f] = my_rank * 10 + f + 0.5; */
+ test_data_float[f] = my_rank * 10 + f + 0.5;
}
/* Use PIO to create the example file in each of the four
* available ways. */
for (int fmt = 0; fmt < num_flavors; fmt++)
{
- /* for (int use_fv = 0; use_fv < NUM_FV_TESTS; use_fv++) */
- for (int use_fv = 0; use_fv < 1; use_fv++)
+ for (int use_fv = 0; use_fv < NUM_FV_TESTS; use_fv++)
{
/* Create the filename. */
sprintf(filename, "data_%s_iotype_%d_use_fv_%d.nc", TEST_NAME, flavor[fmt], use_fv);
@@ -134,11 +132,11 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor,
ERR(ret);
int *fvp_int = NULL;
- /* float *fvp_float = NULL; */
+ float *fvp_float = NULL;
if (use_fv)
{
fvp_int = &custom_fillvalue_int;
- /* fvp_float = &custom_fillvalue_float; */
+ fvp_float = &custom_fillvalue_float;
}
/* Write the data. */
@@ -148,20 +146,17 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor,
if ((ret = PIOc_write_darray(ncid, varid[1], ioid, arraylen, test_data_int,
fvp_int)))
ERR(ret);
+ if ((ret = PIOc_write_darray(ncid, varid[2], ioid_float, arraylen, test_data_float,
+ fvp_float)))
+ ERR(ret);
/* This should not work since we cannot mix record and not
* record vars. */
- /* int frame[NUM_VAR] = {0, 0, 0}; */
-
- /* if (PIOc_write_darray_multi(ncid, varid, ioid, NUM_VAR, arraylen * NUM_VAR, test_data_float, */
- /* frame, NULL, 0) != PIO_EVARDIMMISMATCH) */
- /* ERR(ERR_WRONG); */
+ int frame[NUM_VAR] = {0, 0, 0};
- /* This should work since int and float are the same size
- * and both are record vars. */
- /* if ((ret = PIOc_write_darray_multi(ncid, varid+1, ioid, NUM_VAR-1, arraylen * (NUM_VAR-1), test_data_float, */
- /* frame, NULL, 0))) */
- /* ERR(ret); */
+ if (PIOc_write_darray_multi(ncid, varid, ioid, NUM_VAR, arraylen * NUM_VAR, test_data_float,
+ frame, NULL, 0) != PIO_EVARDIMMISMATCH)
+ ERR(ERR_WRONG);
/* Close the netCDF file. */
if ((ret = PIOc_closefile(ncid)))
@@ -170,63 +165,71 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor,
/* Check the file contents. */
{
int ncid2; /* The ncid of the re-opened netCDF file. */
- /* float test_data_float_in[arraylen]; */
+ int test_data_int_in[arraylen];
+ float test_data_float_in[arraylen];
+ PIO_Offset idx[NDIM] = {0, 0, 3};
+ int file_fv_int;
+ float file_fv_float;
/* Reopen the file. */
if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE)))
ERR(ret);
/* Read the var data with read_darray(). */
- for (int v = 0; v < NUM_VAR; v++)
+ if ((ret = PIOc_setframe(ncid2, varid[1], 0)))
+ ERR(ret);
+ if ((ret = PIOc_setframe(ncid2, varid[2], 0)))
+ ERR(ret);
+
+ /* Read the data. */
+ if ((ret = PIOc_read_darray(ncid2, varid[0], ioid, arraylen, test_data_int_in)))
+ ERR(ret);
+
+ /* Check the results. */
+ for (int f = 0; f < arraylen; f++)
+ if (test_data_int_in[f] != test_data_int[f])
+ return ERR_WRONG;
+
+ if ((ret = PIOc_read_darray(ncid2, varid[1], ioid, arraylen, test_data_int_in)))
+ ERR(ret);
+
+ /* Check the results. */
+ for (int f = 0; f < arraylen; f++)
+ if (test_data_int_in[f] != test_data_int[f])
+ return ERR_WRONG;
+
+ if ((ret = PIOc_read_darray(ncid2, varid[2], ioid_float, arraylen, test_data_float_in)))
+ ERR(ret);
+
+ /* Check the results. */
+ for (int f = 0; f < arraylen; f++)
+ if (test_data_float_in[f] != test_data_float[f])
+ return ERR_WRONG;
+
+ /* Check an int fill value. */
+ if ((ret = PIOc_get_var1_int(ncid2, varid[1], idx, &file_fv_int)))
+ return ret;
+ if (use_fv)
{
- if (v < NUM_VAR - 1)
- {
- int test_data_int_in[arraylen];
-
- if ((ret = PIOc_setframe(ncid2, varid[v], 0)))
- ERR(ret);
-
- /* Read the data. */
- if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_int_in)))
- ERR(ret);
-
- /* /\* Check the results. *\/ */
- /* for (int f = 0; f < arraylen; f++) */
- /* if (test_data_int_in[f] != test_data_int[f]) */
- /* return ERR_WRONG; */
- }
- } /* next var */
-
- /* /\* Now read the fill values. *\/ */
- /* PIO_Offset idx[NDIM] = {0, 0, 3}; */
- /* int file_fv_int; */
- /* float file_fv_float; */
-
- /* /\* Check an int fill value. *\/ */
- /* if ((ret = PIOc_get_var1_int(ncid2, 1, idx, &file_fv_int))) */
- /* return ret; */
- /* if (use_fv) */
- /* { */
- /* if (file_fv_int != custom_fillvalue_int) */
- /* return ERR_WRONG; */
- /* } */
-
- /* /\* Check the float fill value. *\/ */
- /* if ((ret = PIOc_get_var1_float(ncid2, 2, idx, &file_fv_float))) */
- /* return ret; */
- /* if (use_fv) */
- /* { */
- /* if (file_fv_float != custom_fillvalue_float) */
- /* return ERR_WRONG; */
- /* } */
+ if (file_fv_int != custom_fillvalue_int)
+ return ERR_WRONG;
+ }
+
+ /* Check the float fill value. */
+ if ((ret = PIOc_get_var1_float(ncid2, varid[2], idx, &file_fv_float)))
+ return ret;
+ if (use_fv)
+ {
+ if (file_fv_float != custom_fillvalue_float)
+ return ERR_WRONG;
+ }
/* Close the netCDF file. */
if ((ret = PIOc_closefile(ncid2)))
ERR(ret);
}
- }
- }
-
+ } /* next fillvalue test */
+ } /* next iotype */
return PIO_NOERR;
}
@@ -279,7 +282,7 @@ int main(int argc, char **argv)
int my_rank;
int ntasks;
MPI_Comm test_comm; /* A communicator for this test. */
- int ioid;
+ int ioid, ioid_float;
int ret; /* Return code. */
/* Initialize test. */
@@ -296,9 +299,9 @@ int main(int argc, char **argv)
int iosysid; /* The ID for the parallel I/O system. */
int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */
int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */
- int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN};
- int num_flavors; /* Number of PIO netCDF flavors in this build. */
- int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
+ int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN};
+ int num_flavors; /* Number of PIO netCDF flavors in this build. */
+ int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
/* Figure out iotypes. */
if ((ret = get_iotypes(&num_flavors, flavor)))
@@ -315,8 +318,13 @@ int main(int argc, char **argv)
&ioid, PIO_INT)))
return ret;
+ /* Decompose the data over the tasks for floats. */
+ if ((ret = create_dcomp_gaps(TARGET_NTASKS, my_rank, iosysid, dim_len_2d,
+ &ioid_float, PIO_FLOAT)))
+ return ret;
+
/* Run the multivar darray tests. */
- if ((ret = test_multivar_darray(iosysid, ioid, num_flavors, flavor, my_rank,
+ if ((ret = test_multivar_darray(iosysid, ioid, ioid_float, num_flavors, flavor, my_rank,
test_comm)))
return ret;
@@ -324,6 +332,10 @@ int main(int argc, char **argv)
if ((ret = PIOc_freedecomp(iosysid, ioid)))
ERR(ret);
+ /* Free the PIO decomposition for floats. */
+ if ((ret = PIOc_freedecomp(iosysid, ioid_float)))
+ ERR(ret);
+
/* Finalize PIO system. */
if ((ret = PIOc_free_iosystem(iosysid)))
return ret;
diff --git a/tests/cunit/test_perf2.c b/tests/cunit/test_perf2.c
index b3a106b1529..523b00227c5 100644
--- a/tests/cunit/test_perf2.c
+++ b/tests/cunit/test_perf2.c
@@ -1,5 +1,8 @@
/*
- * Tests for PIO distributed arrays.
+ * This program tests performance in intracomm mode. It writes out
+ * NUM_TIMESTEPS records of a single NC_INT variable. The number of
+ * I/O tasks, IOTYPE, fill mode, and rearranger are varied and write
+ * performance is measured.
*
* @author Ed Hartnett
* @date 2/21/17
@@ -10,18 +13,9 @@
#include
#include
-/* The number of tasks this test should run on. */
-#define TARGET_NTASKS 16
-
-/* The minimum number of tasks this test should run on. */
-#define MIN_NTASKS TARGET_NTASKS
-
/* The name of this test. */
#define TEST_NAME "test_perf2"
-/* Number of computational components to create. */
-#define COMPONENT_COUNT 1
-
/* The number of dimensions in the example data. In this test, we
* are using three-dimensional data. */
#define NDIM 4
@@ -30,8 +24,8 @@
#define NDIM3 3
/* The length of our sample data along each dimension. */
-#define X_DIM_LEN 128
-#define Y_DIM_LEN 128
+#define X_DIM_LEN 512
+#define Y_DIM_LEN 512
#define Z_DIM_LEN 32
/* #define X_DIM_LEN 1024 */
/* #define Y_DIM_LEN 1024 */
@@ -45,7 +39,7 @@
/* Test with and without specifying a fill value to
* PIOc_write_darray(). */
-#define NUM_TEST_CASES_FILLVALUE 2
+#define NUM_TEST_CASES_FILLVALUE 1
/* How many different number of IO tasks to check? */
#define MAX_IO_TESTS 5
@@ -62,6 +56,8 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN, Z_DIM_LEN};
/* Run test for each of the rearrangers. */
#define NUM_REARRANGERS_TO_TEST 2
+#define MILLION 1000000
+
#ifdef USE_MPE
/* This array holds even numbers for MPE. */
int test_event[2][TEST_NUM_EVENTS];
@@ -150,22 +146,30 @@ test_darray(int iosysid, int ioid, int num_flavors, int *flavor,
for (int fmt = 0; fmt < num_flavors; fmt++)
{
char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */
+ char flavorname[PIO_MAX_NAME + 1];
struct timeval starttime, endtime;
long long startt, endt;
long long delta;
float num_megabytes = 0;
- float delta_in_sec;
- float mb_per_sec;
+ float delta_in_sec, read_sec;
+ float mb_per_sec, read_mb_per_sec;
#ifdef USE_MPE
test_start_mpe_log(TEST_CREATE);
#endif /* USE_MPE */
+ /* How many megabytes will we write? */
+ num_megabytes = (NUM_TIMESTEPS * X_DIM_LEN * Y_DIM_LEN * Z_DIM_LEN * sizeof(int))/(MILLION);
+
+ sprintf(filename, "data_%s_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[fmt],
+ rearranger);
/* Create the filename. Use the same filename for all, so we
* don't waste disk space. */
- /* sprintf(filename, "data_%s_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[fmt], */
- /* rearranger); */
- sprintf(filename, "data_%s.nc", TEST_NAME);
+ /* sprintf(filename, "data_%s.nc", TEST_NAME); */
+
+ /* Get name of this IOTYPE. */
+ if ((ret = get_iotype_name(flavor[fmt], flavorname)))
+ ERR(ret);
/* Create the netCDF output file. */
if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER)))
@@ -184,6 +188,14 @@ test_darray(int iosysid, int ioid, int num_flavors, int *flavor,
if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, dimids, &varid)))
ERR(ret);
+ /* NetCDF/HDF5 files benefit from having chunksize set. */
+ if (flavor[fmt] == PIO_IOTYPE_NETCDF4P || flavor[fmt] == PIO_IOTYPE_NETCDF4C)
+ {
+ PIO_Offset chunksizes[NDIM] = {NUM_TIMESTEPS / 2, X_DIM_LEN / 4, Y_DIM_LEN / 4, Z_DIM_LEN};
+ if ((ret = PIOc_def_var_chunking(ncid, varid, NC_CHUNKED, chunksizes)))
+ ERR(ret);
+ }
+
/* End define mode. */
if ((ret = PIOc_enddef(ncid)))
ERR(ret);
@@ -225,7 +237,6 @@ test_darray(int iosysid, int ioid, int num_flavors, int *flavor,
}
#endif /* USE_MPE */
- num_megabytes += (X_DIM_LEN * Y_DIM_LEN * Z_DIM_LEN * sizeof(int))/(1024*1024);
}
#ifdef USE_MPE
@@ -250,12 +261,71 @@ test_darray(int iosysid, int ioid, int num_flavors, int *flavor,
/* Compute the time delta */
startt = (1000000 * starttime.tv_sec) + starttime.tv_usec;
endt = (1000000 * endtime.tv_sec) + endtime.tv_usec;
- delta = (endt - startt)/NUM_TIMESTEPS;
+ delta = (endt - startt);
delta_in_sec = (float)delta / 1000000;
mb_per_sec = num_megabytes / delta_in_sec;
+
+ /* Now reopen the file and re-read the data. */
+ {
+ int *test_data_in;
+
+ if (!(test_data_in = malloc(sizeof(int) * arraylen)))
+ ERR(PIO_ENOMEM);
+
+ /* Re-open the file. */
+ if ((ret = PIOc_openfile2(iosysid, &ncid, &flavor[fmt], filename, PIO_NOWRITE)))
+ ERR(ret);
+
+ /* Start the clock. */
+ gettimeofday(&starttime, NULL);
+
+ for (int t = 0; t < NUM_TIMESTEPS; t++)
+ {
+#ifdef USE_MPE
+ test_start_mpe_log(TEST_DARRAY_READ);
+#endif /* USE_MPE */
+
+ /* Set the value of the record dimension. */
+ if ((ret = PIOc_setframe(ncid, varid, t)))
+ ERR(ret);
+
+ /* Read the data. */
+ if ((ret = PIOc_read_darray(ncid, varid, ioid, arraylen, test_data_in)))
+ ERR(ret);
+
+#ifdef USE_MPE
+ {
+ char msg[MPE_MAX_MSG_LEN + 1];
+ sprintf(msg, "read_darray timestep %d", t);
+ test_stop_mpe_log(TEST_DARRAY_READ, msg);
+ }
+#endif /* USE_MPE */
+
+ } /* next timestep */
+
+ /* Stop the clock. */
+ gettimeofday(&endtime, NULL);
+
+ /* Compute the time delta */
+ startt = (1000000 * starttime.tv_sec) + starttime.tv_usec;
+ endt = (1000000 * endtime.tv_sec) + endtime.tv_usec;
+ delta = (endt - startt);
+ read_sec = (float)delta / 1000000;
+ read_mb_per_sec = num_megabytes / read_sec;
+
+ /* Close file. */
+ if ((ret = PIOc_closefile(ncid)))
+ ERR(ret);
+
+ /* Free resources. */
+ free(test_data_in);
+
+ } /* re-reading file */
+
if (!my_rank)
- printf("%d\t%d\t%d\t%d\t%d\t%8.3f\t%8.1f\t%8.3f\n", ntasks, num_io_procs,
- rearranger, provide_fill, fmt, delta_in_sec, num_megabytes, mb_per_sec);
+ printf("%d,\t%d,\t%s,\t%s,\t%s,\t%8.3f,\t%8.3f,\t%8.1f,\t%8.3f,\t%8.3f\n", ntasks, num_io_procs,
+ (rearranger == 1 ? "box" : "subset"), (provide_fill ? "fill" : "nofill"),
+ flavorname, delta_in_sec, read_sec, num_megabytes, mb_per_sec, read_mb_per_sec);
}
free(test_data);
@@ -394,8 +464,7 @@ test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank,
#endif /* USE_MPE */
/* Test with/without providing a fill value to PIOc_write_darray(). */
- /* for (int provide_fill = 0; provide_fill < NUM_TEST_CASES_FILLVALUE; provide_fill++) */
- for (int provide_fill = 0; provide_fill < 1; provide_fill++)
+ for (int provide_fill = 0; provide_fill < NUM_TEST_CASES_FILLVALUE; provide_fill++)
{
/* Run a simple darray test. */
if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank,
@@ -456,8 +525,8 @@ main(int argc, char **argv)
ERR(ret);
if (!my_rank)
- printf("ntasks\tnio\trearr\tfill\tformat\ttime(s)\tdata size (MB)\t"
- "performance(MB/s)\n");
+ printf("ntasks,\tnio,\trearr,\tfill,\tIOTYPE,\twrite time(s),\tread time(s),\tdata size(MB),\t"
+ "write(MB/s),\tread(MB/s)\n");
/* How many processors for IO? */
num_io_tests = 1;
@@ -466,14 +535,20 @@ main(int argc, char **argv)
if (ntasks >= 64)
num_io_tests = 3;
if (ntasks >= 128)
+ {
num_io_tests = 4;
+ ioproc_stride = 40;
+ }
if (ntasks >= 512)
+ {
num_io_tests = 5;
+ ioproc_stride = 40;
+ }
for (i = 0; i < num_io_tests; i++)
{
/* for (r = 0; r < NUM_REARRANGERS_TO_TEST; r++) */
- for (r = 0; r < 1; r++)
+ for (r = 1; r < 2; r++)
{
#ifdef USE_MPE
test_start_mpe_log(TEST_INIT);
@@ -504,7 +579,7 @@ main(int argc, char **argv)
printf("finalizing io_test!\n");
/* Finalize the MPI library. */
- if ((ret = pio_test_finalize(&test_comm)))
+ if ((ret = pio_test_finalize2(&test_comm, TEST_NAME)))
return ret;
/* printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); */
diff --git a/tests/cunit/test_rearr.c b/tests/cunit/test_rearr.c
index e471787e5a7..85e7862fa4c 100644
--- a/tests/cunit/test_rearr.c
+++ b/tests/cunit/test_rearr.c
@@ -941,7 +941,7 @@ int test_default_subset_partition(MPI_Comm test_comm, int my_rank)
ios->ioproc = 1;
ios->io_rank = my_rank;
- ios->comp_comm = test_comm;
+ ios->union_comm = test_comm;
/* Run the function to test. */
if ((ret = default_subset_partition(ios, iodesc)))
@@ -1058,44 +1058,44 @@ int test_rearrange_comp2io(MPI_Comm test_comm, int my_rank)
for (int r = 0; r < iodesc->nrecvs; r++)
if (iodesc->rtype[r] != PIO_DATATYPE_NULL)
if ((mpierr = MPI_Type_free(&iodesc->rtype[r])))
- MPIBAIL(mpierr);
+ MPIBAIL(mpierr);
exit:
/* Free resources allocated in library code. */
if (iodesc->rtype)
- free(iodesc->rtype);
+ free(iodesc->rtype);
if (iodesc->sindex)
- free(iodesc->sindex);
+ free(iodesc->sindex);
if (iodesc->scount)
- free(iodesc->scount);
+ free(iodesc->scount);
if (iodesc->stype)
- free(iodesc->stype);
+ free(iodesc->stype);
if (iodesc->rcount)
- free(iodesc->rcount);
+ free(iodesc->rcount);
if (iodesc->rfrom)
- free(iodesc->rfrom);
+ free(iodesc->rfrom);
if (iodesc->rindex)
- free(iodesc->rindex);
+ free(iodesc->rindex);
/* Free resources from test. */
if (ior1)
{
- free(ior1->start);
- free(ior1->count);
- free(ior1);
+ free(ior1->start);
+ free(ior1->count);
+ free(ior1);
}
if (ios)
{
- free(ios->ioranks);
- free(ios->compranks);
- free(ios);
+ free(ios->ioranks);
+ free(ios->compranks);
+ free(ios);
}
if (iodesc)
- free(iodesc);
+ free(iodesc);
if (sbuf)
- free(sbuf);
+ free(sbuf);
if (rbuf)
- free(rbuf);
+ free(rbuf);
return ret;
}
diff --git a/tests/cunit/test_simple.c b/tests/cunit/test_simple.c
index 630c7441b35..e18ca0d2a15 100644
--- a/tests/cunit/test_simple.c
+++ b/tests/cunit/test_simple.c
@@ -18,10 +18,10 @@
int main(int argc, char **argv)
{
- int my_rank;
- int ntasks;
+ int my_rank;
+ int ntasks;
int num_iotasks = 1;
- int iosysid, ioid;
+ int iosysid, ioid;
int gdimlen, elements_per_pe;
PIO_Offset *compmap;
int ncid, dimid[NDIM2], varid;
@@ -29,7 +29,7 @@ int main(int argc, char **argv)
int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
int *data, *data_in;
int i, f;
- int ret;
+ int ret;
/* Initialize MPI. */
if ((ret = MPI_Init(&argc, &argv)))
@@ -44,11 +44,17 @@ int main(int argc, char **argv)
/* PIOc_set_log_level(4); */
if (ntasks != 1 && ntasks != 4)
{
- if (!my_rank)
- printf("Test must be run on 1 or 4 tasks.\n");
- return ERR_AWFUL;
+ if (!my_rank)
+ printf("Test must be run on 1 or 4 tasks.\n");
+ return ERR_AWFUL;
}
+#ifdef USE_MPE
+ /* If MPE logging is being used, then initialize it. */
+ if ((ret = MPE_Init_log()))
+ return ret;
+#endif /* USE_MPE */
+
/* Turn off logging, to prevent error messages from being logged
* when we intentionally call functions we know will fail. */
PIOc_set_log_level(-1);
@@ -59,98 +65,98 @@ int main(int argc, char **argv)
/* Initialize the IOsystem. */
if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, num_iotasks, 1, 0, PIO_REARR_BOX,
- &iosysid)))
- ERR(ret);
+ &iosysid)))
+ ERR(ret);
/* Find out which IOtypes are available in this build by calling
* this function from test_common.c. */
if ((ret = get_iotypes(&num_flavors, flavor)))
- ERR(ret);
+ ERR(ret);
/* Initialize the decomposition. */
gdimlen = DIM_LEN;
elements_per_pe = DIM_LEN/ntasks;
if (!(compmap = malloc(elements_per_pe * sizeof(PIO_Offset))))
- ERR(ERR_MEM);
+ ERR(ERR_MEM);
for (i = 0; i < elements_per_pe; i++)
- compmap[i] = my_rank + i;
+ compmap[i] = my_rank + i;
if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM1, &gdimlen, elements_per_pe, compmap,
- &ioid, PIO_REARR_BOX, NULL, NULL)))
- ERR(ret);
+ &ioid, PIO_REARR_BOX, NULL, NULL)))
+ ERR(ret);
free(compmap);
/* Create one record of data. */
if (!(data = malloc(elements_per_pe * sizeof(int))))
- ERR(ERR_MEM);
+ ERR(ERR_MEM);
for (i = 0; i < elements_per_pe; i++)
- data[i] = my_rank + i;
+ data[i] = my_rank + i;
/* Storage to read one record back in. */
if (!(data_in = malloc(elements_per_pe * sizeof(int))))
- ERR(ERR_MEM);
+ ERR(ERR_MEM);
/* Create a file with each available IOType. */
for (f = 0; f < num_flavors; f++)
{
- char filename[NC_MAX_NAME + 1];
-
- /* Create a file. */
- sprintf(filename, "%s_%d.nc", TEST_NAME, flavor[f]);
- if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[f], filename, NC_CLOBBER)))
- ERR(ret);
-
- /* Define dims. */
- if ((ret = PIOc_def_dim(ncid, DIM_NAME_UNLIM, PIO_UNLIMITED, &dimid[0])))
- ERR(ret);
- if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid[1])))
- ERR(ret);
-
- /* Define a var. */
- if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM2, dimid, &varid)))
- ERR(ret);
- if ((ret = PIOc_enddef(ncid)))
- ERR(ret);
-
- /* Write a record of data. Each compute task writes its local
- * array of data. */
- if ((ret = PIOc_setframe(ncid, varid, 0)))
- ERR(ret);
- if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, data, NULL)))
- ERR(ret);
-
- /* Close the file. */
- if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
-
- /* Check the file. */
- {
- /* Reopen the file. */
- if ((ret = PIOc_openfile(iosysid, &ncid, &flavor[f], filename, NC_NOWRITE)))
- ERR(ret);
-
- /* Read the local array of data for this task and confirm correctness. */
- if ((ret = PIOc_setframe(ncid, varid, 0)))
- ERR(ret);
- if ((ret = PIOc_read_darray(ncid, varid, ioid, elements_per_pe, data_in)))
- ERR(ret);
- for (i = 0; i < elements_per_pe; i++)
- if (data_in[i] != data[i]) ERR(ERR_WRONG);
-
- /* Close the file. */
- if ((ret = PIOc_closefile(ncid)))
- ERR(ret);
- }
+ char filename[NC_MAX_NAME + 1];
+
+ /* Create a file. */
+ sprintf(filename, "%s_%d.nc", TEST_NAME, flavor[f]);
+ if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[f], filename, NC_CLOBBER)))
+ ERR(ret);
+
+ /* Define dims. */
+ if ((ret = PIOc_def_dim(ncid, DIM_NAME_UNLIM, PIO_UNLIMITED, &dimid[0])))
+ ERR(ret);
+ if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid[1])))
+ ERR(ret);
+
+ /* Define a var. */
+ if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM2, dimid, &varid)))
+ ERR(ret);
+ if ((ret = PIOc_enddef(ncid)))
+ ERR(ret);
+
+ /* Write a record of data. Each compute task writes its local
+ * array of data. */
+ if ((ret = PIOc_setframe(ncid, varid, 0)))
+ ERR(ret);
+ if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, data, NULL)))
+ ERR(ret);
+
+ /* Close the file. */
+ if ((ret = PIOc_closefile(ncid)))
+ ERR(ret);
+
+ /* Check the file. */
+ {
+ /* Reopen the file. */
+ if ((ret = PIOc_openfile(iosysid, &ncid, &flavor[f], filename, NC_NOWRITE)))
+ ERR(ret);
+
+ /* Read the local array of data for this task and confirm correctness. */
+ if ((ret = PIOc_setframe(ncid, varid, 0)))
+ ERR(ret);
+ if ((ret = PIOc_read_darray(ncid, varid, ioid, elements_per_pe, data_in)))
+ ERR(ret);
+ for (i = 0; i < elements_per_pe; i++)
+ if (data_in[i] != data[i]) ERR(ERR_WRONG);
+
+ /* Close the file. */
+ if ((ret = PIOc_closefile(ncid)))
+ ERR(ret);
+ }
} /* next IOType */
/* Free resources. */
free(data);
free(data_in);
if ((ret = PIOc_freedecomp(iosysid, ioid)))
- ERR(ret);
+ ERR(ret);
/* Finalize the IOsystem. */
if ((ret = PIOc_finalize(iosysid)))
- ERR(ret);
+ ERR(ret);
printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);
diff --git a/tests/fncint/ftst_pio.f90 b/tests/fncint/ftst_pio.f90
index cdb5a543f22..4226c3e160a 100644
--- a/tests/fncint/ftst_pio.f90
+++ b/tests/fncint/ftst_pio.f90
@@ -61,7 +61,7 @@ program ftst_pio
if (ierr .ne. nf_noerr) call handle_err(ierr)
! Create a file.
- ierr = nf_create(FILE_NAME, 64, ncid)
+ ierr = nf_create(FILE_NAME, NF_PIO, ncid)
if (ierr .ne. nf_noerr) call handle_err(ierr)
! Define dimensions.
diff --git a/tests/fncint/ftst_pio_orig.f90 b/tests/fncint/ftst_pio_orig.f90
index e3b46fdd74d..1b4a940ac80 100644
--- a/tests/fncint/ftst_pio_orig.f90
+++ b/tests/fncint/ftst_pio_orig.f90
@@ -65,7 +65,7 @@ program ftst_pio
call PIO_initdecomp(ioSystem, PIO_int, dims, compdof, iodesc)
! Create a file.
- ierr = PIO_createfile(ioSystem, pioFileDesc, PIO_IOTYPE_PNETCDF, FILE_NAME, PIO_clobber)
+ ierr = PIO_createfile(ioSystem, pioFileDesc, PIO_IOTYPE_NETCDF, FILE_NAME, PIO_clobber)
if (ierr .ne. nf_noerr) call handle_err(ierr)
! Define dimensions.
diff --git a/tests/general/CMakeLists.txt b/tests/general/CMakeLists.txt
index a45e1f7f033..b45ef60579a 100644
--- a/tests/general/CMakeLists.txt
+++ b/tests/general/CMakeLists.txt
@@ -22,6 +22,7 @@ SET(GENERATED_SRCS pio_file_simple_tests.F90
pio_decomp_frame_tests.F90
pio_decomp_fillval.F90
pio_iosystem_tests.F90
+ pio_iosystem_async_tests.F90
pio_iosystem_tests2.F90
pio_iosystem_tests3.F90)
@@ -782,6 +783,19 @@ else ()
TIMEOUT ${DEFAULT_TEST_TIMEOUT})
endif ()
+add_executable (pio_iosystem_async_tests EXCLUDE_FROM_ALL
+ pio_iosystem_async_tests.F90)
+target_link_libraries (pio_iosystem_async_tests pio_tutil)
+add_dependencies (tests pio_iosystem_async_tests)
+
+if (NOT PIO_USE_MPISERIAL)
+ add_mpi_test(pio_iosystem_async_tests
+ EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/pio_iosystem_async_tests
+ NUMPROCS 5
+ TIMEOUT ${DEFAULT_TEST_TIMEOUT})
+endif ()
+
+
#===== pio_iosystems_test2 =====
add_executable (pio_iosystem_tests2 EXCLUDE_FROM_ALL
diff --git a/tests/general/Makefile.am b/tests/general/Makefile.am
index 2a198588898..98440aa882c 100644
--- a/tests/general/Makefile.am
+++ b/tests/general/Makefile.am
@@ -38,7 +38,7 @@ pio_file_fail ncdf_simple_tests ncdf_get_put ncdf_fail ncdf_inq \
pio_rearr pio_rearr_opts2 pio_decomp_tests \
pio_decomp_tests_1d pio_decomp_tests_2d pio_decomp_tests_3d \
pio_decomp_frame_tests pio_decomp_fillval pio_iosystem_tests \
-pio_iosystem_tests2 pio_iosystem_tests3
+pio_iosystem_tests2 pio_iosystem_tests3 pio_iosystem_async_tests
pio_init_finalize_SOURCES = pio_init_finalize.F90
pio_file_simple_tests_SOURCES = pio_file_simple_tests.F90
@@ -59,6 +59,7 @@ pio_decomp_fillval_SOURCES = pio_decomp_fillval.F90
pio_iosystem_tests_SOURCES = pio_iosystem_tests.F90
pio_iosystem_tests2_SOURCES = pio_iosystem_tests2.F90
pio_iosystem_tests3_SOURCES = pio_iosystem_tests3.F90
+pio_iosystem_async_tests_SOURCES = pio_iosystem_async_tests.F90
if RUN_TESTS
# Tests will run from a bash script.
@@ -87,6 +88,7 @@ pio_iosystem_tests.F90:pio_iosystem_tests.F90.in
pio_rearr.F90:pio_rearr.F90.in
pio_rearr_opts2.F90:pio_rearr_opts2.F90.in
pio_rearr_opts.F90:pio_rearr_opts.F90.in
+pio_iosystem_async_tests.F90:pio_iosystem_async_tests.F90.in
# Distribute the test script.
EXTRA_DIST = CMakeLists.txt run_tests.sh.in ncdf_fail.F90.in \
@@ -97,7 +99,7 @@ pio_decomp_tests_3d.F90.in pio_decomp_tests.F90.in pio_fail.F90.in \
pio_file_fail.F90.in pio_file_simple_tests.F90.in \
pio_init_finalize.F90.in pio_iosystem_tests2.F90.in \
pio_iosystem_tests3.F90.in pio_iosystem_tests.F90.in pio_rearr.F90.in \
-pio_rearr_opts2.F90.in pio_rearr_opts.F90.in
+pio_rearr_opts2.F90.in pio_rearr_opts.F90.in pio_iosystem_async_tests.F90.in
# Clean up files produced during testing.
CLEANFILES = *.nc *.log *.mod
diff --git a/tests/general/ncdf_get_put.F90.in b/tests/general/ncdf_get_put.F90.in
index 0e87110d479..392de210f8d 100644
--- a/tests/general/ncdf_get_put.F90.in
+++ b/tests/general/ncdf_get_put.F90.in
@@ -336,10 +336,8 @@ PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1dvar_slice
do j=1,MAX_ROW_dim_len
ret = PIO_get_var(pio_file, pio_var_char, (/1,j/), gcval(j))
PIO_TF_CHECK_ERR(ret, "Failed to get var:" // trim(filename))
- print *,j,trim(gcval(j)),trim(pcval(j))
PIO_TF_CHECK_VAL((gcval(j), pcval(j)), "Got wrong value")
enddo
-
call PIO_closefile(pio_file)
call PIO_deletefile(pio_tf_iosystem_, filename);
end do
diff --git a/tests/general/pio_decomp_tests_2d_async.F90.in b/tests/general/pio_decomp_tests_2d_async.F90.in
new file mode 100644
index 00000000000..0afc6be2937
--- /dev/null
+++ b/tests/general/pio_decomp_tests_2d_async.F90.in
@@ -0,0 +1,377 @@
+#include "config.h"
+! Get a 2D column decomposition
+! If force_rearrange is FALSE, the decomposition is such that
+! # All even procs have VEC_COL_SZ rows of VEC_ROW_SZ elements
+! # All odd procs have VEC_COL_SZ rows of VEC_ROW_SZ + 1 elements
+! e.g. For VEC_ROW_SZ = 2, VEC_COL_SZ = 2 and ranks 0, 1, 2,
+! e.g. 1) |(1,1) (1,2)| |(1,3) (1,4) (1,5)| |(1,6) (1,7)|
+! |(2,1) (2,2)|, |(2,3) (2,4) (2,5)|, |(2,6) (2,7)|
+! If force_rearrange is TRUE, the decomposition is such that,
+! If possible, the even rank "exchanges" elements with the next
+! higher ranked odd proc.
+! This for example can be used to force rearrangement when reading
+! or writing data.
+! e.g. For VEC_ROW_SZ = 2, VEC_COL_SZ = 2 and ranks 0, 1, 2
+! e.g. 1) |(1,3) (1,4) (1,5)| |(1,1) (1,2)| |(1,6) (1,7)|
+! |(2,3) (2,4) (2,5)|, |(2,1) (2,2)|, |(2,6) (2,7)|
+SUBROUTINE get_2d_col_decomp_info(rank, sz, dims, start, count, force_rearrange)
+ integer, parameter :: VEC_ROW_SZ = 7
+ integer, parameter :: VEC_COL_SZ = 7
+ integer, parameter :: NDIMS = 2
+ integer, intent(in) :: rank
+ integer, intent(in) :: sz
+ integer, dimension(NDIMS), intent(out) :: dims
+ integer, dimension(NDIMS), intent(out) :: start
+ integer, dimension(NDIMS), intent(out) :: count
+ logical, intent(in) :: force_rearrange
+
+ logical :: is_even_rank
+ integer :: num_odd_procs, num_even_procs
+ integer :: iodd, ieven
+
+ is_even_rank = .false.
+ if (mod(rank, 2) == 0) then
+ is_even_rank = .true.
+ end if
+ num_odd_procs = sz / 2
+ num_even_procs = sz - num_odd_procs
+ dims(1) = VEC_COL_SZ
+ dims(2) = num_even_procs * VEC_ROW_SZ + num_odd_procs * (VEC_ROW_SZ + 1)
+ ! Number of odd and even procs before this rank
+ iodd = rank / 2
+ ieven = (rank + 1) / 2
+
+ ! Rows
+ start(1) = 1
+ count(1) = VEC_COL_SZ
+
+ ! Columns
+ if(force_rearrange) then
+ ! Make sure that we force rearrangement
+ if (is_even_rank) then
+ if(rank + 1 < sz) then
+ ! Force rearrangement
+ count(2) = VEC_ROW_SZ + 1
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + (VEC_ROW_SZ) + 1
+ else
+ count(2) = VEC_ROW_SZ
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + 1
+ end if
+ else
+ ! For all odd procs there is an even lower ranked, rank-1, proc
+ ! So force rearrangement
+ count(2) = VEC_ROW_SZ
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) - (VEC_ROW_SZ) + 1
+ end if
+ else
+ if (is_even_rank) then
+ count(2) = VEC_ROW_SZ
+ else
+ count(2) = VEC_ROW_SZ + 1
+ end if
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + 1
+ end if
+
+END SUBROUTINE
+
+! Get a 2D row decomposition
+! If force_rearrange is FALSE, the decomposition is such that
+! # All even procs have VEC_COL_SZ rows of VEC_ROW_SZ elements
+! # All odd procs have VEC_COL_SZ+1 rows of VEC_ROW_SZ elements
+! e.g. For VEC_ROW_SZ = 6, VEC_COL_SZ = 1 and ranks 0, 1, 2,
+! e.g. 1) |(1,1) (1,2) (1,3) (1,4) (1,5) (1,6)|,
+! |(2,1) (2,2) (2,3) (2,4) (2,5) (2,6)|
+! |(3,1) (3,2) (3,3) (3,4) (3,5) (3,6)|,
+! |(4,1) (4,2) (4,3) (4,4) (4,5) (4,6)|
+! If force_rearrange is TRUE, the decomposition is such that,
+! If possible, the even rank "exchanges" elements (rows) with the next
+! higher ranked odd proc.
+! This for example can be used to force rearrangement when reading
+! or writing data.
+! e.g. For VEC_ROW_SZ = 6, VEC_COL_SZ = 1 and ranks 0, 1, 2
+! e.g. 1) |(2,1) (2,2) (2,3) (2,4) (2,5) (2,6)|
+! |(3,1) (3,2) (3,3) (3,4) (3,5) (3,6)|,
+! |(1,1) (1,2) (1,3) (1,4) (1,5) (1,6)|,
+! |(4,1) (4,2) (4,3) (4,4) (4,5) (4,6)|
+SUBROUTINE get_2d_row_decomp_info(rank, sz, dims, start, count, force_rearrange)
+ integer, parameter :: VEC_COL_SZ = 7
+ integer, parameter :: VEC_ROW_SZ = 7
+ integer, parameter :: NDIMS = 2
+ integer, intent(in) :: rank
+ integer, intent(in) :: sz
+ integer, dimension(NDIMS), intent(out) :: dims
+ integer, dimension(NDIMS), intent(out) :: start
+ integer, dimension(NDIMS), intent(out) :: count
+ logical, intent(in) :: force_rearrange
+
+ logical :: is_even_rank
+ integer :: num_odd_procs, num_even_procs
+ integer :: iodd, ieven
+
+ is_even_rank = .false.
+ if (mod(rank, 2) == 0) then
+ is_even_rank = .true.
+ end if
+ num_odd_procs = sz / 2
+ num_even_procs = sz - num_odd_procs
+ dims(1) = num_even_procs * VEC_COL_SZ + num_odd_procs * (VEC_COL_SZ + 1)
+ dims(2) = VEC_ROW_SZ
+ ! Number of odd and even procs before this rank
+ iodd = rank / 2
+ ieven = (rank + 1) / 2
+
+ ! Rows
+ if(force_rearrange) then
+ ! Make sure that we force rearrangement
+ if (is_even_rank) then
+ if(rank + 1 < sz) then
+ ! Force rearrangement
+ count(1) = VEC_COL_SZ + 1
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + (VEC_COL_SZ) + 1
+ else
+ count(1) = VEC_COL_SZ
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + 1
+ end if
+ else
+ ! For all odd procs there is an even lower ranked, rank-1, proc
+ ! So force rearrangement
+ count(1) = VEC_COL_SZ
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) - (VEC_COL_SZ) + 1
+ end if
+ else
+ if (is_even_rank) then
+ count(1) = VEC_COL_SZ
+ else
+ count(1) = VEC_COL_SZ + 1
+ end if
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + 1
+ end if
+
+ ! Columns
+ start(2) = 1
+ count(2) = VEC_ROW_SZ
+
+END SUBROUTINE
+
+! Write with one decomp (to force rearrangement) and read with another (no
+! rearrangement)
+PIO_TF_TEMPLATE
+PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_read_2d_col_decomp
+ implicit none
+ type(var_desc_t) :: pio_var
+ type(file_desc_t) :: pio_file
+ integer, parameter :: NDIMS = 2
+ character(len=PIO_TF_MAX_STR_LEN) :: filename
+ type(io_desc_t) :: wr_iodesc, rd_iodesc
+ integer, dimension(:), allocatable :: compdof
+ integer, dimension(NDIMS) :: start, count
+ PIO_TF_FC_DATA_TYPE, dimension(:,:), allocatable :: rbuf, wbuf, exp_val
+ integer, dimension(NDIMS) :: dims
+ integer, dimension(NDIMS) :: pio_dims
+ integer :: i, j, tmp_idx, ierr, nrows, ncols
+ ! iotypes = valid io types
+ integer, dimension(:), allocatable :: iotypes
+ character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs
+ integer :: num_iotypes
+  integer :: avar, old_eh
+
+ ! Set the decomposition for writing data - forcing rearrangement
+ call get_2d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .true.)
+ nrows = count(1)
+ ncols = count(2)
+
+ allocate(wbuf(nrows, ncols))
+ allocate(compdof(nrows * ncols))
+ do j=1,ncols
+ do i=1,nrows
+ wbuf(i,j) = (start(2) - 1 + j - 1) * nrows + i
+ tmp_idx = (j - 1) * nrows + i
+ compdof(tmp_idx) = int(wbuf(i,j))
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, wr_iodesc)
+ deallocate(compdof)
+
+ ! Set the decomposition for reading data - different from the write decomp
+ call get_2d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .false.)
+ nrows = count(1)
+ ncols = count(2)
+
+ allocate(rbuf(nrows, ncols))
+ allocate(compdof(nrows * ncols))
+ allocate(exp_val(nrows, ncols))
+ do j=1,ncols
+ do i=1,nrows
+ tmp_idx = (j - 1) * nrows + i
+ compdof(tmp_idx) = (start(2) - 1 + j - 1) * nrows + i
+ ! Expected value, after reading, is the same as the compdof
+ exp_val(i,j) = compdof(tmp_idx)
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, rd_iodesc)
+ deallocate(compdof)
+
+ num_iotypes = 0
+ call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes)
+ filename = "test_pio_decomp_simple_tests.testfile"
+ do i=1,num_iotypes
+ PIO_TF_LOG(0,*) "Testing : PIO_TF_DATA_TYPE : ", iotype_descs(i)
+ ierr = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER)
+ PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_row', dims(1), pio_dims(1))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_col', dims(2), pio_dims(2))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, pio_dims, pio_var)
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename))
+
+ ierr = PIO_enddef(pio_file)
+ PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename))
+
+ ! Write the variable out
+ call PIO_write_darray(pio_file, pio_var, wr_iodesc, wbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to write darray : " // trim(filename))
+
+ call PIO_syncfile(pio_file)
+
+ call PIO_read_darray(pio_file, pio_var, rd_iodesc, rbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to read darray : " // trim(filename))
+
+ PIO_TF_CHECK_VAL((rbuf, exp_val), "Got wrong val")
+
+    ! Try to retrieve a non-existent attribute (github issue #1783)
+ call pio_seterrorhandling(pio_file, pio_bcast_error, old_eh)
+ ierr = pio_get_att(pio_file, pio_var, 'notreally', avar)
+
+ PIO_TF_CHECK_ERR(ierr == PIO_ENOTATT, "Got wrong error or no error")
+
+ call pio_seterrorhandling(pio_file, old_eh)
+
+ call PIO_closefile(pio_file)
+
+ call PIO_deletefile(pio_tf_iosystem_, filename);
+ end do
+
+ if(allocated(iotypes)) then
+ deallocate(iotypes)
+ deallocate(iotype_descs)
+ end if
+
+ call PIO_freedecomp(pio_tf_iosystem_, rd_iodesc)
+ call PIO_freedecomp(pio_tf_iosystem_, wr_iodesc)
+ deallocate(exp_val)
+ deallocate(rbuf)
+ deallocate(wbuf)
+PIO_TF_AUTO_TEST_SUB_END nc_write_read_2d_col_decomp
+
+! Write with one decomp (to force rearrangement) and read with another (no
+! rearrangement)
+PIO_TF_TEMPLATE
+PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_read_2d_row_decomp
+ implicit none
+ type(var_desc_t) :: pio_var
+ type(file_desc_t) :: pio_file
+ integer, parameter :: NDIMS = 2
+ character(len=PIO_TF_MAX_STR_LEN) :: filename
+ type(io_desc_t) :: wr_iodesc, rd_iodesc
+ integer, dimension(:), allocatable :: compdof
+ integer, dimension(NDIMS) :: start, count
+ PIO_TF_FC_DATA_TYPE, dimension(:,:), allocatable :: rbuf, wbuf, exp_val
+ integer, dimension(NDIMS) :: dims
+ integer, dimension(NDIMS) :: pio_dims
+ integer :: i, j, tmp_idx, ierr, nrows, ncols
+ ! iotypes = valid io types
+ integer, dimension(:), allocatable :: iotypes
+ character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs
+ integer :: num_iotypes
+
+ ! Set the decomposition for writing data - forcing rearrangement
+ call get_2d_row_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .true.)
+ nrows = count(1)
+ ncols = count(2)
+
+ allocate(wbuf(nrows, ncols))
+ allocate(compdof(nrows * ncols))
+ do j=1,ncols
+ do i=1,nrows
+ wbuf(i,j) = (start(2) - 1 + j - 1) * dims(1) + start(1) + i - 1
+ tmp_idx = (j - 1) * nrows + i
+ compdof(tmp_idx) = int(wbuf(i,j))
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, wr_iodesc)
+ deallocate(compdof)
+
+ ! Set the decomposition for reading data - different from the write decomp
+ call get_2d_row_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .false.)
+ nrows = count(1)
+ ncols = count(2)
+
+ allocate(rbuf(nrows, ncols))
+ allocate(compdof(nrows * ncols))
+ allocate(exp_val(nrows, ncols))
+ do j=1,ncols
+ do i=1,nrows
+ tmp_idx = (j - 1) * nrows + i
+ compdof(tmp_idx) = (start(2) - 1 + j - 1) * dims(1) + start(1) + i - 1
+ ! Expected value, after reading, is the same as the compdof
+ exp_val(i,j) = compdof(tmp_idx)
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, rd_iodesc)
+ deallocate(compdof)
+
+ num_iotypes = 0
+ call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes)
+ filename = "test_pio_decomp_simple_tests.testfile"
+ do i=1,num_iotypes
+ PIO_TF_LOG(0,*) "Testing : PIO_TF_DATA_TYPE : ", iotype_descs(i)
+ ierr = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER)
+ PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_row', dims(1), pio_dims(1))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_col', dims(2), pio_dims(2))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, pio_dims, pio_var)
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename))
+
+ ierr = PIO_enddef(pio_file)
+ PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename))
+
+ ! Write the variable out
+ call PIO_write_darray(pio_file, pio_var, wr_iodesc, wbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to write darray : " // trim(filename))
+
+ call PIO_syncfile(pio_file)
+
+ call PIO_read_darray(pio_file, pio_var, rd_iodesc, rbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to read darray : " // trim(filename))
+
+ PIO_TF_CHECK_VAL((rbuf, exp_val), "Got wrong val")
+
+ call PIO_closefile(pio_file)
+
+ call PIO_deletefile(pio_tf_iosystem_, filename);
+ end do
+
+ if(allocated(iotypes)) then
+ deallocate(iotypes)
+ deallocate(iotype_descs)
+ end if
+
+ call PIO_freedecomp(pio_tf_iosystem_, rd_iodesc)
+ call PIO_freedecomp(pio_tf_iosystem_, wr_iodesc)
+ deallocate(exp_val)
+ deallocate(rbuf)
+ deallocate(wbuf)
+PIO_TF_AUTO_TEST_SUB_END nc_write_read_2d_row_decomp
diff --git a/tests/general/pio_decomp_tests_2d_halo.F90.in b/tests/general/pio_decomp_tests_2d_halo.F90.in
new file mode 100644
index 00000000000..e2a0405b75e
--- /dev/null
+++ b/tests/general/pio_decomp_tests_2d_halo.F90.in
@@ -0,0 +1,249 @@
+SUBROUTINE get_2d_col_decomp_info(rank, sz, dims, start, count, force_rearrange)
+ integer, parameter :: VEC_ROW_SZ = 6
+ integer, parameter :: VEC_COL_SZ = 6
+ integer, parameter :: NDIMS = 2
+ integer, intent(in) :: rank
+ integer, intent(in) :: sz
+ integer, dimension(NDIMS), intent(out) :: dims
+ integer, dimension(NDIMS), intent(out) :: start
+ integer, dimension(NDIMS), intent(out) :: count
+ logical, intent(in) :: force_rearrange
+
+ logical :: is_even_rank
+ integer :: num_odd_procs, num_even_procs
+ integer :: iodd, ieven
+
+ is_even_rank = .false.
+ if (mod(rank, 2) == 0) then
+ is_even_rank = .true.
+ end if
+ num_odd_procs = sz / 2
+ num_even_procs = sz - num_odd_procs
+ dims(1) = VEC_COL_SZ
+ dims(2) = num_even_procs * VEC_ROW_SZ + num_odd_procs * (VEC_ROW_SZ + 1)
+ ! Number of odd and even procs before this rank
+ iodd = rank / 2
+ ieven = (rank + 1) / 2
+
+ ! Rows
+ start(1) = 1
+ count(1) = VEC_COL_SZ
+
+ ! Columns
+ if(force_rearrange) then
+ ! Make sure that we force rearrangement
+ if (is_even_rank) then
+ if(rank + 1 < sz) then
+ ! Force rearrangement
+ count(2) = VEC_ROW_SZ + 1
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + (VEC_ROW_SZ) + 1
+ else
+ count(2) = VEC_ROW_SZ
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + 1
+ end if
+ else
+ ! For all odd procs there is an even lower ranked, rank-1, proc
+ ! So force rearrangement
+ count(2) = VEC_ROW_SZ
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) - (VEC_ROW_SZ) + 1
+ end if
+ else
+ if (is_even_rank) then
+ count(2) = VEC_ROW_SZ
+ else
+ count(2) = VEC_ROW_SZ + 1
+ end if
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + 1
+ end if
+
+END SUBROUTINE
+
+! Get a 2D row decomposition
+! If force_rearrange is FALSE, the decomposition is such that
+! # All even procs have VEC_COL_SZ rows of VEC_ROW_SZ elements
+! # All odd procs have VEC_COL_SZ+1 rows of VEC_ROW_SZ elements
+! e.g. For VEC_ROW_SZ = 6, VEC_COL_SZ = 1 and ranks 0, 1, 2,
+! e.g. 1) |(1,1) (1,2) (1,3) (1,4) (1,5) (1,6)|,
+! |(2,1) (2,2) (2,3) (2,4) (2,5) (2,6)|
+! |(3,1) (3,2) (3,3) (3,4) (3,5) (3,6)|,
+! |(4,1) (4,2) (4,3) (4,4) (4,5) (4,6)|
+! If force_rearrange is TRUE, the decomposition is such that,
+! If possible, the even rank "exchanges" elements (rows) with the next
+! higher ranked odd proc.
+! This for example can be used to force rearrangement when reading
+! or writing data.
+! e.g. For VEC_ROW_SZ = 6, VEC_COL_SZ = 1 and ranks 0, 1, 2
+! e.g. 1) |(2,1) (2,2) (2,3) (2,4) (2,5) (2,6)|
+! |(3,1) (3,2) (3,3) (3,4) (3,5) (3,6)|,
+! |(1,1) (1,2) (1,3) (1,4) (1,5) (1,6)|,
+! |(4,1) (4,2) (4,3) (4,4) (4,5) (4,6)|
+SUBROUTINE get_2d_row_decomp_info(rank, sz, dims, start, count, force_rearrange)
+ integer, parameter :: VEC_COL_SZ = 7
+ integer, parameter :: VEC_ROW_SZ = 7
+ integer, parameter :: NDIMS = 2
+ integer, intent(in) :: rank
+ integer, intent(in) :: sz
+ integer, dimension(NDIMS), intent(out) :: dims
+ integer, dimension(NDIMS), intent(out) :: start
+ integer, dimension(NDIMS), intent(out) :: count
+ logical, intent(in) :: force_rearrange
+
+ logical :: is_even_rank
+ integer :: num_odd_procs, num_even_procs
+ integer :: iodd, ieven
+
+ is_even_rank = .false.
+ if (mod(rank, 2) == 0) then
+ is_even_rank = .true.
+ end if
+ num_odd_procs = sz / 2
+ num_even_procs = sz - num_odd_procs
+ dims(1) = num_even_procs * VEC_COL_SZ + num_odd_procs * (VEC_COL_SZ + 1)
+ dims(2) = VEC_ROW_SZ
+ ! Number of odd and even procs before this rank
+ iodd = rank / 2
+ ieven = (rank + 1) / 2
+
+ ! Rows
+ if(force_rearrange) then
+ ! Make sure that we force rearrangement
+ if (is_even_rank) then
+ if(rank + 1 < sz) then
+ ! Force rearrangement
+ count(1) = VEC_COL_SZ + 1
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + (VEC_COL_SZ) + 1
+ else
+ count(1) = VEC_COL_SZ
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + 1
+ end if
+ else
+ ! For all odd procs there is an even lower ranked, rank-1, proc
+ ! So force rearrangement
+ count(1) = VEC_COL_SZ
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) - (VEC_COL_SZ) + 1
+ end if
+ else
+ if (is_even_rank) then
+ count(1) = VEC_COL_SZ
+ else
+ count(1) = VEC_COL_SZ + 1
+ end if
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + 1
+ end if
+
+ ! Columns
+ start(2) = 1
+ count(2) = VEC_ROW_SZ
+
+END SUBROUTINE
+
+! Write with one decomp (to force rearrangement) and read with another (no
+! rearrangement)
+PIO_TF_TEMPLATE
+PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_read_2d_col_decomp
+ implicit none
+ type(var_desc_t) :: pio_var
+ type(file_desc_t) :: pio_file
+ integer, parameter :: NDIMS = 2
+ character(len=PIO_TF_MAX_STR_LEN) :: filename
+ type(io_desc_t) :: wr_iodesc, rd_iodesc
+ integer, dimension(:), allocatable :: compdof
+ integer, dimension(NDIMS) :: start, count
+ PIO_TF_FC_DATA_TYPE, dimension(:,:), allocatable :: rbuf, wbuf, exp_val
+ integer, dimension(NDIMS) :: dims
+ integer, dimension(NDIMS) :: pio_dims
+ integer :: i, j, tmp_idx, ierr, nrows, ncols, cnt
+ ! iotypes = valid io types
+ integer, dimension(:), allocatable :: iotypes
+ character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs
+ integer :: num_iotypes
+
+ ! Set the decomposition for writing data - forcing rearrangement
+ call get_2d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .true.)
+ nrows = count(1)+2
+ ncols = count(2)+2
+
+ allocate(wbuf(nrows, ncols))
+ allocate(compdof(nrows * ncols))
+ compdof = 0
+ cnt = 0
+ do j=2,ncols-1
+ do i=2,nrows-1
+ cnt = cnt + 1
+ wbuf(i,j) = cnt
+ tmp_idx = (j - 1) * nrows + i
+ compdof(tmp_idx) = cnt
+ end do
+ end do
+ print *,__FILE__,__LINE__,dims, 'compdof: ',compdof
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, wr_iodesc)
+ deallocate(compdof)
+
+ ! Set the decomposition for reading data - different from the write decomp
+ call get_2d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .false.)
+ nrows = count(1)
+ ncols = count(2)
+
+ allocate(rbuf(nrows, ncols))
+ allocate(compdof(nrows * ncols))
+ allocate(exp_val(nrows, ncols))
+ do j=1,ncols
+ do i=1,nrows
+ tmp_idx = (j - 1) * nrows + i
+ compdof(tmp_idx) = (start(2) - 1 + j - 1) * nrows + i
+ ! Expected value, after reading, is the same as the compdof
+ exp_val(i,j) = compdof(tmp_idx)
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, rd_iodesc)
+ deallocate(compdof)
+
+ num_iotypes = 0
+ call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes)
+ filename = "test_pio_decomp_simple_tests.testfile"
+ num_iotypes = 1
+ do i=1,num_iotypes
+ PIO_TF_LOG(0,*) "Testing : PIO_TF_DATA_TYPE : ", iotype_descs(i)
+ ierr = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER)
+ PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_row', dims(1), pio_dims(1))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_col', dims(2), pio_dims(2))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, pio_dims, pio_var)
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename))
+
+ ierr = PIO_enddef(pio_file)
+ PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename))
+
+ ! Write the variable out
+ call PIO_write_darray(pio_file, pio_var, wr_iodesc, wbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to write darray : " // trim(filename))
+
+ call PIO_syncfile(pio_file)
+
+ call PIO_read_darray(pio_file, pio_var, rd_iodesc, rbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to read darray : " // trim(filename))
+
+ PIO_TF_CHECK_VAL((rbuf, exp_val), "Got wrong val")
+
+ call PIO_closefile(pio_file)
+
+! call PIO_deletefile(pio_tf_iosystem_, filename);
+ end do
+
+ if(allocated(iotypes)) then
+ deallocate(iotypes)
+ deallocate(iotype_descs)
+ end if
+
+ call PIO_freedecomp(pio_tf_iosystem_, rd_iodesc)
+ call PIO_freedecomp(pio_tf_iosystem_, wr_iodesc)
+ deallocate(exp_val)
+ deallocate(rbuf)
+ deallocate(wbuf)
+PIO_TF_AUTO_TEST_SUB_END nc_write_read_2d_col_decomp
diff --git a/tests/general/pio_decomphalo_tests_2d.F90.in b/tests/general/pio_decomphalo_tests_2d.F90.in
new file mode 100644
index 00000000000..9b837a7795f
--- /dev/null
+++ b/tests/general/pio_decomphalo_tests_2d.F90.in
@@ -0,0 +1,264 @@
+! Get a 2D column decomposition
+! If force_rearrange is FALSE, the decomposition is such that
+! # All even procs have VEC_COL_SZ rows of VEC_ROW_SZ elements
+! # All odd procs have VEC_COL_SZ rows of VEC_ROW_SZ + 1 elements
+! e.g. For VEC_ROW_SZ = 2, VEC_COL_SZ = 2 and ranks 0, 1, 2,
+! e.g. 1) |(1,1) (1,2)| |(1,3) (1,4) (1,5)| |(1,6) (1,7)|
+! |(2,1) (2,2)|, |(2,3) (2,4) (2,5)|, |(2,6) (2,7)|
+! If force_rearrange is TRUE, the decomposition is such that,
+! If possible, the even rank "exchanges" elements with the next
+! higher ranked odd proc.
+! This for example can be used to force rearrangement when reading
+! or writing data.
+! e.g. For VEC_ROW_SZ = 2, VEC_COL_SZ = 2 and ranks 0, 1, 2
+! e.g. 1) |(1,3) (1,4) (1,5)| |(1,1) (1,2)| |(1,6) (1,7)|
+! |(2,3) (2,4) (2,5)|, |(2,1) (2,2)|, |(2,6) (2,7)|
+SUBROUTINE get_2d_col_decomp_info(rank, sz, dims, start, count, force_rearrange)
+ integer, parameter :: VEC_ROW_SZ = 7
+ integer, parameter :: VEC_COL_SZ = 7
+ integer, parameter :: NDIMS = 2
+ integer, intent(in) :: rank
+ integer, intent(in) :: sz
+ integer, dimension(NDIMS), intent(out) :: dims
+ integer, dimension(NDIMS), intent(out) :: start
+ integer, dimension(NDIMS), intent(out) :: count
+ logical, intent(in) :: force_rearrange
+
+ logical :: is_even_rank
+ integer :: num_odd_procs, num_even_procs
+ integer :: iodd, ieven
+
+ is_even_rank = .false.
+ if (mod(rank, 2) == 0) then
+ is_even_rank = .true.
+ end if
+ num_odd_procs = sz / 2
+ num_even_procs = sz - num_odd_procs
+ dims(1) = VEC_COL_SZ
+ dims(2) = num_even_procs * VEC_ROW_SZ + num_odd_procs * (VEC_ROW_SZ + 1)
+ ! Number of odd and even procs before this rank
+ iodd = rank / 2
+ ieven = (rank + 1) / 2
+
+ ! Rows
+ start(1) = 1
+ count(1) = VEC_COL_SZ
+
+ ! Columns
+ if(force_rearrange) then
+ ! Make sure that we force rearrangement
+ if (is_even_rank) then
+ if(rank + 1 < sz) then
+ ! Force rearrangement
+ count(2) = VEC_ROW_SZ + 1
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + (VEC_ROW_SZ) + 1
+ else
+ count(2) = VEC_ROW_SZ
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + 1
+ end if
+ else
+ ! For all odd procs there is an even lower ranked, rank-1, proc
+ ! So force rearrangement
+ count(2) = VEC_ROW_SZ
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) - (VEC_ROW_SZ) + 1
+ end if
+ else
+ if (is_even_rank) then
+ count(2) = VEC_ROW_SZ
+ else
+ count(2) = VEC_ROW_SZ + 1
+ end if
+ start(2) = ieven * VEC_ROW_SZ + iodd * (VEC_ROW_SZ + 1) + 1
+ end if
+
+END SUBROUTINE
+
+! Get a 2D row decomposition
+! If force_rearrange is FALSE, the decomposition is such that
+! # All even procs have VEC_COL_SZ rows of VEC_ROW_SZ elements
+! # All odd procs have VEC_COL_SZ+1 rows of VEC_ROW_SZ elements
+! e.g. For VEC_ROW_SZ = 6, VEC_COL_SZ = 1 and ranks 0, 1, 2,
+! e.g. 1) |(1,1) (1,2) (1,3) (1,4) (1,5) (1,6)|,
+! |(2,1) (2,2) (2,3) (2,4) (2,5) (2,6)|
+! |(3,1) (3,2) (3,3) (3,4) (3,5) (3,6)|,
+! |(4,1) (4,2) (4,3) (4,4) (4,5) (4,6)|
+! If force_rearrange is TRUE, the decomposition is such that,
+! If possible, the even rank "exchanges" elements (rows) with the next
+! higher ranked odd proc.
+! This for example can be used to force rearrangement when reading
+! or writing data.
+! e.g. For VEC_ROW_SZ = 6, VEC_COL_SZ = 1 and ranks 0, 1, 2
+! e.g. 1) |(2,1) (2,2) (2,3) (2,4) (2,5) (2,6)|
+! |(3,1) (3,2) (3,3) (3,4) (3,5) (3,6)|,
+! |(1,1) (1,2) (1,3) (1,4) (1,5) (1,6)|,
+! |(4,1) (4,2) (4,3) (4,4) (4,5) (4,6)|
+SUBROUTINE get_2d_row_decomp_info(rank, sz, dims, start, count, force_rearrange)
+ integer, parameter :: VEC_COL_SZ = 7
+ integer, parameter :: VEC_ROW_SZ = 7
+ integer, parameter :: NDIMS = 2
+ integer, intent(in) :: rank
+ integer, intent(in) :: sz
+ integer, dimension(NDIMS), intent(out) :: dims
+ integer, dimension(NDIMS), intent(out) :: start
+ integer, dimension(NDIMS), intent(out) :: count
+ logical, intent(in) :: force_rearrange
+
+ logical :: is_even_rank
+ integer :: num_odd_procs, num_even_procs
+ integer :: iodd, ieven
+
+ is_even_rank = .false.
+ if (mod(rank, 2) == 0) then
+ is_even_rank = .true.
+ end if
+ num_odd_procs = sz / 2
+ num_even_procs = sz - num_odd_procs
+ dims(1) = num_even_procs * VEC_COL_SZ + num_odd_procs * (VEC_COL_SZ + 1)
+ dims(2) = VEC_ROW_SZ
+ ! Number of odd and even procs before this rank
+ iodd = rank / 2
+ ieven = (rank + 1) / 2
+
+ ! Rows
+ if(force_rearrange) then
+ ! Make sure that we force rearrangement
+ if (is_even_rank) then
+ if(rank + 1 < sz) then
+ ! Force rearrangement
+ count(1) = VEC_COL_SZ + 1
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + (VEC_COL_SZ) + 1
+ else
+ count(1) = VEC_COL_SZ
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + 1
+ end if
+ else
+ ! For all odd procs there is an even lower ranked, rank-1, proc
+ ! So force rearrangement
+ count(1) = VEC_COL_SZ
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) - (VEC_COL_SZ) + 1
+ end if
+ else
+ if (is_even_rank) then
+ count(1) = VEC_COL_SZ
+ else
+ count(1) = VEC_COL_SZ + 1
+ end if
+ start(1) = ieven * VEC_COL_SZ + iodd * (VEC_COL_SZ + 1) + 1
+ end if
+
+ ! Columns
+ start(2) = 1
+ count(2) = VEC_ROW_SZ
+
+END SUBROUTINE
+
+! Write with one decomp (to force rearrangement) and read with another (no
+! rearrangement)
+PIO_TF_TEMPLATE
+PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_read_2d_col_decomp_with_halo
+ implicit none
+ type(var_desc_t) :: pio_var
+ type(file_desc_t) :: pio_file
+ integer, parameter :: NDIMS = 2
+ character(len=PIO_TF_MAX_STR_LEN) :: filename
+ type(io_desc_t) :: wr_iodesc, rd_iodesc
+ integer, dimension(:), allocatable :: compdof
+ integer, dimension(NDIMS) :: start, count
+ PIO_TF_FC_DATA_TYPE, dimension(:,:), allocatable :: rbuf, wbuf, exp_val
+ integer, dimension(NDIMS) :: dims
+ integer, dimension(NDIMS) :: pio_dims
+ integer :: i, j, tmp_idx, ierr, nrows, ncols
+ ! iotypes = valid io types
+ integer, dimension(:), allocatable :: iotypes
+ character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs
+ integer :: num_iotypes
+
+ ! Set the decomposition for writing data - forcing rearrangement
+ call get_2d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .true.)
+ nrows = count(1)
+ ncols = count(2)
+
+  ! Allocate with space for a 1 row/column halo
+
+ allocate(wbuf(nrows+2, ncols+2))
+ allocate(compdof((nrows+2) * (ncols+2)))
+ compdof = 0
+ do j=1,ncols
+ do i=1,nrows
+ wbuf(i+1,j+1) = (start(2) - 1 + j - 1) * nrows + i
+ tmp_idx = j * (nrows+2) + i+1
+ compdof(tmp_idx) = int(wbuf(i+1,j+1))
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, wr_iodesc)
+ deallocate(compdof)
+
+ ! Set the decomposition for reading data - different from the write decomp
+ call get_2d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .false.)
+ nrows = count(1)
+ ncols = count(2)
+
+ allocate(rbuf(nrows+2, ncols+2))
+ allocate(compdof((nrows+2) * (ncols+2)))
+ allocate(exp_val(nrows+2, ncols+2))
+ compdof = 0
+ do j=1,ncols
+ do i=1,nrows
+ tmp_idx = j * (nrows+2) + i+1
+ compdof(tmp_idx) = (start(2) - 1 + j - 1) * nrows + i
+ ! Expected value, after reading, is the same as the compdof
+ exp_val(i+1,j+1) = compdof(tmp_idx)
+ end do
+ end do
+
+ call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, rd_iodesc)
+ deallocate(compdof)
+
+ num_iotypes = 0
+ call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes)
+ filename = "test_pio_decomp_simple_tests.testfile"
+ do i=1,num_iotypes
+ PIO_TF_LOG(0,*) "Testing : PIO_TF_DATA_TYPE : ", iotype_descs(i)
+ ierr = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER)
+ PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_row', dims(1), pio_dims(1))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_col', dims(2), pio_dims(2))
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename))
+
+ ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, pio_dims, pio_var)
+ PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename))
+
+ ierr = PIO_enddef(pio_file)
+ PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename))
+
+ ! Write the variable out
+ call PIO_write_darray(pio_file, pio_var, wr_iodesc, wbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to write darray : " // trim(filename))
+
+ call PIO_syncfile(pio_file)
+
+ call PIO_read_darray(pio_file, pio_var, rd_iodesc, rbuf, ierr)
+ PIO_TF_CHECK_ERR(ierr, "Failed to read darray : " // trim(filename))
+
+ PIO_TF_CHECK_VAL((rbuf, exp_val), "Got wrong val")
+
+ call PIO_closefile(pio_file)
+
+ call PIO_deletefile(pio_tf_iosystem_, filename);
+ end do
+
+ if(allocated(iotypes)) then
+ deallocate(iotypes)
+ deallocate(iotype_descs)
+ end if
+
+ call PIO_freedecomp(pio_tf_iosystem_, rd_iodesc)
+ call PIO_freedecomp(pio_tf_iosystem_, wr_iodesc)
+ deallocate(exp_val)
+ deallocate(rbuf)
+ deallocate(wbuf)
+PIO_TF_AUTO_TEST_SUB_END nc_write_read_2d_col_decomp_with_halo
diff --git a/tests/general/pio_iosystem_async_tests.F90.in b/tests/general/pio_iosystem_async_tests.F90.in
new file mode 100644
index 00000000000..117ee1de53c
--- /dev/null
+++ b/tests/general/pio_iosystem_async_tests.F90.in
@@ -0,0 +1,257 @@
+#include "config.h"
+! Split comm world into three comms: two comp_comms and an io_comm
+SUBROUTINE split_world_odd_even_io(world, all_comp_comm, comp_comm, io_comm, ierr)
+ use mpi
+ use pio_tutil
+ implicit none
+ integer, intent(in) :: world
+ integer, intent(out) :: comp_comm(2)
+ integer, intent(out) :: io_comm
+ integer, intent(out) :: all_comp_comm
+ integer, intent(out) :: ierr
+
+ integer :: key
+ integer :: color
+ integer :: new_comm
+ integer :: world_size
+ integer :: world_rank
+
+ comp_comm(:) = MPI_COMM_NULL
+ io_comm = MPI_COMM_NULL
+ new_comm = MPI_COMM_NULL
+ all_comp_comm = MPI_COMM_NULL
+ call MPI_comm_size(world, world_size, ierr)
+ call MPI_Comm_rank(world, world_rank, ierr)
+ if(world_size < 3) then
+ print *,'This test requires 3 or more ranks ',world_size
+ ierr = -1
+ return
+ endif
+
+ if(world_rank == world_size - 1) then
+ key = 0
+ color = 0
+ else
+ color = 1
+ key = world_rank
+ end if
+
+ call MPI_Comm_split(world, color, key, new_comm, ierr)
+
+ if (color > 0) then
+ all_comp_comm = new_comm
+ key = (world_rank/2)
+ if(mod(world_rank,2)==0) then
+ color = 0
+ else
+ color = 1
+ endif
+ call MPI_Comm_split(all_comp_comm, color, key, new_comm, ierr)
+ if(color == 0) then
+ comp_comm(1) = new_comm
+ else
+ comp_comm(2) = new_comm
+ endif
+ else
+ io_comm = new_comm
+ endif
+
+
+
+END SUBROUTINE split_world_odd_even_io
+
+! Create a file with a global attribute (filename)
+SUBROUTINE create_file(comm, iosys, iotype, fname, attname, dimname, ret)
+ use pio_tutil
+ implicit none
+
+ integer, intent(in) :: comm
+ type(iosystem_desc_t), intent(inout) :: iosys
+ integer, intent(in) :: iotype
+ character(len=*), intent(in) :: fname
+ character(len=*), intent(in) :: attname
+ character(len=*), intent(in) :: dimname
+ integer, intent(inout) :: ret
+
+ type(file_desc_t) :: pio_file
+ integer :: pio_dim
+ type(var_desc_t) :: pio_var
+! ret = PIO_set_log_level(iosys, 3)
+ ret = PIO_createfile(iosys, pio_file, iotype, fname, PIO_CLOBBER)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to create dummy file :" // trim(fname))
+! print *,__FILE__,__LINE__,'create file'
+ ret = PIO_def_dim(pio_file, dimname, PIO_TF_MAX_STR_LEN, pio_dim)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to define dim "// trim(dimname) // "in file :" // trim(fname))
+! print *,__FILE__,__LINE__,'def_dim'
+ ret = PIO_def_var(pio_file, attname, PIO_char, (/pio_dim/), pio_var)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to define var " // trim(attname) // " in file :" // trim(fname))
+! print *,__FILE__,__LINE__,'def_var ',trim(fname)
+ ret = PIO_put_att(pio_file, pio_var, attname, fname)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to put att " // trim(attname) // " in file :" // trim(fname))
+! print *,__FILE__,__LINE__,'put_att'
+ ret = PIO_enddef(pio_file)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed in enddef " // trim(attname) // " in file :" // trim(fname))
+
+ call PIO_closefile(pio_file)
+! print *,__FILE__,__LINE__,'closefile'
+END SUBROUTINE create_file
+
+! Check the contents of the file: check the
+! global attribute 'filename' (should be equal to the
+! name of the file, fname)
+SUBROUTINE check_file(comm, iosys, pio_file, fname, attname, dimname, ret)
+ use pio_tutil
+ implicit none
+
+ integer, intent(in) :: comm
+ type(iosystem_desc_t), intent(in) :: iosys
+ type(file_desc_t), intent(inout) :: pio_file
+ character(len=*), intent(in) :: fname
+ character(len=*), intent(in) :: attname
+ character(len=*), intent(in) :: dimname
+ integer, intent(inout) :: ret
+ integer, parameter :: PIO_ENOTATT=-43
+ integer :: pio_dim, old_eh
+ type(var_desc_t) :: pio_var
+ character(len=PIO_TF_MAX_STR_LEN) :: val, errstr
+ integer :: ival
+
+ ret = PIO_inq_dimid(pio_file, dimname, pio_dim)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to find dim "// trim(dimname) // "in file :" // trim(fname))
+
+ ret = PIO_inq_varid(pio_file, attname, pio_var)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to find var " // trim(attname) // " in file :" // trim(fname))
+
+ ret = PIO_get_att(pio_file, pio_var, attname, val)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to get att " // trim(attname) // " in file :" // trim(fname))
+
+ PRINT *, "val = ", trim(val), ", fname =", trim(fname)
+ PIO_TF_PASSERT(val .eq. fname, comm, "Attribute value is not the expected value")
+
+ call PIO_SetErrorHandling(pio_file, PIO_BCAST_ERROR, old_eh)
+! ret = pio_set_log_level(iosys, 3)
+ ret = PIO_get_att(pio_file, pio_var, "wrongname", ival)
+ write(errstr, *) "Got wrong error ",ret," on getatt in file:", trim(fname)
+ PIO_TF_PASSERT(ret==PIO_ENOTATT, comm, errstr)
+ call PIO_SetErrorHandling(pio_file, old_eh)
+ ret = PIO_NOERR
+
+END SUBROUTINE check_file
+
+! Open and check the contents of file : open it and check the
+! global attribute 'filename' (should be equal to the
+! name of the file, fname)
+SUBROUTINE open_and_check_file(comm, iosys, iotype, pio_file, fname, &
+ attname, dimname, disable_fclose, ret)
+ use pio_tutil
+ implicit none
+
+ integer, intent(in) :: comm
+ type(iosystem_desc_t), intent(inout) :: iosys
+ integer, intent(in) :: iotype
+ type(file_desc_t), intent(inout) :: pio_file
+ character(len=*), intent(in) :: fname
+ character(len=*), intent(in) :: attname
+ character(len=*), intent(in) :: dimname
+ logical, intent(in) :: disable_fclose
+ integer, intent(inout) :: ret
+
+ ret = PIO_openfile(iosys, pio_file, iotype, fname, PIO_write)
+ PIO_TF_CHECK_ERR(ret, comm, "Failed to open:" // fname)
+
+ call check_file(comm, iosys, pio_file, fname, attname, dimname, ret)
+ PIO_TF_CHECK_ERR(ret, comm, "Checking contents of file failed:" // fname)
+
+ if(.not. disable_fclose) then
+ call PIO_closefile(pio_file)
+ end if
+END SUBROUTINE open_and_check_file
+
+! Create a file with one iosystem - with all procs, and open/read with
+! another iosystem - subset (odd/even) of procs
+PIO_TF_AUTO_TEST_SUB_BEGIN two_comps_odd_even_async
+ use mpi
+ implicit none
+
+ character(len=PIO_TF_MAX_STR_LEN), target :: fname1 = "pio_iosys_async_test_file1.nc"
+ character(len=PIO_TF_MAX_STR_LEN), target :: fname2 = "pio_iosys_async_test_file2.nc"
+ character(len=PIO_TF_MAX_STR_LEN), parameter :: attname = "filename"
+ character(len=PIO_TF_MAX_STR_LEN), parameter :: dimname = "filename_dim"
+ character(len=PIO_TF_MAX_STR_LEN), pointer :: fname
+ integer, dimension(:), allocatable :: iotypes
+ character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs
+ integer :: i, num_iotypes = 0
+ type(file_desc_t) :: pio_file
+
+ type(iosystem_desc_t) :: iosys(2)
+
+ logical :: is_even
+ integer :: comp_comm(2), io_comm
+ integer :: all_comp_comm ! comm common to all components
+ integer :: ret
+
+ ! Split world to odd even and io procs
+ call split_world_odd_even_io(pio_tf_comm_, all_comp_comm, comp_comm, io_comm, ret)
+ call PIO_init(iosys, pio_tf_comm_, comp_comm, io_comm, PIO_REARR_BOX)
+ if(io_comm == MPI_COMM_NULL) then
+ if(comp_comm(1) /= MPI_COMM_NULL) then
+ call PIO_seterrorhandling(iosys(1), PIO_BCAST_ERROR)
+ else
+ call PIO_seterrorhandling(iosys(2), PIO_BCAST_ERROR)
+ endif
+ ! Open two different files and close them with two different iosystems
+ call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes)
+
+ do i=1,num_iotypes
+ PIO_TF_LOG(0,*) "Testing : ", iotype_descs(i)
+ ! Create two files to be opened later
+ if(comp_comm(1) /= MPI_COMM_NULL) then
+ is_even = .false.
+! print *,__FILE__,__LINE__,'create_file', is_even
+ call create_file(comp_comm(1), iosys(1), iotypes(i), &
+ fname1, attname, dimname, ret)
+ PIO_TF_CHECK_ERR(ret, comp_comm(1), "Failed to create file :" // fname1)
+ else
+ is_even = .true.
+! print *,__FILE__,__LINE__,'create_file', is_even
+ call create_file(comp_comm(2), iosys(2), iotypes(i), &
+ fname2, attname, dimname, ret)
+ PIO_TF_CHECK_ERR(ret, comp_comm(2), "Failed to create file :" // fname2)
+ endif
+
+! print *,__FILE__,__LINE__,'at barrier', is_even
+ call mpi_barrier(all_comp_comm, ret)
+
+ ! Open file1 from odd processes and file2 from even processes
+ if(is_even) then
+ call open_and_check_file(comp_comm(2), iosys(2), iotypes(i), &
+ pio_file, fname1, attname, dimname, .false., ret)
+ PIO_TF_CHECK_ERR(ret, comp_comm(2), "Checking contents of file failed :" // fname1)
+ call pio_deletefile(iosys(2), fname1)
+ else
+ call open_and_check_file(comp_comm(1), iosys(1), iotypes(i), &
+ pio_file, fname2, attname, dimname, .false., ret)
+ PIO_TF_CHECK_ERR(ret, comp_comm(1), "Checking contents of file failed :" // fname2)
+ call pio_deletefile(iosys(1), fname2)
+ end if
+ end do
+ if (is_even) then
+ call PIO_finalize(iosys(2), ret)
+ else
+ call PIO_finalize(iosys(1), ret)
+ endif
+ endif
+ if(comp_comm(1) /= MPI_COMM_NULL) then
+ call MPI_Comm_free(comp_comm(1), ret)
+ endif
+ if(comp_comm(2) /= MPI_COMM_NULL) then
+ call MPI_Comm_free(comp_comm(2), ret)
+ endif
+ if(io_comm /= MPI_COMM_NULL) then
+ call MPI_Comm_free(io_comm, ret)
+ endif
+ if(allocated(iotypes)) then
+ deallocate(iotypes)
+ deallocate(iotype_descs)
+ end if
+PIO_TF_AUTO_TEST_SUB_END two_comps_odd_even_async
diff --git a/tests/general/run_tests.sh.in b/tests/general/run_tests.sh.in
index b847f84acc8..4a44ddd7ce6 100755
--- a/tests/general/run_tests.sh.in
+++ b/tests/general/run_tests.sh.in
@@ -15,7 +15,7 @@ PIO_TESTS='pio_init_finalize pio_file_simple_tests pio_file_fail '\
'pio_decomp_tests pio_decomp_tests_1d '\
'pio_decomp_tests_2d pio_decomp_tests_3d pio_decomp_frame_tests '\
'pio_decomp_fillval pio_iosystem_tests pio_iosystem_tests2 '\
-'pio_iosystem_tests3'
+'pio_iosystem_tests3 pio_iosystem_async_tests'
# pio_rearr_opts pio_rearr_opts2
success1=true
diff --git a/tests/general/util/pio_tf_f90gen.pl b/tests/general/util/pio_tf_f90gen.pl
index 36c6ef7bb55..696f50aaae4 100755
--- a/tests/general/util/pio_tf_f90gen.pl
+++ b/tests/general/util/pio_tf_f90gen.pl
@@ -602,21 +602,32 @@ sub update_auto_func_list_with_gen_templ
# Returns the default test main code
sub get_default_test_main
{
+ my($test_type) = @_;
my($out_line);
$out_line = "\n\n";
$out_line = $out_line . " PROGRAM PIO_TF_Test_main_\n";
$out_line = $out_line . " USE pio_tutil\n";
$out_line = $out_line . " IMPLICIT NONE\n";
- $out_line = $out_line . " INTEGER, PARAMETER :: NREARRS = 2\n";
- $out_line = $out_line . " INTEGER :: rearrs(NREARRS) = (/pio_rearr_subset,pio_rearr_box/)\n";
- $out_line = $out_line . " CHARACTER(LEN=PIO_TF_MAX_STR_LEN) :: rearrs_info(NREARRS) = (/\"PIO_REARR_SUBSET\",\"PIO_REARR_BOX \"/)\n";
+ if($test_type eq "sync"){
+ $out_line = $out_line . " INTEGER, PARAMETER :: NREARRS = 2\n";
+ $out_line = $out_line . " INTEGER :: rearrs(NREARRS) = (/pio_rearr_subset,pio_rearr_box/)\n";
+ $out_line = $out_line . " CHARACTER(LEN=PIO_TF_MAX_STR_LEN) :: rearrs_info(NREARRS) = (/\"PIO_REARR_SUBSET\",\"PIO_REARR_BOX \"/)\n";
+ }else{
+ $out_line = $out_line . " INTEGER, PARAMETER :: NREARRS = 1\n";
+ $out_line = $out_line . " INTEGER :: rearrs(NREARRS) = (/pio_rearr_box/)\n";
+ $out_line = $out_line . " CHARACTER(LEN=PIO_TF_MAX_STR_LEN) :: rearrs_info(NREARRS) = (/\"PIO_REARR_BOX \"/)\n";
+ }
$out_line = $out_line . " INTEGER i, ierr\n";
$out_line = $out_line . "\n";
$out_line = $out_line . " pio_tf_nerrs_total_=0\n";
$out_line = $out_line . " pio_tf_retval_utest_=0\n";
$out_line = $out_line . " CALL MPI_Init(ierr)\n";
$out_line = $out_line . " DO i=1,SIZE(rearrs)\n";
- $out_line = $out_line . " CALL PIO_TF_Init_(rearrs(i))\n";
+ if($test_type eq "async"){
+ $out_line = $out_line . " CALL PIO_TF_Init_async_(rearrs(i))\n";
+ }else{
+ $out_line = $out_line . " CALL PIO_TF_Init_(rearrs(i))\n";
+ }
$out_line = $out_line . " IF (pio_tf_world_rank_ == 0) THEN\n";
$out_line = $out_line . " WRITE(*,*) \"PIO_TF: Testing : \", trim(rearrs_info(i))\n";
$out_line = $out_line . " END IF\n";
@@ -704,20 +715,20 @@ sub get_header
}
# The footer always contains the default test main code
-# The footer can contain the default test driver code is none is specified
+# The footer can contain the default test driver code if none is specified
# - The default test driver code will contain all the auto test subs
# If a test driver code is specified the list of auto test funcs has already
# been appended the driver
sub get_footer
{
- my($ref_auto_funcs_list) = @_;
- my($out_line);
+ my($test_type, $ref_auto_funcs_list) = @_;
+ my($out_line) = "";
if($template_has_test_driver == 0){
# Add default test driver
$out_line = &get_default_test_driver($ref_auto_funcs_list);
}
+ $out_line = $out_line . &get_default_test_main($test_type);
- $out_line = $out_line . &get_default_test_main();
return $out_line;
}
@@ -860,8 +871,11 @@ sub process_template_file
$orig_line = "";
$ifline_num += 1;
}
-
- $footer = &get_footer(\@auto_funcs_list);
+ if(index($ifname, "async") >= 0){
+ $footer = &get_footer("async", \@auto_funcs_list);
+ }else{
+ $footer = &get_footer("sync", \@auto_funcs_list);
+ }
print OUTPUT_FILE $footer;
}
diff --git a/tests/general/util/pio_tutil.F90 b/tests/general/util/pio_tutil.F90
index 92fecc0c8ed..71cf930a5a8 100644
--- a/tests/general/util/pio_tutil.F90
+++ b/tests/general/util/pio_tutil.F90
@@ -120,6 +120,7 @@ MODULE pio_tutil
END INTERFACE
CONTAINS
+
! Initialize Testing framework - Internal (Not directly used by unit tests)
SUBROUTINE PIO_TF_Init_(rearr)
#ifdef TIMING
@@ -188,6 +189,53 @@ SUBROUTINE PIO_TF_Init_(rearr)
end if
END SUBROUTINE PIO_TF_Init_
+ ! Initialize Testing framework for async tests - Internal (Not directly used by unit tests)
+ SUBROUTINE PIO_TF_Init_async_(rearr)
+#ifdef TIMING
+ use perf_mod
+#endif
+#ifndef NO_MPIMOD
+ use mpi
+#else
+ include 'mpif.h'
+#endif
+ INTEGER, INTENT(IN) :: rearr
+ INTEGER ierr
+
+ CALL MPI_COMM_DUP(MPI_COMM_WORLD, pio_tf_comm_, ierr);
+ CALL MPI_COMM_RANK(pio_tf_comm_, pio_tf_world_rank_, ierr)
+ CALL MPI_COMM_SIZE(pio_tf_comm_, pio_tf_world_sz_, ierr)
+#ifdef TIMING
+ call t_initf('gptl.nl')
+#endif
+
+ pio_tf_log_level_ = 0
+ pio_tf_num_aggregators_ = 0
+ pio_tf_num_io_tasks_ = 0
+ pio_tf_stride_ = 1
+ ! Now read input args from rank 0 and bcast it
+ ! Args supported are --num-io-tasks, --num-aggregators,
+ ! --stride
+
+ CALL Read_input()
+ IF (pio_tf_world_sz_ < pio_tf_num_io_tasks_) THEN
+ pio_tf_num_io_tasks_ = pio_tf_world_sz_
+ END IF
+ IF (pio_tf_num_io_tasks_ <= 1 .AND. pio_tf_stride_ > 1) THEN
+ pio_tf_stride_ = 1
+ END IF
+ IF (pio_tf_num_io_tasks_ == 0) THEN
+ pio_tf_num_io_tasks_ = pio_tf_world_sz_ / pio_tf_stride_
+ IF (pio_tf_num_io_tasks_ < 1) pio_tf_num_io_tasks_ = 1
+ END IF
+
+ ! Set PIO logging level
+ ierr = PIO_set_log_level(pio_tf_log_level_)
+ if(ierr /= PIO_NOERR) then
+ PRINT *, "PIO_TF: Error setting PIO logging level"
+ end if
+ END SUBROUTINE PIO_TF_Init_async_
+
! Finalize Testing framework - Internal (Not directly used by unit tests)
SUBROUTINE PIO_TF_Finalize_
#ifdef TIMING
diff --git a/tests/ncint/pio_err_macros.h b/tests/ncint/pio_err_macros.h
index 21b6202f81e..40ef85b4cc6 100644
--- a/tests/ncint/pio_err_macros.h
+++ b/tests/ncint/pio_err_macros.h
@@ -22,6 +22,17 @@
* generally cosists of several sets of tests. */
static int total_err = 0, err = 0;
+/* This macro prints an error message with line number and name of
+ * test program, also a netCDF error message. */
+#define NCPERR(e) do { \
+ fflush(stdout); /* Make sure our stdout is synced with stderr. */ \
+ err++; \
+ fprintf(stderr, "Sorry! Unexpected result, %s, line: %d msg: %s\n", \
+ __FILE__, __LINE__, nc_strerror(e)); \
+ fflush(stderr); \
+ return 2; \
+ } while (0)
+
/* This macro prints an error message with line number and name of
* test program. */
#define PERR do { \
@@ -61,6 +72,8 @@ static int total_err = 0, err = 0;
return 0; \
} while (0)
-#define ERR_WRONG 99
+/* This is also defined in tests/cunit/pio_tests.h. It will reduce
+ * confusion to use the same value. */
+#define ERR_WRONG 1112
#endif /* _PIO_ERR_MACROS_H */
diff --git a/tests/ncint/tst_var_compress.c b/tests/ncint/tst_var_compress.c
index e7fc492a029..8e3060a630c 100644
--- a/tests/ncint/tst_var_compress.c
+++ b/tests/ncint/tst_var_compress.c
@@ -69,18 +69,21 @@ run_var_compress_test(int my_rank, int ntasks, int iosysid)
if (nc_close(ncid)) PERR;
{
- int shuffle_in, deflate_in, deflate_level_in, storage_in;
+ /* int shuffle_in, deflate_in, deflate_level_in; */
+ int storage_in;
int *data_in;
size_t chunksizes_in[NDIM3];
int endian_in;
int d;
+ /* int ret; */
/* Open the file. */
if (nc_open(FILE_NAME, NC_PIO, &ncid)) PERR;
/* Check the variable deflate. */
- if (nc_inq_var_deflate(ncid, 0, &shuffle_in, &deflate_in, &deflate_level_in)) PERR;
- printf("%d %d %d\n", shuffle_in, deflate_in, deflate_level_in);
+ /* if ((ret = nc_inq_var_deflate(ncid, 0, &shuffle_in, &deflate_in, &deflate_level_in))) */
+ /* NCPERR(ret); */
+ /* printf("%d %d %d\n", shuffle_in, deflate_in, deflate_level_in); */
/* if (shuffle_in || !deflate_in || deflate_level_in != DEFLATE_LEVEL) PERR; */
/* Check the chunking. */
diff --git a/tests/performance/pioperformance.F90 b/tests/performance/pioperformance.F90
index 8a5e2e732b0..18e9bdce794 100644
--- a/tests/performance/pioperformance.F90
+++ b/tests/performance/pioperformance.F90
@@ -28,8 +28,9 @@ program pioperformance
integer :: nv, nframes, nvars(max_nvars)
integer :: vs, varsize(max_nvars) ! Local size of array for idealized decomps
logical :: unlimdimindof
+ integer :: log_level
namelist /pioperf/ decompfile, pio_typenames, rearrangers, niotasks, nframes, &
- nvars, varsize, unlimdimindof
+ nvars, varsize, unlimdimindof, log_level
#ifdef BGQTRY
external :: print_memusage
#endif
@@ -65,6 +66,7 @@ program pioperformance
varsize = 0
varsize(1) = 1
unlimdimindof=.false.
+ log_level = -1
if(mype==0) then
open(unit=12,file='pioperf.nl',status='old')
read(12,pioperf)
@@ -94,6 +96,7 @@ program pioperformance
call MPI_Bcast(unlimdimindof, 1, MPI_INTEGER, 0, MPI_COMM_WORLD,ierr)
call MPI_Bcast(nvars, max_nvars, MPI_INTEGER, 0, MPI_COMM_WORLD,ierr)
call MPI_Bcast(varsize, max_nvars, MPI_INTEGER, 0, MPI_COMM_WORLD,ierr)
+ call MPI_Bcast(log_level, 1, MPI_INTEGER, 0, MPI_COMM_WORLD,ierr)
call t_initf('pioperf.nl', LogPrint=.false., mpicom=MPI_COMM_WORLD, MasterTask=MasterTask)
niotypes = 0
@@ -104,7 +107,7 @@ program pioperformance
rearrangers(1)=1
rearrangers(2)=2
endif
- i = pio_set_log_level(-1)
+ i = pio_set_log_level(log_level)
do i=1,max_decomp_files
if(len_trim(decompfile(i))==0) exit
if(mype == 0) print *, ' Testing decomp: ',trim(decompfile(i))
@@ -222,7 +225,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
dfld = PIO_FILL_DOUBLE
do nv=1,nvars
do j=1,maplen
- if(compmap(j) > 0) then
+ if(compmap(j) > 0) then
ifld(j,nv) = compmap(j)
dfld(j,nv) = ifld(j,nv)/1000000.0
rfld(j,nv) = 1.0E5*ifld(j,nv)
@@ -238,13 +241,13 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
iotype = piotypes(k)
call MPI_Barrier(comm,ierr)
if(mype==0) then
- print *,'iotype=',piotypes(k)
+ print *,'iotype=',piotypes(k), ' of ',size(piotypes)
endif
-! if(iotype==PIO_IOTYPE_PNETCDF) then
+ if(iotype==PIO_IOTYPE_PNETCDF) then
mode = PIO_64BIT_DATA
-! else
-! mode = 0
-! endif
+ else
+ mode = 0
+ endif
do rearrtype=1,2
rearr = rearrangers(rearrtype)
if(rearr /= PIO_REARR_SUBSET .and. rearr /= PIO_REARR_BOX) exit
@@ -335,7 +338,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
call MPI_Reduce(wall(1), wall(2), 1, MPI_DOUBLE_PRECISION, MPI_MAX, 0, comm, ierr)
if(mype==0) then
! print out performance in MB/s
- nvarmult = 0
+ nvarmult = 0
#ifdef VARINT
nvarmult = nvarmult+1
#endif
@@ -415,7 +418,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
#ifdef VARINT
#ifdef DEBUG
write(*,'(a11,i2,a9,i11,a9,i11,a9,i2)') &
- ' Int PE=',mype,'ifld=',ifld(j,nv),' ifld_in=',ifld_in(j,nv,frame),' compmap=',compmap(j)
+ ' Int PE=',mype,'ifld=',ifld(j,nv),' ifld_in=',ifld_in(j,nv,frame),' compmap=',compmap(j)
#endif
if(ifld(j,nv) /= ifld_in(j,nv,frame)) then
!if(errorcnt < 10) then
@@ -423,7 +426,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
!endif
write(*,*) '***ERROR:Mismatch!***'
write(*,'(a11,i2,a9,i11,a9,i11,a9,i2)') &
- ' Int PE=',mype,'ifld=',ifld(j,nv),' ifld_in=',ifld_in(j,nv,frame),' compmap=',compmap(j)
+ ' Int PE=',mype,'ifld=',ifld(j,nv),' ifld_in=',ifld_in(j,nv,frame),' compmap=',compmap(j)
errorcnt = errorcnt+1
endif
@@ -431,7 +434,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
#ifdef VARREAL
#ifdef DEBUG
write(*,'(a11,i2,a9,f11.2,a9,f11.2,a9,i2)') &
- ' Real PE=',mype,'rfld=',rfld(j,nv),' rfld_in=',rfld_in(j,nv,frame),' compmap=',compmap(j)
+ ' Real PE=',mype,'rfld=',rfld(j,nv),' rfld_in=',rfld_in(j,nv,frame),' compmap=',compmap(j)
#endif
if(rfld(j,nv) /= rfld_in(j,nv,frame) ) then
@@ -440,7 +443,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
!endif
write(*,*) '***ERROR:Mismatch!***'
write(*,'(a11,i2,a9,f11.2,a9,f11.2,a9,i2)') &
- ' Real PE=',mype,'rfld=',rfld(j,nv),' rfld_in=',rfld_in(j,nv,frame),' compmap=',compmap(j)
+ ' Real PE=',mype,'rfld=',rfld(j,nv),' rfld_in=',rfld_in(j,nv,frame),' compmap=',compmap(j)
errorcnt = errorcnt+1
endif
@@ -448,7 +451,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
#ifdef VARDOUBLE
#ifdef DEBUG
write(*,'(a11,i2,a9,d11.4,a9,d11.4,a9,i2)') &
- 'Double PE=',mype,'dfld=',dfld(j,nv),'dfld_in=',dfld_in(j,nv,frame),'compmap=',compmap(j)
+ 'Double PE=',mype,'dfld=',dfld(j,nv),'dfld_in=',dfld_in(j,nv,frame),'compmap=',compmap(j)
#endif
if(dfld(j,nv) /= dfld_in(j,nv,frame) ) then
!if(errorcnt < 10) then
@@ -456,7 +459,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
!endif
write(*,*) '***ERROR:Mismatch!***'
write(*,'(a11,i2,a9,d11.4,a9,d11.4,a9,i2)') &
- 'Double PE=',mype,'dfld=',dfld(j,nv),'dfld_in=',dfld_in(j,nv,frame),'compmap=',compmap(j)
+ 'Double PE=',mype,'dfld=',dfld(j,nv),'dfld_in=',dfld_in(j,nv,frame),'compmap=',compmap(j)
errorcnt = errorcnt+1
endif
@@ -472,7 +475,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
if(errorcnt > 0) then
print *,'ERROR: INPUT/OUTPUT data mismatch ',errorcnt
endif
- nvarmult = 0
+ nvarmult = 0
#ifdef VARINT
nvarmult = nvarmult+1
#endif
@@ -484,7 +487,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, &
#endif
write(*,'(a15,a9,i10,i10,i10,f20.10)') &
'RESULT: read ',rearr_name(rearr), piotypes(k), ntasks, nvars, &
- nvarmult*nvars*nframes*gmaplen*4.0/(1048576.0*wall(2))
+ nvarmult*nvars*nframes*gmaplen*4.0/(1048576.0*wall(2))
#ifdef BGQTRY
call print_memusage()
#endif
diff --git a/tests/unit/basic_tests.F90 b/tests/unit/basic_tests.F90
index b9f72692be4..9289e14f0d0 100644
--- a/tests/unit/basic_tests.F90
+++ b/tests/unit/basic_tests.F90
@@ -52,7 +52,7 @@ Subroutine test_create(test_id, err_msg)
! Error in PIO_createfile
print *,' ret_val = ', ret_val
err_msg = "Could not create " // trim(filename)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
call mpi_barrier(mpi_comm_world,ret_val)
@@ -63,7 +63,7 @@ Subroutine test_create(test_id, err_msg)
! Error in PIO_enddef
err_msg = "Could not end define mode"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
end if
call PIO_closefile(pio_file)
@@ -74,7 +74,7 @@ Subroutine test_create(test_id, err_msg)
if (ret_val .ne. PIO_NOERR) then
! Error in PIO_openfile
err_msg = "Could not open " // trim(filename)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Close file
@@ -86,7 +86,7 @@ Subroutine test_create(test_id, err_msg)
if (ret_val .ne. PIO_NOERR) then
! Error in PIO_createfile
err_msg = "Could not clobber " // trim(filename)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Leave define mode
@@ -95,7 +95,7 @@ Subroutine test_create(test_id, err_msg)
! Error in PIO_enddef
err_msg = "Could not end define mode in clobbered file"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Close file
@@ -114,7 +114,7 @@ Subroutine test_create(test_id, err_msg)
err_msg = "Was able to clobber file despite PIO_NOCLOBBER"
ret_val = PIO_enddef(pio_file)
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
end if
@@ -167,7 +167,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_openfile
err_msg = "Successfully opened file that doesn't exist"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Open existing file, write data to it (for binary file, need to create new file)
@@ -179,7 +179,7 @@ Subroutine test_open(test_id, err_msg)
if (ret_val .ne. PIO_NOERR) then
! Error in PIO_openfile (or PIO_createfile)
err_msg = "Could not open " // trim(filename) // " in write mode"
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Enter define mode for netcdf files
@@ -188,7 +188,7 @@ Subroutine test_open(test_id, err_msg)
if (ret_val .ne. PIO_NOERR) then
err_msg = "Could not enter redef mode"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Define a new dimension N
@@ -197,7 +197,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_def_dim
err_msg = "Could not define dimension N"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Define a new variable foo
@@ -207,7 +207,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_def_var
err_msg = "Could not define variable foo"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
ret_val = PIO_put_att(pio_file, pio_var, '_FillValue', -1)
@@ -215,7 +215,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_def_var
err_msg = "Could not define _FillValue attribute"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
@@ -227,7 +227,7 @@ Subroutine test_open(test_id, err_msg)
print *,__FILE__,__LINE__,ret_val
err_msg = "Could not end define mode"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
end if
@@ -239,7 +239,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_write_darray
err_msg = "Could not write data"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Close file
@@ -252,7 +252,7 @@ Subroutine test_open(test_id, err_msg)
if (ret_val .ne. PIO_NOERR) then
! Error opening file
err_msg = "Could not open file in NoWrite mode"
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
! Try to write (should fail)
@@ -264,7 +264,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_write_darray
err_msg = "Wrote to file opened in NoWrite mode"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
call mpi_barrier(MPI_COMM_WORLD,ret_val)
@@ -277,13 +277,13 @@ Subroutine test_open(test_id, err_msg)
err_msg = "Error in read_darray"
call PIO_closefile(pio_file)
print *,__FILE__,__LINE__,err_msg
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
if(any(data_buffer /= my_rank)) then
err_msg = "Error reading data"
call PIO_closefile(pio_file)
print *,__FILE__,__LINE__,iotype, trim(err_msg), data_buffer
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
!ret_val = PIO_set_log_level(3)
@@ -292,7 +292,7 @@ Subroutine test_open(test_id, err_msg)
err_msg = "Error in inq_unlimdim"
call PIO_closefile(pio_file)
print *,__FILE__,__LINE__,iotype, trim(err_msg)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
!ret_val = PIO_set_log_level(0)
@@ -314,7 +314,7 @@ Subroutine test_open(test_id, err_msg)
! Error in PIO_openfile
err_msg = "Opened a non-netcdf file as netcdf"
call PIO_closefile(pio_file)
- call mpi_abort(MPI_COMM_WORLD, 0, ret_val2)
+ call mpi_abort(MPI_COMM_WORLD, ret_val, ret_val2)
end if
end if
diff --git a/tests/unit/driver.F90 b/tests/unit/driver.F90
index 6c3975d9b47..c26a10291b8 100644
--- a/tests/unit/driver.F90
+++ b/tests/unit/driver.F90
@@ -102,7 +102,7 @@ Program pio_unit_test_driver
call MPI_Bcast(stride,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
niotasks = ntasks/stride
- ! Set up PIO
+ ! Set up PIO. Use a base of 1 because task 0 is already busy.
call PIO_init(my_rank, & ! MPI rank
MPI_COMM_WORLD, & ! MPI communicator
niotasks, & ! Number of iotasks (ntasks/stride)
diff --git a/tests/unit/ncdf_tests.F90 b/tests/unit/ncdf_tests.F90
index 765132751e3..b414e26109e 100644
--- a/tests/unit/ncdf_tests.F90
+++ b/tests/unit/ncdf_tests.F90
@@ -106,18 +106,6 @@ Subroutine test_redef(test_id, err_msg)
return
end if
- ! Try to enter define mode again
- if(master_task) write(*,"(6x,A,1x)") "trying to enter define mode in define mode, error expected ... "
- call mpi_barrier(MPI_COMM_WORLD,ret_val)
-
- ret_val = PIO_redef(pio_file)
- if (ret_val .eq. PIO_NOERR) then
- ! Error in PIO_redef
- err_msg = "Entered define mode from define mode"
- call PIO_closefile(pio_file)
- return
- end if
-
! Leave define mode
ret_val = PIO_enddef(pio_file)
if (ret_val .ne. PIO_NOERR) then
@@ -313,7 +301,7 @@ Subroutine test_nc4(test_id, err_msg)
print*, 'PIO_set_chunk_cache'
ret_val = PIO_set_chunk_cache(pio_iosystem%iosysid, iotype, chunk_cache_size, &
chunk_cache_nelems, chunk_cache_preemption)
-
+
! Should not have worked except for netCDF-4/HDF5 iotypes.
if (iotype .eq. PIO_iotype_netcdf4c .and. ret_val .ne. PIO_NOERR) then
err_msg = "Could not set chunk cache"
@@ -339,7 +327,7 @@ Subroutine test_nc4(test_id, err_msg)
chunk_cache_nelems_in, chunk_cache_preemption_in)
print*, 'PIO_get_chunk_cache returned ', chunk_cache_size_in, &
chunk_cache_nelems_in, chunk_cache_preemption_in
-
+
! Should not have worked except for netCDF-4/HDF5 iotypes.
if (iotype .eq. PIO_iotype_netcdf4c .or. iotype .eq. PIO_iotype_netcdf4p) then
if (ret_val .ne. PIO_NOERR) then
@@ -407,7 +395,7 @@ Subroutine test_nc4(test_id, err_msg)
print*, 'PIO_set_var_chunk_cache'
ret_val = PIO_set_var_chunk_cache(pio_file, pio_var, chunk_cache_size, chunk_cache_nelems, &
chunk_cache_preemption)
-
+
! Should not have worked except for netCDF-4/HDF5 iotypes.
if (iotype .eq. PIO_iotype_netcdf4c .and. ret_val .ne. PIO_NOERR) then
err_msg = "Could not set variable chunk cache"
@@ -432,7 +420,7 @@ Subroutine test_nc4(test_id, err_msg)
ret_val = PIO_get_var_chunk_cache(pio_file, pio_var, chunk_cache_size_in, &
chunk_cache_nelems_in, chunk_cache_preemption_in)
print*, 'PIO_get_var_chunk_cache ret_val=', ret_val
-
+
! Should not have worked except for netCDF-4/HDF5 iotypes.
if (iotype .eq. PIO_iotype_netcdf4c .or. iotype .eq. PIO_iotype_netcdf4p) then
if (ret_val .ne. PIO_NOERR) then
@@ -458,7 +446,7 @@ Subroutine test_nc4(test_id, err_msg)
end if
! Try to turn on compression for this variable.
- print*, 'testing PIO_def_var_deflate'
+ print*, 'testing PIO_def_var_deflate'
shuffle = 0
deflate = 1
@@ -627,10 +615,10 @@ Subroutine test_nc4(test_id, err_msg)
call PIO_closefile(pio_file)
! Free decomp
- print*, 'testing PIO_freedecomp'
+ print*, 'testing PIO_freedecomp'
call PIO_freedecomp(pio_iosystem, iodesc_nCells)
call mpi_barrier(MPI_COMM_WORLD,ret_val)
-
+
print*, 'after testing err_msg = ' , err_msg
End Subroutine test_nc4
end module ncdf_tests