diff --git a/src/CPL/LIS_cpl/module_lis_HYDRO.F b/src/CPL/LIS_cpl/module_lis_HYDRO.F
index 08a0cd3fe..911e913be 100644
--- a/src/CPL/LIS_cpl/module_lis_HYDRO.F
+++ b/src/CPL/LIS_cpl/module_lis_HYDRO.F
@@ -52,7 +52,7 @@ subroutine lis_cpl_HYDRO(n)
 #endif
       if(nlst(did)%nsoil < 1) then
          write(6,*) "FATAL ERROR: nsoil is less than 1"
-         call hydro_stop("In module_lis_HYDRO.F module_lis_HYDRO() - nsoil is less than 1")
+         call hydro_stop("In module_lis_HYDRO.F module_lis_HYDRO() - nsoil is less than 1")
       endif
       allocate(nlst(did)%zsoil8(nlst(did)%nsoil))
       nlst(did)%zsoil8(1) = -noah271_struc(n)%lyrthk(1)
@@ -65,15 +65,15 @@ subroutine lis_cpl_HYDRO(n)
 #endif
-      CALL mpi_initialized( mpi_inited, ierr )
+      call MPI_Initialized( mpi_inited, ierr )
       if ( .NOT. mpi_inited ) then
-         call MPI_INIT( ierr )  ! stand alone land model.
+         call MPI_Init( ierr )  ! stand alone land model.
          if (ierr /= MPI_SUCCESS) stop "MPI_INIT"
-         call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
+         call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
         if (ierr /= MPI_SUCCESS) stop "MPI_COMM_DUP"
      endif
-      call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
-      call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
+      call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr )
+      call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr )
    endif
    if(nlst(did)%rtFlag .eq. 0) return
@@ -184,7 +184,7 @@ subroutine lis_cpl_HYDRO(n)
    enddo
 #ifdef HYDRO_D
-     write(6,*) "NDHMS lis date ", LIS_rc%yr, LIS_rc%mo, LIS_rc%da, LIS_rc%hr, LIS_rc%mn, LIS_rc%ss
+     write(6,*) "NDHMS lis date ", LIS_rc%yr, LIS_rc%mo, LIS_rc%da, LIS_rc%hr, LIS_rc%mn, LIS_rc%ss
 #endif
!     write(11,*) "RT_DOMAIN(did)%stc",RT_DOMAIN(did)%stc(:,:,1)
!     write(12,*) "noah271_struc(n)%noah%stc(1)",noah271_struc(n)%noah%stc(1)
diff --git a/src/CPL/WRF_cpl/module_wrf_HYDRO.F90 b/src/CPL/WRF_cpl/module_wrf_HYDRO.F90
index 00966ef32..8721200ff 100644
--- a/src/CPL/WRF_cpl/module_wrf_HYDRO.F90
+++ b/src/CPL/WRF_cpl/module_wrf_HYDRO.F90
@@ -86,7 +86,7 @@ subroutine wrf_cpl_HYDRO(HYDRO_dt,grid,its,ite,jts,jte)
 #ifdef MPP_LAND
-      call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
+      call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
       call MPP_LAND_INIT(grid%e_we - grid%s_we - 1, grid%e_sn - grid%s_sn - 1)
       call mpp_land_bcast_int1 (nlst(did)%nsoil)
@@ -194,9 +194,9 @@ subroutine wrf_cpl_HYDRO(HYDRO_dt,grid,its,ite,jts,jte)
 #endif
       else
          do k = 1, nlst(did)%nsoil
-           RT_DOMAIN(did)%STC(:,:,k) = grid%TSLB(its:ite,k,jts:jte)
-           RT_DOMAIN(did)%smc(:,:,k) = grid%smois(its:ite,k,jts:jte)
-           RT_DOMAIN(did)%sh2ox(:,:,k) = grid%sh2o(its:ite,k,jts:jte)
+           RT_DOMAIN(did)%STC(:,:,k) = grid%TSLB(its:ite,k,jts:jte)
+           RT_DOMAIN(did)%smc(:,:,k) = grid%smois(its:ite,k,jts:jte)
+           RT_DOMAIN(did)%sh2ox(:,:,k) = grid%sh2o(its:ite,k,jts:jte)
         end do
         rt_domain(did)%infxsrt = grid%infxsrt(its:ite,jts:jte)
        rt_domain(did)%soldrain = grid%soldrain(its:ite,jts:jte)
@@ -215,7 +215,7 @@ subroutine wrf_cpl_HYDRO(HYDRO_dt,grid,its,ite,jts,jte)
! update WRF variable after running routing model.
      grid%sfcheadrt(its:ite,jts:jte) = rt_domain(did)%overland%control%surface_water_head_lsm
-! provide groundwater soil flux to WRF for fully coupled simulations (FERSCH 09/2014)
+! provide groundwater soil flux to WRF for fully coupled simulations (FERSCH 09/2014)
      if(nlst(did)%GWBASESWCRT .eq. 3 ) then
!Wei Yu: comment the following two lines. Not ready for WRF3.7 release
!yw          grid%qsgw(its:ite,jts:jte) = gw2d(did)%qsgw
@@ -249,7 +249,7 @@ subroutine wrf2lsm (z1,v1,kk1,z,vout,ix,jx,kk,vegtyp)
    do j = 1, jx
       do i = 1, ix
          do k = 1, kk
-            call interpLayer(Z1,v1(i,1:kk1,j),kk1,Z(k),vout(i,j,k))
+            call interpLayer(Z1,v1(i,1:kk1,j),kk1,Z(k),vout(i,j,k))
         end do
      end do
   end do
@@ -271,7 +271,7 @@ subroutine lsm2wrf (z1,v1,kk1,z,vout,ix,jx,kk,vegtyp)
    do j = 1, jx
       do i = 1, ix
          do k = 1, kk
-            call interpLayer(Z1,v1(i,j,1:kk1),kk1,Z(k),vout(i,k,j))
+            call interpLayer(Z1,v1(i,j,1:kk1),kk1,Z(k),vout(i,k,j))
         end do
      end do
   end do
diff --git a/src/HYDRO_drv/module_HYDRO_drv.F90 b/src/HYDRO_drv/module_HYDRO_drv.F90
index 3a0ed6140..15ad1e02c 100644
--- a/src/HYDRO_drv/module_HYDRO_drv.F90
+++ b/src/HYDRO_drv/module_HYDRO_drv.F90
@@ -1821,7 +1821,7 @@ subroutine HYDRO_finish()
      close(78)
 #endif
      call mpp_land_sync()
-     call MPI_finalize(ierr)
+     call MPI_Finalize(ierr)
      stop
 #else
diff --git a/src/IO/netcdf_layer.F90 b/src/IO/netcdf_layer.F90
index 850f2e826..286e9122e 100644
--- a/src/IO/netcdf_layer.F90
+++ b/src/IO/netcdf_layer.F90
@@ -43,7 +43,7 @@ end function create_file_signature
   end type NetCDF_serial_
   type, extends(NetCDF_layer_) :: NetCDF_parallel_
-     integer :: MPI_communicator
+     integer :: MPI_Communicator
      integer :: default_info = MPI_INFO_NULL
   contains
     procedure, pass(object) :: create_file => create_file_parallel
diff --git a/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F b/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F
index 0bcf2f18b..69be3ae04 100644
--- a/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F
+++ b/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F
@@ -417,9 +417,9 @@ subroutine read_hrldas_hdrinfo(wrfinput_flnm, ix, jx, &
   integer :: rank
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F "// &
-                                "read_hrldas_hdrinfo() - MPI_COMM_RANK"
+                                "read_hrldas_hdrinfo() - MPI_Comm_rank"
 #else
   rank = 0
 #endif
@@ -598,9 +598,9 @@ subroutine readland_hrldas(wrfinput_flnm, &
   crocus_opt = local_crocus_opt ! setting module scope variable
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F readland_hrldas()"// &
-                                " - MPI_COMM_RANK"
+                                " - MPI_Comm_rank"
 #else
   rank = 0
 #endif
@@ -620,8 +620,8 @@ subroutine readland_hrldas(wrfinput_flnm, &
   if (ierr /= 0) then
      write(*,'("READLAND_HRLDAS:  Problem opening wrfinput file: ''", A, "''")') trim(wrfinput_flnm)
 #ifdef _PARALLEL_
-     call mpi_finalize(ierr)
-     if (ierr /= 0) write(*, '("Problem with MPI_finalize.")')
+     call MPI_Finalize(ierr)
+     if (ierr /= 0) write(*, '("Problem with MPI_Finalize.")')
 #endif
      stop "FATAL ERROR: In module_hrldas_netcdf_io.F readland_hrldas()"// &
          " - Problem opening wrfinput file."
@@ -740,9 +740,9 @@ subroutine read_mmf_runoff(wrfinput_flnm, &
   integer :: rank
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F read_mmf_runoff()"// &
-                                " - MPI_COMM_RANK"
+                                " - MPI_Comm_rank"
 #else
   rank = 0
 #endif
@@ -762,8 +762,8 @@ subroutine read_mmf_runoff(wrfinput_flnm, &
   if (ierr /= 0) then
      write(*,'("read_mmf_runoff:  Problem opening wrfinput file: ''", A, "''")') trim(wrfinput_flnm)
 #ifdef _PARALLEL_
-     call mpi_finalize(ierr)
-     if (ierr /= 0) write(*, '("Problem with MPI_finalize.")')
+     call MPI_Finalize(ierr)
+     if (ierr /= 0) write(*, '("Problem with MPI_Finalize.")')
 #endif
      stop "FATAL ERROR: In module_hrldas_netcdf_io.F read_mmf_runoff()"// &
          " - Problem opening wrfinput file."
@@ -1513,9 +1513,9 @@ subroutine readinit_hrldas(netcdf_flnm, xstart, xend, ystart, yend, nsoil, sldpt
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F"// &
-                                " readinit_hrldas() - MPI_COMM_RANK"
+                                " readinit_hrldas() - MPI_Comm_rank"
   ierr = nf90_open_par(netcdf_flnm, NF90_NOWRITE, HYDRO_COMM_WORLD, MPI_INFO_NULL, ncid)
 #else
@@ -1534,7 +1534,7 @@ subroutine readinit_hrldas(netcdf_flnm, xstart, xend, ystart, yend, nsoil, sldpt
 #endif
     endif
 #ifdef _PARALLEL_
-    call mpi_finalize(ierr)
+    call MPI_Finalize(ierr)
 #endif
     stop "FATAL ERROR: In module_hrldas_netcdf_io.F readinit_hrldas()"// &
         " - Problem opening netcdf file."
@@ -1658,9 +1658,9 @@ subroutine init_interp(xstart, xend, ystart, yend, nsoil, sldpth, var, nvar, src
   integer :: rank
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F init_interp()"// &
-                                " - MPI_COMM_RANK."
+                                " - MPI_Comm_rank."
 #else
   rank = 0
 #endif
@@ -1964,15 +1964,15 @@ subroutine READFORC_HRLDAS(flnm_template, forcing_timestep, target_date, xstart,
 #endif
   if (ierr /= 0) then
 #ifdef _PARALLEL_
     call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
-     call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+     call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
     if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F"// &
-                                  " READFORC_HRLDAS() - MPI_COMM_RANK"
+                                  " READFORC_HRLDAS() - MPI_Comm_rank"
     if (rank == 0) then
 #endif
        write(*,'("A)  Problem opening netcdf file: ''", A, "''")') trim(flnm)
 #ifdef _PARALLEL_
    endif
-    call mpi_finalize(ierr)
+    call MPI_Finalize(ierr)
 #endif
    stop "FATAL ERROR: In module_hrldas_netcdf_io.F READFORC_HRLDAS()"// &
        " - Problem opening netcdf file"
@@ -3099,9 +3099,9 @@ subroutine prepare_restart_file_seq(outdir, version, igrid, llanduse, olddate, s
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F"// &
-                                " prepare_restart_file_seq() - MPI_COMM_RANK problem"
+                                " prepare_restart_file_seq() - MPI_Comm_rank problem"
 #else
@@ -3433,9 +3433,9 @@ subroutine read_restart(restart_flnm, &
   restart_filename_remember = restart_flnm
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F "// &
-                                "read_restart() - MPI_COMM_RANK"
+                                "read_restart() - MPI_Comm_rank"
   ierr = nf90_open_par(trim(restart_flnm), NF90_NOWRITE, HYDRO_COMM_WORLD, MPI_INFO_NULL, ncid)
 #else
@@ -3615,9 +3615,9 @@ subroutine get_from_restart_2d_float(parallel_xstart, parallel_xend, subwindow_x
 #ifdef _PARALLEL_
-  call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr)
+  call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr)
   if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F "// &
-                                "get_from_restart_2d_float() - MPI_COMM_RANK"
+                                "get_from_restart_2d_float() - MPI_Comm_rank"
   ierr = nf90_open_par(trim(restart_filename_remember), NF90_NOWRITE, HYDRO_COMM_WORLD, MPI_INFO_NULL, ncid)
diff --git a/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F b/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F
index bf5adf97e..47bb164e0 100644
--- a/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F
+++ b/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F
@@ -27,8 +27,8 @@ END SUBROUTINE wrf_error_fatal
 SUBROUTINE wrf_abort
   use module_cpl_land
   integer ierr
-  CALL MPI_ABORT(HYDRO_COMM_WORLD,1,ierr)
-  call MPI_finalize(ierr)
+  call MPI_Abort(HYDRO_COMM_WORLD,1,ierr)
+  call MPI_Finalize(ierr)
   STOP 'wrf_abort'
 END SUBROUTINE wrf_abort
diff --git a/src/MPP/CPL_WRF.F90 b/src/MPP/CPL_WRF.F90
index f2e06ec26..04332d411 100644
--- a/src/MPP/CPL_WRF.F90
+++ b/src/MPP/CPL_WRF.F90
@@ -47,17 +47,17 @@ subroutine CPL_LAND_INIT(istart,iend,jstart,jend)
    data cyclic/.false.,.false./  ! not cyclic
    data reorder/.false./
-   CALL mpi_initialized( mpi_inited, ierr )
+   call MPI_Initialized( mpi_inited, ierr )
    if ( .NOT. mpi_inited ) then
-      call mpi_init(ierr)
-      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_INIT failed")
-      call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
-      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_DUP failed")
+      call MPI_Init(ierr)
+      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Init failed")
+      call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
+      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_dup failed")
    endif
-   call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_global_id, ierr )
-   call MPI_COMM_SIZE( HYDRO_COMM_WORLD, total_pe_num, ierr )
-   if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_RANK and/or MPI_COMM_SIZE failed")
+   call MPI_Comm_rank( HYDRO_COMM_WORLD, my_global_id, ierr )
+   call MPI_Comm_size( HYDRO_COMM_WORLD, total_pe_num, ierr )
+   if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_rank and/or MPI_Comm_size failed")
    allocate(node_info(9,total_pe_num))
@@ -98,7 +98,7 @@ subroutine CPL_LAND_INIT(istart,iend,jstart,jend)
    call MPI_Cart_create(HYDRO_COMM_WORLD, ndim, dims, &
         cyclic, reorder, cartGridComm, ierr)
-   call MPI_CART_GET(cartGridComm, 2, dims, cyclic, coords, ierr)
+   call MPI_Cart_get(cartGridComm, 2, dims, cyclic, coords, ierr)
    p_up_down = coords(0)
    p_left_right = coords(1)
@@ -116,21 +116,21 @@ subroutine send_info()
    if(my_global_id .eq. 0) then
       do i = 1, total_pe_num-1
-        call mpi_recv(node_info(:,i+1),size,MPI_INTEGER, &
+        call MPI_Recv(node_info(:,i+1),size,MPI_INTEGER, &
             i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      enddo
    else
-     call mpi_send(node_info(:,my_global_id+1),size, &
+     call MPI_Send(node_info(:,my_global_id+1),size, &
          MPI_INTEGER,0,tag,HYDRO_COMM_WORLD,ierr)
    endif
-   call MPI_barrier( HYDRO_COMM_WORLD ,ierr)
+   call MPI_Barrier( HYDRO_COMM_WORLD ,ierr)
    size = 9 * total_pe_num
-   call mpi_bcast(node_info,size,MPI_INTEGER, &
+   call MPI_Bcast(node_info,size,MPI_INTEGER, &
        0,HYDRO_COMM_WORLD,ierr)
-   call MPI_barrier( HYDRO_COMM_WORLD ,ierr)
+   call MPI_Barrier( HYDRO_COMM_WORLD ,ierr)
 end subroutine send_info
diff --git a/src/MPP/module_mpp_GWBUCKET.F90 b/src/MPP/module_mpp_GWBUCKET.F90
index 9c9adf0b5..a69f800c0 100644
--- a/src/MPP/module_mpp_GWBUCKET.F90
+++ b/src/MPP/module_mpp_GWBUCKET.F90
@@ -37,7 +37,7 @@ subroutine collectSizeInd(numbasns)
     if(my_id .ne. IO_id) then
        tag = 66
-       call mpi_send(numbasns,1,MPI_INTEGER, IO_id, &
+       call MPI_Send(numbasns,1,MPI_INTEGER, IO_id, &
            tag,HYDRO_COMM_WORLD,ierr)
     else
        do i = 0, numprocs - 1
@@ -45,7 +45,7 @@ subroutine collectSizeInd(numbasns)
              sizeInd(i+1) = numbasns
          else
             tag = 66
-            call mpi_recv(rcv,1,&
+            call MPI_Recv(rcv,1,&
                 MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
             sizeInd(i+1) = rcv
@@ -81,10 +81,10 @@ subroutine gw_write_io_real(numbasns,inV,ind,outV)
     if(my_id .ne. IO_id) then
        if(numbasns .gt. 0) then
           tag = 62
-          call mpi_send(inV,numbasns,MPI_REAL, IO_id, &
+          call MPI_Send(inV,numbasns,MPI_REAL, IO_id, &
              tag,HYDRO_COMM_WORLD,ierr)
          tag2 = 63
-          call mpi_send(ind,numbasns,MPI_INTEGER8, IO_id, &
+          call MPI_Send(ind,numbasns,MPI_INTEGER8, IO_id, &
              tag2,HYDRO_COMM_WORLD,ierr)
        endif
     else
@@ -97,10 +97,10 @@ subroutine gw_write_io_real(numbasns,inV,ind,outV)
          if(i .ne. IO_id) then
             if(sizeInd(i+1) .gt. 0) then
               tag = 62
-              call mpi_recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),&
+              call MPI_Recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),&
                   MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              tag2 = 63
-              call mpi_recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),&
+              call MPI_Recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),&
                   MPI_INTEGER8,i,tag2,HYDRO_COMM_WORLD,mpp_status,ierr)
              do k = 1, sizeInd(i+1)
                 outV(ibuff(k)) = vbuff(k)
@@ -139,10 +139,10 @@ subroutine gw_write_io_int(numbasns,inV,ind,outV)
     if(my_id .ne. IO_id) then
        if(numbasns .gt. 0) then
           tag = 62
-          call mpi_send(inV,numbasns,MPI_INTEGER8, IO_id, &
+          call MPI_Send(inV,numbasns,MPI_INTEGER8, IO_id, &
              tag,HYDRO_COMM_WORLD,ierr)
          tag2 = 63
-          call mpi_send(ind,numbasns,MPI_INTEGER8, IO_id, &
+          call MPI_Send(ind,numbasns,MPI_INTEGER8, IO_id, &
              tag2,HYDRO_COMM_WORLD,ierr)
        endif
     else
@@ -155,10 +155,10 @@ subroutine gw_write_io_int(numbasns,inV,ind,outV)
          if(i .ne. IO_id) then
             if(sizeInd(i+1) .gt. 0) then
               tag = 62
-              call mpi_recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),&
+              call MPI_Recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),&
                   MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              tag2 = 63
-              call mpi_recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),&
+              call MPI_Recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),&
                   MPI_INTEGER8,i,tag2,HYDRO_COMM_WORLD,mpp_status,ierr)
              do k = 1, sizeInd(i+1)
                 outV(ibuff(k)) = vbuff(k)
diff --git a/src/MPP/module_mpp_ReachLS.F90 b/src/MPP/module_mpp_ReachLS.F90
index 6d6028ea0..a5fd079e8 100644
--- a/src/MPP/module_mpp_ReachLS.F90
+++ b/src/MPP/module_mpp_ReachLS.F90
@@ -82,30 +82,30 @@ subroutine updateLinkV8_mem(LinkV, outV)
        if(my_id .ne. IO_id) then
           tag = 101
-          call mpi_send(LLINKLEN,1,MPI_INTEGER, IO_id, &
+          call MPI_Send(LLINKLEN,1,MPI_INTEGER, IO_id, &
               tag,HYDRO_COMM_WORLD,ierr)
          if(LLINKLEN .gt. 0) then
             tag = 102
-            call mpi_send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, &
+            call MPI_Send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, &
                 tag,HYDRO_COMM_WORLD,ierr)
            tag = 103
-            call mpi_send(LinkV,LLINKLEN,MPI_DOUBLE_PRECISION, IO_id, &
+            call MPI_Send(LinkV,LLINKLEN,MPI_DOUBLE_PRECISION, IO_id, &
                 tag,HYDRO_COMM_WORLD,ierr)
          endif
       else
          do i = 0, numprocs - 1
            if(i .ne. IO_id) then
               tag = 101
-              call mpi_recv(lsize,1,MPI_INTEGER, i, &
+              call MPI_Recv(lsize,1,MPI_INTEGER, i, &
                   tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              if(lsize .gt. 0) then
                allocate(lindex(lsize) )
                allocate(tmpBuf(lsize) )
                tag = 102
-                call mpi_recv(lindex,lsize,MPI_INTEGER, i, &
+                call MPI_Recv(lindex,lsize,MPI_INTEGER, i, &
                     tag,HYDRO_COMM_WORLD,mpp_status,ierr)
                tag = 103
-                call mpi_recv(tmpBuf,lsize,&
+                call MPI_Recv(tmpBuf,lsize,&
                     MPI_DOUBLE_PRECISION,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
                do k = 1, lsize
                   gLinkV_r8(lindex(k)) = gLinkV_r8(lindex(k)) + tmpBuf(k)
@@ -146,30 +146,30 @@ subroutine updateLinkV4_mem(LinkV, outV)
        if(my_id .ne. IO_id) then
           tag = 101
-          call mpi_send(LLINKLEN,1,MPI_INTEGER, IO_id, &
+          call MPI_Send(LLINKLEN,1,MPI_INTEGER, IO_id, &
              tag,HYDRO_COMM_WORLD,ierr)
          if(LLINKLEN .gt. 0) then
             tag = 102
-            call mpi_send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, &
+            call MPI_Send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, &
                 tag,HYDRO_COMM_WORLD,ierr)
            tag = 103
-            call mpi_send(LinkV,LLINKLEN,MPI_REAL, IO_id, &
+            call MPI_Send(LinkV,LLINKLEN,MPI_REAL, IO_id, &
                 tag,HYDRO_COMM_WORLD,ierr)
          endif
       else
          do i = 0, numprocs - 1
            if(i .ne. IO_id) then
              tag = 101
-              call mpi_recv(lsize,1,MPI_INTEGER, i, &
+              call MPI_Recv(lsize,1,MPI_INTEGER, i, &
                  tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              if(lsize .gt. 0) then
               allocate(lindex(lsize) )
               allocate(tmpBuf(lsize) )
               tag = 102
-               call mpi_recv(lindex,lsize,MPI_INTEGER, i, &
+               call MPI_Recv(lindex,lsize,MPI_INTEGER, i, &
                   tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              tag = 103
-               call mpi_recv(tmpBuf,lsize,&
+               call MPI_Recv(tmpBuf,lsize,&
                   MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              do k = 1, lsize
                 gLinkV_r4(lindex(k)) = gLinkV_r4(lindex(k)) + tmpBuf(k)
@@ -204,14 +204,14 @@ subroutine updateLinkV8(LinkV, outV)
        if(my_id .ne. IO_id) then
           tag = 102
-          call mpi_send(gLinkV,gnlinksl,MPI_DOUBLE_PRECISION, IO_id, &
+          call MPI_Send(gLinkV,gnlinksl,MPI_DOUBLE_PRECISION, IO_id, &
              tag,HYDRO_COMM_WORLD,ierr)
       else
          gLinkV_r = gLinkV
          do i = 0, numprocs - 1
            if(i .ne. IO_id) then
               tag = 102
-              call mpi_recv(gLinkV,gnlinksl,&
+              call MPI_Recv(gLinkV,gnlinksl,&
                   MPI_DOUBLE_PRECISION,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
               gLinkV_r = gLinkV_r + gLinkV
            end if
@@ -237,14 +237,14 @@ subroutine updateLinkV4(LinkV, outV)
        if(my_id .ne. IO_id) then
           tag = 102
-          call mpi_send(gLinkV,gnlinksl,MPI_REAL, IO_id, &
+          call MPI_Send(gLinkV,gnlinksl,MPI_REAL, IO_id, &
              tag,HYDRO_COMM_WORLD,ierr)
       else
          gLinkV_r = gLinkV
          do i = 0, numprocs - 1
            if(i .ne. IO_id) then
               tag = 102
-              call mpi_recv(gLinkV,gnlinksl,&
+              call MPI_Recv(gLinkV,gnlinksl,&
                   MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
               gLinkV_r = gLinkV_r + gLinkV
            end if
@@ -260,7 +260,7 @@ subroutine gbcastReal(inV, outV)
       real, dimension(:) :: inV
       integer :: ierr
       call ReachLS_write_io(inV,outV)
-      call mpi_bcast(outV(1:gnlinksl),gnlinksl,MPI_REAL, &
+      call MPI_Bcast(outV(1:gnlinksl),gnlinksl,MPI_REAL, &
           IO_id,HYDRO_COMM_WORLD,ierr)
   end subroutine gbcastReal
@@ -277,7 +277,7 @@ subroutine gbcastReal2_old(index,size1,inV, insize, outV)
          bsize = linkls_e(i+1) - linkls_s(i+1) + 1
          if(linkls_e(i+1) .gt. 0) then
             if(my_id .eq. i) tmpV(1:bsize) = inV(1:bsize)
-            call mpi_bcast(tmpV(1:bsize),bsize,MPI_REAL, &
+            call MPI_Bcast(tmpV(1:bsize),bsize,MPI_REAL, &
                 i,HYDRO_COMM_WORLD,ierr)
            do j = 1, size1
               do k = 1, bsize
@@ -304,7 +304,7 @@ subroutine gbcastReal2(index,size1,inV, insize, outV)
      integer :: ierr, k, i, m, j, bsize
      outV = 0
      call ReachLS_write_io(inV,gbuf)
-     call mpi_bcast(gbuf,gnlinksl,MPI_REAL, &
+     call MPI_Bcast(gbuf,gnlinksl,MPI_REAL, &
          IO_id,HYDRO_COMM_WORLD,ierr)
      do j = 1, size1
         outV(j) = gbuf(index(j))
@@ -320,7 +320,7 @@ subroutine gbcastInt(inV, outV)
       integer, dimension(:) :: inV
       integer :: ierr
       call ReachLS_write_io(inV,outV)
-      call mpi_bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER, &
+      call MPI_Bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER, &
          IO_id,HYDRO_COMM_WORLD,ierr)
   end subroutine gbcastInt
@@ -330,7 +330,7 @@ subroutine gbcastInt8(inV, outV)
       integer(kind=int64), dimension(:) :: inV
       integer :: ierr
       call ReachLS_write_io(inV,outV)
-      call mpi_bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER8, &
+      call MPI_Bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER8, &
          IO_id,HYDRO_COMM_WORLD,ierr)
   end subroutine gbcastInt8
@@ -347,7 +347,7 @@ subroutine getLocalIndx(glinksl,LINKID, LLINKID)
      call ReachLS_write_io(LINKID,gLinkId)
-     call mpi_bcast(gLinkId(1:glinksl),glinksl,MPI_INTEGER8, &
+     call MPI_Bcast(gLinkId(1:glinksl),glinksl,MPI_INTEGER8, &
          IO_id,HYDRO_COMM_WORLD,ierr)
     ! The following loops are replaced by a hashtable-based algorithm
@@ -386,8 +386,8 @@ subroutine ReachLS_ini(glinksl,nlinksl,linklsS, linklsE)
        integer :: i, ii, ierr
        ! get my_id and numprocs
-       call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
-       call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
+       call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr )
+       call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr )
        nlinksl = glinksl / numprocs
@@ -453,7 +453,7 @@ subroutine MapGrid2ReachIni(in2d)
           if(my_id .eq. n-1) then
              tmpS = sDataRec
          endif
-          call mpi_bcast(tmpS,numprocs,MPI_INTEGER, &
+          call MPI_Bcast(tmpS,numprocs,MPI_INTEGER, &
              n-1,HYDRO_COMM_WORLD,ierr)
          rDataRec(n) = tmpS(n)
       enddo
@@ -475,7 +475,7 @@ subroutine ReachLS_decompReal(inV,outV)
               endif
            else
               if(aLinksl(i) .gt. 0) then
-                 call mpi_send(inV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Send(inV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_REAL, i-1 ,tag,HYDRO_COMM_WORLD,ierr)
              endif
@@ -483,7 +483,7 @@ subroutine ReachLS_decompReal(inV,outV)
         end do
      else
         if(aLinksl(my_id+1) .gt. 0) then
-           call mpi_recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ),  & !! this one has +1!
+           call MPI_Recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ),  & !! this one has +1!
                aLinksl(my_id+1), &
                MPI_REAL, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        endif
@@ -505,7 +505,7 @@ subroutine ReachLS_decompReal8(inV,outV)
               endif
            else
               if(aLinksl(i) .gt. 0) then
-                 call mpi_send(inV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Send(inV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_REAL8, i-1 ,tag,HYDRO_COMM_WORLD,ierr)
              endif
@@ -513,7 +513,7 @@ subroutine ReachLS_decompReal8(inV,outV)
         end do
      else
         if(aLinksl(my_id+1) .gt. 0) then
-           call mpi_recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ),  & !! this one has +1!
+           call MPI_Recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ),  & !! this one has +1!
                aLinksl(my_id+1), &
                MPI_REAL8, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        endif
@@ -535,7 +535,7 @@ subroutine ReachLS_decompInt(inV,outV)
               endif
            else
               if(aLinksl(i) .gt. 0) then
-                 call mpi_send(inV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Send(inV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_INTEGER, i-1,tag,HYDRO_COMM_WORLD,ierr)
              endif
@@ -543,7 +543,7 @@ subroutine ReachLS_decompInt(inV,outV)
         end do
      else
         if(aLinksl(my_id+1) .gt. 0) then
-           call mpi_recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), &
+           call MPI_Recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), &
                alinksl(my_id+1), &
                MPI_INTEGER, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        endif
@@ -567,7 +567,7 @@ subroutine ReachLS_decompInt8(inV,outV)
               endif
            else
               if(aLinksl(i) .gt. 0) then
-                 call mpi_send(inV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Send(inV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_INTEGER8, i-1,tag,HYDRO_COMM_WORLD,ierr)
              endif
@@ -575,7 +575,7 @@ subroutine ReachLS_decompInt8(inV,outV)
         end do
      else
         if(aLinksl(my_id+1) .gt. 0) then
-           call mpi_recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), &
+           call MPI_Recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), &
                alinksl(my_id+1), &
                MPI_INTEGER8, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        endif
@@ -602,8 +602,8 @@ subroutine ReachLS_decompChar(inV,outV)
               endif
            else
               if(aLinksl(i) .gt. 0) then
-                 ! The mpi_send takes what you give it and THEN treats each caracter as an array element.
-                 call mpi_send(inV(linkls_s(i):linkls_e(i)), &
+                 ! The MPI_Send takes what you give it and THEN treats each caracter as an array element.
+                 call MPI_Send(inV(linkls_s(i):linkls_e(i)), &
                      strLen*aLinksl(i), &
                      MPI_CHARACTER, i-1, tag, HYDRO_COMM_WORLD, ierr)
              endif
@@ -611,8 +611,8 @@ subroutine ReachLS_decompChar(inV,outV)
         end do
      else
        if(aLinksl(my_id+1) .gt. 0) then
-           ! The mpi_recv treats each caracter as an array element.
-           call mpi_recv(outV(1 : (linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), &  !jlm should have +1
+           ! The MPI_Recv treats each caracter as an array element.
+           call MPI_Recv(outV(1 : (linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), &  !jlm should have +1
               strLen*alinksl(my_id+1), &
               MPI_CHARACTER, io_id, tag, HYDRO_COMM_WORLD, mpp_status,ierr )
       endif
@@ -637,7 +637,7 @@ subroutine ReachLS_wReal(inV,outV)
            else
               if(aLinksl(i) .gt. 0) then
-                 call mpi_recv(outV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Recv(outV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_REAL,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
@@ -647,7 +647,7 @@ subroutine ReachLS_wReal(inV,outV)
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
            ss = size(inv,1)
-           call mpi_send(inV(1:aLinksl(my_id+1) ), &
+           call MPI_Send(inV(1:aLinksl(my_id+1) ), &
               aLinksl(my_id+1), &
               MPI_REAL,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
@@ -671,7 +671,7 @@ subroutine ReachLS_wReal8(inV,outV)
            else
               if(aLinksl(i) .gt. 0) then
-                 call mpi_recv(outV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Recv(outV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_REAL8,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
@@ -681,7 +681,7 @@ subroutine ReachLS_wReal8(inV,outV)
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
            ss = size(inv,1)
-           call mpi_send(inV(1:aLinksl(my_id+1) ), &
+           call MPI_Send(inV(1:aLinksl(my_id+1) ), &
               aLinksl(my_id+1), &
               MPI_REAL8,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
@@ -705,7 +705,7 @@ subroutine ReachLS_wInt(inV,outV)
            else
               if(aLinksl(i) .gt. 0) then
                  tag = 12
-                 call mpi_recv(outV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Recv(outV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
@@ -714,7 +714,7 @@ subroutine ReachLS_wInt(inV,outV)
      else
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
-           call mpi_send(inV(1:aLinksl(my_id+1) ), &
+           call MPI_Send(inV(1:aLinksl(my_id+1) ), &
               aLinksl(my_id+1), &
               MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
@@ -737,7 +737,7 @@ subroutine ReachLS_wInt8(inV,outV)
            else
               if(aLinksl(i) .gt. 0) then
                  tag = 12
-                 call mpi_recv(outV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Recv(outV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_INTEGER8,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
@@ -746,7 +746,7 @@ subroutine ReachLS_wInt8(inV,outV)
      else
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
-           call mpi_send(inV(1:aLinksl(my_id+1) ), &
+           call MPI_Send(inV(1:aLinksl(my_id+1) ), &
               aLinksl(my_id+1), &
               MPI_INTEGER8,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
@@ -770,7 +770,7 @@ subroutine ReachLS_wInt2(inV,outV,len,glen)
            else
               if(aLinksl(i) .gt. 0) then
                  tag = 12
-                 call mpi_recv(outV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Recv(outV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
@@ -779,7 +779,7 @@ subroutine ReachLS_wInt2(inV,outV,len,glen)
      else
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
-           call mpi_send(inV(1:aLinksl(my_id+1) ), &
+           call MPI_Send(inV(1:aLinksl(my_id+1) ), &
               aLinksl(my_id+1), &
               MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
@@ -803,7 +803,7 @@ subroutine ReachLS_wReal2(inV,outV,len,glen)
            else
               if(aLinksl(i) .gt. 0) then
                  tag = 12
-                 call mpi_recv(outV(linkls_s(i):linkls_e(i)), &
+                 call MPI_Recv(outV(linkls_s(i):linkls_e(i)), &
                      aLinksl(i), &
                      MPI_REAL,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
@@ -812,7 +812,7 @@ subroutine ReachLS_wReal2(inV,outV,len,glen)
      else
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
-           call mpi_send(inV(1:aLinksl(my_id+1) ), &
+           call MPI_Send(inV(1:aLinksl(my_id+1) ), &
               aLinksl(my_id+1), &
               MPI_REAL,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
@@ -837,8 +837,8 @@ subroutine ReachLS_wChar(inV,outV)
              if(aLinksl(i) .gt. 0) then
                  tag = 12
                  ! ? seems asymmetric with ReachLS_decompChar
-                 call mpi_recv(outV( linkls_s(i) : linkls_e(i) ), &
-!                 call mpi_recv(outV( ((linkls_s(i)-1)+1) : (linkls_e(i)) ), &
+                 call MPI_Recv(outV( linkls_s(i) : linkls_e(i) ), &
+!                 call MPI_Recv(outV( ((linkls_s(i)-1)+1) : (linkls_e(i)) ), &
                      aLinksl(i), &
                      MPI_CHARACTER, i-1, tag, HYDRO_COMM_WORLD, mpp_status, ierr )
              endif
@@ -847,8 +847,8 @@ subroutine ReachLS_wChar(inV,outV)
      else
        if(aLinksl(my_id+1) .gt. 0) then
            tag = 12
-           ! The mpi_send takes what you give it and THEN treats each caracter as an array element.
-           call mpi_send(inV(1:aLinksl(my_id+1)), &
+           ! The MPI_Send takes what you give it and THEN treats each caracter as an array element.
+           call MPI_Send(inV(1:aLinksl(my_id+1)), &
               aLinksl(my_id+1), &
               MPI_CHARACTER, io_id, tag, HYDRO_COMM_WORLD, ierr)
        endif
@@ -984,7 +984,7 @@ subroutine getToInd(from,to,ind,indLen,gToNodeOut)
      ToInd(my_id+1) = kk
      do i = 0, numprocs - 1
-        call mpi_bcast(ToInd(i+1),1,MPI_INTEGER8, &
+        call MPI_Bcast(ToInd(i+1),1,MPI_INTEGER8, &
            i,HYDRO_COMM_WORLD,ierr)
      end do
@@ -1025,7 +1025,7 @@ subroutine com_decomp1dInt(inV,gsize,outV,lsize)
               endif
            else
               if(ssize .gt. 0 ) then
-                 call mpi_send(inV(start:start+ssize-1), ssize, &
+                 call MPI_Send(inV(start:start+ssize-1), ssize, &
                      MPI_INTEGER, i-1,tag,HYDRO_COMM_WORLD,ierr)
              endif
           endif
@@ -1038,7 +1038,7 @@ subroutine com_decomp1dInt(inV,gsize,outV,lsize)
        endif
        if( lsize .gt. 0) then
           allocate(outV(lsize) )
-           call mpi_recv(outV,lsize, &
+           call MPI_Recv(outV,lsize, &
               MPI_INTEGER, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        endif
     endif
@@ -1082,7 +1082,7 @@ subroutine com_decomp1dInt8(inV,gsize,outV,lsize)
               endif
            else
               if(ssize .gt. 0 ) then
-                 call mpi_send(inV(start:start+ssize-1), ssize, &
+                 call MPI_Send(inV(start:start+ssize-1), ssize, &
                      MPI_INTEGER8, i-1,tag,HYDRO_COMM_WORLD,ierr)
              endif
           endif
@@ -1095,7 +1095,7 @@ subroutine com_decomp1dInt8(inV,gsize,outV,lsize)
        endif
        if( lsize .gt. 0) then
           allocate(outV(lsize) )
-           call mpi_recv(outV,lsize, &
+           call MPI_Recv(outV,lsize, &
               MPI_INTEGER8, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        endif
     endif
@@ -1134,7 +1134,7 @@ subroutine com_write1dInt(inV,lsize,outV,gsize)
               endif
            else
               if(rsize .gt. 0 ) then
-                 call mpi_recv(outV(start:start+rsize-1), rsize, &
+                 call MPI_Recv(outV(start:start+rsize-1), rsize, &
                      MPI_INTEGER, i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
           endif
@@ -1146,7 +1146,7 @@ subroutine com_write1dInt(inV,lsize,outV,gsize)
           lsize = ncomsize
        endif
        if( lsize .gt. 0) then
-           call mpi_send(inV, lsize, &
+           call MPI_Send(inV, lsize, &
              MPI_INTEGER, io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
     endif
@@ -1185,7 +1185,7 @@ subroutine com_write1dInt8(inV,lsize,outV,gsize)
               endif
            else
               if(rsize .gt. 0 ) then
-                 call mpi_recv(outV(start:start+rsize-1), rsize, &
+                 call MPI_Recv(outV(start:start+rsize-1), rsize, &
                      MPI_INTEGER8, i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              endif
           endif
@@ -1197,7 +1197,7 @@ subroutine com_write1dInt8(inV,lsize,outV,gsize)
           lsize = ncomsize
        endif
        if( lsize .gt. 0) then
-           call mpi_send(inV, lsize, &
+           call MPI_Send(inV, lsize, &
              MPI_INTEGER8, io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
     endif
@@ -1239,7 +1239,7 @@ subroutine pack_decomp_int(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid)
     if(my_id .ne. IO_id) then
        tag = 72
        if(lnsizes(my_id + 1) .gt. 0) then
-          call mpi_recv(bufid,lnsizes(my_id + 1),&
+          call MPI_Recv(bufid,lnsizes(my_id + 1),&
              MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
       endif
     else
@@ -1247,7 +1247,7 @@ subroutine pack_decomp_int(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid)
          if(i .ne. my_id) then
             tag = 72
             if(lnsizes(i+1) .gt. 0) then
-               call mpi_send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), &
+               call MPI_Send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), &
                   lnsizes(i+1),MPI_INTEGER,i, tag,HYDRO_COMM_WORLD,ierr)
            endif
         else
@@ -1295,7 +1295,7 @@ subroutine pack_decomp_int8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid)
     if(my_id .ne. IO_id) then
        tag = 72
        if(lnsizes(my_id + 1) .gt. 0) then
-          call mpi_recv(bufid,lnsizes(my_id + 1),&
+          call MPI_Recv(bufid,lnsizes(my_id + 1),&
              MPI_INTEGER8,io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
       endif
     else
@@ -1303,7 +1303,7 @@ subroutine pack_decomp_int8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid)
          if(i .ne. my_id) then
             tag = 72
             if(lnsizes(i+1) .gt. 0) then
-               call mpi_send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), &
+               call MPI_Send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), &
                   lnsizes(i+1),MPI_INTEGER8,i, tag,HYDRO_COMM_WORLD,ierr)
            endif
         else
@@ -1343,7 +1343,7 @@ subroutine pack_decomp_real8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid)
     if(my_id .ne. IO_id) then
        tag = 72
        if(lnsizes(my_id + 1) .gt. 0) then
-          call mpi_recv(bufid,lnsizes(my_id + 1),&
+          call MPI_Recv(bufid,lnsizes(my_id + 1),&
              MPI_DOUBLE_PRECISION,io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
       endif
     else
@@ -1351,7 +1351,7 @@ subroutine pack_decomp_real8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid)
          if(i .ne. my_id) then
             tag = 72
             if(lnsizes(i+1) .gt. 0) then
-               call mpi_send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), &
+               call MPI_Send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), &
                   lnsizes(i+1),MPI_DOUBLE_PRECISION,i, tag,HYDRO_COMM_WORLD,ierr)
            endif
         else
@@ -1395,15 +1395,15 @@ subroutine TONODE2RSL (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag)
            end do
         else
            tag = 82
-           call mpi_recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
+           call MPI_Recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
           if(tmpSize .gt. 0) then
              allocate(buf(tmpSize))
              allocate(tmpInd(tmpSize))
              tag = 83
-              call mpi_recv(tmpInd, tmpSize , &
+              call MPI_Recv(tmpInd, tmpSize , &
                  MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              tag = 84
-              call mpi_recv(buf, tmpSize , &
+              call MPI_Recv(buf, tmpSize , &
                  MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              do k = 1, tmpSize
                 if(buf(k) .ne. flag) then
@@ -1417,13 +1417,13 @@ subroutine TONODE2RSL (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag)
        end do
      else
        tag = 82
-        call mpi_send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
+        call MPI_Send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
        if(size .gt. 0) then
            tag = 83
-           call mpi_send(ind(1:size),size, &
+           call MPI_Send(ind(1:size),size, &
               MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
           tag = 84
-           call mpi_send(inVar(1:size),size, &
+           call MPI_Send(inVar(1:size),size, &
               MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
      endif
@@ -1460,15 +1460,15 @@ subroutine TONODE2RSL8 (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag)
            end do
         else
            tag = 82
-           call mpi_recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
+           call MPI_Recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
           if(tmpSize .gt. 0) then
              allocate(buf(tmpSize))
              allocate(tmpInd(tmpSize))
              tag = 83
-              call mpi_recv(tmpInd, tmpSize , &
+              call MPI_Recv(tmpInd, tmpSize , &
                  MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              tag = 84
-              call mpi_recv(buf, tmpSize , &
+              call MPI_Recv(buf, tmpSize , &
                  MPI_INTEGER8,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
              do k = 1, tmpSize
                 if(buf(k) .ne. flag) then
@@ -1482,13 +1482,13 @@ subroutine TONODE2RSL8 (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag)
        end do
      else
        tag = 82
-        call mpi_send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
+        call MPI_Send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
        if(size .gt. 0) then
            tag = 83
-           call mpi_send(ind(1:size),size, &
+           call MPI_Send(ind(1:size),size, &
               MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr)
           tag = 84
-           call mpi_send(inVar(1:size),size, &
+           call MPI_Send(inVar(1:size),size, &
               MPI_INTEGER8,io_id,tag,HYDRO_COMM_WORLD,ierr)
        endif
      endif
diff --git a/src/MPP/mpp_land.F90 b/src/MPP/mpp_land.F90
index 1c9a48e15..41698e2b7 100644
--- a/src/MPP/mpp_land.F90
+++ b/src/MPP/mpp_land.F90
@@ -14,7 +14,7 @@ MODULE MODULE_MPP_LAND
   integer, public :: global_nx, global_ny, local_nx,local_ny
   integer, public :: global_rt_nx, global_rt_ny
   integer, public :: local_rt_nx,local_rt_ny,rt_AGGFACTRT
-  integer, public :: numprocs ! total process, get by mpi initialization.
+  integer, public :: numprocs ! total process, get by MPI initialization.
   integer :: local_startx, local_starty
   integer :: local_startx_rt, local_starty_rt, local_endx_rt, local_endy_rt
@@ -66,8 +66,8 @@ subroutine LOG_MAP2d()
    data cyclic/.false.,.false./  ! not cyclic
    data reorder/.false./
-   call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
-   call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
+   call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr )
+   call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr )
    call getNX_NY(numprocs, left_right_np,up_down_np)
    if(my_id.eq.IO_id) then
@@ -111,7 +111,7 @@ subroutine LOG_MAP2d()
    call MPI_Cart_create(HYDRO_COMM_WORLD, ndim, dims, &
         cyclic, reorder, cartGridComm, ierr)
-   call MPI_CART_GET(cartGridComm, 2, dims, cyclic, coords, ierr)
+   call MPI_Cart_get(cartGridComm, 2, dims, cyclic, coords, ierr)
    p_up_down = coords(0)
    p_left_right = coords(1)
@@ -133,17 +133,17 @@ subroutine MPP_LAND_INIT(in_global_nx,in_global_ny)
       global_ny = in_global_ny
    end if
-   call mpi_initialized( mpi_inited, ierr )
+   call MPI_Initialized( mpi_inited, ierr )
    if ( .not. mpi_inited ) then
-      call MPI_INIT_THREAD( MPI_THREAD_FUNNELED, provided, ierr )
-      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_INIT failed")
-      call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
-      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_DUP failed")
+      call MPI_Init_thread( MPI_THREAD_FUNNELED, provided, ierr )
+      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Init failed")
+      call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
+      if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_dup failed")
    endif
-   call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
-   call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
-   if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_RANK and/or MPI_COMM_SIZE failed")
+   call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr )
+   call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr )
+   if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_rank and/or MPI_Comm_size failed")
   ! create 2d logical mapping of the CPU.
    call log_map2d()
@@ -224,26 +224,26 @@ subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
      if(right_id .ge. 0) then ! ### send to right first.
         tag = 11
         size = ny
-        call mpi_send(in_out_data(nx-1,:),size,MPI_REAL, &
+        call MPI_Send(in_out_data(nx-1,:),size,MPI_REAL, &
            right_id,tag,HYDRO_COMM_WORLD,ierr)
      end if
      if(left_id .ge. 0) then ! receive from left
         tag = 11
         size = ny
-        call mpi_recv(in_out_data(1,:),size,MPI_REAL, &
+        call MPI_Recv(in_out_data(1,:),size,MPI_REAL, &
            left_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      endif
      if(left_id .ge. 0 ) then ! ### send to left second.
         size = ny
         tag = 21
-        call mpi_send(in_out_data(2,:),size,MPI_REAL, &
+        call MPI_Send(in_out_data(2,:),size,MPI_REAL, &
            left_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(right_id .ge. 0) then ! receive from right
         tag = 21
         size = ny
-        call mpi_recv(in_out_data(nx,:),size,MPI_REAL,&
+        call MPI_Recv(in_out_data(nx,:),size,MPI_REAL,&
            right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
      endif
@@ -252,13 +252,13 @@ subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
      if(right_id .ge. 0) then ! ### send to right first.
         tag = 11
         size = 2*ny
-        call mpi_send(in_out_data(nx-1:nx,:),size,MPI_REAL, &
+        call MPI_Send(in_out_data(nx-1:nx,:),size,MPI_REAL, &
            right_id,tag,HYDRO_COMM_WORLD,ierr)
      end if
      if(left_id .ge. 0) then ! receive from left
         tag = 11
         size = 2*ny
-        call mpi_recv(data_r,size,MPI_REAL,left_id,tag, &
+        call MPI_Recv(data_r,size,MPI_REAL,left_id,tag, &
            HYDRO_COMM_WORLD,mpp_status,ierr)
        in_out_data(1,:) = in_out_data(1,:) + data_r(1,:)
        in_out_data(2,:) = in_out_data(2,:) + data_r(2,:)
@@ -267,13 +267,13 @@ subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
      if(left_id .ge. 0 ) then ! ### send to left second.
         size = 2*ny
         tag = 21
-        call mpi_send(in_out_data(1:2,:),size,MPI_REAL, &
+        call MPI_Send(in_out_data(1:2,:),size,MPI_REAL, &
            left_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(right_id .ge. 0) then ! receive from right
         tag = 21
         size = 2*ny
-        call mpi_recv(in_out_data(nx-1:nx,:),size,MPI_REAL,&
+        call MPI_Recv(in_out_data(nx-1:nx,:),size,MPI_REAL,&
           right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
      endif
   endif  ! end if black for flag.
@@ -291,26 +291,26 @@ subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag)
      if(right_id .ge. 0) then ! ### send to right first.
         tag = 11
         size = ny
-        call mpi_send(in_out_data(nx-1,:),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(nx-1,:),size,MPI_DOUBLE_PRECISION, &
           right_id,tag,HYDRO_COMM_WORLD,ierr)
      end if
      if(left_id .ge. 0) then ! receive from left
         tag = 11
         size = ny
-        call mpi_recv(in_out_data(1,:),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Recv(in_out_data(1,:),size,MPI_DOUBLE_PRECISION, &
           left_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      endif
      if(left_id .ge. 0 ) then ! ### send to left second.
         size = ny
         tag = 21
-        call mpi_send(in_out_data(2,:),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(2,:),size,MPI_DOUBLE_PRECISION, &
           left_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(right_id .ge. 0) then ! receive from right
         tag = 21
         size = ny
-        call mpi_recv(in_out_data(nx,:),size,MPI_DOUBLE_PRECISION,&
+        call MPI_Recv(in_out_data(nx,:),size,MPI_DOUBLE_PRECISION,&
           right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
      endif
@@ -319,13 +319,13 @@ subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag)
      if(right_id .ge. 0) then ! ### send to right first.
         tag = 11
         size = 2*ny
-        call mpi_send(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION, &
           right_id,tag,HYDRO_COMM_WORLD,ierr)
      end if
      if(left_id .ge. 0) then ! receive from left
         tag = 11
         size = 2*ny
-        call mpi_recv(data_r,size,MPI_DOUBLE_PRECISION,left_id,tag, &
+        call MPI_Recv(data_r,size,MPI_DOUBLE_PRECISION,left_id,tag, &
           HYDRO_COMM_WORLD,mpp_status,ierr)
        in_out_data(1,:) = in_out_data(1,:) + data_r(1,:)
        in_out_data(2,:) = in_out_data(2,:) + data_r(2,:)
@@ -334,13 +334,13 @@ subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag)
      if(left_id .ge. 0 ) then ! ### send to left second.
         size = 2*ny
         tag = 21
-        call mpi_send(in_out_data(1:2,:),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(1:2,:),size,MPI_DOUBLE_PRECISION, &
           left_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(right_id .ge. 0) then ! receive from right
         tag = 21
         size = 2*ny
-        call mpi_recv(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION,&
+        call MPI_Recv(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION,&
           right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
      endif
   endif  ! end if black for flag.
@@ -368,7 +368,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny)
          if(i .ne. my_id) then
             !block receive from other node.
             tag = 1
-            call mpi_recv(s_r,2,MPI_INTEGER,i, &
+            call MPI_Recv(s_r,2,MPI_INTEGER,i, &
                tag,HYDRO_COMM_WORLD,mpp_status,ierr)
            local_nx_size(i+1) = s_r(1)
            local_ny_size(i+1) = s_r(2)
@@ -381,7 +381,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny)
       tag = 1
       s_r(1) = local_nx
       s_r(2) = local_ny
-      call mpi_send(s_r,2,MPI_INTEGER, IO_id, &
+      call MPI_Send(s_r,2,MPI_INTEGER, IO_id, &
          tag,HYDRO_COMM_WORLD,ierr)
    end if
@@ -391,7 +391,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny)
          if(i .ne. my_id) then
             !block receive from other node.
             tag = 2
-            call mpi_recv(s_r,2,MPI_INTEGER,i, &
+            call MPI_Recv(s_r,2,MPI_INTEGER,i, &
                tag,HYDRO_COMM_WORLD,mpp_status,ierr)
            local_rt_nx_size(i+1) = s_r(1)
            local_rt_ny_size(i+1) = s_r(2)
@@ -404,7 +404,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny)
       tag = 2
       s_r(1) = rt_nx
       s_r(2) = rt_ny
-      call mpi_send(s_r,2,MPI_INTEGER, IO_id, &
+      call MPI_Send(s_r,2,MPI_INTEGER, IO_id, &
          tag,HYDRO_COMM_WORLD,ierr)
    end if
@@ -424,26 +424,26 @@ subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
      if(up_id .ge. 0 ) then ! ### send to up first.
         tag = 31
         size = nx
-        call mpi_send(in_out_data(:,ny-1),size,MPI_REAL, &
+        call MPI_Send(in_out_data(:,ny-1),size,MPI_REAL, &
           up_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(down_id .ge. 0 ) then ! receive from down
         tag = 31
         size = nx
-        call mpi_recv(in_out_data(:,1),size,MPI_REAL, &
+        call MPI_Recv(in_out_data(:,1),size,MPI_REAL, &
           down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
      endif
      if(down_id .ge. 0 ) then ! send down.
         tag = 41
         size = nx
-        call mpi_send(in_out_data(:,2),size,MPI_REAL, &
+        call MPI_Send(in_out_data(:,2),size,MPI_REAL, &
           down_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(up_id .ge. 0 ) then ! receive from upper
         tag = 41
         size = nx
-        call mpi_recv(in_out_data(:,ny),size,MPI_REAL, &
+        call MPI_Recv(in_out_data(:,ny),size,MPI_REAL, &
           up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      endif
@@ -452,13 +452,13 @@ subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
      if(up_id .ge. 0 ) then ! ### send to up first.
         tag = 31
         size = nx*2
-        call mpi_send(in_out_data(:,ny-1:ny),size,MPI_REAL, &
+        call MPI_Send(in_out_data(:,ny-1:ny),size,MPI_REAL, &
           up_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(down_id .ge. 0 ) then ! receive from down
        tag = 31
        size = nx*2
-        call mpi_recv(data_r,size,MPI_REAL, &
+        call MPI_Recv(data_r,size,MPI_REAL, &
          down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
        in_out_data(:,1) = in_out_data(:,1) + data_r(:,1)
        in_out_data(:,2) = in_out_data(:,2) + data_r(:,2)
@@ -467,13 +467,13 @@ subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
      if(down_id .ge. 0 ) then ! send down.
         tag = 41
         size = nx*2
-        call mpi_send(in_out_data(:,1:2),size,MPI_REAL, &
+        call MPI_Send(in_out_data(:,1:2),size,MPI_REAL, &
          down_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(up_id .ge. 0 ) then ! receive from upper
         tag = 41
         size = nx * 2
-        call mpi_recv(in_out_data(:,ny-1:ny),size,MPI_REAL, &
+        call MPI_Recv(in_out_data(:,ny-1:ny),size,MPI_REAL, &
          up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      endif
   endif  ! end of block flag
@@ -492,26 +492,26 @@ subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag)
      if(up_id .ge. 0 ) then ! ### send to up first.
         tag = 31
         size = nx
-        call mpi_send(in_out_data(:,ny-1),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(:,ny-1),size,MPI_DOUBLE_PRECISION, &
          up_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(down_id .ge. 0 ) then ! receive from down
         tag = 31
         size = nx
-        call mpi_recv(in_out_data(:,1),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Recv(in_out_data(:,1),size,MPI_DOUBLE_PRECISION, &
          down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
      endif
      if(down_id .ge. 0 ) then ! send down.
         tag = 41
         size = nx
-        call mpi_send(in_out_data(:,2),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(:,2),size,MPI_DOUBLE_PRECISION, &
          down_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(up_id .ge. 0 ) then ! receive from upper
         tag = 41
         size = nx
-        call mpi_recv(in_out_data(:,ny),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Recv(in_out_data(:,ny),size,MPI_DOUBLE_PRECISION, &
          up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      endif
@@ -520,13 +520,13 @@ subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag)
      if(up_id .ge. 0 ) then ! ### send to up first.
         tag = 31
         size = nx*2
-        call mpi_send(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, &
          up_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(down_id .ge. 0 ) then ! receive from down
         tag = 31
         size = nx*2
-        call mpi_recv(data_r,size,MPI_DOUBLE_PRECISION, &
+        call MPI_Recv(data_r,size,MPI_DOUBLE_PRECISION, &
          down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
        in_out_data(:,1) = in_out_data(:,1) + data_r(:,1)
        in_out_data(:,2) = in_out_data(:,2) + data_r(:,2)
@@ -535,13 +535,13 @@ subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag)
      if(down_id .ge. 0 ) then ! send down.
         tag = 41
         size = nx*2
-        call mpi_send(in_out_data(:,1:2),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Send(in_out_data(:,1:2),size,MPI_DOUBLE_PRECISION, &
          down_id,tag,HYDRO_COMM_WORLD,ierr)
      endif
      if(up_id .ge. 0 ) then ! receive from upper
         tag = 41
         size = nx * 2
-        call mpi_recv(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, &
+        call MPI_Recv(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, &
          up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
      endif
   endif  ! end of block flag
@@ -594,7 +594,7 @@ subroutine calculate_start_p()
          ! block receive from other node.
          if(i.ne.my_id) then
             tag = 1
-            call mpi_recv(r_s,2,MPI_INTEGER,i, &
+            call MPI_Recv(r_s,2,MPI_INTEGER,i, &
                tag,HYDRO_COMM_WORLD,mpp_status,ierr)
            startx(i+1) = r_s(1)
            starty(i+1) = r_s(2)
@@ -602,7 +602,7 @@ subroutine calculate_start_p()
       end do
    else
       tag = 1
-      call mpi_send(r_s,2,MPI_INTEGER, IO_id, &
+      call MPI_Send(r_s,2,MPI_INTEGER, IO_id, &
          tag,HYDRO_COMM_WORLD,ierr)
    end if
@@ -665,8 +665,8 @@ subroutine decompose_data_real (in_buff,out_buff)
    if(my_id .eq. IO_id) then
-      ! allocate the buffer to hold data as required by mpi_scatterv
-      ! be careful with the index range if using array prepared for mpi in fortran (offset_vectors)
+      ! allocate the buffer to hold data as required by MPI_Scatterv
+      ! be careful with the index range if using array prepared for MPI in fortran (offset_vectors)
       allocate(send_buff(0: (global_nx*global_ny) -1),stat = ierr)
      ! for each sub region in the global buffer linearize the data and place it in the
@@ -695,15 +695,15 @@ subroutine decompose_data_real (in_buff,out_buff)
      ! send the to each process size_vector(mpi_rank+1) data elements
      ! and store the results in out_buff
-      call mpi_scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, &
+      call MPI_Scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, &
          out_buff, size_vectors(my_id+1), MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr)
      ! remove the send buffer
      deallocate(send_buff)
    else
-      ! other processes only need to make mpi_scatterv call
-      call mpi_scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, &
+      ! other processes only need to make MPI_Scatterv call
+      call MPI_Scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, &
          out_buff, local_nx*local_ny, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr)
    end if
@@ -729,13 +729,13 @@ subroutine decompose_data_int (in_buff,out_buff)
          else
             ! send data to the rest process.
             size = local_nx_size(i+1)*local_ny_size(i+1)
-            call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,&
+            call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,&
                MPI_INTEGER, i,tag,HYDRO_COMM_WORLD,ierr)
          end if
       end do
    else
      size = local_nx*local_ny
-     call mpi_recv(out_buff,size,MPI_INTEGER,IO_id, &
+     call MPI_Recv(out_buff,size,MPI_INTEGER,IO_id, &
         tag,HYDRO_COMM_WORLD,mpp_status,ierr)
    end if
 end subroutine decompose_data_int
@@ -748,7 +748,7 @@ subroutine write_IO_int(in_buff,out_buff)
    if(my_id .ne. IO_id) then
       size = local_nx*local_ny
       tag = 2
-      call mpi_send(in_buff,size,MPI_INTEGER, IO_id, &
+      call MPI_Send(in_buff,size,MPI_INTEGER, IO_id, &
         tag,HYDRO_COMM_WORLD,ierr)
    else
       do i = 0, numprocs - 1
@@ -761,7 +761,7 @@ subroutine write_IO_int(in_buff,out_buff)
         else
            size = local_nx_size(i+1)*local_ny_size(i+1)
            tag = 2
-           call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
+           call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,&
               MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
         end if
      end do
@@ -786,7 +786,7 @@ subroutine write_IO_char_head(in, out, imageHead)
   if(my_id .ne. IO_id) then
      lenSize = imageHead(my_id+1)*len(in(1)) !! some times necessary for character arrays?
      if(lenSize .eq. 0) return
-     call mpi_send(in,lenSize,MPI_CHARACTER,IO_id,tag,HYDRO_COMM_WORLD,ierr)
+     call MPI_Send(in,lenSize,MPI_CHARACTER,IO_id,tag,HYDRO_COMM_WORLD,ierr)
   else
      do i = 0, numprocs-1
         lenSize = imageHead(i+1)*len(in(1)) !! necessary?
@@ -800,7 +800,7 @@ subroutine write_IO_char_head(in, out, imageHead)
        if(i .eq. IO_id) then
           out(theStart:theEnd) = in(1:imageHead(i+1))
        else
-           call mpi_recv(out(theStart:theEnd),lenSize,&
+           call MPI_Recv(out(theStart:theEnd),lenSize,&
              MPI_CHARACTER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        end if
     end do
@@ -826,7 +826,7 @@ subroutine write_IO_real(in_buff,out_buff)
    if(my_id .ne. IO_id) then
       size = local_nx*local_ny
       tag = 2
-      call mpi_send(in_buff,size,MPI_REAL, IO_id, &
+      call MPI_Send(in_buff,size,MPI_REAL, IO_id, &
        tag,HYDRO_COMM_WORLD,ierr)
    else
       do i = 0, numprocs - 1
@@ -839,7 +839,7 @@ subroutine write_IO_real(in_buff,out_buff)
        else
           size = local_nx_size(i+1)*local_ny_size(i+1)
           tag = 2
-           call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
+           call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,&
             MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        end if
     end do
@@ -854,7 +854,7 @@ end subroutine write_IO_real
!   if(my_id .ne. IO_id) then
!      size = local_rt_nx*local_rt_ny
!      tag = 2
-!      call mpi_send(in_buff,size,MPI_REAL, IO_id, &
+!      call MPI_Send(in_buff,size,MPI_REAL, IO_id, &
!       tag,HYDRO_COMM_WORLD,ierr)
!   else
!      do i = 0, numprocs - 1
@@ -869,7 +869,7 @@ end subroutine write_IO_real
!        else
!           size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1)
!           tag = 2
-!           call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
+!           call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,&
!            MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
!        end if
!      end do
@@ -887,14 +887,14 @@ subroutine write_IO_RT_real (in_buff,out_buff)
    if(my_id .eq. IO_id) then
-      ! allocate the buffer to hold data as required by mpi_scatterv
+      ! allocate the buffer to hold data as required by MPI_Scatterv
       !  (this will be larger than out_buff due to halo cell overlap)
-      ! be careful with the index range if using array prepared for mpi in fortran (offset_vectors)
+      ! be careful with the index range if using array prepared for MPI in fortran (offset_vectors)
       allocate(recv_buff(0: sum(size_vectors_rt) -1),stat = ierr)
      ! recieve from each process size_vector(mpi_rank+1) data elements
      ! and store the results in recv_buffer
-      call mpi_gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, &
+      call MPI_Gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, &
         recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, &
         IO_id, HYDRO_COMM_WORLD, ierr)
@@ -918,8 +918,8 @@ subroutine write_IO_RT_real (in_buff,out_buff)
      deallocate(recv_buff)
    else
-      ! other processes only need to make mpi_gatherv call
-      call mpi_gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_REAL, &
+      ! other processes only need to make MPI_Gatherv call
+      call MPI_Gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_REAL, &
         recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, &
         IO_id, HYDRO_COMM_WORLD, ierr)
    end if
@@ -936,14 +936,14 @@ subroutine write_IO_RT_int (in_buff,out_buff)
    if(my_id .eq. IO_id) then
-      ! allocate the buffer to hold data as required by mpi_scatterv
+      ! allocate the buffer to hold data as required by MPI_Scatterv
       !  (this will be larger than out_buff due to halo cell overlap)
-      ! be careful with the index range if using array prepared for mpi in fortran (offset_vectors)
+      ! be careful with the index range if using array prepared for MPI in fortran (offset_vectors)
       allocate(recv_buff(0: sum(size_vectors_rt) -1),stat = ierr)
      ! recieve from each process size_vector(mpi_rank+1) data elements
      ! and store the results in recv_buffer
-      call mpi_gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, &
+      call MPI_Gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, &
        recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, &
        IO_id, HYDRO_COMM_WORLD, ierr)
@@ -967,8 +967,8 @@ subroutine write_IO_RT_int (in_buff,out_buff)
      deallocate(recv_buff)
    else
-      ! other processes only need to make mpi_gatherv call
-      call mpi_gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_INTEGER, &
+      ! other processes only need to make MPI_Gatherv call
+      call MPI_Gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_INTEGER, &
        recv_buff, size_vectors_rt, offset_vectors_rt, MPI_INTEGER, &
        IO_id, HYDRO_COMM_WORLD, ierr)
    end if
@@ -983,7 +983,7 @@ end subroutine write_IO_RT_int
!   if(my_id .ne. IO_id) then
!      size = local_rt_nx*local_rt_ny
!      tag = 2
-!      call mpi_send(in_buff,size,MPI_INTEGER, IO_id, &
+!      call MPI_Send(in_buff,size,MPI_INTEGER, IO_id, &
!       tag,HYDRO_COMM_WORLD,ierr)
!   else
!      do i = 0, numprocs - 1
@@ -998,7 +998,7 @@ end subroutine write_IO_RT_int
!        else
!           size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1)
!           tag = 2
-!           call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
+!           call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,&
!            MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
!        end if
!      end do
@@ -1014,7 +1014,7 @@ subroutine write_IO_RT_int8(in_buff,out_buff)
    if(my_id .ne. IO_id) then
       size = local_rt_nx*local_rt_ny
       tag = 2
-      call mpi_send(in_buff,size,MPI_INTEGER8, IO_id, &
+      call MPI_Send(in_buff,size,MPI_INTEGER8, IO_id, &
        tag,HYDRO_COMM_WORLD,ierr)
    else
       do i = 0, numprocs - 1
@@ -1029,7 +1029,7 @@ subroutine write_IO_RT_int8(in_buff,out_buff)
        else
           size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1)
           tag = 2
-           call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
+           call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,&
             MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
        end if
     end do
@@ -1039,7 +1039,7 @@ end subroutine write_IO_RT_int8
 subroutine mpp_land_bcast_log1(inout)
    logical inout
    integer ierr
-   call mpi_bcast(inout,1,MPI_LOGICAL, &
+   call MPI_Bcast(inout,1,MPI_LOGICAL, &
       IO_id,HYDRO_COMM_WORLD,ierr)
 end subroutine mpp_land_bcast_log1
@@ -1048,7 +1048,7 @@ subroutine mpp_land_bcast_int(size,inout)
    integer size
    integer inout(size)
    integer ierr
-   call mpi_bcast(inout,size,MPI_INTEGER, &
+   call MPI_Bcast(inout,size,MPI_INTEGER, &
      IO_id,HYDRO_COMM_WORLD,ierr)
 end subroutine mpp_land_bcast_int
@@ -1056,7 +1056,7 @@ subroutine mpp_land_bcast_int8(size,inout)
    integer size
    integer(kind=int64) inout(size)
    integer ierr
-   call mpi_bcast(inout,size,MPI_INTEGER8, &
+   call MPI_Bcast(inout,size,MPI_INTEGER8, &
      IO_id,HYDRO_COMM_WORLD,ierr)
 end subroutine mpp_land_bcast_int8
@@ -1065,7 +1065,7 @@ subroutine mpp_land_bcast_int8_1d(inout)
    integer(kind=int64) inout(:)
    integer ierr
    len = size(inout,1)
-   call mpi_bcast(inout,len,MPI_INTEGER8, &
+   call MPI_Bcast(inout,len,MPI_INTEGER8, &
      IO_id,HYDRO_COMM_WORLD,ierr)
 end subroutine mpp_land_bcast_int8_1d
@@ -1074,7 +1074,7 @@ subroutine mpp_land_bcast_int1d(inout)
    integer inout(:)
    integer ierr
    len = size(inout,1)
-   call mpi_bcast(inout,len,MPI_INTEGER, &
+   call MPI_Bcast(inout,len,MPI_INTEGER, &
     IO_id,HYDRO_COMM_WORLD,ierr)
 end subroutine mpp_land_bcast_int1d
@@ -1084,13 +1084,13 @@ subroutine mpp_land_bcast_int1d_root(inout, rootId)
    integer, intent(in) :: rootId
    integer ierr
    len = size(inout,1)
-   call mpi_bcast(inout,len,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr)
+   call MPI_Bcast(inout,len,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr)
 end subroutine mpp_land_bcast_int1d_root
subroutine mpp_land_bcast_int1(inout) integer inout integer ierr - call mpi_bcast(inout,1,MPI_INTEGER, & + call MPI_Bcast(inout,1,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_int1 @@ -1098,13 +1098,13 @@ subroutine mpp_land_bcast_int1_root(inout, rootId) integer inout integer ierr integer, intent(in) :: rootId - call mpi_bcast(inout,1,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,1,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_int1_root subroutine mpp_land_bcast_logical(inout) logical :: inout integer ierr - call mpi_bcast(inout,1,MPI_LOGICAL, & + call MPI_Bcast(inout,1,MPI_LOGICAL, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_logical @@ -1112,20 +1112,20 @@ subroutine mpp_land_bcast_logical_root(inout, rootId) logical :: inout integer, intent(in) :: rootId integer ierr - call mpi_bcast(inout,1,MPI_LOGICAL,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,1,MPI_LOGICAL,rootId,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_logical_root subroutine mpp_land_bcast_real1(inout) real inout integer ierr - call mpi_bcast(inout,1,MPI_REAL, & + call MPI_Bcast(inout,1,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_real1 subroutine mpp_land_bcast_real1_double(inout) real*8 inout integer ierr - call mpi_bcast(inout,1,MPI_REAL8, & + call MPI_Bcast(inout,1,MPI_REAL8, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_real1_double @@ -1134,7 +1134,7 @@ subroutine mpp_land_bcast_real_1d(inout) real inout(:) integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_real, & + call MPI_Bcast(inout,len,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_real_1d @@ -1144,7 +1144,7 @@ subroutine mpp_land_bcast_real_1d_root(inout, rootId) integer, intent(in) :: rootId integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_real,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,len,MPI_REAL,rootId,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_real_1d_root subroutine mpp_land_bcast_real8_1d(inout) @@ -1152,7 +1152,7 @@ subroutine mpp_land_bcast_real8_1d(inout) real*8 inout(:) integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_double, & + call MPI_Bcast(inout,len,MPI_DOUBLE, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_real8_1d @@ -1161,7 +1161,7 @@ subroutine mpp_land_bcast_real(size1,inout) ! 
real inout(size1) real , dimension(:) :: inout integer ierr, len - call mpi_bcast(inout,size1,MPI_real, & + call MPI_Bcast(inout,size1,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_real @@ -1172,7 +1172,7 @@ subroutine mpp_land_bcast_int2d(inout) length1 = size(inout,1) length2 = size(inout,2) do k = 1, length2 - call mpi_bcast(inout(:,k),length1,MPI_INTEGER, & + call MPI_Bcast(inout(:,k),length1,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) end do end subroutine mpp_land_bcast_int2d @@ -1184,7 +1184,7 @@ subroutine mpp_land_bcast_real2(inout) length1 = size(inout,1) length2 = size(inout,2) do k = 1, length2 - call mpi_bcast(inout(:,k),length1,MPI_real, & + call MPI_Bcast(inout(:,k),length1,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end do end subroutine mpp_land_bcast_real2 @@ -1198,7 +1198,7 @@ subroutine mpp_land_bcast_real3d(inout) length3 = size(inout,3) do k = 1, length3 do j = 1, length2 - call mpi_bcast(inout(:,j,k), length1, MPI_real, & + call MPI_Bcast(inout(:,j,k), length1, MPI_REAL, & IO_id, HYDRO_COMM_WORLD, ierr) end do end do @@ -1208,7 +1208,7 @@ subroutine mpp_land_bcast_rd(size,inout) integer size real*8 inout(size) integer ierr - call mpi_bcast(inout,size,MPI_REAL8, & + call MPI_Bcast(inout,size,MPI_REAL8, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_rd @@ -1216,7 +1216,7 @@ subroutine mpp_land_bcast_char(size,inout) integer size character inout(*) integer ierr - call mpi_bcast(inout,size,MPI_CHARACTER, & + call MPI_Bcast(inout,size,MPI_CHARACTER, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_char @@ -1225,7 +1225,7 @@ subroutine mpp_land_bcast_char_root(size,inout,rootId) character inout(*) integer, intent(in) :: rootId integer ierr - call mpi_bcast(inout,size,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,size,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_char_root subroutine mpp_land_bcast_char1d(inout) @@ -1233,7 +1233,7 @@ subroutine mpp_land_bcast_char1d(inout) integer :: lenSize integer :: ierr lenSize = size(inout,1)*len(inout) - call mpi_bcast(inout,lenSize,MPI_CHARACTER, & + call MPI_Bcast(inout,lenSize,MPI_CHARACTER, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_char1d @@ -1243,7 +1243,7 @@ subroutine mpp_land_bcast_char1d_root(inout,rootId) integer :: lenSize integer :: ierr lenSize = size(inout,1)*len(inout) - call mpi_bcast(inout,lenSize,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,lenSize,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_char1d_root subroutine mpp_land_bcast_char1(inout) @@ -1251,7 +1251,7 @@ subroutine mpp_land_bcast_char1(inout) character(len=*) inout integer ierr len = LEN_TRIM(inout) - call mpi_bcast(inout,len,MPI_CHARACTER, & + call MPI_Bcast(inout,len,MPI_CHARACTER, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine mpp_land_bcast_char1 @@ -1420,13 +1420,13 @@ subroutine decompose_RT_real (in_buff,out_buff,g_nx,g_ny,nx,ny) else ! send data to the rest process. size = (iend-ibegin+1)*(jend-jbegin+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_REAL, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = nx*ny - call mpi_recv(out_buff,size,MPI_REAL,IO_id, & + call MPI_Recv(out_buff,size,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end subroutine decompose_RT_real @@ -1455,13 +1455,13 @@ subroutine decompose_RT_int (in_buff,out_buff,g_nx,g_ny,nx,ny) else ! send data to the rest process. 
size = (iend-ibegin+1)*(jend-jbegin+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = nx*ny - call mpi_recv(out_buff,size,MPI_INTEGER,IO_id, & + call MPI_Recv(out_buff,size,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end subroutine decompose_RT_int @@ -1490,13 +1490,13 @@ subroutine decompose_RT_int8 (in_buff,out_buff,g_nx,g_ny,nx,ny) else ! send data to the rest process. size = (iend-ibegin+1)*(jend-jbegin+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER8, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = nx*ny - call mpi_recv(out_buff,size,MPI_INTEGER8,IO_id, & + call MPI_Recv(out_buff,size,MPI_INTEGER8,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end subroutine decompose_RT_int8 @@ -1546,8 +1546,8 @@ subroutine wrf_LAND_set_INIT(info,total_pe,AGGFACTRT) integer :: ierr, status integer i - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr ) if(numprocs .ne. total_pe) then write(6,*) "FATAL ERROR: In wrf_LAND_set_INIT() - numprocs .ne. total_pe ",numprocs, total_pe @@ -1610,7 +1610,7 @@ end subroutine wrf_LAND_set_INIT subroutine getMy_global_id() integer ierr - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) end subroutine getMy_global_id subroutine MPP_CHANNEL_COM_REAL(Link_location,ix,jy,Link_V,size,flag) @@ -1834,14 +1834,14 @@ subroutine mpp_land_max_int1(v) if(i .ne. my_id) then !block receive from other node. tag = 101 - call mpi_recv(r1,1,MPI_INTEGER,i, & + call MPI_Recv(r1,1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(max <= r1) max = r1 end if end do else tag = 101 - call mpi_send(v,1,MPI_INTEGER, IO_id, & + call MPI_Send(v,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if call mpp_land_bcast_int1(max) @@ -1858,14 +1858,14 @@ subroutine mpp_land_max_real1(v) if(i .ne. my_id) then !block receive from other node. tag = 101 - call mpi_recv(r1,1,MPI_REAL,i, & + call MPI_Recv(r1,1,MPI_REAL,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(max <= r1) max = r1 end if end do else tag = 101 - call mpi_send(v,1,MPI_REAL, IO_id, & + call MPI_Send(v,1,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if call mpp_land_bcast_real1(max) @@ -1881,14 +1881,14 @@ subroutine mpp_same_int1(v) if(i .ne. my_id) then !block receive from other node. tag = 109 - call mpi_recv(r1,1,MPI_INTEGER,i, & + call MPI_Recv(r1,1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(v .ne. r1) v = -99 end if end do else tag = 109 - call mpi_send(v,1,MPI_INTEGER, IO_id, & + call MPI_Send(v,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if call mpp_land_bcast_int1(v) @@ -1927,11 +1927,11 @@ subroutine write_chanel_real(v,map_l2g,gnlinks,nlinks,g_v) !block receive from other node. 
tag = 109 - call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 119 - call mpi_recv(tmp_v(1:message_len),message_len,MPI_REAL,i, & + call MPI_Recv(tmp_v(1:message_len),message_len,MPI_REAL,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,message_len @@ -1960,10 +1960,10 @@ subroutine write_chanel_real(v,map_l2g,gnlinks,nlinks,g_v) end do else tag = 109 - call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(map_l2g,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 119 - call mpi_send(v,nlinks,MPI_REAL,IO_id, & + call MPI_Send(v,nlinks,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -2001,11 +2001,11 @@ subroutine write_chanel_int(v,map_l2g,gnlinks,nlinks,g_v) !block receive from other node. tag = 109 - call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 119 - call mpi_recv(tmp_v(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_v(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,message_len @@ -2034,10 +2034,10 @@ subroutine write_chanel_int(v,map_l2g,gnlinks,nlinks,g_v) end do else tag = 109 - call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(map_l2g,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 119 - call mpi_send(v,nlinks,MPI_INTEGER,IO_id, & + call MPI_Send(v,nlinks,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if if(allocated(tmp_map)) deallocate(tmp_map) @@ -2075,10 +2075,10 @@ subroutine write_chanel_int8(v,map_l2g,gnlinks,nlinks,g_v) !block receive from other node. tag = 109 - call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 119 - call mpi_recv(tmp_v(1:message_len),message_len,MPI_INTEGER8,i, & + call MPI_Recv(tmp_v(1:message_len),message_len,MPI_INTEGER8,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,message_len @@ -2107,10 +2107,10 @@ subroutine write_chanel_int8(v,map_l2g,gnlinks,nlinks,g_v) end do else tag = 109 - call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(map_l2g,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 119 - call mpi_send(v,nlinks,MPI_INTEGER8,IO_id, & + call MPI_Send(v,nlinks,MPI_INTEGER8,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if if(allocated(tmp_map)) deallocate(tmp_map) @@ -2131,10 +2131,10 @@ subroutine write_lake_real(v,nodelist_in,nlakes) if(i .ne. my_id) then !block receive from other node. tag = 129 - call mpi_recv(nodelist,nlakes,MPI_INTEGER,i, & + call MPI_Recv(nodelist,nlakes,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 139 - call mpi_recv(recv(:),nlakes,MPI_REAL,i, & + call MPI_Recv(recv(:),nlakes,MPI_REAL,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,nlakes @@ -2147,10 +2147,10 @@ subroutine write_lake_real(v,nodelist_in,nlakes) end do else tag = 129 - call mpi_send(nodelist,nlakes,MPI_INTEGER, IO_id, & + call MPI_Send(nodelist,nlakes,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 139 - call mpi_send(v,nlakes,MPI_REAL,IO_id, & + call MPI_Send(v,nlakes,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if end subroutine write_lake_real @@ -2171,10 +2171,10 @@ subroutine write_lake_char(v,nodelist_in,nlakes) if(i .ne. my_id) then !block receive from other node. 
tag = 129 - call mpi_recv(nodelist,nlakes,MPI_INTEGER,i, & + call MPI_Recv(nodelist,nlakes,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 139 - call mpi_recv(recv(:),in_len,MPI_CHARACTER,i, & + call MPI_Recv(recv(:),in_len,MPI_CHARACTER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,nlakes @@ -2187,10 +2187,10 @@ subroutine write_lake_char(v,nodelist_in,nlakes) end do else tag = 129 - call mpi_send(nodelist,nlakes,MPI_INTEGER, IO_id, & + call MPI_Send(nodelist,nlakes,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 139 - call mpi_send(v,in_len,MPI_CHARACTER,IO_id, & + call MPI_Send(v,in_len,MPI_CHARACTER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if end subroutine write_lake_char @@ -2207,7 +2207,7 @@ subroutine read_rst_crt_r(unit,out,size) 99 continue call mpp_land_bcast_int1(ierr2) if(ierr2 .ne. 0) return - call mpi_bcast(out,size,MPI_REAL, & + call MPI_Bcast(out,size,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine read_rst_crt_r @@ -2228,13 +2228,13 @@ subroutine sum_int1d(vin,nsize) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) vin(:) = vin(:) + recv(:) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vin) @@ -2249,7 +2249,7 @@ subroutine combine_int1d(vin,nsize, flag) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(recv(k) .ne. flag) then @@ -2259,7 +2259,7 @@ subroutine combine_int1d(vin,nsize, flag) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vin) @@ -2274,7 +2274,7 @@ subroutine combine_int8_1d(vin,nsize, flag) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER8,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER8,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(recv(k) .ne. flag) then @@ -2284,7 +2284,7 @@ subroutine combine_int8_1d(vin,nsize, flag) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER8,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER8,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int8_1d(vin) @@ -2309,14 +2309,14 @@ subroutine sum_real8(vin,nsize) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_DOUBLE_PRECISION,i, & + call MPI_Recv(recv,nsize,MPI_DOUBLE_PRECISION,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) vin(:) = vin(:) + recv(:) endif end do v = vin else - call mpi_send(vin,nsize,MPI_DOUBLE_PRECISION,IO_id, & + call MPI_Send(vin,nsize,MPI_DOUBLE_PRECISION,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_real(nsize,v) @@ -2329,10 +2329,10 @@ end subroutine sum_real8 ! ! if ( my_id .eq. IO_id ) then ! g_ix = ix -! call mpi_reduce( MPI_IN_PLACE, g_ix, 4, MPI_INTEGER, & +! call MPI_Reduce( MPI_IN_PLACE, g_ix, 4, MPI_INTEGER, & ! MPI_SUM, 0, HYDRO_COMM_WORLD, ierr ) ! else -! call mpi_reduce( ix, 0, 4, MPI_INTEGER, & +! call MPI_Reduce( ix, 0, 4, MPI_INTEGER, & ! MPI_SUM, 0, HYDRO_COMM_WORLD, ierr ) ! endif ! call mpp_land_bcast_int1(g_ix) @@ -2362,24 +2362,24 @@ subroutine gather_1d_real_tmp(vl,s_in,e_in,vg,sg) if(i .ne. 
my_id) then !block receive from other node. tag = 202 - call mpi_recv(index_s,2,MPI_INTEGER,i, & + call MPI_Recv(index_s,2,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 203 e = index_s(2) s = index_s(1) size = e - s + 1 - call mpi_recv(vg(s:e),size,MPI_REAL, & + call MPI_Recv(vg(s:e),size,MPI_REAL, & i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif end do else tag = 202 - call mpi_send(index_s,2,MPI_INTEGER, IO_id, & + call MPI_Send(index_s,2,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 203 - call mpi_send(vl,size,MPI_REAL,IO_id, & + call MPI_Send(vl,size,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -2390,7 +2390,7 @@ subroutine sum_real1(inout) real:: inout, send integer :: ierr send = inout - CALL MPI_ALLREDUCE(send,inout,1,MPI_REAL,MPI_SUM,HYDRO_COMM_WORLD,ierr) + call MPI_Allreduce(send,inout,1,MPI_REAL,MPI_SUM,HYDRO_COMM_WORLD,ierr) end subroutine sum_real1 subroutine sum_double(inout) @@ -2398,8 +2398,8 @@ subroutine sum_double(inout) real*8:: inout, send integer :: ierr send = inout - !yw CALL MPI_ALLREDUCE(send,inout,1,MPI_DOUBLE,MPI_SUM,HYDRO_COMM_WORLD,ierr) - CALL MPI_ALLREDUCE(send,inout,1,MPI_DOUBLE_PRECISION,MPI_SUM,HYDRO_COMM_WORLD,ierr) + !yw call MPI_Allreduce(send,inout,1,MPI_DOUBLE,MPI_SUM,HYDRO_COMM_WORLD,ierr) + call MPI_Allreduce(send,inout,1,MPI_DOUBLE_PRECISION,MPI_SUM,HYDRO_COMM_WORLD,ierr) end subroutine sum_double subroutine mpp_chrt_nlinks_collect(nlinks) @@ -2413,14 +2413,14 @@ subroutine mpp_chrt_nlinks_collect(nlinks) if(my_id .eq. IO_id) then do i = 0,numprocs -1 if(i .ne. my_id) then - call mpi_recv(mpp_nlinks(i+1),1,MPI_INTEGER,i, & + call MPI_Recv(mpp_nlinks(i+1),1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) else mpp_nlinks(i+1) = 0 end if end do else - call mpi_send(nlinks,1,MPI_INTEGER, IO_id, & + call MPI_Send(nlinks,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif @@ -2494,13 +2494,13 @@ subroutine mpp_collect_1d_int(nlinks,vinout) if(my_id .eq. IO_id) then do i = 0,numprocs -1 if(i .ne. my_id) then - call mpi_recv(buf,nlinks,MPI_INTEGER,i, & + call MPI_Recv(buf,nlinks,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) vinout = vinout + buf end if end do else - call mpi_send(vinout,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(vinout,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vinout) @@ -2523,11 +2523,11 @@ subroutine mpp_collect_1d_int_mem(nlinks,vinout) do i = 0,numprocs -1 if(i .ne. my_id) then tag = 120 - call mpi_recv(lsize,1,MPI_INTEGER,i, & + call MPI_Recv(lsize,1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(lsize .gt. 0) then tag = 121 - call mpi_recv(tmpBuf(1:lsize),lsize,MPI_INTEGER,i, & + call MPI_Recv(tmpBuf(1:lsize),lsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, lsize m = tmpBuf(k) @@ -2546,11 +2546,11 @@ subroutine mpp_collect_1d_int_mem(nlinks,vinout) end if end do tag = 120 - call mpi_send(lsize,1,MPI_INTEGER, IO_id, & + call MPI_Send(lsize,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) if(lsize .gt. 0) then tag = 121 - call mpi_send(tmpIn(1:lsize),lsize,MPI_INTEGER, IO_id, & + call MPI_Send(tmpIn(1:lsize),lsize,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -2569,12 +2569,12 @@ subroutine updateLake_seqInt(in,nsize,in0) tag = 29 if(my_id .ne. IO_id) then - call mpi_send(in,nsize,MPI_INTEGER, IO_id, & + call MPI_Send(in,nsize,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. 
IO_id) then - call mpi_recv(tmp,nsize,& + call MPI_Recv(tmp,nsize,& MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(in0(k) .ne. tmp(k)) in(k) = tmp(k) @@ -2597,12 +2597,12 @@ subroutine updateLake_seqInt8(in,nsize,in0) tag = 29 if(my_id .ne. IO_id) then - call mpi_send(in,nsize,MPI_INTEGER8, IO_id, & + call MPI_Send(in,nsize,MPI_INTEGER8, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. IO_id) then - call mpi_recv(tmp,nsize,& + call MPI_Recv(tmp,nsize,& MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(in0(k) .ne. tmp(k)) in(k) = tmp(k) @@ -2634,7 +2634,7 @@ subroutine updateLake_seq(in,nsize,in0) allocate(prev(nsize)) if (my_id == IO_id) prev = in0 - call mpi_bcast(prev, nsize, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Bcast(prev, nsize, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr) if (my_id == IO_id) then adjustment = in @@ -2642,7 +2642,7 @@ subroutine updateLake_seq(in,nsize,in0) adjustment = in - prev end if - call mpi_allreduce(adjustment, in, nsize, MPI_REAL, MPI_SUM, HYDRO_COMM_WORLD, ierr) ! TODO: check ierr! + call MPI_Allreduce(adjustment, in, nsize, MPI_REAL, MPI_SUM, HYDRO_COMM_WORLD, ierr) ! TODO: check ierr! deallocate(adjustment) deallocate(prev) @@ -2663,12 +2663,12 @@ subroutine updateLake_seq_char(in,nsize,in0) tag = 29 if(my_id .ne. IO_id) then - call mpi_send(in,in_len,MPI_CHARACTER, IO_id, & + call MPI_Send(in,in_len,MPI_CHARACTER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. IO_id) then - call mpi_recv(tmp,in_len,& + call MPI_Recv(tmp,in_len,& MPI_CHARACTER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(in0(k) .ne. tmp(k)) in(k) = tmp(k) @@ -2692,19 +2692,19 @@ subroutine updateLake_grid(in,nsize,lake_index) if(my_id .ne. IO_id) then tag = 29 - call mpi_send(in,nsize,MPI_REAL, IO_id, & + call MPI_Send(in,nsize,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 30 - call mpi_send(lake_index,nsize,MPI_INTEGER, IO_id, & + call MPI_Send(lake_index,nsize,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. IO_id) then tag = 29 - call mpi_recv(tmp,nsize,& + call MPI_Recv(tmp,nsize,& MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 30 - call mpi_recv(lake_index,nsize,& + call MPI_Recv(lake_index,nsize,& MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(lake_index(k) .gt. 0) in(k) = tmp(k) @@ -2729,7 +2729,7 @@ subroutine match1dLake(vin,nsize,flag) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(recv(k) .eq. flag) vin(k) = flag @@ -2744,7 +2744,7 @@ subroutine match1dLake(vin,nsize,flag) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vin) @@ -2753,13 +2753,13 @@ end subroutine match1dLake subroutine mpp_land_abort() implicit none integer ierr - CALL MPI_ABORT(HYDRO_COMM_WORLD,1,IERR) + call MPI_Abort(HYDRO_COMM_WORLD,1,ierr) end subroutine mpp_land_abort ! mpp_land_abort subroutine mpp_land_sync() implicit none integer ierr - call MPI_barrier( HYDRO_COMM_WORLD ,ierr) + call MPI_Barrier( HYDRO_COMM_WORLD ,ierr) if(ierr .ne. 0) call mpp_land_abort() end subroutine mpp_land_sync ! 
mpp_land_sync @@ -2770,10 +2770,10 @@ subroutine mpp_comm_scalar_real(scalar, fromImage, toImage) integer:: ierr, tag tag=2 if(my_id .eq. fromImage) & - call mpi_send(scalar, 1, MPI_REAL, & + call MPI_Send(scalar, 1, MPI_REAL, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(scalar, 1, MPI_REAL, & + call MPI_Recv(scalar, 1, MPI_REAL, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) end subroutine mpp_comm_scalar_real @@ -2785,10 +2785,10 @@ subroutine mpp_comm_scalar_char(scalar, fromImage, toImage) tag=2 length=len(scalar) if(my_id .eq. fromImage) & - call mpi_send(scalar, length, MPI_CHARACTER, & + call MPI_Send(scalar, length, MPI_CHARACTER, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(scalar, length, MPI_CHARACTER, & + call MPI_Recv(scalar, length, MPI_CHARACTER, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) end subroutine mpp_comm_scalar_char @@ -2800,14 +2800,14 @@ subroutine mpp_comm_1d_real(vector, fromImage, toImage) integer:: ierr, tag integer:: my_id, numprocs tag=2 - call MPI_COMM_RANK(HYDRO_COMM_WORLD,my_id,ierr) - call MPI_COMM_SIZE(HYDRO_COMM_WORLD,numprocs,ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD,my_id,ierr) + call MPI_Comm_size(HYDRO_COMM_WORLD,numprocs,ierr) if(numprocs > 1) then if(my_id .eq. fromImage) & - call mpi_send(vector, size(vector), MPI_REAL, & + call MPI_Send(vector, size(vector), MPI_REAL, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(vector, size(vector), MPI_REAL, & + call MPI_Recv(vector, size(vector), MPI_REAL, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) endif end subroutine mpp_comm_1d_real @@ -2820,15 +2820,15 @@ subroutine mpp_comm_1d_char(vector, fromImage, toImage) integer:: ierr, tag, totalLength integer:: my_id,numprocs tag=2 - call MPI_COMM_RANK(HYDRO_COMM_WORLD,my_id,ierr) - call MPI_COMM_SIZE(HYDRO_COMM_WORLD,numprocs,ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD,my_id,ierr) + call MPI_Comm_size(HYDRO_COMM_WORLD,numprocs,ierr) totalLength=len(vector(1))*size(vector,1) if(numprocs > 1) then if(my_id .eq. fromImage) & - call mpi_send(vector, totalLength, MPI_CHARACTER, & + call MPI_Send(vector, totalLength, MPI_CHARACTER, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(vector, totalLength, MPI_CHARACTER, & + call MPI_Recv(vector, totalLength, MPI_CHARACTER, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) endif end subroutine mpp_comm_1d_char diff --git a/src/Routing/module_NWM_io.F90 b/src/Routing/module_NWM_io.F90 index efaa6c74a..43e7da435 100644 --- a/src/Routing/module_NWM_io.F90 +++ b/src/Routing/module_NWM_io.F90 @@ -172,7 +172,7 @@ subroutine output_chrt_NWM(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -1144,7 +1144,7 @@ subroutine output_NoahMP_NWM(outDir,iGrid,output_timestep,itime,startdate,date,i ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -1857,7 +1857,7 @@ subroutine output_rt_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 
1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -2433,7 +2433,7 @@ subroutine output_lakes_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -3125,7 +3125,7 @@ subroutine output_chrtout_grd_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -3613,7 +3613,7 @@ subroutine output_lsmOut_NWM(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -4067,7 +4067,7 @@ subroutine output_frxstPts(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -4375,7 +4375,7 @@ subroutine output_chanObs_NWM(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -5090,7 +5090,7 @@ subroutine output_gw_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else diff --git a/src/Routing/module_gw_gw2d.F90 b/src/Routing/module_gw_gw2d.F90 index d2eb173cf..ae3ab1a8d 100644 --- a/src/Routing/module_gw_gw2d.F90 +++ b/src/Routing/module_gw_gw2d.F90 @@ -801,12 +801,12 @@ subroutine gwstep(ix, jx, dx, & #ifdef MPP_LAND -call mpi_reduce(delcur, mpiDelcur, 1, MPI_REAL, MPI_SUM, 0, HYDRO_COMM_WORLD, ierr) -call MPI_COMM_SIZE( HYDRO_COMM_WORLD, mpiSize, ierr ) +call MPI_Reduce(delcur, mpiDelcur, 1, MPI_REAL, MPI_SUM, 0, HYDRO_COMM_WORLD, ierr) +call MPI_Comm_size( HYDRO_COMM_WORLD, mpiSize, ierr ) if(my_id .eq. 
IO_id) delcur = mpiDelcur/mpiSize -call mpi_bcast(delcur, 1, mpi_real, 0, HYDRO_COMM_WORLD, ierr) +call MPI_Bcast(delcur, 1, MPI_REAL, 0, HYDRO_COMM_WORLD, ierr) #endif @@ -886,10 +886,10 @@ subroutine gwstep(ix, jx, dx, & #ifdef HYDRO_D #ifdef MPP_LAND - call MPI_REDUCE(dtot,gdtot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) - call MPI_REDUCE(dtoa,gdtoa,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) - call MPI_REDUCE(eocn,geocn,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) - call MPI_REDUCE(ebot,gebot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(dtot,gdtot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(dtoa,gdtoa,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(eocn,geocn,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(ebot,gebot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) if(my_id .eq. IO_id) then write (*,900) & @@ -1239,11 +1239,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (ZSPS,j)th equations. ! ! Receive (ZSPS+1,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1271,9 +1271,9 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & #endif ! ! Receive (0,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1305,11 +1305,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (ZSPS,j)th equations. ! ! Receive (ZSPS+1,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() call add_dt(ct,tf,ti,dt) @@ -1339,8 +1339,8 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & call add_dt(ct,tf,ti,dt) #endif - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) do 60 j = 1, XSPS ! Backward elimination in (0,j)th equations. 
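Illustrative aside, not part of the patch: the parysolv1/parxsolv1 hunks in module_gw_gw2d.F90 only re-capitalize the halo-exchange calls (MPI_Cart_shift, MPI_Isend, MPI_Irecv, MPI_Wait). The stand-alone sketch below shows that post-both-then-wait exchange on a 1-D periodic Cartesian communicator; the communicator and buffer names are local to the sketch, not the solver's cartGridComm variables.

! Minimal non-blocking shift exchange sketch (assumptions noted above).
program halo_shift_sketch
  use mpi
  implicit none
  integer, parameter :: n = 4
  integer :: ierr, my_id, numprocs, cartComm, source, dest
  integer :: sendReq, recvReq, istat(MPI_STATUS_SIZE)
  integer :: dims(1)
  logical :: periods(1)
  real :: sendbuf(n), recvbuf(n)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, my_id, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, numprocs, ierr)

  ! 1-D periodic process grid standing in for the solver's Cartesian communicator.
  dims(1) = numprocs
  periods(1) = .true.
  call MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, .false., cartComm, ierr)
  call MPI_Cart_shift(cartComm, 0, 1, source, dest, ierr)

  sendbuf = real(my_id)
  recvbuf = -1.0

  ! Post the send and the receive, then wait on both requests before using recvbuf,
  ! mirroring the MPI_Isend / MPI_Irecv / MPI_Wait sequence in the patch.
  call MPI_Isend(sendbuf, n, MPI_REAL, dest,   11, cartComm, sendReq, ierr)
  call MPI_Irecv(recvbuf, n, MPI_REAL, source, 11, cartComm, recvReq, ierr)
  call MPI_Wait(sendReq, istat, ierr)
  call MPI_Wait(recvReq, istat, ierr)

  if (my_id == 0) print *, 'rank 0 received', recvbuf(1), 'from rank', source
  call MPI_Finalize(ierr)
end program halo_shift_sketch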
@@ -1352,7 +1352,7 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & 70 continue 60 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else if (z_pid .lt. ZDNS) then @@ -1362,9 +1362,9 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & #endif ! ! Receive (ZSPS+1,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1397,11 +1397,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (1,j)th equations. ! ! Receive (0,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1427,8 +1427,8 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & #endif ! ! Send (ZSPS,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) #ifdef TIMING tf = click() @@ -1445,7 +1445,7 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & 110 continue 100 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else @@ -1461,11 +1461,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (1,j)th equations. ! ! Receive (0,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1550,11 +1550,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & ! ! Send (i,XSPS)th equations. ! ! Receive (i,(XSPS + 1))th equations. 
- call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1585,9 +1585,9 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Receive (i,0)th equations. - call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1618,11 +1618,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & ! ! Send (i,XSPS)th equations. ! ! Receive (i,(XSPS + 1))th equations. - call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() call add_dt(ct,tf,ti,dt) @@ -1651,8 +1651,8 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & tf = click() call add_dt(ct,tf,ti,dt) #endif - call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) do 60 i = 1, ZSPS ! Backward elimination in (i,0)th equations. @@ -1666,7 +1666,7 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & r(i,j) = r(i,j) - b(i,j)*r(i,XSPS) - c(i,j)*r(i,1) 70 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else if (x_pid .lt. XDNS) then @@ -1676,9 +1676,9 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Receive (i,XSPS+1)th equations. - call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1708,11 +1708,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Send (i,1)th equations. ! ! Receive (i,0)th equations. 
- call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1738,8 +1738,8 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Send (i,XSPS)th equations. - call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) #ifdef TIMING tf = click() call add_dt(ct,tf,ti,dt) @@ -1757,7 +1757,7 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & r(i,j) = r(i,j) - c(i,j)*r(i,1) - b(i,j)*r(i,XSPS) 110 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else @@ -1774,11 +1774,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & ! ! Send (i,1)th equations. ! ! Receive (i,0)th equations. - call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() diff --git a/src/Routing/module_reservoir_routing.F90 b/src/Routing/module_reservoir_routing.F90 index b2b20b459..bc157b2ba 100644 --- a/src/Routing/module_reservoir_routing.F90 +++ b/src/Routing/module_reservoir_routing.F90 @@ -1,5 +1,5 @@ ! Intended purpose is to provide a module for all subroutines related to -! reservoir routing, including active management, level pool, and integrating live +! reservoir routing, including active management, level pool, and integrating live ! data feeds. As of NWMv2.0, this module stub can read in a timeslice file ! to incorporate data from external sources, should a data service become available. @@ -83,7 +83,7 @@ subroutine read_reservoir_obs(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -92,7 +92,7 @@ subroutine read_reservoir_obs(domainId) ! Open up and read in the NetCDF file containing disharge data. if(myId .eq. 0) then - ! Initialize our missing flag to 0. If at any point we don't find a file, + ! Initialize our missing flag to 0. If at any point we don't find a file, ! the flag value will go to 1 to indicate no files were found. 
missingFlag = 0 diff --git a/src/utils/module_hydro_stop.F90 b/src/utils/module_hydro_stop.F90 index 724d61dce..47fe9c182 100644 --- a/src/utils/module_hydro_stop.F90 +++ b/src/utils/module_hydro_stop.F90 @@ -35,7 +35,7 @@ subroutine HYDRO_stop(msg) ! call flush(my_id+90) call mpp_land_abort() - call MPI_finalize(ierr) + call MPI_Finalize(ierr) #else stop "FATAL ERROR: Program stopped. Recompile with environment variable HYDRO_D set to 1 for enhanced debug information." #endif
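Illustrative aside, not part of the patch: the HYDRO_stop hunk above only changes the spelling of MPI_Finalize. As a generic companion, the sketch below guards start-up and shutdown with the standard MPI_Initialized / MPI_Finalized queries; it is an assumed, minimal pattern, not code taken from this repository.

! Minimal start-up/shutdown guard sketch (assumptions noted above).
program finalize_guard_sketch
  use mpi
  implicit none
  integer :: ierr
  logical :: started, finished

  call MPI_Initialized(started, ierr)
  if (.not. started) call MPI_Init(ierr)

  ! ... model work would happen here ...

  ! Finalize only once, and only if MPI was actually started.
  call MPI_Finalized(finished, ierr)
  if (.not. finished) call MPI_Finalize(ierr)
end program finalize_guard_sketch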