I am trying to map a vector A(m) onto B(m) using a mapping map(m), i.e. use one-sided communication to move the elements of A into B on the appropriate process, as given by map. There is an example of this in the MPI standard. I wrote a small main program around that subroutine and copied the example subroutine. Assuming I have only 2 processes, I try to transfer all the elements from proc 0 to proc 1 and vice versa. However, I get a segmentation fault with the code below.
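Just to make the intent concrete: with 2 processes, the result I expect is what this hypothetical two-sided exchange would produce (each rank sends its whole A into the other rank's B; this snippet is only an illustration of the intent, not part of my program):

    call MPI_SENDRECV(A, m, MPI_REAL, 1-rank, 0, &   ! send my A to the other rank
                      B, m, MPI_REAL, 1-rank, 0, &   ! receive the other rank's A into my B
                      MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierror)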
I get the same problem with both the Intel and gfortran compilers. I compile with:
mpif90 -O0 -debug -traceback -check -ftrapuv mpimapp.f90
and run with: mpirun -np 2 ./a.out
module mpi
  include "mpif.h"
end module mpi

program mpimap
  use mpi
  implicit none
  integer, parameter :: m = 10
  REAL A(m), B(m)
  integer map(m)
  integer rank, nproc, ierror, tag, status(MPI_STATUS_SIZE), i

  call MPI_INIT(ierror)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierror)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)
  print *, 'node', rank, ': Hello world', nproc

  A(:) = (rank) * .10

  do i = 1, m
    if (rank == 0) then
      map(i) = 1
    else
      map(i) = 0
    endif
    print *, 'node', rank, A(m), map(i)
  enddo

  Call MPI_BARRIER(MPI_COMM_WORLD, IERROR)
  Call MAPVALS(A, B, map, m, MPI_COMM_WORLD, nproc)

  call MPI_FINALIZE(ierror)
end program mpimap
SUBROUTINE MAPVALS(A, B, map, m, comm, p)
  USE MPI
  INTEGER m, map(m), comm, p
  REAL A(m), B(m)
  integer rank
  INTEGER otype(p), oindex(m), &   ! used to construct origin datatypes
          ttype(p), tindex(m), &   ! used to construct target datatypes
          count(p), total(p), &
          win, ierr
  INTEGER (KIND=MPI_ADDRESS_KIND) lowerbound, sizeofreal

  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

  ! This part does the work that depends on the locations of B.
  ! Can be reused while this does not change
  CALL MPI_TYPE_GET_EXTENT(MPI_REAL, lowerbound, sizeofreal, ierr)
  CALL MPI_WIN_CREATE(B, m*sizeofreal, sizeofreal, MPI_INFO_NULL, &
                      comm, win, ierr)
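  ! (B is exposed as the RMA window; since the displacement unit is
  !  sizeofreal, target displacements are counted in units of one REAL)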
  ! This part does the work that depends on the value of map and
  ! the locations of the arrays.
  ! Can be reused while these do not change

  ! Compute number of entries to be received from each process
  DO i=1,p
    count(i) = 0
  END DO
  DO i=1,m
    j = map(i)/m+1
    count(j) = count(j)+1
  END DO
  total(1) = 0
  DO i=2,p
    total(i) = total(i-1) + count(i-1)
  END DO
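  ! (total(j) is now the starting offset into oindex/tindex for the
  !  entries that come from process j-1)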
  DO i=1,p
    count(i) = 0
  END DO

  ! compute origin and target indices of entries.
  ! entry i at current process is received from location
  ! k at process (j-1), where map(i) = (j-1)*m + (k-1),
  ! j = 1..p and k = 1..m
  DO i=1,m
    j = map(i)/m+1
    k = MOD(map(i),m)+1
    count(j) = count(j)+1
    oindex(total(j) + count(j)) = i
    tindex(total(j) + count(j)) = k
  END DO

  ! create origin and target datatypes for each get operation
  DO i=1,p
    CALL MPI_TYPE_CREATE_INDEXED_BLOCK(count(i), 1, oindex(total(i)+1), &
                                       MPI_REAL, otype(i), ierr)
    CALL MPI_TYPE_COMMIT(otype(i), ierr)
    CALL MPI_TYPE_CREATE_INDEXED_BLOCK(count(i), 1, tindex(total(i)+1), &
                                       MPI_REAL, ttype(i), ierr)
    CALL MPI_TYPE_COMMIT(ttype(i), ierr)
  END DO

  ! this part does the assignment itself
  CALL MPI_WIN_FENCE(0, win, ierr)
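  ! (each GET reads the entries of B selected by ttype(i) on process i-1
  !  into the local A at the positions selected by otype(i))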
  DO i=1,p
    CALL MPI_GET(A, 1, otype(i), i-1, 0, 1, ttype(i), win, ierr)
  END DO
  CALL MPI_WIN_FENCE(0, win, ierr)

  CALL MPI_WIN_FREE(win, ierr)
  DO i=1,p
    CALL MPI_TYPE_FREE(otype(i), ierr)
    CALL MPI_TYPE_FREE(ttype(i), ierr)
  END DO

  RETURN
END SUBROUTINE MAPVALS