I have asked this question several times but have not found an answer that solves my problem. I want to be able to send a linked list in Fortran to another process via MPI. I have done something similar before, where the derived data type in the linked list was as follows:
type a
   integer :: varA
   type(a), pointer :: next => null()
   real :: varB
end type a
The way I did it was to create an MPI data type containing all the varA values and receive it as an array of integers, and then do the same for varB.
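A minimal sketch of one way to realize that per-variable approach (my reconstruction for illustration only: the program and variable names are invented, and it assumes two list nodes and two ranks) is to use MPI_TYPE_CREATE_HINDEXED over the absolute addresses of the varA fields, send from MPI_BOTTOM, and receive a plain contiguous integer array:

program varA_sketch
   use mpi
   implicit none
   type a
      integer :: varA
      type(a), pointer :: next => null()
      real :: varB
   end type a
   type(a), pointer :: head, p
   integer :: nprocs, myrank, ierr, i, n, intListType
   integer, allocatable :: blocklens(:), recvA(:)
   integer(MPI_ADDRESS_KIND), allocatable :: displs(:)
   integer :: istat(MPI_STATUS_SIZE)

   call mpi_init(ierr)
   call mpi_comm_size(MPI_COMM_WORLD, nprocs, ierr)
   call mpi_comm_rank(MPI_COMM_WORLD, myrank, ierr)
   n = 2   ! node count, assumed known on both ranks

   if (myrank == 0) then
      ! build a two-node list: (2, 4.0) -> (3, 5.0)
      allocate(head);      head%varA = 2;      head%varB = 4.0
      allocate(head%next); head%next%varA = 3; head%next%varB = 5.0
      ! one block per node, located at the absolute address of its varA
      allocate(blocklens(n), displs(n))
      blocklens = 1
      p => head
      do i = 1, n
         call MPI_GET_ADDRESS(p%varA, displs(i), ierr)
         p => p%next
      end do
      call MPI_TYPE_CREATE_HINDEXED(n, blocklens, displs, MPI_INTEGER, intListType, ierr)
      call MPI_TYPE_COMMIT(intListType, ierr)
      ! absolute addresses, so the send buffer argument is MPI_BOTTOM
      call MPI_SEND(MPI_BOTTOM, 1, intListType, 1, 0, MPI_COMM_WORLD, ierr)
      call MPI_TYPE_FREE(intListType, ierr)
   else
      ! the varA values arrive as an ordinary contiguous integer array
      allocate(recvA(n))
      call MPI_RECV(recvA, n, MPI_INTEGER, 0, 0, MPI_COMM_WORLD, istat, ierr)
      print *, 'received varA values:', recvA
   end if
   call mpi_finalize(ierr)
end program varA_sketch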
What I want to do now is create the linked list and then pack all the varA and varB values together to form a single MPI data type. The code that does this is given below.
PROGRAM TEST
   USE MPI
   IMPLICIT NONE

   TYPE a
      INTEGER :: b
      REAL :: e
      TYPE(a), POINTER :: nextPacketInList => NULL()
   END TYPE

   TYPE PacketComm
      INTEGER :: numPacketsToComm
      TYPE(a), POINTER :: PacketListHeadPtr => NULL()
      TYPE(a), POINTER :: PacketListTailPtr => NULL()
   END TYPE PacketComm

   TYPE(PacketComm), DIMENSION(:), ALLOCATABLE :: PacketCommArray
   INTEGER :: packPacketDataType !New data type
   INTEGER :: ierr, size, rank, dest, ind
   INTEGER :: b
   REAL :: e

   CALL MPI_INIT(ierr)
   CALL MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)
   CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

   IF(.NOT. ALLOCATED(PacketCommArray)) THEN
      ALLOCATE(PacketCommArray(0:size-1), STAT=ierr)
      DO ind=0, size-1
         PacketCommArray(ind)%numPacketsToComm = 0
      END DO
   ENDIF

   b = 2
   e = 4
   dest = 1
   CALL addPacketToList(b, e, dest)

   b = 3
   e = 5
   dest = 1
   CALL addPacketToList(b, e, dest)

   dest = 1
   CALL packPacketList(dest)

   IF(rank == 0) THEN
      dest = 1
      CALL sendPacketList(dest)
   ELSE
      CALL recvPacketList()
   ENDIF

   CALL MPI_FINALIZE(ierr)
CONTAINS
   SUBROUTINE addPacketToList(b, e, rank)
      IMPLICIT NONE
      INTEGER :: b, rank, ierr
      REAL :: e
      TYPE(a), POINTER :: head

      IF(.NOT. ASSOCIATED(PacketCommArray(rank)%PacketListHeadPtr)) THEN
         ALLOCATE(PacketCommArray(rank)%PacketListHeadPtr, STAT=ierr)
         PacketCommArray(rank)%PacketListHeadPtr%b = b
         PacketCommArray(rank)%PacketListHeadPtr%e = e
         PacketCommArray(rank)%PacketListHeadPtr%nextPacketInList => NULL()
         PacketCommArray(rank)%PacketListTailPtr => PacketCommArray(rank)%PacketListHeadPtr
         PacketCommArray(rank)%numPacketsToComm = PacketCommArray(rank)%numPacketsToComm+1
      ELSE
         ALLOCATE(PacketCommArray(rank)%PacketListTailPtr%nextPacketInList, STAT=ierr)
         PacketCommArray(rank)%PacketListTailPtr => PacketCommArray(rank)%PacketListTailPtr%nextPacketInList
         PacketCommArray(rank)%PacketListTailPtr%b = b
         PacketCommArray(rank)%PacketListTailPtr%e = e
         PacketCommArray(rank)%PacketListTailPtr%nextPacketInList => NULL()
         PacketCommArray(rank)%numPacketsToComm = PacketCommArray(rank)%numPacketsToComm+1
      ENDIF
   END SUBROUTINE addPacketToList
   SUBROUTINE packPacketList(rank)
      IMPLICIT NONE
      INTEGER :: rank
      INTEGER :: numListNodes
      INTEGER(kind=MPI_ADDRESS_KIND), DIMENSION(:), ALLOCATABLE :: listNodeAddr
      INTEGER(kind=MPI_ADDRESS_KIND), DIMENSION(:), ALLOCATABLE :: listNodeDispl
      INTEGER, DIMENSION(:), ALLOCATABLE :: listNodeTypes
      INTEGER, DIMENSION(:), ALLOCATABLE :: listNodeCount
      TYPE(a), POINTER :: head
      INTEGER :: numNode

      head => PacketCommArray(rank)%PacketListHeadPtr
      numListNodes = PacketCommArray(rank)%numPacketsToComm
      PRINT *, ' Number of nodes to allocate for rank ', rank, ' is ', numListNodes

      ALLOCATE(listNodeTypes(2*numListNodes), stat=ierr)
      ALLOCATE(listNodeCount(2*numListNodes), stat=ierr)

      DO numNode=1, 2*numListNodes, 2
         listNodeTypes(numNode) = MPI_INTEGER
         listNodeTypes(numNode+1) = MPI_REAL
      END DO

      DO numNode=1, 2*numListNodes, 2
         listNodeCount(numNode) = 1
         listNodeCount(numNode+1) = 1
      END DO

      ALLOCATE(listNodeAddr(2*numListNodes), stat=ierr)
      ALLOCATE(listNodeDispl(2*numListNodes), stat=ierr)

      numNode = 1
      DO WHILE(ASSOCIATED(head))
         CALL MPI_GET_ADDRESS(head%b, listNodeAddr(numNode), ierr)
         CALL MPI_GET_ADDRESS(head%e, listNodeAddr(numNode+1), ierr)
         numNode = numNode + 2
         head => head%nextPacketInList
      END DO

      DO numNode=1, UBOUND(listNodeAddr,1)
         listNodeDispl(numNode) = listNodeAddr(numNode) - listNodeAddr(1)
      END DO

      CALL MPI_TYPE_CREATE_STRUCT(UBOUND(listNodeAddr,1), listNodeCount, listNodeDispl, listNodeTypes, packPacketDataType, ierr)
      CALL MPI_TYPE_COMMIT(packPacketDataType, ierr)
   END SUBROUTINE packPacketList
   SUBROUTINE sendPacketList(rank)
      IMPLICIT NONE
      INTEGER :: rank, ierr, numNodes
      TYPE(a), POINTER :: head

      head => PacketCommArray(rank)%PacketListHeadPtr
      numNodes = PacketCommArray(rank)%numPacketsToComm
      CALL MPI_SSEND(head%b, 1, packPacketDataType, rank, 0, MPI_COMM_WORLD, ierr)
   END SUBROUTINE sendPacketList
   SUBROUTINE recvPacketList
      IMPLICIT NONE
      TYPE(a), POINTER :: head
      TYPE(a), DIMENSION(:), ALLOCATABLE :: RecvPacketCommArray
      INTEGER, DIMENSION(:), ALLOCATABLE :: recvB
      INTEGER :: numNodes, ierr, numNode
      INTEGER, DIMENSION(MPI_STATUS_SIZE) :: status

      head => PacketCommArray(rank)%PacketListHeadPtr
      numNodes = PacketCommArray(rank)%numPacketsToComm
      ALLOCATE(RecvPacketCommArray(numNodes), stat=ierr)
      ALLOCATE(recvB(numNodes), stat=ierr)
      CALL MPI_RECV(RecvPacketCommArray, 1, packPacketDataType, 0, 0, MPI_COMM_WORLD, status, ierr)

      DO numNode=1, numNodes
         PRINT *, ' value in b', RecvPacketCommArray(numNode)%b
         PRINT *, ' value in e', RecvPacketCommArray(numNode)%e
      END DO
   END SUBROUTINE recvPacketList
END PROGRAM TEST
So basically I create a linked list with two nodes containing the following data:
node 1: b = 2, e = 4
node 2: b = 3, e = 5
When I run this code on two cores, the output I get on core 1 is:
value in b 2
value in e 4.000000
value in b 0
value in e 0.0000000E+00
So my code seems to send the data in the first node of the linked list correctly, but not the second. Can someone tell me whether what I am trying to do is feasible, and what is wrong with this code? I know I could send the b values from all the nodes together and then send the e values together, but my derived data type will probably contain more variables (including arrays), and I want to be able to send all the data in one go instead of using multiple sends.
Thanks
Answer 0 (score: 0)
I do not find that code easy to read, but it seems you expect the receive buffer to receive contiguous data, which is not the case. The odd type you construct by computing address offsets will not match the receive buffer. To illustrate this, I thought I could put together this simple example (it was written quickly, so don't take it as an example of good code):
program example
   use mpi
   integer :: nprocs, myrank
   integer :: buf(4)
   integer :: n_elements
   integer :: len_element(2)
   integer(MPI_ADDRESS_KIND) :: disp_element(2)
   integer :: type_element(2)
   integer :: newtype
   integer :: istat(MPI_STATUS_SIZE)
   integer :: ierr

   call mpi_init(ierr)
   call mpi_comm_size(mpi_comm_world, nprocs, ierr)
   call mpi_comm_rank(mpi_comm_world, myrank, ierr)

   ! simple example illustrating mpi_type_create_struct
   ! take an integer array buf(4):
   !   on rank 0: [ 7, 2, 6, 4 ]
   !   on rank 1: [ 1, 1, 1, 1 ]
   ! and we create a struct to send only elements 1 and 3
   ! so that on rank 1 we'll get [7, 1, 6, 1]
   if (myrank == 0) then
      buf = [7, 2, 6, 4]
   else
      buf = 1
   end if

   n_elements = 2
   len_element = 1
   disp_element(1) = 0
   disp_element(2) = 8
   type_element = MPI_INTEGER

   call mpi_type_create_struct(n_elements, len_element, disp_element, type_element, newtype, ierr)
   call mpi_type_commit(newtype, ierr)

   write(6,'(1x,a,i2,1x,a,4i2)') 'SEND| rank ', myrank, 'buf = ', buf

   if (myrank == 0) then
      call mpi_send (buf, 1, newtype, 1, 13, MPI_COMM_WORLD, ierr)
   else
      call mpi_recv (buf, 1, newtype, 0, 13, MPI_COMM_WORLD, istat, ierr)
      ! the below call does not scatter the received integers, try to see the difference
      !call mpi_recv (buf, 2, MPI_INTEGER, 0, 13, MPI_COMM_WORLD, istat, ierr)
   end if

   write(6,'(1x,a,i2,1x,a,4i2)') 'RECV| rank ', myrank, 'buf = ', buf
end program
I hope this makes it clear that the receive buffer must accommodate whatever offsets are in the constructed type, and that it does not receive contiguous data.
EDIT: Updated the code to illustrate a different receive type that does not scatter the data.
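Coming back to the original program, one possible repair along these lines (a sketch only: it assumes both ranks already know the node count in advance, and the names are invented, loosely following the question) is to describe the scattered list nodes with absolute addresses and send from MPI_BOTTOM, while the receiver builds its own struct type over the addresses of two plain contiguous arrays:

program listfix_sketch
   use mpi
   implicit none
   type node
      integer :: b
      real    :: e
      type(node), pointer :: next => null()
   end type node
   type(node), pointer :: head, p
   integer :: nprocs, myrank, ierr, i, n
   integer :: sendtype, recvtype
   integer, allocatable :: counts(:), types(:)
   integer(MPI_ADDRESS_KIND), allocatable :: addrs(:)
   integer, allocatable :: recvB(:)
   real, allocatable :: recvE(:)
   integer :: istat(MPI_STATUS_SIZE)

   call mpi_init(ierr)
   call mpi_comm_size(MPI_COMM_WORLD, nprocs, ierr)
   call mpi_comm_rank(MPI_COMM_WORLD, myrank, ierr)
   n = 2   ! node count, assumed known on both ranks

   ! each node contributes one integer and one real, in send order
   allocate(counts(2*n), types(2*n), addrs(2*n))
   counts = 1
   types(1:2*n:2) = MPI_INTEGER
   types(2:2*n:2) = MPI_REAL

   if (myrank == 0) then
      ! build the two-node list from the question: (2, 4.0) -> (3, 5.0)
      allocate(head);      head%b = 2;      head%e = 4.0
      allocate(head%next); head%next%b = 3; head%next%e = 5.0
      ! absolute address of every field, so nothing has to be contiguous
      p => head; i = 1
      do while (associated(p))
         call MPI_GET_ADDRESS(p%b, addrs(i),   ierr)
         call MPI_GET_ADDRESS(p%e, addrs(i+1), ierr)
         i = i + 2
         p => p%next
      end do
      call MPI_TYPE_CREATE_STRUCT(2*n, counts, addrs, types, sendtype, ierr)
      call MPI_TYPE_COMMIT(sendtype, ierr)
      call MPI_SSEND(MPI_BOTTOM, 1, sendtype, 1, 0, MPI_COMM_WORLD, ierr)
      call MPI_TYPE_FREE(sendtype, ierr)
   else
      ! the receiver describes its own layout: two contiguous arrays
      allocate(recvB(n), recvE(n))
      do i = 1, n
         call MPI_GET_ADDRESS(recvB(i), addrs(2*i-1), ierr)
         call MPI_GET_ADDRESS(recvE(i), addrs(2*i),   ierr)
      end do
      call MPI_TYPE_CREATE_STRUCT(2*n, counts, addrs, types, recvtype, ierr)
      call MPI_TYPE_COMMIT(recvtype, ierr)
      call MPI_RECV(MPI_BOTTOM, 1, recvtype, 0, 0, MPI_COMM_WORLD, istat, ierr)
      call MPI_TYPE_FREE(recvtype, ierr)
      do i = 1, n
         print *, 'value in b', recvB(i), ' value in e', recvE(i)
      end do
   end if
   call mpi_finalize(ierr)
end program listfix_sketch

Run on two ranks, this should print both (b, e) pairs on rank 1. The point is the same as in the example above: the type signatures of the send and receive must match (here, n pairs of integer and real), but each side describes its own buffer layout, so the arbitrary gaps between the sender's list nodes never have to line up with anything on the receiver.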