How to synchronize an array across MPI processes when each one updates only a part of it?

Asked: 2014-02-06 15:39:29

Tags: mpi openmpi

I have a program that I would like to parallelize with MPI. I have not worked with MPI before.

The program computes the behavior of a large number of objects over time. The data for these objects is stored in arrays, e.g. the x coordinates as double precision :: body_x(10000).

To compute the behavior of an object, information about all other objects is needed, so every thread has to hold all the data but will only update a part of it. Before each new time step, however, every thread needs the updated information from all other threads.

As far as I understand, MPI_Allgather could be used for this, but it needs a send buffer and a receive buffer. How do I synchronize an array across the different threads if each thread updates a different part of the array? Do I have to send the whole array from every thread to the master into a receive buffer, have the master update the respective parts of its array, and then, after all threads have sent their data, broadcast the array back from the master?

This is a very basic question, but I am very new to MPI and all the examples I have found are quite simple and do not cover this. Thanks for any help.

Pseudo example (assuming Fortran-style arrays with first index 1; yes, the sends/receives would better be non-blocking, they are blocking here just for simplicity):

if (master) then
   readInputFile
end if

MPI_Bcast(numberOfObject)

allocate body_arrays(numberOfObjects)

if (master) then
   fill body_arrays ! with the data from the input file
end if

MPI_Bcast(body_arrays)

objectsPerThread = numberOfObjects / threadCount

myStart = threadID * objectsPerThread + 1
myEnd   = (threadID + 1) * objectsPerThread

do while (t < t_end)

   do i = myStart, myEnd
      do stuff for body_arrays(i)
   end do

   ! here is the question
   if (.not. master) then
      MPI_Send(body_arrays, toMaster)
   else
      do i = 1, threadCount - 1
         MPI_Receive(body_arrays_receive, senderID)
         body_arrays(senderID*objectsPerThread+1 : (senderID+1)*objectsPerThread) = body_arrays_receive(senderID*objectsPerThread+1 : (senderID+1)*objectsPerThread)
      end do
   end if

   MPI_Bcast(body_arrays)
   ! ----

   t = t + dt
end do

2 Answers:

Answer 0 (score: 0):

It sounds like you want MPI_Allgather. To avoid needing a separate send buffer, you can use the MPI_IN_PLACE value. This tells MPI to use the same buffer for both the send and the receive.

See http://mpi-forum.org/docs/mpi-2.2/mpi22-report/node99.htm#Node99
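
For illustration, here is a minimal sketch of what such an in-place gather could look like for the body_x array from the question. The program and variable names are only illustrative, and it assumes numberOfObjects divides evenly among the ranks (otherwise MPI_Allgatherv, as in the other answer, is the better fit):

program allgather_in_place_sketch
  use mpi

  implicit none

  integer, parameter :: numberOfObjects = 10000
  double precision   :: body_x(numberOfObjects)
  integer :: nProcs, myRank, chunk, myStart, myEnd, i, error

  call MPI_Init(error)
  call MPI_Comm_size(MPI_COMM_WORLD, nProcs, error)
  call MPI_Comm_rank(MPI_COMM_WORLD, myRank, error)

  chunk   = numberOfObjects / nProcs   ! assumes an even split across ranks
  myStart = myRank * chunk + 1
  myEnd   = (myRank + 1) * chunk

  ! Each rank updates only its own slice of the shared array.
  body_x(:) = 0.0d0
  do i = myStart, myEnd
    body_x(i) = dble(i)                ! placeholder for the real per-object update
  end do

  ! With MPI_IN_PLACE the send count and type are ignored: each rank's contribution
  ! is taken from its own section of the receive buffer (body_x), and the gathered
  ! result ends up in body_x on every rank.
  call MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,            &
  &                  body_x, chunk, MPI_DOUBLE_PRECISION,           &
  &                  MPI_COMM_WORLD, error)

  call MPI_Finalize(error)

end program allgather_in_place_sketch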

Answer 1 (score: 0):

The chunks of the array from all processes can be combined with a call to MPI_Allgatherv. The following is a complete example in Fortran. It defines an array of size 11 (indices -5 to 5). Each process then sets its chunk of that array to some complex numbers, and finally the call to MPI_Allgatherv pulls all the chunks together. The calculation of the chunk sizes, as well as of some of the parameters that need to be passed to MPI_Allgatherv, is encapsulated in the mpi_split routine.

program test
  use mpi

  implicit none

  integer, parameter :: idp = 8
  integer, parameter :: n_tasks = 11
  real(idp), parameter :: zero = 0.0d0
  complex(idp), parameter :: czero = cmplx(zero, zero, kind=idp)

  integer :: mpi_n_procs, mpi_proc_id, error
  integer :: i, i_from, i_to
  complex(idp) :: c(-5:5)
  real(idp) :: split_size

  integer, allocatable :: recvcount(:), displs(:)

  call MPI_Init(error)
  call MPI_Comm_size(MPI_COMM_WORLD, mpi_n_procs, error)
  call MPI_Comm_rank(MPI_COMM_WORLD, mpi_proc_id, error)

  allocate(recvcount(mpi_n_procs))
  allocate(displs(mpi_n_procs))

  i_from = -5
  i_to = 5

  ! each process covers only part of the array
  call mpi_split(i_from, i_to, counts=recvcount, displs=displs)
  write(*,*) "ID", mpi_proc_id,":", i_from, "..", i_to
  if (mpi_proc_id == 0) then
    write(*,*) "Counts: ", recvcount
    write(*,*) "Displs: ", displs
  end if

  c(:) = czero
  do i = i_from, i_to
    c(i) = cmplx(real(i, idp), real(i+1, idp), kind=idp)
  end do

  call MPI_Allgatherv(c(i_from), i_to-i_from+1, MPI_DOUBLE_COMPLEX, c,         &
  &                   recvcount, displs, MPI_DOUBLE_COMPLEX, MPI_COMM_WORLD,   &
  &                   error)

  if (mpi_proc_id == 0) then
    do i = -5, 5
      write(*,*) i, ":", c(i)
    end do
  end if

  deallocate(recvcount, displs)
  call MPI_Finalize(error)

contains

  !! @description: split the range (a,b) into chunks of (nearly) equal size, where
  !! each chunk is handled by a different MPI process
  !! @param: a        On input, the lower bound of an array to be processed. On
  !!                  output, the lower index of the chunk that the MPI process
  !!                  `proc_id` should process
  !! @param: b        On input, the upper bound of the array. On output, the
  !!                  upper index of the chunk that process `proc_id` should
  !!                  process.
  !! @param: n_procs  The total number of available processes. If not given,
  !!                  this is determined automatically from the MPI environment.
  !! @param: proc_id  The (zero-based) process ID (`0 <= proc_id < n_procs`). If
  !!                  not given, the ID of the current MPI process
  !! @param: counts   If given, must be of size `n_procs`. On output, the chunk
  !!                  size for each MPI process
  !! @param: displs   If given, must be of size `n_procs`. On output, the offset
  !!                  of the first index processed by each MPI process, relative
  !!                  to the input value of `a`
  subroutine mpi_split(a, b, n_procs, proc_id, counts, displs)

    integer,           intent(inout) :: a
    integer,           intent(inout) :: b
    integer, optional, intent(in)    :: n_procs
    integer, optional, intent(in)    :: proc_id
    integer, optional, intent(inout) :: counts(:)
    integer, optional, intent(inout) :: displs(:)

    integer :: mpi_n_procs, n_tasks, mpi_proc_id, error
    integer :: aa, bb
    real(idp) :: split_size
    logical :: mpi_is_initialized

    mpi_n_procs = 1
    if (present(n_procs)) mpi_n_procs = n_procs
    mpi_proc_id = 0
    if (present(proc_id)) mpi_proc_id = proc_id
    if (.not. present(n_procs)) then
      call MPI_Comm_size(MPI_COMM_WORLD, mpi_n_procs, error)
    end if
    if (.not. present(proc_id)) then
      call MPI_Comm_rank(MPI_COMM_WORLD, mpi_proc_id, error)
    end if

    aa = a
    bb = b

    n_tasks = bb - aa + 1
    split_size = real(n_tasks, idp) / real(max(mpi_n_procs, 1), idp)
    a = nint(mpi_proc_id * split_size) + aa
    b = min(aa + nint((mpi_proc_id+1) * split_size) - 1, bb)

    if (present(counts)) then
      do mpi_proc_id = 0, mpi_n_procs-1
        counts(mpi_proc_id+1) = max(nint((mpi_proc_id+1) * split_size)         &
        &                           - nint((mpi_proc_id) * split_size), 0)
      end do
    end if

    if (present(displs)) then
      do mpi_proc_id = 0, mpi_n_procs-1
        displs(mpi_proc_id+1) = min(nint(mpi_proc_id * split_size), bb-aa)
      end do
    end if

  end subroutine mpi_split

end program
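
For reference, the example can be compiled and run with the usual MPI wrappers, e.g. mpif90 test.f90 -o test followed by mpirun -np 3 ./test (the exact command names depend on your MPI installation). With 3 processes, mpi_split assigns each rank a chunk of 3 or 4 of the 11 array elements, and rank 0 prints the chunk sizes, the displacements, and the fully assembled array at the end.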