I am having trouble sending and receiving data inside a do loop. Consider the following code:
      include 'mpif.h'
      parameter (NRA = 4)
      parameter (NCA = 4)
      parameter (MASTER = 0)
      parameter (FROM_MASTER = 1)
      parameter (FROM_WORKER = 2)
      integer numtasks,taskid,numworkers,source,dest,mtype,
     &        cols,avecol,extra,offset,i,j,k,t,start0,end0,ierr,rc
      integer status(MPI_STATUS_SIZE)
      real*8 a(NRA,NCA)
      call MPI_INIT( ierr )
      call MPI_COMM_RANK( MPI_COMM_WORLD, taskid, ierr )
      call MPI_COMM_SIZE( MPI_COMM_WORLD, numtasks, ierr )
      numworkers = numtasks-1
      print *, 'task ID= ',taskid
C *************************** master task *************************************
      if (taskid .eq. MASTER) then
        if (numworkers .NE. 2) then
          print *, 'Please use 3 processors'
          print *, 'Quitting...'
          call MPI_ABORT(MPI_COMM_WORLD,rc,ierr)
        endif
C       Initialize A
        do 30 i=1, NRA
          do 30 j=1, NCA
            a(i,j) = (i-1)+(j-1)
   30   continue
C       Send matrix data to the worker tasks
        avecol = NCA/numworkers
        extra  = mod(NCA,numworkers)
        offset = 1
        mtype  = FROM_MASTER
        do 50 dest=1, numworkers
          if (dest .le. extra) then
            cols = avecol + 1
          else
            cols = avecol
          endif
          write(*,*) ' sending',cols,' cols to task',dest
          call MPI_SEND(offset,1,MPI_INTEGER,dest,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(cols,1,MPI_INTEGER,dest,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,
     &                  dest,mtype,MPI_COMM_WORLD,ierr)
          offset = offset + cols
   50   continue
C       Receive results from worker tasks
        mtype = FROM_WORKER
        do 60 i=1, numworkers
          source = i
          call MPI_RECV(offset,1,MPI_INTEGER,source,
     &                  mtype,MPI_COMM_WORLD,status,ierr)
          call MPI_RECV(cols,1,MPI_INTEGER,source,
     &                  mtype,MPI_COMM_WORLD,status,ierr)
          call MPI_RECV(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,
     &                  source,mtype,MPI_COMM_WORLD,status,ierr)
   60   continue
C       Print results
        do 90 i=1, NRA
          do 80 j=1, NCA
            write(*,70) a(i,j)
   70       format(2x,f8.2,$)
   80     continue
          print *, ' '
   90   continue
      endif
C *************************** worker task *************************************
      if (taskid > MASTER) then
C       Receive matrix data from master task
        mtype = FROM_MASTER
        call MPI_RECV(offset,1,MPI_INTEGER,MASTER,
     &                mtype,MPI_COMM_WORLD,status,ierr)
        call MPI_RECV(cols,1,MPI_INTEGER,MASTER,
     &                mtype,MPI_COMM_WORLD,status,ierr)
        call MPI_RECV(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,MASTER,
     &                mtype,MPI_COMM_WORLD,status,ierr)
        start0 = offset
        end0   = offset+cols-1
C       Scale the assigned columns by t, once per iteration
        do t = 1, 5
          do i=1, NRA
            do j=start0,end0
              a(i,j) = a(i,j)*t
            enddo
          enddo
C         Send the results of this iteration back to the master
          mtype = FROM_WORKER
          call MPI_SEND(offset,1,MPI_INTEGER,MASTER,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(cols,1,MPI_INTEGER,MASTER,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,
     &                  MASTER,mtype,MPI_COMM_WORLD,ierr)
        enddo
      endif
      call MPI_FINALIZE(ierr)
      end
I want to print the matrix a to the screen inside the do loop. When I run the code, it prints only once, i.e. for the first pass of the do loop (t = 1). How can I modify the code so that the matrix a is printed to the screen after every iteration of the computation?
Answer 0 (score: 0)
I figured it out. The master has to receive inside a loop that matches the loop in which the workers send their results. The modified code:
      include 'mpif.h'
      parameter (NRA = 4)
      parameter (NCA = 4)
      parameter (MASTER = 0)
      parameter (FROM_MASTER = 1)
      parameter (FROM_WORKER = 2)
      integer numtasks,taskid,numworkers,source,dest,mtype,
     &        cols,avecol,extra,offset,i,j,k,t,start0,end0,ierr,rc
      integer status(MPI_STATUS_SIZE)
      real*8 a(NRA,NCA)
      call MPI_INIT( ierr )
      call MPI_COMM_RANK( MPI_COMM_WORLD, taskid, ierr )
      call MPI_COMM_SIZE( MPI_COMM_WORLD, numtasks, ierr )
      numworkers = numtasks-1
      print *, 'task ID= ',taskid
C *************************** master task *************************************
      if (taskid .eq. MASTER) then
        if (numworkers .NE. 2) then
          print *, 'Please use 3 processors'
          print *, 'Quitting...'
          call MPI_ABORT(MPI_COMM_WORLD,rc,ierr)
        endif
C       Initialize A
        do 30 i=1, NRA
          do 30 j=1, NCA
            a(i,j) = (i-1)+(j-1)
   30   continue
C       Send matrix data to the worker tasks
        avecol = NCA/numworkers
        extra  = mod(NCA,numworkers)
        offset = 1
        mtype  = FROM_MASTER
        do 50 dest=1, numworkers
          if (dest .le. extra) then
            cols = avecol + 1
          else
            cols = avecol
          endif
          write(*,*) ' sending',cols,' cols to task',dest
          call MPI_SEND(offset,1,MPI_INTEGER,dest,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(cols,1,MPI_INTEGER,dest,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,
     &                  dest,mtype,MPI_COMM_WORLD,ierr)
          offset = offset + cols
   50   continue
C       Receive results from the workers once per iteration of t,
C       matching the loop in which the workers send
        do t = 1, 5
          mtype = FROM_WORKER
          do 60 i=1, numworkers
            source = i
            call MPI_RECV(offset,1,MPI_INTEGER,source,
     &                    mtype,MPI_COMM_WORLD,status,ierr)
            call MPI_RECV(cols,1,MPI_INTEGER,source,
     &                    mtype,MPI_COMM_WORLD,status,ierr)
            call MPI_RECV(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,
     &                    source,mtype,MPI_COMM_WORLD,status,ierr)
   60     continue
C         Print the results of this iteration
          do 90 i=1, NRA
            do 80 j=1, NCA
              write(*,70) a(i,j)
   70         format(2x,f8.2,$)
   80       continue
            print *, ' '
   90     continue
        enddo
      endif
C *************************** worker task *************************************
      if (taskid > MASTER) then
C       Receive matrix data from master task
        mtype = FROM_MASTER
        call MPI_RECV(offset,1,MPI_INTEGER,MASTER,
     &                mtype,MPI_COMM_WORLD,status,ierr)
        call MPI_RECV(cols,1,MPI_INTEGER,MASTER,
     &                mtype,MPI_COMM_WORLD,status,ierr)
        call MPI_RECV(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,MASTER,
     &                mtype,MPI_COMM_WORLD,status,ierr)
        start0 = offset
        end0   = offset+cols-1
C       Scale the assigned columns by t, once per iteration
        do t = 1, 5
          do i=1, NRA
            do j=start0,end0
              a(i,j) = a(i,j)*t
            enddo
          enddo
C         Send the results of this iteration back to the master
          mtype = FROM_WORKER
          call MPI_SEND(offset,1,MPI_INTEGER,MASTER,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(cols,1,MPI_INTEGER,MASTER,mtype,
     &                  MPI_COMM_WORLD,ierr)
          call MPI_SEND(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION,
     &                  MASTER,mtype,MPI_COMM_WORLD,ierr)
        enddo
      endif
      call MPI_FINALIZE(ierr)
      end
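The essential change is that the master's receives (and the print) now live inside the same do t = 1,5 loop the workers use to send, so each of the five sends per worker is matched by a receive. Below is a minimal, self-contained sketch of that matched send/receive pattern, using an invented scalar payload (val) and a literal tag instead of the matrix columns; it illustrates the structure, not the program above.

      program matched_pattern
      include 'mpif.h'
      integer taskid, numtasks, source, t, ierr
      integer status(MPI_STATUS_SIZE)
      real*8 val
      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, taskid, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr)
      if (taskid .eq. 0) then
C       Master: post one receive per worker inside the same t loop
C       the workers use to send, then print, once per iteration
        do t = 1, 5
          do source = 1, numtasks-1
            call MPI_RECV(val, 1, MPI_DOUBLE_PRECISION, source, 2,
     &                    MPI_COMM_WORLD, status, ierr)
            print *, 'iteration', t, ' worker', source, ' value', val
          enddo
        enddo
      else
C       Worker: one send per iteration of the t loop
        do t = 1, 5
          val = dble(taskid*t)
          call MPI_SEND(val, 1, MPI_DOUBLE_PRECISION, 0, 2,
     &                  MPI_COMM_WORLD, ierr)
        enddo
      endif
      call MPI_FINALIZE(ierr)
      end

The same structure carries over to the full program: as long as every MPI_SEND issued by a worker in the t loop has a matching MPI_RECV on the master in that same iteration, the matrix can be printed after each pass of the computation.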