在Fortran 90中尝试使用MPI-IO编写文件时遇到问题。如果我执行以下操作,请使用MPI_File_Set_View
program test
implicit none
include "mpif.h"
integer :: myrank, nproc, fhandle, ierr
integer :: xpos, ypos
integer, parameter :: loc_x=10, loc_y=10
integer :: loc_dim
integer :: nx=2, ny=2
real(8), dimension(loc_x, loc_y) :: data, data_read
integer :: written_arr
integer, dimension(2) :: wa_size, wa_subsize, wa_start
integer :: int_size, double_size
integer(kind=MPI_OFFSET_KIND) :: offset
call MPI_Init(ierr)
call MPI_Comm_Rank(MPI_COMM_WORLD, myrank, ierr)
call MPI_Comm_Size(MPI_COMM_WORLD, nproc, ierr)
xpos = mod(myrank, nx)
ypos = mod(myrank/nx, ny)
data = myrank
loc_dim = loc_x*loc_y
! Write using MPI_File_Set_View
wa_size = (/ nx*loc_x, ny*loc_y /)
wa_subsize = (/ loc_x, loc_y /)
wa_start = (/ xpos, ypos /)*wa_subsize
call MPI_Type_Create_Subarray(2, wa_size, wa_subsize, wa_start &
, MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, written_arr, ierr)
call MPI_Type_Commit(written_arr, ierr)
call MPI_Type_Size(MPI_INTEGER, int_size, ierr)
call MPI_Type_Size(MPI_DOUBLE_PRECISION, double_size, ierr)
call MPI_File_Open(MPI_COMM_WORLD, "file_set_view.dat" &
, MPI_MODE_WRONLY + MPI_MODE_CREATE, MPI_INFO_NULL, fhandle, ierr)
call MPI_File_Set_View(fhandle, 0, MPI_DOUBLE_PRECISION, written_arr &
, "native", MPI_INFO_NULL, ierr)
call MPI_File_Write_All(fhandle, data, loc_dim, MPI_DOUBLE_PRECISION &
, MPI_STATUS_IGNORE, ierr)
call MPI_File_Close(fhandle, ierr)
call MPI_Finalize(ierr)
end program test
我得到一个69Go文件,考虑到我写的内容,这个文件太大了。顺便说一下,如果我增加loc_x
和loc_y
,文件的大小就不会改变。
但是,如果我使用MPI_File_Seek
,它的效果会更好;创建一个合理大小的文件,其中包含我要写的数据
program test
implicit none
include "mpif.h"
integer :: myrank, nproc, fhandle, ierr
integer :: xpos, ypos
integer, parameter :: loc_x=10, loc_y=10
integer :: loc_dim
integer :: nx=2, ny=2
real(8), dimension(loc_x, loc_y) :: data, data_read
integer :: written_arr
integer, dimension(2) :: wa_size, wa_subsize, wa_start
integer :: int_size, double_size
integer(kind=MPI_OFFSET_KIND) :: offset
call MPI_Init(ierr)
call MPI_Comm_Rank(MPI_COMM_WORLD, myrank, ierr)
call MPI_Comm_Size(MPI_COMM_WORLD, nproc, ierr)
xpos = mod(myrank, nx)
ypos = mod(myrank/nx, ny)
data = myrank
loc_dim = loc_x*loc_y
! Write using MPI_File_Seek
call MPI_File_Open(MPI_COMM_WORLD, "file_seek.dat" &
, MPI_MODE_WRONLY + MPI_MODE_CREATE, MPI_INFO_NULL, fhandle, ierr)
offset = loc_x*loc_y*myrank
print*, 'myrank, offset, data: ', myrank, offset, data(1,:2)
call MPI_File_Seek(fhandle, offset, MPI_SEEK_SET)
call MPI_File_Write_All(fhandle, data, loc_dim, MPI_DOUBLE_PRECISION &
, MPI_STATUS_IGNORE, ierr)
call MPI_File_Close(fhandle, ierr)
call MPI_Finalize(ierr)
end program test
在我看来,这两种方法应该产生相同的东西,特别是第一种方法应该创建一个如此大的文件。
我使用gfortran 4.6.3和OpenMPI 1.6.2编译我的代码。
任何帮助将不胜感激!
答案 0 :(得分:1)
答案实际上是在Hristo Iliev对this question的答案中给出的:
替换
0
来电中的MPI_FILE_SET_VIEW
0_MPI_OFFSET_KIND
或声明类型的常量INTEGER(KIND=MPI_OFFSET_KIND)
,值为零,然后传递给它。call MPI_File_Set_View(fhandle, 0_MPI_OFFSET_KIND, MPI_DOUBLE_PRECISION, ...
或
integer(kind=MPI_OFFSET_KIND), parameter :: zero_off = 0 ... call MPI_File_Set_View(fhandle, zero_off, MPI_DOUBLE_PRECISION, ...
两种方法都会产生大小为3200字节的输出文件(如预期的那样)。