I am trying to save a solution using the OVERFLOW-PLOT3D q-file format (defined here: http://overflow.larc.nasa.gov/files/2014/06/Appendix_A.pdf). For a single grid, it is basically:
READ(1) NGRID
READ(1) JD,KD,LD,NQ,NQC
READ(1) REFMACH,ALPHA,REY,TIME,GAMINF,BETA,TINF, &
        IGAM,HTINF,HT1,HT2,RGAS1,RGAS2, &
        FSMACH,TVREF,DTVREF
READ(1) ((((Q(J,K,L,N),J=1,JD),K=1,KD),L=1,LD),N=1,NQ)
All variables are double precision, except NGRID, JD, KD, LD, NQ, NQC and IGAM, which are integers. I need to export the solution with MPI-IO. If I take a very simple example with a single processor, the following code does not work, and I do not understand why.
call mpi_file_open( mpi_comm_world, fileOut, mpi_mode_wronly + mpi_mode_create, &
                    mpi_info_null, mpi_fh, ierr )
offset = 0
call mpi_file_seek( mpi_fh, offset, mpi_seek_set, ierr )
call mpi_file_write( mpi_fh, (/NGRID,JD,KD,LD,NQ,NQC/), 6, mpi_integer, mstat, ierr )
call mpi_file_write( mpi_fh, (/REFMACH,ALPHA,REY,TIME,GAMINF,BETA,TINF/), 7, mpi_double_precision, mstat, ierr )
call mpi_file_write( mpi_fh, IGAM, 1, mpi_integer, mstat, ierr )
call mpi_file_write( mpi_fh, (/HTINF,HT1,HT2,RGAS1,RGAS2,FSMACH,TVREF,DTVREF/), 8, mpi_double_precision, mstat, ierr )
call mpi_file_write( mpi_fh, Q, NQ*JD*KD*LD, mpi_double_precision, mstat, ierr )
Tecplot does not recognize the format. However, if I write a simple non-MPI code such as:
open(2, file=fileOut, form='unformatted', convert='little_endian')
write(2) NGRID
write(2) JD, KD, LD, NQ, NQC
write(2) REFMACH,ALPHA,REY,TIME,GAMINF,BETA,TINF, &
         IGAM,HTINF,HT1,HT2,RGAS1,RGAS2, &
         FSMACH,TVREF,DTVREF
write(2) ((((Q(J,K,L,N),J=1,JD),K=1,KD),L=1,LD),N=1,NQ)
everything works fine. What is wrong with my MPI-IO code? Thank you very much for your help!
Joachim
Note: I don't know whether it is relevant, but if I add an mpi_file_seek(offset) just before the last write statement, with offset = 144, Tecplot agrees to load the file (but the data are not read correctly). This is strange, because the normal offset should be 7 integers (4 bytes each) + 15 real*8 values = 148 bytes...
EDIT: Your approach, @Jonathan Dursi, does not seem to work with Tecplot, for some reason. Is there anything wrong with the following code? (simplified for a single processor)
call MPI_File_write(fileh, [4, ngrid, 4], 3, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [20, jd, kd, ld, nq, nqc, 20], 7, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [56], 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [refmach,alpha,rey,time,gaminf,beta,tinf], 7, MPI_double_precision, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [56], 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [4, IGAM, 4], 3, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [64], 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [HTINF,HT1,HT2,RGAS1,RGAS2,FSMACH,TVREF,DTVREF], 8, MPI_double_precision, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [64], 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [jd*kd*ld*nq*8], 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, q, jd*kd*ld*nq, MPI_double_precision, MPI_STATUS_IGNORE, ierr)
call MPI_File_write(fileh, [jd*kd*ld*nq*8], 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
Answer (score: 3)
@francescalus is right - Fortran sequential unformatted data is record-based. This is actually very nice for a lot of things, but nothing else uses it (even MPI-IO in Fortran, which is more C-like: a file is just a big stream of undifferentiated bytes).
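Concretely, a single statement like write(2) NGRID puts a 4-byte record-length marker on each side of the payload, so the MPI-IO equivalent has to emit those markers by hand (a minimal sketch, assuming the usual 4-byte markers and an already-opened file handle fileh):

! write(2) NGRID lays out | 4 | ngrid | 4 | on disk;
! with MPI-IO the two record markers must be written explicitly:
call MPI_File_write(fileh, [4, ngrid, 4], 3, MPI_INTEGER, &
                    MPI_STATUS_IGNORE, ierr)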
Let's take a look at a simplified version of the writing program in the question:
program testwrite

  integer, parameter :: ngrid=2
  integer, parameter :: jd=4, kd=3, ld=2, nq=1, nqc=-1
  integer, parameter :: refmach=1, alpha=2, rey=3, time=4, gaminf=5
  integer, parameter :: beta=6, tinf=7

  integer, dimension(jd,kd,ld,nq) :: q

  q = 0

  open(2, file='ftest.dat', form='unformatted', convert='little_endian')
  write(2) NGRID
  write(2) JD, KD, LD, NQ, NQC
  write(2) REFMACH,ALPHA,REY,TIME,GAMINF,BETA,TINF
  write(2) ((((Q(J,K,L,N),J=1,JD),K=1,KD),L=1,LD),N=1,NQ)
  close(2)

end program testwrite
Running this, and looking at the resulting binary file with od (I've made everything an integer here, for clarity):
$ gfortran -o fwrite fwrite.f90
$ ./fwrite
$ od --format "d" ftest.dat
0000000           4           2           4          20
0000020           4           3           2           1
0000040          -1          20          28           1
0000060           2           3           4           5
0000100           6           7          28          96
0000120           0           0           0           0
*
0000260          96
0000264
So, for instance, we see the ngrid (2) integer at the start, bookended by 4/4 - the size of the record in bytes. Then, bookended by 20/20, we see the 5 integers (5*4 bytes) 4, 3, 2, 1, -1 - that is, jd, kd, ld, nq, nqc. Towards the end, we see a bunch of zeros bookended by 96 (= 4 bytes/integer * 4*3*2*1), representing q. (Note that there is no standard that defines this behaviour, but I am unaware of any major Fortran compiler that doesn't do it this way; once records get larger than can be described by a 4-byte integer, however, behaviours start to differ.)
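Record by record, then, the dump above is just the following layout (each marker a 4-byte little-endian integer):

|  4 | ngrid                                   |  4 |
| 20 | jd  kd  ld  nq  nqc                     | 20 |
| 28 | refmach alpha rey time gaminf beta tinf | 28 |
| 96 | q(1,1,1,1) .. q(4,3,2,1), 24 zeros      | 96 |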
We can test the data file with the following simple read program:
program testread
  implicit none

  integer :: ngrid
  integer :: jd, kd, ld, nq, nqc
  integer :: refmach, alpha, rey, time, gaminf
  integer :: beta, tinf
  integer :: j, k, l, n
  integer, allocatable, dimension(:,:,:,:) :: q
  character(len=64) :: filename

  if (command_argument_count() < 1) then
      print *,'Usage: read [filename]'
  else
      call get_command_argument(1, filename)

      open(2, file=trim(filename), form='unformatted', convert='little_endian')
      read(2) NGRID
      read(2) JD, KD, LD, NQ, NQC
      read(2) REFMACH,ALPHA,REY,TIME,GAMINF,BETA,TINF

      allocate(q(jd, kd, ld, nq))
      read(2) ((((Q(J,K,L,N),J=1,JD),K=1,KD),L=1,LD),N=1,NQ)
      close(2)

      print *, 'Ngrid = ', ngrid
      print *, 'jd, kd, ld, nq, nqc = ', jd, kd, ld, nq, nqc
      print *, 'q: min/mean/max = ', minval(q), sum(q)/size(q), maxval(q)

      deallocate(q)
  endif
end program testread
and running it:
$ ./fread ftest.dat
Ngrid = 2
jd, kd, ld, nq, nqc = 4 3 2 1 -1
q: min/mean/max = 0 0 0
Simple enough.
So this behaviour is easy enough to mimic in MPI-IO. There are really three parts here - the header, Q (which I assume to be distributed, with, say, MPI subarrays), and the footer (which is just the closing bookend for the array).
Let's take a look at an MPI-IO program in Fortran that does the same thing:
program mpiwrite
  use mpi
  implicit none

  integer, parameter :: ngrid=2
  integer, parameter :: jd=3, kd=3, ld=3, nlocq=3, nqc=-1
  integer :: nq
  integer, parameter :: refmach=1, alpha=2, rey=3, time=4, gaminf=5
  integer, parameter :: beta=6, tinf=7

  integer, dimension(jd,kd,ld,nlocq) :: q

  integer :: subarray
  integer :: fileh
  integer(kind=MPI_Offset_kind) :: offset

  integer :: comsize, rank, ierr

  call MPI_Init(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, comsize, ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

  nq = nlocq * comsize
  q = rank

  ! create a subarray; each processor gets its own q-slice of the
  ! global array
  call MPI_Type_create_subarray(4, [jd, kd, ld, nq], [jd, kd, ld, nlocq], &
                                [0, 0, 0, nlocq*rank],                    &
                                MPI_ORDER_FORTRAN, MPI_INTEGER, subarray, ierr)
  call MPI_Type_commit(subarray, ierr)

  call MPI_File_open(MPI_COMM_WORLD, 'mpi.dat',         &
                     MPI_MODE_WRONLY + MPI_MODE_CREATE, &
                     MPI_INFO_NULL, fileh, ierr)

  ! the header size is:
  !   1 field of 1 integer  ( = 4*(1 + 1 + 1) = 12 bytes )
  !  +1 field of 5 integers ( = 4*(1 + 5 + 1) = 28 bytes )
  !  +1 field of 7 integers ( = 4*(1 + 7 + 1) = 36 bytes )
  !  +first bookend of array size = 4 bytes
  offset = 12 + 28 + 36 + 4

  ! rank 0 writes the header and footer
  if (rank == 0) then
      call MPI_File_write(fileh, [4, ngrid, 4], 3, MPI_INTEGER, &
                          MPI_STATUS_IGNORE, ierr)
      call MPI_File_write(fileh, [20, jd, kd, ld, nq, nqc, 20], 7, MPI_INTEGER, &
                          MPI_STATUS_IGNORE, ierr)
      call MPI_File_write(fileh, &
                          [28, refmach, alpha, rey, time, gaminf, beta, tinf, 28], &
                          9, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
      call MPI_File_write(fileh, [jd*kd*ld*nq*4], 1, MPI_INTEGER, &
                          MPI_STATUS_IGNORE, ierr)
      ! jump to just past the array data, so the closing bookend lands
      ! immediately after it (an absolute seek: header + array size)
      call MPI_File_seek(fileh, offset+jd*kd*ld*nq*4, MPI_SEEK_SET, ierr)
      call MPI_File_write(fileh, [jd*kd*ld*nq*4], 1, MPI_INTEGER, &
                          MPI_STATUS_IGNORE, ierr)
  endif

  ! now everyone dumps their part of the array
  call MPI_File_set_view(fileh, offset, MPI_INTEGER, subarray, &
                         'native', MPI_INFO_NULL, ierr)
  call MPI_File_write_all(fileh, q, jd*kd*ld*nlocq, MPI_INTEGER, &
                          MPI_STATUS_IGNORE, ierr)

  call MPI_File_close(fileh, ierr)

  call MPI_Finalize(ierr)

end program mpiwrite
In this program, process 0 is responsible for writing the header and the record fields. It starts by writing the three header records, each bookended by the record length in bytes; then it writes the two bookends for the big Q array.
Then each rank sets the file view to first skip the header and then describe just its own piece of the global array (here, simply filled with its rank number), and writes out its local data. These are all non-overlapping pieces of data.
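Because the decomposition here splits only the last (slowest-varying) dimension, each rank's slice happens to be contiguous in the file, so - as a sketch of an alternative under that assumption, with myoff a hypothetical local of kind MPI_OFFSET_KIND - the set_view/write_all pair could also be expressed as one explicit-offset collective write:

! valid only because each rank's piece is contiguous in the file:
myoff = offset + int(rank, MPI_OFFSET_KIND) * jd*kd*ld*nlocq * 4
call MPI_File_write_at_all(fileh, myoff, q, jd*kd*ld*nlocq, &
                           MPI_INTEGER, MPI_STATUS_IGNORE, ierr)

The subarray-view version is the more general one, though: it keeps working unchanged if the decomposition later splits other dimensions.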
So let's try this with a couple of different numbers of processes:
$ mpif90 -o mpifwrite mpifwrite.f90
$ mpirun -np 1 ./mpifwrite
$ ./fread mpi.dat
Ngrid = 2
jd, kd, ld, nq, nqc = 3 3 3 3 -1
q: min/mean/max = 0 0 0
$ od --format="d" mpi.dat
0000000           4           2           4          20
0000020           3           3           3           3
0000040          -1          20          28           1
0000060           2           3           4           5
0000100           6           7          28         324
0000120           0           0           0           0
*
0000620           0         324
0000630
$ mpirun -np 3 ./mpifwrite
$ ./fread mpi.dat
Ngrid = 2
jd, kd, ld, nq, nqc = 3 3 3 9 -1
q: min/mean/max = 0 1 2
$ od --format="d" mpi.dat
0000000           4           2           4          20
0000020           3           3           3           9
0000040          -1          20          28           1
0000060           2           3           4           5
0000100           6           7          28         972
0000120           0           0           0           0
*
0000620           0           1           1           1
0000640           1           1           1           1
*
0001320           1           1           2           2
0001340           2           2           2           2
*
0002020           2           2           2         972
0002040
This is the output we expect. Extending things to multiple data types or multiple grids is relatively straightforward.
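For instance (a sketch of only the lines that change, not a tested drop-in): writing the actual double-precision Q means building the subarray on MPI_DOUBLE_PRECISION and counting 8 bytes per element in the bookends, and the REFMACH...DTVREF record of the real format gets bookends of 15*8 + 4 = 124 bytes, since it mixes 15 doubles with the IGAM integer:

! q declared double precision, and the subarray built on doubles:
call MPI_Type_create_subarray(4, [jd, kd, ld, nq], [jd, kd, ld, nlocq], &
                              [0, 0, 0, nlocq*rank],                    &
                              MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION,  &
                              subarray, ierr)
! the array-record bookends now count 8 bytes per element:
call MPI_File_write(fileh, [jd*kd*ld*nq*8], 1, MPI_INTEGER, &
                    MPI_STATUS_IGNORE, ierr)
! and the view and collective write use the matching types:
call MPI_File_set_view(fileh, offset, MPI_DOUBLE_PRECISION, subarray, &
                       'native', MPI_INFO_NULL, ierr)
call MPI_File_write_all(fileh, q, jd*kd*ld*nlocq, MPI_DOUBLE_PRECISION, &
                        MPI_STATUS_IGNORE, ierr)

with offset recomputed for the new header record sizes.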