Casting from void * to MPI_Aint

Date: 2012-11-14 17:14:38

Tags: c fortran mpi implicit-conversion fortran-iso-c-binding

I am having some trouble casting a variable from void * to MPI_Aint. Here is part of the code:

C:
void myfunc_(MPI_Aint *out_ptr, ...)
...
void *ptr = mmap(...)
...
*out_ptr = (MPI_Aint) ptr;

Fortran:
#ifdef DOUBLE_PREC
  integer, parameter, public :: mytype = KIND(0.0D0)
  integer, parameter, public :: real_type = MPI_DOUBLE_PRECISION
#endif
INTEGER BSIZ, CORE_COMM, status
real(mytype), pointer :: SND
...
call myfunc(SND, BSIZ, real_type, CORE_COMM, status)

mmap is working, but I get an error (there is no error when I comment out the last line):

...
mmap succeeded 0x7fab7b490000
...
*** Process received signal ***
Signal: Segmentation fault (11)
Signal code: Address not mapped (1)
Failing at address: (nil)

Any ideas that could help? Here is the complete C function code:

void myfunc_(MPI_Aint *out_ptr, MPI_Fint *nelem, MPI_Fint *type,
            MPI_Fint *comm, MPI_Fint *ret)
{
MPI_Comm world;
int mype;

world = MPI_Comm_f2c(*comm);
MPI_Comm_rank(world, &mype);

char filename[20];

#define POSIX_SHM

int i,j;

int world_rank = -1, world_size = -1;
int mpi_result = MPI_SUCCESS;

int color = -1;
int ranks_per_node = -1;
MPI_Comm IntraNodeComm;

int node_shmem_bytes; 

mpi_result = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
assert(mpi_result==MPI_SUCCESS);
mpi_result = MPI_Comm_size(MPI_COMM_WORLD, &world_size);
assert(mpi_result==MPI_SUCCESS);

if (world_rank==0)
{
    char * env_char;
    int units = 1;
    int num_count = 0;
    env_char = getenv("NODE_SHARED_MEMORY");
    if (env_char!=NULL)
    {
        if      ( NULL != strstr(env_char,"G") ) units = 1000000000;
        else if ( NULL != strstr(env_char,"M") ) units = 1000000;
        else if ( NULL != strstr(env_char,"K") ) units = 1000;
        else                                     units = 1;

        num_count = strspn(env_char, "0123456789");
        memset( &env_char[num_count], ' ', strlen(env_char)-num_count);

        node_shmem_bytes = units * atoi(env_char);
        printf("%7d: NODE_SHARED_MEMORY = %d bytes \n", world_rank, node_shmem_bytes );
    }
    else
    {
        node_shmem_bytes = getpagesize();
        printf("%7d: NODE_SHARED_MEMORY = %d bytes \n", world_rank, node_shmem_bytes );
    }
}
mpi_result = MPI_Bcast( &node_shmem_bytes, 1, MPI_INT, 0, MPI_COMM_WORLD );
assert(mpi_result==MPI_SUCCESS);

int node_shmem_count = node_shmem_bytes/sizeof(double);
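/* Note: the environment-derived count is immediately overridden by the caller-supplied *nelem: */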
node_shmem_count = (int) *nelem;
node_shmem_bytes = node_shmem_count * sizeof(double) * 2;

fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);

IntraNodeComm = world;

int subcomm_rank = -1;
mpi_result = MPI_Comm_rank(IntraNodeComm, &subcomm_rank);
assert(mpi_result==MPI_SUCCESS);

sprintf(filename,"/foo_%d_%d_%d",*nelem,*type,*comm);

#if defined(POSIX_SHM)
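/* Rank 0 creates the shared-memory object with O_CREAT; the barrier below
   ensures it exists before the remaining ranks open it. */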
int fd;
if (subcomm_rank==0)
    fd = shm_open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR );

mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (subcomm_rank!=0)
    fd = shm_open(filename, O_RDWR, S_IRUSR | S_IWUSR );

if (fd<0) printf("%7d: shm_open failed: %d \n", world_rank, fd);
else      printf("%7d: shm_open succeeded: %d \n", world_rank, fd);
#elif defined(DEV_SHM)
int fd = open("/dev/shm/foo", O_RDWR | O_CREAT, S_IRUSR | S_IWUSR );
if (fd<0) printf("%7d: open failed: %d \n", world_rank, fd);
else      printf("%7d: open succeeded: %d \n", world_rank, fd);
#else
int fd = -1;
printf("%7d: no file backing \n", world_rank);
#endif
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (fd>=0 && subcomm_rank==0)
{
    int rc = ftruncate(fd, node_shmem_bytes);
    if (rc==0) printf("%7d: ftruncate succeeded \n", world_rank);
    else       printf("%7d: ftruncate failed \n", world_rank);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

#ifdef __bgp__
double * ptr = NULL;
_BGP_Personality_t pers;
Kernel_GetPersonality(&pers, sizeof(pers));

if( BGP_Personality_processConfig(&pers) == _BGP_PERS_PROCESSCONFIG_SMP )
{
    printf("SMP mode => MAP_PRIVATE | MAP_ANONYMOUS \n");
    ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0 );
}
else
{
    if (node_shmem_bytes>pers.Kernel_Config.SharedMemMB)
    {
        printf("node_shmem_bytes (%d) greater than pers.Kernel_Config.SharedMemMB (%d) - allocating the latter \n", 
               node_shmem_bytes, pers.Kernel_Config.SharedMemMB );
        node_shmem_bytes = pers.Kernel_Config.SharedMemMB;
    }
    ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
}
#else
void *ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
#endif
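/* Note: mmap reports failure by returning MAP_FAILED ((void *) -1), not NULL,
   so the NULL check below can miss errors. */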
if (ptr==NULL) printf("%7d: mmap failed \n", world_rank);
else           printf("%7d: mmap succeeded %p\n", world_rank,ptr);
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

mpi_result = MPI_Comm_size(IntraNodeComm, &ranks_per_node );
assert(mpi_result==MPI_SUCCESS);
if (0==subcomm_rank) printf("%7d: ranks_per_node = %d \n", world_rank, ranks_per_node);
fflush(stdout);

for (i=0; i<ranks_per_node; i++)
{
    if (i==subcomm_rank)
    {
        printf("%7d: subcomm_rank %d setting the buffer \n", world_rank, subcomm_rank );
        //for (j=0; j<node_shmem_count; j++ ) ptr[j] = (double)i;
        printf("%7d: memset succeeded \n", world_rank);

        int rc = msync(ptr, node_shmem_bytes, MS_INVALIDATE | MS_SYNC);
        if (rc==0) printf("%7d: msync succeeded, %p \n", world_rank, ptr);
        else       printf("%7d: msync failed \n", world_rank);
    }

    fflush(stdout);
    mpi_result = MPI_Barrier(MPI_COMM_WORLD);
    assert(mpi_result==MPI_SUCCESS);

    //printf("%7d: ptr = %lf ... %lf \n", world_rank, ptr[0], ptr[node_shmem_count-1]);
    fflush(stdout);

    mpi_result = MPI_Barrier(MPI_COMM_WORLD);
    assert(mpi_result==MPI_SUCCESS);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (ptr!=NULL)
{
    int rc = munmap(ptr, node_shmem_bytes);
    if (rc==0) printf("%7d: munmap succeeded %p, %d\n", world_rank,ptr, (MPI_Aint) ptr);
    else       printf("%7d: munmap failed \n", world_rank);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

#if defined(POSIX_SHM)
//if (fd>=0)
if (fd>=0 && subcomm_rank==0)
{
    int rc = -1;

    rc = shm_unlink(filename);
    if (rc==0) printf("%7d: shm_unlink succeeded %p\n", world_rank,ptr);
    else       printf("%7d: shm_unlink failed \n", world_rank);
}
#elif defined(DEV_SHM)
if (fd>=0 && subcomm_rank==0)
{
    int rc = -1;

    rc = ftruncate(fd, 0);
    if (rc==0) printf("%7d: ftruncate succeeded \n", world_rank);
    else       printf("%7d: ftruncate failed \n", world_rank);

    rc = close(fd);
    if (rc==0) printf("%7d: close succeeded \n", world_rank);
    else       printf("%7d: close failed \n", world_rank);
}
#endif
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

*out_ptr = (MPI_Aint) ptr;

}

2 Answers:

Answer 0 (score: 1)

I was going to write you a short comment, but it somehow went over the limit...

The MPI standards body and the implementers have struggled with this C-to-Fortran memory-passing problem for many years. Why not reuse their efforts instead of rediscovering the fact that round wheels work better than square ones?

Just take a look at the MPI standard function MPI_ALLOC_MEM, which is supposed to allocate special memory in MPI and return it to the user code. The MPI-2.2 standard defines its Fortran interface as:

MPI_ALLOC_MEM(SIZE, INFO, BASEPTR, IERROR)
    INTEGER INFO, IERROR
    INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR

The modern Fortran 2008 interface in MPI-3.0 uses ISO_C_BINDING and looks like this:

MPI_Alloc_mem(size, info, baseptr, ierror)
    USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR
    INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size
    TYPE(MPI_Info), INTENT(IN) :: info
    TYPE(C_PTR), INTENT(OUT) :: baseptr
    INTEGER, OPTIONAL, INTENT(OUT) :: ierror

The standard provides the following example of how to use this call:

USE mpi_f08
USE, INTRINSIC :: ISO_C_BINDING
TYPE(C_PTR) :: p
REAL, DIMENSION(:,:), POINTER :: a
INTEGER, DIMENSION(2) :: shape
INTEGER(KIND=MPI_ADDRESS_KIND) :: size
shape = (/100,100/)
size = 4 * shape(1) * shape(2)
CALL MPI_Alloc_mem(size,MPI_INFO_NULL,p,ierr)
CALL C_F_POINTER(p, a, shape)
...
a(3,5) = 2.71
...
CALL MPI_Free_mem(a, ierr)

Basically, the C_F_POINTER routine from ISO_C_BINDING binds a C pointer to a Fortran pointer, and the memory pointed to by the former then becomes usable through the latter.

This is how Open MPI implements the F08 MPI_Alloc_mem:

subroutine MPI_Alloc_mem_f08(size,info,baseptr,ierror)
   use, intrinsic :: ISO_C_BINDING, only : C_PTR
   use :: mpi_f08_types, only : MPI_Info, MPI_ADDRESS_KIND
   use :: mpi_f08, only : ompi_alloc_mem_f
   implicit none
   INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: size
   TYPE(MPI_Info), INTENT(IN) :: info
   TYPE(C_PTR), INTENT(OUT) :: baseptr
   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
   integer :: c_ierror

   call ompi_alloc_mem_f(size,info%MPI_VAL,baseptr,c_ierror)
   if (present(ierror)) ierror = c_ierror
end subroutine MPI_Alloc_mem_f08

ompi_alloc_mem_f is a C function that interfaces the internal C implementation with the Fortran side:

void ompi_alloc_mem_f(MPI_Aint *size, MPI_Fint *info, char *baseptr, MPI_Fint *ierr)
{
    int ierr_c;

    MPI_Info c_info = MPI_Info_f2c(*info);

    ierr_c = MPI_Alloc_mem(*size, c_info, baseptr);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
}

So you can see that the TYPE(C_PTR) baseptr argument coming from Fortran simply arrives as a pointer, passed by reference (as usual). This is not quite evident here, because the MPI standard defines the last argument of MPI_Alloc_mem, in which the pointer to the allocated memory is returned, as void *, while it is actually a void pointer passed by reference (i.e. void **). The dummy baseptr argument is really a void ** but is simply declared as char * for reasons :) The same function is also used to implement the old Fortran interface, which is why char *baseptr maps to an INTEGER(KIND=MPI_ADDRESS_KIND) actual argument.

The lesson: while INTEGER(KIND=MPI_ADDRESS_KIND) integers in Fortran are used to store pointer and pointer-difference values, you should not use MPI_Aint as a pointer argument type in C; use a regular double pointer such as void ** instead.
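
Applied to your function, a minimal sketch of the same pattern might look like this (illustrative only: the MAP_ANONYMOUS mapping stands in for your shm_open descriptor, and the Fortran caller would declare the argument as TYPE(C_PTR) and recover a usable pointer with C_F_POINTER):

#include <stddef.h>
#include <sys/mman.h>
#include <mpi.h>

void myfunc_(void **out_ptr, MPI_Fint *nelem, MPI_Fint *ret)
{
    size_t nbytes = (size_t)(*nelem) * sizeof(double);

    /* Anonymous mapping used here only to keep the sketch self-contained;
       the real code would pass the shm_open file descriptor instead. */
    void *ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    *out_ptr = (ptr == MAP_FAILED) ? NULL : ptr;  /* hand back the address itself */
    *ret = (ptr == MAP_FAILED) ? 1 : 0;
}

This mirrors ompi_alloc_mem_f above: the address travels as a pointer passed by reference rather than being smuggled through an integer.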

Answer 1 (score: 0)

I'm not sure whether the line you can comment out to avoid the problem is this one:

*out_ptr = (MPI_Aint) ptr;

Your dereferencing is inconsistent.

ptr is a double *, so it cannot be converted directly to an MPI_Aint.

Perhaps you want

*out_ptr = *(MPI_Aint *)ptr;

if the caller is passing a pointer (as out_ptr) to a location where you want to store the single MPI_Aint found at *ptr. However, depending on your allocation of node_shmem_bytes, that may not make sense:

out_ptr = (MPI_Aint *)ptr

would set the (local to myfunc) copy of out_ptr to point to a block of MPI_Aint objects, but the caller would not see that. I don't know what Fortran -> C calling convention is in use, but perhaps you want to pass a pointer to an MPI_Aint * into which the C program can put ptr?
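
A minimal sketch of that last suggestion (my reading, with illustrative names; it assumes the Fortran actual argument is declared as INTEGER(KIND=MPI_ADDRESS_KIND) so it is wide enough to hold an address):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <mpi.h>

void myfunc_(MPI_Aint *out_addr, MPI_Fint *nelem, MPI_Fint *ret)
{
    size_t nbytes = (size_t)(*nelem) * sizeof(double);
    void *ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  /* anonymous only for the sketch */

    if (ptr == MAP_FAILED) {
        *ret = 1;
        return;
    }
    *out_addr = (MPI_Aint)(uintptr_t)ptr;  /* store the address as an integer value */
    *ret = 0;
}

Whichever variant is used, the Fortran actual argument has to match what the C side writes through the out parameter.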