MPI_Bcast error on multiple nodes

Date: 2014-05-18 12:59:07

Tags: c linux segmentation-fault mpi

Background: I am writing MPI versions of the I/O system calls, based on the collfs project.

The code runs without error on multiple processors on a single node.

However, running it on multiple nodes causes a segmentation fault. The error output for 2 processes, with 1 process per node, is as follows:

$ qsub test.sub
$ cat test.e291810
0: pasc_open(./libSDL.so, 0, 0)
1: pasc_open(./libSDL.so, 0, 0)
1: mptr[0]=0 mptr[len-1]=0
1: MPI_Bcast(mptr=eed11000, len=435104, MPI_BYTE, 0, MPI_COMM_WORLD)
0: mptr[0]=127 mptr[len-1]=0
0: MPI_Bcast(mptr=eeb11000, len=435104, MPI_BYTE, 0, MPI_COMM_WORLD)
_pmiu_daemon(SIGCHLD): [NID 00632] [c3-0c0s14n0] [Sun May 18 13:10:30 2014] PE RANK 0 exit signal Segmentation fault
[NID 00632] 2014-05-18 13:10:30 Apid 8283706: initiated application termination

The function in which the error occurs is as follows:

static int nextfd = BASE_FD;
#define next_fd() (nextfd++)

int pasc_open(const char *pathname, int flags, mode_t mode)
{
    int rank;
    int err;

    if(!init)
        return ((pasc_open_fp) def.open)(pathname, flags, mode);

    if(MPI_Comm_rank(MPI_COMM_WORLD, &rank) != MPI_SUCCESS)
        return -1;
    dprintf("%d: %s(%s, %x, %x)\n", rank, __FUNCTION__, pathname, flags, mode);

    /* Handle just read-only access for now. */
    if(flags == O_RDONLY || flags == (O_RDONLY | O_CLOEXEC)) {
        int fd, len, xlen, mptr_is_null;
        void *mptr;
        struct mpi_buf { int len, en; } buf;
        struct file_entry *file;

        if(rank == 0) {
            len = -1;
            fd = ((pasc_open_fp) def.open)(pathname, flags, mode);
            /* Call stat to get file size and check for errors */
            if(fd >= 0) {
                struct stat st;
                if(fstat(fd, &st) >= 0)
                    len = st.st_size;
                else
                    ((pasc_close_fp) def.close)(fd);
            }
            /* Record them */
            buf.len = len;
            buf.en = errno;
        }
        /* Propagate file size and errno */
        if(MPI_Bcast(&buf, 2, MPI_INT, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return -1;
        len = buf.len;
        if(len < 0) {
            dprintf("error opening file, len < 0");
            return -1;
        }
        /* Get the page-aligned size */
        xlen = page_extend(len);
        /* `mmap` the file into memory */
        if(rank == 0) {
            mptr = ((pasc_mmap_fp) def.mmap)(0, xlen, PROT_READ, MAP_PRIVATE,
                    fd, 0);
        } else {
            fd = next_fd();
            mptr = ((pasc_mmap_fp) def.mmap)(0, xlen, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
        }
        ((pasc_lseek_fp) def.lseek)(fd, 0, SEEK_SET);
        /* Ensure success on all aux. processes */
        if(rank != 0)
            mptr_is_null = !mptr;
        MPI_Allreduce(MPI_IN_PLACE, &mptr_is_null, 1, MPI_INT, MPI_LAND,
                MPI_COMM_WORLD);
        if(mptr_is_null) {
            if(mptr)
                ((pasc_munmap_fp) def.munmap)(mptr, xlen);
            dprintf("%d: error: mmap/malloc error\n", rank);
            return -1;
        }
        dprintf("%d: mptr[0]=%d mptr[len-1]=%d\n", rank, ((char*)mptr)[0], ((char*)mptr)[len-1]);
        /* Propagate file contents */
        dprintf("%d: MPI_Bcast(mptr=%x, len=%d, MPI_BYTE, 0, MPI_COMM_WORLD)\n",
        rank, mptr, len);
        if(MPI_Bcast(mptr, len, MPI_BYTE, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return -1;
        if(rank != 0)
            fd = next_fd();
        /* Register the file in the linked list */
        file = malloc(sizeof(struct file_entry));
        file->fd = fd;
        file->refcnt = 1;
        strncpy(file->fn, pathname, PASC_FNMAX);
        file->mptr = mptr;
        file->len = len;
        file->xlen = xlen;
        file->offset = 0;
        /* Reverse stack */
        file->next = open_files;
        open_files = file;
        return fd;

    }
    /* Fall back to independent access */
    return ((pasc_open_fp) def.open)(pathname, flags, mode);
}

The error occurs on the final MPI_Bcast call. I cannot work out why: the memory it copies from and to can be dereferenced just fine.

I am using MPICH on a custom Cray XC30 machine running SUSE Linux x86_64.

Thanks!


Edit: I tried replacing the MPI_Bcast call with an MPI_Send/MPI_Recv pair, with the same result.
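
For reference, a minimal sketch of the kind of substitution I mean (a hypothetical helper, not the exact code I ran), called in place of the final MPI_Bcast with mptr, len and rank:

#include <mpi.h>

/* Broadcast the mapped file contents with point-to-point calls instead of
 * MPI_Bcast; buf/len correspond to mptr/len in pasc_open() above. */
static int bcast_by_send(void *buf, int len, int rank)
{
    int nprocs, i;
    if(MPI_Comm_size(MPI_COMM_WORLD, &nprocs) != MPI_SUCCESS)
        return -1;
    if(rank == 0) {
        /* Root sends the buffer to every other rank in turn. */
        for(i = 1; i < nprocs; i++)
            if(MPI_Send(buf, len, MPI_BYTE, i, 0, MPI_COMM_WORLD)
                    != MPI_SUCCESS)
                return -1;
    } else {
        /* Non-root ranks receive the file contents from rank 0. */
        if(MPI_Recv(buf, len, MPI_BYTE, 0, 0, MPI_COMM_WORLD,
                MPI_STATUS_IGNORE) != MPI_SUCCESS)
            return -1;
    }
    return 0;
}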

1 Answer:

Answer 0 (score: 2)

The Cray MPI implementation may have some magic in it for performance reasons. Without knowing its internals, much of this answer is guesswork.

Intra-node communication probably does not go through the network stack, relying instead on some kind of shared-memory transport. When you try to send the mmap-ed buffer through the network stack, something somewhere fails; the DMA engine (I am wildly guessing here) may be unable to handle that situation.

You could try page-locking the mmap-ed buffer; perhaps mlock will do the trick. If that fails, copy the data into a malloc-ed buffer instead.
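
A rough sketch of both options, as a hypothetical replacement for the failing broadcast in your pasc_open() (untested guesswork, like the rest of this answer; note that on non-root ranks the mapping was created writable and anonymous, so copying into it is fine):

#include <mpi.h>
#include <stdlib.h>     /* malloc, free */
#include <string.h>     /* memcpy */
#include <sys/mman.h>   /* mlock, munlock */

/* Hypothetical drop-in for the failing MPI_Bcast.  First try pinning the
 * mmap-ed region; if pinning or the in-place broadcast fails, stage the
 * data through an ordinary heap buffer so MPI never touches the mmap-ed
 * memory directly. */
static int bcast_file_contents(void *mptr, int len, size_t xlen, int rank)
{
    void *tmp;

    /* Option 1: page-lock the mapping, then broadcast in place. */
    if(mlock(mptr, xlen) == 0) {
        int rc = MPI_Bcast(mptr, len, MPI_BYTE, 0, MPI_COMM_WORLD);
        munlock(mptr, xlen);
        if(rc == MPI_SUCCESS)
            return 0;
    }

    /* Option 2: copy through a malloc-ed buffer. */
    tmp = malloc(len);
    if(!tmp)
        return -1;
    if(rank == 0)
        memcpy(tmp, mptr, len);               /* mapped file -> heap */
    if(MPI_Bcast(tmp, len, MPI_BYTE, 0, MPI_COMM_WORLD) != MPI_SUCCESS) {
        free(tmp);
        return -1;
    }
    if(rank != 0)
        memcpy(mptr, tmp, len);               /* heap -> anonymous mapping */
    free(tmp);
    return 0;
}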