A hybrid approach with OpenMP and MPI does not use the same number of threads on clusters with different numbers of hosts

Time: 2018-06-24 13:47:26

Tags: c mpi openmp

I am testing a hybrid approach by parallelizing the friendly-numbers program (from CAPBenchmark) with MPI and OpenMP.

My cluster has 8 machines, each with a 4-core processor.

Code:

/*
 * Copyright(C) 2014 Pedro H. Penna <pedrohenriquepenna@gmail.com>
 * 
 * friendly-numbers.c - Friendly numbers kernel.
 */

#include <global.h>
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <util.h>
#include "fn.h"

/*
 * Computes the Greatest Common Divisor of two numbers.
 */
static int gcd(int a, int b)
{
  int c;

  /* Compute greatest common divisor. */
  while (a != 0)
  {
     c = a;
     a = b%a;
     b = c;
  }

  return (b);
}

/*
 * Sum of divisors.
 */
static int sumdiv(int n)
{
    int sum;    /* Sum of divisors. */
    int factor; /* Working factor.  */

    sum = 1 + n;

    /* Compute sum of divisors. */
    for (factor = 2; factor < n; factor++)
    {
        /* Divisor found. */
        if ((n%factor) == 0)
            sum += factor;
    }

    return (sum);
}

/*
 * Computes friendly numbers.
 */
int friendly_numbers(int start, int end) 
{
    int n;        /* Divisor.                    */
    int *num;     /* Numerator.                  */
    int *den;     /* Denominator.                */
    int *totalnum;
    int *totalden;
    int rcv_friends;
    int range;    /* Range of numbers.           */
    int i, j;     /* Loop indexes.               */
    int nfriends; /* Number of friendly numbers. */
    int slice;

    range = end - start + 1;
    slice = range / nthreads;
    if (rank == 0) {

        num = smalloc(sizeof(int)*range);
        den = smalloc(sizeof(int)*range);
        totalnum = smalloc(sizeof(int)*range);
        totalden = smalloc(sizeof(int)*range);

    } else {

        num = smalloc(sizeof(int) * slice);
        den = smalloc(sizeof(int) * slice);
        totalnum = smalloc(sizeof(int)*range);
        totalden = smalloc(sizeof(int)*range);
    }

    j = 0;
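    /* Disable dynamic team sizing and request 4 OpenMP threads per MPI task. */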
    omp_set_dynamic(0);    
    omp_set_num_threads(4);
    #pragma omp parallel for private(i, j, n) default(shared) 
    for (i = start + rank * slice; i < start + (rank + 1) * slice; i++) { 
            j = i - (start + rank * slice);
            num[j] = sumdiv(i);
            den[j] = i;

            n = gcd(num[j], den[j]);
            num[j] /= n;
            den[j] /= n;
    }
    if (rank != 0) {
        MPI_Send(num, slice, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(den, slice, MPI_INT, 0, 1, MPI_COMM_WORLD);
    } else {
        for (i = 1; i < nthreads; i++)  {
            MPI_Recv(num + (i * slice), slice, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(den + (i * slice), slice, MPI_INT, i, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }

    if (rank == 0) {
        for (i = 1; i < nthreads; i++) {
            MPI_Send(num, range, MPI_INT, i, 2, MPI_COMM_WORLD);
            MPI_Send(den, range, MPI_INT, i, 3, MPI_COMM_WORLD);
        }
    } else {
        MPI_Recv(totalnum, range, MPI_INT, 0, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(totalden, range, MPI_INT, 0, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    /* Check friendly numbers. */
    nfriends = 0;
    if (rank == 0) {
        omp_set_dynamic(0);
        omp_set_num_threads(4);
        #pragma omp parallel for private(i, j) default(shared) reduction(+:nfriends)
        for (i = rank; i < range; i += nthreads) {
            for (j = 0; j < i; j++) {
                /* Friends. */
                if ((num[i] == num[j]) && (den[i] == den[j])) 
                    nfriends++;
            }
        }
    } else {
        omp_set_dynamic(0);
        omp_set_num_threads(4);
        #pragma omp parallel for private(i, j) default(shared) reduction(+:nfriends)
        for (i = rank; i < range; i += nthreads) {
            for (j = 0; j < i; j++) {
                /* Friends. */
                if ((totalnum[i] == totalnum[j]) && (totalden[i] == totalden[j])) 
                    nfriends++;
            }
        }

    }
    if (rank == 0) {
        for (i = 1; i < nthreads; i++) {
            MPI_Recv(&rcv_friends, 1, MPI_INT, i, 4, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            nfriends += rcv_friends;
        }
    } else {
        MPI_Send(&nfriends, 1, MPI_INT, 0, 4, MPI_COMM_WORLD);
    }

    free(num);
    free(den);
    free(totalnum);
    free(totalden);

    return (nfriends);
}

During execution I observed the following behavior:

When I run mpirun on 4 and on 8 hosts, each host uses 4 threads for the computation, as expected.

However, when running on only 2 hosts, only 1 thread is used on each machine. What causes this behavior? Is there any alternative to "force" the use of 4 threads in the 2-host case?
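
For reference, here is a minimal standalone check (a sketch independent of the benchmark; the file name check_binding.c is only a placeholder). It prints, for each MPI rank, how many cores the process is allowed to run on and how many OpenMP threads a parallel region actually gets:

/*
 * check_binding.c - report affinity mask size and OpenMP team size per rank.
 */
#define _GNU_SOURCE
#include <mpi.h>
#include <omp.h>
#include <sched.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank;
    int nthreads = 0;
    int ncores = 0;
    cpu_set_t mask;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Count the cores in this process's affinity mask (Linux-specific). */
    if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
        ncores = CPU_COUNT(&mask);

    /* Measure the team size the OpenMP runtime actually creates. */
    #pragma omp parallel
    {
        #pragma omp single
        nthreads = omp_get_num_threads();
    }

    printf("rank %d: bound to %d core(s), OpenMP team of %d thread(s)\n",
           rank, ncores, nthreads);

    MPI_Finalize();
    return (0);
}

Built with mpicc -fopenmp and launched with the same mpirun line as the benchmark, this makes the binding-dependent thread count directly visible.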

1 Answer:

Answer 0 (score: 1)

I assume you are using Open MPI.

The default binding policy is to bind to socket or to NUMA domain (depending on your version). I assume your nodes are single-socket, which means one MPI task is bound to 4 cores, and the OpenMP runtime will then likely start 4 OpenMP threads.
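
You can verify the effective binding with Open MPI's --report-bindings option, which prints each rank's binding at startup (the rank count and executable name below are placeholders):

mpirun --report-bindings -np 8 ./friendly-numbers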

A special case is when only 2 MPI tasks are launched. In that case the binding policy is bind-to-core, which means an MPI task is bound to a single core, so the OpenMP runtime starts only one OpenMP thread.
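
You can also have the OpenMP runtime print the settings it picked up: OMP_DISPLAY_ENV is a standard OpenMP 4.0 environment variable, and Open MPI's -x flag exports it to the remote processes (the executable name is again a placeholder):

mpirun -np 2 -x OMP_DISPLAY_ENV=true ./friendly-numbers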

To get the desired behavior, you can

mpirun --bind-to numa -np 2 ...

and if that fails, you can fall back to

mpirun --bind-to socket -np 2 ...
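
If neither option is supported by your Open MPI version, another possibility (since the code already calls omp_set_num_threads(4)) is to disable binding altogether, at the cost of letting the OS schedule the threads freely:

mpirun --bind-to none -np 2 ...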