Smoothing a vector with MPI Isend/Irecv in C++

Date: 2017-11-14 22:13:11

Tags: c++ vector mpi nonblocking smoothing

My assignment is to smooth a vector with the formula

x[i] = (x[i - 1] + x[i + 1]) / 2

using MPI Isend/Irecv.

The initial vector consists of N elements, with x[0] = x[N / 2 - 1] = x[N - 1] = 1. These elements are constants.
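For reference, a minimal serial sketch of what one smoothing pass should do (my assumption: the update is done in place, which is what produces the example output below; the `!= 1` check is how I keep the constant elements fixed):

#include <vector>

// One in-place smoothing pass; elements equal to 1 are treated as constants.
void smooth_once(std::vector<double>& x) {
    for (std::size_t i = 1; i + 1 < x.size(); ++i) {
        if (x[i] != 1) {
            x[i] = (x[i - 1] + x[i + 1]) / 2;
        }
    }
}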

I send a part of the initial vector from the master to each slave process, and every process then smooths its own part. The key point is that the processes have to exchange their boundary elements on every update.

For example, with 4 processes (master included):

  • master: 1 0 0 0 1 0 0 0 0 1
  • process 1 gets: 1 0 0
  • process 2 gets: 0 1 0
  • process 3 gets: 0 0 0 1

After one iteration:

  • process 1 has: 1 0.5 0.25
  • process 2 has: 0.5 1 0.5
  • process 3 has: 0 0 0.5 1

The exchange (one round of it is sketched in code right after this list):

  • process 1 sends its last element to process 2.
  • process 2 sends its first element to process 1 and its last element to process 3.
  • process 3 sends its first element to process 2.
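Written out for the middle worker (rank 2 above), one round of this exchange is essentially the following (a sketch; `temp` holds the local chunk, `ghost_left`/`ghost_right` are illustrative names, and each Isend/Irecv is completed immediately with MPI_Wait, as in my full code below):

double ghost_left, ghost_right;
MPI_Request request;

// our first element becomes process 1's last ghost
MPI_Isend(&temp.front(), 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &request);
MPI_Wait(&request, MPI_STATUS_IGNORE);
// process 1's last element becomes our first ghost
MPI_Irecv(&ghost_left, 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &request);
MPI_Wait(&request, MPI_STATUS_IGNORE);

// our last element becomes process 3's first ghost
MPI_Isend(&temp.back(), 1, MPI_DOUBLE, 3, 0, MPI_COMM_WORLD, &request);
MPI_Wait(&request, MPI_STATUS_IGNORE);
// process 3's first element becomes our last ghost
MPI_Irecv(&ghost_right, 1, MPI_DOUBLE, 3, 0, MPI_COMM_WORLD, &request);
MPI_Wait(&request, MPI_STATUS_IGNORE);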

My code:

#include "stdafx.h"
#include <mpi.h>
#include <iostream>
#include <vector>
#define iter_num 1
#define vec_size 10
using namespace std;

void one_proc(int iter) {
    // single program
}

int main(int argc, char **argv)
{
    int rank, size;

    vector<double> main_vector(vec_size);
    main_vector[0] = main_vector[(vec_size / 2) - 1] = main_vector[vec_size - 1] = 1;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // with fewer than two workers there is nothing to parallelize
    if (size == 1 || size == 2)
    {
        one_proc(iter_num);
        MPI_Finalize();
        exit(0);
    }
    MPI_Request request;
    MPI_Status status;

    int proc_amount = size - 1; // exclude the master
    // ceil(vec_size / proc_amount): the largest chunk a worker can receive
    int temp_size = ((vec_size % proc_amount) ? (vec_size / proc_amount + 1) : (vec_size / proc_amount));
    vector<double> temp(temp_size);

    if (rank == 0)
    {
        // master: cut main_vector into proc_amount chunks and send them out
        for (int i = 1; i <= proc_amount; i++) {
            temp.assign(main_vector.begin() + (i - 1) * vec_size / proc_amount,
                        main_vector.begin() + i * vec_size / proc_amount);
            MPI_Isend(&temp[0], (int)temp.size(), MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
        }
    }
    else
    {
        // kludge for when the vector can't be divided into equal parts:
        // a chunk is at most one element shorter than temp_size, so if the
        // sentinel survives the receive, the chunk was short and we drop it
        temp[temp_size - 1] = -1;
        MPI_Irecv(&temp[0], temp_size, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &request);
        MPI_Wait(&request, &status);
        if (temp[temp_size - 1] == -1)
        {
            temp.pop_back();
        }
    }


    double first, last;
    for (int i = 0; i < iter_num; i++)
    {
        if (rank == 1)
        {
            // first worker: only a right neighbour to exchange with
            first = temp[temp.size() - 1];
            // send our last element; process 2 prepends it as its first ghost
            MPI_Isend(&first, 1, MPI_DOUBLE, 2, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);

            // receive process 2's first element and append it as our last ghost
            MPI_Irecv(&last, 1, MPI_DOUBLE, 2, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
            temp.push_back(last);

            // smooth everything between the ends; elements equal to 1 are the constants
            for (size_t j = 1; j + 1 < temp.size(); j++) {
                if (temp[j] != 1) {
                    temp[j] = (temp[j - 1] + temp[j + 1]) / 2;
                }
            }
            temp.pop_back(); // drop the ghost again

            cout << endl;
            for (size_t k = 0; k < temp.size(); k++) {
                cout << rank << " " << temp[k] << endl;
            }
        }
        else if (rank > 1 && rank < proc_amount)
        {
            // middle workers: exchange with both neighbours
            last = temp[0]; // our first element becomes the left neighbour's last ghost
            MPI_Isend(&last, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);

            MPI_Irecv(&first, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
            temp.insert(temp.begin(), first); // prepend the left ghost

            first = temp[temp.size() - 1]; // our last element, for the right neighbour
            MPI_Isend(&first, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);

            MPI_Irecv(&last, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
            temp.push_back(last); // append the right ghost

            for (size_t j = 1; j + 1 < temp.size(); j++) {
                if (temp[j] != 1) {
                    temp[j] = (temp[j - 1] + temp[j + 1]) / 2;
                }
            }

            // drop both ghosts again
            temp.erase(temp.begin());
            temp.pop_back();

            cout << endl;
            for (size_t k = 0; k < temp.size(); k++) {
                cout << rank << " " << temp[k] << endl;
            }
        }
        else if (rank == proc_amount)
        {
            // last worker: only a left neighbour to exchange with
            last = temp[0];
            MPI_Isend(&last, 1, MPI_DOUBLE, proc_amount - 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);

            MPI_Irecv(&first, 1, MPI_DOUBLE, proc_amount - 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
            temp.insert(temp.begin(), first); // prepend the left ghost

            for (size_t j = 1; j + 1 < temp.size(); j++) {
                if (temp[j] != 1) {
                    temp[j] = (temp[j - 1] + temp[j + 1]) / 2;
                }
            }
            temp.erase(temp.begin()); // drop the ghost again

            cout << endl;
            for (size_t k = 0; k < temp.size(); k++) {
                cout << rank << " " << temp[k] << endl;
            }
        }
    }
    // gathering the chunks back on the master is still to be done
    MPI_Finalize();

    system("pause"); // Windows-only
    return 0;
}

There is no speedup, because the processes spend so much time communicating.

I know my solution is not efficient, so I would be glad if you could suggest how to improve it or point me to another approach (point-to-point communication only). One idea I had is sketched below.
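The idea (an untested sketch for a middle worker; variable names are illustrative): post all four halo transfers at once, smooth the elements that do not depend on the ghosts while the messages are in flight, and only then wait and update the two edge elements:

double ghost_left, ghost_right;
MPI_Request reqs[4];
// post the receives first, then the sends; nothing blocks yet
MPI_Irecv(&ghost_left, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &reqs[0]);
MPI_Irecv(&ghost_right, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &reqs[1]);
MPI_Isend(&temp.front(), 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &reqs[2]);
MPI_Isend(&temp.back(), 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &reqs[3]);

// smooth the interior while the halo messages are in flight
for (size_t j = 1; j + 1 < temp.size(); j++) {
    if (temp[j] != 1) {
        temp[j] = (temp[j - 1] + temp[j + 1]) / 2;
    }
}

MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE);

// finally update the two edge elements with the received ghosts
if (temp.front() != 1) temp.front() = (ghost_left + temp[1]) / 2;
if (temp.back() != 1)  temp.back()  = (ghost_right + temp[temp.size() - 2]) / 2;

The update order differs slightly from my current code, but the point is to hide the communication behind the interior work. Would something like this help, or is there a better pattern?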

0 Answers:

No answers yet.