OK, this is another question related to my previous post MPI_Broadcast using vectors.
I want to scatter a 4x4 matrix so that each process receives one row (4 processes in total). I am using vectors, which requires resizing and some juggling. The same approach works fine with plain arrays, but with vectors I cannot get the desired output.
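For comparison, this is roughly the array pattern that does work for me (a minimal sketch, not my actual code; it assumes 4 ranks and a contiguous int[4][4]):

// sketch: scatter one row of a contiguous 4x4 array to each of 4 ranks
#include <cstdio>
#include "mpi.h"

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int matrix[4][4];           // contiguous storage: rows sit next to each other in memory
    if (rank == 0)
        for (int i = 0; i < 4; ++i)
            for (int j = 0; j < 4; ++j)
                matrix[i][j] = i * 4 + j;

    int row[4];                 // each rank receives exactly one row
    MPI_Scatter(&matrix[0][0], 4, MPI_INT, row, 4, MPI_INT, 0, MPI_COMM_WORLD);

    printf("rank %d got: %d %d %d %d\n", rank, row[0], row[1], row[2], row[3]);
    MPI_Finalize();
    return 0;
}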
Updated code (minimal):
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cmath>
#include <chrono>
#include <ctime>     // for time(), used to seed rand()
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include "mpi.h"
using namespace std;
const int num_rows1 = 4, num_rows2 = 4, num_cols1 = 4, num_cols2 = 4;
const int root = 0;
int i, j, k;
vector<vector<int> > matrix1(num_rows1, vector <int>(num_cols1));
vector<vector<int> > matrix2(num_rows2, vector <int>(num_cols2));
vector<vector<int> > result(num_rows1, vector <int>(num_cols1));
int finale[num_rows1][num_cols2];
vector<vector<int> > transpose_mat(num_cols2, vector <int>(num_rows2));
vector<int> column1(num_rows1);
vector<int> column2(num_cols2);
double start_time, end_time;
//Function working with the multiplication
vector<int> mult(vector<vector<int> > A, vector<int> B)
{
    //Multiplication
    for (int i = 0; i < num_rows1; ++i)
    {
        int sum = 0;
        for (int j = 0; j < num_cols1; ++j)
        {
            sum += A[i][j] * B[j];
        }
        column1[i] = sum; // column1 is already sized; push_back would append after num_rows1 stale zeros
    }
    return column1;
}
//Function generating random matrices
vector<vector<int>> generate_matrix(int nrow, int ncol)
{
    vector<vector<int>> matrix(nrow, vector<int>(ncol));
    for (int i = 0; i < nrow; ++i)
    {
        for (int j = 0; j < ncol; ++j)
        {
            matrix[i][j] = rand() % 15 - 3; // value in [-3, 11]; 15 * rand() / RAND_MAX can overflow int
        }
    }
    return matrix;
}
//function taking the transpose
vector<vector<int>> transpose(vector<vector<int> > matrix, int nrow, int ncol)
{
    //Transpose of the matrix passed in
    for (i = 0; i < nrow; ++i)
        for (j = 0; j < ncol; ++j)
        {
            transpose_mat[j][i] = matrix[i][j]; // use the parameter, not the global matrix2
        }
    cout << "Transpose " << endl;
    for (int i = 0; i < num_rows2; ++i)
    {
        for (int j = 0; j < num_cols2; ++j)
        {
            cout << transpose_mat[i][j] << " ";
        }
        cout << endl;
    }
    return transpose_mat;
}
//main function
int main(int argc, char *argv[])
{
    MPI_Status status;
    MPI_Request request;
    int tag = 1;
    int rank;
    int world_size; //Number of processes
    // Initialize the MPI environment
    MPI_Init(&argc, &argv);
    // Get the rank of the process
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // Get the name of the processor
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);
    // Get the number of processes
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    if (rank == root)
    {
        //Filling matrix1, then broadcasting it row by row
        matrix1 = generate_matrix(num_rows1, num_cols1);
        for (int i = 0; i < num_rows1; ++i)
        {
            MPI_Bcast(&matrix1[i][0], num_cols1, MPI_INT, root, MPI_COMM_WORLD);
        }
    }
    else if (rank == 1)
    {
        srand(time(NULL));
        //Filling matrix2 and taking its transpose
        matrix2 = generate_matrix(num_rows2, num_cols2);
        transpose_mat = transpose(matrix2, num_rows2, num_cols2);
    }
    if (rank > root)
    {
        //Resizing the receive buffer and posting the broadcasts that match the root's
        int size = matrix1.size();
        result.resize(size);
        for (int i = 0; i < size; i++) {
            result[i].resize(size);
        }
        for (int i = 0; i < size; ++i)
        {
            MPI_Bcast(&result[i][0], size, MPI_INT, root, MPI_COMM_WORLD);
        }
    }
    //Scattering the transposed matrix from rank 1 (note: transpose_mat is only filled on rank 1)
    int size1 = transpose_mat.size();
    column2.resize(size1);
    for (j = 0; j < num_rows2; ++j)
    {
        MPI_Scatter(&transpose_mat[0][j], size1 * size1 / world_size, MPI_INT, &column2[j], size1 * size1 / world_size, MPI_INT, 1, MPI_COMM_WORLD);
    }
    cout << "The scattered data at process " << rank << " is: " << endl;
    for (int j = 0; j < num_cols2; ++j)
    {
        cout << column2[j] << endl;
    }
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}
In the updated PNG one can see that the first two rows of the matrix are scattered: the first is received by process 0 and the second by process 2. Why do processes 1 and 3 not give the desired result? Updated PNG
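I suspect the memory layout is the issue: unlike the int[4][4] in the array version, the rows of a vector<vector<int>> each live in a separate allocation, so they are not contiguous. Below is a minimal sketch of the flattened variant I have been experimenting with (assuming 4 ranks and rank 1 as the scatter root, as in my code; the names flat and row are mine):

// sketch: scatter a 4x4 matrix stored as one contiguous std::vector<int>
#include <iostream>
#include <vector>
#include "mpi.h"

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    std::vector<int> flat(16);            // one allocation: row i starts at flat[i * 4]
    if (rank == 1)                        // rank 1 owns the data, as in the code above
        for (int i = 0; i < 16; ++i)
            flat[i] = i;

    std::vector<int> row(4);              // each rank receives one row
    MPI_Scatter(flat.data(), 4, MPI_INT, row.data(), 4, MPI_INT, 1, MPI_COMM_WORLD);

    std::cout << "rank " << rank << " got: ";
    for (int v : row) std::cout << v << " ";
    std::cout << std::endl;

    MPI_Finalize();
    return 0;
}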