Can anyone help me find the following error in my code:
" MPI_Send中的致命错误:排名无效,错误堆栈: MPI_Send(190):MPI_Send(buf = 0x1123060,count = 40,MPI_FLOAT,dest = MPI_ANY_SOURCE,tag = 25,MPI_COMM_WORLD)失败 MPI_Send(109):无效的等级值为-2但必须是非负且小于1"
int main(int argc, char* argv[])
{
name = "quick.dat";
n = 40;
auto start = std::chrono::high_resolution_clock::now();
int i;
double sp;
double *ap, *ae, *aw, *aww, *cte;
ap = new double[n];
ae = new double[n];
aw = new double[n];
aww = new double[n];
cte = new double[n];
int my_rank, nproc;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
if (my_rank == 0)
{
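// Rank 0: send the coefficient arrays out, compute the first chunk of indices, then receive the other ranks' results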
MPI_Send(&ap[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD);
MPI_Send(&ae[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD);
MPI_Send(&aw[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD);
MPI_Send(&aww[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD);
MPI_Send(&cte[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD);
for (i = 0; i < (n - 1) / nproc + 1; i++)
{
if (i == 0)
{
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) +
1. / 3.0 * a(i * 1.0 / n) * D(i * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
cte[i] = -(8. / 3. * a(i * 1.0 / n) * D(i * 1.0 / n) +
2.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) +
a(i * 1.0 / n) * u(i * 1.0 / n));
ap[i] = -(ae[i] - cte[i] + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i == 1)
{
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
7. / 8. * a(i * 1.0 / n) * u(i * 1.0 / n) +
1.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
cte[i] = 0.25 * a(i * 1.0 / n) * u(i * 1.0 / n);
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
ap[i] = -(ae[i] + aw[i] - cte[i] + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i == n - 1)
{
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
1. / 3.0 * a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) +
6.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
aww[i] = -1.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
cte[i] = 0;
sp = 8.0 / 3.0 * a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
ap[i] = -(aww[i] + aw[i] + sp + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i != 0 && i != 1 && i != n - 1)
{
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
6. / 8. * a(i * 1.0 / n) * u(i * 1.0 / n) +
1.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
aww[i] = -1.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
cte[i] = 0;
ap[i] = -(aww[i] + aw[i] + ae[i] - cte[i] +
a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
}
}
MPI_Recv(&ap[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&ae[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&aw[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&aww[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&cte[0], n, MPI_FLOAT, MPI_ANY_SOURCE, 25, MPI_COMM_WORLD, &status);
}
else if (my_rank != 0 && my_rank != nproc - 1)
{
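// Intermediate ranks: receive the coefficient arrays from rank 0, compute their own index range, then send the results back to rank 0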
MPI_Recv(&ap[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&ae[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&aw[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&aww[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&cte[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
for (i = ((n - 1) / nproc) * my_rank + 1; i < ((n - 1) / nproc) * (my_rank + 1) + 1; i++)
{
if (i == 0)
{
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) +
1. / 3.0 * a(i * 1.0 / n) * D(i * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
cte[i] = -(8. / 3. * a(i * 1.0 / n) * D(i * 1.0 / n) +
2.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) +
a(i * 1.0 / n) * u(i * 1.0 / n));
ap[i] = -(ae[i] - cte[i] + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i == 1)
{
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
7. / 8. * a(i * 1.0 / n) * u(i * 1.0 / n) +
1.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
cte[i] = 0.25 * a(i * 1.0 / n) * u(i * 1.0 / n);
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
ap[i] = -(ae[i] + aw[i] - cte[i] + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i == n - 1)
{
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
1. / 3.0 * a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) +
6.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
aww[i] = -1.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
cte[i] = 0;
sp = 8.0 / 3.0 * a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
ap[i] = -(aww[i] + aw[i] + sp + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i != 0 && i != 1 && i != n - 1)
{
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
6. / 8. * a(i * 1.0 / n) * u(i * 1.0 / n) +
1.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
aww[i] = -1.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
cte[i] = 0;
ap[i] = -(aww[i] + aw[i] + ae[i] - cte[i] +
a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
}
}
MPI_Send(&ap[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD);
MPI_Send(&ae[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD);
MPI_Send(&aw[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD);
MPI_Send(&aww[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD);
MPI_Send(&cte[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD);
}
else if (my_rank == nproc - 1 && nproc != 1)
{
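// Last rank: receive the coefficient arrays from rank 0, compute the remaining indices up to n - 1, then send the results back to rank 0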
MPI_Recv(&ap[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&ae[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&aw[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&aww[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
MPI_Recv(&cte[0], n, MPI_FLOAT, 0, 25, MPI_COMM_WORLD, &status);
for (i = ((n - 1) / nproc) * my_rank + 1; i < n; i++)
{
if (i == 0)
{
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) +
1. / 3.0 * a(i * 1.0 / n) * D(i * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
cte[i] = -(8. / 3. * a(i * 1.0 / n) * D(i * 1.0 / n) +
2.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) +
a(i * 1.0 / n) * u(i * 1.0 / n));
ap[i] = -(ae[i] - cte[i] + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i == 1)
{
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
7. / 8. * a(i * 1.0 / n) * u(i * 1.0 / n) +
1.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
cte[i] = 0.25 * a(i * 1.0 / n) * u(i * 1.0 / n);
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
ap[i] = -(ae[i] + aw[i] - cte[i] + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i == n - 1)
{
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
1. / 3.0 * a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) +
6.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
aww[i] = -1.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
cte[i] = 0;
sp = 8.0 / 3.0 * a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
ap[i] = -(aww[i] + aw[i] + sp + a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
continue;
}
if (i != 0 && i != 1 && i != n - 1)
{
ae[i] = a((i + 1) * 1.0 / n) * D((i + 1) * 1.0 / n) -
3.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
aw[i] = a(i * 1.0 / n) * D(i * 1.0 / n) +
6. / 8. * a(i * 1.0 / n) * u(i * 1.0 / n) +
1.0 / 8.0 * a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n);
aww[i] = -1.0 / 8.0 * a(i * 1.0 / n) * u(i * 1.0 / n);
cte[i] = 0;
ap[i] = -(aww[i] + aw[i] + ae[i] - cte[i] +
a((i + 1) * 1.0 / n) * u((i + 1) * 1.0 / n) -
a(i * 1.0 / n) * u(i * 1.0 / n));
}
}
MPI_Send(&ap[0], n, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD);
MPI_Send(&ae[0], n, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD);
MPI_Send(&aw[0], n, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD);
MPI_Send(&aww[0], n, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD);
MPI_Send(&cte[0], n, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD);
}
// double t22 = MPI_Wtime();
MPI_Finalize();
// printf("[IJK] Compute time [s] : %6.3f \n", t22-t11 );
iteration(aww, aw, ap, ae, cte);
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
std::cout << "Time to Execute the FVM code "
<< " is : " << diff.count() << " s\n";
return 0;
}
Answer (score: 0)
The message is actually quite clear: MPI_Send: Invalid rank. You cannot pass MPI_ANY_SOURCE as the destination rank of MPI_Send; in point-to-point communication there has to be one specific receiver. What you seem to want is the collective MPI_Bcast, e.g.:

MPI_Bcast(&ap[0], n, MPI_FLOAT, 0, MPI_COMM_WORLD);

This has to be executed on all ranks.
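For illustration only, here is a minimal, self-contained sketch of that broadcast pattern, not a drop-in fix for the program above: a single array stands in for ap/ae/aw/aww/cte, rank 0 fills it, every rank calls MPI_Bcast, and each rank then works on its own index range. It uses MPI_DOUBLE because the buffers in the question are declared double* (the original calls pass MPI_FLOAT, which is a separate mismatch).

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);

    int my_rank = 0, nproc = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    const int n = 40;
    std::vector<double> ap(n, 0.0);            // stands in for ap/ae/aw/aww/cte

    if (my_rank == 0)                          // only the root fills the data
        for (int i = 0; i < n; ++i) ap[i] = i;

    // Collective call: every rank executes the same line, rank 0 is the root.
    MPI_Bcast(ap.data(), n, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    // Every rank now holds the same array and can work on its own slice.
    int lo = n * my_rank / nproc;
    int hi = n * (my_rank + 1) / nproc;
    double local_sum = 0.0;
    for (int i = lo; i < hi; ++i) local_sum += ap[i];

    std::printf("rank %d handled [%d, %d), partial sum %.1f\n",
                my_rank, lo, hi, local_sum);

    MPI_Finalize();
    return 0;
}

Compiled with mpic++ and launched with, e.g., mpirun -np 4, every rank reaches the MPI_Bcast, so nothing blocks. If you keep point-to-point calls instead, each MPI_Send needs a concrete destination rank and a concrete tag; MPI_ANY_SOURCE and MPI_ANY_TAG are wildcards that are only valid on the receive side.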