I have to implement matrix inversion with MPJ Express. I have managed to send the transposed matrix and the determinant from the root process (rank = 0) to the other processes.
The root process also assigns a row interval to each worker process, and each worker has to compute the elements of the inverse matrix for those rows. That part works, but after computing the inverse-matrix elements in each worker, I have to send those rows, together with their indices, from the workers back to the root process so that the root can assemble the result matrix.
This part does not work and the program hangs. Can you help me send an array and its index (the row that array occupies in the result matrix) from each worker process to the root?
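What I think I need is a pairing like the one below (just a sketch, not my actual code; sendRowToRoot and collectRows are made-up helper names, and it assumes the row length n is known on both sides). The idea is to pack the row index into the same float message as the row values, so the root can receive from MPI.ANY_SOURCE without ever pairing an index from one worker with a row from another:

    // Worker side (hypothetical helper): send one computed row together with its index.
    // Slot 0 carries the row index, slots 1..n carry the row values, so the pair
    // always travels in a single message.
    private static void sendRowToRoot(int rowIndex, float[] line, int n) {
        float[] msg = new float[n + 1];
        msg[0] = rowIndex;
        System.arraycopy(line, 0, msg, 1, n);
        MPI.COMM_WORLD.Send(msg, 0, n + 1, MPI.FLOAT, ROOT_PROCESSOR_RANK, 0);
    }

    // Root side (hypothetical helper): receive exactly n such messages, from any
    // worker, in any order, and place each row at the index it carries.
    private static void collectRows(float[][] result, int n) {
        for (int k = 0; k < n; k++) {
            float[] msg = new float[n + 1];
            MPI.COMM_WORLD.Recv(msg, 0, n + 1, MPI.FLOAT, MPI.ANY_SOURCE, 0);
            int row = (int) msg[0];
            System.arraycopy(msg, 1, result[row], 0, n);
        }
    }

Is that the right approach? Here is my current code: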
public static void main(String[] args) {
    MPI.Init(args);
    comunicator = MPI.COMM_WORLD;
    currentProcessorRank = comunicator.Rank();
    processorsNumber = comunicator.Size();
    if (currentProcessorRank == ROOT_PROCESSOR_RANK) {
        rootProcessorAction();
    } else {
        workerProcessorAction();
    }
    MPI.Finalize();
}
private static void rootProcessorAction() {
    // Util.generateInputData(5);
    try (Scanner sc = new Scanner(new FileReader("D:\\workspace\\Inversa-mpj\\in.txt"))) {
        final int n = Util.readInt(sc);
        final float[][] result = new float[n][n];
        int[][] a = Util.readMatrix(n, sc);
        final int[][] aTranspus = Inversa.getTranspusa(a, n);
        final int[] det = new int[] { Inversa.calculeazaDet(a, n) };
        final int[] noLines = new int[] { n };
        if (det[0] == 0) {
            throw new Exception("Matricea nu e inversabila!");
        }
        // send the matrix size, the determinant and the transposed matrix to every worker
        for (int i = 1; i < processorsNumber; i++) {
            MPI.COMM_WORLD.Send(noLines, 0, 1, MPI.INT, i, 0);
            MPI.COMM_WORLD.Send(det, 0, 1, MPI.INT, i, 0);
            MPI.COMM_WORLD.Send(aTranspus, 0, n, MPI.OBJECT, i, 0);
        }
        computeLinesAssignments(n);
        // collect the computed rows back from the workers -- this is the part that hangs
        for (int i = 0; i < n; i++) {
            int index[] = new int[1];
            float line[] = new float[1];
            System.out.println("Astept pentru index=" + i);
            // waits for a message from source rank 0 (the root itself), not from the workers
            MPI.COMM_WORLD.Recv(index, 0, 1, MPI.INT, 0, 0);
            // MPI.COMM_WORLD.Recv(line, 0, 1, MPI.FLOAT, 0, 0);
            result[index[0]] = line;
            System.out.println("Am primit de la index=" + index[0]);
        }
        // Util.displayMatrix(n, result);
    } catch (FileNotFoundException e) {
        System.err.println("File not found!");
    } catch (Exception e) {
        System.out.println(e.getMessage());
    }
}
private static void computeLinesAssignments(int n) {
    // split the n rows as evenly as possible among the processorsNumber - 1 workers
    final int linesPerThread = n / (processorsNumber - 1);
    int rest = n % (processorsNumber - 1);
    int start = 0;
    int end = 0;
    for (int i = 1; i < processorsNumber; i++) {
        end = start + linesPerThread;
        if (rest != 0) {
            end++;
            rest--;
        }
        // each worker receives its [start, end) row interval
        MPI.COMM_WORLD.Send(new int[] { start }, 0, 1, MPI.INT, i, 0);
        MPI.COMM_WORLD.Send(new int[] { end }, 0, 1, MPI.INT, i, 0);
        start = end;
    }
}
private static void workerProcessorAction() {
    int rank = MPI.COMM_WORLD.Rank();
    int det[] = new int[1];
    int n[] = new int[1];
    int start[] = new int[1];
    int end[] = new int[1];
    // receive the matrix size, the determinant, the transposed matrix and the row interval
    MPI.COMM_WORLD.Recv(n, 0, 1, MPI.INT, 0, 0);
    MPI.COMM_WORLD.Recv(det, 0, 1, MPI.INT, 0, 0);
    int transpusa[][] = new int[n[0]][];
    MPI.COMM_WORLD.Recv(transpusa, 0, n[0], MPI.OBJECT, 0, 0);
    MPI.COMM_WORLD.Recv(start, 0, 1, MPI.INT, 0, 0);
    MPI.COMM_WORLD.Recv(end, 0, 1, MPI.INT, 0, 0);
    System.out.println("Procesorul " + rank + " a primit start=" + start[0] + " si end=" + end[0]);
    for (int i = start[0]; i < end[0]; i++) {
        final float[] line = new float[n[0]];
        for (int j = 0; j < n[0]; j++) {
            // cofactor of element (i, j) of the transposed matrix, divided by the determinant
            line[j] = (float) (Inversa.calculeazaDet(Inversa.eliminaLinieColoana(transpusa, i, j, n[0]),
                    (n[0] - 1))) * Inversa.semn(i, j) / det[0];
            System.out.println("calculat pentru i=" + i + " si j=" + j + " cu valoarea " + line[j]);
        }
        // send the row index back to the root; sending the row itself is still commented out
        MPI.COMM_WORLD.Send(new int[] { i }, 0, 1, MPI.INT, 0, 0);
        // MPI.COMM_WORLD.Send(line, 0, 1, MPI.FLOAT, ROOT_PROCESSOR_RANK, 1);
    }
}
}
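If the pairing sketched above is right, I think my two loops would reduce to something like this (again only a sketch, using the made-up helpers from the top of the question):

    // inside the worker's row loop, after `line` has been filled:
    sendRowToRoot(i, line, n[0]);

    // inside rootProcessorAction(), replacing the whole receive loop:
    collectRows(result, n);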