我目前正在编写一个计算以下等式的代码: W =(XT * X)^ - 1 * XT * Y. XT是X的转置,而^ -1是乘积XT * X的倒数。
为此,我将带有矩阵的文本文件传递给程序,然后对其执行操作以获得要在等式中使用的正确矩阵格式。虽然代码有时会运行,但有些情况下我会运行代码并收到正确的答案,但有些代码会产生随机数。
例如,使用矩阵[first integer = columns-1,second integer = rows]:
4
4
4.000000,3.000000,2.000000,3.000000,200.000000
5.000000,2.000000,1.000000,7.000000,300.000000
1.000000,4.000000,2.000000,1.000000,500.000000
8.000000,8.000000,9.000000,3.000000,200.000000
它将产生XT *(XT * X)^ - 1的答案为:
0.497617 -0.166646 0.061712 -0.137570
-61.086998 -0.258283 -0.340935 -0.064228
0.186411 -0.083895 0.285920 -0.082587
-0.722773 0.207515 -0.009408 0.238550
-0.579552 0.345476 0.154362 0.055048
并且每次测试运行的数字都不会保持不变。它使用此结果矩阵乘以Y [原始矩阵中的最后一列]来完成此操作。下面是我到目前为止编写的代码示例:
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[]){
if(argc < 2){
printf("error.");
return 0;
}
FILE *fptrain = fopen(argv[1], "r");
int row, col, i, j;
fscanf(fptrain, "%d", &col);
col = col+1;
fscanf(fptrain, "%d", &row);
char ch;
//creates the original X and Y matrix
float trainX[row][col];
float trainY[row][1];
for(i=0; i<row; i++)
{
trainX[i][0] = 1.000000;
for(j=1; j<col; j++)
{
fscanf(fptrain, "%f%c", &trainX[i][j], &ch);
}
fscanf(fptrain, "%f%c", &trainY[i][0], &ch);
}
//creates the X transposed matrix
float trainXtrans[col][row];
for(i=0; i<row; i++)
{
for(j=0; j<col; j++)
{
trainXtrans[j][i] = trainX[i][j];
}
}
//multiplies X and X transposed
float trainXtemp[row][row];
int s;
int num=0;
for(i=0; i<row; i++)
{
for(j=0; j<row; j++)
{
for(s=0; s<col; s++)
{
num = num + trainX[i][s]*trainXtrans[s][j];
}
trainXtemp[i][j] = num;
num =0;
}
}
//finds the identity matrix of X times X transposed
float trainXinden[row][row*2];
for(i=0; i<row; i++)
{
for(j=0; j<row; j++)
{
trainXinden[i][j] = trainXtemp[i][j];
}
for(j=row; j<row*2; j++)
{
if(j==i+row)
{
trainXinden[i][j] = 1;
}
else{
trainXinden[i][j] = 0;
}
}
}
//finds the inverse of X times X transposed through Gauss Jordan Elimination
int k;
float divscalar;
for(i=0; i<row; i++)
{
divscalar = trainXinden[i][i];
for(j=0; j<row*2; j++)
{
trainXinden[i][j] = trainXinden[i][j]/divscalar;
}
for(k=0; k<row; k++)
{
if(i!=k)
{
float subscalar = trainXinden[k][i];
for(j=0; j<row*2; j++)
{
trainXinden[k][j] = trainXinden[k][j] - subscalar*trainXinden[i][j];
}
}
}
}
//copies over the result of gauss jordan elimination
float trainXinverse[row][row];
for(i=0; i<row; i++)
{
for(j=0; j<row; j++)
{
trainXinverse[i][j] = trainXinden[i][j+row];
}
}
//multiplies (X times X transpose) inverse by (X transposed)
float trainXinvXt[col][row];
for(i=0; i<col; i++)
{
for(j=0; j<row; j++)
{
for(s=0; s<row; s++)
{
trainXinvXt[i][j] += trainXtrans[i][s]*trainXinverse[s][j];
}
}
}
//multiples (trainXinvXt) by Y
float weight[row][1];
for(i=0; i<col; i++)
{
for(s=0; s<col-1; s++)
{
weight[i][0] += trainXinvXt[i][s]*trainY[s][0];
}
}
return 0;
}
这可能是一个记忆问题还是我的Gauss-Jordan Elimination方法抛弃了什么?