Gauss-Jordan elimination complications in the last rows

Time: 2017-10-23 17:29:48

Tags: c matrix

I am currently trying to find the inverse of a given matrix through Gauss-Jordan elimination on an augmented matrix. The input file looks like this:

4
10
3.000000,1.000000,1180.000000,1955.000000,221900.000000
3.000000,2.250000,2570.000000,1951.000000,538000.000000
2.000000,1.000000,770.000000,1933.000000,180000.000000
4.000000,3.000000,1960.000000,1965.000000,604000.000000
3.000000,2.000000,1680.000000,1987.000000,510000.000000
4.000000,4.500000,5420.000000,2001.000000,1230000.000000
3.000000,2.250000,1715.000000,1995.000000,257500.000000
3.000000,1.500000,1060.000000,1963.000000,291850.000000
3.000000,1.000000,1780.000000,1960.000000,229500.000000
3.000000,2.500000,1890.000000,2003.000000,323000.000000

The first integer is the number of columns in X and the second is the number of rows; a column of 1s is prepended to these elements, and the last column of the file forms the matrix Y. Below is the algorithm I created to find W = (X^T * X)^-1 * X^T * Y, where W is the weight vector, X^T is the transpose of X, and ^-1 denotes taking the inverse of the product.
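
For reference, here is a minimal standalone sketch of the shapes that formula implies; the fixed sizes below are taken from the sample input (10 rows, and 5 columns once the column of 1s is added) and are assumptions for illustration only, not part of the program that follows.

#include <stdio.h>

#define ROWS 10 /* second integer in the sample file */
#define COLS 5  /* first integer (4) plus the prepended column of 1s */

int main(void)
{
        double X[ROWS][COLS] = {{0}};   /* design matrix, read from the file in practice */
        double Xt[COLS][ROWS];          /* X transposed */
        double XtX[COLS][COLS] = {{0}}; /* X^T * X comes out COLS x COLS */
        int i, j, s;

        //build the transpose of X
        for(i=0; i<ROWS; i++)
        {
                for(j=0; j<COLS; j++)
                {
                        Xt[j][i] = X[i][j];
                }
        }

        //X^T * X: the inner dimension is ROWS
        for(i=0; i<COLS; i++)
        {
                for(j=0; j<COLS; j++)
                {
                        for(s=0; s<ROWS; s++)
                        {
                                XtX[i][j] += Xt[i][s]*X[s][j];
                        }
                }
        }

        printf("X^T * X is %d x %d\n", COLS, COLS);
        return 0;
}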

#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[]){
        if(argc < 2){
                printf("error.");
                return 0;
        }
        FILE *fptrain = fopen(argv[1], "r");
        //FILE *fptest = fopen(argv[2], "r");
        if(fptrain == NULL)
        {
                printf("error.");
                return 0;
        }
        int row, col, i, j;
        fscanf(fptrain, "%d", &col);
        col = col+1;
        fscanf(fptrain, "%d", &row);
        char ch;

        //creates the original X and Y matrix
        float trainX[row][col];
        float trainY[row][1];
        for(i=0; i<row; i++)
        {
                trainX[i][0] = 1.000000;
                for(j=1; j<col; j++)
                {
                        fscanf(fptrain, "%f%c", &trainX[i][j], &ch);
                }

                        fscanf(fptrain, "%f%c", &trainY[i][0], &ch);
        }

        //builds the transpose of X, used below and again when forming the weights
        double trainXtrans[col][row];
        for(i=0; i<row; i++)
        {
                for(j=0; j<col; j++)
                {
                        trainXtrans[j][i] = trainX[i][j];
                }
        }

        //multiplies X by X transposed
        double trainXtemp[row][row];
        int s;
        double num = 0;
        for(i=0; i<row; i++)
        {
                for(j=0; j<row; j++)
                {
                        for(s=0; s<col; s++)
                        {
                                num = num + trainX[i][s]*trainXtrans[s][j];
                        }
                        trainXtemp[i][j] = num;
                        num = 0;
                }
        }

        //builds the augmented matrix [X*X^T | I] for Gauss-Jordan elimination
        double trainXinden[row][row*2];
        for(i=0; i<row; i++)
        {
                for(j=0; j<row; j++)
                {
                        trainXinden[i][j] = trainXtemp[i][j];
                }
                for(j=row; j<row*2; j++)
                {
                        if(j==i+row)
                        {
                                trainXinden[i][j] = 1.000000;
                        }
                        else{
                                trainXinden[i][j] = 0.000000;
                        }
                }
        }
        //finds the inverse of X times X transposed through Gauss-Jordan elimination
        int k;
        double divscalar;
        for(i=0; i<row; i++)
        {
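                //divscalar is the pivot entry on the diagonal of the current row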
                divscalar = trainXinden[i][i];
                for(j=0; j<row*2; j++)
                {
                        if(trainXinden[i][j] != 0)
                        {
                                trainXinden[i][j] = trainXinden[i][j]/divscalar;
                        }
                }
                for(k=0; k<row; k++)
                {
                        if(i!=k)
                        {
                                double subscalar = trainXinden[k][i];
                                for(j=0; j<row*2; j++)
                                {
                                        trainXinden[k][j] = trainXinden[k][j] - subscalar*trainXinden[i][j];
                                }
                        }
                }
        }
        //copies over the result of Gauss-Jordan elimination (the right half of the augmented matrix holds the inverse)
        double trainXinverse[row][row];
        for(i=0; i<row; i++)
        {
                for(j=0; j<row; j++)
                {
                        trainXinverse[i][j] = trainXinden[i][j+row];
                }
        }
        //multiplies X transposed by the inverse of (X times X transposed)
        double trainXinvXt[col][row];
        for(i=0; i<col; i++)
        {
                for(j=0; j<row; j++)
                {
                        trainXinvXt[i][j] = 0;
                        for(s=0; s<row; s++)
                        {
                                trainXinvXt[i][j] += trainXtrans[i][s]*trainXinverse[s][j];
                        }
                }
        }
        //multiplies (trainXinvXt) by Y to produce the weight vector
        double weight[col][1];
        for(i=0; i<col; i++)
        {
                weight[i][0] = 0;
                for(s=0; s<row; s++)
                {
                        weight[i][0] += trainXinvXt[i][s]*trainY[s][0];
                }
        }
        fclose(fptrain);
        return 0;
}

This computes the inverse correctly, except for complications in a few rows of the last columns; because of that, the inverse matrix is computed incorrectly in the last loop. What is the best way to solve this? [The operations must be performed on the augmented matrix, and rows cannot be swapped.]
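
For context, here is a minimal standalone sketch of Gauss-Jordan inversion of an augmented [A | I] matrix without row swaps, as described above; the 3x3 test matrix and the near-zero pivot check are illustrative assumptions and are not part of the program above.

#include <stdio.h>
#include <math.h>

#define N 3 /* illustrative size only */

int main(void)
{
        //augmented matrix [A | I]
        double aug[N][2*N] = {
                {4, 7, 2, 1, 0, 0},
                {3, 6, 1, 0, 1, 0},
                {2, 5, 3, 0, 0, 1}
        };
        int i, j, k;
        for(i=0; i<N; i++)
        {
                double pivot = aug[i][i];
                if(fabs(pivot) < 1e-12)
                {
                        //without row swaps a zero pivot cannot be repaired
                        printf("zero pivot in row %d\n", i);
                        return 1;
                }
                //scale the pivot row so the pivot becomes 1
                for(j=0; j<2*N; j++)
                {
                        aug[i][j] = aug[i][j]/pivot;
                }
                //eliminate the pivot column from every other row
                for(k=0; k<N; k++)
                {
                        if(i!=k)
                        {
                                double subscalar = aug[k][i];
                                for(j=0; j<2*N; j++)
                                {
                                        aug[k][j] = aug[k][j] - subscalar*aug[i][j];
                                }
                        }
                }
        }
        //the right half of aug now holds the inverse of A
        for(i=0; i<N; i++)
        {
                for(j=0; j<N; j++)
                {
                        printf("%f ", aug[i][j+N]);
                }
                printf("\n");
        }
        return 0;
}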

0 Answers:

No answers