Scaled sparse matrix-vector multiplication in CUSP?

Asked: 2017-06-19 00:06:37

Tags: cuda gpu sparse-matrix matrix-multiplication cusp-library

In CUSP there is a multiply routine for computing SpMV (sparse matrix-vector multiplication) that takes user-supplied initialize, combine, and reduce operators:

template <typename LinearOperator,
          typename MatrixOrVector1,
          typename MatrixOrVector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void multiply(const LinearOperator&  A,
              const MatrixOrVector1& B,
              MatrixOrVector2&       C,
              UnaryFunction   initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce);
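
As far as I understand (from the CUSP examples, so treat the exact functors as my assumption), the conventional y = A * x corresponds to an initialize that returns zero, multiplication as combine, and plus as reduce:

// my understanding of the conventional SpMV instantiation (assumption, not verified against CUSP internals)
cusp::constant_functor<int> initialize;  // start each y[i] at 0
thrust::multiplies<int>     combine;     // A(i,j) * x[j]
thrust::plus<int>           reduce;      // sum the combined terms over row i
cusp::multiply(A, x, y, initialize, combine, reduce);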

From this interface it looks as if custom combine and reduce operations should work for any matrix/vector multiplication. I assume CUSP supports computing the SpMV with other combine and reduce functions defined in thrust/functional.h, not just multiplies and plus. For example, can I use thrust::plus to replace the original combine function (i.e., multiplication)? I also assume this scaled SpMV supports sparse matrices in the COO, CSR, DIA, and HYB formats.
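
To make the result I expect concrete, here is a minimal CPU sketch (my own reference code and naming, not CUSP's implementation): implicit zeros participate as if the matrix were dense, i.e. y[i] starts from initialize and then folds combine(A(i,j), x[j]) over all columns j with reduce.

#include <vector>

// reference_spmv is my own name; this is only a sketch of the semantics
// I expect, with the COO entries expanded into a dense row-major matrix
// so that the implicit zeros participate in combine/reduce.
template <typename UnaryFunction, typename BinaryFunction1, typename BinaryFunction2>
void reference_spmv(const int* I, const int* J, const int* V, int num_entries,
                    const int* x, int* y, int num_rows, int num_cols,
                    UnaryFunction initialize,
                    BinaryFunction1 combine,
                    BinaryFunction2 reduce)
{
    // expand the COO entries into a dense matrix, zeros made explicit
    std::vector<int> A(num_rows * num_cols, 0);
    for (int n = 0; n < num_entries; ++n)
        A[I[n] * num_cols + J[n]] = V[n];

    for (int i = 0; i < num_rows; ++i)
    {
        y[i] = initialize(y[i]);
        for (int j = 0; j < num_cols; ++j)
            y[i] = reduce(y[i], combine(A[i * num_cols + j], x[j]));
    }
}

With initialize giving 0 and combine = reduce = thrust::plus, all-ones data gives y[i] = (row sum of A) + num_cols, which is how I arrive at the expected vector further down.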

However, when I tested the example below in a.cu, in which the matrix A is in COO format and the plus operator is used for combine, I got a wrong answer. I compiled it with: nvcc a.cu -o a.

#include <cusp/csr_matrix.h>
#include <cusp/monitor.h>
#include <cusp/multiply.h>
#include <cusp/print.h>
#include <cusp/krylov/cg.h>

int main(void)
{
    // COO format in host memory
    int   host_I[13] = {0,0,1,1,2,2,2,3,3,3,4,5,5}; // COO row indices
    int   host_J[13] = {0,1,1,2,2,4,6,3,4,5,5,5,6}; // COO column indices
    int   host_V[13] = {1,1,1,1,1,1,1,1,1,1,1,1,1}; // COO values (all ones)
    // x and y arrays in host memory
    int host_x[7] = {1,1,1,1,1,1,1};
    int host_y[6] = {0,0,0,0,0,0};

    // allocate device memory for COO format
    int   * device_I;
    cudaMalloc(&device_I, 13 * sizeof(int));
    int   * device_J;
    cudaMalloc(&device_J, 13 * sizeof(int));
    int * device_V;
    cudaMalloc(&device_V, 13 * sizeof(int));

    // allocate device memory for x and y arrays
    int * device_x;
    cudaMalloc(&device_x, 7 * sizeof(int));
    int * device_y;
    cudaMalloc(&device_y, 6 * sizeof(int));

    // copy raw data from host to device
    cudaMemcpy(device_I, host_I, 13 * sizeof(int),   cudaMemcpyHostToDevice);
    cudaMemcpy(device_J, host_J, 13 * sizeof(int),   cudaMemcpyHostToDevice);
    cudaMemcpy(device_V, host_V, 13 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_x, host_x,  7 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_y, host_y,  6 * sizeof(int), cudaMemcpyHostToDevice);

    // matrices and vectors now reside on the device

    // *NOTE* raw pointers must be wrapped with thrust::device_ptr!
    thrust::device_ptr<int>   wrapped_device_I(device_I);
    thrust::device_ptr<int>   wrapped_device_J(device_J);
    thrust::device_ptr<int>   wrapped_device_V(device_V);
    thrust::device_ptr<int>   wrapped_device_x(device_x);
    thrust::device_ptr<int>   wrapped_device_y(device_y);

    // use array1d_view to wrap the individual arrays
    typedef cusp::array1d_view< thrust::device_ptr<int> > DeviceIndexArrayView;
    typedef cusp::array1d_view< thrust::device_ptr<int> > DeviceValueArrayView;

    DeviceIndexArrayView row_indices   (wrapped_device_I, wrapped_device_I + 13);
    DeviceIndexArrayView column_indices(wrapped_device_J, wrapped_device_J + 13);
    DeviceValueArrayView values        (wrapped_device_V, wrapped_device_V + 13);
    DeviceValueArrayView x             (wrapped_device_x, wrapped_device_x + 7);
    DeviceValueArrayView y             (wrapped_device_y, wrapped_device_y + 6);

    // combine the three array1d_views into a coo_matrix_view
    typedef cusp::coo_matrix_view<DeviceIndexArrayView,
            DeviceIndexArrayView,
            DeviceValueArrayView> DeviceView;

    // construct a coo_matrix_view from the array1d_views
    DeviceView A(6, 7, 13, row_indices, column_indices, values);

    std::cout << "\ndevice coo_matrix_view" << std::endl;
    cusp::print(A);
    cusp::constant_functor<int> initialize; // start each y[i] at the constant 0
    thrust::plus<int> combine;              // combine: A(i,j) + x[j] instead of the usual product
    thrust::plus<int> reduce;               // reduce: sum over each row
    cusp::multiply(A, x, y, initialize, combine, reduce);
    std::cout << "\nx array" << std::endl;
    cusp::print(x);
    std::cout << "\n y array, y = A * x" << std::endl;
    cusp::print(y);

    cudaMemcpy(host_y, device_y,  6 * sizeof(int), cudaMemcpyDeviceToHost);

    // free device arrays
    cudaFree(device_I);
    cudaFree(device_J);
    cudaFree(device_V);
    cudaFree(device_x);
    cudaFree(device_y);

    return 0;
}

I got the following output:

device coo_matrix_view
sparse matrix <6, 7> with 13 entries
              0              0        (1)
              0              1        (1)
              1              1        (1)
              1              2        (1)
              2              2        (1)
              2              4        (1)
              2              6        (1)
              3              3        (1)
              3              4        (1)
              3              5        (1)
              4              5        (1)
              5              5        (1)
              5              6        (1)
x array
array1d <7>

        (1)
        (1)
        (1)
        (1)
        (1)
        (1)
        (1)
 y array, y = A * x
array1d <6>
        (4)
        (4)
        (6)
        (6)
        (2)
        (631)

The vector I got looks strange; I think the correct answer should be (my arithmetic is spelled out after the list):

[9,
9,
10,
10,
8,
9]
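
(My own arithmetic, for reference: under the dense reading sketched above, y[i] = nnz_i + 7, since every stored value and every x entry is 1; folding only the 13 stored entries would instead give y[i] = 2 * nnz_i.)

row   nnz_i   dense: nnz_i + 7   stored-only: 2 * nnz_i
 0      2            9                    4
 1      2            9                    4
 2      3           10                    6
 3      3           10                    6
 4      1            8                    2
 5      2            9                    4

The printed output matches the stored-only column everywhere except the last row, which is part of why the result looks broken to me.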

So I am not sure whether combine and reduce can be replaced like this for sparse matrix formats such as COO, or whether the way my code calls multiply is incorrect. Could you give me some help? Any information would be helpful.

Thanks!

1 Answer:

Answer 0 (score: 1)

From a very brief reading of the code and some instrumentation of your example, this appears to be something badly broken in CUSP that causes the problem for this use case. The code seems to work correctly for the case where the combine operator is multiplication only by accident: the spurious operations it performs on zero elements do not affect the reduction (i.e., it just sums a lot of extra zeros).
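
To illustrate the failure mode (a standalone sketch, not CUSP's actual kernel code): padding a row with implicit zero entries only leaves the result unchanged when combine(0, x_j) is the identity element of the reduction, which holds for multiply-combine but not for plus-combine.

#include <thrust/functional.h>
#include <iostream>

int main()
{
    thrust::multiplies<int> combine_mul;
    thrust::plus<int>       combine_add;
    thrust::plus<int>       reduce;

    const int a_ij    = 0; // a padded (implicit zero) matrix entry
    const int x_j     = 1; // the vector entry it touches
    const int partial = 4; // some partial row sum already accumulated

    // multiply-combine: the padded term is 0 * 1 = 0, neutral for plus-reduce
    std::cout << reduce(partial, combine_mul(a_ij, x_j)) << std::endl; // prints 4
    // plus-combine: the padded term is 0 + 1 = 1, which corrupts the row sum
    std::cout << reduce(partial, combine_add(a_ij, x_j)) << std::endl; // prints 5

    return 0;
}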