Unexplained errors in namespace thrust::system::cuda::thrust, specifically "system_error" and "cuda_category"

Asked: 2016-07-18 19:18:25

Tags: c++ cuda thrust

I am trying to use thrust::raw_pointer_cast to cast a raw pointer so I can capture output in a functor. I have tried multiple ways of passing a pointer to a float, but I keep running into memory access violations and two Intellisense errors: thrust::system::cuda::thrust has no member "system_error" and has no member "cuda_category". Strangely, the errors appear to point into throw_on_error.hpp, which seems to be part of the BULK library, even though I am not referencing BULK directly. I am new to C++, so I may be misunderstanding pointers, or I may be missing some kind of include.

Below is the version of the code I am trying to get working. Any help would be greatly appreciated.

#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/sequence.h>
#include <cstdlib>
#include <ctime>
#include <vector>
#include <algorithm>
#include <memory.h>
#include <cstdio>
#include <thread>
#include <thrust/copy.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>


using namespace std;

const int num_segs = 1;  // number of segments to sort
const int num_vals = 5;  // number of values in each segment


template <typename T> 
struct sort_vector
{

    T *Ndata;
    T *Ddata;
    T *answer;

    sort_vector(T *_Ndata, T *_Ddata, float *a) : Ndata(_Ndata), Ddata(_Ddata), answer(a) {};


    __host__ __device__ void operator()(int idx)
    {
        thrust::sort(thrust::seq, Ndata + idx*num_vals, Ndata + ((idx + 1)*num_vals));
        thrust::sort(thrust::seq, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));
        *answer = thrust::reduce(thrust::device, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));

    }
};

int main() {

    thrust::device_vector<float> d_Ndata(num_segs*num_vals);
    d_Ndata[0] = 30;
    d_Ndata[1] = 5.5;
    d_Ndata[2] = 60;
    d_Ndata[3] = 21;
    d_Ndata[4] = 2;

    thrust::device_vector<float> d_Ddata(num_segs*num_vals);
    d_Ddata[0] = 50;
    d_Ddata[1] = 9.5;
    d_Ddata[2] = 30;
    d_Ddata[3] = 8.1;
    d_Ddata[4] = 1;

    cout << "original norm" << endl;
    int f = 0;
    while (f < num_segs*num_vals){
        cout << d_Ndata[f] << endl;
        f++;
    }

    cout << "original dut" << endl;
    int g = 0;
    while (g < num_segs*num_vals){
        cout << d_Ddata[g] << endl;
        g++;
    }

    thrust::device_vector<int> d_idxs(num_segs);
    thrust::sequence(d_idxs.begin(), d_idxs.end());

    float *answer = (float*)malloc(sizeof(float));

    cudaStream_t s1;
    cudaStreamCreate(&s1);


    clock_t start;
    double duration;
    start = clock();

    thrust::for_each(thrust::cuda::par.on(s1),
        d_idxs.begin(),
    d_idxs.end(), sort_vector<float>(thrust::raw_pointer_cast(d_Ndata.data()), thrust::raw_pointer_cast(d_Ddata.data()), thrust::raw_pointer_cast(answer)));

    cudaStreamSynchronize(s1);

    cout << "sum" << endl;
    cout << answer << endl;

    //free(answer);

    cudaStreamDestroy(s1);


    duration = (clock() - start) / (double)CLOCKS_PER_SEC;
    cout << "time " << duration << endl;

    cin.get();
    return 0;
}

1 Answer:

Answer 0 (score: 1):

The main issue is here:

float *answer = (float*)malloc(sizeof(float));

This creates an allocation in host memory. When you then pass that pointer to your functor:

 thrust::raw_pointer_cast(answer)

you are passing a pointer to host memory to a functor that will run in device code. If the functor attempts to access that location, it is an illegal access. In CUDA, device code is not allowed to access host pointer locations directly, and vice versa (ignoring various concepts that are not in use here).

So when your functor code does this:

*answer = thrust::reduce(thrust::device, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));

it triggers an illegal access when it attempts to write to *answer.

A simple solution is to make answer point to a properly allocated location in device memory. The following code demonstrates this change and runs without error:

$ cat t1190.cu
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/sequence.h>
#include <cstdlib>
#include <ctime>
#include <vector>
#include <algorithm>
#include <memory.h>
#include <cstdio>
#include <thread>
#include <thrust/copy.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>


using namespace std;

const int num_segs = 1;  // number of segments to sort
const int num_vals = 5;  // number of values in each segment


template <typename T>
struct sort_vector
{

    T *Ndata;
    T *Ddata;
    T *answer;

    sort_vector(T *_Ndata, T *_Ddata, float *a) : Ndata(_Ndata), Ddata(_Ddata), answer(a) {};


    __host__ __device__ void operator()(int idx)
    {
        thrust::sort(thrust::seq, Ndata + idx*num_vals, Ndata + ((idx + 1)*num_vals));
        thrust::sort(thrust::seq, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));
        *answer = thrust::reduce(thrust::device, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));

    }
};

int main() {

    thrust::device_vector<float> d_Ndata(num_segs*num_vals);
    d_Ndata[0] = 30;
    d_Ndata[1] = 5.5;
    d_Ndata[2] = 60;
    d_Ndata[3] = 21;
    d_Ndata[4] = 2;

    thrust::device_vector<float> d_Ddata(num_segs*num_vals);
    d_Ddata[0] = 50;
    d_Ddata[1] = 9.5;
    d_Ddata[2] = 30;
    d_Ddata[3] = 8.1;
    d_Ddata[4] = 1;

    cout << "original norm" << endl;
    int f = 0;
    while (f < num_segs*num_vals){
        cout << d_Ndata[f] << endl;
        f++;
    }

    cout << "original dut" << endl;
    int g = 0;
    while (g < num_segs*num_vals){
        cout << d_Ddata[g] << endl;
        g++;
    }

    thrust::device_vector<int> d_idxs(num_segs);
    thrust::sequence(d_idxs.begin(), d_idxs.end());

    thrust::device_vector<float> dv_answer(1);
    //float *answer = (float*)malloc(sizeof(float));

    cudaStream_t s1;
    cudaStreamCreate(&s1);


    clock_t start;
    double duration;
    start = clock();

    thrust::for_each(thrust::cuda::par.on(s1),
        d_idxs.begin(),
    d_idxs.end(), sort_vector<float>(thrust::raw_pointer_cast(d_Ndata.data()), thrust::raw_pointer_cast(d_Ddata.data()), thrust::raw_pointer_cast(dv_answer.data())));

    cudaStreamSynchronize(s1);

    cout << "sum" << endl;
    cout << dv_answer[0] << endl;

    //free(answer);

    cudaStreamDestroy(s1);


    duration = (clock() - start) / (double)CLOCKS_PER_SEC;
    cout << "time " << duration << endl;

    return 0;
}
$ nvcc -std=c++11  t1190.cu -o t1190
$ ./t1190
original norm
30
5.5
60
21
2
original dut
50
9.5
30
8.1
1
sum
98.6
time 0.000919
$
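
As an aside, the same idea can also be sketched with a plain cudaMalloc allocation instead of a thrust::device_vector, copying the result back to the host with cudaMemcpy afterwards. This is only a rough illustration of the change to main (the names d_answer and h_answer are illustrative, not taken from the code above):

// Illustrative sketch only: allocate the single result slot in device memory
// with cudaMalloc, then copy it back to the host after the for_each completes.
float *d_answer = nullptr;
cudaMalloc(&d_answer, sizeof(float));

thrust::for_each(thrust::cuda::par.on(s1),
    d_idxs.begin(), d_idxs.end(),
    sort_vector<float>(thrust::raw_pointer_cast(d_Ndata.data()),
                       thrust::raw_pointer_cast(d_Ddata.data()),
                       d_answer));   // already a raw device pointer, no cast needed

cudaStreamSynchronize(s1);

float h_answer = 0.0f;
cudaMemcpy(&h_answer, d_answer, sizeof(float), cudaMemcpyDeviceToHost);
cout << "sum" << endl;
cout << h_answer << endl;

cudaFree(d_answer);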

I won't try to explain the Intellisense errors. Intellisense generally does not play well with CUDA, and as you can see, Intellisense will flag things that actually compile fine (for example, the code in this question). If the CUDA code compiles correctly, it is usually safe to ignore what Intellisense reports.

A couple of additional comments:

  1. You seem to be on an odd path for a Thrust beginner, running Thrust algorithms from inside a functor. There is nothing technically wrong with what you are doing, but this kind of code is usually reserved for specific situations rather than for general Thrust usage. Since num_segs is 1 in this example, you will be running a single CUDA thread to do all of this work, which will certainly not be performant. If you plan to scale this up later, fine. I have made similar comments before, so I won't elaborate further here.

  2. This functor writes its result to a single location (*answer). If you scale this up to multiple threads, you will have to give the functor multiple locations to write to (one per thread, or one per element of the vector passed to for_each), or the threads will overwrite each other's results; a rough sketch of that change follows below.
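
A rough sketch of that change, assuming everything else in the code above stays the same: make answer point to an array with one element per segment and have each thread write to its own slot.

// Illustrative sketch only: one result slot per segment, written at answer[idx]
template <typename T>
struct sort_vector
{
    T *Ndata;
    T *Ddata;
    T *answer;   // now an array with one element per segment

    sort_vector(T *_Ndata, T *_Ddata, T *a) : Ndata(_Ndata), Ddata(_Ddata), answer(a) {};

    __host__ __device__ void operator()(int idx)
    {
        thrust::sort(thrust::seq, Ndata + idx*num_vals, Ndata + ((idx + 1)*num_vals));
        thrust::sort(thrust::seq, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));
        // each thread stores its own segment sum instead of overwriting a shared location
        answer[idx] = thrust::reduce(thrust::device, Ddata + idx*num_vals, Ddata + ((idx + 1)*num_vals));
    }
};

// ...and in main, size the result vector to match the number of segments:
thrust::device_vector<float> dv_answer(num_segs);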