Exception bulk_kernel_by_value in transform_reduce

Asked: 2015-05-29 07:23:21

Tags: c++ c++11 cuda

I am working on an optimization problem that involves various mathematical functions of a similar form, so I wrap them in a FunctionObj:

template <typename T>
struct FunctionObj
{
    T a;
     FunctionObj(): a(1)
    {
    }
};

and define FuncEval to evaluate it:

template <typename T>
__host__ __device__  inline T FuncEval(const FunctionObj<T> &f_obj, T x)
{
    return f_obj.a+x;
}

What I really want to compute is sum{func(x)}, so I define a FuncEvalF functor to use with thrust::transform_reduce:

template <typename T>
struct FuncEvalF
{
    const FunctionObj<T>& f_obj;
    __host__ __device__ inline FuncEvalF(const FunctionObj<T>& in_f_obj) : f_obj(in_f_obj)
    {
    }
    __host__ __device__ inline T operator()(T x)
    {
        return FuncEval(f_obj, x);
    }
};

template <typename T>
__host__ __device__ inline T BatchFuncEval(const FunctionObj<T>  &f_obj, int size, const T *x_in);
template<>
inline float BatchFuncEval< float>(const FunctionObj<float>  &f_obj, int size, const float *x_in)
{
    return thrust::transform_reduce(thrust::device, thrust::device_pointer_cast(x_in), thrust::device_pointer_cast(x_in + size), FuncEvalF<float>(f_obj), static_cast<float>(0), thrust::plus<float>());
}

Finally, in main.cu I call transform_reduce:

auto func = FuncEvalF<float>(FunctionObj<float>());
    float result = 0;
    try
    {
        result = thrust::transform_reduce(thrust::device, thrust::device_pointer_cast(dev_a), thrust::device_pointer_cast(dev_a + 10000), func, static_cast<float>(0), thrust::plus<float>());

    }
    catch (std::exception e)
    {
        printf("%s in thurst \n ", e.what());
    }

The exception bulk_kernel_by_value is thrown here, even if I change 10000 to 10. Things get better when I change the definition of FuncEval to

return x;

in which case the program outputs a right but meaningless answer. I cannot help asking: what is wrong with my code? Thanks for your attention. The complete code follows (CUDA 7.0, sm_20):

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform.h>

#include <stdio.h>

template <typename T>
struct FunctionObj
{
    T a;
     FunctionObj(): a(1)
    {
    }

};

template <typename T>
__host__ __device__  inline T FuncEval(const FunctionObj<T> &f_obj, T x)
{
    return f_obj.a+x;
}


template <typename T>
struct FuncEvalF
{
    const FunctionObj<T>& f_obj;
    __host__ __device__ inline FuncEvalF(const FunctionObj<T>& in_f_obj) :f_obj(in_f_obj)
    {

    }
    __host__ __device__ inline T operator()(T x)
    {
        return FuncEval(f_obj, x);
    }
};
template <typename T>
__host__ __device__ inline T BatchFuncEval(const FunctionObj<T>  &f_obj, int size, const T *x_in);
template<>
inline float BatchFuncEval< float>(const FunctionObj<float>  &f_obj, int size, const float *x_in)
{
    return thrust::transform_reduce(thrust::device, thrust::device_pointer_cast(x_in), thrust::device_pointer_cast(x_in + size), FuncEvalF<float>(f_obj), static_cast<float>(0), thrust::plus<float>());
}
int main()
{
    cudaError_t cudaE;
    float a[10000] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    float* dev_a;
    cudaE = cudaMalloc((void**)(&dev_a), sizeof(float) * 10000);
    cudaE = cudaMemcpy(dev_a, a, sizeof(float) * 10000, cudaMemcpyHostToDevice);
    auto func = FuncEvalF<float>(FunctionObj<float>());
    float result = 0;
    try
    {
        result = thrust::transform_reduce(thrust::device, thrust::device_pointer_cast(dev_a), thrust::device_pointer_cast(dev_a + 10000), func, static_cast<float>(0), thrust::plus<float>());

    }
    catch (std::exception e)
    {
        printf("%s in thurst \n ", e.what());
    }
    printf("the gpu float result is %f\n", result);
    cudaFree(dev_a);
}

1 Answer:

Answer 0 (score: 1)

The problem is that f_obj in struct FuncEvalF is a const FunctionObj<T>&.

It is bound to the temporary FunctionObj<float>() created on the host, and that reference is no longer valid by the time the functor is used on the device.
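In main() this is the line that creates the dangling reference; a commented restatement of the failure (my annotation, not part of the original answer):

auto func = FuncEvalF<float>(FunctionObj<float>());  // the temporary FunctionObj dies at the end of this statement
// func.f_obj now dangles; thrust then copies func to the device by value (the
// bulk_kernel_by_value kernel named in the error message), so the stored reference
// is a host address that the kernel cannot legally dereference; hence the exception.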

One way to solve this is to store a copy of it instead of keeping a reference to it:

template <typename T>
struct FuncEvalF
{
    FunctionObj<T> f_obj;
    ....
};
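
For completeness, a minimal sketch of the functor with the member stored by value; everything else is kept as in the question, and the only real change is the member declaration:

template <typename T>
struct FuncEvalF
{
    FunctionObj<T> f_obj;  // stored by value: the functor owns its own copy

    __host__ __device__ inline FuncEvalF(const FunctionObj<T>& in_f_obj) : f_obj(in_f_obj)
    {
    }

    __host__ __device__ inline T operator()(T x)
    {
        return FuncEval(f_obj, x);
    }
};

Because the copy that thrust ships to the device now carries its own FunctionObj, no device code ever touches host memory.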