Combining texture memory and unified memory in CUDA 6

Asked: 2014-07-10 09:08:10

Tags: c++ memory cuda nvidia tegra

I am writing a CUDA application for the Jetson TK1 using CUDA 6. From Mark Harris's blog post

Jetson TK1: Mobile Embedded Supercomputer Takes CUDA Everywhere

I got the impression that the Tegra K1's memory is physically unified. I have also observed that cudaMallocManaged is significantly faster for global memory than an ordinary cudaMemcpy, presumably because unified memory does not require any copying.
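(For context, here is a minimal sketch of the two paths I am comparing; the buffer size and the scale_kernel below are illustrative placeholders, not my actual application code.)

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float* buf, int n){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < n) buf[i] *= 2.0f;
}

int main(){
    const int n = 1 << 20;

    // --- Explicit-copy path: separate host and device buffers plus cudaMemcpy
    float* h = (float*)malloc(n*sizeof(float));
    for(int i = 0; i < n; ++i) h[i] = float(i);
    float* d = NULL;
    cudaMalloc((void**)&d, n*sizeof(float));
    cudaMemcpy(d, h, n*sizeof(float), cudaMemcpyHostToDevice);
    scale_kernel<<<(n + 255)/256, 256>>>(d, n);
    cudaMemcpy(h, d, n*sizeof(float), cudaMemcpyDeviceToHost);

    // --- Managed path: a single allocation visible to host and device;
    //     on a physically unified SoC no bulk copy should be needed
    float* m = NULL;
    cudaMallocManaged((void**)&m, n*sizeof(float));
    for(int i = 0; i < n; ++i) m[i] = float(i);
    scale_kernel<<<(n + 255)/256, 256>>>(m, n);
    cudaDeviceSynchronize();        // required before the host reads m again

    printf("%f %f\n", h[0], m[0]);
    cudaFree(d); cudaFree(m); free(h);
    return 0;
}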

However, what should I do when I want to use texture memory for parts of the application? I have not found any support for textures with cudaMallocManaged, so I assume I have to use the ordinary cudaMemcpyToArray and cudaBindTextureToArray.

Using the approach just mentioned generally seems to work, but variables managed by cudaMallocManaged occasionally give me strange segmentation faults. Is this the right way to use texture memory together with unified memory? The following code illustrates how I do it. It works fine, but my question is whether this is the correct approach, or whether it has hidden problems that could cause, for example, segmentation faults.

#include <iostream>
#include <cuda_runtime.h>

#define width 16
#define height 16
texture<float, cudaTextureType2D, cudaReadModeElementType> input_tex;

__global__ void some_tex_kernel(float* output){
    int i= threadIdx.x;
    float x = i%width+0.5f;
    float y =  i/width+0.5f;
    output[i] = tex2D(input_tex, x, y);
}

int main(){
    float* out;
    if(cudaMallocManaged(&out, width*height*sizeof(float))!= cudaSuccess)
        std::cout << "unified not working\n";

    for(int i=0; i< width*height; ++i){
        out[i] = float(i);
    }

    const cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaArray* input_t;
    cudaMallocArray(&input_t, &desc, width, height);
    cudaMemcpyToArrayAsync(input_t, 0, 0, out, width*height*sizeof(float),  cudaMemcpyHostToDevice);

    input_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(input_tex, input_t, desc);

    some_tex_kernel<<<1, width*height>>>(out);
    cudaDeviceSynchronize();

    for(int i=0;i<width*height; ++i)
        std::cout << out[i] << " ";

    cudaFree(out);
    cudaFreeArray(input_t);

    return 0;
}

Another thing I find strange is that if I remove the cudaDeviceSynchronize() from the code, I always get a segmentation fault. I understand that the results might not be finished if I read them without synchronizing, but shouldn't the variable still be accessible?

Does anyone have a clue?

Mattias

2 Answers:

Answer 0 (score: 2)

The only managed memory possibilities at this time are static allocation using __device__ __managed__ or dynamic allocation using cudaMallocManaged(). There is no direct support for textures, surfaces, constant memory, etc.
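As a minimal sketch of those two options (the kernel name and sizes below are placeholders for illustration, not taken from the question):

#include <cstdio>
#include <cuda_runtime.h>

// --- Static managed allocation: directly usable from both host and device code
__device__ __managed__ float static_buf[256];

__global__ void fill_kernel(float* buf, int n){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < n) buf[i] = float(i);
}

int main(){
    // --- Dynamic managed allocation
    float* dyn_buf = NULL;
    cudaMallocManaged((void**)&dyn_buf, 256*sizeof(float));

    fill_kernel<<<1, 256>>>(static_buf, 256);
    fill_kernel<<<1, 256>>>(dyn_buf, 256);
    cudaDeviceSynchronize();                 // make the results visible to the host

    printf("%f %f\n", static_buf[10], dyn_buf[10]);
    cudaFree(dyn_buf);
    return 0;
}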

Your use of textures is fine. The only overlap between texture usage and managed memory is in the following call:

cudaMemcpyToArrayAsync(input_t, 0, 0, out, width*height*sizeof(float),  cudaMemcpyHostToDevice);

where managed memory is the source (i.e. the host side) of the transfer. This is acceptable as long as the call is issued during a period when no kernels are executing (see below).


"Another thing I find strange is that if I remove the cudaDeviceSynchronize() in the code, I always get a segmentation fault."

After the kernel call, cudaDeviceSynchronize(); is necessary to make the managed memory visible to the host again. I suggest you read this section of the documentation carefully:


"In general, it is not permitted for the CPU to access any managed allocations or variables while the GPU is active. Concurrent CPU/GPU accesses ... will cause a segmentation fault ..."

As you point out, the code you posted runs fine. If you have other code that gives unpredictable seg faults while using managed memory, I would carefully inspect the code flow (especially if you are using streams, i.e. concurrency) to make sure the host only accesses managed data after a cudaDeviceSynchronize(); has been issued and before any subsequent kernel call.
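To illustrate that ordering rule with a minimal sketch (the kernel and buffer names here are just placeholders):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void touch_kernel(float* buf, int n){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < n) buf[i] += 1.0f;
}

int main(){
    const int n = 1024;
    float* buf = NULL;
    cudaMallocManaged((void**)&buf, n*sizeof(float));

    for(int i = 0; i < n; ++i) buf[i] = 0.0f;    // host access OK: no kernel has run yet

    touch_kernel<<<4, 256>>>(buf, n);
    // printf("%f\n", buf[0]);                   // WRONG: GPU is active, expect a seg fault
    cudaDeviceSynchronize();                     // required before the host touches buf again
    printf("%f\n", buf[0]);                      // OK: device work has completed

    cudaFree(buf);
    return 0;
}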

Answer 1 (score: 2)

Robert Crovella has already answered your question. However, to show you that cudaMallocManaged can be used in the framework of texture memory, I have taken my 1D linear interpolation code and converted it to use cudaMallocManaged. You will see that the code performs the 1D linear interpolation in four different ways:

  • on the CPU;
  • on the GPU;
  • on the GPU using tex1Dfetch;
  • on the GPU using tex1D filtering.

The code runs without problems in all cases, in particular the last two, on a Kepler K20c card.

// includes, system
#include <cstdlib> 
#include <conio.h>
#include <math.h>
#include <fstream>
#include <iostream> 
#include <iomanip>

// includes, cuda 
#include <cuda.h>
#include <cuda_runtime.h>

using namespace std;

texture<float, 1, cudaReadModeElementType> data_d_texture_filtering;
texture<float, 1> data_d_texture;

#define BLOCK_SIZE 256

/******************/
/* ERROR CHECKING */
/******************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) 
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) { getch(); exit(code); }
    }
}

/************/
/* LINSPACE */
/************/
// --- Generates N equally spaced, increasing points between a and b and stores them in x 
void linspace(float* x, float a, float b, int N) {
    float delta_x=(b-a)/(float)N;
    x[0]=a;
    for(int k=1;k<N;k++) x[k]=x[k-1]+delta_x;
}

/*************/
/* RANDSPACE */
/*************/
// --- Generates N randomly spaced, increasing points between a and b and stores them in x 
void randspace(float* x, float a, float b, int N) {
    float delta_x=(b-a)/(float)N;
    x[0]=a;
    for(int k=1;k<N;k++) x[k]=x[k-1]+delta_x+(((float)rand()/(float)RAND_MAX-0.5)*(1./(float)N));
}

/******************/
/* DATA GENERATOR */
/******************/
// --- Generates N complex random data points, with real and imaginary parts ranging in (0.f,1.f)
void Data_Generator(float* data, int N) {
    for(int k=0;k<N;k++) {
        data[k]=(float)rand()/(float)RAND_MAX;
    }
}

/*************************************/
/* LINEAR INTERPOLATION KERNEL - CPU */
/*************************************/
float linear_kernel_CPU(float in)
{
    float d_y;
    return 1.-abs(in);
}

/***************************************/
/* LINEAR INTERPOLATION FUNCTION - CPU */
/***************************************/
void linear_interpolation_function_CPU(float* result_GPU, float* data, float* x_in, float* x_out, int M, int N){

    float a;
    for(int j=0; j<N; j++){
        int k = floor(x_out[j]+M/2);
        a = x_out[j]+M/2-floor(x_out[j]+M/2);
        result_GPU[j] = a * data[k+1] + (-data[k] * a + data[k]);
    }   
}

/*************************************/
/* LINEAR INTERPOLATION KERNEL - GPU */
/*************************************/
__device__ float linear_kernel_GPU(float in)
{
    float d_y;
    return 1.-abs(in);
}

/**************************************************************/
/* LINEAR INTERPOLATION KERNEL FUNCTION - GPU - GLOBAL MEMORY */
/**************************************************************/
__global__ void linear_interpolation_kernel_function_GPU(float* __restrict__ result_d, const float* __restrict__ data_d, const float* __restrict__ x_out_d, const int M, const int N)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;

    if(j<N)
    {
        float reg_x_out = x_out_d[j]+M/2;
        int k = __float2int_rz(reg_x_out); 
        float a = reg_x_out - truncf(reg_x_out);
        float dk = data_d[k];
        float dkp1 = data_d[k+1];
        result_d[j] = a * dkp1 + (-dk * a + dk);
    } 
}

/***************************************************************/
/* LINEAR INTERPOLATION KERNEL FUNCTION - GPU - TEXTURE MEMORY */
/***************************************************************/
__global__ void linear_interpolation_kernel_function_GPU_texture(float* __restrict__ result_d, const float* __restrict__ x_out_d, const int M, const int N)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;

    if(j<N)
    {
        float reg_x_out = x_out_d[j]+M/2;
        int k = __float2int_rz(reg_x_out); 
        float a = reg_x_out - truncf(reg_x_out);
        float dk = tex1Dfetch(data_d_texture,k);
        float dkp1 = tex1Dfetch(data_d_texture,k+1);
        result_d[j] = a * dkp1 + (-dk * a + dk);
    } 
}

/************************************************************************************/
/* LINEAR INTERPOLATION KERNEL FUNCTION - GPU - TEXTURE MEMORY - FILTERING FEATURES */
/************************************************************************************/
__global__ void linear_interpolation_kernel_function_GPU_texture_filtering(float* __restrict__ result_d, const float* __restrict__ x_out_d, const int M, const int N)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x; 
    if(j<N) result_d[j] = tex1D(data_d_texture_filtering,float(x_out_d[j]+M/2+0.5));
}

/***************************************/
/* LINEAR INTERPOLATION FUNCTION - GPU */
/***************************************/
void linear_interpolation_function_GPU(float* result_d, float* data_d, float* x_in_d, float* x_out_d, int M, int N){

    dim3 dimBlock(BLOCK_SIZE,1); dim3 dimGrid(N/BLOCK_SIZE + (N%BLOCK_SIZE == 0 ? 0:1),1);
    linear_interpolation_kernel_function_GPU<<<dimGrid,dimBlock>>>(result_d, data_d, x_out_d, M, N);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
}

/********************************************************/
/* LINEAR INTERPOLATION FUNCTION - GPU - TEXTURE MEMORY */
/********************************************************/
void linear_interpolation_function_GPU_texture(float* result_d, float* data_d, float* x_in_d, float* x_out_d, int M, int N){

    cudaBindTexture(NULL, data_d_texture, data_d, M*sizeof(float));

    dim3 dimBlock(BLOCK_SIZE,1); dim3 dimGrid(N/BLOCK_SIZE + (N%BLOCK_SIZE == 0 ? 0:1),1);
    linear_interpolation_kernel_function_GPU_texture<<<dimGrid,dimBlock>>>(result_d, x_out_d, M, N);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
}

/*****************************************************************************/
/* LINEAR INTERPOLATION FUNCTION - GPU - TEXTURE MEMORY - FILTERING FEATURES */
/*****************************************************************************/
void linear_interpolation_function_GPU_texture_filtering(float* result_d, float* data, float* x_in_d, float* x_out_d, int M, int N){

    cudaArray* data_d = NULL; gpuErrchk(cudaMallocArray(&data_d, &data_d_texture_filtering.channelDesc, M, 1)); 
    gpuErrchk(cudaMemcpyToArray(data_d, 0, 0, data, sizeof(float)*M, cudaMemcpyHostToDevice)); 
    gpuErrchk(cudaBindTextureToArray(data_d_texture_filtering, data_d)); 
    data_d_texture_filtering.normalized = false; 
    data_d_texture_filtering.filterMode = cudaFilterModeLinear;

    dim3 dimBlock(BLOCK_SIZE,1); dim3 dimGrid(N/BLOCK_SIZE + (N%BLOCK_SIZE == 0 ? 0:1),1);
    linear_interpolation_kernel_function_GPU_texture_filtering<<<dimGrid,dimBlock>>>(result_d, x_out_d, M, N);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

}

/********/
/* MAIN */
/********/
int main()
{

    int M=1024;             // --- Number of input points

    int N=1024;             // --- Number of output points

    int Nit = 100;          // --- Number of computations for time measurement

    // --- Input sampling
    float* x_in; gpuErrchk(cudaMallocManaged(&x_in,sizeof(float)*M));

    // --- Input data
    float *data;        gpuErrchk(cudaMallocManaged(&data,(M+1)*sizeof(float))); Data_Generator(data,M); data[M]=0.;

    // --- Output sampling
    float* x_out;       gpuErrchk(cudaMallocManaged((void**)&x_out,sizeof(float)*N)); randspace(x_out,-M/2.,M/2.,N);

    // --- Result allocation
    float *result_CPU;                          result_CPU=(float*)malloc(N*sizeof(float));
    float *result_d;                            gpuErrchk(cudaMallocManaged(&result_d,sizeof(float)*N));
    float *result_d_texture;                    gpuErrchk(cudaMallocManaged(&result_d_texture,sizeof(float)*N));
    float *result_d_texture_filtering;          gpuErrchk(cudaMallocManaged(&result_d_texture_filtering,sizeof(float)*N));

    // --- Reference interpolation result as evaluated on the CPU
    linear_interpolation_function_CPU(result_CPU, data, x_in, x_out, M, N);

    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for (int k=0; k<Nit; k++) linear_interpolation_function_GPU(result_d, data, x_in, x_out, M, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cout << "GPU Global memory [ms]: " << setprecision (10) << time/Nit << endl;

    cudaEventRecord(start, 0);
    for (int k=0; k<Nit; k++) linear_interpolation_function_GPU_texture_filtering(result_d_texture_filtering, data, x_in, x_out, M, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cout << "GPU Texture filtering [ms]: " << setprecision (10) << time/Nit << endl;

    cudaEventRecord(start, 0);
    for (int k=0; k<Nit; k++) linear_interpolation_function_GPU_texture(result_d_texture, data, x_in, x_out, M, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cout << "GPU Texture [ms]: " << setprecision (10) << time/Nit << endl;

    float diff_norm=0.f, norm=0.f;
    for(int j=0; j<N; j++) {
        diff_norm = diff_norm + (result_CPU[j]-result_d[j])*(result_CPU[j]-result_d[j]);
        norm      = norm      + result_CPU[j]*result_CPU[j];
    }
    printf("Error GPU [percentage] = %f\n",100.*sqrt(diff_norm/norm));

    float diff_norm_texture_filtering=0.f;
    for(int j=0; j<N; j++) {
        diff_norm_texture_filtering = diff_norm_texture_filtering + (result_CPU[j]-result_d_texture_filtering[j])*(result_CPU[j]-result_d_texture_filtering[j]);
    }
    printf("Error texture filtering [percentage] = %f\n",100.*sqrt(diff_norm_texture_filtering/norm));

    float diff_norm_texture=0.f;
    for(int j=0; j<N; j++) {
        diff_norm_texture = diff_norm_texture + (result_CPU[j]-result_d_texture[j])*(result_CPU[j]-result_d_texture[j]);
    }
    printf("Error texture [percentage] = %f\n",100.*sqrt(diff_norm_texture/norm));

    cudaDeviceReset();

    return 0;
}