在CUDA中排序许多小数组

时间:2014-03-12 01:00:42

标签: sorting cuda cub

我正在CUDA中实现中值过滤器。对于特定像素,我提取其周围窗口内的邻居,比如一个N x N(例如3 x 3)窗口,这样就得到一个N x N元素的数组。我预计我的应用程序不会使用超过10 x 10个元素的窗口。

此数组位于内核的本地作用域中,数据已加载到设备内存。根据我之前阅读的SO帖子,最常见的排序算法是由Thrust实现的。但是,Thrust只能从主机端调用。参见帖子:Thrust inside user written kernels

是否有一种快速有效的方法可以对内核中的一小部分N x N元素进行排序?

3 个答案:

答案 0 :(得分:4)

如果元素数量固定且很小,则可以使用排序网络(http://pages.ripco.net/~jgamble/nw.html)。它为固定数量的元素提供固定数量的比较/交换操作(例如,对8个元素进行19次比较/交换迭代)。

答案 1 :(得分:2)

你的问题是在CUDA中排序许多小数组

根据罗伯特在评论中的建议,CUB提供了解决此问题的可能解决方案。下面我将报告一个围绕Robert cub BlockRadixSort: how to deal with large tile size or sort multiple tiles?代码构建的示例。

这个想法是将小数组分配给不同的线程块,然后使用cub::BlockRadixSort对每个数组进行排序。下面提供了两个版本:一个不使用共享内存,另一个先将小数组加载到共享内存中再排序。

最后请注意,你所说的"CUDA Thrust不能从内核中调用"的说法已不再成立。你链接的帖子Thrust inside user written kernels已经用新的答案进行了更新。

#include <cub/cub.cuh>
#include <stdio.h>
#include <stdlib.h>

#include "Utilities.cuh"

using namespace cub;

/**********************************/
/* CUB BLOCKSORT KERNEL NO SHARED */
/**********************************/
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockSortKernel(int *d_in, int *d_out)
{
    // Sorts independent segments of BLOCK_THREADS * ITEMS_PER_THREAD keys,
    // one segment per thread block (one small array per block).
    // Launch contract: blockDim.x == BLOCK_THREADS; gridDim.x == number of arrays;
    // d_in/d_out must hold gridDim.x * BLOCK_THREADS * ITEMS_PER_THREAD ints.

    // --- Specialize BlockLoad, BlockStore, and BlockRadixSort collective types.
    //     Modern CUB specializes BlockLoad/BlockStore on the ITEM type (int),
    //     not on an iterator type (int*); the iterator form is legacy/removed.
    typedef cub::BlockLoad      <int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE>   BlockLoadT;
    typedef cub::BlockStore     <int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_TRANSPOSE>  BlockStoreT;
    typedef cub::BlockRadixSort <int, BLOCK_THREADS, ITEMS_PER_THREAD>                         BlockRadixSortT;

    // --- Allocate type-safe, repurposable shared memory for the collectives.
    //     The union is safe because each collective's use is separated by a barrier.
    __shared__ union {
        typename BlockLoadT     ::TempStorage load;
        typename BlockStoreT    ::TempStorage store;
        typename BlockRadixSortT::TempStorage sort;
    } temp_storage;

    // --- Each thread holds ITEMS_PER_THREAD keys of this block's segment
    int thread_keys[ITEMS_PER_THREAD];
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);

    BlockLoadT(temp_storage.load).Load(d_in + block_offset, thread_keys);
    __syncthreads();    // barrier before reusing temp_storage for the sort

    // --- Collectively sort the keys within this block
    BlockRadixSortT(temp_storage.sort).Sort(thread_keys);
    __syncthreads();    // barrier before reusing temp_storage for the store

    // --- Store the sorted segment back to global memory
    BlockStoreT(temp_storage.store).Store(d_out + block_offset, thread_keys);
}

/*******************************/
/* CUB BLOCKSORT KERNEL SHARED */
/*******************************/
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void shared_BlockSortKernel(int *d_in, int *d_out)
{
    // Sorts one segment of BLOCK_THREADS * ITEMS_PER_THREAD keys per block,
    // staging the keys in shared memory and sorting them in place there.
    // Launch contract: blockDim.x == BLOCK_THREADS; gridDim.x == number of arrays.

    // --- Shared memory staging buffer for this block's whole segment
    __shared__ int sharedMemoryArray[BLOCK_THREADS * ITEMS_PER_THREAD];

    // --- Specialize BlockStore and BlockRadixSort collective types
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT;

    // --- Allocate type-safe, repurposable shared memory for collectives
    __shared__ typename BlockRadixSortT::TempStorage temp_storage;

    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);

    // --- Load this block's segment from global into shared memory
    //     (blocked arrangement: thread t owns elements [t*IPT, (t+1)*IPT))
    for (int k = 0; k < ITEMS_PER_THREAD; k++) sharedMemoryArray[threadIdx.x * ITEMS_PER_THREAD + k]  = d_in[block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
    __syncthreads();

    // --- Collectively sort the keys in place in shared memory.
    //     The cast reinterprets each thread's contiguous slice of the shared
    //     buffer as the int(&)[ITEMS_PER_THREAD] reference that Sort() expects;
    //     the void* hop avoids a direct incompatible-pointer cast.
    BlockRadixSortT(temp_storage).Sort(*static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryArray + (threadIdx.x * ITEMS_PER_THREAD))));
    __syncthreads();

    // --- Write the sorted segment back out to global memory
    for (int k = 0; k < ITEMS_PER_THREAD; k++) d_out[block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArray[threadIdx.x * ITEMS_PER_THREAD + k];

}

/********/
/* MAIN */
/********/
int main() {

    // Demo driver: sorts numArrays independent small arrays of
    // numElemsPerArray ints each, once per kernel variant, and prints both.

    const int numElemsPerArray  = 8;
    const int numArrays         = 4;
    const int N                 = numArrays * numElemsPerArray;
    const int numElemsPerThread = 4;

    // --- Upper bound (exclusive) for the random input values
    const int RANGE             = N * numElemsPerThread;

    // --- Allocating and initializing the data on the host
    int *h_data = (int *)malloc(N * sizeof(int));
    for (int i = 0 ; i < N; i++) h_data[i] = rand() % RANGE;

    // --- Allocating the results on the host
    int *h_result1 = (int *)malloc(N * sizeof(int));
    int *h_result2 = (int *)malloc(N * sizeof(int));

    // --- Allocating space for data and results on device
    int *d_in;      gpuErrchk(cudaMalloc((void **)&d_in,   N * sizeof(int)));
    int *d_out1;    gpuErrchk(cudaMalloc((void **)&d_out1, N * sizeof(int)));
    int *d_out2;    gpuErrchk(cudaMalloc((void **)&d_out2, N * sizeof(int)));

    // --- BlockSortKernel no shared
    //     BLOCK_THREADS = numElemsPerArray / numElemsPerThread; one block per array.
    gpuErrchk(cudaMemcpy(d_in, h_data, N*sizeof(int), cudaMemcpyHostToDevice));
    BlockSortKernel<N / numArrays / numElemsPerThread, numElemsPerThread><<<numArrays, numElemsPerArray / numElemsPerThread>>>(d_in, d_out1);
    gpuErrchk(cudaPeekAtLastError());       // catch launch-configuration errors
    gpuErrchk(cudaDeviceSynchronize());     // catch asynchronous execution errors
    gpuErrchk(cudaMemcpy(h_result1, d_out1, N*sizeof(int), cudaMemcpyDeviceToHost));

    printf("BlockSortKernel no shared\n\n");
    for (int k = 0; k < numArrays; k++) 
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Value %i\n", k, i, h_result1[k * numElemsPerArray + i]);

    // --- BlockSortKernel with shared
    gpuErrchk(cudaMemcpy(d_in, h_data, N*sizeof(int), cudaMemcpyHostToDevice));
    shared_BlockSortKernel<N / numArrays / numElemsPerThread, numElemsPerThread><<<numArrays, numElemsPerArray / numElemsPerThread>>>(d_in, d_out2);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(h_result2, d_out2, N*sizeof(int), cudaMemcpyDeviceToHost));

    printf("\n\nBlockSortKernel with shared\n\n");
    for (int k = 0; k < numArrays; k++) 
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Value %i\n", k, i, h_result2[k * numElemsPerArray + i]);

    // --- Release device and host resources
    gpuErrchk(cudaFree(d_in));
    gpuErrchk(cudaFree(d_out1));
    gpuErrchk(cudaFree(d_out2));
    free(h_data);
    free(h_result1);
    free(h_result2);

    return 0;
}

答案 2 :(得分:0)

如果您使用的是CUDA 5.X,则可以使用动态并行:您可以在过滤器内核中启动一些子内核来完成排序工作。至于如何在CUDA中进行排序,可以参考一些现有的排序技巧。