I have an array of vertices with this structure:
[x0, y0, z0, empty float, x1, y1, z1, empty float, x2, y2, z2, empty float, ...]
I need to find minX, minY, minZ, maxX, maxY and maxZ using CUDA. I wrote a proper reduction algorithm, but it turned out to be a bit too slow, so I decided to use the Thrust library. It has a highly optimized reduce(), and, even better, a minmax_element() method that finds both the minimum and the maximum of an array at the same time, but I could not find a fast way to use only every 4th index. Copying the data into 3 separate arrays is not the solution I am looking for.
Is there a way (using a Thrust iterator or some similar trick) to pass a stride to reduce()?
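A minimal sketch of that layout, assuming four floats per vertex with the fourth one unused (the values and variable names below are purely illustrative); viewed this way, the buffer can be reinterpreted as an array of CUDA float4 elements, which is what the answers below exploit:
// 4 floats per vertex; the 4th float is padding, so the buffer
// can also be viewed as an array of float4 (w unused)
float vertices[] = {
//    x     y     z   (empty)
    1.0f, 2.0f, 3.0f, 0.0f,   // vertex 0
    4.0f, 5.0f, 6.0f, 0.0f,   // vertex 1
    7.0f, 8.0f, 9.0f, 0.0f    // vertex 2
};
float4 *v = reinterpret_cast<float4 *>(vertices);   // v[i].x, v[i].y, v[i].z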
Answer 0 (score: 7)
You can use the strided range method, along with 3 calls to thrust::minmax_element, to get your desired result without modifying the data storage.
Here is a worked example:
$ cat t491.cu
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/transform_reduce.h>
#define DSIZE (1048576*2)
#define SSIZE 4
#define FSIZE (DSIZE*SSIZE)
#define nTPB 256
#define BSIZE nTPB
#define nBLKS 64
#define FLOAT_MIN (-99999)
#define FLOAT_MAX 99999
typedef thrust::tuple<float, float, float, float, float, float> tpl6;
struct expand_functor
{
__host__ __device__
tpl6 operator()(const float4 a){
tpl6 result;
result.get<0>() = a.x;
result.get<1>() = a.x;
result.get<2>() = a.y;
result.get<3>() = a.y;
result.get<4>() = a.z;
result.get<5>() = a.z;
return result;
}
};
struct minmax3_functor
{
__host__ __device__
tpl6 operator()(const tpl6 a, const tpl6 b) {
tpl6 result;
result.get<0>() = (a.get<0>() < b.get<0>()) ? a.get<0>():b.get<0>();
result.get<1>() = (a.get<1>() > b.get<1>()) ? a.get<1>():b.get<1>();
result.get<2>() = (a.get<2>() < b.get<2>()) ? a.get<2>():b.get<2>();
result.get<3>() = (a.get<3>() > b.get<3>()) ? a.get<3>():b.get<3>();
result.get<4>() = (a.get<4>() < b.get<4>()) ? a.get<4>():b.get<4>();
result.get<5>() = (a.get<5>() > b.get<5>()) ? a.get<5>():b.get<5>();
return result;
}
};
__device__ int bcount = 0;
__device__ float xmins[nBLKS];
__device__ float xmaxs[nBLKS];
__device__ float ymins[nBLKS];
__device__ float ymaxs[nBLKS];
__device__ float zmins[nBLKS];
__device__ float zmaxs[nBLKS];
__global__ void my_minmax3(float4 *data, float *results, size_t dsize){
// assumes power-of-2 threadblock size
// assumes nBLKS <= nTPB, nBLKS also power-of-2
__shared__ float xmin[BSIZE], xmax[BSIZE], ymin[BSIZE], ymax[BSIZE], zmin[BSIZE], zmax[BSIZE];
__shared__ int lblock;
float my_xmin = FLOAT_MAX;
float my_ymin = FLOAT_MAX;
float my_zmin = FLOAT_MAX;
float my_xmax = FLOAT_MIN;
float my_ymax = FLOAT_MIN;
float my_zmax = FLOAT_MIN;
int idx = threadIdx.x+blockDim.x*blockIdx.x;
while (idx < dsize){
float4 my_temp = data[idx];
if (my_xmin > my_temp.x) my_xmin = my_temp.x;
if (my_ymin > my_temp.y) my_ymin = my_temp.y;
if (my_zmin > my_temp.z) my_zmin = my_temp.z;
if (my_xmax < my_temp.x) my_xmax = my_temp.x;
if (my_ymax < my_temp.y) my_ymax = my_temp.y;
if (my_zmax < my_temp.z) my_zmax = my_temp.z;
idx += blockDim.x*gridDim.x;}
xmin[threadIdx.x] = my_xmin;
ymin[threadIdx.x] = my_ymin;
zmin[threadIdx.x] = my_zmin;
xmax[threadIdx.x] = my_xmax;
ymax[threadIdx.x] = my_ymax;
zmax[threadIdx.x] = my_zmax;
__syncthreads();
for (int i = blockDim.x/2; i > 0; i>>=1){
if (threadIdx.x < i){
if (xmin[threadIdx.x] > xmin[threadIdx.x+i]) xmin[threadIdx.x] = xmin[threadIdx.x + i];
if (ymin[threadIdx.x] > ymin[threadIdx.x+i]) ymin[threadIdx.x] = ymin[threadIdx.x + i];
if (zmin[threadIdx.x] > zmin[threadIdx.x+i]) zmin[threadIdx.x] = zmin[threadIdx.x + i];
if (xmax[threadIdx.x] < xmax[threadIdx.x+i]) xmax[threadIdx.x] = xmax[threadIdx.x + i];
if (ymax[threadIdx.x] < ymax[threadIdx.x+i]) ymax[threadIdx.x] = ymax[threadIdx.x + i];
if (zmax[threadIdx.x] < zmax[threadIdx.x+i]) zmax[threadIdx.x] = zmax[threadIdx.x + i];
}
__syncthreads();
}
if (!threadIdx.x){
xmins[blockIdx.x] = xmin[0];
xmaxs[blockIdx.x] = xmax[0];
ymins[blockIdx.x] = ymin[0];
ymaxs[blockIdx.x] = ymax[0];
zmins[blockIdx.x] = zmin[0];
zmaxs[blockIdx.x] = zmax[0];
__threadfence();
if (atomicAdd(&bcount, 1) == (nBLKS-1)) lblock = 1;
else lblock = 0;
}
__syncthreads();
if (lblock){ // last block does final reduction
if (threadIdx.x < nBLKS){
xmin[threadIdx.x] = xmins[threadIdx.x];
xmax[threadIdx.x] = xmaxs[threadIdx.x];
ymin[threadIdx.x] = ymins[threadIdx.x];
ymax[threadIdx.x] = ymaxs[threadIdx.x];
zmin[threadIdx.x] = zmins[threadIdx.x];
zmax[threadIdx.x] = zmaxs[threadIdx.x];}
__syncthreads();
for (int i = nBLKS/2; i > 0; i>>=1){
if (threadIdx.x < i){
if (xmin[threadIdx.x] > xmin[threadIdx.x+i]) xmin[threadIdx.x] = xmin[threadIdx.x + i];
if (ymin[threadIdx.x] > ymin[threadIdx.x+i]) ymin[threadIdx.x] = ymin[threadIdx.x + i];
if (zmin[threadIdx.x] > zmin[threadIdx.x+i]) zmin[threadIdx.x] = zmin[threadIdx.x + i];
if (xmax[threadIdx.x] < xmax[threadIdx.x+i]) xmax[threadIdx.x] = xmax[threadIdx.x + i];
if (ymax[threadIdx.x] < ymax[threadIdx.x+i]) ymax[threadIdx.x] = ymax[threadIdx.x + i];
if (zmax[threadIdx.x] < zmax[threadIdx.x+i]) zmax[threadIdx.x] = zmax[threadIdx.x + i];
}
__syncthreads();
}
if (!threadIdx.x){
results[0] = xmin[0];
results[1] = xmax[0];
results[2] = ymin[0];
results[3] = ymax[0];
results[4] = zmin[0];
results[5] = zmax[0];
}
}
}
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
typedef thrust::device_vector<float>::iterator Iter;
typedef strided_range<Iter>::iterator sIter;
int main(){
// set up test data
cudaEvent_t start, stop;
float et;
cudaEventCreate(&start); cudaEventCreate(&stop);
thrust::host_vector<float> h_vals(FSIZE);
for (int i = 0; i < DSIZE; i ++) {
h_vals[i*SSIZE + 0] = rand()/(float)RAND_MAX;
h_vals[i*SSIZE + 1] = rand()/(float)RAND_MAX;
h_vals[i*SSIZE + 2] = rand()/(float)RAND_MAX;
h_vals[i*SSIZE + 3] = 0.0f;}
thrust::device_vector<float> d_vals = h_vals;
// set up strided ranges
strided_range<Iter> item_x(d_vals.begin() , d_vals.end(), SSIZE);
strided_range<Iter> item_y(d_vals.begin()+1, d_vals.end(), SSIZE);
strided_range<Iter> item_z(d_vals.begin()+2, d_vals.end(), SSIZE);
// find min and max
cudaEventRecord(start);
thrust::pair<sIter, sIter> result_x = thrust::minmax_element(item_x.begin(), item_x.end());
thrust::pair<sIter, sIter> result_y = thrust::minmax_element(item_y.begin(), item_y.end());
thrust::pair<sIter, sIter> result_z = thrust::minmax_element(item_z.begin(), item_z.end());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
std::cout << "thrust elapsed time: " << et << "ms" << std::endl;
std::cout << "thrust results: " << std::endl;
std::cout << "x min element = " << *(result_x.first) << std::endl;
std::cout << "x max element = " << *(result_x.second) << std::endl;
std::cout << "y min element = " << *(result_y.first) << std::endl;
std::cout << "y max element = " << *(result_y.second) << std::endl;
std::cout << "z min element = " << *(result_z.first) << std::endl;
std::cout << "z max element = " << *(result_z.second) << std::endl;
float *h_results, *d_results;
h_results = new float[6];
cudaMalloc(&d_results, 6*sizeof(float));
cudaEventRecord(start);
my_minmax3<<<nBLKS,nTPB>>>((float4 *)thrust::raw_pointer_cast(d_vals.data()), d_results, DSIZE);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
cudaMemcpy(h_results, d_results, 6*sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "kernel elapsed time: " << et << "ms" << std::endl;
std::cout << "kernel results: " << std::endl;
std::cout << "x min element = " << h_results[0] << std::endl;
std::cout << "x max element = " << h_results[1] << std::endl;
std::cout << "y min element = " << h_results[2] << std::endl;
std::cout << "y max element = " << h_results[3] << std::endl;
std::cout << "z min element = " << h_results[4] << std::endl;
std::cout << "z max element = " << h_results[5] << std::endl;
thrust::device_ptr<float4> dptr_vals = thrust::device_pointer_cast(reinterpret_cast<float4 *>( thrust::raw_pointer_cast(d_vals.data())));
tpl6 my_init;
my_init.get<0>() = FLOAT_MAX;
my_init.get<1>() = FLOAT_MIN;
my_init.get<2>() = FLOAT_MAX;
my_init.get<3>() = FLOAT_MIN;
my_init.get<4>() = FLOAT_MAX;
my_init.get<5>() = FLOAT_MIN;
cudaEventRecord(start);
tpl6 my_result = thrust::transform_reduce(dptr_vals, dptr_vals + DSIZE, expand_functor(), my_init, minmax3_functor());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
cudaMemcpy(h_results, d_results, 6*sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "thrust2 elapsed time: " << et << "ms" << std::endl;
std::cout << "thrust2 results: " << std::endl;
std::cout << "x min element = " << my_result.get<0>() << std::endl;
std::cout << "x max element = " << my_result.get<1>() << std::endl;
std::cout << "y min element = " << my_result.get<2>() << std::endl;
std::cout << "y max element = " << my_result.get<3>() << std::endl;
std::cout << "z min element = " << my_result.get<4>() << std::endl;
std::cout << "z max element = " << my_result.get<5>() << std::endl;
return 0;
}
$ nvcc -O3 -arch=sm_20 -o t491 t491.cu
$ ./t491
thrust elapsed time: 3.88784ms
thrust results:
x min element = 1.16788e-06
x max element = 0.999998
y min element = 2.85916e-07
y max element = 1
z min element = 1.72295e-08
z max element = 0.999999
kernel elapsed time: 0.462848ms
kernel results:
x min element = 1.16788e-06
x max element = 0.999998
y min element = 2.85916e-07
y max element = 1
z min element = 1.72295e-08
z max element = 0.999999
thrust2 elapsed time: 1.29728ms
thrust2 results:
x min element = 1.16788e-06
x max element = 0.999998
y min element = 2.85916e-07
y max element = 1
z min element = 1.72295e-08
z max element = 0.999999
$
I have updated the example above to include, for comparison, an "optimized" reduction kernel that performs all 6 reductions (min and max operations) in a single kernel call.
As expected, this approach runs faster than 3 individual thrust calls producing the same result, roughly 5-8x faster in my case (RHEL 5.5, Quadro5000, CUDA 6.5RC) depending on the data size. Note that although I have chosen a data size (DSIZE) here that is a power of 2, the entire example works correctly for arbitrary data sizes. I have dispensed with proper cuda error checking for the sake of brevity.
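For reference, a minimal sketch of the kind of error checking that was dispensed with (the cudaCheckErrors macro is not part of the listing above, just an illustration of the usual pattern):
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
// check the last CUDA error and abort with a message if something went wrong
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                    msg, cudaGetErrorString(__err), __FILE__, __LINE__); \
            exit(1); \
        } \
    } while (0)
// typical usage with the code above:
//   cudaMalloc(&d_results, 6*sizeof(float));
//   cudaCheckErrors("cudaMalloc failed");
//   my_minmax3<<<nBLKS,nTPB>>>((float4 *)thrust::raw_pointer_cast(d_vals.data()), d_results, DSIZE);
//   cudaDeviceSynchronize();
//   cudaCheckErrors("kernel launch or execution failed");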
EDIT: As suggested by @JaredHoberock, I have added a 3rd approach that produces all 6 results with a single call to thrust::transform_reduce. These are the "thrust2" results above. This method is about 3x faster than the first method (three separate thrust calls). It is still not as fast as the cuda kernel approach, but perhaps this thrust approach can be optimized further.
Answer 1 (score: 2)
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
// for printing
#include <thrust/copy.h>
#include <ostream>
#define STRIDE 2
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
int main(void)
{
const int N = 9;
thrust::host_vector<int> h_data(N);
for (int i=0; i<N; i++) h_data[i] = i;
thrust::device_vector<int> data(h_data);
typedef thrust::device_vector<int>::iterator Iterator;
strided_range<Iterator> pos(data.begin(), data.end(), STRIDE);
int sum = thrust::reduce(pos.begin(), pos.end(), (int) 0, thrust::plus<int>());
printf("sum = %i\n",sum);
return 0;
}
Answer 2 (score: 1)
I optimized my kernel following this document: http://www.cuvilib.com/Reduction.pdf
Now it looks like this:
template<unsigned int blockSize>
__global__ void minMaxReduction_k(float *g_minData, float *g_maxData, float *g_minOutput, float *g_maxOutput, unsigned int n)
{
extern __shared__ float shared[];
float* minSdata = (float*)shared;
float* maxSdata = (float*)&minSdata[4*blockDim.x];
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
minSdata[4*tid] = FLT_MAX;
minSdata[4*tid+1] = FLT_MAX;
minSdata[4*tid+2] = FLT_MAX;
maxSdata[4*tid] = -FLT_MAX;
maxSdata[4*tid+1] = -FLT_MAX;
maxSdata[4*tid+2] = -FLT_MAX;
// caution: the loads at index i+blockDim.x below are not bounds-checked,
// so n must be large enough (or the buffer padded) for them to stay in range
while(i<n){
minSdata[4*tid] = fminf(fminf(minSdata[4*tid], g_minData[4*i]), g_minData[4*(i+blockDim.x)]);
minSdata[4*tid+1] = fminf(fminf(minSdata[4*tid+1], g_minData[4*i+1]), g_minData[4*(i+blockDim.x)+1]);
minSdata[4*tid+2] = fminf(fminf(minSdata[4*tid+2], g_minData[4*i+2]), g_minData[4*(i+blockDim.x)+2]);
maxSdata[4*tid] = fmaxf(fmaxf(maxSdata[4*tid], g_maxData[4*i]), g_maxData[4*(i+blockDim.x)]);
maxSdata[4*tid+1] = fmaxf(fmaxf(maxSdata[4*tid+1], g_maxData[4*i+1]), g_maxData[4*(i+blockDim.x)+1]);
maxSdata[4*tid+2] = fmaxf(fmaxf(maxSdata[4*tid+2], g_maxData[4*i+2]), g_maxData[4*(i+blockDim.x)+2]);
i+=gridSize;
}
__syncthreads();
if(blockSize >= 1024){
if(tid < 512){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+512)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+512)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+512)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+512)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+512)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+512)+2]);
}
__syncthreads();
}
if(blockSize >= 512){
if(tid < 256){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+256)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+256)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+256)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+256)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+256)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+256)+2]);
}
__syncthreads();
}
if(blockSize >= 256){
if(tid < 128){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+128)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+128)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+128)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+128)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+128)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+128)+2]);
}
__syncthreads();
}
if(blockSize >= 128){
if(tid < 64){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+64)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+64)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+64)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+64)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+64)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+64)+2]);
}
__syncthreads();
}
if(tid<32){
// final warp: as in Reduction.pdf this relies on warp-synchronous execution;
// to be safe on current architectures the shared-memory pointers should be
// declared volatile (or explicit warp synchronization should be used)
if (blockSize >= 64){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+32)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+32)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+32)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+32)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+32)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+32)+2]);
}
//
if (blockSize >= 32){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+16)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+16)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+16)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+16)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+16)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+16)+2]);
}
//
if (blockSize >= 16){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+8)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+8)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+8)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+8)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+8)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+8)+2]);
}
//
if (blockSize >= 8){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+4)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+4)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+4)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+4)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+4)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+4)+2]);
}
//
if (blockSize >= 4){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+2)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+2)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+2)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+2)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+2)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+2)+2]);
}
//
if (blockSize >= 2){
minSdata[4*tid] = fminf(minSdata[4*tid], minSdata[4*(tid+1)]);
minSdata[4*tid+1] = fminf(minSdata[4*tid+1], minSdata[4*(tid+1)+1]);
minSdata[4*tid+2] = fminf(minSdata[4*tid+2], minSdata[4*(tid+1)+2]);
maxSdata[4*tid] = fmaxf(maxSdata[4*tid], maxSdata[4*(tid+1)]);
maxSdata[4*tid+1] = fmaxf(maxSdata[4*tid+1], maxSdata[4*(tid+1)+1]);
maxSdata[4*tid+2] = fmaxf(maxSdata[4*tid+2], maxSdata[4*(tid+1)+2]);
}
}
// write result for this block to global mem, using the same 4-float stride
// that the kernel reads, so the next reduction pass can consume it directly
if (tid == 0){
g_minOutput[4*blockIdx.x] = minSdata[0];
g_minOutput[4*blockIdx.x+1] = minSdata[1];
g_minOutput[4*blockIdx.x+2] = minSdata[2];
g_maxOutput[4*blockIdx.x] = maxSdata[0];
g_maxOutput[4*blockIdx.x+1] = maxSdata[1];
g_maxOutput[4*blockIdx.x+2] = maxSdata[2];
}
}
It is invoked like this:
float *d_minOutput;
float *d_maxOutput;
int tempN = n;
while(tempN>1){
getNumBlocksAndThreads(tempN, 65535, 1024, blocks, threads);
cudaMalloc((void **)&d_minOutput, 4*(sizeof(float)*blocks));
cudaMalloc((void **)&d_maxOutput, 4*(sizeof(float)*blocks));
int smem = (threads <= 32) ? 2*2*4*threads*sizeof(float) : 2*4*threads*sizeof(float);
switch(threads){
case 1024:
minMaxReduction_k<1024><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 512:
minMaxReduction_k<512><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 256:
minMaxReduction_k<256><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 128:
minMaxReduction_k<128><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 64:
minMaxReduction_k<64><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 32:
minMaxReduction_k<32><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 16:
minMaxReduction_k<16><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 8:
minMaxReduction_k<8><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 4:
minMaxReduction_k<4><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 2:
minMaxReduction_k<2><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
case 1:
minMaxReduction_k<1><<< blocks, threads, smem >>>(d_minData, d_maxData, d_minOutput, d_maxOutput, tempN);
break;
}
tempN = blocks;
cudaMemcpy(d_minData, d_minOutput, 4*(sizeof(float)*blocks), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_maxData, d_maxOutput, 4*(sizeof(float)*blocks), cudaMemcpyDeviceToDevice);
cudaFree(d_minOutput);
cudaFree(d_maxOutput);
}
Helpers:
void UniformGrid::getNumBlocksAndThreads(unsigned int n, unsigned int maxBlocks, unsigned int maxThreads, unsigned int &blocks, unsigned int &threads)
{
// get device capability, to avoid the block/grid size exceeding the upper bound
cudaDeviceProp prop;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (blocks > (unsigned int) prop.maxGridSize[0])
{
printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
}
unsigned int UniformGrid::nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
In a few days I will probably test which of these solutions is faster.