Random permutation on the GPU using Thrust

Time: 2018-10-29 22:57:40

Tags: cuda gpu thrust

I am trying to write code that permutes a vector on the GPU, but getting Thrust to cooperate is proving difficult. Right now the code below compiles fine, but it does nothing to the order of the vector r. Please help. Thanks!

void rng_permutation<float>(const int n, float* r){

  float* order;
  cudaMalloc((void**)&order, n* sizeof(float));

  /*
  some lines of code that generate uniform random floats between 0 and 1 that I know work
  */

  thrust::device_ptr<float> order_(order);
  thrust::device_vector<float> order__(order_, order_ + n);
  thrust::device_ptr<float> r_(r);
  thrust::device_vector<float> r__(r_, r_ + n);

  thrust::sort_by_key(order__.begin(), order__.end(), r__.begin());

  thrust::copy(order_, order_ + n, order__.begin());
  thrust::copy(r_, r_ + n, r__.begin()); 


  cudaFree(order);

  order__.clear();
  r__.clear();
  thrust::device_vector<float>().swap(order__);
  thrust::device_vector<float>().swap(r__);

}

1 answer:

Answer 0 (score: 2)

You have your source and destination reversed here:

thrust::copy(order_, order_ + n, order__.begin());
thrust::copy(r_, r_ + n, r__.begin()); 

The preceding line has just sorted what was in order__. You are then copying the contents of order_ on top of it (the first parameters to thrust::copy are the source, the last one is the destination). That makes no sense. Reverse it instead:

  thrust::copy(order__.begin(), order__.end(), order_);
  thrust::copy(r__.begin(), r__.end(), r_);

and you will get sensible results:

$ cat t312.cu
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <iostream>
#include <thrust/sequence.h>

template <typename T>
void caffe_gpu_rng_uniform(int n, T lo, T hi, T *o)
{
  T *d = (T *)malloc(n*sizeof(T));
  for (int i = 0; i < n; i++) d[i] = (rand()/(float)RAND_MAX)*(hi-lo) + lo;
  cudaMemcpy(o, d, n*sizeof(T), cudaMemcpyHostToDevice);
  free(d);
};

template <typename T>
void print_gpu_array_entries(T *o, int x , int y , int n){
  thrust::copy_n(thrust::device_pointer_cast<T>(o), x, std::ostream_iterator<T>(std::cout, ","));
  std::cout << std::endl;
}

void rng_permutation(const int n, float* r){

  float* order;
  cudaMalloc((void**)&order, n* sizeof(float));

  caffe_gpu_rng_uniform<float>(n, (float)0.0, (float)1.0, order);


  print_gpu_array_entries<float>(order, 10 , 1 , n);
  print_gpu_array_entries<float>(r, 10 , 1 , n);

  thrust::device_ptr<float> order_(order);
  thrust::device_vector<float> order__(order_, order_ + n);
  thrust::device_ptr<float> r_(r);
  thrust::device_vector<float> r__(r_, r_ + n);

  thrust::sort_by_key(order__.begin(), order__.end(), r__.begin());

  thrust::copy(order__.begin(), order__.end(), order_);
  thrust::copy(r__.begin(), r__.end(), r_);

  print_gpu_array_entries<float>(order, 10 , 1 , n);
  print_gpu_array_entries<float>(r, 10 , 1 , n);

  cudaFree(order);

  order__.clear();
  r__.clear();
  thrust::device_vector<float>().swap(order__);
  thrust::device_vector<float>().swap(r__);

}

int main(){

  thrust::device_vector<float> data(10);
  thrust::sequence(data.begin(), data.end());
  rng_permutation(10, thrust::raw_pointer_cast(data.data()));
}
$ nvcc -o t312 t312.cu
$ ./t312
0.840188,0.394383,0.783099,0.79844,0.911647,0.197551,0.335223,0.76823,0.277775,0.55397,
0,1,2,3,4,5,6,7,8,9,
0.197551,0.277775,0.335223,0.394383,0.55397,0.76823,0.783099,0.79844,0.840188,0.911647,
5,8,6,1,9,7,2,3,0,4,
$
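As a side note (not part of the answer above): the intermediate device_vector copies are not strictly needed. thrust::device_ptr iterators already dispatch to the device backend, so sort_by_key can permute the caller's buffer in place. A minimal sketch under that assumption, with order already filled with random keys and both pointers referring to device allocations of length n (the name rng_permutation_inplace is just for illustration):

#include <thrust/device_ptr.h>
#include <thrust/sort.h>

// Permute r in place by sorting it along with its random keys.
// Assumes r and order are device pointers of length n, and that
// order has already been filled with random floats.
void rng_permutation_inplace(const int n, float* r, float* order)
{
  thrust::device_ptr<float> order_(order);
  thrust::device_ptr<float> r_(r);

  // Sorting the keys drags the values along with them, so r is
  // reordered directly in device memory and no copy-back is needed.
  thrust::sort_by_key(order_, order_ + n, r_);
}

This skips the two extra device-to-device copies and the temporary allocations that back the device_vector objects in the version above.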