The CUDA implementation of the Mersenne Twister (MT) random number generator is limited to a maximum of 256 threads per block and 200 blocks per grid, i.e. a maximum of 51200 threads.
It is therefore not possible to launch a kernel that uses MT as kernel<<<blocksPerGrid, threadsPerBlock>>>(devMTGPStates, ...) with int blocksPerGrid = (n+threadsPerBlock-1)/threadsPerBlock; where n is the total number of threads.
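For reference, the host-side setup of the MTGP32 states looks roughly like this (a sketch following the cuRAND documentation; the helper name setupMTGP32 is illustrative and error checking is omitted). cuRAND ships 200 pre-computed parameter sets for this generator, and each state is shared by the (at most 256) threads of one block, which is where the 51200-thread cap comes from:

#include <cuda_runtime.h>
#include <curand_kernel.h>
/* MTGP host helper functions */
#include <curand_mtgp32_host.h>
/* MTGP pre-computed parameter sets */
#include <curand_mtgp32dc_p_11213.h>

void setupMTGP32(curandStateMtgp32 **states, int numStates, unsigned long long seed)
{
    // numStates may not exceed 200: that is how many pre-computed parameter
    // sets ship with cuRAND, and each state serves one block of <= 256 threads.
    mtgp32_kernel_params *devKernelParams;
    cudaMalloc(states, numStates * sizeof(curandStateMtgp32));
    cudaMalloc(&devKernelParams, sizeof(mtgp32_kernel_params));
    curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams);
    curandMakeMTGP32KernelState(*states, mtgp32dc_params_fast_11213,
                                devKernelParams, numStates, seed);
}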
What is the best way to use MT with threads > 51200?
My approach is to use constant values for blocksPerGrid and threadsPerBlock, e.g. <<<128,128>>>, and to use the following in the kernel code:
__global__ void kernel(curandStateMtgp32 *state, int n, ...) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    while (id < n) {
        float x = curand_normal(&state[blockIdx.x]);
        /* some more calls to curand_normal() followed
           by the algorithm that works with the data */
        id += blockDim.x * gridDim.x;
    }
}
I am not sure whether this is the correct way to do it, or whether it might affect the MT state in an undesirable way?
Thank you.
Answer 0 (score: 3)
I suggest that you read the CURAND documentation carefully.
The MT API will be most efficient when using 256 threads per block and up to 64 blocks to generate numbers.
If you need more than that, you have a variety of options:
Generally speaking, I don't see a problem with the kernel you have outlined, and it is roughly in line with option 1 above (drawing additional numbers from the existing set of states). However, it does not allow you to exceed 51200 threads. (Your example of <<<128,128>>> launches 16384 threads.) See the sketch below for one way to cap the launch while still covering arbitrary n.
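One way to keep the launch formula from the question within that limit while the grid-stride loop still covers all n elements is to clamp the block count (a sketch, assuming a simplified kernel that takes only the states and n; the choice of 64 blocks matches the most efficient configuration mentioned above):

// Clamp blocksPerGrid so that at most 64 MTGP32 states (one per block) are
// required; the grid-stride loop inside the kernel covers the remaining elements.
int threadsPerBlock = 256;   // MTGP32 per-block limit
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
if (blocksPerGrid > 64) blocksPerGrid = 64;
kernel<<<blocksPerGrid, threadsPerBlock>>>(devMTGPStates, n);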
Answer 1 (score: 1)
Following up on Robert's answer, below I provide a fully worked example of how to use cuRAND's Mersenne Twister for an arbitrary number of threads. I am using Robert's first option: generating more numbers from the existing set of states and distributing those numbers among the threads that require them.
// --- Generate random numbers with cuRAND's Mersenne Twister
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include <cuda.h>
#include <curand_kernel.h>
/* include MTGP host helper functions */
#include <curand_mtgp32_host.h>
/* include MTGP pre-computed parameter sets */
#include <curand_mtgp32dc_p_11213.h>

#define BLOCKSIZE 256
#define GRIDSIZE  64

/*******************/
/* GPU ERROR CHECK */
/*******************/
#define gpuErrchk(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n", __FILE__, __LINE__); \
    return EXIT_FAILURE; }} while(0)

#define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \
    printf("Error at %s:%d\n", __FILE__, __LINE__); \
    return EXIT_FAILURE; }} while(0)

/*******************/
/* iDivUp FUNCTION */
/*******************/
__host__ __device__ int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }

/*********************/
/* GENERATION KERNEL */
/*********************/
__global__ void generate_kernel(curandStateMtgp32 * __restrict__ state, float * __restrict__ result, const int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    // Grid-stride loop: each thread draws as many numbers as needed from the
    // single MTGP32 state of its block, so N may exceed the number of threads.
    for (int k = tid; k < N; k += blockDim.x * gridDim.x)
        result[k] = curand_uniform(&state[blockIdx.x]);
}

/********/
/* MAIN */
/********/
int main()
{
    const int N = 217 * 123;

    // --- Allocate space for results on host
    float *hostResults = (float *)malloc(N * sizeof(float));

    // --- Allocate and initialize space for results on device
    float *devResults; gpuErrchk(cudaMalloc(&devResults, N * sizeof(float)));
    gpuErrchk(cudaMemset(devResults, 0, N * sizeof(float)));

    // --- Setup the pseudorandom number generator
    curandStateMtgp32 *devMTGPStates; gpuErrchk(cudaMalloc(&devMTGPStates, GRIDSIZE * sizeof(curandStateMtgp32)));
    mtgp32_kernel_params *devKernelParams; gpuErrchk(cudaMalloc(&devKernelParams, sizeof(mtgp32_kernel_params)));
    CURAND_CALL(curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams));
    //CURAND_CALL(curandMakeMTGP32KernelState(devMTGPStates, mtgp32dc_params_fast_11213, devKernelParams, GRIDSIZE, 1234));
    CURAND_CALL(curandMakeMTGP32KernelState(devMTGPStates, mtgp32dc_params_fast_11213, devKernelParams, GRIDSIZE, time(NULL)));

    // --- Generate the pseudo-random sequence and copy it to the host
    generate_kernel<<<GRIDSIZE, BLOCKSIZE>>>(devMTGPStates, devResults, N);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(hostResults, devResults, N * sizeof(float), cudaMemcpyDeviceToHost));

    // --- Print the first few results
    //for (int i = 0; i < N; i++) {
    for (int i = 0; i < 10; i++) {
        printf("%f\n", hostResults[i]);
    }

    // --- Cleanup
    gpuErrchk(cudaFree(devKernelParams));
    gpuErrchk(cudaFree(devMTGPStates));
    gpuErrchk(cudaFree(devResults));
    free(hostResults);

    return 0;
}
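The same pattern carries over to the normally distributed numbers asked about in the question; a minimal variant of the kernel above (only the distribution call changes, launch configuration and state setup stay exactly as in the example):

__global__ void generate_normal_kernel(curandStateMtgp32 * __restrict__ state, float * __restrict__ result, const int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    // Grid-stride loop: the GRIDSIZE * BLOCKSIZE launched threads draw as many
    // normally distributed numbers as needed from their block's MTGP32 state.
    for (int k = tid; k < N; k += blockDim.x * gridDim.x)
        result[k] = curand_normal(&state[blockIdx.x]);
}

It is launched exactly like generate_kernel, e.g. generate_normal_kernel<<<GRIDSIZE, BLOCKSIZE>>>(devMTGPStates, devResults, N);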