I copied an asynchronous CUDA/C++ sample and modified it to test primality. My problem is that, for every prime that gets printed, the next value in the array is a copy of that value. Is this the expected behavior, or is there a problem with the way I wrote the example?
The code:
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
#include <iostream> // for std::cout / std::endl used in main
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
//set matrix to possible prime values
//evaluate if input is prime, sets variable to 0 if not prime
__global__ void testPrimality(int *g_data) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    g_data[idx] = 3 + idx / 2;
    if (g_data[idx] <= 3) {
        if (g_data[idx] <= 1) {
            g_data[idx] = 0;
        }
    }
    else if (g_data[idx] % 2 == 0 || g_data[idx] % 3 == 0) {
        g_data[idx] = 0;
    }
    else {
        // trial division against 6k-1 and 6k+1 factor candidates
        for (unsigned short i = 5; i * i <= g_data[idx]; i += 6) {
            if (g_data[idx] % i == 0 || g_data[idx] % (i + 2) == 0) {
                g_data[idx] = 0;
            }
        }
    }
}
bool correct_output(int *data, const int n, const int x)
{
    for (int i = 0; i < n; i++)
        if (data[i] != x)
        {
            printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
            return false;
        }
    return true;
}
int main(int argc, char *argv[])
{
    int devID;
    cudaDeviceProp deviceProps;
    printf("[%s] - Starting...\n", argv[0]);
    // This will pick the best possible CUDA capable device
    devID = findCudaDevice(argc, (const char **)argv);
    // get device name
    checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s]\n", deviceProps.name);
    const int n = 16 * 1024 * 1024;
    int nbytes = n * sizeof(int);
    int value = 1;
    // allocate host memory
    int *a = 0;
    checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
    memset(a, 0, nbytes);
    // allocate device memory
    int *d_a = 0;
    checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
    checkCudaErrors(cudaMemset(d_a, 255, nbytes));
    // set kernel launch configuration
    dim3 threads = dim3(512, 1);
    dim3 blocks = dim3(n / threads.x, 1);
    // create cuda event handles
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    checkCudaErrors(cudaDeviceSynchronize());
    float gpu_time = 0.0f;
    // asynchronously issue work to the GPU (all to stream 0)
    sdkStartTimer(&timer);
    cudaEventRecord(start, 0);
    cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
    //increment_kernel<<<blocks, threads, 0, 0>>>(d_a);
    testPrimality<<<blocks, threads, 0, 0>>>(d_a);
    cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
    cudaEventRecord(stop, 0);
    sdkStopTimer(&timer);
    // have CPU do some work while waiting for stage 1 to finish
    unsigned long int counter = 0;
    while (cudaEventQuery(stop) == cudaErrorNotReady)
    {
        counter++;
    }
    checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
    // print the cpu and gpu times
    printf("time spent executing by the GPU: %.2f\n", gpu_time);
    printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
    printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
    // print values for all allocated memory space
    for (int i = 0; i < n; i++) {
        if (a[i] != 0) {
            std::cout << a[i] << " : " << i << std::endl;
        }
    }
    // check the output for correctness
    //bool bFinalResults = correct_output(a, n, value);
    bool bFinalResults = true;
    // release resources
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    checkCudaErrors(cudaFreeHost(a));
    checkCudaErrors(cudaFree(d_a));
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();
    exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
Answer (score: 2)
The duplication arises from the actual "input" values you are operating on. It's not clear to me what sequence of numbers you want, but this line of code:
g_data[idx] = 3 + idx / 2;
is doing integer division (idx is of type int, and so is g_data[idx]). Because of the integer division by 2, each "input" value is generated twice, and therefore so is each value in the output. If you want to see the input values, modify your last cout statement like this:
std::cout << a[i] << " : " << i << " " << 3 + i / 2 << std::endl;
to "mimic" the input data generation you perform in the kernel. If you do that, you will see the duplication in the last column of numbers.
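To see this concretely, here is a minimal host-only sketch (my addition, not part of the original answer) that evaluates the same expression on the CPU for the first few indices:
#include <cstdio>

int main() {
    // Evaluate the kernel's value-generation expression on the host.
    // Integer division truncates, so idx = 0,1 -> 3; idx = 2,3 -> 4; idx = 4,5 -> 5; ...
    for (int idx = 0; idx < 8; idx++) {
        printf("idx = %d -> 3 + idx / 2 = %d\n", idx, 3 + idx / 2);
    }
    return 0;
}
Every candidate value (and therefore every prime that survives the test) is generated at two consecutive indices, which matches the duplication you observed.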
EDIT: Based on the comments below, there seems to be some uncertainty about what sequence of numbers the idx variable will produce. This is the canonical way to generate a globally unique thread ID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
and in typical usage, each thread gets a unique index that is one higher than the "previous" thread's:
0, 1, 2, 3, ...
It appears that the desired case is to create an "input" data set that looks like this:
3, 5, 7, 9, ...
so the correct arithmetic, in place of:
g_data[idx] = 3 + idx / 2;
is this:
g_data[idx] = 3 + idx * 2;
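For illustration (again a host-only sketch of my own, assuming the same launch geometry of blockDim.x = 512 as in the example), this is how the canonical global thread ID maps to the corrected candidate values:
#include <cstdio>

int main() {
    const int blockDim_x = 512;  // matches dim3 threads = dim3(512, 1) in the example
    // Show the mapping from (blockIdx.x, threadIdx.x) to the global thread ID and the
    // generated candidate value, for a few threads in block 0 and the first thread of block 1.
    int samples[][2] = { {0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0} };  // {blockIdx.x, threadIdx.x}
    for (int s = 0; s < 5; s++) {
        int idx = samples[s][0] * blockDim_x + samples[s][1];
        printf("blockIdx.x = %d, threadIdx.x = %d -> idx = %d -> 3 + idx * 2 = %d\n",
               samples[s][0], samples[s][1], idx, 3 + idx * 2);
    }
    return 0;
}
This produces 3, 5, 7, 9, ... with no repeated values.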
Here is a fully worked example that incorporates this change, as well as the cout change I suggested earlier:
$ cat t1119.cu
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
#include <iostream> // for std::cout / std::endl used in main
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
//set matrix to possible prime values
//evaluate if input is prime, sets variable to 0 if not prime
__global__ void testPrimality(int *g_data) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    g_data[idx] = 3 + idx * 2;
    if (g_data[idx] <= 3) {
        if (g_data[idx] <= 1) {
            g_data[idx] = 0;
        }
    }
    else if (g_data[idx] % 2 == 0 || g_data[idx] % 3 == 0) {
        g_data[idx] = 0;
    }
    else {
        // trial division against 6k-1 and 6k+1 factor candidates
        for (unsigned short i = 5; i * i <= g_data[idx]; i += 6) {
            if (g_data[idx] % i == 0 || g_data[idx] % (i + 2) == 0) {
                g_data[idx] = 0;
            }
        }
    }
}
bool correct_output(int *data, const int n, const int x)
{
    for (int i = 0; i < n; i++)
        if (data[i] != x)
        {
            printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
            return false;
        }
    return true;
}
int main(int argc, char *argv[])
{
    int devID;
    cudaDeviceProp deviceProps;
    printf("[%s] - Starting...\n", argv[0]);
    // This will pick the best possible CUDA capable device
    devID = findCudaDevice(argc, (const char **)argv);
    // get device name
    checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s]\n", deviceProps.name);
    //const int n = 16 * 1024 * 1024;
    const int n = 1024;
    int nbytes = n * sizeof(int);
    //int value = 1;
    // allocate host memory
    int *a = 0;
    checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
    memset(a, 0, nbytes);
    // allocate device memory
    int *d_a = 0;
    checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
    checkCudaErrors(cudaMemset(d_a, 255, nbytes));
    // set kernel launch configuration
    dim3 threads = dim3(512, 1);
    dim3 blocks = dim3(n / threads.x, 1);
    // create cuda event handles
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    checkCudaErrors(cudaDeviceSynchronize());
    float gpu_time = 0.0f;
    // asynchronously issue work to the GPU (all to stream 0)
    sdkStartTimer(&timer);
    cudaEventRecord(start, 0);
    cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
    //increment_kernel<<<blocks, threads, 0, 0>>>(d_a);
    testPrimality<<<blocks, threads, 0, 0>>>(d_a);
    cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
    cudaEventRecord(stop, 0);
    sdkStopTimer(&timer);
    // have CPU do some work while waiting for stage 1 to finish
    unsigned long int counter = 0;
    while (cudaEventQuery(stop) == cudaErrorNotReady)
    {
        counter++;
    }
    checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
    // print the cpu and gpu times
    printf("time spent executing by the GPU: %.2f\n", gpu_time);
    printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
    printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
    // print values for all allocated memory space
    for (int i = 0; i < n; i++) {
        if (a[i] != 0) {
            std::cout << a[i] << " : " << i << " " << 3 + i * 2 << std::endl;
        }
    }
    // check the output for correctness
    //bool bFinalResults = correct_output(a, n, value);
    bool bFinalResults = true;
    // release resources
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    checkCudaErrors(cudaFreeHost(a));
    checkCudaErrors(cudaFree(d_a));
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();
    exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
$ nvcc -I/usr/local/cuda/samples/common/inc t1119.cu -o t1119
$ cuda-memcheck ./t1119
(excerpted output:)
337 : 167 337
347 : 172 347
349 : 173 349
353 : 175 353
359 : 178 359
367 : 182 367
373 : 185 373
379 : 188 379
383 : 190 383
389 : 193 389
397 : 197 397
401 : 199 401
409 : 203 409
419 : 208 419
421 : 209 421
431 : 214 431
433 : 215 433
439 : 218 439
443 : 220 443
449 : 223 449
457 : 227 457
461 : 229 461
463 : 230 463
467 : 232 467
479 : 238 479
487 : 242 487
491 : 244 491
499 : 248 499
503 : 250 503
509 : 253 509
521 : 259 521
523 : 260 523
541 : 269 541
547 : 272 547
557 : 277 557
563 : 280 563
569 : 283 569
571 : 284 571
577 : 287 577
587 : 292 587
593 : 295 593
599 : 298 599
601 : 299 601
607 : 302 607
613 : 305 613
617 : 307 617
619 : 308 619
As shown above, there is no duplication in the output sequence.