Hi, I am new to JOCL (OpenCL). I wrote this code to get the sum of the intensities of each image. The kernel takes a one-dimensional array holding all the pixels of all the images. The images are 300x300, so each image is 90000 pixels. At the moment it is slower than doing the same thing sequentially.
My code:
package PAR;
/*
* JOCL - Java bindings for OpenCL
*
* Copyright 2009 Marco Hutter - http://www.jocl.org/
*/
import IMAGE_IO.ImageReader;
import IMAGE_IO.Input_Folder;
import static org.jocl.CL.*;
import org.jocl.*;
/**
* A small JOCL sample.
*/
public class IPPARA {
    /**
     * The source code of the OpenCL program to execute
     */
    private static String programSource =
            "__kernel void "
            + "sampleKernel(__global uint *a,"
            + "             __global uint *c)"
            + "{"
            + "    __private uint intensity_core=0;"
            + "    uint i = get_global_id(0);"
            + "    for(uint j=i*90000; j < (i+1)*90000; j++){ "
            + "        intensity_core += a[j];"
            + "    }"
            + "    c[i]=intensity_core;"
            + "}";
    /**
     * The entry point of this sample
     *
     * @param args Not used
     */
    public static void main(String args[]) {
        long numBytes[] = new long[1];

        ImageReader imagereader = new ImageReader();
        int srcArrayA[] = imagereader.readImages();

        int size[] = new int[1];
        size[0] = srcArrayA.length;

        long before = System.nanoTime();

        int dstArray[] = new int[size[0] / 90000];

        Pointer srcA = Pointer.to(srcArrayA);
        Pointer dst = Pointer.to(dstArray);

        // Obtain the platform IDs and initialize the context properties
        System.out.println("Obtaining platform...");
        cl_platform_id platforms[] = new cl_platform_id[1];
        clGetPlatformIDs(platforms.length, platforms, null);
        cl_context_properties contextProperties = new cl_context_properties();
        contextProperties.addProperty(CL_CONTEXT_PLATFORM, platforms[0]);
        // Create an OpenCL context on a CPU device
        cl_context context = clCreateContextFromType(
                contextProperties, CL_DEVICE_TYPE_CPU, null, null, null);
        if (context == null) {
            // If no context could be created, try once more for a CPU device
            context = clCreateContextFromType(
                    contextProperties, CL_DEVICE_TYPE_CPU, null, null, null);
            if (context == null) {
                System.out.println("Unable to create a context");
                return;
            }
        }
        // Enable exceptions and subsequently omit error checks in this sample
        CL.setExceptionsEnabled(true);

        // Get the list of devices associated with the context
        clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, null, numBytes);

        // Obtain the cl_device_id for the first device
        int numDevices = (int) numBytes[0] / Sizeof.cl_device_id;
        cl_device_id devices[] = new cl_device_id[numDevices];
        clGetContextInfo(context, CL_CONTEXT_DEVICES, numBytes[0],
                Pointer.to(devices), null);
        // Create a command-queue
        cl_command_queue commandQueue =
                clCreateCommandQueue(context, devices[0], 0, null);

        // Allocate the memory objects for the input- and output data
        cl_mem memObjects[] = new cl_mem[2];
        memObjects[0] = clCreateBuffer(context,
                CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                Sizeof.cl_uint * srcArrayA.length, srcA, null);
        memObjects[1] = clCreateBuffer(context,
                CL_MEM_READ_WRITE,
                Sizeof.cl_uint * (srcArrayA.length / 90000), null, null);

        // Create the program from the source code
        cl_program program = clCreateProgramWithSource(context,
                1, new String[]{programSource}, null, null);

        // Build the program
        clBuildProgram(program, 0, null, null, null, null);

        // Create the kernel
        cl_kernel kernel = clCreateKernel(program, "sampleKernel", null);

        // Set the arguments for the kernel
        clSetKernelArg(kernel, 0,
                Sizeof.cl_mem, Pointer.to(memObjects[0]));
        clSetKernelArg(kernel, 1,
                Sizeof.cl_mem, Pointer.to(memObjects[1]));

        // Set the work-item dimensions
        long local_work_size[] = new long[]{1};
        long global_work_size[] = new long[]{(srcArrayA.length / 90000) * local_work_size[0]};

        // Execute the kernel
        clEnqueueNDRangeKernel(commandQueue, kernel, 1, null,
                global_work_size, local_work_size, 0, null, null);
        // Read the output data
        clEnqueueReadBuffer(commandQueue, memObjects[1], CL_TRUE, 0,
                (srcArrayA.length / 90000) * Sizeof.cl_uint, dst, 0, null, null);

        // Release kernel, program, and memory objects
        clReleaseMemObject(memObjects[0]);
        clReleaseMemObject(memObjects[1]);
        clReleaseKernel(kernel);
        clReleaseProgram(program);
        clReleaseCommandQueue(commandQueue);
        clReleaseContext(context);

        long after = System.nanoTime();
        System.out.println("Time: " + (after - before) / 1e9);
    }
}
After following the suggestions in the answer, the parallel version running on the CPU is almost as fast as the sequential code. Are there any further improvements that can be made?
Answer 0 (score: 2):
    for(uint j=i*90000; j < (i+1)*90000; j++){
        c[i] += a[j];
    }

1) You are accumulating the sum in global memory (c[]), which is slow. Use a private variable to make it faster, like this:
"__kernel void "
+ "sampleKernel(__global uint *a,"
+ " __global uint *c)"
+ "{"
+ "__private uint intensity_core=0;" <---this is a private variable of each core
+ " uint i = get_global_id(0);"
+ " for(uint j=i*90000; j < (i+1)*90000; j++){ "
+ " intensity_core += a[j];" <---register is at least 100x faster than global memory
//but we cannot get rid of a[] so the calculation time cannot be less than %50
+ " }"
+ "c[i]=intensity_core;"
+ "}"; //expecting %100 speedup
Now you have the intensity sums in the c[number of images] array.

Your local work size is 1, so as long as you have at least 160 images (the core count of your GPU), the computation will use all of the cores.

You need 90000 * num_images reads and num_images writes, plus 90000 * num_images register reads/writes. Using registers should cut the kernel time roughly in half.
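For reference, here is a minimal host-side check (not from the original post; it reuses the srcArrayA and dstArray names from the question) that recomputes the per-image sums sequentially and compares them with the kernel output:

// Sequential reference: sum each 90000-pixel image on the host and
// compare it with what the kernel wrote into dstArray.
int numImages = srcArrayA.length / 90000;
for (int img = 0; img < numImages; img++) {
    int expected = 0;
    for (int p = img * 90000; p < (img + 1) * 90000; p++) {
        expected += srcArrayA[p];   // same 32-bit wrap-around as the uint sum in the kernel
    }
    if (expected != dstArray[img]) {
        System.out.println("Mismatch for image " + img);
    }
}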
2) You are doing only one arithmetic operation per two memory accesses. You need at least ten arithmetic operations per memory access to use a meaningful fraction of your GPU's peak Gflops (the 6490M peaks at about 250 Gflops).

Your i7 CPU can easily reach 100 Gflops, but memory will be the bottleneck. It is even worse when the whole data set has to be sent over PCI-Express (the HD Graphics 3000 is rated at 125 GFLOPS).
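To put rough numbers on that, here is a back-of-envelope sketch; it uses only the peak figures quoted above, and the conclusion is an estimate rather than a measurement:

public class IntensityEstimate {
    public static void main(String[] args) {
        // The kernel performs one add for every 4-byte uint it loads
        double opsPerByte = 1.0 / 4.0;                  // 0.25 ops per byte
        double peakGflops = 250.0;                      // 6490M peak quoted above

        // Bandwidth required to keep the ALUs busy at that peak
        double neededGBps = peakGflops / opsPerByte;    // 1000 GB/s
        System.out.printf("Bandwidth needed for peak: %.0f GB/s%n", neededGBps);

        // No memory bus or PCI-Express link comes close to that figure,
        // so this kernel is memory-bound on every device mentioned above.
    }
}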
// Obtain a device ID
cl_device_id devices[] = new cl_device_id[numDevices];
clGetDeviceIDs(platform, deviceType, numDevices, devices, null);
cl_device_id device = devices[deviceIndex];
//one of the devices[] elements must be your HD3000. Example: devices[0] -> gpu, devices[1] -> cpu,
//devices[2] -> HD3000
In your program:
// Obtain the cl_device_id for the first device
int numDevices = (int) numBytes[0] / Sizeof.cl_device_id;
cl_device_id devices[] = new cl_device_id[numDevices];
clGetContextInfo(context, CL_CONTEXT_DEVICES, numBytes[0],
Pointer.to(devices), null);
you simply take the first device, which is probably the GPU.
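If you are not sure which index the HD3000 ends up at, something like the following (a rough sketch following the usual JOCL query pattern, not code from the original post) will print the name of every device on every platform:

import static org.jocl.CL.*;
import org.jocl.*;

public class ListDevices {
    public static void main(String[] args) {
        int numPlatforms[] = new int[1];
        clGetPlatformIDs(0, null, numPlatforms);
        cl_platform_id platforms[] = new cl_platform_id[numPlatforms[0]];
        clGetPlatformIDs(platforms.length, platforms, null);

        for (cl_platform_id platform : platforms) {
            int numDevices[] = new int[1];
            clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 0, null, numDevices);
            cl_device_id devices[] = new cl_device_id[numDevices[0]];
            clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, devices.length, devices, null);

            for (cl_device_id device : devices) {
                // Query the length of the name string, then the string itself
                long size[] = new long[1];
                clGetDeviceInfo(device, CL_DEVICE_NAME, 0, null, size);
                byte buffer[] = new byte[(int) size[0]];
                clGetDeviceInfo(device, CL_DEVICE_NAME, buffer.length,
                        Pointer.to(buffer), null);
                System.out.println(new String(buffer, 0, buffer.length - 1));
            }
        }
    }
}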
Answer 1 (score: 0):
You should use an entire work group per 300x300 image. This helps saturate the GPU cores and lets you use local memory. The kernel should also be able to process as many images concurrently as there are compute units on the device.

The kernel below breaks your reduction into three steps.

WG_MAX_SIZE is defined because I am not a fan of passing in variable-sized local memory blocks. The value is 64 because that is a good size on most platforms. Make sure to set this value higher if you want to experiment with larger work groups. Work groups smaller than WG_MAX_SIZE will still work fine.
#define WORK_SIZE 90000
#define WG_MAX_SIZE 64

__kernel void sampleKernel(__global uint *a, __global uint *c)
{
    local uint intensity_core[WG_MAX_SIZE];
    private uint workItemIntensity = 0;
    int gid = get_group_id(0);
    int lid = get_local_id(0);
    int wgsize = get_local_size(0);
    int i;

    //each work item sums every wgsize-th pixel of this group's image
    for(i = gid*WORK_SIZE + lid; i < (gid+1)*WORK_SIZE; i += wgsize){
        workItemIntensity += a[i];
    }
    intensity_core[lid] = workItemIntensity;
    barrier(CLK_LOCAL_MEM_FENCE);

    //keep only ONE of the two options below

    //option #1
    //loop to reduce the final values in O(n) time
    if(lid == 0){
        for(i = 1; i < wgsize; i++){
            workItemIntensity += intensity_core[i];
        }
        c[gid] = workItemIntensity;
    }

    //option #2
    //O(logn) time reduction
    //assumes work group size is a power of 2
    int steps = 32 - clz(wgsize);
    for(i = 1; i < steps; i++){
        if(lid % (1 << i) == 0){
            intensity_core[lid] += intensity_core[lid + (1 << (i-1))];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if(lid == 0){
        c[gid] = intensity_core[0];
    }
}
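On the host side, launching this kernel would look roughly like the following (a sketch reusing the variable names from the question's code; the local size of 64 has to match WG_MAX_SIZE, and the buffers are the same as before):

// One work group of 64 work items per image
int numImages = srcArrayA.length / 90000;
long local_work_size[]  = new long[]{64};
long global_work_size[] = new long[]{numImages * 64L};

clEnqueueNDRangeKernel(commandQueue, kernel, 1, null,
        global_work_size, local_work_size, 0, null, null);

// Still one uint result per image
clEnqueueReadBuffer(commandQueue, memObjects[1], CL_TRUE, 0,
        numImages * Sizeof.cl_uint, dst, 0, null, null);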