I am working on my degree thesis project, investigating whether and when OpenCL pipes are also useful on CPUs (we already know they are widely used on FPGAs).
I am trying to implement even the simplest algorithm, with a producer kernel that writes to a pipe and a consumer kernel that reads from it. I want the two kernels to run in parallel and the pipe to have blocking behaviour (read only when the pipe is not empty). Reading the Intel documentation at Intel_FPGA_Opencl, the __attribute__((blocking)) attribute on the pipe declaration should make a read block while the pipe is empty. However, when I try to use that attribute I get __write_pipe_2_bl is undefined.
I also tried to emulate the blocking behaviour with a while loop (as shown in the Intel documentation), but the kernel stalls even when the pipe is not empty.
Also, the kernels do not seem to run in parallel unless I use two different command queues.
Kernel code:
#pragma OPENCL EXTENSION cl_intel_printf : enable
#define SIZE 1000

__kernel void pipe_writer(__global int *in,
                          write_only pipe int __attribute((depth(SIZE))) p)
{
    for(int i = 0; i < SIZE; i++){
        write_pipe(p, &in[i]);
        printf("written: %d\n", in[i]);
    }
}

__kernel void pipe_reader(__global int *out,
                          read_only pipe int __attribute((depth(SIZE))) p)
{
    for(int i = 0; i < SIZE; i++){
        while (read_pipe(p, &out[i]) == -1){
            //printf("blocked read\n");
        }
        //int check = read_pipe(p, &out[i]);
        printf("read: %d\n", out[i]);
    }
}
Host code:
#include <stdio.h>
#include <stdlib.h>
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define MAX_SOURCE_SIZE (0x100000)
int main(void) {
// Create the two input vectors
int bb = 0;
int i;
const int LIST_SIZE = 1000;
int *A = (int*)malloc(sizeof(int)*LIST_SIZE);
int *B = (int*)malloc(sizeof(int)*LIST_SIZE);
for(i = 0; i < LIST_SIZE; i++) {
A[i] = i;
}
// Load the kernel source code into the array source_str
FILE *fp;
char *source_str;
size_t source_size;
fp = fopen("kernel.cl", "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );
// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
if(ret != CL_SUCCESS){
printf("getPlatformId, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1,
&device_id, &ret_num_devices);
if(ret != CL_SUCCESS){
printf("getDevice, ERROR CODE: %d\n", ret);
bb=1;
}
// Create an OpenCL context
cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
if(ret != CL_SUCCESS){
printf("createContext, ERROR CODE: %d\n", ret);
bb=1;
}
// Create a command queue
cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
cl_command_queue command_queue2 = clCreateCommandQueue(context, device_id, 0, &ret);
if(ret != CL_SUCCESS){
printf("commandQueue, ERROR CODE: %d\n", ret);
bb=1;
}
// Create memory buffers on the device for each vector
cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
if(ret != CL_SUCCESS){
printf("memobj, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), A, 0, NULL, NULL);
if(ret != CL_SUCCESS){
printf("enqueuewritebuffer, ERROR CODE: %d\n", ret);
bb=1;
}
cl_program program = clCreateProgramWithSource(context, 1,
(const char **)&source_str, (const size_t *)&source_size, &ret);
if(ret != CL_SUCCESS){
printf("crateProgWithSource, ERROR CODE: %d\n", ret);
bb=1;
}
// Build the program
ret = clBuildProgram(program, 1, &device_id, "-cl-std=CL2.0", NULL, NULL);
if(ret != CL_SUCCESS){
printf("buildProgram, ERROR CODE: %d\n", ret);
bb=1;
}
/////Debug Kernel compilation:
size_t ret_val_size;
clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
char * build_log = (char*) malloc(sizeof(char) * (ret_val_size));
clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
printf("LOG: \n%s\n", build_log);
///////////////////////////////
cl_kernel pipe_writer = clCreateKernel(program, "pipe_writer", &ret);
if(ret != CL_SUCCESS){
printf("createkernelwriter, ERROR CODE: %d\n", ret);
bb=1;
}
cl_kernel pipe_reader = clCreateKernel(program, "pipe_reader", &ret);
if(ret != CL_SUCCESS){
printf("createkernelReader, ERROR CODE: %d\n", ret);
bb=1;
}
cl_mem pipe = clCreatePipe(context, 0, sizeof(cl_int), 1000, NULL, &ret);
if(ret != CL_SUCCESS){
printf("createPipe, ERROR CODE: %d\n", ret);
bb=1;
}
// Set the arguments of the kernel
ret = clSetKernelArg(pipe_writer, 0, sizeof(cl_mem), (void *)&a_mem_obj);
if(ret != CL_SUCCESS){
printf("setArgWriterZERO, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clSetKernelArg(pipe_writer, 1, sizeof(cl_mem), &pipe);
if(ret != CL_SUCCESS){
printf("setArgWriterONE, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clSetKernelArg(pipe_reader, 0, sizeof(cl_mem), (void *)&b_mem_obj);
if(ret != CL_SUCCESS){
printf("setArgReaderZERO, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clSetKernelArg(pipe_reader, 1, sizeof(cl_mem), &pipe);
if(ret != CL_SUCCESS){
printf("setArgReaderONE, ERROR CODE: %d\n", ret);
bb=1;
}
// Enqueue each kernel as a single work-item task
ret = clEnqueueTask(command_queue, pipe_writer, 0, NULL, NULL);
if(ret != CL_SUCCESS){
printf("EnqueueKernelWriter, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clEnqueueTask(command_queue2, pipe_reader, 0, NULL, NULL);
if(ret != CL_SUCCESS){
printf("EnqueueKernelReader, ERROR CODE: %d\n", ret);
bb=1;
}
ret = clEnqueueReadBuffer(command_queue2, b_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), B, 0, NULL, NULL);
if(ret != CL_SUCCESS){
printf("EnqueueReadBuffer, ERROR CODE: %d\n", ret);
bb=1;
}
if(bb == 0){
// Display the result to the screen
for(i = 0; i < LIST_SIZE; i++)
printf("%d and %d\n", A[i], B[i]);
}
// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clFinish(command_queue2);
ret = clReleaseKernel(pipe_writer);
ret = clReleaseKernel(pipe_reader);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(pipe);
ret = clReleaseMemObject(a_mem_obj);
ret = clReleaseMemObject(b_mem_obj);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseCommandQueue(command_queue2);
ret = clReleaseContext(context);
free(source_str);
free(build_log);
free(A);
free(B);
return 0;
}
This is the information about the CPU I am using, as reported by 'clinfo':
Platform Name Intel(R) CPU Runtime for OpenCL(TM) Applications
Number of devices 1
Device Name Intel(R) Xeon(R) CPU E5-2698 v4 @ 2.20GHz
Device Vendor Intel(R) Corporation
Device Vendor ID 0x8086
Device Version OpenCL 2.1 (Build 0)
Driver Version 18.1.0.0920
Device OpenCL C Version OpenCL C 2.0
Device Type CPU
Device Profile FULL_PROFILE
Max compute units 80
Max clock frequency 2200MHz
Device Partition (core)
Max number of sub-devices 80
Supported partition types by counts, equally, by names (Intel)
Max work item dimensions 3
Max work item sizes 8192x8192x8192
Max work group size 8192
Preferred work group size multiple 128
Max sub-groups per work group 1
Preferred / native vector sizes
char 1 / 32
short 1 / 16
int 1 / 8
long 1 / 4
half 0 / 0 (n/a)
float 1 / 8
double 1 / 4 (cl_khr_fp64)
Half-precision Floating-point support (n/a)
Single-precision Floating-point support (core)
Denormals Yes
Infinity and NANs Yes
Round to nearest Yes
Round to zero No
Round to infinity No
IEEE754-2008 fused multiply-add No
Support is emulated in software No
Correctly-rounded divide and sqrt operations No
Double-precision Floating-point support (cl_khr_fp64)
Denormals Yes
Infinity and NANs Yes
Round to nearest Yes
Round to zero Yes
Round to infinity Yes
IEEE754-2008 fused multiply-add Yes
Support is emulated in software No
Correctly-rounded divide and sqrt operations No
Address bits 64, Little-Endian
Global memory size 540956721152 (503.8GiB)
Error Correction support No
Max memory allocation 135239180288 (126GiB)
Unified memory for Host and Device Yes
Shared Virtual Memory (SVM) capabilities (core)
Coarse-grained buffer sharing Yes
Fine-grained buffer sharing Yes
Fine-grained system sharing Yes
Atomics Yes
Minimum alignment for any data type 128 bytes
Alignment of base address 1024 bits (128 bytes)
Preferred alignment for atomics
SVM 64 bytes
Global 64 bytes
Local 0 bytes
Max size for global variable 65536 (64KiB)
Preferred total size of global vars 65536 (64KiB)
Global Memory cache type Read/Write
Global Memory cache size 262144
Global Memory cache line 64 bytes
Image support Yes
Max number of samplers per kernel 480
Max size for 1D images from buffer 8452448768 pixels
Max 1D or 2D image array size 2048 images
Base address alignment for 2D image buffers 64 bytes
Pitch alignment for 2D image buffers 64 bytes
Max 2D image size 16384x16384 pixels
Max 3D image size 2048x2048x2048 pixels
Max number of read image args 480
Max number of write image args 480
Max number of read/write image args 480
Max number of pipe args 16
Max active pipe reservations 3276
Max pipe packet size 1024
Local memory type Global
Local memory size 32768 (32KiB)
Max constant buffer size 131072 (128KiB)
Max number of constant args 480
Max size of kernel argument 3840 (3.75KiB)
Queue properties (on host)
Out-of-order execution Yes
Profiling Yes
Local thread execution (Intel) Yes
Queue properties (on device)
Out-of-order execution Yes
Profiling Yes
Preferred size 4294967295 (4GiB)
Max size 4294967295 (4GiB)
Max queues on device 4294967295
Max events on device 4294967295
Prefer user sync for interop No
Profiling timer resolution 1ns
Execution capabilities
Run OpenCL kernels Yes
Run native kernels Yes
Sub-group independent forward progress No
IL version SPIR-V_1.0
SPIR versions 1.2
printf() buffer size 1048576 (1024KiB)
Built-in kernels
Device Available Yes
Compiler Available Yes
Linker Available Yes
Device Extensions cl_khr_icd cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_byte_addressable_store cl_khr_depth_images cl_khr_3d_image_writes cl_intel_exec_by_local_thread cl_khr_spir cl_khr_fp64 cl_khr_image2d_from_buffer cl_intel_vec_len_hint
Answer (score: 5):
OpenCL 2.0 pipes for Intel FPGAs are not the same as standard OpenCL 2.0 pipes for CPUs.
One important difference is that standard OpenCL 2.0 pipes are not designed for communication between concurrently running kernels. Pipes are a subclass of memory objects, and their state is enforced only at synchronization points (see s3.3.1 Memory Consistency of the OpenCL 1.2 specification), where a synchronization point is a command-queue barrier or a wait on an event (see s3.4.3 Synchronization). In other words, according to the OpenCL specification, data written to a pipe is only guaranteed to be visible once the writing kernel has finished executing.
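For reference, a minimal host-side sketch of what the standard guarantees do support on a CPU: run the producer to completion (here via an event dependency) before the consumer starts. It reuses the queue and kernel names from the host code above; writer_done is a name introduced only for this sketch:

/* Sketch: producer-then-consumer, which is what standard OpenCL 2.0 pipes guarantee.
   Reuses command_queue, command_queue2, pipe_writer, pipe_reader from the question. */
cl_event writer_done;
ret = clEnqueueTask(command_queue, pipe_writer, 0, NULL, &writer_done);
/* The reader starts only after the writer has finished, so all pipe packets are visible. */
ret = clEnqueueTask(command_queue2, pipe_reader, 1, &writer_done, NULL);
ret = clFinish(command_queue2);
clReleaseEvent(writer_done);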
Intel's OpenCL for FPGA adds extra functionality (extensions) on top of OpenCL 2.0 pipes: in particular, it guarantees that kernels can communicate through pipes, and it provides several extensions that make this kind of communication simpler and more efficient (blocking pipes, host pipes, pipe depth). None of these features are supported by the Intel OpenCL runtime for CPUs.
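For illustration, here is roughly what the consumer looks like with the Intel FPGA blocking-pipe attribute, as described in the Intel documentation referenced in the question. This declaration is only accepted by the Intel FPGA offline compiler and its emulators, not by the CPU runtime, which is consistent with the __write_pipe_2_bl is undefined error you see there:

/* Sketch of a blocking-pipe reader per the Intel FPGA SDK for OpenCL documentation.
   Not compilable with the Intel CPU Runtime for OpenCL. */
__kernel void pipe_reader(__global int *out,
                          read_only pipe int __attribute__((blocking))
                                             __attribute__((depth(SIZE))) p)
{
    for(int i = 0; i < SIZE; i++){
        read_pipe(p, &out[i]);   /* blocks until a packet is available */
    }
}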
However, for your thesis project you could look at the Fast Emulator for FPGAs: it is essentially a CPU runtime that supports the FPGA extensions, including pipes (with kernel-to-kernel communication) and host pipes. See https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/hb/opencl-sdk/aocl_programming_guide.pdf, s8.7 Using the Fast Emulator (Preview).
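If you try the emulator, the host code would also need to select the emulator platform instead of whatever clGetPlatformIDs(1, ...) happens to return first. A rough sketch follows; the "Emulation" substring used to match the platform name is an assumption, so check the exact name reported by clinfo on your setup:

/* Sketch: select an OpenCL platform by an (assumed) name fragment; needs <string.h> for strstr. */
cl_platform_id platforms[8];
cl_uint num_platforms = 0;
clGetPlatformIDs(8, platforms, &num_platforms);
for (cl_uint p = 0; p < num_platforms; p++) {
    char name[256] = {0};
    clGetPlatformInfo(platforms[p], CL_PLATFORM_NAME, sizeof(name), name, NULL);
    if (strstr(name, "Emulation") != NULL) {   /* assumed name fragment, verify with clinfo */
        platform_id = platforms[p];
        break;
    }
}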
"Also, the kernels do not seem to run in parallel unless I use two different command queues."
If a command queue is created without CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, it is an in-order queue, which means there is an implicit dependency between the commands pushed to it, so they cannot run in parallel.
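A minimal sketch of creating such a queue with the OpenCL 2.0 host API (clCreateCommandQueueWithProperties, replacing the deprecated clCreateCommandQueue used in the host code above); even with an out-of-order queue, whether the runtime actually overlaps the two tasks is up to the implementation:

/* Sketch: one out-of-order host queue instead of two in-order queues. */
cl_queue_properties props[] = {
    CL_QUEUE_PROPERTIES, (cl_queue_properties)CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
    0
};
cl_command_queue ooo_queue = clCreateCommandQueueWithProperties(context, device_id, props, &ret);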
Also, you should call clFlush(command_queue) before clEnqueueReadBuffer(command_queue2, ...) to make sure the writer kernel has been submitted for execution before you make the blocking call for the reader.
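Applied to the host code in the question (same queue, kernel, and buffer names), that ordering would look roughly like this:

/* Sketch: flush the writer's queue before blocking on the reader's queue. */
ret = clEnqueueTask(command_queue, pipe_writer, 0, NULL, NULL);
ret = clFlush(command_queue);          /* ensure the writer is submitted for execution */
ret = clEnqueueTask(command_queue2, pipe_reader, 0, NULL, NULL);
ret = clEnqueueReadBuffer(command_queue2, b_mem_obj, CL_TRUE, 0,
                          LIST_SIZE * sizeof(int), B, 0, NULL, NULL);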