AMD clEnqueueMapBuffer and Pinned Memory

Posted: 2017-04-15 12:51:53

Tags: asynchronous, memory, opencl, gpu

I am currently using an AMD GPU.

/*device memory*/
pattern_obj = clCreateBuffer(context, CL_MEM_READ_ONLY, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_char), NULL, &ret);
text_objA = clCreateBuffer(context, CL_MEM_READ_ONLY, fileLen * sizeof(char) / 2, NULL, &ret);
text_objB = clCreateBuffer(context, CL_MEM_READ_ONLY, fileLen * sizeof(char) / 2, NULL, &ret);
failure_obj = clCreateBuffer(context, CL_MEM_READ_ONLY, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_int), NULL, &ret);
ret_objA = clCreateBuffer(context, CL_MEM_WRITE_ONLY, MAX_PATTERN_NUM * sizeof(cl_int), NULL, &ret);
ret_objB = clCreateBuffer(context, CL_MEM_WRITE_ONLY, MAX_PATTERN_NUM * sizeof(cl_int), NULL, &ret);

/*pinned memory*/
mPattern_obj = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_char), NULL, &ret);
mText_obj = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, fileLen * sizeof(char), NULL, &ret);
mFailure_obj = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_int), NULL, &ret);
mRet_obj = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, MAX_PATTERN_NUM * sizeof(cl_int) * 2, NULL, &ret);

/*mapped pointer for pinned memory*/
pattern = (cl_char *)clEnqueueMapBuffer(command_queue[0], mPattern_obj, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_char), 0, NULL, NULL, &ret);
strings = (char *)clEnqueueMapBuffer(command_queue[0], mText_obj, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, fileLen * sizeof(char), 0, NULL, NULL, &ret);
failure = (cl_int *)clEnqueueMapBuffer(command_queue[0], mFailure_obj, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_int), 0, NULL, NULL, &ret);
matched = (cl_int *)clEnqueueMapBuffer(command_queue[0], mRet_obj, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, MAX_PATTERN_NUM * sizeof(cl_int) * 2, 0, NULL, NULL, &ret);

/*Initialize the mapped pointers*/

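/* Non-blocking writes: upload the pattern, the failure table and the front half of the text from the mapped (pinned) host pointers to the device buffers on queue 0, then launch kernel[0] */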
ret = clEnqueueWriteBuffer(command_queue[0], pattern_obj, CL_FALSE, 0, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_char), (void *)&pattern[0], 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue[0], text_objA, CL_FALSE, 0, halfSize, (void *)&strings[0], 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue[0], failure_obj, CL_FALSE, 0, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_int), (void *)&failure[0], 0, NULL, NULL);

ret = clEnqueueNDRangeKernel(command_queue[0], kernel[0], 1, NULL, &global_item_size, &local_item_size, 0, NULL, &kmp_event); 

clWaitForEvents(1, &kmp_event);

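/* Same uploads for the back half of the text on queue 1, then kernel[1] */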
ret = clEnqueueWriteBuffer(command_queue[1], pattern_obj, CL_FALSE, 0, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_char), (void *)&pattern[0], 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue[1], text_objB, CL_FALSE, 0, halfSize, (void *)&strings[halfOffset], 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue[1], failure_obj, CL_FALSE, 0, MAX_PATTERN_SIZE * MAX_PATTERN_NUM * sizeof(cl_int), (void *)&failure[0], 0, NULL, NULL);

ret = clEnqueueNDRangeKernel(command_queue[1], kernel[1], 1, NULL, &global_item_size, &local_item_size, 0, NULL, &kmp_event);

clWaitForEvents(1, &kmp_event);

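/* Non-blocking reads of both result buffers back into the mapped result pointer */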
ret = clEnqueueReadBuffer(command_queue[0], ret_objA, CL_FALSE, 0, MAX_PATTERN_NUM * sizeof(cl_int), (void *)&matched[0], 0, NULL, NULL);

ret = clEnqueueReadBuffer(command_queue[1], ret_objB, CL_FALSE, 0, MAX_PATTERN_NUM * sizeof(cl_int), (void *)&matched[MAX_PATTERN_NUM], 0, NULL, NULL);

This is my pinned-memory code.

I use clEnqueueMapBuffer with pinned memory to overlap data transfer and kernel execution.

I know this code follows the NVIDIA pattern, and I think there is a mismatch between how NVIDIA and AMD handle pinned memory.

I could not find any AMD sample code on overlapping transfers with computation, so I don't know what I should do.

What should I change to make my code work on an AMD GPU?

I split text_obj into A and B because I want to transfer the text in two pieces, a front half and a back half.

For example, text: "ababcccc" → text_objA => "abab" and text_objB => "cccc".
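For reference, a minimal sketch of how the two halves could be sliced; this is my assumption of how halfSize and halfOffset, used in the writes above, would be defined (it assumes fileLen is even):

size_t halfOffset = fileLen / 2;               /* index where the back half starts */
size_t halfSize   = halfOffset * sizeof(char); /* bytes per half */
/* front half -> text_objA : strings[0 .. halfOffset-1]        ("abab") */
/* back half  -> text_objB : strings[halfOffset .. fileLen-1]  ("cccc") */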

1 Answer:

Answer 0 (score: 0)

Yes, NVIDIA and AMD do handle pinned memory differently. NVIDIA has the odd requirement of two buffers (one device, one host): it magically detects that the host pointer used in a copy operation is "special" and makes the copy faster. Note that with newer drivers you may get similar performance by simply mapping/unmapping the device buffer, which is exactly how AMD does it (no second buffer needed).

Read AMD's developer guide; it has a whole section on memory transfers. Bottom line: use the documentation as a guide, but try everything and profile it on your device to find out what actually helps.
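A minimal sketch of the single-buffer map/write/unmap upload the answer describes, assuming an in-order queue; queue, ctx, and host_src are illustrative names, not taken from the question:

/* Requires <CL/cl.h> and <string.h> (memcpy). */
cl_int err;
cl_mem text_obj = clCreateBuffer(ctx, CL_MEM_READ_ONLY, fileLen, NULL, &err);

/* Map the device buffer itself; the AMD runtime can hand back a pinned/zero-copy region.
   CL_MAP_WRITE_INVALIDATE_REGION (OpenCL 1.2) skips copying the old contents to the host;
   use CL_MAP_WRITE on OpenCL 1.1. */
char *ptr = (char *)clEnqueueMapBuffer(queue, text_obj, CL_TRUE,
                                       CL_MAP_WRITE_INVALIDATE_REGION,
                                       0, fileLen, 0, NULL, NULL, &err);

memcpy(ptr, host_src, fileLen);  /* fill the mapped region on the host */

/* Unmapping returns ownership to the device; kernels may use text_obj once this command completes. */
err = clEnqueueUnmapMemObject(queue, text_obj, ptr, 0, NULL, NULL);

Profiling both approaches, as the answer suggests, is the only reliable way to see which path is faster on a given driver.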