gpt4 book ai didi

c++ - OpenCL 限制 for 循环大小?

转载 作者:太空宇宙 更新时间:2023-11-04 02:45:42 26 4
gpt4 key购买 nike

更新:clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0, LIST_SIZE * sizeof(double), C, 0, NULL, NULL); 正在返回 -5,CL_OUT_OF_RESOURCES。这个函数/调用不应该返回这个!

我开始使用 OpenCL 时遇到了一个问题。如果我允许 for 循环（在内核中）运行 10000 次，输出数组 C 中的所有值都为 0；如果我只允许循环运行 8000 次，则结果都是正确的。

我在内核周围添加了等待以确保它完成，以为我在完成之前就将数据拉出，并且尝试了 clWaitForEvents 和 clFinish。任何调用都不会发出错误信号。当我使用整数时，for 循环的大小为 4000000。 float 和 double 有同样的问题，但是 float 在 10000 时工作，但在 20000 时不工作。当我使用 float 时，我删除了 #pragma OPENCL EXTENSION cl_khr_fp64 : enable 以检查这不是问题所在。

这是不是有些奇怪的内存问题,我是不是用错了 OpenCL?我意识到在大多数内核中我不会像这样实现 for 循环,但这似乎是个问题。我还删除了 __private 以查看是否是问题所在,没有变化。那么 OpenCL 内核中 for 循环的大小有限制吗?是硬件特定的吗?或者这是一个错误?

内核是一个简单的内核,它将 2 个数组 (A+B) 相加并输出另一个 (C)。为了感受性能,我在每个计算周围放置了一个 for 循环以减慢/增加每次运行的操作数。

内核代码如下:

#pragma OPENCL EXTENSION cl_khr_fp64 : enable

// Element-wise vector addition: C[i] = A[i] + B[i], one work-item per element.
// The inner loop repeats the identical (loop-invariant) computation purely to
// inflate per-work-item runtime for timing experiments; note an optimizing
// compiler is free to hoist the store and collapse the loop entirely.
__kernel void vector_add(__global double *A, __global double *B, __global double *C)
{
    // Index of the element this work-item is responsible for.
    const int gid = get_global_id(0);

    // Artificial workload: redo the same add/store 10000 times.
    unsigned int iter = 0u;
    while (iter < 10000u)
    {
        C[gid] = A[gid] + B[gid];
        ++iter;
    }
}

我正在运行的代码如下:(我确保在float和double之间切换时两段代码之间的变量是一致的)

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif

#define MAX_SOURCE_SIZE (0x100000)

int main(void) {
// Create the two input vectors
int i;
const int LIST_SIZE = 4000000;
double *A = (double*)malloc(sizeof(double)*LIST_SIZE);
double *B = (double*)malloc(sizeof(double)*LIST_SIZE);
for(i = 0; i < LIST_SIZE; i++) {
A[i] = static_cast<double>(i);
B[i] = static_cast<double>(LIST_SIZE - i);
}

// Load the kernel source code into the array source_str
FILE *fp;
char *source_str;
size_t source_size;

fp = fopen("vector_add_kernel.cl", "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );

// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
// clGetPlatformIDs(1, &platform_id, NULL);
//clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, ret_num_devices);


cl_int ret = clGetPlatformIDs(1, &platform_id, NULL);
if (ret != CL_SUCCESS) {
printf("Error: Failed to get platforms! (%d) \n", ret);
return EXIT_FAILURE;
}
ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &ret_num_devices);
if (ret != CL_SUCCESS) {
printf("Error: Failed to query platforms to get devices! (%d) \n", ret);
return EXIT_FAILURE;
}
/*
cl_int ret = clGetPlatformIDs(1, &platform_id, NULL);
if (ret != CL_SUCCESS) {
printf("Error: Failed to get platforms! (%d) \n", ret);
return EXIT_FAILURE;
}
ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1,
&device_id, &ret_num_devices);
if (ret != CL_SUCCESS) {
printf("Error: Failed to query platforms to get devices! (%d) \n", ret);
return EXIT_FAILURE;
}
*/
// Create an OpenCL context
cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);

// Create a command queue
cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);

// Create memory buffers on the device for each vector
cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(double), NULL, &ret);
cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(double), NULL, &ret);
cl_mem c_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
LIST_SIZE * sizeof(double), NULL, &ret);
if (ret != CL_SUCCESS) {
printf("Error: Buffer Fail! (%d) \n", ret);
return EXIT_FAILURE;
}

// Copy the lists A and B to their respective memory buffers
ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(double), A, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, b_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(double), B, 0, NULL, NULL);

std::cout << "Begin Compile" << "\n";
// Create a program from the kernel source
cl_program program = clCreateProgramWithSource(context, 1,
(const char **)&source_str, (const size_t *)&source_size, &ret);
if (ret != CL_SUCCESS) {
printf("Error: Program Fail! (%d) \n", ret);
return EXIT_FAILURE;
}

// Build the program
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
if (ret != CL_SUCCESS) {
printf("Error: ProgramBuild Fail! (%d) \n", ret);
return EXIT_FAILURE;
}

// Create the OpenCL kernel
cl_kernel kernel = clCreateKernel(program, "vector_add", &ret);
if (ret != CL_SUCCESS) {
printf("Error: Kernel Build Fail! (%d) \n", ret);
return EXIT_FAILURE;
}
std::cout << "End Compile" << "\n";

std::cout << "Begin Data Move" << "\n";
// Set the arguments of the kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&a_mem_obj);
ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&b_mem_obj);
ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&c_mem_obj);
std::cout << "End Data Move" << "\n";

// Execute the OpenCL kernel on the list
size_t global_item_size = LIST_SIZE; // Process the entire lists
size_t local_item_size = 64; // Process in groups of 64

std::cout << "Begin Execute" << "\n";
cl_event event;
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
&global_item_size, &local_item_size, 0, NULL, &event);
clFinish(command_queue);
//clWaitForEvents(1, &event);
std::cout << "End Execute" << "\n";
if (ret != CL_SUCCESS) {
printf("Error: Execute Fail! (%d) \n", ret);
return EXIT_FAILURE;
}

// Read the memory buffer C on the device to the local variable C
std::cout << "Begin Data Move" << "\n";

double *C = (double*)malloc(sizeof(double)*LIST_SIZE);
ret = clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(double), C, 0, NULL, NULL);
if (ret != CL_SUCCESS) {
printf("Error: Read Fail! (%d) \n", ret);
return EXIT_FAILURE;
}
clFinish(command_queue);
std::cout << "End Data Move" << "\n";

std::cout << "Done" << "\n";
std::cin.get();
// Display the result to the screen
for(i = 0; i < LIST_SIZE; i++)
printf("%f + %f = %f \n", A[i], B[i], C[i]);

// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(a_mem_obj);
ret = clReleaseMemObject(b_mem_obj);
ret = clReleaseMemObject(c_mem_obj);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(A);
free(B);
free(C);
std::cout << "Number of Devices: " << ret_num_devices << "\n";
std::cin.get();
return 0;
}

我在互联网上看过,找不到有类似问题的人,这是一个问题,因为它可能导致代码在扩大规模之前运行良好......

我正在运行 Ubuntu 14.04,并且有一个用于 RC520 的笔记本电脑显卡,我使用 bumblebee/optirun 运行它。如果此错误在其他机器上无法重现,最大循环大小为 4000000,那么我将使用 bumblebee/optirun 记录错误。

干杯

最佳答案

我发现了这个问题：连接到显示器/活动 VGA 等的 GPU 有一个看门狗定时器，它会在大约 5 秒后让长时间运行的内核超时。非 Tesla 卡都是这种情况；Tesla 卡具有关闭该功能的选项。在不接显示器的辅助卡上运行是一种解决方法。这很糟糕，需要尽快修复。这绝对是 NVIDIA 的问题，不确定 AMD 是否也这样，不管怎样，这太糟糕了。

解决方法是在 Windows 中更改注册表,在 Linux/Ubuntu 中更改 X conf 并放置:

Option "Interactive" "0"

放在显卡对应的 Device 部分。但是 X conf（xorg.conf）在较新的版本中已经不再自动生成，可能需要手动创建。如果有人能贴出可直接复制粘贴的控制台命令来修复此问题，那将是一个更好的答案。

关于c++ - OpenCL 限制 for 循环大小?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/27648360/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com