
c - Box filter with CUDA C

I have very little experience with CUDA and am trying to write a box filter. I have read that a box filter is a filter in which each pixel of the result image has a value equal to the average of its neighbouring pixels in the input image. I found this document http://www.nvidia.com/content/nvision2008/tech_presentations/Game_Developer_Track/NVISION08-Image_Processing_and_Video_with_CUDA.pdf and changed the code slightly. This is my function:

#define TILE_W 16
#define TILE_H 16
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))

__global__ void d_filter(unsigned char *g_idata, unsigned char *g_odata, unsigned int width, unsigned int height)
{
    __shared__ unsigned char smem[BLOCK_W*BLOCK_H];
    int x = blockIdx.x*TILE_W + threadIdx.x - R;
    int y = blockIdx.y*TILE_H + threadIdx.y - R;
    // clamp to edge of image
    x = max(0, x);
    x = min(x, width-1);
    y = max(y, 0);
    y = min(y, height-1);
    unsigned int index = y*width + x;
    unsigned int bindex = threadIdx.y*blockDim.y+threadIdx.x;
    // each thread copies its pixel of the block to shared memory
    smem[bindex] = g_idata[index];
    __syncthreads();
    // only threads inside the apron will write results
    if ((threadIdx.x >= R) && (threadIdx.x < (BLOCK_W-R)) && (threadIdx.y >= R) && (threadIdx.y < (BLOCK_H-R))) {
        float sum = 0;
        for(int dy=-R; dy<=R; dy++) {
            for(int dx=-R; dx<=R; dx++) {
                float i = smem[bindex + (dy*blockDim.x) + dx];
                sum += i;
            }
        }
        g_odata[index] = sum / S;
    }
}
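
For reference, this is what I understand the filter should compute, written as a plain CPU function (single channel, window clamped at the image borders). It is only a sketch of the definition above, reusing the R and S macros; the function name is just a placeholder:

static int clampi(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }

// CPU reference: every output pixel is the average of the (2*R+1) x (2*R+1)
// neighbourhood around it in the input, with coordinates clamped at the borders.
void box_filter_cpu(const unsigned char *in, unsigned char *out, int width, int height)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            float sum = 0.0f;
            for (int dy = -R; dy <= R; dy++) {
                for (int dx = -R; dx <= R; dx++) {
                    int xx = clampi(x + dx, 0, width - 1);
                    int yy = clampi(y + dy, 0, height - 1);
                    sum += in[yy * width + xx];
                }
            }
            out[y * width + x] = (unsigned char)(sum / S); // S = D*D samples per window
        }
    }
}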

EDIT: Here is a newer version that works. The problem was in the kernel launch.

#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // for strncmp used in loadPPM
#include <cuda_runtime.h>
#include <assert.h>

#define PGMHeaderSize 0x40

inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels)
{
    FILE *fp = NULL;

    fp = fopen(file, "rb");
    if (!fp) {
        fprintf(stderr, "__LoadPPM() : unable to open file\n" );
        return false;
    }

    // check header
    char header[PGMHeaderSize];

    if (fgets(header, PGMHeaderSize, fp) == NULL)
    {
        fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
        return false;
    }

    if (strncmp(header, "P5", 2) == 0)
    {
        *channels = 1;
    }
    else if (strncmp(header, "P6", 2) == 0)
    {
        *channels = 3;
    }
    else
    {
        fprintf(stderr,"__LoadPPM() : File is not a PPM or PGM image\n" );
        *channels = 0;
        return false;
    }

    // parse header, read maxval, width and height
    unsigned int width = 0;
    unsigned int height = 0;
    unsigned int maxval = 0;
    unsigned int i = 0;

    while (i < 3)
    {
        if (fgets(header, PGMHeaderSize, fp) == NULL)
        {
            fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
            return false;
        }

        if (header[0] == '#')
        {
            continue;
        }

        if (i == 0)
        {
            i += sscanf(header, "%u %u %u", &width, &height, &maxval);
        }
        else if (i == 1)
        {
            i += sscanf(header, "%u %u", &height, &maxval);
        }
        else if (i == 2)
        {
            i += sscanf(header, "%u", &maxval);
        }
    }

    // check if given handle for the data is initialized
    if (NULL != *data)
    {
        if (*w != width || *h != height)
        {
            fprintf(stderr, "__LoadPPM() : Invalid image dimensions.\n" );
        }
    }
    else
    {
        *data = (unsigned char *) malloc(sizeof(unsigned char) * width * height * *channels);
        if (!*data) { // check the freshly allocated buffer, not the handle
            fprintf(stderr, "Unable to allocate hostmemory\n");
            return false;
        }
        *w = width;
        *h = height;
    }

    // read and close file
    if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) == 0)
    {
        fprintf(stderr, "__LoadPPM() : read data returned error.\n" );
        fclose(fp);
        return false;
    }

    fclose(fp);

    return true;
}

inline bool savePPM(const char *file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels)
{
    assert(NULL != data);
    assert(w > 0);
    assert(h > 0);

    std::fstream fh(file, std::fstream::out | std::fstream::binary);

    if (fh.bad())
    {
        fprintf(stderr, "__savePPM() : Opening file failed.\n" );
        return false;
    }

    if (channels == 1)
    {
        fh << "P5\n";
    }
    else if (channels == 3)
    {
        fh << "P6\n";
    }
    else
    {
        fprintf(stderr, "__savePPM() : Invalid number of channels.\n" );
        return false;
    }

    fh << w << "\n" << h << "\n" << 0xff << std::endl;

    for (unsigned int i = 0; (i < (w*h*channels)) && fh.good(); ++i)
    {
        fh << data[i];
    }

    fh.flush();

    if (fh.bad())
    {
        fprintf(stderr,"__savePPM() : Writing data failed.\n" );
        return false;
    }

    fh.close();

    return true;
}

#define TILE_W 16
#define TILE_H 16
#define Rx 2 // filter radius in x direction
#define Ry 2 // filter radius in y direction
#define FILTER_W (Rx*2+1) // filter diameter in x direction
#define FILTER_H (Ry*2+1) // filter diameter in y direction
#define S (FILTER_W*FILTER_H) // filter size
#define BLOCK_W (TILE_W+(2*Rx))
#define BLOCK_H (TILE_H+(2*Ry))

__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h){
    // Indexes
    const int x = blockIdx.x * TILE_W + threadIdx.x - Rx; // x image index
    const int y = blockIdx.y * TILE_H + threadIdx.y - Ry; // y image index
    const int d = y * w + x; // data index

    // shared mem
    __shared__ float shMem[BLOCK_W][BLOCK_H];
    if(x<0 || y<0 || x>=w || y>=h) { // Threads which are not in the picture just write 0 to the shared mem
        shMem[threadIdx.x][threadIdx.y] = 0;
        return;
    }
    shMem[threadIdx.x][threadIdx.y] = in[d];
    __syncthreads();

    // box filter (only for threads inside the tile)
    if ((threadIdx.x >= Rx) && (threadIdx.x < (BLOCK_W-Rx)) && (threadIdx.y >= Ry) && (threadIdx.y < (BLOCK_H-Ry))) {
        float sum = 0;
        for(int dx=-Rx; dx<=Rx; dx++) {
            for(int dy=-Ry; dy<=Ry; dy++) {
                sum += shMem[threadIdx.x+dx][threadIdx.y+dy];
            }
        }
        out[d] = sum / S;
    }
}


#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
    if (cudaSuccess != err)
    {
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
                file, line, (int)err, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main(){
    unsigned char *data=NULL, *d_idata=NULL, *d_odata=NULL;
    unsigned int w,h,channels;

    if(! loadPPM("../../data/lena_bw.pgm", &data, &w, &h, &channels)){
        fprintf(stderr, "Failed to open File\n");
        exit(EXIT_FAILURE);
    }

    printf("Loaded file with w:%d h:%d channels:%d \n",w,h,channels);

    unsigned int numElements = w*h*channels;
    size_t datasize = numElements * sizeof(unsigned char);

    // Allocate the Device Memory
    printf("Allocate Devicememory for data\n");
    checkCudaErrors(cudaMalloc((void **)&d_idata, datasize));
    checkCudaErrors(cudaMalloc((void **)&d_odata, datasize));

    // Copy to device
    printf("Copy idata from the host memory to the CUDA device\n");
    checkCudaErrors(cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice));

    // Launch Kernel
    int GRID_W = w/TILE_W +1;
    int GRID_H = h/TILE_H +1;
    dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
    dim3 blocksPerGrid(GRID_W,GRID_H);
    printf("CUDA kernel launch with [%d %d] blocks of [%d %d] threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock.x, threadsPerBlock.y);
    box_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);

    checkCudaErrors(cudaGetLastError());

    // Copy data from device to host
    printf("Copy odata from the CUDA device to the host memory\n");
    checkCudaErrors(cudaMemcpy(data, d_odata, datasize, cudaMemcpyDeviceToHost));

    // Free Device memory
    printf("Free Device memory\n");
    checkCudaErrors(cudaFree(d_idata));
    checkCudaErrors(cudaFree(d_odata));

    // Save Picture
    printf("Save Picture\n");
    bool saved = false;
    if (channels==1)
        saved = savePPM("output.pgm", data, w, h, channels);
    else if (channels==3)
        saved = savePPM("output.ppm", data, w, h, channels);
    else
        fprintf(stderr, "ERROR: Unable to save file - wrong channel!\n");

    // Free Host memory
    printf("Free Host memory\n");
    free(data);

    if (!saved){
        fprintf(stderr, "Failed to save File\n");
        exit(EXIT_FAILURE);
    }

    printf("Done\n");
}

Something is wrong with the filter function. loadPPM and savePPM (they are part of the CUDA samples) work with other kernel functions, but with this filter function I get a black image.

So the question is: what am I doing wrong?

Some further questions for my understanding: here https://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf I read that threads can only communicate (shared memory, syncthreads, ...) within one block. So in my function the image is split into rectangular blocks, and the picture on slide 9 of the image-processing slides shows roughly one such block? What about the pixels at the edge of a block - are they left unchanged?

Thanks for your answers.

Best answer

One problem in your code is that your kernel expects a 2D grid and 2D threadblocks:

int x = blockIdx.x*TILE_W + threadIdx.x - R;
int y = blockIdx.y*TILE_H + threadIdx.y - R;
        ^^^^^^^^^^          ^^^^^^^^^^^
        2D grid             2D threadblock

but you are launching the kernel with 1D grid and threadblock definitions:

int threadsPerBlock = 256;  // creates 1D threadblock
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; //1D grid
....
d_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);

Therefore, when you launch that kernel, threadIdx.y is always zero, and so is blockIdx.y.
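
In other words, the kernel launch needs 2D dim3 dimensions that match the kernel's block layout. The relevant lines (shown again in context in the complete program below) are:

dim3 threadsPerBlock(BLOCK_W, BLOCK_H);   // 2D threadblock: tile plus apron
dim3 blocksPerGrid((w+threadsPerBlock.x-1)/threadsPerBlock.x, (h+threadsPerBlock.y-1)/threadsPerBlock.y);   // 2D grid
d_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w, h);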

When I modify your code so that it does not depend on the PPM image load/store (i.e. using synthetic data instead), and make the changes necessary to launch a 2D grid and 2D threadblocks, consistent with your kernel, the code appears to run correctly for me and produces output that looks like filtered output, rather than zeros:

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define TILE_W 16
#define TILE_H 16
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))

__global__ void d_filter(unsigned char *g_idata, unsigned char *g_odata, unsigned int width, unsigned int height)
{
    __shared__ unsigned char smem[BLOCK_W*BLOCK_H];
    int x = blockIdx.x*TILE_W + threadIdx.x - R;
    int y = blockIdx.y*TILE_H + threadIdx.y - R;
    // clamp to edge of image
    x = max(0, x);
    x = min(x, width-1);
    y = max(y, 0);
    y = min(y, height-1);
    unsigned int index = y*width + x;
    unsigned int bindex = threadIdx.y*blockDim.y+threadIdx.x;
    // each thread copies its pixel of the block to shared memory
    smem[bindex] = g_idata[index];
    __syncthreads();
    // only threads inside the apron will write results
    if ((threadIdx.x >= R) && (threadIdx.x < (BLOCK_W-R)) && (threadIdx.y >= R) && (threadIdx.y < (BLOCK_H-R))) {
        float sum = 0;
        for(int dy=-R; dy<=R; dy++) {
            for(int dx=-R; dx<=R; dx++) {
                float i = smem[bindex + (dy*blockDim.x) + dx];
                sum += i;
            }
        }
        g_odata[index] = sum / S;
    }
}

const unsigned int imgw = 512;
const unsigned int imgh = 256;
void loadImg(unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *ch){
    *w = imgw;
    *h = imgh;
    *ch = 1;
    *data = (unsigned char *)malloc(imgw*imgh*sizeof(unsigned char));
    for (int i = 0; i < imgw*imgh; i++) (*data)[i] = i%8;
}


int main(){
    unsigned char *data=NULL, *d_idata=NULL, *d_odata;
    unsigned int w,h,channels;

    loadImg(&data, &w, &h, &channels);
    printf("Loaded file with w:%d h:%d channels:%d \n",w,h,channels);

    unsigned int numElements = w*h*channels;
    size_t datasize = numElements * sizeof(unsigned char);
    cudaError_t err = cudaSuccess;

    // Allocate the Device Memory
    printf("Allocate Devicememory for data\n");

    err = cudaMalloc((void **)&d_idata, datasize);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device memory for idata (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMalloc((void **)&d_odata, datasize);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device memory for odata (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy to device
    printf("Copy idata from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy idata from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch Kernel
    dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
    dim3 blocksPerGrid((w+threadsPerBlock.x-1)/threadsPerBlock.x, (h+threadsPerBlock.y-1)/threadsPerBlock.y);
    printf("CUDA kernel launch with %d,%d blocks of %d,%d threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock.x, threadsPerBlock.y);
    d_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);

    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy data from device to host
    printf("Copy odata from the CUDA device to the host memory\n");
    err = cudaMemcpy(data, d_odata, datasize, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy odata from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free Device memory
    printf("Free Device memory\n");
    err = cudaFree(d_idata);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device idata (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_odata);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device odata (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("results:\n");
    for (int i = 0; i < 16; i++){
        for (int j = 0; j < 16; j++) printf("%d ", data[i*w+j]);
        printf("\n");
    }

    // Free Host memory
    printf("Free Host memory\n");
    free(data);

    printf("\nDone\n");
}

When I run the above code with cuda-memcheck, I get this:

C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0\bin\win32\Debug>cuda-memcheck test
========= CUDA-MEMCHECK
Loaded file with w:512 h:256 channels:1
Allocate Devicememory for data
Copy idata from the host memory to the CUDA device
CUDA kernel launch with 26,13 blocks of 20,20 threads
Copy odata from the CUDA device to the host memory
Free Device memory
results:
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
Free Host memory

Done
========= ERROR SUMMARY: 0 errors

C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0\bin\win32\Debug>
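
As a quick sanity check on those numbers: with the synthetic input data[i] = i % 8, every row of the image is identical, so the 5x5 box average reduces to a 5-wide horizontal average. At column 6, for example, the window covers the values 4, 5, 6, 7, 0, so the result is 22/5 = 4.4, which truncates to 4 when stored to the unsigned char output; at column 1 the clamped window covers 0, 0, 1, 2, 3, giving 6/5 = 1.2, i.e. 1. Both match the printout above.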

Regarding "c - Box filter with CUDA C", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/39336574/
