I have very little experience with CUDA and am trying to write a box filter. I read that a box filter is a filter where each pixel in the result image has a value equal to the average of its neighbouring pixels in the input image. I found this document http://www.nvidia.com/content/nvision2008/tech_presentations/Game_Developer_Track/NVISION08-Image_Processing_and_Video_with_CUDA.pdf and changed the code slightly. Here is my function:
#define TILE_W 16
#define TILE_H 16
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))
__global__ void d_filter(unsigned char *g_idata, unsigned char *g_odata, unsigned int width, unsigned int height)
{
__shared__ unsigned char smem[BLOCK_W*BLOCK_H];
int x = blockIdx.x*TILE_W + threadIdx.x - R;
int y = blockIdx.y*TILE_H + threadIdx.y - R;
// clamp to edge of image
x = max(0, x);
x = min(x, width-1);
y = max(y, 0);
y = min(y, height-1);
unsigned int index = y*width + x;
unsigned int bindex = threadIdx.y*blockDim.y+threadIdx.x;
// each thread copies its pixel of the block to shared memory
smem[bindex] = g_idata[index];
__syncthreads();
// only threads inside the apron will write results
if ((threadIdx.x >= R) && (threadIdx.x < (BLOCK_W-R)) && (threadIdx.y >= R) && (threadIdx.y < (BLOCK_H-R))) {
float sum = 0;
for(int dy=-R; dy<=R; dy++) {
for(int dx=-R; dx<=R; dx++) {
float i = smem[bindex + (dy*blockDim.x) + dx];
sum += i;
}
}
g_odata[index] = sum / S;
}
}
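For comparison, the definition above (each output pixel is the average of its neighbours within the filter radius, with coordinates clamped at the image border) can be written as a minimal single-threaded CPU sketch. This is only an illustration under the assumption of an 8-bit grayscale, row-major image; the function name box_filter_cpu is made up for this sketch and is not part of the original code:
#include <algorithm>  // std::min, std::max

// Minimal CPU reference of the same box filter (assumptions: 8-bit grayscale,
// row-major image, edges clamped). Added only for comparison with the kernel above.
void box_filter_cpu(const unsigned char *in, unsigned char *out,
                    int width, int height, int radius)
{
    const int size = (2 * radius + 1) * (2 * radius + 1);  // filter size, e.g. 5*5 = 25 for radius 2
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            float sum = 0.0f;
            for (int dy = -radius; dy <= radius; dy++) {
                for (int dx = -radius; dx <= radius; dx++) {
                    // clamp neighbour coordinates to the edge of the image
                    int nx = std::min(std::max(x + dx, 0), width - 1);
                    int ny = std::min(std::max(y + dy, 0), height - 1);
                    sum += in[ny * width + nx];
                }
            }
            out[y * width + x] = (unsigned char)(sum / size);
        }
    }
}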
EDIT: Here is a newer version that works. The problem was in the kernel launch.
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <assert.h>
#define PGMHeaderSize 0x40
inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels)
{
FILE *fp = NULL;
fp = fopen(file, "rb");
if (!fp) {
fprintf(stderr, "__LoadPPM() : unable to open file\n" );
return false;
}
// check header
char header[PGMHeaderSize];
if (fgets(header, PGMHeaderSize, fp) == NULL)
{
fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
return false;
}
if (strncmp(header, "P5", 2) == 0)
{
*channels = 1;
}
else if (strncmp(header, "P6", 2) == 0)
{
*channels = 3;
}
else
{
fprintf(stderr,"__LoadPPM() : File is not a PPM or PGM image\n" );
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3)
{
if (fgets(header, PGMHeaderSize, fp) == NULL)
{
fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
return false;
}
if (header[0] == '#')
{
continue;
}
if (i == 0)
{
i += sscanf(header, "%u %u %u", &width, &height, &maxval);
}
else if (i == 1)
{
i += sscanf(header, "%u %u", &height, &maxval);
}
else if (i == 2)
{
i += sscanf(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data)
{
if (*w != width || *h != height)
{
fprintf(stderr, "__LoadPPM() : Invalid image dimensions.\n" );
}
}
else
{
*data = (unsigned char *) malloc(sizeof(unsigned char) * width * height * *channels);
if (!*data) {
fprintf(stderr, "Unable to allocate hostmemory\n");
return false;
}
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) == 0)
{
fprintf(stderr, "__LoadPPM() : read data returned error.\n" );
fclose(fp);
return false;
}
fclose(fp);
return true;
}
inline bool savePPM(const char *file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels)
{
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad())
{
fprintf(stderr, "__savePPM() : Opening file failed.\n" );
return false;
}
if (channels == 1)
{
fh << "P5\n";
}
else if (channels == 3)
{
fh << "P6\n";
}
else
{
fprintf(stderr, "__savePPM() : Invalid number of channels.\n" );
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w*h*channels)) && fh.good(); ++i)
{
fh << data[i];
}
fh.flush();
if (fh.bad())
{
fprintf(stderr,"__savePPM() : Writing data failed.\n" );
return false;
}
fh.close();
return true;
}
#define TILE_W 16
#define TILE_H 16
#define Rx 2 // filter radius in x direction
#define Ry 2 // filter radius in y direction
#define FILTER_W (Rx*2+1) // filter diameter in x direction
#define FILTER_H (Ry*2+1) // filter diameter in y direction
#define S (FILTER_W*FILTER_H) // filter size
#define BLOCK_W (TILE_W+(2*Rx))
#define BLOCK_H (TILE_H+(2*Ry))
__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h){
//Indexes
const int x = blockIdx.x * TILE_W + threadIdx.x - Rx; // x image index
const int y = blockIdx.y * TILE_H + threadIdx.y - Ry; // y image index
const int d = y * w + x; // data index
//shared mem
__shared__ float shMem[BLOCK_W][BLOCK_H];
if(x<0 || y<0 || x>=w || y>=h) { // Threads which are not in the picture just write 0 to the shared mem
shMem[threadIdx.x][threadIdx.y] = 0;
return;
}
shMem[threadIdx.x][threadIdx.y] = in[d];
__syncthreads();
// box filter (only for threads inside the tile)
if ((threadIdx.x >= Rx) && (threadIdx.x < (BLOCK_W-Rx)) && (threadIdx.y >= Ry) && (threadIdx.y < (BLOCK_H-Ry))) {
float sum = 0;
for(int dx=-Rx; dx<=Rx; dx++) {
for(int dy=-Ry; dy<=Ry; dy++) {
sum += shMem[threadIdx.x+dx][threadIdx.y+dy];
}
}
out[d] = sum / S;
}
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int main(){
unsigned char *data=NULL, *d_idata=NULL, *d_odata=NULL;
unsigned int w,h,channels;
if(! loadPPM("../../data/lena_bw.pgm", &data, &w, &h, &channels)){
fprintf(stderr, "Failed to open File\n");
exit(EXIT_FAILURE);
}
printf("Loaded file with w:%d h:%d channels:%d \n",w,h,channels);
unsigned int numElements = w*h*channels;
size_t datasize = numElements * sizeof(unsigned char);
// Allocate the Device Memory
printf("Allocate Devicememory for data\n");
checkCudaErrors(cudaMalloc((void **)&d_idata, datasize));
checkCudaErrors(cudaMalloc((void **)&d_odata, datasize));
// Copy to device
printf("Copy idata from the host memory to the CUDA device\n");
checkCudaErrors(cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice));
// Launch Kernel
int GRID_W = w/TILE_W +1;
int GRID_H = h/TILE_H +1;
dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
dim3 blocksPerGrid(GRID_W,GRID_H);
printf("CUDA kernel launch with [%d %d] blocks of [%d %d] threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock.x, threadsPerBlock.y);
box_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);
checkCudaErrors(cudaGetLastError());
// Copy data from device to host
printf("Copy odata from the CUDA device to the host memory\n");
checkCudaErrors(cudaMemcpy(data, d_odata, datasize, cudaMemcpyDeviceToHost));
// Free Device memory
printf("Free Device memory\n");
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
// Save Picture
printf("Save Picture\n");
bool saved = false;
if (channels==1)
saved = savePPM("output.pgm", data, w, h, channels);
else if (channels==3)
saved = savePPM("output.ppm", data, w, h, channels);
else fprintf(stderr, "ERROR: Unable to save file - wrong channel!\n");
// Free Host memory
printf("Free Host memory\n");
free(data);
if (!saved){
fprintf(stderr, "Failed to save File\n");
exit(EXIT_FAILURE);
}
printf("Done\n");
}
The filter function is the problem. loadPPM and savePPM (taken from the CUDA samples) work fine with other kernel functions, but with this filter function I get a black image.
So the question is: what am I doing wrong?
Some further comprehension questions: here https://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf I read that threads can only communicate within a block (shared memory, __syncthreads(), ...). So in my function the image is split into rectangular blocks, and the picture on page 9 of the image-processing slides shows roughly one such block? What about the pixels at the edges of the blocks? Are they left unchanged?
Thanks for your answers.
Best answer
One problem in your code is that your kernel expects a 2D grid and 2D threadblocks:
int x = blockIdx.x*TILE_W + threadIdx.x - R;
int y = blockIdx.y*TILE_H + threadIdx.y - R;
        ^^^^^^^^^^          ^^^^^^^^^^^
         2D grid            2D threadblock
But you are launching the kernel with 1D grid and threadblock definitions:
int threadsPerBlock = 256; // creates 1D threadblock
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; //1D grid
....
d_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);
As a result, when you launch that kernel, threadIdx.y will always be zero, and so will blockIdx.y.
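In other words, the launch configuration has to be 2D so that it matches the kernel's tile-plus-apron layout. A minimal corrected launch, using the same configuration as the complete example further below, looks like this:
// 2D threadblock (one tile plus its apron) and a 2D grid of such blocks
dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
dim3 blocksPerGrid((w + threadsPerBlock.x - 1) / threadsPerBlock.x,
                   (h + threadsPerBlock.y - 1) / threadsPerBlock.y);
d_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w, h);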
When I modify your code so that it does not depend on the PPM image load/store (i.e., using synthetic data instead), and make the necessary changes to launch a 2D grid and 2D threadblocks consistent with your kernel, the code appears to run correctly for me, and the output appears to be filtered output rather than zeros:
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define TILE_W 16
#define TILE_H 16
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))
__global__ void d_filter(unsigned char *g_idata, unsigned char *g_odata, unsigned int width, unsigned int height)
{
__shared__ unsigned char smem[BLOCK_W*BLOCK_H];
int x = blockIdx.x*TILE_W + threadIdx.x - R;
int y = blockIdx.y*TILE_H + threadIdx.y - R;
// clamp to edge of image
x = max(0, x);
x = min(x, width-1);
y = max(y, 0);
y = min(y, height-1);
unsigned int index = y*width + x;
unsigned int bindex = threadIdx.y*blockDim.y+threadIdx.x;
// each thread copies its pixel of the block to shared memory
smem[bindex] = g_idata[index];
__syncthreads();
// only threads inside the apron will write results
if ((threadIdx.x >= R) && (threadIdx.x < (BLOCK_W-R)) && (threadIdx.y >= R) && (threadIdx.y < (BLOCK_H-R))) {
float sum = 0;
for(int dy=-R; dy<=R; dy++) {
for(int dx=-R; dx<=R; dx++) {
float i = smem[bindex + (dy*blockDim.x) + dx];
sum += i;
}
}
g_odata[index] = sum / S;
}
}
const unsigned int imgw = 512;
const unsigned int imgh = 256;
void loadImg(unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *ch){
*w = imgw;
*h = imgh;
*ch = 1;
*data = (unsigned char *)malloc(imgw*imgh*sizeof(unsigned char));
for (int i = 0; i < imgw*imgh; i++) (*data)[i] = i%8;
}
int main(){
unsigned char *data=NULL, *d_idata=NULL, *d_odata;
unsigned int w,h,channels;
loadImg(&data, &w, &h, &channels);
printf("Loaded file with w:%d h:%d channels:%d \n",w,h,channels);
unsigned int numElements = w*h*channels;
size_t datasize = numElements * sizeof(unsigned char);
cudaError_t err = cudaSuccess;
// Allocate the Device Memory
printf("Allocate Devicememory for data\n");
err = cudaMalloc((void **)&d_idata, datasize);
if ( err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device memory for idata (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_odata, datasize);
if ( err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device memory for odata (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy to device
printf("Copy idata from the host memory to the CUDA device\n");
err =cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy idata from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch Kernel
dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
dim3 blocksPerGrid((w+threadsPerBlock.x-1)/threadsPerBlock.x, (h+threadsPerBlock.y-1)/threadsPerBlock.y);
printf("CUDA kernel launch with %d,%d blocks of %d,%d threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock.x, threadsPerBlock.y);
d_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);
err=cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy data from device to host
printf("Copy odata from the CUDA device to the host memory\n");
err=cudaMemcpy(data, d_odata, datasize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy odata from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free Device memory
printf("Free Device memory\n");
err=cudaFree(d_idata);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device idata (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaFree(d_odata);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device odata (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("results:\n");
for (int i = 0; i < 16; i++){
for (int j = 0; j < 16; j++) printf("%d ", data[i*w+j]);
printf("\n");}
// Free Host memory
printf("Free Host memory\n");
free(data);
printf("\nDone\n");
}
When I run the above code with cuda-memcheck, I get this:
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0\bin\win32\Debug>cuda-memcheck test
========= CUDA-MEMCHECK
Loaded file with w:512 h:256 channels:1
Allocate Devicememory for data
Copy idata from the host memory to the CUDA device
CUDA kernel launch with 26,13 blocks of 20,20 threads
Copy odata from the CUDA device to the host memory
Free Device memory
results:
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
0 1 2 3 4 5 4 3 3 2 2 3 4 5 4 3
Free Host memory
Done
========= ERROR SUMMARY: 0 errors
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0\bin\win32\Debug>
About "c - Box filter with CUDA C": a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/39336574/