gpt4 book ai didi

sorting - CUDA 中的粒子细胞计数(一维和二维直方图生成)

转载 作者:行者123 更新时间:2023-12-02 11:05:40 24 4
gpt4 key购买 nike

__global__函数用于增加数字并计算某些单元格中的粒子数量。

// Particle-per-cell counter: one thread per particle. index[tidx] is the
// cell id of particle tidx; the kernel increments that cell's counter in nc
// with a global-memory atomic (concurrent threads may hit the same cell).
// NOTE(review): there is no bounds guard — if the launch configuration
// creates more threads than particles, index[tidx] reads out of bounds.
// Consider passing the particle count N and guarding with `if (tidx < N)`.
// NOTE(review): the SP parameter DSMC is unused in this body — presumably
// simulation parameters; verify against the caller.
__global__ void Set_Nc_GPU_0831(int *nc,int *index,SP DSMC)
{
int tidx;
tidx=threadIdx.x+blockDim.x*blockIdx.x;

atomicAdd(&nc[index[tidx]],1);
}

我使用的原子操作速度很慢。所以我想用一些其他函数或算法来替换原子函数。

有没有其他方法可以修改这个简单的 __global__ 函数?

最佳答案

这是一个迟到的答案,目的是从未回答的列表中删除此问题。

您已经认识到,计算落在某个单元格内的粒子数量相当于构建直方图。直方图的构建是一个经过充分研究的问题。 Shane Cook 的书(CUDA 编程)包含了关于这个主题的很好的讨论。此外,CUDA 样本包含直方图示例。此外,histogram construction by CUDA Thrust也是可以的。最后,CUDA Programming Blog包含更多见解。

下面我提供了一个代码来比较 4 个不同版本的直方图计算：

  1. CPU;
  2. 具有原子功能的 GPU(基本上是您的方法);
  3. GPU 在共享内存中具有原子性,并最终对部分直方图求和(基本上是 Paul R 提出的方法);
  4. 使用 CUDA Thrust 的 GPU。

如果您在 Kepler K20c 上运行 100MB 数据（即代码中的 SIZE）的典型情况的代码，您将得到以下计时：

  1. CPU = 83ms;
  2. 具有原子功能的 GPU = 15.8ms;
  3. 共享内存中具有原子功能的 GPU = 17.7ms;
  4. GPU by CUDA Thrust = 40ms

正如您所看到的,令人惊讶的是,您的“暴力”解决方案是最快的。这是合理的,因为对于最新的架构(您的帖子日期为 2012 年 8 月,当时 Kepler 尚未发布,至少在欧洲),原子操作非常快。

这是代码:

#include <cstdio>
#include <cstdlib>
#include <ctime>

#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sort.h>

#define SIZE (100*1024*1024) // 100 MB

/**********************************************/
/* FUNCTION TO GENERATE RANDOM UNSIGNED CHARS */
/**********************************************/
// Allocates `size` bytes and fills them with pseudo-random values
// (rand() truncated to unsigned char). The caller owns the returned
// buffer and must free() it.
// Fix: the original did not check the malloc result, so an allocation
// failure would make callers write through a NULL pointer; abort with a
// diagnostic instead.
unsigned char* big_random_block(int size) {
    unsigned char *data = (unsigned char*)malloc(size);
    if (data == NULL) {
        fprintf(stderr, "big_random_block: failed to allocate %d bytes\n", size);
        exit(EXIT_FAILURE);
    }
    for (int i=0; i<size; i++)
        data[i] = rand();
    return data;
}

/***************************************/
/* GPU HISTOGRAM CALCULATION VERSION 1 */
/***************************************/
// 256-bin byte histogram using one global-memory atomicAdd per input byte.
// Grid-stride loop: any <<<blocks, threads>>> configuration covers `size`,
// so the thread count does not have to match the data size.
// Preconditions: `histo` holds 256 unsigned ints and is zeroed beforehand.
// Fix: the original used an `int` index/stride against a `long` size, which
// silently overflows for buffers larger than INT_MAX bytes.
__global__ void histo_kernel1(const unsigned char * __restrict__ buffer, long size, unsigned int *histo ) {

    // --- long arithmetic so blockIdx.x * blockDim.x cannot overflow int
    long i = threadIdx.x + blockIdx.x * (long)blockDim.x;
    const long stride = (long)blockDim.x * gridDim.x;
    while (i < size) {
        atomicAdd(&histo[buffer[i]], 1);
        i += stride;
    }
}

/***************************************/
/* GPU HISTOGRAM CALCULATION VERSION 2 */
/***************************************/
// 256-bin byte histogram with per-block partial histograms in shared memory,
// folded into the global histogram with one atomicAdd per bin per block.
// This trades global-memory contention for (cheaper) shared-memory atomics.
// PRECONDITION: must be launched with blockDim.x == 256 — each thread owns
// exactly one shared bin (temp[threadIdx.x]) and one global bin in the
// final reduction. `histo` must be zeroed before the launch.
// Fix: the original used an `int` index/stride against a `long` size, which
// silently overflows for buffers larger than INT_MAX bytes.
__global__ void histo_kernel2(const unsigned char * __restrict__ buffer, long size, unsigned int *histo ) {

    // --- Allocating and initializing shared memory to store partial histograms
    __shared__ unsigned int temp[256];
    temp[threadIdx.x] = 0;
    __syncthreads();   // all bins zeroed before any thread accumulates

    // --- Grid-stride loop over the input; long arithmetic avoids overflow
    long i = threadIdx.x + blockIdx.x * (long)blockDim.x;
    const long offset = (long)blockDim.x * gridDim.x;
    while (i < size)
    {
        atomicAdd(&temp[buffer[i]], 1);
        i += offset;
    }
    __syncthreads();   // partial histogram complete before the reduction

    // --- Summing the block-local histogram into the global histogram
    atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}

/********************/
/* CUDA ERROR CHECK */
/********************/
// Wraps every CUDA runtime call: prints the error string with file/line on
// failure and (by default) exits with the error code.
// Fixes: `file` is now const char* — the macro passes the string literal
// __FILE__, and binding a literal to char* is ill-formed in C++11 and later.
// The macro body is wrapped in do { } while (0) so it expands safely inside
// unbraced if/else statements.
#define gpuErrchk(ans) do { gpuAssert((ans), __FILE__, __LINE__); } while (0)
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/********/
/* MAIN */
/********/
// Benchmarks four histogram implementations over the same SIZE random bytes:
//   1. serial CPU loop;
//   2. GPU with global-memory atomics (histo_kernel1);
//   3. GPU with shared-memory atomics (histo_kernel2);
//   4. CUDA Thrust (sort + upper_bound + adjacent_difference).
// Each result is timed and validated by decrementing the histogram with the
// host data, which must bring every bin back to zero.
// Fixes vs. original: standard `int main()` instead of `void main()`; the
// device histogram is allocated with sizeof(unsigned int) (was sizeof(long),
// inconsistent with its element type); cudaGetLastError() is checked after
// each kernel launch, since launch errors are not returned by <<<>>> itself.
int main(){

    // --- Generating an array of SIZE unsigned chars
    unsigned char *buffer = (unsigned char*)big_random_block(SIZE);

    /********************/
    /* CPU COMPUTATIONS */
    /********************/

    // --- Allocating host memory space and initializing the host-side histogram
    unsigned int histo[256];
    for (int i=0; i<256; i++) histo[i] = 0;

    clock_t start_CPU, stop_CPU;

    // --- Histogram calculation on the host
    start_CPU = clock();
    for (int i=0; i<SIZE; i++) histo[buffer[i]]++;
    stop_CPU = clock();
    float elapsedTime = (float)(stop_CPU - start_CPU) / (float)CLOCKS_PER_SEC * 1000.0f;
    printf("Time to generate (CPU): %3.1f ms\n", elapsedTime);

    // --- Indirect check of the result: the bins must sum to SIZE
    long histoCount = 0;
    for (int i=0; i<256; i++) { histoCount += histo[i]; }
    printf("Histogram Sum: %ld\n", histoCount);

    /********************/
    /* GPU COMPUTATIONS */
    /********************/

    // --- Initializing the device-side data
    unsigned char *dev_buffer;
    gpuErrchk(cudaMalloc((void**)&dev_buffer,SIZE));
    gpuErrchk(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice));

    // --- Allocating device memory space for the device-side histogram
    unsigned int *dev_histo;
    gpuErrchk(cudaMalloc((void**)&dev_histo,256*sizeof(unsigned int)));

    // --- GPU timing via CUDA events
    cudaEvent_t start, stop;
    gpuErrchk(cudaEventCreate(&start));
    gpuErrchk(cudaEventCreate(&stop));

    // --- ATOMICS
    // --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
    gpuErrchk(cudaEventRecord(start,0));
    gpuErrchk(cudaMemset(dev_histo,0,256*sizeof(unsigned int)));
    cudaDeviceProp prop;
    gpuErrchk(cudaGetDeviceProperties(&prop,0));
    int blocks = prop.multiProcessorCount;
    histo_kernel1<<<blocks*2,256>>>(dev_buffer, SIZE, dev_histo);
    gpuErrchk(cudaGetLastError());   // launch-configuration errors

    // --- The blocking memcpy also synchronizes with the kernel; the copy is
    //     deliberately included in the timed interval, as in the original.
    gpuErrchk(cudaMemcpy(histo,dev_histo,256*sizeof(unsigned int),cudaMemcpyDeviceToHost));
    gpuErrchk(cudaEventRecord(stop,0));
    gpuErrchk(cudaEventSynchronize(stop));
    gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
    printf("Time to generate (GPU): %3.1f ms\n", elapsedTime);

    histoCount = 0;
    for (int i=0; i<256; i++) {
        histoCount += histo[i];
    }
    printf( "Histogram Sum: %ld\n", histoCount );

    // --- Check the correctness of the results via the host: decrementing
    //     with the source data must zero every bin
    for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
    for (int i=0; i<256; i++) {
        if (histo[i] != 0) printf( "Failure at %d! Off by %d\n", i, histo[i] );
    }

    // --- ATOMICS IN SHARED MEMORY
    // --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
    gpuErrchk(cudaEventRecord(start,0));
    gpuErrchk(cudaMemset(dev_histo,0,256*sizeof(unsigned int)));
    gpuErrchk(cudaGetDeviceProperties(&prop,0));
    blocks = prop.multiProcessorCount;
    histo_kernel2<<<blocks*2,256>>>(dev_buffer, SIZE, dev_histo);
    gpuErrchk(cudaGetLastError());   // launch-configuration errors

    gpuErrchk(cudaMemcpy(histo,dev_histo,256*sizeof(unsigned int),cudaMemcpyDeviceToHost));
    gpuErrchk(cudaEventRecord(stop,0));
    gpuErrchk(cudaEventSynchronize(stop));
    gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
    printf("Time to generate (GPU): %3.1f ms\n", elapsedTime);

    histoCount = 0;
    for (int i=0; i<256; i++) {
        histoCount += histo[i];
    }
    printf( "Histogram Sum: %ld\n", histoCount );

    // --- Check the correctness of the results via the host
    for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
    for (int i=0; i<256; i++) {
        if (histo[i] != 0) printf( "Failure at %d! Off by %d\n", i, histo[i] );
    }

    // --- CUDA THRUST

    gpuErrchk(cudaEventRecord(start,0));

    // --- Wrapping dev_buffer raw pointer with a device_ptr and initializing a device_vector with it
    thrust::device_ptr<unsigned char> dev_ptr(dev_buffer);
    thrust::device_vector<unsigned char> dev_buffer_thrust(dev_ptr, dev_ptr + SIZE);

    // --- Sorting data to bring equal elements together
    thrust::sort(dev_buffer_thrust.begin(), dev_buffer_thrust.end());

    // --- The number of histogram bins is equal to the maximum value plus one
    int num_bins = dev_buffer_thrust.back() + 1;

    // --- Resize histogram storage
    thrust::device_vector<int> d_histogram;
    d_histogram.resize(num_bins);

    // --- Find the end of each bin of values (cumulative histogram)
    thrust::counting_iterator<int> search_begin(0);
    thrust::upper_bound(dev_buffer_thrust.begin(), dev_buffer_thrust.end(),
                        search_begin, search_begin + num_bins,
                        d_histogram.begin());

    // --- Compute the histogram by taking differences of the cumulative histogram
    thrust::adjacent_difference(d_histogram.begin(), d_histogram.end(),
                                d_histogram.begin());

    thrust::host_vector<int> h_histogram(d_histogram);
    gpuErrchk(cudaEventRecord(stop,0));
    gpuErrchk(cudaEventSynchronize(stop));
    gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
    printf("Time to generate (GPU): %3.1f ms\n", elapsedTime);

    histoCount = 0;
    for (int i=0; i<256; i++) {
        histoCount += h_histogram[i];
    }
    printf( "Histogram Sum: %ld\n", histoCount );

    // --- Check the correctness of the results via the host
    for (int i=0; i<SIZE; i++) h_histogram[buffer[i]]--;
    for (int i=0; i<256; i++) {
        if (h_histogram[i] != 0) printf( "Failure at %d! Off by %d\n", i, h_histogram[i] );
    }

    // --- Cleanup
    gpuErrchk(cudaEventDestroy(start));
    gpuErrchk(cudaEventDestroy(stop));
    gpuErrchk(cudaFree(dev_histo));
    gpuErrchk(cudaFree(dev_buffer));

    free(buffer);

    getchar();   // keep the console window open (original behavior)

    return 0;
}

关于sorting - CUDA 中的粒子细胞计数(一维和二维直方图生成),我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/12212906/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com