gpt4 book ai didi

CUDA:如何使该代码并行?

转载 作者:行者123 更新时间:2023-12-02 16:51:20 25 4
gpt4 key购买 nike

我正在努力使代码并行运行(CUDA)。简化后的代码为:

float sum = ...         //sum = some number
for (i = 0; i < N; i++){
f = ... // f = a function that returns a float and puts it into f
sum += f;
}

我遇到的问题是 sum+=f 因为它需要 sum 在线程之间共享。我尝试在声明 sum (__shared__ float sum) 时使用 __shared__ 参数,但这不起作用(它没有给我正确的结果)。我也听说过缩减(并且知道如何在 OpenMP 上使用它),但不知道如何在这里应用它。

任何帮助将不胜感激。谢谢!

最佳答案

这是代码:

#include <stdio.h>
__global__ void para(float* f, int len) {
// One thread per element: fill f[i] with a placeholder value.
// The assignment stands in for the per-iteration computation of the
// original serial loop ("f = some function, accumulated into sum").
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= len)
return;              // guard: the grid may overshoot len
f[idx] = (float)idx;     // placeholder for the real f(i)
}

int main(int argc, char ** argv) {
// Fill an array on the GPU with para(), then sum it serially on the host.
// (The host loop is a placeholder; see the note below about swapping in a
// real GPU reduction.)
const int inputLength = 1024;
const int size = inputLength * (int)sizeof(float);

float *h_f = (float *) malloc(size);
if (h_f == NULL) {
    fprintf(stderr, "host malloc failed\n");
    return 1;
}

float *d_f = NULL;
cudaError_t err = cudaMalloc((void**)&d_f, size);
if (err != cudaSuccess) {
    fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
    free(h_f);
    return 1;
}

// NOTE: no host->device copy here — para() writes every element of d_f,
// so the original upload of the *uninitialized* h_f buffer was pointless.

dim3 DimGrid((inputLength + 255) / 256, 1, 1);  // ceil-div launch config
dim3 DimBlock(256, 1, 1);

para<<<DimGrid, DimBlock>>>(d_f, inputLength);
err = cudaGetLastError();               // catches launch-config errors
if (err == cudaSuccess)
    err = cudaDeviceSynchronize();      // catches async execution errors
if (err != cudaSuccess) {
    fprintf(stderr, "para kernel: %s\n", cudaGetErrorString(err));
    cudaFree(d_f);
    free(h_f);
    return 1;
}

err = cudaMemcpy(h_f, d_f, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy D2H: %s\n", cudaGetErrorString(err));
    cudaFree(d_f);
    free(h_f);
    return 1;
}

cudaFree(d_f);

// Serial reduction on the host (NOT parallel — replace with a GPU
// reduction, e.g. the strideSum version in the edited answer).
float sum = 0.0f;
for (int i = 0; i < inputLength; i++)
    sum += h_f[i];

printf("%6.4f\n", sum);

free(h_f);

return 0;
}

上面“并行归约”一步目前仍是在主机上串行求和，可以替换为一个真正可用的 CUDA 并行求和归约实现（例如 CUDA SDK 自带的 reduction 示例）。稍后我会抽时间把它改过来。

编辑:

以下是使用 CUDA 执行并行归约的代码:

#include <stdio.h>
__global__ void para(float* f, int len) {
/* Placeholder element-wise computation: thread gid writes the value gid
   into f[gid]. Replace the assignment with the real per-iteration
   function from the question's serial loop. */
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < len) {
f[gid] = (float)gid;
}
}
// One pairwise-reduction pass: thread i folds f[i + strid] into f[i],
// shrinking the active prefix of f. After the pass the partial sums
// occupy f[0 .. max(strid, len - strid) - 1].
// NOTE(review): the caller must pick strid consistently with how it
// shrinks len between passes — with strid = len/2 (floor) and an odd
// len, the partial sum written at index strid falls outside the next
// pass's range and is dropped; passing strid = (len+1)/2 avoids this.
__global__ void strideSum(float *f, int len, int strid){
int i = threadIdx.x + blockDim.x * blockIdx.x;
// Guard: only threads whose partner index stays in bounds participate.
// Writes (f[0..]) and reads (f[strid..]) are disjoint, so no race.
if(i+strid<len){
f[i]=f[i]+f[i+strid];
}
}

#define BLOCKSIZE 256
int main(int argc, char ** argv) {
// Fill d_f with para(), optionally sum it on the host as a validity
// check, then reduce it on the GPU with repeated strideSum passes.
const int inputLength = 4096;
const int size = inputLength * (int)sizeof(float);

float *h_f = (float *) malloc(size);
if (h_f == NULL) {
    fprintf(stderr, "host malloc failed\n");
    return 1;
}
float *d_f = NULL;
cudaError_t err = cudaMalloc((void**)&d_f, size);
if (err != cudaSuccess) {
    fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
    free(h_f);
    return 1;
}

// NOTE: the original uploaded the *uninitialized* h_f buffer here;
// para() writes every element of d_f, so no host->device copy is needed.

dim3 DimGrid((inputLength + BLOCKSIZE - 1) / BLOCKSIZE, 1, 1);  // ceil-div
dim3 DimBlock(BLOCKSIZE, 1, 1);

para<<<DimGrid, DimBlock>>>(d_f, inputLength);
err = cudaGetLastError();               // catches launch-config errors
if (err == cudaSuccess)
    err = cudaDeviceSynchronize();      // catches async execution errors
if (err != cudaSuccess) {
    fprintf(stderr, "para kernel: %s\n", cudaGetErrorString(err));
    cudaFree(d_f);
    free(h_f);
    return 1;
}

float sum = 0.0f, d_sum = 0.0f;
// Serial sum on host. YOU CAN SAFELY COMMENT THE FOLLOWING COPY AND LOOP;
// it exists only to validate the GPU result.
err = cudaMemcpy(h_f, d_f, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy D2H: %s\n", cudaGetErrorString(err));
    cudaFree(d_f);
    free(h_f);
    return 1;
}
for (int i = 0; i < inputLength; i++)
    sum += h_f[i];

// Parallel reduction on the GPU. Using stride = ceil(i/2) keeps the
// algorithm correct for non-power-of-two lengths: the original floor
// stride (i/2) silently dropped a partial sum whenever an intermediate
// length was odd, so it only worked for power-of-two inputLength.
for (int i = inputLength; i > 1; i = (i + 1) / 2) {
    int stride = (i + 1) / 2;
    strideSum<<<(i + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE>>>(d_f, i, stride);
}
// Kernels on the default stream execute in order, so no per-iteration
// synchronize is needed; the blocking memcpy below waits for the last pass.
err = cudaGetLastError();
if (err == cudaSuccess)
    err = cudaMemcpy(&d_sum, d_f, 1 * sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
    fprintf(stderr, "reduction: %s\n", cudaGetErrorString(err));
    cudaFree(d_f);
    free(h_f);
    return 1;
}

printf("Host -> %6.4f, Device -> %6.4f\n", sum, d_sum);

cudaFree(d_f);
free(h_f);

return 0;
}

关于CUDA:如何使该代码并行?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/13794112/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com