c++ - CUDA matrix multiplication, long execution time


I'm new to CUDA, and I've been trying to figure out what I'm doing wrong. CUDA is taking longer to multiply the matrices than just doing it on the CPU. If I'm doing something wrong, please let me know. Here is my code:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <assert.h>
#include <time.h>
#define size 100 // Matrix size
#define cols size // Matrix width
#define rows size // Matrix height

void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
__global__ void matrixMul( int *A, int *B, int *C)
{
    int bx = blockIdx.x;  // Block index
    int tx = threadIdx.x; // Thread index
    int ts = blockDim.x;  // number of threads
    // Declaration of the shared memory C element
    extern __shared__ int c_element_sum[];
    c_element_sum[tx] = A[tx + ((bx / ts) * ts)] * B[(bx % ts) + (tx * ts)];

    //Block until all threads in the block have written their data to shared mem
    __syncthreads();

    int sum;
    for (int i = 0; i < ts; i++){
        if (i == 0){
            sum = c_element_sum[i];
        }
        else{
            sum += c_element_sum[i];
        }
    }
    C[bx] = sum;
}


/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////

int main(int argc, char** argv)
{
    //create timer.
    clock_t t1, t2;

    //start timer
    t1 = clock();

    //allocate host memory for matrices
    unsigned int size_A = cols * rows;
    unsigned int mem_size_A = sizeof(int) * size_A;
    int* mA = (int*) malloc(mem_size_A);

    unsigned int size_B = cols * rows;
    unsigned int mem_size_B = sizeof(int) * size_B;
    int* mB = (int*) malloc(mem_size_B);

    unsigned int size_C = cols * rows;
    unsigned int mem_size_C = sizeof(int) * size_C;
    int* mC = (int*) malloc(mem_size_C);

    //initialize host memory
    for (int i = 0; i < size_A; ++i){
        mA[i] = 1;
        mB[i] = 1;
        mC[i] = 0;
    }

    // allocate device memory
    int* d_mA;
    int* d_mB;
    int* d_mC;
    cudaMalloc((void**) &d_mA, mem_size_A);
    cudaMalloc((void**) &d_mB, mem_size_B);
    cudaMalloc((void**) &d_mC, mem_size_C);

    //copy host memory to device (A and B)
    cudaMemcpy(d_mA, mA, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mB, mB, mem_size_B, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mC, mC, mem_size_C, cudaMemcpyHostToDevice);

    // setup execution parameters
    int numThreadsPerBlock = cols;
    int numBlocks = (cols * rows);
    int sharedMemSize = numThreadsPerBlock * sizeof(int);

    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);

    // execute the kernel
    matrixMul <<< dimGrid, dimBlock, sharedMemSize >>>(d_mA, d_mB, d_mC);

    //Block until device has completed
    cudaThreadSynchronize();

    // check if kernel execution generated an error
    // Check for any CUDA errors
    checkCUDAError("kernel invocation");

    //copy result from device to host
    cudaMemcpy(mC, d_mC, mem_size_C, cudaMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    //stop timer
    t2 = clock();

    //check results
    for (int i = 0; i < size_C; ++i){
        assert(mC[i] == cols);
    }

    //clean up memory
    free(mA);
    free(mB);
    free(mC);
    cudaFree(d_mA);
    cudaFree(d_mB);
    cudaFree(d_mC);

printf("WITH CUDA - clocks: %d \n\n", t2-t1);

    //////////////////////////////
    ///////// CPU ONLY //////////
    /////////////////////////////

    //create timer.
    clock_t cpu_t1, cpu_t2;

    //start timer
    cpu_t1 = clock();

    //allocate host memory for matrices
    unsigned int cpu_size_A = cols * rows;
    unsigned int cpu_mem_size_A = sizeof(int) * cpu_size_A;
    int* cpu_mA = (int*) malloc(cpu_mem_size_A);

    unsigned int cpu_size_B = cols * rows;
    unsigned int cpu_mem_size_B = sizeof(int) * cpu_size_B;
    int* cpu_mB = (int*) malloc(cpu_mem_size_B);

    unsigned int cpu_size_C = cols * rows;
    unsigned int cpu_mem_size_C = sizeof(int) * cpu_size_C;
    int* cpu_mC = (int*) malloc(cpu_mem_size_C);

    //initialize host memory
    for (int i = 0; i < cpu_size_A; ++i){
        cpu_mA[i] = 1;
        cpu_mB[i] = 1;
        cpu_mC[i] = 0;
    }

    int ts = cols;
    for (int bx = 0; bx < (cols * rows); bx++){
        int sum = 0;
        for (int tx = 0; tx < cols; tx++){
            sum += cpu_mA[tx + ((bx / ts) * ts)] * cpu_mB[(bx % ts) + (tx * ts)];
        }
        cpu_mC[bx] = sum;
    }

    //stop timer
    cpu_t2 = clock();

    //check results
    for (int i = 0; i < cpu_size_C; ++i){
        assert(cpu_mC[i] == cols);
    }

    //clean up memory
    free(cpu_mA);
    free(cpu_mB);
    free(cpu_mC);

printf("CPU ONLY - clocks: %d \n\n", cpu_t2-cpu_t1);

return 0;
}

Best Answer

Given your program, this is expected. Your timer appears to be measuring the entire execution of the program, which includes copying to the device, the computation time, and copying the results back. Given the rather small workload you have provided (100x100 matrices), the overhead of the memory copies far outweighs any computational benefit you gain from doing the work in a kernel. Your kernel itself is also not the most efficient implementation.
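For example, timing the kernel on its own with CUDA events, separately from the wall-clock timing of the whole program, makes that split visible. A minimal sketch, reusing the names from the question's code (the event variables are added for illustration):

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    matrixMul <<< dimGrid, dimBlock, sharedMemSize >>>(d_mA, d_mB, d_mC);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float kernel_ms = 0.0f;
    cudaEventElapsedTime(&kernel_ms, start, stop); // kernel time only, in milliseconds
    printf("kernel: %f ms\n", kernel_ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

Timing the two cudaMemcpy calls the same way would show that, at this problem size, they dominate.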

I don't think you are doing anything wrong as such; it's just that you haven't given the GPU a large enough chunk of work, and you could further optimize your kernel. Note that simply scaling up the size of the chunk may not significantly improve the performance relative to the CPU, since you would also be scaling up the memory management time. While writing a first implementation of a program in CUDA is relatively simple, getting good performance out of it is significantly harder. The most effective way to use CUDA is to keep the ratio of compute to memory transactions high, for example by having a pipeline of several compute-intensive kernels operate successively on a large chunk of data, with host-device copies needed only at the beginning and the end. The sketch after this paragraph illustrates one such kernel-level optimization.
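To make the kernel optimization concrete, here is a sketch of the standard shared-memory tiled formulation, in which each thread computes one element of C and each block reuses TILE x TILE tiles of A and B loaded into shared memory. The names TILE and N, and the row-major layout, are assumptions made for this sketch rather than part of the code above, and N is assumed to be a multiple of TILE for brevity:

    #define TILE 16

    // Sketch: tiled multiply of square N x N row-major matrices.
    // Launch with dim3 grid(N/TILE, N/TILE) and dim3 block(TILE, TILE).
    __global__ void matrixMulTiled(const int *A, const int *B, int *C, int N)
    {
        __shared__ int As[TILE][TILE];
        __shared__ int Bs[TILE][TILE];

        int row = blockIdx.y * TILE + threadIdx.y;
        int col = blockIdx.x * TILE + threadIdx.x;
        int sum = 0;

        for (int t = 0; t < N / TILE; ++t) {
            // Each thread loads one element of the A tile and one of the B tile.
            As[threadIdx.y][threadIdx.x] = A[row * N + t * TILE + threadIdx.x];
            Bs[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * N + col];
            __syncthreads();

            // Every value loaded into shared memory is reused TILE times,
            // which raises the compute-to-memory-transaction ratio.
            for (int k = 0; k < TILE; ++k)
                sum += As[threadIdx.y][k] * Bs[k][threadIdx.x];
            __syncthreads();
        }

        C[row * N + col] = sum;
    }

Compared with the one-block-per-output-element kernel in the question, each global memory load here feeds TILE multiply-adds instead of one.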

If this is just a program to help you learn to write CUDA code, this is a great step, and developing a deep understanding of how to optimize a matrix multiplication kernel will serve you well in many other situations. If you are writing this kernel for use in production software, I would recommend using the highly optimized linear algebra library CUBLAS: http://developer.nvidia.com/cublas (or some other library where the hard work has already been done for you).
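For reference, a CUBLAS call for this kind of product might look like the sketch below. It assumes single-precision device buffers d_A, d_B, d_C of size N x N already exist (these names and N are illustrative, not taken from the code above). Note that cuBLAS stores matrices in column-major order; for the all-ones matrices used in the question the layout makes no difference to the result.

    #include <cublas_v2.h>

    // Sketch: C = A * B on the device using cublasSgemm.
    cublasHandle_t handle;
    cublasCreate(&handle);

    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemm(handle,
                CUBLAS_OP_N, CUBLAS_OP_N, // no transposes
                N, N, N,                  // m, n, k
                &alpha,
                d_A, N,                   // A and its leading dimension
                d_B, N,                   // B and its leading dimension
                &beta,
                d_C, N);                  // C and its leading dimension

    cublasDestroy(handle);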

This question, "c++ - CUDA matrix multiplication, long execution time", corresponds to a similar question on Stack Overflow: https://stackoverflow.com/questions/9970147/
