
c++ - CUDA zero-copy vs. cudaMemcpy on the Jetson TK1


My question: I am looking for someone either to point out a mistake in the way I am trying to use zero-copy in CUDA, or to offer a more "behind the scenes" explanation of why the zero-copy approach would not be faster than the memcpy approach. By the way, I am running my tests on NVIDIA's TK1 processor, under Ubuntu.

My problem concerns the efficient use of the NVIDIA TK1's (physically) unified memory architecture with CUDA. NVIDIA provides two methods of abstracting GPU/CPU memory transfers (a minimal sketch of both follows the list below):

  1. Unified memory abstraction (using cudaHostAlloc and cudaHostGetDevicePointer)
  2. Explicit copying to and from the device (using cudaMalloc() and cudaMemcpy)
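For reference, the two patterns boil down to roughly the following self-contained sketch; the buffer size, kernel, and variable names here are placeholders rather than part of my actual test code further down:

#include <cuda_runtime.h>

__global__ void scaleKernel(float *buf, int n)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < n) buf[i] *= 2.0f;
}

int main()
{
    const int N = 640*480;

    //(1) Zero-copy / mapped host memory: the kernel reads and writes host memory directly
    cudaSetDeviceFlags(cudaDeviceMapHost);         //must be set before the CUDA context is created
    float *h_buf, *d_alias;
    cudaHostAlloc((void **)&h_buf, N*sizeof(float), cudaHostAllocMapped);
    cudaHostGetDevicePointer((void **)&d_alias, (void *)h_buf, 0);
    scaleKernel<<<(N+127)/128, 128>>>(d_alias, N); //no cudaMemcpy needed
    cudaDeviceSynchronize();                       //results are now visible through h_buf

    //(2) Explicit device buffer plus copies to and from the device
    float *d_buf;
    cudaMalloc((void **)&d_buf, N*sizeof(float));
    cudaMemcpy(d_buf, h_buf, N*sizeof(float), cudaMemcpyHostToDevice);
    scaleKernel<<<(N+127)/128, 128>>>(d_buf, N);
    cudaMemcpy(h_buf, d_buf, N*sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(d_buf);
    cudaFreeHost(h_buf);
    return 0;
}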

A short description of my test code: I test the same CUDA kernel using both method 1 and method 2. I expected method 1 to be faster, since no source data has to be copied to the device and no result data has to be copied back from it. However, the results contradicted my assumption (method #1 was 50% slower). Here is my test code:

#include <libfreenect/libfreenect.hpp>
#include <iostream>
#include <vector>
#include <cmath>
#include <pthread.h>
#include <cxcore.h>
#include <time.h>
#include <sys/time.h>
#include <memory.h>
///CUDA///
#include <cuda.h>
#include <cuda_runtime.h>

///OpenCV 2.4
#include <highgui.h>
#include <cv.h>
#include <opencv2/gpu/gpu.hpp>

using namespace cv;
using namespace std;

///The Test Kernel///
__global__ void cudaCalcXYZ( float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance)
{
    float nx, ny, nz, nzpminD, jFactor;
    int heightCenter = height / 2;
    int widthCenter = width / 2;
    //int j = blockIdx.x; //Represents which row we are in
    int index = blockIdx.x*width;
    jFactor = (blockIdx.x - heightCenter)*scaleFactor;
    for(int i = 0; i < width; i++)
    {
        nz = src[index];
        nzpminD = nz + minDistance;
        nx = (i - widthCenter)*(nzpminD)*scaleFactor;
        ny = (jFactor)*(nzpminD);
        //Solve for only Y matrix (height values)
        dst[index++] = nx*M[4] + ny*M[5] + nz*M[6];
        //dst[index++] = 1 + 2 + 3;
    }
}

//Function fwd declarations
double getMillis();
double getMicros();
void runCudaTestZeroCopy(int iter, int cols, int rows);
void runCudaTestDeviceCopy(int iter, int cols, int rows);

int main(int argc, char **argv) {

//ZERO COPY FLAG (allows runCudaTestZeroCopy to run without fail)
cudaSetDeviceFlags(cudaDeviceMapHost);

//Runs kernel using explicit data copy to 'device' and back from 'device'
runCudaTestDeviceCopy(20, 640,480);
//Uses 'unified memory' cuda abstraction so device can directly work from host data
runCudaTestZeroCopy(20,640, 480);

std::cout << "Stopping test" << std::endl;

return 0;
}

void runCudaTestZeroCopy(int iter, int cols, int rows)
{
cout << "CUDA Test::ZEROCOPY" << endl;
int src_rows = rows;
int src_cols = cols;
int m_rows = 4;
int m_cols = 4;
int dst_rows = src_rows;
int dst_cols = src_cols;
//Create and allocate memory for host mats pointers
float *psrcMat;
float *pmMat;
float *pdstMat;
cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped);
cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped);
cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped);
//Create mats using host pointers
Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat);
Mat m_mat = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat);
Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat);

//configure src and m mats
for(int i = 0; i < src_rows*src_cols; i++)
{
psrcMat[i] = (float)i;
}
for(int i = 0; i < m_rows*m_cols; i++)
{
pmMat[i] = 0.1234;
}
//Create pointers to dev mats
float *d_psrcMat;
float *d_pmMat;
float *d_pdstMat;
//Map device to host pointers
cudaHostGetDevicePointer((void **)&d_psrcMat, (void *)psrcMat, 0);
//cudaHostGetDevicePointer((void **)&d_pmMat, (void *)pmMat, 0);
cudaHostGetDevicePointer((void **)&d_pdstMat, (void *)pdstMat, 0);
//Copy matrix M to device
cudaMalloc( (void **)&d_pmMat, sizeof(float)*4*4 ); //4x4 matrix
cudaMemcpy( d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice);

//Additional Variables for kernels
float scaleFactor = 0.0021;
int minDistance = -10;

//Run kernel! //cudaSimpleMult( float *dst, float *src, float *M, int width, int height)
int blocks = src_rows;
const int numTests = iter;
double perfStart = getMillis();

for(int i = 0; i < numTests; i++)
{
//cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows);
cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
cudaDeviceSynchronize();
}
double perfStop = getMillis();
double perfDelta = perfStop - perfStart;
cout << "Ran " << numTests << " iterations totaling " << perfDelta << "ms" << endl;
cout << " Average time per iteration: " << (perfDelta/(float)numTests) << "ms" << endl;

//Copy result back to host
//cudaMemcpy(pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost);
//cout << "Printing results" << endl;
//for(int i = 0; i < 16*16; i++)
//{
// cout << "src[" << i << "]= " << psrcMat[i] << " dst[" << i << "]= " << pdstMat[i] << endl;
//}

cudaFree(d_psrcMat);
cudaFree(d_pmMat);
cudaFree(d_pdstMat);
cudaFreeHost(psrcMat);
cudaFreeHost(pmMat);
cudaFreeHost(pdstMat);
}

void runCudaTestDeviceCopy(int iter, int cols, int rows)
{
cout << "CUDA Test::DEVICE COPY" << endl;
int src_rows = rows;
int src_cols = cols;
int m_rows = 4;
int m_cols = 4;
int dst_rows = src_rows;
int dst_cols = src_cols;
//Create and allocate memory for host mats pointers
float *psrcMat;
float *pmMat;
float *pdstMat;
cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped);
cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped);
cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped);
//Create pointers to dev mats
float *d_psrcMat;
float *d_pmMat;
float *d_pdstMat;
cudaMalloc( (void **)&d_psrcMat, sizeof(float)*src_rows*src_cols );
cudaMalloc( (void **)&d_pdstMat, sizeof(float)*src_rows*src_cols );
cudaMalloc( (void **)&d_pmMat, sizeof(float)*4*4 ); //4x4 matrix
//Create mats using host pointers
Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat);
Mat m_mat = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat);
Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat);

//configure src and m mats
for(int i = 0; i < src_rows*src_cols; i++)
{
psrcMat[i] = (float)i;
}
for(int i = 0; i < m_rows*m_cols; i++)
{
pmMat[i] = 0.1234;
}

//Additional Variables for kernels
float scaleFactor = 0.0021;
int minDistance = -10;

//Run kernel! //cudaSimpleMult( float *dst, float *src, float *M, int width, int height)
int blocks = src_rows;

double perfStart = getMillis();
for(int i = 0; i < iter; i++)
{
//Copy from host to device
cudaMemcpy( d_psrcMat, psrcMat, sizeof(float)*src_rows*src_cols, cudaMemcpyHostToDevice);
cudaMemcpy( d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice);
//Run Kernel
//cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows);
cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
//Copy from device to host
cudaMemcpy( pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost);
}
double perfStop = getMillis();
double perfDelta = perfStop - perfStart;
cout << "Ran " << iter << " iterations totaling " << perfDelta << "ms" << endl;
cout << " Average time per iteration: " << (perfDelta/(float)iter) << "ms" << endl;

cudaFree(d_psrcMat);
cudaFree(d_pmMat);
cudaFree(d_pdstMat);
cudaFreeHost(psrcMat);
cudaFreeHost(pmMat);
cudaFreeHost(pdstMat);
}

//Timing functions for performance measurements
double getMicros()
{
timespec ts;
//double t_ns, t_s;
long t_ns;
double t_s;
clock_gettime(CLOCK_MONOTONIC, &ts);
t_s = (double)ts.tv_sec;
t_ns = ts.tv_nsec;
//return( (t_s *1000.0 * 1000.0) + (double)(t_ns / 1000.0) );
//NOTE: as written, this returns only the nanosecond field scaled to microseconds; the seconds part is discarded (this function is not used in the test)
return ((double)t_ns / 1000.0);
}

double getMillis()
{
timespec ts;
double t_ns, t_s;
clock_gettime(CLOCK_MONOTONIC, &ts);
t_s = (double)ts.tv_sec;
t_ns = (double)ts.tv_nsec;
return( (t_s * 1000.0) + (t_ns / 1000000.0) );
}

I have already looked at the post Cuda zero-copy performance, but I feel it is not relevant, for the following reason: here the GPU and CPU have a physically unified memory architecture.

Thanks

Best Answer

When you use zero-copy, a memory read goes down a path in which the memory unit is queried to fetch the data from system memory. This operation has some latency.

When direct access to device memory is used instead, the memory unit gathers the data from global memory, which has a different access pattern and a different latency.

Actually seeing this difference would require some form of profiling.
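As a lightweight alternative to a full profiler run, CUDA events can at least separate the kernel time from the copy time. The snippet below is only a sketch that reuses the variable names from your zero-copy test; I have not run it on a TK1:

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
cudaCalcXYZ<<<blocks, 1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
cudaEventRecord(stop);
cudaEventSynchronize(stop);

float kernelMs = 0.0f;
cudaEventElapsedTime(&kernelMs, start, stop); //time spent in the kernel only, excluding any cudaMemcpy

cudaEventDestroy(start);
cudaEventDestroy(stop);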

Nonetheless, your call to the global function uses only a single thread per block:

cudaCalcXYZ<<< blocks,1 >>> (...

In this case the GPU has little opportunity to hide the latency while data is gathered from system memory (or from global memory). I would recommend that you use more threads (a multiple of 64, at least 128 in total) and run the profiler over it to see the cost of the memory accesses. Your algorithm appears to be separable, so changing the loop from

for(int i= 0; i < width; i++)

to

for (int i = threadIdx.x ; i < width ; i += blockDim.x)

is likely to improve overall performance. The image is 640 pixels wide, so with 128 threads this works out to 5 iterations per thread:

cudaCalcXYZ<<< blocks,128 >>> (...

I believe this will give you some performance gain.
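Putting the pieces together, the reworked kernel would look roughly like this; it is only a sketch that follows the loop change above and otherwise mirrors your original kernel, so treat it as untested:

__global__ void cudaCalcXYZ( float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance)
{
    int heightCenter = height / 2;
    int widthCenter = width / 2;
    int row = blockIdx.x;                             //one block still handles one row
    float jFactor = (row - heightCenter)*scaleFactor;
    //Each of the blockDim.x threads covers every blockDim.x-th column of the row
    for(int i = threadIdx.x; i < width; i += blockDim.x)
    {
        int index = row*width + i;
        float nz = src[index];
        float nzpminD = nz + minDistance;
        float nx = (i - widthCenter)*nzpminD*scaleFactor;
        float ny = jFactor*nzpminD;
        //Solve for only Y matrix (height values)
        dst[index] = nx*M[4] + ny*M[5] + nz*M[6];
    }
}

//launched with 128 threads per block:
//cudaCalcXYZ<<<blocks, 128>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);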

Regarding c++ - CUDA zero-copy vs. cudaMemcpy on the Jetson TK1, the original question can be found on Stack Overflow: https://stackoverflow.com/questions/36758620/
