gpt4 book ai didi

c++ - NVIDIA Visual profiler 不生成时间线

转载 作者:行者123 更新时间:2023-11-30 05:40:47 24 4
gpt4 key购买 nike

我的问题几乎与之前在 SO 上询问过的一个问题相同。但该问题没有得到任何回答,所以我单独提问。

我在 Windows-7 操作系统上使用 CUDA 7.0 工具包。我正在使用 VS-2013。

我尝试生成 vector 加法示例程序的时间线并且成功了。但是当我按照完全相同的步骤生成我自己的代码的时间线时,它会一直显示一条消息“正在运行应用程序以生成时间线”。我知道内核被调用并且一切正常。

cudaDeviceReset() 在完成与 CUDA 相关的所有事情后,调用也在那里。

程序:我已经更改了我原来的问题以提供一个可以产生相同问题的最小工作示例。以下代码未使用 nvvp 生成时间线,无论我放置 cudaDeviceReset() 的位置如何。

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

//OpenCV
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

#include <stdio.h>

using namespace cv;

// Copies one pixel (3 interleaved channels) from the source buffer to the
// destination buffer. Expected launch: a 2D grid covering at least iw x ih
// threads; threads outside the image exit immediately.
// numChannels is the interleaved channel stride (the callers pass 3).
__global__ void colorTransformation_kernel(int numChannels, int iw, int ih, unsigned char *ptr_source, unsigned char *ptr_dst)
{
    // Global 2D pixel coordinates of this thread.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: only threads mapped to a real pixel do work.
    if (col >= iw || row >= ih)
        return;

    // Offset of this pixel's first channel in the interleaved layout.
    const int base = numChannels * (iw * row + col);

    // Copy exactly the three channels the original unrolled code copied.
    for (int c = 0; c < 3; ++c)
        ptr_dst[base + c] = ptr_source[base + c];
}

// Abort-on-error wrapper for CUDA runtime calls. Kernel launches do not
// return an error directly, so the launch is checked separately with
// cudaGetLastError() below. (Only used inside main(), hence the `return 1`.)
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            return 1;                                                         \
        }                                                                     \
    } while (0)

// Minimal repro: forever uploads a solid 400x400 3-channel image, copies it
// on the GPU, downloads and displays it. The endless while(1) is deliberate —
// it is what prevents nvvp from ever reaching cudaDeviceReset().
int main()
{
    while (1)
    {
        // Host source image: 400x400, 3 channels (OpenCV stores BGR).
        Mat image(400, 400, CV_8UC3, Scalar(0, 0, 255));
        unsigned char *h_src = image.data;
        size_t numBytes = image.rows * image.cols * 3;
        int numChannels = 3;

        unsigned char *dev_src = NULL, *dev_dst = NULL, *h_dst = NULL;

        // Allocate device buffers for SOURCE and DESTINATION.
        CUDA_CHECK(cudaMalloc((void**)&dev_src, numBytes * sizeof(unsigned char)));
        CUDA_CHECK(cudaMalloc((void**)&dev_dst, numBytes * sizeof(unsigned char)));

        // Copy the source image to the device.
        CUDA_CHECK(cudaMemcpy(dev_src, h_src, numBytes * sizeof(unsigned char), cudaMemcpyHostToDevice));

        // KERNEL: 20x20 threads per block; grid multiplied by 3 because the
        // image has 3 channels (original author's layout, kept as-is).
        dim3 numOfBlocks(3 * (image.cols / 20), 3 * (image.rows / 20));
        dim3 numOfThreadsPerBlocks(20, 20);
        colorTransformation_kernel << <numOfBlocks, numOfThreadsPerBlocks >> >(numChannels, image.cols, image.rows, dev_src, dev_dst);
        // Launch-configuration errors only surface via cudaGetLastError().
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaDeviceSynchronize());

        // Download the processed image into an OpenCV Mat of the same shape.
        Mat org_dijSDK_img(image.rows, image.cols, CV_8UC3);
        h_dst = org_dijSDK_img.data;
        CUDA_CHECK(cudaMemcpy(h_dst, dev_dst, numBytes * sizeof(unsigned char), cudaMemcpyDeviceToHost));

        // Display the processed image.
        imshow("Processed dijSDK image", org_dijSDK_img);
        waitKey(33);

        // Free the per-iteration device buffers. The original allocated them
        // every iteration and never freed them, leaking device memory inside
        // the endless loop.
        CUDA_CHECK(cudaFree(dev_src));
        CUDA_CHECK(cudaFree(dev_dst));
    }

    cudaDeviceReset();
    return 0;
}

非常重要的线索:如果我注释行 while(1) 并因此只运行一次代码,nvvp 会生成时间线。但是在我原来的项目中,我无法通过这样做获得时间线配置文件,因为它包含多线程和其他东西,因此在第一次运行期间没有要处理的图像。因此,我必须需要一些方法来使用包含无限 while 循环 的代码生成时间线。

最佳答案

我的代码中的问题是无休止的 while 循环,因此从未调用过 cudaDeviceReset()。有两种可能的解决方案来处理这种情况:

  1. 如果您有兴趣只在那时查看时间线分析,只需注释您的 while 循环 并且 nvvp 将能够到达 cudaDeviceReset() 出现在 main() 的末尾。

  2. 可能会有这样一种情况,您必须在程序中保留一个循环。例如,在我包含多线程的原始项目中,在 while 循环 的初始 180 次运行期间没有要处理的图像。要处理这种情况,请将您的 while 循环替换为可以运行有限次数的 for 循环。例如,以下代码帮助我获得了 4 次运行的时间线分析。我只发布修改后的 main()

    // Multi-stream version: splits the image into three horizontal slices and
    // overlaps H2D copy, kernel, and D2H copy across three CUDA streams.
    // A bounded for loop (instead of while(1)) lets control reach
    // cudaDeviceReset(), which is what allows nvvp to build a timeline.
    int main()
    {
        cudaStream_t stream_one;
        cudaStream_t stream_two;
        cudaStream_t stream_three;

        // Create the streams once. The original re-created them on every
        // iteration and never destroyed them, leaking stream resources.
        cudaStreamCreate(&stream_one);
        cudaStreamCreate(&stream_two);
        cudaStreamCreate(&stream_three);

        // Bounded loop: 4 runs is enough for nvvp to capture a timeline.
        for (int i = 0; i < 4; i++)
        {
            Mat image = imread("DijSDK_test_image.jpg", 1);
            size_t numBytes = image.rows * image.cols * 3;
            int numChannels = 3;

            // BUGFIX: the original assigned iw = image.rows, ih = image.cols.
            // The kernel indexes pixels as iw*y + x, so iw must be the image
            // WIDTH (cols) and ih the HEIGHT (rows) — exactly what the
            // single-stream version passes.
            int iw = image.cols;
            int ih = image.rows;
            size_t totalMemSize = numBytes * sizeof(unsigned char);
            size_t oneThirdMemSize = totalMemSize / 3;

            unsigned char *dev_src_1, *dev_src_2, *dev_src_3,
                          *dev_dst_1, *dev_dst_2, *dev_dst_3, *h_src, *h_dst;

            // Device buffers: one third of the image per stream.
            cudaMalloc((void**)&dev_src_1, oneThirdMemSize);
            cudaMalloc((void**)&dev_src_2, oneThirdMemSize);
            cudaMalloc((void**)&dev_src_3, oneThirdMemSize);
            cudaMalloc((void**)&dev_dst_1, oneThirdMemSize);
            cudaMalloc((void**)&dev_dst_2, oneThirdMemSize);
            cudaMalloc((void**)&dev_dst_3, oneThirdMemSize);

            // Destination Mat; h_dst aliases its pixel buffer, so the D2H
            // copies below write directly into the displayable image.
            Mat org_dijSDK_img(image.rows, image.cols, CV_8UC3, Scalar(0, 0, 255));
            h_dst = org_dijSDK_img.data;
            h_src = image.data;

            // H2D: one slice per stream.
            // NOTE(review): image.data is pageable host memory, so these
            // async copies will not actually overlap; pinned memory
            // (cudaMallocHost) would be needed for true overlap.
            cudaMemcpyAsync(dev_src_1, h_src, oneThirdMemSize, cudaMemcpyHostToDevice, stream_one);
            cudaMemcpyAsync(dev_src_2, h_src + oneThirdMemSize, oneThirdMemSize, cudaMemcpyHostToDevice, stream_two);
            cudaMemcpyAsync(dev_src_3, h_src + (2 * oneThirdMemSize), oneThirdMemSize, cudaMemcpyHostToDevice, stream_three);

            // Kernels, one slice per stream.
            callMultiStreamingCudaKernel(dev_src_1, dev_dst_1, numChannels, iw, ih, &stream_one);
            callMultiStreamingCudaKernel(dev_src_2, dev_dst_2, numChannels, iw, ih, &stream_two);
            callMultiStreamingCudaKernel(dev_src_3, dev_dst_3, numChannels, iw, ih, &stream_three);

            // D2H: results back into the destination Mat, slice by slice.
            cudaMemcpyAsync(h_dst, dev_dst_1, oneThirdMemSize, cudaMemcpyDeviceToHost, stream_one);
            cudaMemcpyAsync(h_dst + oneThirdMemSize, dev_dst_2, oneThirdMemSize, cudaMemcpyDeviceToHost, stream_two);
            cudaMemcpyAsync(h_dst + (2 * oneThirdMemSize), dev_dst_3, oneThirdMemSize, cudaMemcpyDeviceToHost, stream_three);

            // Wait for all three pipelines to finish before displaying.
            cudaStreamSynchronize(stream_one);
            cudaStreamSynchronize(stream_two);
            cudaStreamSynchronize(stream_three);

            // (The original's `org_dijSDK_img.data = h_dst;` was a redundant
            // self-assignment and has been removed.)
            imshow("Processed dijSDK image", org_dijSDK_img);
            waitKey(33);

            // Free per-iteration device buffers — the original leaked all six
            // allocations on every iteration.
            cudaFree(dev_src_1);
            cudaFree(dev_src_2);
            cudaFree(dev_src_3);
            cudaFree(dev_dst_1);
            cudaFree(dev_dst_2);
            cudaFree(dev_dst_3);
        }

        cudaStreamDestroy(stream_one);
        cudaStreamDestroy(stream_two);
        cudaStreamDestroy(stream_three);

        cudaDeviceReset();
        return 0;
    }

关于c++ - NVIDIA Visual profiler 不生成时间线,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/31428003/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com