
c++ - How to compile CUDA code with Qt in a Windows/MSVC environment?


After a long round of troubleshooting, I managed to get my small test program working with Qt Creator (as far as I can tell, several people have run into trouble with this). I am sharing a solution here (see my answer); feel free to comment or correct anything that could be improved, especially if someone has a solution to the problems mentioned below.

The two problems I ran into:

  • There may be a way to compile and link everything in one step, but whenever I tried this I always got a strange error saying that main.cpp could not be found, even though every path in the Makefile was correct.

  • Also, I do not know exactly why enabling relocatable device code with the -dlink or -dc options produces unresolved external symbol errors for __cudaRegisterLinkedBinary (see the sketch after this list). Can relocatable code not be compiled separately? Is it a problem with allocation at compile time?
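For context, the separate-compilation flow that those options belong to needs an explicit device-link step. Below is a minimal sketch of that flow, assuming the file names from this project (my addition, not verified against the original setup):

nvcc.exe -dc matMul.cu -o matMul.obj
nvcc.exe -dlink matMul.obj -o matMul_link.obj -lcudart

Both matMul.obj and the device-linked matMul_link.obj then have to be passed to the host linker; leaving the -dlink output out of the final link is a classic cause of unresolved __cudaRegisterLinkedBinary symbols.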

Best Answer

Here is a possible solution.

The basic idea is to write a matrix multiplication routine. For that, I simply used a matMul.cu file containing a wrapper function that calls the CUDA kernel. The file is then compiled into a static library with nvcc using the following command:

nvcc.exe -lib -o lib_cuda/matMul.lib -c matMul.cu

With the .lib file in hand, I could add the library in Qt Creator with the "Add Library..." tool using static linking, which automatically appends the last 8 lines to the .pro file. (Note that the .pro below links the library as matMul_d.lib; the _d suffix presumably marks a debug build of the same library.)

The project files are as follows:

The .pro file:

QT -= gui

CONFIG += c++11 console
CONFIG -= app_bundle

SOURCES += main.cpp

OTHER_FILES += matMul.cu

# The following library conflicts with something in Cuda
QMAKE_LFLAGS_RELEASE = /NODEFAULTLIB:msvcrt.lib
QMAKE_LFLAGS_DEBUG = /NODEFAULTLIB:msvcrtd.lib
QMAKE_LFLAGS_DEBUG += /NODEFAULTLIB:libcmt.lib


# Used to avoid conflicting flags between CUDA and MSVC files, should make everything static
QMAKE_CFLAGS_DEBUG += /MTd
QMAKE_CFLAGS_RELEASE += /MT
QMAKE_CXXFLAGS_DEBUG += /MTd
QMAKE_CXXFLAGS_RELEASE += /MT

# CUDA settings <-- may change depending on your system
CUDA_DIR = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.1" # Path to cuda toolkit install
SYSTEM_NAME = x64

# include paths
CUDA_INC += $$CUDA_DIR/include
INCLUDEPATH += $$CUDA_INC

# library directories
QMAKE_LIBDIR += $$CUDA_DIR/lib/$$SYSTEM_NAME

# Add the necessary CUDA libraries
LIBS += -lcuda -lcudart

# Add project related libraries containing kernels
win32: LIBS += -L$$PWD/lib_cuda/ -lmatMul_d

INCLUDEPATH += $$PWD/lib_cuda
DEPENDPATH += $$PWD/lib_cuda

win32:!win32-g++: PRE_TARGETDEPS += $$PWD/lib_cuda/matMul_d.lib
else:win32-g++: PRE_TARGETDEPS += $$PWD/lib_cuda/libmatMul_d.a
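As an aside (my addition, not part of the original answer): instead of running nvcc by hand, qmake can be taught to compile .cu files during the build through an extra compiler rule. A minimal sketch, assuming the CUDA_DIR variable defined above and with most nvcc flags omitted:

# Hypothetical extra-compiler rule: have qmake run nvcc on each .cu file
CUDA_SOURCES += matMul.cu
cuda.input = CUDA_SOURCES
cuda.output = ${QMAKE_FILE_BASE}_cuda.obj
cuda.commands = \"$$CUDA_DIR/bin/nvcc.exe\" -c ${QMAKE_FILE_NAME} -o ${QMAKE_FILE_OUT}
cuda.dependency_type = TYPE_C
QMAKE_EXTRA_COMPILERS += cuda

With a rule like this, the static-library step and the manual nvcc invocation become unnecessary, at the cost of having to get the nvcc/MSVC flag combination right inside the .pro file.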

main.cpp:

#include <cmath>
#include <chrono>
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>

typedef struct
{
    int width;
    int height;
    int stride;

    float *elements;
} Matrix;


// Implemented in matMul.cu and linked in from the static library.
void matMul_wrapper(Matrix &C, const Matrix &A, const Matrix &B, cudaDeviceProp devProp);

int main()
{
    int devCount;
    cudaGetDeviceCount(&devCount);

    cudaDeviceProp devProp;
    for(int i=0; i < devCount; ++i)
    {
        cudaGetDeviceProperties(&devProp, i);
        std::cout << "\nDevice: " << devProp.name << "\n";
        std::cout << "  Compute capability: " << devProp.major << "." << devProp.minor << "\n";
        std::cout << "  Max threads per block: " << devProp.maxThreadsPerBlock << "\n";
        std::cout << "  Warp size: " << devProp.warpSize << "\n\n";
    }


    Matrix A {1000, 1000, 1, new float[1000*1000]};
    Matrix B {1000, 1000, 1, new float[1000*1000]};
    Matrix C {B.width, A.height, 1, new float[1000*1000]};


    for(int row=0; row < A.height; ++row)
    {
        for(int col=0; col < A.width; ++col)
            A.elements[row*A.width + col] = (float)(row*A.width + col) / (float)100000;
    }

    for(int row=0; row < B.height; ++row)
    {
        for(int col=0; col < B.width; ++col)
            B.elements[row*B.width + col] = (float)(row*B.width + col) / (float)100000;
    }

    std::cout << A.elements[20000] << '\n';

    matMul_wrapper(C, A, B, devProp);

    // Print an element of the result to check that the kernel actually ran.
    std::cout << C.elements[20000] << '\n';

    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;

    return 0;
}
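One detail worth noting (my addition, not in the original answer): the Matrix struct and the wrapper declaration are duplicated between main.cpp and matMul.cu. A small shared header, hypothetically named matMul.h, would keep the two copies from drifting apart:

// matMul.h -- hypothetical shared header for main.cpp and matMul.cu
#ifndef MATMUL_H
#define MATMUL_H

#include <cuda_runtime.h>

typedef struct
{
    int width;
    int height;
    int stride;

    float *elements;
} Matrix;

void matMul_wrapper(Matrix &C, const Matrix &A, const Matrix &B, cudaDeviceProp devProp);

#endif // MATMUL_H

Both translation units would then simply #include "matMul.h" instead of redefining the struct.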

matMul.cu:

#include <cuda.h>
#include <cuda_runtime.h>

#define BLOCK_SIZE 16

typedef struct
{
    int width;
    int height;
    int stride;

    float *elements;
} Matrix;

// One thread computes one element of C.
__global__
void matMulKernel(Matrix C, const Matrix A, const Matrix B)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard against the partial blocks at the right and bottom edges;
    // threads outside the matrix must not write to C.
    if(row < C.height && col < C.width)
    {
        float out = 0;
        for(int j=0; j < A.width; ++j)
            out += A.elements[row*A.width + j] * B.elements[j*B.width + col];

        C.elements[row*C.width + col] = out;
    }
}


// Host-side wrapper: copies the operands to the device, launches the
// kernel and copies the result back into C.
void matMul_wrapper(Matrix &C, const Matrix &A, const Matrix &B, cudaDeviceProp devProp)
{
    dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 grid( (C.width + block.x - 1) / block.x,
               (C.height + block.y - 1) / block.y,
               1);

    Matrix d_A {A.width, A.height, A.stride};
    size_t size = A.height * A.width * sizeof(float);
    cudaMallocManaged(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B {B.width, B.height, B.stride};
    size = B.height * B.width * sizeof(float);
    cudaMallocManaged(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    Matrix d_C {C.width, C.height, C.stride};
    size = C.height * C.width * sizeof(float);
    cudaMallocManaged(&d_C.elements, size);
    cudaMemcpy(d_C.elements, C.elements, size, cudaMemcpyHostToDevice);

    matMulKernel<<<grid, block>>>(d_C, d_A, d_B);

    cudaDeviceSynchronize();

    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
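One thing the code above skips entirely is error checking: every CUDA API call silently discards its return code. A small helper macro, offered here as a suggestion rather than part of the original answer, makes failures visible:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper: abort with a readable message when a CUDA call fails.
#define CUDA_CHECK(call)                                               \
    do {                                                               \
        cudaError_t err = (call);                                      \
        if(err != cudaSuccess)                                         \
        {                                                              \
            std::fprintf(stderr, "CUDA error '%s' at %s:%d\n",         \
                         cudaGetErrorString(err), __FILE__, __LINE__); \
            std::exit(EXIT_FAILURE);                                   \
        }                                                              \
    } while(0)

Usage would be, for example, CUDA_CHECK(cudaMallocManaged(&d_A.elements, size)); to check the kernel launch itself, place CUDA_CHECK(cudaGetLastError()) right after the <<<...>>> call.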

Hope this helps.

Regarding "c++ - How to compile CUDA code with Qt in a Windows/MSVC environment?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/50485116/
