gpt4 book ai didi

c++ - 为 cublasSgemm 使用指向 vector::data() 的指针

转载 作者:行者123 更新时间:2023-11-28 07:00:03 25 4
gpt4 key购买 nike

我在使用 cudaMalloc、cudaMemcpy 和 cublasSgemm 时尝试使用 vector::data() 指针,但我似乎无法让它工作。如果我没记错的话,vector::data() 应该返回一个指向该 vector 存储在内存中的实际数组的指针,因此它应该与 T* aArray 指针指向存储在内存中的 T 类型数组相同。使用后者确实有效,但 data() 指针无效。

这是我正在处理的代码:

// Quoted snippet from the question: computes C = A * B with cuBLAS.
// NOTE(review): this excerpt is truncated (no closing brace / return); the
// complete version appears further below.
Matrix<T> Matrix<T>::cudaProd(Matrix<T>&A,Matrix<T>&B, Matrix<T>&C)
{
C = Matrix<T>(A.height, B.width); //resizing of the vector of elements for Matrix C
//A[m][n]*B[n][k]=C[m][k]
int m = A.height;
int n = B.height;
int k = B.width;
float alpha = 1.0f;
float beta = 0.0f;

// NOTE(review): these host-pointer initializers are dead — cudaMalloc below
// immediately overwrites d_a/d_b/d_c with device pointers. Worse,
// C.GetPointer() returns a cached pointer copied from the temporary destroyed
// in the assignment to C above, so it already dangles here.
T* d_a = A.GetPointer();
T* d_b = B.GetPointer();
T* d_c = C.GetPointer();

// NOTE(review): return codes of cudaMalloc/cudaMemcpy are ignored throughout.
cudaMalloc(&d_a,A.size);
cudaMalloc(&d_b,B.size);
cudaMalloc(&d_c,C.size);

cudaMemcpy(d_a,A.GetPointer(),A.size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,B.GetPointer(),B.size,cudaMemcpyHostToDevice);

cublasHandle_t handle;

cublasStatus_t status = cublasCreate(&handle);

if (status != CUBLAS_STATUS_SUCCESS)
{
std::cerr << "!!!! CUBLAS initialization error\n";
}

// Row-major trick: cuBLAS is column-major, so passing (k, m, n) with the
// operands in the order d_b, d_a yields A*B laid out row-major in d_c.
status = cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,k,m,n,&alpha,d_b,k,d_a,n,&beta,d_c,k);

if (status != CUBLAS_STATUS_SUCCESS)
{
std::cerr << "!!!! kernel execution error.\n";
}

status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
std::cerr << "!!!! shutdown error (A)\n";
}

// NOTE(review): C.GetPointer() is the stale cached pointer — this write goes
// to freed memory, which is why the result shows up as all zeros on the host.
cudaMemcpy(C.GetPointer(), d_c, C.size,cudaMemcpyDeviceToHost);

cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);

GetPointer() 成员函数返回该 Matrix 对象的元素 vector 的 vector::data()。大小是 vector 元素在内存中的大小。

矩阵 C 的 vector 在使用 data() 指针时返回全零,而在使用不带 vector 的 T* aArray 指针时返回矩阵 A 和 B 的乘积。

是否真的可以使用 vector 来存储元素数组,然后使用 data() 指针来初始化数组的设备拷贝,或者我是否被迫在主机上使用 C 风格的数组存储?另外,我尝试过使用 thrust::device_vector 并且可行,但我想远离创建 raw_pointer_casts。

感谢您的帮助!

编辑: 对于那些在复制和粘贴方面有困难的人,这里是完整的例子:

#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <cublas_v2.h>
#include <vector>
#include <iostream>

using namespace std;

// Simple dense matrix stored row-major in a std::vector<T>.
template<typename T> class Matrix
{
public:
~Matrix();
Matrix();
Matrix(int rows, int columns);
int width;    // number of columns
int height;   // number of rows
int stride;   // row stride in elements; equals width for row-major storage
size_t size;  // total byte size of the element array (height*width*sizeof(T))

T &GetElement(int row, int column);             // element access at (row, column)
void SetElement(int row, int column, T value);  // element write at (row, column)
void SetElements(vector<T> value);              // replace the whole element store
vector<T>& GetElements();                       // reference to the backing vector
T* GetPointer();                                // raw pointer to the element array
Matrix<T> cudaProd(Matrix<T>&A,Matrix<T>&B, Matrix<T>&C); // C = A * B via cuBLAS
private:
vector<T> elements;  // row-major element storage
// NOTE(review): non-owning cached copy of elements.data(). It dangles whenever
// `elements` reallocates, and the compiler-generated copy/assignment duplicates
// the pointer verbatim (so `C = Matrix<T>(r, c)` leaves C.firstElement pointing
// into a destroyed temporary). Prefer elements.data() taken fresh each time.
T* firstElement;
};

// Destructor: nothing to release explicitly — `elements` frees its own storage
// and the cached firstElement pointer is non-owning.
template<typename T>
Matrix<T>::~Matrix()
{
}

// Default constructor.
// Fix: the original left every member (width, height, stride, size,
// firstElement) indeterminate, so any read before an assignment — e.g.
// GetPointer() or the `size` field on a default-constructed Matrix — was
// undefined behavior. Zero-initialize everything instead.
template<typename T>
Matrix<T>::Matrix()
{
	height = 0;
	width = 0;
	stride = 0;
	size = 0;
	firstElement = 0; // no storage yet
}

// Sized constructor: allocates rows*columns value-initialized elements,
// stored row-major (stride == column count).
template<typename T>
Matrix<T>::Matrix(int rows, int columns)
{
	height = rows;
	width = columns;
	stride = columns; // row-major layout: stride equals the number of columns

	const int count = rows * columns;
	elements.resize(count);
	size = count * sizeof(T); // total byte size of the element array

	// Cache the address of the backing store for GetPointer().
	// NOTE(review): this cached pointer goes stale if `elements` reallocates
	// or if the Matrix is copied/assigned.
	firstElement = elements.data();
}

// Returns a reference to the element at (row, column).
// Row-major addressing: element (row, column) lives at index row*width+column.
template<typename T>
T &Matrix<T>::GetElement(int row, int column)
{
	const int index = row * width + column;
	return elements[index];
}

// Exposes the underlying row-major element vector by reference.
template<typename T>
vector<T>& Matrix<T>::GetElements()
{
	return this->elements;
}

// Writes `value` at (row, column), using the same row-major addressing
// as GetElement().
template<typename T>
void Matrix<T>::SetElement(int row, int column, T value)
{
	const int index = row * width + column;
	elements[index] = value;
}

// Replaces the whole element store with a copy of `value`.
// Fix: the vector assignment may reallocate the backing array (e.g. when the
// new size exceeds the current capacity), which left the cached `firstElement`
// dangling — GetPointer() would then hand out freed memory. Re-derive the
// cached pointer from the new storage after the assignment.
template<typename T>
void Matrix<T>::SetElements(vector<T> value)
{
	elements = value;
	firstElement = elements.data();
}

// Returns a raw pointer to the first element of the matrix's storage.
// Fix (root cause of the all-zero result in cudaProd): the original returned
// the cached `firstElement`, which the compiler-generated copy-assignment
// duplicates verbatim — so after `C = Matrix<T>(rows, cols)` it pointed into
// the destroyed temporary, and the final device->host cudaMemcpy wrote to
// freed memory instead of C's vector. Always derive the pointer live from the
// vector itself; it is valid for as long as `elements` is not reallocated.
template<typename T>
T* Matrix<T>::GetPointer()
{
	return elements.data();
}


template<typename T>
// Matrix multiplication using CUDA/cuBLAS: returns C = A[m][n] * B[n][k].
//
// cuBLAS is column-major while this class stores row-major, so the Sgemm call
// uses the standard trick of passing (k, m, n) with the operands in the order
// d_b, d_a: the column-major result of B*A is exactly A*B in row-major order.
//
// NOTE(review): cublasSgemm operates on float, so this template is only
// meaningful for T == float.
//
// Fixes vs. the original:
//  - The final device->host copy targeted C.GetPointer(), a cached pointer
//    copied from the destroyed temporary in `C = Matrix<T>(...)` — it wrote to
//    freed memory, leaving C's vector all zeros. All host pointers are now
//    taken live from the vectors via GetElements().data().
//  - Device pointers are no longer seeded with host pointers (dead stores
//    that cudaMalloc immediately overwrote).
//  - CUDA API return codes are checked.
//  - The host->device copy of C is dropped: beta == 0, so Sgemm ignores and
//    overwrites d_c's prior contents.
Matrix<T> Matrix<T>::cudaProd(Matrix<T>&A,Matrix<T>&B, Matrix<T>&C)
{
	C = Matrix<T>(A.height, B.width); // size C's element vector to m x k
	//A[m][n]*B[n][k]=C[m][k]
	int m = A.height;
	int n = B.height;
	int k = B.width;
	float alpha = 1.0f;
	float beta = 0.0f;

	T* d_a = 0;
	T* d_b = 0;
	T* d_c = 0;

	cudaError_t err;
	if ((err = cudaMalloc((void**)&d_a, A.size)) != cudaSuccess ||
	    (err = cudaMalloc((void**)&d_b, B.size)) != cudaSuccess ||
	    (err = cudaMalloc((void**)&d_c, C.size)) != cudaSuccess)
	{
		std::cerr << "!!!! device allocation error: " << cudaGetErrorString(err) << "\n";
	}

	// Upload operands; pointers come straight from the vectors so they can
	// never be stale.
	if ((err = cudaMemcpy(d_a, A.GetElements().data(), A.size, cudaMemcpyHostToDevice)) != cudaSuccess ||
	    (err = cudaMemcpy(d_b, B.GetElements().data(), B.size, cudaMemcpyHostToDevice)) != cudaSuccess)
	{
		std::cerr << "!!!! host->device copy error: " << cudaGetErrorString(err) << "\n";
	}

	cublasHandle_t handle;
	cublasStatus_t status = cublasCreate(&handle);
	if (status != CUBLAS_STATUS_SUCCESS)
	{
		std::cerr << "!!!! CUBLAS initialization error\n";
	}

	// Row-major trick: (B^T * A^T)^T == A*B, expressed by swapping the
	// operand order and passing dimensions (k, m, n).
	status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
	                     k, m, n, &alpha, d_b, k, d_a, n, &beta, d_c, k);
	if (status != CUBLAS_STATUS_SUCCESS)
	{
		std::cerr << "!!!! kernel execution error.\n";
	}

	status = cublasDestroy(handle);
	if (status != CUBLAS_STATUS_SUCCESS)
	{
		std::cerr << "!!!! shutdown error (A)\n";
	}

	// Download the result directly into C's vector storage. cudaMemcpy is
	// blocking, so the data is valid on the host once this returns.
	if ((err = cudaMemcpy(C.GetElements().data(), d_c, C.size, cudaMemcpyDeviceToHost)) != cudaSuccess)
	{
		std::cerr << "!!!! device->host copy error: " << cudaGetErrorString(err) << "\n";
	}

	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);

	return C;
}

int main()
{
Matrix<float> A(2,2);
Matrix<float> B(2,2);
Matrix<float> C;

vector<float> aE(4,2);
vector<float> bE(4,4);
A.SetElements(aE);
B.SetElements(bE);

C = C.cudaProd(A, B, C); //function call to cudaProd()

for(int row = 0; row < A.height; ++row)
{
for(int col = 0; col < A.width; ++col)
{
cout<<A.GetElement(row, col)<<" "; //h_c is stored on device in column major order, need to switch to row major order
}
printf("\n");
}
printf("\n");

for(int row = 0; row < B.height; ++row)
{
for(int col = 0; col < B.width; ++col)
{
cout<<B.GetElement(row, col)<<" "; //h_c is stored on device in column major order, need to switch to row major order
}
printf("\n");
}
printf("\n");

for(int row = 0; row < C.height; ++row)
{
for(int col = 0; col < C.width; ++col)
{
cout<<C.GetElement(row, col)<<" "; //h_c is stored on device in column major order, need to switch to row major order
}
printf("\n");
}
printf("\n");
}

最佳答案

If I am not mistaken, vector::data() should return a pointer to the actual array stored in memory for that vector so it should be the same as having a T* aArray pointer to an array of type T stored in memory.

std::vector 类是一个拥有资源的类(owning class)。这意味着试图绕过它、用 data() 指针自己管理底层资源,会让你进入一个痛苦的世界。

出于同样的原因:

cudaMalloc(&d_a,A.size);
cudaMalloc(&d_b,B.size);
cudaMalloc(&d_c,C.size);

和:

cudaMemcpy(C.GetPointer(), d_c, C.size,cudaMemcpyDeviceToHost);

和:

cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);

不可能工作。

关于c++ - 为 cublasSgemm 使用指向 vector<T>::data() 的指针,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/22614428/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com