
c++ - Transposing different matrices in parallel


I have 3 matrices of different sizes, and I want to transpose them in parallel.

First, I use malloc to put them into a 2D array, and then use cudaMalloc to transfer the arrays from the host (h_B) to the device (d_B).

I use threadIdx to find the address of each matrix within the array, and call a cuBLAS function to do the transpose. Here is my code.

The code compiles, but I can't get the result. It seems that float *A = new float[m*n] inside the __global__ function is not a good approach.

Does anyone have an idea about this? Thanks a lot!

/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include<iostream>

/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>

/* Includes, cuda helper functions */
#include <helper_cuda.h>

__global__ void transposeCublasSgeam(int *M_A, int *N_A, float *ptrA, float *ptrC, const int N, int *address)
{
cublasHandle_t cnpHandle;
cublasStatus_t status = cublasCreate(&cnpHandle);

if (status != CUBLAS_STATUS_SUCCESS)
{
return;
}

const float d_alpha = 1.0f;
const float d_beta = 0.0f;
int idx = threadIdx.x;
if(idx<N){
int m = M_A[idx]; //A_row
int n = N_A[idx]; //A_col
float *A = new float[m*n];
float *C = new float[m*n];
A = ptrA+address[idx];
C = ptrC+address[idx];
cublasSgeam(cnpHandle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, &d_alpha, (const float*)A, n, &d_beta, (const float *)A, n, C, m);
delete[] A;
delete[] C;
}
cublasDestroy(cnpHandle);

}

int main()
{

const int N = 3;
int M_B[N] = { 2,3,2 }; //row number of matrices
int N_B[N] = { 3,2,4 }; //col number of matrices

float a[6] = { 1,2,3,
4,5,6 };
float b[6] = { 1,2,
3,4,
5,6};
float c[8] = { 1,2,3,1,
2,3,4,5 };

float **h_B = (float**)malloc(N * sizeof(float*));
float **h_BT = (float**)malloc(N * sizeof(float*));

h_B[0] = a, h_BT[0] = a;
h_B[1] = b, h_BT[1] = b;
h_B[2] = c, h_BT[2] = c;

int NUM_B = 20; // total number of elements
int address[] = {0,6,12};

float *d_B, *d_BT;
checkCudaErrors(cudaMalloc((void **)&d_B, NUM_B * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_BT, NUM_B * sizeof(float)));
checkCudaErrors(cudaMemcpy(d_B, h_B, NUM_B * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_BT, h_BT, NUM_B * sizeof(float), cudaMemcpyHostToDevice));

transposeCublasSgeam<<<1,N>>>(M_B, N_B, d_B,d_BT, N,address);

checkCudaErrors(cudaMemcpy(h_BT, d_BT, NUM_B * sizeof(float), cudaMemcpyDeviceToHost));

cudaFree(d_B);
cudaFree(d_BT);
delete[] h_B;
delete[] h_BT;

return 0;
}

Best answer

There are a number of errors in your code. I will probably not be able to cover all of them in this description.

  1. Note that this cublas-in-device-code functionality is no longer available in newer CUDA versions (a host-side alternative is sketched right after this list).
  2. Every pointer you pass to device code needs to be allocated with cudaMalloc. You did this for some of your pointers, but not all of them.
  3. You are confusing pointers with arrays of pointers. I can't sort all of that out for you here. Your kernel design really doesn't need the complexity of arrays of pointers anyway, so I have removed all of that.
  4. In CUDA dynamic parallelism (CDP), pointers to the local address space cannot be passed to child kernels. You cannot keep alpha and beta in the local address space and pass pointers to them to CUBLAS under CDP.
  5. For a pure transpose operation, study the cublasSgeam documentation for the recommended parameter usage.
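As a side note that is not part of the original answer: on newer CUDA toolkits, where the device-side cuBLAS library has been removed, the same three transposes can simply be issued from the host, one cublasSgeam call per matrix, each on its own stream so the small transposes may overlap. The sketch below is illustrative only; it assumes the flat layout used in the fixed code that follows (flat buffers d_B / d_BT plus per-matrix row, column and offset arrays), and the function name transposeAllFromHost and its parameter names are placeholders for the M_B / N_B / address arrays above.

/* Hedged sketch, not part of the original answer: host-launched cublasSgeam,
   one call per matrix, each on its own stream. Assumes the same flat layout
   as the code below (d_B / d_BT hold all matrices back to back; rows[i],
   cols[i] and offset[i] describe matrix i), and the default host pointer
   mode for alpha/beta. */
#include <cuda_runtime.h>
#include <cublas_v2.h>

void transposeAllFromHost(cublasHandle_t handle, const float *d_B, float *d_BT,
                          const int *rows, const int *cols, const int *offset,
                          int numMatrices, cudaStream_t *streams)
{
    const float alpha = 1.0f;
    const float beta  = 0.0f;  // beta == 0: result is just the transpose of A
    for (int i = 0; i < numMatrices; i++) {
        cublasSetStream(handle, streams[i]);   // independent stream per matrix
        // Same parameter choice as the fixed kernel below: C = alpha * op(A);
        // the second operand is the output buffer itself and contributes nothing.
        cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                    rows[i], cols[i],
                    &alpha, d_B  + offset[i], cols[i],
                    &beta,  d_BT + offset[i], rows[i],
                            d_BT + offset[i], rows[i]);
    }
}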

I believe I have fixed various other issues as well. Please study this example:

$ cat t1433.cu
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include<iostream>

/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>

/* Includes, cuda helper functions */
#include <helper_cuda.h>

__global__ void transposeCublasSgeam(int *M_A, int *N_A, float *ptrA, float *ptrC, const int N, int *address)
{
cublasHandle_t cnpHandle;
cublasStatus_t status = cublasCreate(&cnpHandle);

if (status != CUBLAS_STATUS_SUCCESS)
{
printf("thread: %d, error1: %d\n", threadIdx.x, (int)status);
return;
}

float *d_alpha = new float; // a pointer to device-heap, not local memory
*d_alpha = 1.0f;
float *d_beta = new float;
*d_beta = 0.0f;
int idx = threadIdx.x;
if(idx<N){
int m = M_A[idx]; //A_row
int n = N_A[idx]; //A_col
status = cublasSgeam(cnpHandle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, d_alpha, ptrA+address[idx], n, d_beta, ptrC+address[idx], m, ptrC+address[idx], m);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("thread: %d, error2: %d\n", threadIdx.x, (int)status);
return;
}
}
cublasDestroy(cnpHandle);

}

int main()
{

const int N = 3;
int M_B[N] = { 2,3,2 }; //row number of matrices
int N_B[N] = { 3,2,4 }; //col number of matrices

float a[6] = { 1,2,3,
4,5,6 };
float b[6] = { 1,2,
3,4,
5,6};
float c[8] = { 1,2,3,1,
2,3,4,5 };
float *h_Bdata = (float *)malloc(sizeof(a)+sizeof(b)+sizeof(c));
float *h_BTdata = (float *)malloc(sizeof(a)+sizeof(b)+sizeof(c));
memcpy(h_Bdata, a, sizeof(a));
memcpy(h_Bdata+(sizeof(a)/sizeof(a[0])), b, sizeof(b));
memcpy(h_Bdata+(sizeof(a)/sizeof(a[0]))+(sizeof(b)/sizeof(b[0])), c, sizeof(c));

int NUM_B = 20; // total number of elements
int address[] = {0,6,12};
int *d_address;
cudaMalloc(&d_address, sizeof(address));
cudaMemcpy(d_address, address, sizeof(address), cudaMemcpyHostToDevice);
int *d_M_B, *d_N_B;
cudaMalloc(&d_M_B, sizeof(M_B));
cudaMalloc(&d_N_B, sizeof(N_B));
cudaMemcpy(d_M_B, M_B, sizeof(M_B), cudaMemcpyHostToDevice);
cudaMemcpy(d_N_B, N_B, sizeof(N_B), cudaMemcpyHostToDevice);
float *d_B, *d_BT;
checkCudaErrors(cudaMalloc((void **)&d_B, NUM_B * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_BT, NUM_B * sizeof(float)));
checkCudaErrors(cudaMemcpy(d_B, h_Bdata, NUM_B * sizeof(float), cudaMemcpyHostToDevice));

transposeCublasSgeam<<<1,N>>>(d_M_B, d_N_B, d_B,d_BT, N,d_address);

checkCudaErrors(cudaMemcpy(h_BTdata, d_BT, NUM_B * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "B , BT" << std::endl;
for (int i = 0; i < NUM_B; i++){
std::cout << h_Bdata[i] << " , " << h_BTdata[i] << std::endl;}
cudaFree(d_B);
cudaFree(d_BT);

return 0;
}
$ /usr/local/cuda-8.0/bin/nvcc -I/usr/local/cuda-8.0/samples/common/inc t1433.cu -rdc=true -lcublas_device -lcudadevrt -arch=sm_35 -o t1433
$ LD_LIBRARY_PATH=/usr/local/cuda-8.0/lib64 CUDA_VISIBLE_DEVICES="3" cuda-memcheck ./t1433
========= CUDA-MEMCHECK
B , BT
1 , 1
2 , 4
3 , 2
4 , 5
5 , 3
6 , 6
1 , 1
2 , 3
3 , 5
4 , 2
5 , 4
6 , 6
1 , 1
2 , 2
3 , 2
1 , 3
2 , 3
3 , 4
4 , 1
5 , 5
========= ERROR SUMMARY: 0 errors
$
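Not part of the original answer: the copied-back h_BTdata can also be checked programmatically on the host. The expected values below are simply the row-major transposes of a, b and c from the code above (they match the BT column in the printout), and the helper name checkTransposes is a hypothetical placeholder.

/* Hedged sketch: host-side check of h_BTdata against the row-major transposes
   of a, b and c; the expected values match the BT column printed above. */
#include <cstdio>

bool checkTransposes(const float *h_BTdata)
{
    const float expected[20] = { 1,4,2,5,3,6,        // transpose of a (3x2)
                                 1,3,5,2,4,6,        // transpose of b (2x3)
                                 1,2,2,3,3,4,1,5 };  // transpose of c (4x2)
    for (int i = 0; i < 20; i++)
        if (h_BTdata[i] != expected[i]) {
            printf("mismatch at element %d\n", i);
            return false;
        }
    return true;
}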

Regarding "c++ - Transposing different matrices in parallel", the original question can be found on Stack Overflow: https://stackoverflow.com/questions/56001823/
