
c++ - Can a #pragma unroll statement be used to clean up compiler pre-evaluated code?


In NVIDIA's well-crafted reduction optimization documentation, they eventually arrive at a warpReduce that looks like this:

template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
    if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
    if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
    if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
    if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
    if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
    if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}

This offends my sensibilities as a Don't Repeat Yourself advocate. Why didn't they save the programmer some time and introduce something more like:

template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
    #pragma unroll(6)
    for (int i = 64; i > 1; i >>= 1){
        if (blockSize >= i) sdata[tid] += sdata[tid + (i/2)];
    }
}

Would this compile to the same code? Or would it confuse the compiler too much for it to optimize? I know switch/case statements run into a similar DRY problem, and I don't know of any way around that one, since they map a runtime input onto template kernels defined by compile-time constants; but I don't see how it helps anyone to make the programmer write the same thing out many times.
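For context, the switch-style dispatch I have in mind looks roughly like the sketch below (illustrative only: reduce<blockSize>, threads, blocks, smemSize and the kernel arguments are hypothetical placeholders, not taken from the NVIDIA document):

// Hypothetical host-side dispatch: a runtime block size has to be mapped by hand
// onto one of a fixed set of compile-time template instantiations.
switch (threads) {
    case 512: reduce<512><<<blocks, 512, smemSize>>>(d_idata, d_odata, n); break;
    case 256: reduce<256><<<blocks, 256, smemSize>>>(d_idata, d_odata, n); break;
    case 128: reduce<128><<<blocks, 128, smemSize>>>(d_idata, d_odata, n); break;
    case 64:  reduce<64><<<blocks, 64, smemSize>>>(d_idata, d_odata, n); break;
    // ...one case per supported block size
}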

Not to mention that if the warp size ever changes (it probably won't, but who knows), the code I propose would be much easier to update; it could even use a constant to define the warp size, so that a single change would update this optimization along with anything else that depends on the warp size.

A closely related sub-question: I have read that using the built-in warpSize is also problematic, again for compiler-optimization reasons; otherwise I would have used it in the code above. Following its definition into device_launch_parameters.h, it calls the built-in device query __cudaGet_warpSize(). If that were not an issue, NVIDIA could have provided a complete, general warpReduce optimization in that document for reference, but they did not, and I wonder why.
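To illustrate the distinction (my own sketch, not from the question or the NVIDIA document): warpSize is a built-in device value that is only known at run time, so it cannot guarantee that the loop bound is folded at compile time, whereas an ordinary compile-time constant can. The name WARP_SIZE below is an assumed project-level constant, valid for architectures where warps are 32 threads wide.

// Sketch only: WARP_SIZE is an assumed compile-time constant standing in for the
// run-time-only built-in warpSize.
#define WARP_SIZE 32

template <unsigned int blockSize>
__device__ void warpReduceConst(volatile int* sdata, int tid) {
    // The trip count is a compile-time constant, so the compiler can fully unroll
    // the loop and discard the guards that are known false for this blockSize.
    #pragma unroll
    for (int i = WARP_SIZE * 2; i > 1; i >>= 1) {
        if (blockSize >= i) sdata[tid] += sdata[tid + (i / 2)];
    }
}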

Tl;dr: Can I write this:

template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
    #pragma unroll(6)
    for (int i = warpSize * 2; i > 1; i >>= 1){
        if (blockSize >= i) sdata[tid] += sdata[tid + (i/2)];
    }
}

and get the same optimization as in their documentation?

Best Answer

This is an insightful observation (I think). Based on my test code:

#include <stdio.h>
#include <stdlib.h>   // for malloc() and rand()

template <unsigned int blockSize>
__device__ void warpReduce1(volatile int* sdata, int tid) {
    if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
    if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
    if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
    if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
    if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
    if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}

template <unsigned int blockSize>
__device__ void warpReduce2(volatile int* sdata, int tid) {
    #pragma unroll 6
    for (int i = 64; i > 1; i >>= 1){
        if (blockSize >= i) sdata[tid] += sdata[tid + (i/2)];
    }
}

template <unsigned int blockSize>
__global__ void reduce6(int *g_idata, int *g_odata, unsigned int n) {
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + tid;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;
    while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; }
    __syncthreads();
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64)  { sdata[tid] += sdata[tid + 64];  } __syncthreads(); }
    if (tid < 32) warpReduce1<blockSize>(sdata, tid);
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

#define DSIZE 1048576
#define BSIZE 256
#define NBLKS 64

int main(){
    int *h_data, *d_idata, *d_odata;
    h_data = (int *)malloc(DSIZE * sizeof(int));
    cudaMalloc(&d_idata, DSIZE * sizeof(int));
    cudaMalloc(&d_odata, NBLKS * sizeof(int));
    for (int i = 0; i < DSIZE; i++) h_data[i] = rand()%2;
    cudaMemcpy(d_idata, h_data, DSIZE*sizeof(int), cudaMemcpyHostToDevice);
    // dynamic shared memory size added for the extern __shared__ array
    reduce6<BSIZE><<<64, BSIZE, BSIZE*sizeof(int)>>>(d_idata, d_odata, DSIZE);
    cudaMemcpy(h_data, d_odata, NBLKS*sizeof(int), cudaMemcpyDeviceToHost);
    return 0;
}
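(As an aside, not stated in the answer: the dump that follows looks like cuobjdump -sass output for the compiled program, presumably produced by compiling t576.cu for sm_20 and running cuobjdump -sass on the resulting binary; the exact invocation is an assumption on my part.)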

The generated machine code appears to be identical:

Fatbin elf code:
================
arch = sm_20
code version = [1,7]
producer = <unknown>
host = linux
compile_size = 64bit
identifier = t576.cu

code for sm_20

Fatbin elf code:
================
arch = sm_20
code version = [1,7]
producer = cuda
host = linux
compile_size = 64bit
identifier = t576.cu

code for sm_20
Function : _Z7reduce6ILj256EEvPiS0_j
.headerflags @"EF_CUDA_SM20 EF_CUDA_PTX_SM(EF_CUDA_SM20)"
/*0000*/ MOV R1, c[0x1][0x100]; /* 0x2800440400005de4 */
/*0008*/ S2R R0, SR_CTAID.X; /* 0x2c00000094001c04 */
/*0010*/ S2R R2, SR_TID.X; /* 0x2c00000084009c04 */
/*0018*/ MOV R4, c[0x0][0x14]; /* 0x2800400050011de4 */
/*0020*/ MOV R10, RZ; /* 0x28000000fc029de4 */
/*0028*/ ISCADD R8, R0, R2, 0x9; /* 0x4000000008021d23 */
/*0030*/ SHL.W R3, R2, 0x2; /* 0x6000c0000820de03 */
/*0038*/ SHL R9, R4, 0x9; /* 0x6000c00024425c03 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R8, c[0x0][0x30], PT; /* 0x1b0e4000c081dc03 */
/*0048*/ STS [R3], RZ; /* 0xc9000000003fdc85 */
/*0050*/ SSY 0xf0; /* 0x6000000260000007 */
/*0058*/ @P0 NOP.S; /* 0x40000000000001f4 */
/*0060*/ MOV32I R11, 0x4; /* 0x180000001002dde2 */
/*0068*/ IMAD.U32.U32 RZ, R1, RZ, RZ; /* 0x207e0000fc1fdc03 */
/*0070*/ SSY 0xe8; /* 0x60000001c0000007 */
/*0078*/ NOP; /* 0x4000000000001de4 */
/*0080*/ IMAD.U32.U32 R6.CC, R8, R11, c[0x0][0x20]; /* 0x2017800080819c03 */
/*0088*/ IADD R5, R8, 0x100; /* 0x4800c00400815c03 */
/*0090*/ IMAD.U32.U32.HI.X R7, R8, R11, c[0x0][0x24]; /* 0x209680009081dc43 */
/*0098*/ IMAD.U32.U32 R4.CC, R5, R11, c[0x0][0x20]; /* 0x2017800080511c03 */
/*00a0*/ IADD R8, R8, R9; /* 0x4800000024821c03 */
/*00a8*/ LD.E R7, [R6]; /* 0x840000000061dc85 */
/*00b0*/ IMAD.U32.U32.HI.X R5, R5, R11, c[0x0][0x24]; /* 0x2096800090515c43 */
/*00b8*/ ISETP.LT.U32.AND P0, PT, R8, c[0x0][0x30], PT; /* 0x188e4000c081dc03 */
/*00c0*/ LD.E R4, [R4]; /* 0x8400000000411c85 */
/*00c8*/ IADD R5, R7, R10; /* 0x4800000028715c03 */
/*00d0*/ IADD R10, R5, R4; /* 0x4800000010529c03 */
/*00d8*/ @P0 BRA 0x80; /* 0x4003fffe800001e7 */
/*00e0*/ NOP.S; /* 0x4000000000001df4 */
/*00e8*/ STS.S [R3], R10; /* 0xc900000000329c95 */
/*00f0*/ IMAD.U32.U32 RZ, R1, RZ, RZ; /* 0x207e0000fc1fdc03 */
/*00f8*/ BAR.RED.POPC RZ, RZ, RZ, PT; /* 0x50ee0000ffffdc04 */
/*0100*/ ISETP.GT.U32.AND P0, PT, R2, 0x7f, PT; /* 0x1a0ec001fc21dc03 */
/*0108*/ @!P0 LDS R5, [R3]; /* 0xc100000000316085 */
/*0110*/ @!P0 LDS R4, [R3+0x200]; /* 0xc100000800312085 */
/*0118*/ @!P0 IADD R4, R5, R4; /* 0x4800000010512003 */
/*0120*/ @!P0 STS [R3], R4; /* 0xc900000000312085 */
/*0128*/ BAR.RED.POPC RZ, RZ, RZ, PT; /* 0x50ee0000ffffdc04 */
/*0130*/ ISETP.GT.U32.AND P0, PT, R2, 0x3f, PT; /* 0x1a0ec000fc21dc03 */
/*0138*/ @!P0 LDS R5, [R3]; /* 0xc100000000316085 */
/*0140*/ @!P0 LDS R4, [R3+0x100]; /* 0xc100000400312085 */
/*0148*/ @!P0 IADD R4, R5, R4; /* 0x4800000010512003 */
/*0150*/ @!P0 STS [R3], R4; /* 0xc900000000312085 */
/*0158*/ SSY 0x240; /* 0x6000000380000007 */
/*0160*/ BAR.RED.POPC RZ, RZ, RZ, PT; /* 0x50ee0000ffffdc04 */
/*0168*/ ISETP.GT.U32.AND P0, PT, R2, 0x1f, PT; /* 0x1a0ec0007c21dc03 */
/*0170*/ @P0 NOP.S; /* 0x40000000000001f4 */
/*0178*/ SHL.W R3, R2, 0x2; /* 0x6000c0000820de03 */
/*0180*/ LDS R5, [R3]; /* 0xc100000000315c85 */
/*0188*/ LDS R4, [R3+0x80]; /* 0xc100000200311c85 */
/*0190*/ IADD R6, R5, R4; /* 0x4800000010519c03 */
/*0198*/ STS [R3], R6; /* 0xc900000000319c85 */
/*01a0*/ LDS R5, [R3]; /* 0xc100000000315c85 */
/*01a8*/ LDS R4, [R3+0x40]; /* 0xc100000100311c85 */
/*01b0*/ IADD R7, R5, R4; /* 0x480000001051dc03 */
/*01b8*/ STS [R3], R7; /* 0xc90000000031dc85 */
/*01c0*/ LDS R5, [R3]; /* 0xc100000000315c85 */
/*01c8*/ LDS R4, [R3+0x20]; /* 0xc100000080311c85 */
/*01d0*/ IADD R6, R5, R4; /* 0x4800000010519c03 */
/*01d8*/ STS [R3], R6; /* 0xc900000000319c85 */
/*01e0*/ LDS R5, [R3]; /* 0xc100000000315c85 */
/*01e8*/ LDS R4, [R3+0x10]; /* 0xc100000040311c85 */
/*01f0*/ IADD R7, R5, R4; /* 0x480000001051dc03 */
/*01f8*/ STS [R3], R7; /* 0xc90000000031dc85 */
/*0200*/ LDS R5, [R3]; /* 0xc100000000315c85 */
/*0208*/ LDS R4, [R3+0x8]; /* 0xc100000020311c85 */
/*0210*/ IADD R6, R5, R4; /* 0x4800000010519c03 */
/*0218*/ STS [R3], R6; /* 0xc900000000319c85 */
/*0220*/ LDS R5, [R3]; /* 0xc100000000315c85 */
/*0228*/ LDS R4, [R3+0x4]; /* 0xc100000010311c85 */
/*0230*/ IADD R4, R5, R4; /* 0x4800000010511c03 */
/*0238*/ STS.S [R3], R4; /* 0xc900000000311c95 */
/*0240*/ ISETP.NE.AND P0, PT, R2, RZ, PT; /* 0x1a8e0000fc21dc23 */
/*0248*/ @P0 BRA.U 0x278; /* 0x40000000a00081e7 */
/*0250*/ @!P0 MOV32I R3, 0x4; /* 0x180000001000e1e2 */
/*0258*/ @!P0 LDS R2, [RZ]; /* 0xc100000003f0a085 */
/*0260*/ @!P0 IMAD.U32.U32 R4.CC, R0, R3, c[0x0][0x28]; /* 0x20078000a0012003 */
/*0268*/ @!P0 IMAD.U32.U32.HI.X R5, R0, R3, c[0x0][0x2c]; /* 0x20868000b0016043 */
/*0270*/ @!P0 ST.E [R4], R2; /* 0x940000000040a085 */
/*0278*/ EXIT; /* 0x8000000000001de7 */
..........................................



Fatbin ptx code:
================
arch = sm_20
code version = [4,1]
producer = cuda
host = linux
compile_size = 64bit
compressed
identifier = t576.cu

whether I use:

  if (tid < 32) warpReduce1<blockSize>(sdata, tid);

or:

  if (tid < 32) warpReduce2<blockSize>(sdata, tid);

In retrospect, the compiler can easily determine the trip count of the for loop:

    for (int i = 64; i>1; i>>=1){

The unrolled loop then yields exactly the same sequence of operations as the explicitly written-out version, so the compiler generates identical code.
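Concretely (my own illustration, not part of the original answer): in the test case above blockSize is 256, so every if (blockSize >= i) guard is true at compile time, and after unrolling the loop body should collapse to the same six statements as warpReduce1:

// What warpReduce2<256> effectively becomes once the loop is fully unrolled
// and the always-true guards are removed:
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];

This matches the six LDS/IADD/STS groups visible in the SASS above at shared-memory byte offsets 0x80 down to 0x4 (i.e. element offsets 32 down to 1).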

The original question, "c++ - Can a #pragma unroll statement be used to clean up compiler pre-evaluated code?", can be found on Stack Overflow: https://stackoverflow.com/questions/26062378/
