Asynchronous streams in CUDA do not yield an increase in performance
我正在尝试通过使用多个流来加速以下CUDA代码。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 | #define N (4096 * 4096) #define blockDimX 16 #define blockDimY 16 float domain1 [N]; float domain2 [N]; __global__ updateDomain1_kernel(const int dimX, const int dimY) { // update mechanism here for domain1 // ... } __global__ updateDomain2_kernel(const int dimX, const int dimY) { // update mechanism here for domain2, which is nearly the same // ... } __global__ addDomainsTogether_kernel(float* domainOut, const int dimX, const int dimY) { // add domain1 and domain2 together and fill domainOut } void updateDomains(float* domainOut) { dim3 blocks((dimX + blockDimX - 1) / blockDimX , (dimY + blockDimY- 1) / blockDimY); dim3 threads(blockDimX, blockDimY); updateDomain1_kernel<<<blocks, threads>>> (dimX, dimY); updateDomain2_kernel<<<blocks, threads>>> (dimX, dimY); addDomainsTogether_kernel<<<block, threads>>> (domainOut_gpu, dimX, dimY); cudaMemcpy(domainOut, domainOut_gpu, N * sizeof(float), cudaMemcpyDeviceToHost); } |
精确的实现并不重要；重要的是，两个域的更新是完全独立的操作，其结果随后在第三次内核调用中被使用。因此我认为，把每个更新内核放进各自的流里、让它们并发执行，是加速这段代码的好办法。于是我把代码改成了如下形式：
// Two-stream variant: issue the two independent update kernels into separate
// streams so the hardware *may* overlap them. Note: each kernel here launches
// a 4096x4096 grid (~16M threads), which already saturates the device, so
// little overlap is expected in practice.
// Assumes dimX, dimY and domainOut_gpu are defined elsewhere — TODO confirm.
void updateDomains(float* domainOut) {
    dim3 blocks((dimX + blockDimX - 1) / blockDimX,
                (dimY + blockDimY - 1) / blockDimY);
    dim3 threads(blockDimX, blockDimY);

    // NOTE(review): creating and destroying streams on every call adds
    // overhead; for a hot path, create them once and reuse.
    cudaStream_t stream0, stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);

    updateDomain1_kernel<<<blocks, threads, 0, stream0>>>(dimX, dimY);
    updateDomain2_kernel<<<blocks, threads, 0, stream1>>>(dimX, dimY);
    // NOTE(review): a device-wide sync is heavier than needed; recording an
    // event per stream and making the combine stream wait on both would
    // keep the host out of the critical path.
    cudaDeviceSynchronize();

    // Fix: was `block` (undefined identifier) — the variable is `blocks`.
    addDomainsTogether_kernel<<<blocks, threads>>>(domainOut_gpu, dimX, dimY);
    cudaMemcpy(domainOut, domainOut_gpu, N * sizeof(float),
               cudaMemcpyDeviceToHost);

    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
}
我本以为性能上会出现差异，但实际上几乎没有任何明显差别。于是我猜测，编译器在第一个版本里就已经足够聪明，自动把两个更新调用并发调度了；照此推断，下面这个版本应该会降低性能：
// Single-stream variant: all three kernels are serialized on stream0, then
// the result is copied back. Functionally equivalent to the default-stream
// version, minus default-stream implicit synchronization with other streams.
// Assumes dimX, dimY and domainOut_gpu are defined elsewhere — TODO confirm.
void updateDomains(float* domainOut) {
    dim3 blocks((dimX + blockDimX - 1) / blockDimX,
                (dimY + blockDimY - 1) / blockDimY);
    dim3 threads(blockDimX, blockDimY);

    cudaStream_t stream0;
    cudaStreamCreate(&stream0);

    updateDomain1_kernel<<<blocks, threads, 0, stream0>>>(dimX, dimY);
    updateDomain2_kernel<<<blocks, threads, 0, stream0>>>(dimX, dimY);
    // Fix: was `<<<block, threads0, stream0>>>` — undefined `block`, and the
    // launch config was missing the shared-memory argument/comma. The correct
    // form is <<<grid, block, dynamicSmemBytes, stream>>>.
    addDomainsTogether_kernel<<<blocks, threads, 0, stream0>>>(domainOut_gpu,
                                                              dimX, dimY);

    // Blocking cudaMemcpy on the default stream synchronizes with stream0's
    // completed work before reading domainOut_gpu back to the host.
    cudaMemcpy(domainOut, domainOut_gpu, N * sizeof(float),
               cudaMemcpyDeviceToHost);

    cudaStreamDestroy(stream0);
}
但是,再次,性能速度几乎没有任何区别。如果有的话,最后一个似乎最快。这让我觉得关于CUDA流有一些我不了解的地方。有人可以启发我如何加速此代码吗?
增加的并行度只有在您尚未用满所有可用核心时，才能提升计算吞吐量；如果并行性已经足够，它只会增加同步开销。在本例中，每个更新内核本身就启动了 4096×4096（约 1600 万）个线程，单个内核已经足以占满整个 GPU，因此把两个内核放进不同的流里并不会带来实际的重叠执行。