I have the following problem. I have implemented several different parallel reduction algorithms, and all of them work correctly as long as I reduce only one value per kernel. But now I need to reduce several values (21), and I simply have no idea why it sometimes works and sometimes doesn't.
The steps performed can be seen in the complete code below, which you can just copy & paste and run (an example compile command follows the listing).
#include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
// switch the compiler flag if you don't have the sdk's helper_cuda.h file
#if 1
#include "helper_cuda.h"
#else
#define checkCudaErrors(val) (val)
#define getLastCudaError(msg)
#endif
#ifdef __CDT_PARSER__
#define __global__
#define __device__
#define __shared__
#define __host__
#endif
// compute sum of val over num threads
__device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
{
    reductionSpace[localId] = val; // load data into shared mem
    __syncthreads();

    // complete loop unroll
    if (localId < 128) reductionSpace[localId] += reductionSpace[localId + 128];
    __syncthreads();

    if (localId < 64) reductionSpace[localId] += reductionSpace[localId + 64];
    __syncthreads();

    // within one warp (=32 threads) instructions are SIMD synchronous
    // -> __syncthreads() not needed
    if (localId < 32)
    {
        reductionSpace[localId] += reductionSpace[localId + 32];
        reductionSpace[localId] += reductionSpace[localId + 16];
        reductionSpace[localId] += reductionSpace[localId + 8];
        reductionSpace[localId] += reductionSpace[localId + 4];
        reductionSpace[localId] += reductionSpace[localId + 2];
        reductionSpace[localId] += reductionSpace[localId + 1];
    }

    // EDIT: here we need to sync in order to guarantee that the thread with ID 0 is also done...
    __syncthreads();

    return reductionSpace[0];
}
__global__ void d_kernel(float* od, int n)
{
    extern __shared__ float reductionSpace[];
    int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int linId = threadIdx.x;
    __shared__ float partialSums[21];

    float tmp[6] = { 0, 0, 0, 0, 0, 0 };

    // for simplification all computations are removed - this version still shows the same behaviour
    if (g_idx < n)
    {
        tmp[0] = 1.0f;
        tmp[1] = 1.0f;
        tmp[2] = 1.0f;
        tmp[3] = 1.0f;
        tmp[4] = 1.0f;
        tmp[5] = 1.0f;
    }

    float res = 0.0f;
    int c = 0;
    for (int i = 0; i < 6; ++i)
    {
        for (int j = i; j < 6; ++j, ++c)
        {
            res = tmp[i] * tmp[j];

            // compute the sum of the values res for blockDim.x threads. This uses
            // the shared memory reductionSpace for calculations
            partialSums[c] = localSum(res, reductionSpace, linId);
        }
    }
    __syncthreads();

    // write back the sum values for this block
    if (linId < 21)
    {
        atomicAdd(&od[linId], partialSums[linId]);
    }
}
int main()
{
    int w = 320;
    int h = 240;
    int n = w * h;

    // ------------------------------------------------------------------------------------
    float *d_out;
    checkCudaErrors(cudaMalloc(&d_out, 21 * sizeof(float)));
    float* h_out = new float[21];

    int dimBlock = 256;
    int dimGrid = (n - 1) / dimBlock + 1;
    int sharedMemSize = dimBlock * sizeof(float);

    printf("w: %d\n", w);
    printf("h: %d\n", h);
    printf("dimBlock: %d\n", dimBlock);
    printf("dimGrid: %d\n", dimGrid);
    printf("sharedMemSize: %d\n", sharedMemSize);

    int failcounter = 0;
    float target = (float) n;
    int c = 0;

    // ------------------------------------------------------------------------------------
    // run the kernel 200 times
    for (int run = 0; run < 200; ++run)
    {
        cudaMemset(d_out, 0, 21 * sizeof(float));
        d_kernel<<<dimGrid, dimBlock, sharedMemSize>>>(d_out, n);
        getLastCudaError("d_kernel");

        checkCudaErrors(cudaMemcpy(h_out, d_out, 21 * sizeof(float), cudaMemcpyDeviceToHost));

        // check if the output has the target value
        // since all threads get value 1, the kernel output corresponds to counting the elements, which is w*h=n
        bool failed = false;
        for (int i = 0; i < 21; ++i)
        {
            if (fabsf(h_out[i] - target) > 0.01f)
            {
                ++failcounter;
                failed = true;
            }
        }

        // if failed, print the elements to show which one failed
        if (failed)
        {
            c = 0;
            for (int i = 0; i < 6; ++i)
            {
                for (int j = i; j < 6; ++j, ++c)
                {
                    printf("%10.7f ", h_out[c]);
                }
                printf("\n");
            }
        }
    }

    printf("failcounter: %d\n", failcounter);

    // ------------------------------------------------------------------------------------
    delete[] h_out;
    checkCudaErrors(cudaFree(d_out));
    // ------------------------------------------------------------------------------------

    return 0;
}
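A possible way to build and run it (the file name, the architecture flag, and the samples include path are my assumptions; adjust them for your setup, or switch the compiler flag at the top to avoid needing helper_cuda.h):

    nvcc -arch=sm_20 -I/path/to/cuda-samples/common/inc reduce21.cu -o reduce21
    ./reduce21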
Some remarks:
The block size is always 256, so the unrolled loop in localSum() checks the correct thread IDs. As mentioned, out of 200 runs the result is sometimes completely correct, sometimes only 2 values are wrong, and sometimes around 150 values are wrong.
And it cannot have anything to do with floating-point precision, because only 1 * 1 is multiplied and stored in the variable res in d_kernel(). I can clearly see that sometimes it is just as if some threads or blocks were never started, but I have no idea why. :/
From the results it should be obvious that there is some kind of race condition, but I simply cannot see the problem.
Does anybody know where the problem is?
EDIT:
I have now tested a lot of things, and it seems this must have something to do with the block size. If I reduce it to something <= 64 and change localSum() accordingly, everything works as expected (a sketch of such a variant is shown below).
But that makes no sense to me?! I am still doing nothing but an ordinary parallel reduction with shared memory; the only difference is that I do it 21 times per thread.
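For illustration, here is a minimal sketch of what such an adjusted localSum() could look like, assuming blockDim.x == 32 so that the whole reduction stays inside a single warp (the function name and the exact variant are my assumption, not the code that was actually used):

    // hypothetical single-warp variant, assuming blockDim.x == 32;
    // it relies on the same implicit warp-synchronous execution (plus the
    // volatile pointer) as the unrolled part of the original localSum()
    __device__ float localSum32(const float& val, volatile float* reductionSpace, const uint& localId)
    {
        reductionSpace[localId] = val; // load data into shared mem
        if (localId < 16) reductionSpace[localId] += reductionSpace[localId + 16];
        if (localId < 8)  reductionSpace[localId] += reductionSpace[localId + 8];
        if (localId < 4)  reductionSpace[localId] += reductionSpace[localId + 4];
        if (localId < 2)  reductionSpace[localId] += reductionSpace[localId + 2];
        if (localId < 1)  reductionSpace[localId] += reductionSpace[localId + 1];
        return reductionSpace[0];
    }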
EDIT 2:
Now I am completely confused. The problem is the unrolling of the loop!! Or, to put it better, the synchronization of the warp. The following localSum() code works:
// compute sum of val over num threads
__device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
{
    reductionSpace[localId] = val; // load data into shared mem
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (localId < s)
        {
            reductionSpace[localId] += reductionSpace[localId + s];
        }
        __syncthreads();
    }

    return reductionSpace[0];
}
But if I unroll the last warp and do not synchronize between the threads, I again get 2 or 3 wrong results out of roughly 2000 runs. So the following code does NOT work:
// compute sum of val over num threads
__device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
{
    reductionSpace[localId] = val; // load data into shared mem
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
    {
        if (localId < s)
        {
            reductionSpace[localId] += reductionSpace[localId + s];
        }
        __syncthreads();
    }

    if (localId < 32)
    {
        reductionSpace[localId] += reductionSpace[localId + 32];
        reductionSpace[localId] += reductionSpace[localId + 16];
        reductionSpace[localId] += reductionSpace[localId + 8];
        reductionSpace[localId] += reductionSpace[localId + 4];
        reductionSpace[localId] += reductionSpace[localId + 2];
        reductionSpace[localId] += reductionSpace[localId + 1];
    }

    return reductionSpace[0];
}
But how does that make any sense, given that CUDA executes one warp (32 threads) simultaneously and no __syncthreads() should be needed?!
I don't need somebody to post working code here, but I would really appreciate it if somebody with a lot of experience and deep knowledge of CUDA programming could explain the underlying problem to me, or at least give me a hint.
Posted on 2015-05-01 02:44:28
The solution is so simple that I am almost ashamed to share it. I was so blinkered that I stared everywhere and did not see the most obvious thing in the code: a simple __syncthreads() was missing before the return statement in localSum(). The last warp itself is executed simultaneously, but there is no guarantee that the thread with threadID 0 is already done when the other threads read the result. It is such a stupid mistake, and I simply did not see it (a sketch of the fixed function is below).
Sorry for the trouble... :)
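For completeness, a sketch of the fixed localSum() (the unrolled version from EDIT 2 with the missing barrier added; this mirrors the // EDIT marker already placed in the code at the top of the question):

    // compute sum of val over num threads
    __device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
    {
        reductionSpace[localId] = val; // load data into shared mem
        __syncthreads();

        for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
        {
            if (localId < s)
            {
                reductionSpace[localId] += reductionSpace[localId + s];
            }
            __syncthreads();
        }

        if (localId < 32)
        {
            reductionSpace[localId] += reductionSpace[localId + 32];
            reductionSpace[localId] += reductionSpace[localId + 16];
            reductionSpace[localId] += reductionSpace[localId + 8];
            reductionSpace[localId] += reductionSpace[localId + 4];
            reductionSpace[localId] += reductionSpace[localId + 2];
            reductionSpace[localId] += reductionSpace[localId + 1];
        }

        // the missing barrier: make sure the last warp has finished before any
        // thread of the block reads the final sum
        __syncthreads();

        return reductionSpace[0];
    }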
https://stackoverflow.com/questions/29906486