/*
 * spGPU - Sparse matrices on GPU library.
 *
 * Copyright (C) 2010 - 2012
 *     Davide Barbieri - University of Rome Tor Vergata
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 3 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "stdio.h"
#include "cudalang.h"
#include "cudadebug.h"
#include "cuComplex.h"

extern "C"
{
#include "core.h"
#include "vector.h"
}

//#define USE_CUBLAS

#define BLOCK_SIZE 320
//#define BLOCK_SIZE 512
//#define ASSUME_LOCK_SYNC_PARALLELISM

// One partial sum per block; the host adds these up and takes the square
// root.  Capacity 128 bounds the grid size chosen in spgpuCnrm2 below.
static __device__ float snrm2ReductionResult[128];

/* Per-block partial reduction for the squared Euclidean norm of a complex
 * vector.
 *
 * Expected launch layout: 1-D grid, blockDim.x == BLOCK_SIZE.  Each thread
 * strides through x with step gridDim.x*BLOCK_SIZE accumulating |x[i]|^2;
 * the block then reduces its partial sums in shared memory and thread 0
 * publishes the block total to snrm2ReductionResult[blockIdx.x].
 *
 * n: number of elements of x (a thread whose start index is past the end
 *    performs zero steps).
 * x: device pointer to the input vector.
 */
__global__ void spgpuCnrm2_kern(int n, cuFloatComplex* x)
{
	__shared__ float sSum[BLOCK_SIZE];

	float res = 0.0f;

	cuFloatComplex* lastX = x + n;

	x += threadIdx.x + blockIdx.x*BLOCK_SIZE;

	int blockOffset = gridDim.x*BLOCK_SIZE;

	// Number of grid-stride steps this thread performs (ceiling division;
	// evaluates to <= 0 for threads starting past the end of the vector).
	int numSteps = (int)((lastX - x + blockOffset - 1)/blockOffset);

	// Main loop, manually unrolled by two so the second load is issued
	// before the first product is consumed (software prefetching).
	for (int j = 0; j < numSteps / 2; j++)
	{
		cuFloatComplex x1 = x[0]; x += blockOffset;
		cuFloatComplex x2 = x[0]; x += blockOffset;

		// |z|^2 == Re(z * conj(z))
		res = res + cuCrealf(cuCmulf(x1, cuConjf(x1)));
		res = res + cuCrealf(cuCmulf(x2, cuConjf(x2)));
	}

	// Odd trailing step, if any.
	if (numSteps % 2)
	{
		cuFloatComplex x1 = x[0];
		res = res + cuCrealf(cuCmulf(x1, cuConjf(x1)));
	}

	// Threads outside the first warp park their partial sum in shared
	// memory; the first warp keeps its own value in a register.
	if (threadIdx.x >= 32)
		sSum[threadIdx.x] = res;

	__syncthreads();

	// Reduction: the first warp folds in every other warp's partials,
	// then reduces within itself.
	if (threadIdx.x < 32)
	{
		for (int i = 1; i < (BLOCK_SIZE/32); ++i)
			res += sSum[i*32 + threadIdx.x];

		sSum[threadIdx.x] = res;

		// Intra-warp shared-memory tree reduction.  The __syncwarp()
		// barriers are required for correctness on Volta and later
		// (independent thread scheduling breaks the old implicit
		// warp-synchronous assumption); they are cheap on older parts.
		__syncwarp();
		if (threadIdx.x < 16) sSum[threadIdx.x] += sSum[threadIdx.x + 16];
		__syncwarp();
		if (threadIdx.x < 8)  sSum[threadIdx.x] += sSum[threadIdx.x + 8];
		__syncwarp();
		if (threadIdx.x < 4)  sSum[threadIdx.x] += sSum[threadIdx.x + 4];
		__syncwarp();
		if (threadIdx.x < 2)  sSum[threadIdx.x] += sSum[threadIdx.x + 2];
		__syncwarp();

		if (threadIdx.x == 0)
			snrm2ReductionResult[blockIdx.x] = sSum[0] + sSum[1];
	}
}

/* Euclidean norm (2-norm) of the n-element device vector x, i.e.
 * sqrtf(sum_i |x[i]|^2), computed with float accumulation.
 *
 * handle: spgpu handle; the kernel is launched on handle->currentStream.
 * n:      number of elements (n <= 0 returns 0).
 * x:      device pointer to the input vector of cuFloatComplex.
 *
 * NOTE(review): the original signature/tail of this function was lost to
 * text mangling; this reconstruction follows the spgpu vector.h naming
 * convention (spgpuCnrm2) — confirm against the library header.
 */
float spgpuCnrm2(spgpuHandle_t handle, int n, __device cuFloatComplex* x)
{
#ifdef USE_CUBLAS
	return cublasScnrm2(n, x, 1);
#else
	// Guard: a 0-block launch is a CUDA error, and the norm of an empty
	// vector is 0 anyway.
	if (n <= 0)
		return 0.0f;

	float res = 0.0f;

	int device;
	cudaGetDevice(&device);
	struct cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, device);

	// Never more than 128 blocks: snrm2ReductionResult and tRes hold one
	// partial result per block.
	int blocks = min(128, min(deviceProp.multiProcessorCount,
		(n + BLOCK_SIZE - 1)/BLOCK_SIZE));

	float tRes[128];

	spgpuCnrm2_kern<<<blocks, BLOCK_SIZE, 0, handle->currentStream>>>(n, x);

	// NOTE(review): this blocking symbol copy assumes currentStream is a
	// blocking stream w.r.t. the legacy default stream, so the kernel has
	// finished before the copy reads the symbol — confirm, or insert a
	// cudaStreamSynchronize(handle->currentStream) here.
	cudaMemcpyFromSymbol(tRes, snrm2ReductionResult, blocks*sizeof(float));

	// Final host-side accumulation of the per-block partial sums.
	for (int i = 0; i < blocks; ++i)
		res += tRes[i];

	cudaCheckError("CUDA error on cnrm2");

	return sqrtf(res);
#endif
}