/*
 * spGPU - Sparse matrices on GPU library.
 *
 * Copyright (C) 2010 - 2012
 *     Davide Barbieri - University of Rome Tor Vergata
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 3 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "stdio.h"
#include "cudalang.h"
#include "cudadebug.h"

extern "C"
{
#include "core.h"
#include "vector.h"
}

//#define USE_CUBLAS

#define BLOCK_SIZE 320
//#define BLOCK_SIZE 512

//#define ASSUME_LOCK_SYNC_PARALLELISM

// Per-block partial sums of squares, read back by the host via
// cudaMemcpyFromSymbol. Its size (128) caps the usable grid size.
static __device__ float snrm2ReductionResult[128];

/* Euclidean-norm partial-reduction kernel.
 *
 * Each block accumulates sum(x[i]*x[i]) over its grid-stride partition of
 * x[0..n) and stores the per-block partial sum in
 * snrm2ReductionResult[blockIdx.x]; the host finishes the reduction.
 *
 * Launch requirements: blockDim.x == BLOCK_SIZE, gridDim.x <= 128.
 * PREC_FADD/PREC_FMUL come from cudalang.h (precision-controlled add/mul).
 */
__global__ void spgpuSnrm2_kern(int n, float* x)
{
	__shared__ float sSum[BLOCK_SIZE];

	float res = 0;

	float* lastX = x + n;

	x += threadIdx.x + blockIdx.x*BLOCK_SIZE;

	int blockOffset = gridDim.x*BLOCK_SIZE;

	// Number of grid-stride steps this particular thread performs.
	int numSteps = (lastX - x + blockOffset - 1)/blockOffset;

	// prefetching: issue two loads per iteration so the second load
	// overlaps the FMA latency of the first
	for (int j = 0; j < numSteps / 2; j++)
	{
		float x1 = x[0]; x += blockOffset;
		float x2 = x[0]; x += blockOffset;

		res = PREC_FADD(res, PREC_FMUL(x1,x1));
		res = PREC_FADD(res, PREC_FMUL(x2,x2));
	}

	// odd tail step
	if (numSteps % 2)
	{
		float x1 = x[0];
		res = PREC_FADD(res, PREC_FMUL(x1,x1));
	}

	// NOTE(review): the reduction tail below was reconstructed — the original
	// text was corrupted from this point on. First-warp threads keep their
	// partial in a register; all other threads publish theirs to shared memory.
	if (threadIdx.x >= 32)
		sSum[threadIdx.x] = res;

	__syncthreads();

	// Start reduction! The first warp folds the other warps' partials
	// (BLOCK_SIZE/32 per lane) into its own register value.
	if (threadIdx.x < 32)
	{
		for (int i = 1; i < BLOCK_SIZE/32; ++i)
			res += sSum[i*32 + threadIdx.x];

		sSum[threadIdx.x] = res;
	}

	__syncthreads();

	// Final 32 -> 1 tree reduction. Uses block-wide barriers reached by every
	// thread (no barrier inside divergent flow) instead of assuming implicit
	// warp synchrony, so it is also safe under Volta+ independent scheduling.
	for (int s = 16; s > 0; s >>= 1)
	{
		if (threadIdx.x < s)
			sSum[threadIdx.x] += sSum[threadIdx.x + s];

		__syncthreads();
	}

	if (threadIdx.x == 0)
		snrm2ReductionResult[blockIdx.x] = sSum[0];
}

/* Computes the Euclidean norm sqrt(sum(x[i]^2)) of the device vector x
 * of length n.
 *
 * handle: spGPU handle; the kernel is launched on handle->currentStream.
 * n:      vector length (n == 0 returns 0 without launching a kernel).
 * x:      device pointer to the input vector.
 *
 * Blocking with respect to the host: the synchronous cudaMemcpyFromSymbol
 * (legacy default stream) orders after the kernel, so the returned value is
 * valid on return.
 */
float spgpuSnrm2(spgpuHandle_t handle, int n, __device float* x)
{
#ifdef USE_CUBLAS
	return cublasSnrm2(n, x, 1);
#else
	float res = 0;

	// Empty vector: avoid an illegal zero-block kernel launch.
	if (n == 0)
		return 0.0f;

	// Never launch more blocks than snrm2ReductionResult can hold (128),
	// nor more than the device/problem size makes useful.
	int blocks = min(128, min(handle->multiProcessorCount,
		(n+BLOCK_SIZE-1)/BLOCK_SIZE));

	float tRes[128];

	spgpuSnrm2_kern<<<blocks, BLOCK_SIZE, 0, handle->currentStream>>>(n, x);
	cudaMemcpyFromSymbol(tRes, snrm2ReductionResult, blocks*sizeof(float));

	// Final accumulation of the per-block partial sums on the host.
	for (int i = 0; i < blocks; ++i)
		res += tRes[i];

	cudaCheckError("CUDA error on snrm2");

	return sqrtf(res);
#endif
}