/*
 * spGPU - Sparse matrices on GPU library.
 *
 * Copyright (C) 2010 - 2012
 *     Davide Barbieri - University of Rome Tor Vergata
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 3 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "stdio.h"
#include "cudalang.h"
#include "cudadebug.h"

extern "C"
{
#include "core.h"
#include "vector.h"
}

//#define USE_CUBLAS

#ifdef USE_CUBLAS
#include "cublas.h"
#endif

#define BLOCK_SIZE 320
//#define BLOCK_SIZE 512

//#define ASSUME_LOCK_SYNC_PARALLELISM

#ifndef USE_CUBLAS
// One partial result per block; the host sums them, so at most 128 blocks.
static __device__ float sdotReductionResult[128];
#endif

__global__ void spgpuSdot_kern(int n, float* x, float* y)
{
	__shared__ float sSum[BLOCK_SIZE];

	float res = 0;

	float* lastX = x + n;

	x += threadIdx.x + blockIdx.x*BLOCK_SIZE;
	y += threadIdx.x + blockIdx.x*BLOCK_SIZE;

	int blockOffset = gridDim.x*BLOCK_SIZE;

	int numSteps = (lastX - x + blockOffset - 1)/blockOffset;

	// prefetching: walk the vectors with a grid-wide stride, two elements
	// per iteration so the second pair of loads overlaps the first multiply
	for (int j = 0; j < numSteps / 2; j++)
	{
		float x1 = x[0]; x += blockOffset;
		float y1 = y[0]; y += blockOffset;
		float x2 = x[0]; x += blockOffset;
		float y2 = y[0]; y += blockOffset;

		res = PREC_FADD(res, PREC_FMUL(x1,y1));
		res = PREC_FADD(res, PREC_FMUL(x2,y2));
	}

	// leftover element when numSteps is odd
	if (numSteps % 2)
	{
		res = PREC_FADD(res, PREC_FMUL(*x,*y));
	}

	if (threadIdx.x >= 32)
		sSum[threadIdx.x] = res;

	__syncthreads();

	// Start reduction!

	if (threadIdx.x < 32)
	{
		// Fold the partial sums of every other warp into the first one
		for (int i=1; i<BLOCK_SIZE/32; ++i)
		{
			res += sSum[i*32 + threadIdx.x];
		}

#ifdef ASSUME_LOCK_SYNC_PARALLELISM
		// Warp-synchronous tree reduction; volatile keeps the compiler
		// from caching shared-memory values in registers
		volatile float* vsSum = sSum;
		vsSum[threadIdx.x] = res;

		if (threadIdx.x < 16) vsSum[threadIdx.x] += vsSum[threadIdx.x + 16];
		if (threadIdx.x < 8)  vsSum[threadIdx.x] += vsSum[threadIdx.x + 8];
		if (threadIdx.x < 4)  vsSum[threadIdx.x] += vsSum[threadIdx.x + 4];
		if (threadIdx.x < 2)  vsSum[threadIdx.x] += vsSum[threadIdx.x + 2];

		if (threadIdx.x == 0)
			sdotReductionResult[blockIdx.x] = vsSum[0] + vsSum[1];
#else
		// Without the lock-step assumption, publish through a volatile
		// pointer and let lane 0 accumulate the 32 partials serially
		volatile float* vsSum = sSum;
		vsSum[threadIdx.x] = res;

		if (threadIdx.x == 0)
		{
			float blockSum = 0;
			for (int i=0; i<32; ++i)
				blockSum += vsSum[i];

			sdotReductionResult[blockIdx.x] = blockSum;
		}
#endif
	}
}

float spgpuSdot(spgpuHandle_t handle, int n, __device float* a, __device float* b)
{
#ifdef USE_CUBLAS
	return cublasSdot(n, a, 1, b, 1);
#else
	float res = 0;

	// Cap the grid at the size of the reduction buffer (128 blocks)
	int blocks = min(128, min(handle->multiProcessorCount, (n+BLOCK_SIZE-1)/BLOCK_SIZE));

	float tRes[128];

	spgpuSdot_kern<<<blocks, BLOCK_SIZE, 0, handle->currentStream>>>(n, a, b);
	cudaMemcpyFromSymbol(tRes, sdotReductionResult, blocks*sizeof(float));

	// Final accumulation of the per-block partials on the host
	for (int i=0; i<blocks; ++i)
	{
		res += tRes[i];
	}

	cudaCheckError("CUDA error on sdot");

	return res;
#endif
}
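
/*
 * Usage sketch (illustrative only, not part of the library). It assumes the
 * handle is created and released with spgpuCreate()/spgpuDestroy() from
 * core.h; the buffer names dx, dy and the length n are hypothetical.
 *
 *     spgpuHandle_t handle;
 *     spgpuCreate(&handle, 0);                    // bind to device 0
 *
 *     float *dx, *dy;
 *     cudaMalloc((void**)&dx, n*sizeof(float));   // device operands
 *     cudaMalloc((void**)&dy, n*sizeof(float));
 *     // ... upload data with cudaMemcpy ...
 *
 *     float dot = spgpuSdot(handle, n, dx, dy);   // dot = <dx, dy>
 *
 *     cudaFree(dx);
 *     cudaFree(dy);
 *     spgpuDestroy(handle);
 */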