Switch off OpenMP in certain sections of MatchBOXP

PolySmooth
sfilippone 7 months ago
parent 89e2d53e8b
commit 2f5072166d

@@ -67,7 +67,7 @@ void dMatchBoxPC(MilanLongInt NLVer, MilanLongInt NLEdge,
 #endif
-#define TIME_TRACKER
+#undef TIME_TRACKER
 #ifdef TIME_TRACKER
     double tmr = MPI_Wtime();
 #endif
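The hunk above switches the optional timing instrumentation off at preprocessing time: replacing #define TIME_TRACKER with #undef TIME_TRACKER makes every #ifdef TIME_TRACKER block, such as the MPI_Wtime() sample shown in the context lines, drop out of the compiled code. A minimal sketch of the same guard pattern, not taken from dMatchBoxPC and assuming an MPI build environment (e.g. mpicxx):

    #include <mpi.h>
    #include <cstdio>

    #undef TIME_TRACKER   // switch to "#define TIME_TRACKER" to re-enable the timer

    int main(int argc, char *argv[]) {
        MPI_Init(&argc, &argv);
    #ifdef TIME_TRACKER
        double tmr = MPI_Wtime();            // compiled only when the tracker is on
    #endif
        /* ... work being timed would go here ... */
    #ifdef TIME_TRACKER
        std::printf("elapsed: %f s\n", MPI_Wtime() - tmr);
    #endif
        MPI_Finalize();
        return 0;
    }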

@@ -33,13 +33,13 @@ void PARALLEL_PROCESS_EXPOSED_VERTEX_BD(MilanLongInt NLVer,
     MilanLongInt v = -1, k = -1, w = -1, adj11 = 0, adj12 = 0, k1 = 0;
     MilanInt ghostOwner = 0, option, igw;
-#pragma omp parallel private(option, k, w, v, k1, adj11, adj12, ghostOwner) \
+//#pragma omp parallel private(option, k, w, v, k1, adj11, adj12, ghostOwner) \
     firstprivate(privateU, StartIndex, EndIndex, privateQLocalVtx, \
                  privateQGhostVtx, privateQMsgType, privateQOwner) \
         default(shared) num_threads(NUM_THREAD)
     {
-#pragma omp for reduction(+ \
+//#pragma omp for reduction(+ \
                           : PCounter[:numProcs], myCard \
                           [:1], msgInd \
                           [:1], NumMessagesBundled \
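This hunk and the ones below switch the OpenMP constructs off by prefixing the first line of each directive with //. Because every directive here ends in a backslash-newline continuation, and line splicing happens before comments are recognized, the single leading // swallows all of the continuation lines, so the entire parallel region or for construct is disabled, not just its first clause. A minimal sketch of the effect, not taken from MatchBOXP:

    #include <cstdio>

    int main() {
    // The "//" below comments out the whole directive: the trailing backslash
    // splices the next line onto it before the comment is stripped.
    //#pragma omp parallel for default(shared) \
            num_threads(4)
        for (int i = 0; i < 4; i++)
            std::printf("iteration %d now runs sequentially\n", i);
        return 0;
    }

Building without -fopenmp would also neutralize these pragmas, since compilers ignore omp pragmas when OpenMP support is off, but commenting them out in place limits the change to just these sections while the rest of the code can still be built with OpenMP.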
@@ -217,13 +217,13 @@ void PARALLEL_PROCESS_EXPOSED_VERTEX_BS(MilanLongInt NLVer,
     MilanLongInt v = -1, k = -1, w = -1, adj11 = 0, adj12 = 0, k1 = 0;
     MilanInt ghostOwner = 0, option, igw;
-#pragma omp parallel private(option, k, w, v, k1, adj11, adj12, ghostOwner) \
+//#pragma omp parallel private(option, k, w, v, k1, adj11, adj12, ghostOwner) \
     firstprivate(privateU, StartIndex, EndIndex, privateQLocalVtx, \
                  privateQGhostVtx, privateQMsgType, privateQOwner) \
         default(shared) num_threads(NUM_THREAD)
     {
-#pragma omp for reduction(+ \
+//#pragma omp for reduction(+ \
                           : PCounter[:numProcs], myCard \
                           [:1], msgInd \
                           [:1], NumMessagesBundled \

@@ -338,7 +338,7 @@ void processMatchedVerticesS(
 #ifdef COUNT_LOCAL_VERTEX
     MilanLongInt localVertices = 0;
 #endif
-#pragma omp parallel private(k, w, v, k1, adj1, adj2, adj11, adj12, ghostOwner, option) \
+//#pragma omp parallel private(k, w, v, k1, adj1, adj2, adj11, adj12, ghostOwner, option) \
     firstprivate(privateU, StartIndex, EndIndex, privateQLocalVtx, privateQGhostVtx, \
                  privateQMsgType, privateQOwner, UChunkBeingProcessed) \
     default(shared) num_threads(NUM_THREAD) \

@@ -346,7 +346,7 @@ void processMatchedVerticesAndSendMessagesS(
 #ifdef COUNT_LOCAL_VERTEX
     MilanLongInt localVertices = 0;
 #endif
-#pragma omp parallel private(k, w, v, k1, adj1, adj2, adj11, adj12, ghostOwner, option) \
+//#pragma omp parallel private(k, w, v, k1, adj1, adj2, adj11, adj12, ghostOwner, option) \
     firstprivate(Message, privateU, StartIndex, EndIndex, privateQLocalVtx, privateQGhostVtx, \
                  privateQMsgType, privateQOwner, UChunkBeingProcessed) default(shared) \
     num_threads(NUM_THREAD) \
