Added MPI_Dist_graph_create_adjacent. Clean up needs to occur.

scr-persistent-collective
Soren Rasmussen 6 years ago
parent 5ab1d3d64b
commit 84c52822f0

@@ -64,6 +64,7 @@ subroutine psi_i_cnv_dsc(halo_in,ovrlap_in,ext_in,cdesc, info, mold)
use psi_mod, psi_protect_name => psi_i_cnv_dsc
use psb_realloc_mod
use mpi
implicit none
! ....scalars parameters....
@@ -82,6 +83,20 @@ subroutine psi_i_cnv_dsc(halo_in,ovrlap_in,ext_in,cdesc, info, mold)
integer(psb_ipk_) :: debug_level, debug_unit
logical, parameter :: debug=.false.
character(len=20) :: name
! ...Artless
integer(psb_ipk_), allocatable :: iaux(:)
integer(psb_ipk_) :: proc_to_comm, pnti, nerv, nesd, i, rcv_pt, snd_pt
integer(psb_ipk_), allocatable, dimension(:) :: prcid, brvidx, rvsz, bsdidx
integer(psb_ipk_), allocatable, dimension(:) :: sdsz
integer :: comm_size, comm_rank, ierr
logical, parameter :: reorder=.FALSE., persistent_mpi=.TRUE.
integer :: graph_comm, degree
integer(psb_ipk_), allocatable, dimension(:) :: max_degree, src, dest
integer(psb_ipk_), allocatable, dimension(:) :: max_n_send, max_n_recv ! needed for the allocate calls below
! to remove: for mpix_init
integer(psb_ipk_), allocatable, dimension(:) :: rcv_buf
integer :: buf_size, req
! ...end Artless
name='psi_cnv_desc'
call psb_get_erraction(err_act)
@@ -103,6 +118,7 @@ subroutine psi_i_cnv_dsc(halo_in,ovrlap_in,ext_in,cdesc, info, mold)
if (debug_level>0) write(debug_unit,*) me,'Calling crea_index on halo',&
& size(halo_in)
call psi_crea_index(cdesc,halo_in, idx_out,nxch,nsnd,nrcv,info)
if (info /= psb_success_) then
call psb_errpush(psb_err_from_subroutine_,name,a_err='psi_crea_index')
goto 9999
@@ -151,7 +167,7 @@ subroutine psi_i_cnv_dsc(halo_in,ovrlap_in,ext_in,cdesc, info, mold)
call psi_bld_ovr_mst(me,cdesc%ovrlap_elem,tmp_mst_idx,info)
if (info == psb_success_) call psi_crea_index(cdesc,&
& tmp_mst_idx,idx_out,nxch,nsnd,nrcv,info)
if (debug_level>0) write(debug_unit,*) me,'Done crea_indx'
if (debug_level>0 )write(debug_unit,*) me,'Done crea_indx'
if (info /= psb_success_) then
call psb_errpush(psb_err_from_subroutine_,name,a_err='psi_bld_ovr_mst')
goto 9999
@@ -172,6 +188,86 @@ subroutine psi_i_cnv_dsc(halo_in,ovrlap_in,ext_in,cdesc, info, mold)
call cdesc%v_ovr_mst_idx%bld(cdesc%ovr_mst_idx,mold=mold)
! ARTLESS: start of additions
ictxt = cdesc%get_ctxt() ! get context to then get
call psb_info(ictxt,me,np) ! rank and number of processors
! print *, "---artless:psi_i_cnv_dsc, me=",me," np=",np
iaux = cdesc%v_halo_index%get_vect() ! iaux is allocatable int array
! print *, "---artless:psi_i_cnv_dsc:",me," iaux = ", iaux
! psb_get_mpicomm with integer(psb_mpk_) because the standard says 4 bytes
! copying from base/internals/psi_dswapdata_a.F90
if (persistent_mpi) then ! artless: make a proper flag
allocate(prcid(0:np-1), brvidx(0:np-1), rvsz(0:np-1), bsdidx(0:np-1))
allocate(sdsz(0:np-1))
call MPI_Comm_rank(MPI_COMM_WORLD,comm_rank, ierr)
call MPI_Comm_size(MPI_COMM_WORLD,comm_size, ierr)
allocate(max_degree(comm_size))
allocate(max_n_send(comm_size))
allocate(max_n_recv(comm_size))
degree = 0
pnti = 1
snd_pt = 1
rcv_pt = 1
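! the loop below walks the halo index list; following the psi_dswapdata
! convention it is assumed to be laid out, per exchange, as
!   proc_id, n_recv, <n_recv receive indices>, n_send, <n_send send indices>
! terminated by -1, so each pass records the peer rank plus the
! send/receive counts and buffer offsets for that peer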
do while (iaux(pnti+psb_proc_id_) .ne. -1)
degree = degree + 1
proc_to_comm = iaux(pnti+psb_proc_id_)
max_degree(degree) = proc_to_comm
nerv = iaux(pnti+psb_n_elem_recv_)
nesd = iaux(pnti+nerv+psb_n_elem_send_)
print *, comm_rank, ": nerv", nerv, "nesd", nesd
call psb_get_rank(prcid(proc_to_comm),ictxt,proc_to_comm)
brvidx(proc_to_comm) = rcv_pt
rvsz(proc_to_comm) = nerv
bsdidx(proc_to_comm) = snd_pt
sdsz(proc_to_comm) = nesd
rcv_pt = rcv_pt + nerv
snd_pt = snd_pt + nesd
pnti = pnti + nerv + nesd + 3
end do
! source and destination nodes are the same
allocate(src(degree), dest(degree))
src = max_degree(1:degree)
dest = src
! create graph comm of MPI rank's communication
call MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, degree, src, &
MPI_UNWEIGHTED, degree, dest, MPI_UNWEIGHTED, MPI_INFO_NULL, &
reorder, graph_comm, ierr)
deallocate(src, dest)
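! possible sanity check on the new communicator (sketch only: indeg,
! outdeg and weighted are illustrative names, not declared above); for a
! graph built with MPI_Dist_graph_create_adjacent both degrees should
! match the degree counted in the loop
! call MPI_Dist_graph_neighbors_count(graph_comm, indeg, outdeg, weighted, ierr)
! if ((indeg /= degree) .or. (outdeg /= degree)) &
!      print *, comm_rank, ": unexpected graph degree", indeg, outdeg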
! TESTING MPIX_INIT HERE, WILL BE MOVED ELSEWHERE
! THE SEND BUF, can't I just send iaux?
! FOR RECV BUF, best way to figure out how big?
! buf_size = size(iaux)
! allocate(rcv_buf(buf_size))
! rcv_buf = 0
! buf_size = ! sum of
! buf_size = size(iaux) / 2
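! one way to size the buffers (sketch, reusing the loop results above;
! snd_counts/snd_displs/rcv_counts/rcv_displs are illustrative names):
! the neighbor alltoallv wants counts and displacements per neighbor, in
! the same order as the src/dest arrays, with 0-based displacements, and
! the total receive volume is just the sum of the per-neighbor counts
! allocate(snd_counts(degree), snd_displs(degree), &
!      rcv_counts(degree), rcv_displs(degree))
! do i = 1, degree
!   snd_counts(i) = sdsz(max_degree(i))
!   snd_displs(i) = bsdidx(max_degree(i)) - 1  ! loop offsets are 1-based
!   rcv_counts(i) = rvsz(max_degree(i))
!   rcv_displs(i) = brvidx(max_degree(i)) - 1
! end do
! buf_size = rcv_pt - 1        ! total elements to receive
! allocate(rcv_buf(buf_size))  ! matching send volume would be snd_pt - 1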
! call MPIX_Neighbor_alltoallv_init(iaux, buf_sizes, displs, MPI_INTEGER, &
! rcv_buf, buf_sizes, displs, MPI_INTEGER, graph_comm, MPI_INFO_NULL, &
! req, ierr)
! print *, "-----------------PRE START---------------"
! print *, comm_rank, ": sends", size(iaux), "to", src
! print *, comm_rank, ": buf_size", buf_size, "size(iaux)", size(iaux), "snd_buf", iaux
! call MPI_Start(req, ierr)
! call MPI_Wait(req, MPI_STATUS_IGNORE, ierr)
! print *, comm_rank, ": buf_size", buf_size, "rcv_buf", rcv_buf
! print *, rank, ": iaux"
print *, "====END WAIT===="
end if
! ARTLESS: end of additions
if (info /= psb_success_) then
call psb_errpush(psb_err_from_subroutine_,name,a_err='psi_crea_bnd_elem')
goto 9999
@@ -494,4 +590,3 @@ subroutine psi_i_bld_ovr_mst(me,ovrlap_elem,mst_idx,info)
return
end subroutine psi_i_bld_ovr_mst
