Commit ea846d5c authored by Antoine Cyril David Hoffmann

HeLaZ now includes magnetic shear. Linearly benchmarked against GENE.

parent a9696f93
Branch: shear_version
@@ -37,7 +37,7 @@ implicit none
! Some geometrical coefficients
REAL(dp), PUBLIC, DIMENSION(:,:) , ALLOCATABLE :: gradz_coeff ! 1 / [ J_{xyz} \hat{B} ]
! Array to map the index of mode (kx,ky,-pi) to (kx+2pi*s*ky,ky,pi) for sheared periodic boundary condition
INTEGER, PUBLIC, DIMENSION(:,:), ALLOCATABLE :: ikx_zBC_map
INTEGER, PUBLIC, DIMENSION(:,:), ALLOCATABLE :: ikx_zBC_L, ikx_zBC_R
! Functions
PUBLIC :: geometry_readinputs, geometry_outputinputs,&
eval_magnetic_geometry, set_ikx_zBC_map
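! Note (sketch): the maps above encode the twist-and-shift connection
!   (kx, ky, z = -pi*Npol)  <-->  (kx + 2*pi*shear*Npol*ky, ky, z = +pi*Npol)
! and, since dkx = 2*pi*shear*Npol*dky (see the diagrams below), a mode at
! ky = m*dky is shifted by exactly m kx-grid points when crossing the z boundary.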
@@ -271,44 +271,71 @@ CONTAINS
SUBROUTINE set_ikx_zBC_map
IMPLICIT NONE
INTEGER, DIMENSION(:), ALLOCATABLE :: ikx_array
INTEGER :: shift
ALLOCATE(ikx_array(ikxs:ikxe))
DO ikx = ikxs,ikxe
ikx_array(ikx) = MODULO(ikx - Nx/2,Nx) + 1
REAL(dp) :: shift, kx_shift, kxmax_, kxmin_
ALLOCATE(ikx_zBC_R(ikys:ikye,ikxs:ikxe))
ALLOCATE(ikx_zBC_L(ikys:ikye,ikxs:ikxe))
!! No shear case (simple id mapping)
!3 | 1 2 3 4 5 6 | ky = 3 dky
!2 ky | 1 2 3 4 5 6 | ky = 2 dky
!1 A | 1 2 3 4 5 6 | ky = 1 dky
!0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky
!(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=free)
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikx_zBC_L(iky,ikx) = ikx
ikx_zBC_R(iky,ikx) = ikx
ENDDO
ENDDO
! Modify the connection maps only at the z boundaries
IF(SHEARED) THEN
!! We allocate a mapping to tell where the current mode will point for the
! parallel periodic sheared BC for the kx index:
! map for 0 Dirichlet BC
!3 | 1 2 3 4 5 -1 -1 -1|
!2 ky | 8 1 2 3 4 5 -1 -1|
!1 A | 7 8 1 2 3 4 5 -1|
!0 | -> kx | 6____7____8____1____2____3____4____5|
ALLOCATE(ikx_zBC_map(ikys:ikye,ikxs:ikxe))
ikx_zBC_map(ikys:ikye,ikxs:ikxe) = -1
DO iky = ikys,ikye
DO ikx = ikxs,ikxe - iky + 1
shift = ikx_array(MODULO(ikx+iky-1,Nx+1))
ikx_zBC_map(iky,ikx) = shift
ENDDO
ENDDO
! connection map BC of the RIGHT boundary (z=pi*Npol-dz)
!3 | 4 x x x 2 3 | ky = 3 dky
!2 ky | 3 4 x x 1 2 | ky = 2 dky
!1 A | 2 3 4 x 6 1 | ky = 1 dky
!0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky
!kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky)
kxmax_ = kx_max
IF(contains_zmax) THEN ! Check if the process is at the end of the flux tube
DO iky = ikys,ikye
shift = 2._dp*shear*PI*kyarray(iky)*Npol
DO ikx = ikxs,ikxe
kx_shift = kxarray(ikx) + shift
IF(kx_shift .GT. kxmax_) THEN ! outside of the frequency domain
ikx_zBC_R(iky,ikx) = -99
ELSE
ikx_zBC_R(iky,ikx) = (ikx+iky)-1
IF(SINGLE_KY) ikx_zBC_R(iky,ikx) = (ikx+(iky+1))-1
IF( ikx_zBC_R(iky,ikx) .GT. Nkx ) &
ikx_zBC_R(iky,ikx) = ikx_zBC_R(iky,ikx) - Nkx
ENDIF
ENDDO
ENDDO
ENDIF
! connection map BC of the LEFT boundary (z=-pi*Npol)
!3 | x 5 6 1 x x | ky = 3 dky
!2 ky | 5 6 1 2 x x | ky = 2 dky
!1 A | 6 1 2 3 x 5 | ky = 1 dky
!0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky
!(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky)
kxmin_ = -kx_max+deltakx
IF(contains_zmin) THEN ! Check if the process is at the start of the flux tube
DO iky = ikys,ikye
shift = 2._dp*shear*PI*kyarray(iky)*Npol
DO ikx = ikxs,ikxe
kx_shift = kxarray(ikx) - shift
IF( kx_shift .LT. kxmin_ ) THEN ! outside of the frequency domain
ikx_zBC_L(iky,ikx) = -99
ELSE
ikx_zBC_L(iky,ikx) = (ikx-iky)+1
IF(SINGLE_KY) ikx_zBC_L(iky,ikx) = (ikx-(iky+1))+1
IF( ikx_zBC_L(iky,ikx) .LE. 0 ) &
ikx_zBC_L(iky,ikx) = ikx_zBC_L(iky,ikx) + Nkx
ENDIF
ENDDO
ENDDO
ENDIF
ELSE
!! No shear case (simple id mapping)
!3 | 6 7 8 1 2 3 4 5|
!2 ky | 6 7 8 1 2 3 4 5|
!1 A | 6 7 8 1 2 3 4 5|
!0 | -> kx | 6____7____8____1____2____3____4____5|
DO iky = ikys,ikye
ikx_zBC_map(iky,:) = ikx_array(:)
ENDDO
ENDIF
! IF (my_id .EQ. 0) THEN
! write(*,*) 'ikx map for parallel BC'
! DO, iky = ikys,ikye
! write(*,*) ( ikx_zBC_map(iky,ikx), ikx=ikxs,ikxe )
! enddo
! ENDIF
END SUBROUTINE set_ikx_zBC_map
!
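A minimal, self-contained sketch of the right-boundary map that set_ikx_zBC_map builds, for a hypothetical Nkx = 6 grid and a one-grid-point shift (all names are local to this example; it reproduces the ky = 1*dky row of the right-boundary diagram above):

PROGRAM zbc_map_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: nkx = 6
  REAL,    PARAMETER :: dkx = 0.1, kx_max = 0.3, eps = 1.e-6
  ! Hypothetical FFT-ordered kx grid: 0, dkx, 2dkx, 3dkx, -2dkx, -dkx
  REAL, DIMENSION(nkx), PARAMETER :: kx = (/0.0, 0.1, 0.2, 0.3, -0.2, -0.1/)
  REAL    :: shift
  INTEGER :: ikx, map
  shift = dkx ! one ky step, i.e. 2*pi*shear*Npol*dky = dkx
  DO ikx = 1,nkx
    IF (kx(ikx) + shift .GT. kx_max + eps) THEN
      map = -99                         ! connected mode leaves the kx box (zero BC)
    ELSE
      map = ikx + 1                     ! shift by one kx-grid point
      IF (map .GT. nkx) map = map - nkx ! wrap within the FFT ordering
    ENDIF
    WRITE(*,'(A,I2,A,I4)') ' ikx = ', ikx, ' -> ', map
  ENDDO
END PROGRAM zbc_map_sketch

It prints 2, 3, 4, -99, 6, 1, i.e. the "2 3 4 x 6 1" row of the diagram.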
@@ -4,8 +4,7 @@ USE fields, ONLY : moments_e, moments_i, phi
USE grid
USE time_integration
USE model, ONLY : KIN_E
USE geometry, ONLY : SHEARED, ikx_zBC_map
USE geometry, ONLY : SHEARED, ikx_zBC_L, ikx_zBC_R
IMPLICIT NONE
INTEGER :: status(MPI_STATUS_SIZE), source, dest, count, ipg
@@ -121,192 +120,179 @@ END SUBROUTINE update_ghosts_p_i
! ^ ^
!Periodic: 0 1
SUBROUTINE update_ghosts_z_e
USE parallel, ONLY : buff_pjxy_zBC_e
IMPLICIT NONE
INTEGER :: ikxBC
CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
IF (num_procs_z .GT. 1) THEN
count = (ipge_e-ipgs_e+1)*(ijge_e-ijgs_e+1)*(ikye-ikys+1)*(ikxe-ikxs+1)
!!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!!
! Send the last local moment to fill the -1 neighbour ghost
CALL mpi_sendrecv(moments_e(:,:,:,:,ize ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 20, & ! Send to Up the last
moments_e(:,:,:,:,izs-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 20, & ! Receive from Down the first-1
comm0, status, ierr)
CALL mpi_sendrecv(moments_e(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 21, & ! Send to Up the last-1
moments_e(:,:,:,:,izs-2,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 21, & ! Receive from Down the first-2
comm0, status, ierr)
INTEGER :: ikxBC_L, ikxBC_R
IF(Nz .GT. 1) THEN
IF (num_procs_z .GT. 1) THEN
CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
count = (ipge_e-ipgs_e+1)*(ijge_e-ijgs_e+1)*(ikye-ikys+1)*(ikxe-ikxs+1)
!!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!!
CALL mpi_sendrecv(moments_e(:,:,:,:,izs ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 22, & ! Send to Down the first
moments_e(:,:,:,:,ize+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 22, & ! Receive from Up the last+1
comm0, status, ierr)
CALL mpi_sendrecv(moments_e(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 23, & ! Send to Down the first+1
moments_e(:,:,:,:,ize+2,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 23, & ! Receive from Up the last+2
comm0, status, ierr)
ELSE ! still need to perform periodic boundary conditions
IF(SHEARED) THEN
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikxBC = ikx_zBC_map(ikx,iky);
IF (ikxBC .NE. -1) THEN ! Exchanging the modes that have a periodic pair
! first-1 gets last
moments_e(iky,ikx,:,:,izs-1,updatetlevel) = moments_e(iky,ikxBC,:,:,ize ,updatetlevel)
! first-2 gets last-1
moments_e(iky,ikx,:,:,izs-2,updatetlevel) = moments_e(iky,ikxBC,:,:,ize-1,updatetlevel)
! last+1 gets first
moments_e(iky,ikx,:,:,ize+1,updatetlevel) = moments_e(iky,ikxBC,:,:,izs ,updatetlevel)
! last+2 gets first+1
moments_e(iky,ikx,:,:,ize+2,updatetlevel) = moments_e(iky,ikxBC,:,:,izs+1,updatetlevel)
ELSE
moments_e(iky,ikx,:,:,izs-1,updatetlevel) = 0._dp
moments_e(iky,ikx,:,:,izs-2,updatetlevel) = 0._dp
moments_e(iky,ikx,:,:,ize+1,updatetlevel) = 0._dp
moments_e(iky,ikx,:,:,ize+2,updatetlevel) = 0._dp
ENDIF
ENDDO
ENDDO
ELSE ! No shear so simple periodic BC
! first-1 gets last
moments_e(:,:,:,:,izs-1,updatetlevel) = moments_e(:,:,:,:,ize ,updatetlevel)
! first-2 gets last-1
moments_e(:,:,:,:,izs-2,updatetlevel) = moments_e(:,:,:,:,ize-1,updatetlevel)
! last+1 gets first
moments_e(:,:,:,:,ize+1,updatetlevel) = moments_e(:,:,:,:,izs ,updatetlevel)
! last+2 gets first+1
moments_e(:,:,:,:,ize+2,updatetlevel) = moments_e(:,:,:,:,izs+1,updatetlevel)
ENDIF
!!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!!
! Send the last local moment to fill the -1 neighbour ghost
CALL mpi_sendrecv(moments_e(:,:,:,:,ize ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 20, & ! Send to Up the last
buff_pjxy_zBC_e(:,:,:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 20, & ! Receive from Down the first-1
comm0, status, ierr)
CALL mpi_sendrecv(moments_e(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 21, & ! Send to Up the last-1
buff_pjxy_zBC_e(:,:,:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 21, & ! Receive from Down the first-2
comm0, status, ierr)
!!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!!
CALL mpi_sendrecv(moments_e(:,:,:,:,izs ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 22, & ! Send to Down the first
buff_pjxy_zBC_e(:,:,:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 22, & ! Receive from Up the last+1
comm0, status, ierr)
CALL mpi_sendrecv(moments_e(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 23, & ! Send to Down the first+1
buff_pjxy_zBC_e(:,:,:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 23, & ! Receive from Up the last+2
comm0, status, ierr)
ELSE ! No MPI in z: local copy
buff_pjxy_zBC_e(:,:,:,:,-1) = moments_e(:,:,:,:,ize ,updatetlevel)
buff_pjxy_zBC_e(:,:,:,:,-2) = moments_e(:,:,:,:,ize-1,updatetlevel)
buff_pjxy_zBC_e(:,:,:,:,+1) = moments_e(:,:,:,:,izs ,updatetlevel)
buff_pjxy_zBC_e(:,:,:,:,+2) = moments_e(:,:,:,:,izs+1,updatetlevel)
ENDIF
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikxBC_L = ikx_zBC_L(iky,ikx);
IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair
! first-1 gets last
moments_e(:,:,iky,ikx,izs-1,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-1)
! first-2 gets last-1
moments_e(:,:,iky,ikx,izs-2,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-2)
ELSE
moments_e(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp
moments_e(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp
ENDIF
ikxBC_R = ikx_zBC_R(iky,ikx);
IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair
! last+1 gets first
moments_e(:,:,iky,ikx,ize+1,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+1)
! last+2 gets first+1
moments_e(:,:,iky,ikx,ize+2,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+2)
ELSE
moments_e(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp
moments_e(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp
ENDIF
ENDDO
ENDDO
ENDIF
END SUBROUTINE update_ghosts_z_e
SUBROUTINE update_ghosts_z_i
USE parallel, ONLY : buff_pjxy_zBC_i
IMPLICIT NONE
INTEGER :: ikxBC
CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
IF (num_procs_z .GT. 1) THEN
count = (ipge_i-ipgs_i+1)*(ijge_i-ijgs_i+1)*(ikye-ikys+1)*(ikxe-ikxs+1)
!!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!!
! Send the last local moment to fill the -1 neighbour ghost
CALL mpi_sendrecv(moments_i(:,:,:,:,ize ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 30, & ! Send to Up the last
moments_i(:,:,:,:,izs-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 30, & ! Receive from Down the first-1
comm0, status, ierr)
CALL mpi_sendrecv(moments_i(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 31, & ! Send to Up the last-1
moments_i(:,:,:,:,izs-2,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 31, & ! Receive from Down the first-2
comm0, status, ierr)
!!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!!
CALL mpi_sendrecv(moments_i(:,:,:,:,izs ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 32, & ! Send to Down the first
moments_i(:,:,:,:,ize+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 32, & ! Receive from Up the last+1
comm0, status, ierr)
CALL mpi_sendrecv(moments_i(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 33, & ! Send to Down the first+1
moments_i(:,:,:,:,ize+2,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 33, & ! Receive from Up the last+2
comm0, status, ierr)
ELSE ! still need to perform periodic boundary conditions
IF(SHEARED) THEN
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikxBC = ikx_zBC_map(iky,ikx);
IF (ikxBC .NE. -1) THEN ! Exchanging the modes that have a periodic pair
! first-1 gets last
moments_i(:,:,iky,ikx,izs-1,updatetlevel) = moments_i(:,:,iky,ikxBC,ize ,updatetlevel)
! first-2 gets last-1
moments_i(:,:,iky,ikx,izs-2,updatetlevel) = moments_i(:,:,iky,ikxBC,ize-1,updatetlevel)
! last+1 gets first
moments_i(:,:,iky,ikx,ize+1,updatetlevel) = moments_i(:,:,iky,ikxBC,izs ,updatetlevel)
! last+2 gets first+1
moments_i(:,:,iky,ikx,ize+2,updatetlevel) = moments_i(:,:,iky,ikxBC,izs+1,updatetlevel)
ELSE
moments_i(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp
moments_i(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp
moments_i(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp
moments_i(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp
ENDIF
ENDDO
ENDDO
ELSE ! No shear so simple periodic BC
! first-1 gets last
moments_i(:,:,:,:,izs-1,updatetlevel) = moments_i(:,:,:,:,ize ,updatetlevel)
! first-2 gets last-1
moments_i(:,:,:,:,izs-2,updatetlevel) = moments_i(:,:,:,:,ize-1,updatetlevel)
! last+1 gets first
moments_i(:,:,:,:,ize+1,updatetlevel) = moments_i(:,:,:,:,izs ,updatetlevel)
! last+2 gets first+1
moments_i(:,:,:,:,ize+2,updatetlevel) = moments_i(:,:,:,:,izs+1,updatetlevel)
ENDIF
ENDIF
END SUBROUTINE update_ghosts_z_i
SUBROUTINE update_ghosts_z_phi
IMPLICIT NONE
INTEGER :: ikxBC
CALL cpu_time(t0_ghost)
INTEGER :: ikxBC_L, ikxBC_R
IF(Nz .GT. 1) THEN
CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
IF (num_procs_z .GT. 1) THEN
count = (ikye-ikys+1) * (ikxe-ikxs+1)
CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
count = (ipge_i-ipgs_i+1)*(ijge_i-ijgs_i+1)*(ikye-ikys+1)*(ikxe-ikxs+1)
!!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!!
! Send the last local moment to fill the -1 neighbour ghost
CALL mpi_sendrecv(phi(:,:,ize ), count, MPI_DOUBLE_COMPLEX, nbr_U, 40, & ! Send to Up the last
phi(:,:,izs-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 40, & ! Receive from Down the first-1
CALL mpi_sendrecv(moments_i(:,:,:,:,ize ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 24, & ! Send to Up the last
buff_pjxy_zBC_i(:,:,:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 24, & ! Receive from Down the first-1
comm0, status, ierr)
CALL mpi_sendrecv(phi(:,:,ize-1), count, MPI_DOUBLE_COMPLEX, nbr_U, 41, & ! Send to Up the last-1
phi(:,:,izs-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 41, & ! Receive from Down the first-2
CALL mpi_sendrecv(moments_i(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 25, & ! Send to Up the last-1
buff_pjxy_zBC_i(:,:,:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 25, & ! Receive from Down the first-2
comm0, status, ierr)
!!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!!
CALL mpi_sendrecv(phi(:,:,izs ), count, MPI_DOUBLE_COMPLEX, nbr_D, 42, & ! Send to Down the first
phi(:,:,ize+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 42, & ! Recieve from Up the last+1
CALL mpi_sendrecv(moments_i(:,:,:,:,izs ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 26, & ! Send to Down the first
buff_pjxy_zBC_i(:,:,:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 26, & ! Receive from Up the last+1
comm0, status, ierr)
CALL mpi_sendrecv(phi(:,:,izs+1), count, MPI_DOUBLE_COMPLEX, nbr_D, 43, & ! Send to Down the first+1
phi(:,:,ize+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 43, & ! Recieve from Up the last+2
CALL mpi_sendrecv(moments_i(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 27, & ! Send to Down the first+1
buff_pjxy_zBC_i(:,:,:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 27, & ! Receive from Up the last+2
comm0, status, ierr)
ELSE ! still need to perform periodic boundary conditions
phi(:,:,izs-1) = phi(:,:,ize)
phi(:,:,izs-2) = phi(:,:,ize-1)
phi(:,:,ize+1) = phi(:,:,izs)
phi(:,:,ize+2) = phi(:,:,izs+1)
IF(SHEARED) THEN
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikxBC = ikx_zBC_map(iky,ikx);
IF (ikxBC .NE. -1) THEN ! Exchanging the modes that have a periodic pair
! first-1 gets last
phi(iky,ikx,izs-1) = phi(iky,ikxBC,ize )
! first-2 gets last-1
phi(iky,ikx,izs-2) = phi(iky,ikxBC,ize-1)
! last+1 gets first
phi(iky,ikx,ize+1) = phi(iky,ikxBC,izs )
! last+2 gets first+1
phi(iky,ikx,ize+2) = phi(iky,ikxBC,izs+1)
ELSE
phi(iky,ikx,izs-1) = 0._dp
phi(iky,ikx,izs-2) = 0._dp
phi(iky,ikx,ize+1) = 0._dp
phi(iky,ikx,ize+2) = 0._dp
ENDIF
ENDDO
ENDDO
ELSE ! No shear so simple periodic BC
! first-1 gets last
phi(:,:,izs-1) = phi(:,:,ize )
! first-2 gets last-1
phi(:,:,izs-2) = phi(:,:,ize-1)
! last+1 gets first
phi(:,:,ize+1) = phi(:,:,izs )
! last+2 gets first+1
phi(:,:,ize+2) = phi(:,:,izs+1)
ENDIF
ELSE ! No MPI in z: local copy
buff_pjxy_zBC_i(:,:,:,:,-1) = moments_i(:,:,:,:,ize ,updatetlevel)
buff_pjxy_zBC_i(:,:,:,:,-2) = moments_i(:,:,:,:,ize-1,updatetlevel)
buff_pjxy_zBC_i(:,:,:,:,+1) = moments_i(:,:,:,:,izs ,updatetlevel)
buff_pjxy_zBC_i(:,:,:,:,+2) = moments_i(:,:,:,:,izs+1,updatetlevel)
ENDIF
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikxBC_L = ikx_zBC_L(iky,ikx);
IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair
! first-1 gets last
moments_i(:,:,iky,ikx,izs-1,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-1)
! first-2 gets last-1
moments_i(:,:,iky,ikx,izs-2,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-2)
ELSE
moments_i(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp
moments_i(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp
ENDIF
ikxBC_R = ikx_zBC_R(iky,ikx);
IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair
! last+1 gets first
moments_i(:,:,iky,ikx,ize+1,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+1)
! last+2 gets first+1
moments_i(:,:,iky,ikx,ize+2,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+2)
ELSE
moments_i(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp
moments_i(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp
ENDIF
ENDDO
ENDDO
ENDIF
END SUBROUTINE update_ghosts_z_i
SUBROUTINE update_ghosts_z_phi
USE parallel, ONLY : buff_xy_zBC
IMPLICIT NONE
INTEGER :: ikxBC_L, ikxBC_R
CALL cpu_time(t0_ghost)
IF(Nz .GT. 1) THEN
IF (num_procs_z .GT. 1) THEN
CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
count = (ikye-ikys+1) * (ikxe-ikxs+1)
!!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!!
CALL mpi_sendrecv( phi(:,:,ize ), count, MPI_DOUBLE_COMPLEX, nbr_U, 30, & ! Send to Up the last
buff_xy_zBC(:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 30, & ! Receive from Down the first-1
comm0, status, ierr)
CALL mpi_sendrecv( phi(:,:,ize-1), count, MPI_DOUBLE_COMPLEX, nbr_U, 31, & ! Send to Up the last-1
buff_xy_zBC(:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 31, & ! Receive from Down the first-2
comm0, status, ierr)
!!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!!
CALL mpi_sendrecv( phi(:,:,izs ), count, MPI_DOUBLE_COMPLEX, nbr_D, 32, & ! Send to Down the first
buff_xy_zBC(:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 32, & ! Receive from Up the last+1
comm0, status, ierr)
CALL mpi_sendrecv( phi(:,:,izs+1), count, MPI_DOUBLE_COMPLEX, nbr_D, 33, & ! Send to Down the first+1
buff_xy_zBC(:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 33, & ! Receive from Up the last+2
comm0, status, ierr)
ELSE
buff_xy_zBC(:,:,-1) = phi(:,:,ize )
buff_xy_zBC(:,:,-2) = phi(:,:,ize-1)
buff_xy_zBC(:,:,+1) = phi(:,:,izs )
buff_xy_zBC(:,:,+2) = phi(:,:,izs+1)
ENDIF
DO iky = ikys,ikye
DO ikx = ikxs,ikxe
ikxBC_L = ikx_zBC_L(iky,ikx);
IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair
! first-1 gets last
phi(iky,ikx,izs-1) = buff_xy_zBC(iky,ikxBC_L,-1)
! first-2 gets last-1
phi(iky,ikx,izs-2) = buff_xy_zBC(iky,ikxBC_L,-2)
ELSE
phi(iky,ikx,izs-1) = 0._dp
phi(iky,ikx,izs-2) = 0._dp
ENDIF
ikxBC_R = ikx_zBC_R(iky,ikx);
IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair
! last+1 gets first
phi(iky,ikx,ize+1) = buff_xy_zBC(iky,ikxBC_R,+1)
! last+2 gets first+1
phi(iky,ikx,ize+2) = buff_xy_zBC(iky,ikxBC_R,+2)
ELSE
phi(iky,ikx,ize+1) = 0._dp
phi(iky,ikx,ize+2) = 0._dp
ENDIF
ENDDO
ENDDO
ENDIF
CALL cpu_time(t1_ghost)
tc_ghost = tc_ghost + (t1_ghost - t0_ghost)
END SUBROUTINE update_ghosts_z_phi
END MODULE ghosts
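The three update_ghosts_z_* routines above share one pattern: the boundary z-planes are first staged in a buffer (via mpi_sendrecv when z is distributed, via a local copy otherwise), then scattered into the ghost planes through the kx connection maps; receiving directly into the ghost planes would skip the kx remap that the sheared BC requires. A self-contained sketch of the sequential path for the left ghosts (hypothetical sizes; the map values copy two rows of the left-boundary diagram in set_ikx_zBC_map):

PROGRAM ghost_remap_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: nky = 2, nkx = 6, nz = 8
  COMPLEX :: f(nky,nkx,0:nz+1)          ! plane 0 plays the role of izs-1
  COMPLEX :: buff(nky,nkx)              ! stands in for buff_xy_zBC(:,:,-1)
  INTEGER :: map_L(nky,nkx), iky, ikx
  f = (1.0, 0.0)
  map_L(1,:) = (/6, 1, 2, 3, -99, 5/)   ! ky =   dky: kx shifted by one point
  map_L(2,:) = (/5, 6, 1, 2, -99, -99/) ! ky = 2*dky: kx shifted by two points
  ! 1) stage the last interior plane (the MPI branch fills buff by sendrecv)
  buff = f(:,:,nz)
  ! 2) scatter into the ghost plane through the map; -99 = no periodic pair
  DO iky = 1,nky
    DO ikx = 1,nkx
      IF (map_L(iky,ikx) .NE. -99) THEN
        f(iky,ikx,0) = buff(iky,map_L(iky,ikx))
      ELSE
        f(iky,ikx,0) = (0.0, 0.0)       ! Dirichlet: zero the unpaired mode
      ENDIF
    ENDDO
  ENDDO
END PROGRAM ghost_remap_sketch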
@@ -82,7 +82,9 @@ MODULE grid
LOGICAL, PUBLIC, PROTECTED :: contains_kx0 = .false. ! flag if the proc contains kx=0 index
LOGICAL, PUBLIC, PROTECTED :: contains_ky0 = .false. ! flag if the proc contains ky=0 index
LOGICAL, PUBLIC, PROTECTED :: contains_kymax = .false. ! flag if the proc contains ky=kymax index
LOGICAL, PUBLIC, PROTECTED :: contains_zmax = .false. ! flag if the proc contains z=pi-dz index
LOGICAL, PUBLIC, PROTECTED :: contains_zmin = .false. ! flag if the proc contains z=-pi index
LOGICAL, PUBLIC, PROTECTED :: SINGLE_KY = .false. ! flag for a single non-zero ky simulation
! Grid containing the polynomials degrees
INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_e, parray_e_full
INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_i, parray_i_full
@@ -286,9 +288,9 @@ CONTAINS
Nky = Ny/2+1 ! Defined only on positive ky since fields are real
! Grid spacings
IF (Ny .EQ. 1) THEN
deltaky = 0._dp
ky_max = 0._dp
ky_min = 0._dp
deltaky = 2._dp*PI/Ly
ky_max = deltaky
ky_min = deltaky
ELSE
deltaky = 2._dp*PI/Ly
ky_max = Nky*deltaky
@@ -315,7 +317,13 @@ CONTAINS
ENDDO
! Creating a grid ordered as dk*(0 1 2 3)
DO iky = ikys,ikye
kyarray(iky) = REAL(iky-1,dp) * deltaky
IF(Ny .EQ. 1) THEN
kyarray(iky) = deltaky
kyarray_full(iky) = deltaky
SINGLE_KY = .TRUE.
ELSE
kyarray(iky) = REAL(iky-1,dp) * deltaky
ENDIF
! Finding ky=0
IF (kyarray(iky) .EQ. 0) THEN
iky_0 = iky
@@ -351,7 +359,7 @@ CONTAINS
INTEGER :: i_, counter
IF(shear .GT. 0._dp) THEN
IF(my_id.EQ.0) write(*,*) 'Magnetic shear detected: setting up sheared kx grid...'
Lx = Ly/(2._dp*pi*shear)
Lx = Ly/(2._dp*pi*shear*Npol)
ENDIF
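! Note: this is the twist-and-shift quantization dkx = 2*pi*shear*Npol*dky
! with dkx = 2*pi/Lx and dky = 2*pi/Ly. Hypothetical example: Ly = 120,
! shear = 0.8, Npol = 1 gives Lx = 120/(2*pi*0.8) ~ 23.87.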
Nkx = Nx;
! Local data
......@@ -361,7 +369,7 @@ CONTAINS
local_nkx = ikxe - ikxs + 1
ALLOCATE(kxarray(ikxs:ikxe))
ALLOCATE(kxarray_full(1:Nkx))
IF (Nx .EQ. 1) THEN ! "cancel" y dimension
IF (Nx .EQ. 1) THEN
deltakx = 1._dp
kxarray(1) = 0._dp
ikx_0 = 1
@@ -419,7 +427,7 @@ CONTAINS
USE model, ONLY: mu_z
IMPLICIT NONE
INTEGER :: i_, fid
REAL :: grid_shift, Lz
REAL :: grid_shift, Lz, zmax, zmin
INTEGER :: ip, istart, iend, in
total_nz = Nz
! Length of the flux tube (in ballooning angle)
@@ -441,8 +449,11 @@ CONTAINS
! Build the full grids on process 0 to diagnose it without comm
ALLOCATE(zarray_full(1:Nz))
IF (Nz .EQ. 1) Npol = 0
DO iz = 1,total_nz
zarray_full(iz) = REAL(iz-1,dp)*deltaz - PI*REAL(Npol,dp)
zmax = 0; zmin = 0;
DO iz = 1,total_nz ! z in [-pi, pi-dz] * Npol
zarray_full(iz) = REAL(iz-1,dp)*deltaz - Lz/2._dp
IF(zarray_full(iz) .GT. zmax) zmax = zarray_full(iz)
IF(zarray_full(iz) .LT. zmin) zmin = zarray_full(iz)
END DO
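! Example (hypothetical values): Nz = 8, Npol = 1 give Lz = 2*pi and deltaz = pi/4,
! so zarray_full = -pi, -3pi/4, ..., +3pi/4; hence zmin = -pi and zmax = pi - deltaz.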
!! Parallel data distribution
! Local data distribution
@@ -484,6 +495,10 @@ CONTAINS
zarray(iz,1) = zarray_full(iz) + grid_shift
ENDIF
ENDDO
IF(abs(zarray(izs,0) - zmin) .LT. EPSILON(zmin)) &
contains_zmin = .TRUE.
IF(abs(zarray(ize,0) - zmax) .LT. EPSILON(zmax)) &
contains_zmax = .TRUE.
! Weights for Simpson's rule
ALLOCATE(zweights_SR(izs:ize))
DO iz = izs,ize
MODULE parallel
USE basic
USE grid, ONLY: Nkx,Nky,Nz, ikys,ikye, izs,ize, local_nky, local_nz, &
local_np_e, local_np_i, Np_e, Np_i, Nj_e, Nj_i, &
pmaxi, pmaxe, ips_e, ipe_e, ips_i, ipe_i, &
jmaxi, jmaxe, ijs_e, ije_e, ijs_i, ije_i, &
rcv_p_e, rcv_p_i, dsp_p_e, dsp_p_i
USE grid
use prec_const
USE model, ONLY: KIN_E
IMPLICIT NONE
! receive and displacement counts for gatherv
INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_y, dsp_y, rcv_zy, dsp_zy
@@ -16,6 +13,11 @@ MODULE parallel
INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_yp_i, dsp_yp_i
INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zyp_i, dsp_zyp_i
! Various buffers used
COMPLEX(dp), ALLOCATABLE, DIMENSION(:,:,:) :: buff_xy_zBC
COMPLEX(dp), ALLOCATABLE, DIMENSION(:,:,:,:,:) :: buff_pjxy_zBC_e
COMPLEX(dp), ALLOCATABLE, DIMENSION(:,:,:,:,:) :: buff_pjxy_zBC_i
PUBLIC :: manual_0D_bcast, manual_3D_bcast, init_parallel_var, &
gather_xyz, gather_pjz_i, gather_pjxyz_e, gather_pjxyz_i
@@ -115,6 +117,12 @@ CONTAINS
dsp_zyp_e(i_) = dsp_zyp_e(i_-1) + rcv_zyp_e(i_-1)
END DO
!! Allocate some buffers
ALLOCATE(buff_xy_zBC(ikys:ikye,ikxs:ikxe,-2:2))
IF(KIN_E) &
ALLOCATE(buff_pjxy_zBC_e(ipgs_e:ipge_e,ijgs_e:ijge_e,ikys:ikye,ikxs:ikxe,-2:2))
ALLOCATE(buff_pjxy_zBC_i(ipgs_i:ipge_i,ijgs_i:ijge_i,ikys:ikye,ikxs:ikxe,-2:2))
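! Note: the last dimension (-2:2) of these buffers holds the four ghost planes,
! -2,-1 for the left (z = -pi*Npol) side and +1,+2 for the right side (index 0 unused).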
END SUBROUTINE init_parallel_var
!!!!! Gather a field in spatial coordinates on rank 0 !!!!!