From 988802af4c94850f9078c9c48957806dfcd053f4 Mon Sep 17 00:00:00 2001 From: Antoine Hoffmann <antoine.hoffmann@epfl.ch> Date: Tue, 7 Mar 2023 11:18:25 +0100 Subject: [PATCH] the new version compiles, must be tested now --- Makefile | 46 +- fort_example.90 | 4 +- matlab/setup.m | 4 +- matlab/write_fort90.m | 4 +- src/advance_field_mod.F90 | 30 +- src/array_mod.F90 | 93 +- src/auxval.F90 | 42 +- src/basic_mod.F90 | 202 ++-- src/calculus_mod.F90 | 247 +++-- src/closure_mod.F90 | 193 ++-- src/collision_mod.F90 | 869 ++++----------- src/control.F90 | 47 +- src/cosolver_interface_mod.F90 | 277 +++++ src/diagnose.F90 | 405 +++---- src/diagnostics_par_mod.F90 | 25 +- src/endrun.F90 | 2 +- src/fields_mod.F90 | 6 +- src/fourier_mod.F90 | 107 +- src/geometry_mod.F90 | 476 ++++----- src/ghosts_mod.F90 | 449 +++----- src/grid_mod.F90 | 670 ++++++------ src/inital.F90 | 539 ++++------ src/initial_par_mod.F90 | 25 +- src/memory.F90 | 175 +-- src/miller_mod.F90 | 98 +- src/model_mod.F90 | 153 +-- src/moments_eq_rhs_mod.F90 | 509 ++++----- src/nonlinear_mod.F90 | 499 ++------- src/numerical_experiments_mod.F90 | 113 -- src/numerics_mod.F90 | 441 ++++---- src/parallel_mod.F90 | 492 +++++---- src/ppexit.F90 | 2 +- src/prec_const_mod.F90 | 1 - src/processing_mod.F90 | 997 ++++++------------ src/readinputs.F90 | 4 + src/restarts_mod.F90 | 329 ++---- src/solve_EM_fields.F90 | 149 +-- src/species_mod.F90 | 196 +++- src/stepon.F90 | 155 ++- src/tesend.F90 | 17 +- src/time_integration_mod.F90 | 84 +- src/utility_mod.F90 | 13 +- testcases/cyclone_example/fort_00.90 | 4 +- testcases/matlab_testscripts/Hallenbert.m | 2 +- .../linear_1D_entropy_mode.m | 2 +- testcases/matlab_testscripts/linear_damping.m | 2 +- testcases/smallest_problem/fort.90 | 101 ++ testcases/smallest_problem/fort_00.90 | 4 +- testcases/smallest_problem/fort_01.90 | 4 +- testcases/zpinch_example/fort_00.90 | 4 +- wk/CBC_P_J_scan.m | 2 +- wk/CBC_hypcoll_PJ_scan.m | 2 +- wk/CBC_kT_PJ_scan.m | 2 +- wk/CBC_kT_nu_scan.m | 2 +- wk/CBC_nu_PJ_scan.m | 2 +- wk/benchmark scripts/Ajay_scan_CH4_lin_ITG.m | 2 +- wk/lin_3D_Zpinch.m | 2 +- wk/lin_ETPY.m | 2 +- wk/lin_ITG.m | 2 +- wk/lin_KBM.m | 2 +- wk/lin_MTM.m | 2 +- wk/lin_RHT.m | 2 +- wk/lin_TEM.m | 2 +- wk/local_run.m | 2 +- wk/marconi_run.m | 2 +- wk/quick_run.m | 2 +- 66 files changed, 3848 insertions(+), 5496 deletions(-) create mode 100644 src/cosolver_interface_mod.F90 delete mode 100644 src/numerical_experiments_mod.F90 create mode 100644 testcases/smallest_problem/fort.90 diff --git a/Makefile b/Makefile index baac06c4..c0c7dee3 100644 --- a/Makefile +++ b/Makefile @@ -81,9 +81,9 @@ $(OBJDIR)/ghosts_mod.o $(OBJDIR)/grid_mod.o $(OBJDIR)/inital.o \ $(OBJDIR)/initial_par_mod.o $(OBJDIR)/lag_interp_mod.o $(OBJDIR)/main.o \ $(OBJDIR)/memory.o $(OBJDIR)/miller_mod.o $(OBJDIR)/model_mod.o \ $(OBJDIR)/moments_eq_rhs_mod.o $(OBJDIR)/numerics_mod.o $(OBJDIR)/parallel_mod.o \ -$(OBJDIR)/ppexit.o $(OBJDIR)/ppinit.o $(OBJDIR)/prec_const_mod.o \ +$(OBJDIR)/ppexit.o $(OBJDIR)/prec_const_mod.o \ $(OBJDIR)/processing_mod.o $(OBJDIR)/readinputs.o $(OBJDIR)/restarts_mod.o \ -$(OBJDIR)/solve_EM_fields.o $(OBJDIR)/stepon.o $(OBJDIR)/tesend.o \ +$(OBJDIR)/solve_EM_fields.o $(OBJDIR)/species_mod.o $(OBJDIR)/stepon.o $(OBJDIR)/tesend.o \ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(EXEC): $(FOBJ) @@ -114,11 +114,11 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(OBJDIR)/auxval.o : src/auxval.F90 \ $(OBJDIR)/fourier_mod.o $(OBJDIR)/memory.o $(OBJDIR)/model_mod.o \ $(OBJDIR)/geometry_mod.o 
$(OBJDIR)/grid_mod.o $(OBJDIR)/numerics_mod.o \ - $(OBJDIR)/parallel_mod.o + $(OBJDIR)/parallel_mod.o $(OBJDIR)/processing_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/auxval.F90 -o $@ $(OBJDIR)/basic_mod.o : src/basic_mod.F90 \ - $(OBJDIR)/prec_const_mod.o + $(OBJDIR)/prec_const_mod.o $(OBJDIR)/parallel_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/basic_mod.F90 -o $@ $(OBJDIR)/calculus_mod.o : src/calculus_mod.F90 \ @@ -137,17 +137,22 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/closure_mod.F90 -o $@ $(OBJDIR)/collision_mod.o : src/collision_mod.F90 \ - $(OBJDIR)/array_mod.o $(OBJDIR)/basic_mod.o $(OBJDIR)/fields_mod.o \ - $(OBJDIR)/grid_mod.o $(OBJDIR)/model_mod.o $(OBJDIR)/prec_const_mod.o \ - $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o + $(OBJDIR)/array_mod.o $(OBJDIR)/basic_mod.o $(OBJDIR)/cosolver_interface_mod.o\ + $(OBJDIR)/fields_mod.o $(OBJDIR)/grid_mod.o $(OBJDIR)/model_mod.o \ + $(OBJDIR)/prec_const_mod.o $(OBJDIR)/species_mod.o $(OBJDIR)/time_integration_mod.o \ + $(OBJDIR)/utility_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/collision_mod.F90 -o $@ $(OBJDIR)/control.o : src/control.F90 \ $(OBJDIR)/auxval.o $(OBJDIR)/geometry_mod.o $(OBJDIR)/prec_const_mod.o \ - $(OBJDIR)/basic_mod.o $(OBJDIR)/ppexit.o $(OBJDIR)/ppinit.o \ - $(OBJDIR)/readinputs.o $(OBJDIR)/tesend.o + $(OBJDIR)/basic_mod.o $(OBJDIR)/ppexit.o $(OBJDIR)/readinputs.o $(OBJDIR)/tesend.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/control.F90 -o $@ + $(OBJDIR)/cosolver_interface_mod.o : src/cosolver_interface_mod.F90 \ + $(OBJDIR)/grid_mod.o $(OBJDIR)/array_mod.o $(OBJDIR)/model_mod.o $(OBJDIR)/species_mod.o\ + $(OBJDIR)/prec_const_mod.o + $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/cosolver_interface_mod.F90 -o $@ + $(OBJDIR)/diagnose.o : src/diagnose.F90 \ $(OBJDIR)/prec_const_mod.o $(OBJDIR)/processing_mod.o $(OBJDIR)/array_mod.o \ $(OBJDIR)/basic_mod.o $(OBJDIR)/diagnostics_par_mod.o $(OBJDIR)/fields_mod.o \ @@ -169,7 +174,7 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/fields_mod.F90 -o $@ $(OBJDIR)/fourier_mod.o : src/fourier_mod.F90 \ - $(OBJDIR)/basic_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/grid_mod.o + $(OBJDIR)/basic_mod.o $(OBJDIR)/prec_const_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/fourier_mod.F90 -o $@ $(OBJDIR)/geometry_mod.o : src/geometry_mod.F90 \ @@ -180,11 +185,12 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(OBJDIR)/ghosts_mod.o : src/ghosts_mod.F90 \ $(OBJDIR)/basic_mod.o $(OBJDIR)/fields_mod.o $(OBJDIR)/grid_mod.o\ - $(OBJDIR)/geometry_mod.o $(OBJDIR)/ppinit.o $(OBJDIR)/time_integration_mod.o + $(OBJDIR)/geometry_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/time_integration_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/ghosts_mod.F90 -o $@ $(OBJDIR)/grid_mod.o : src/grid_mod.F90 \ - $(OBJDIR)/basic_mod.o $(OBJDIR)/model_mod.o $(OBJDIR)/prec_const_mod.o + $(OBJDIR)/basic_mod.o $(OBJDIR)/fourier_mod.o $(OBJDIR)/model_mod.o \ + $(OBJDIR)/prec_const_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/grid_mod.F90 -o $@ $(OBJDIR)/inital.o : src/inital.F90 \ @@ -218,7 +224,7 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/miller_mod.F90 -o $@ $(OBJDIR)/model_mod.o : src/model_mod.F90 \ - $(OBJDIR)/prec_const_mod.o + 
$(OBJDIR)/grid_mod.o $(OBJDIR)/prec_const_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/model_mod.F90 -o $@ $(OBJDIR)/moments_eq_rhs_mod.o : src/moments_eq_rhs_mod.F90 \ @@ -239,19 +245,13 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/numerics_mod.F90 -o $@ $(OBJDIR)/parallel_mod.o : src/parallel_mod.F90 \ - $(OBJDIR)/basic_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/grid_mod.o + $(OBJDIR)/prec_const_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/parallel_mod.F90 -o $@ $(OBJDIR)/ppexit.o : src/ppexit.F90 \ $(OBJDIR)/prec_const_mod.o $(OBJDIR)/basic_mod.o $(OBJDIR)/coeff_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/ppexit.F90 -o $@ - $(OBJDIR)/ppinit.o : src/ppinit.F90 \ - $(OBJDIR)/array_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/grid_mod.o\ - $(OBJDIR)/fields_mod.o $(OBJDIR)/array_mod.o $(OBJDIR)/time_integration_mod.o \ - $(OBJDIR)/basic_mod.o - $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/ppinit.F90 -o $@ - $(OBJDIR)/prec_const_mod.o : src/prec_const_mod.F90 $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/prec_const_mod.F90 -o $@ @@ -270,11 +270,15 @@ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/utility_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/restarts_mod.F90 -o $@ $(OBJDIR)/solve_EM_fields.o : src/solve_EM_fields.F90 \ - $(OBJDIR)/array_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/grid_mod.o \ + $(OBJDIR)/array_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/grid_mod.o \ $(OBJDIR)/ghosts_mod.o $(OBJDIR)/fields_mod.o $(OBJDIR)/array_mod.o \ $(OBJDIR)/time_integration_mod.o $(OBJDIR)/basic_mod.o $(OBJDIR)/parallel_mod.o $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/solve_EM_fields.F90 -o $@ + $(OBJDIR)/species_mod.o : src/species_mod.F90 \ + $(OBJDIR)/basic_mod.o $(OBJDIR)/model_mod.o $(OBJDIR)/prec_const_mod.o + $(F90) -c $(F90FLAGS) $(FPPFLAGS) $(EXTMOD) $(EXTINC) src/species_mod.F90 -o $@ + $(OBJDIR)/stepon.o : src/stepon.F90 \ $(OBJDIR)/initial_par_mod.o $(OBJDIR)/prec_const_mod.o $(OBJDIR)/advance_field_mod.o \ $(OBJDIR)/basic_mod.o $(OBJDIR)/nonlinear_mod.o $(OBJDIR)/grid_mod.o \ diff --git a/fort_example.90 b/fort_example.90 index 7607e1e1..667ca822 100644 --- a/fort_example.90 +++ b/fort_example.90 @@ -74,8 +74,8 @@ / &COLLISION_PAR collision_model = 'DG' !collision model (DG,SG,LD,LR), all need a matrix except DG - gyrokin_CO = .f. !activate gyrokinetic terms in the CO - interspecies = .t. !activate interspecies if CO has some + GK_CO = .f. !activate gyrokinetic terms in the CO + INTERSPECIES = .t. !activate INTERSPECIES if CO has some mat_file = 'LDGK_P10_J5_dk_5e-2_km_5_NFLR_30.h5' !path to find the collision matrix collision_kcut = 1.8 !maximal wavelength of the CO matrix. For higher kperp, the matrix at collision_kcut will be applied. 
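The two collision flags are renamed to GK_CO and INTERSPECIES here, and the collision_mod.F90 hunk further down also makes collision_model accept spelled-out aliases. As an illustration only (flag names and aliases come from this patch; the matrix file and cutoff reuse the values already shown above), a Landau-operator input block would read:

&COLLISION_PAR
  collision_model = 'landau'  ! 'LD', 'coulomb' or 'fokker-planck' select the same operator
  GK_CO           = .t.       ! gyrokinetic terms in the collision operator
  INTERSPECIES    = .t.       ! keep inter-species coupling if the matrix file provides it
  mat_file        = 'LDGK_P10_J5_dk_5e-2_km_5_NFLR_30.h5'
  collision_kcut  = 1.8
/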
/ diff --git a/matlab/setup.m b/matlab/setup.m index 468f08c5..aa1c410a 100644 --- a/matlab/setup.m +++ b/matlab/setup.m @@ -58,8 +58,8 @@ MODEL.k_cB = k_cB; % Magnetic curvature MODEL.lambdaD = LAMBDAD; % Collision parameters COLL.collision_model = ['''',CO,'''']; -if (GKCO); COLL.gyrokin_CO = '.true.'; else; COLL.gyrokin_CO = '.false.';end; -if (ABCO); COLL.interspecies = '.true.'; else; COLL.interspecies = '.false.';end; +if (GKCO); COLL.GK_CO = '.true.'; else; COLL.GK_CO = '.false.';end; +if (ABCO); COLL.INTERSPECIES = '.true.'; else; COLL.INTERSPECIES = '.false.';end; COLL.mat_file = '''null'''; switch CO case 'SG' diff --git a/matlab/write_fort90.m b/matlab/write_fort90.m index 54e7f08b..23bbd924 100644 --- a/matlab/write_fort90.m +++ b/matlab/write_fort90.m @@ -86,8 +86,8 @@ fprintf(fid,'/\n'); fprintf(fid,'&COLLISION_PAR\n'); fprintf(fid,[' collision_model = ', COLL.collision_model,'\n']); -fprintf(fid,[' gyrokin_CO = ', COLL.gyrokin_CO,'\n']); -fprintf(fid,[' interspecies = ', COLL.interspecies,'\n']); +fprintf(fid,[' GK_CO = ', COLL.GK_CO,'\n']); +fprintf(fid,[' INTERSPECIES = ', COLL.INTERSPECIES,'\n']); fprintf(fid,[' mat_file = ', COLL.mat_file,'\n']); fprintf(fid,[' collision_kcut = ', num2str(COLL.coll_kcut),'\n']); fprintf(fid,'/\n'); diff --git a/src/advance_field_mod.F90 b/src/advance_field_mod.F90 index 5fd45d9f..7605db7a 100644 --- a/src/advance_field_mod.F90 +++ b/src/advance_field_mod.F90 @@ -20,30 +20,22 @@ CONTAINS USE time_integration USE grid use prec_const - USE model, ONLY: CLOS, KIN_E - use fields, ONLY: moments_e, moments_i - use array, ONLY: moments_rhs_e, moments_rhs_i + USE model, ONLY: CLOS + use fields, ONLY: moments + use array, ONLY: moments_rhs IMPLICIT NONE - INTEGER :: p_int, j_int + INTEGER :: p_int, j_int, ia, ip, ij CALL cpu_time(t0_adv_field) - IF(KIN_E) THEN - DO ip=ips_e,ipe_e - p_int = parray_e(ip) - DO ij=ijs_e,ije_e - j_int = jarray_e(ij) - IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmaxe))& - CALL advance_field(moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izs:ize,:), moments_rhs_e(ip,ij,ikys:ikye,ikxs:ikxe,izs:ize,:)) + DO ia=ias,iae + DO ip=ips,ipe + p_int = parray(ip) + DO ij=ijs,ije + j_int = jarray(ij) + IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmax))& + CALL advance_field(moments(ia,ip,ij,ikys:ikye,ikxs:ikxe,izs:ize,:), moments_rhs(ia,ip,ij,ikys:ikye,ikxs:ikxe,izs:ize,:)) ENDDO ENDDO - ENDIF - DO ip=ips_i,ipe_i - p_int = parray_i(ip) - DO ij=ijs_i,ije_i - j_int = jarray_i(ij) - IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmaxi))& - CALL advance_field(moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izs:ize,:), moments_rhs_i(ip,ij,ikys:ikye,ikxs:ikxe,izs:ize,:)) - ENDDO ENDDO ! Execution time end CALL cpu_time(t1_adv_field) diff --git a/src/array_mod.F90 b/src/array_mod.F90 index 213c108b..7b46d23f 100644 --- a/src/array_mod.F90 +++ b/src/array_mod.F90 @@ -3,45 +3,28 @@ MODULE array use prec_const implicit none - ! Arrays to store the rhs, for time integration (ip,ij,iky,ikx,iz,updatetlevel) - COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments_rhs_e - COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments_rhs_i + ! Arrays to store the rhs, for time integration (ia,ip,ij,iky,ikx,iz) + COMPLEX(dp), DIMENSION(:,:,:,:,:,:,:), ALLOCATABLE :: moments_rhs ! Arrays of non-adiabatique moments - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: nadiab_moments_e - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: nadiab_moments_i + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: nadiab_moments ! 
Derivatives and interpolated moments - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: ddz_nepj - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: interp_nepj - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: ddzND_Nepj - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: ddz_nipj - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: interp_nipj - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: ddzND_Nipj - - ! Arrays to store special initial modes (semi linear simulation) - ! Zonal ones (ky=0) - COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: moments_e_ZF - COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: moments_i_ZF - COMPLEX(dp), DIMENSION(:,:), ALLOCATABLE :: phi_ZF - ! non-zonal modes (kx=0) - COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: moments_e_NZ - COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: moments_i_NZ - COMPLEX(dp), DIMENSION(:,:), ALLOCATABLE :: phi_NZ + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: ddz_napj + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: interp_napj + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: ddzND_Napj ! Non linear term array (ip,ij,iky,ikx,iz) - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: Sepj ! electron - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: Sipj ! ion + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: Sapj ! electron - ! To load collision matrix (ip,ij,iky,ikx,iz) - REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: Ceepj, CeipjT - REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: CeipjF - REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: Ciipj, CiepjT - REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: CiepjF + ! self collision matrix (ia,ip,ij,iky,ikx,iz) + REAL(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: Caa + ! Test and field collision matrices (ia,ib,ip,ij,iky,ikx,iz) + REAL(dp), DIMENSION(:,:,:,:,:,:,:), ALLOCATABLE :: Cab_F, Cab_T ! Collision term (ip,ij,iky,ikx,iz) - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: TColl_e, TColl_i - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: TColl_e_local, TColl_i_local + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: Capj + COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: TColl_e_local, TColl_i_local ! dnjs coefficient storage (in, ij, is) COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: dnjs @@ -50,20 +33,14 @@ MODULE array COMPLEX(dp), DIMENSION(:), ALLOCATABLE :: dv4_Hp_coeff ! lin rhs p,j coefficient storage (ip,ij) - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xnepj,xnipj - REAL(dp), DIMENSION(:), ALLOCATABLE :: xnepp1j, xnepm1j, xnepp2j, xnepm2j, xnepjp1, xnepjm1 - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: ynepp1j, ynepm1j, ynepp1jm1, ynepm1jm1 ! mirror lin coeff for non adiab mom - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: zNepm1j, zNepm1jp1, zNepm1jm1 ! mirror lin coeff for adiab mom - REAL(dp), DIMENSION(:), ALLOCATABLE :: xnipp1j, xnipm1j, xnipp2j, xnipm2j, xnipjp1, xnipjm1 - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: ynipp1j, ynipm1j, ynipp1jm1, ynipm1jm1 ! mirror lin coeff for non adiab mom - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: zNipm1j, zNipm1jp1, zNipm1jm1 ! mirror lin coeff for adiab mom - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xphij_e, xphijp1_e, xphijm1_e - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xphij_i, xphijp1_i, xphijm1_i - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xpsij_e, xpsijp1_e, xpsijm1_e - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xpsij_i, xpsijp1_i, xpsijm1_i - ! 
Kernel function evaluation (ij,iky,ikx,iz,odd/even p) - REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: kernel_e - REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: kernel_i + REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: xnapj + REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xnapp1j, xnapp2j, xnapm1j, xnapm2j, xnapjp1, xnapjm1 + REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: ynapp1j, ynapm1j, ynapp1jm1, ynapm1jm1 ! mirror lin coeff for non adiab mom + REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: zNapm1j, zNapm1jp1, zNapm1jm1 ! mirror lin coeff for adiab mom + REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: xphij, xphijp1, xphijm1 + REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: xpsij, xpsijp1, xpsijm1 + ! Kernel function evaluation (ia,ij,iky,ikx,iz,odd/even p) + REAL(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: kernel ! Poisson operator (iky,ikx,iz) REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: inv_poisson_op @@ -71,30 +48,22 @@ MODULE array REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: inv_pol_ion REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: HF_phi_correction_operator - ! Gyrocenter density for electron and ions (iky,ikx,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Ne00 - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Ni00 + ! Gyrocenter density for electron and ions (ia,iky,ikx,iz) + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: Na00 - ! Kinetic spectrum sum_kx,ky(|Napj(z)|^2), (ip,ij,iz) (should be real) - REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: Nepjz - REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: Nipjz + ! Kinetic spectrum sum_kx,ky(|Napj(z)|^2), (ia,ip,ij,iz) (should be real) + REAL(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: Napjz ! particle density for electron and ions (iky,ikx,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: dens_e - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: dens_i + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: dens ! particle fluid velocity for electron and ions (iky,ikx,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: upar_e - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: upar_i - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: uper_e - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: uper_i + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: upar + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: uper ! particle temperature for electron and ions (iky,ikx,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Tpar_e - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Tpar_i - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Tper_e - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Tper_i - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: temp_e - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: temp_i + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: Tpar + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: Tper + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: temp END MODULE array diff --git a/src/auxval.F90 b/src/auxval.F90 index b236c48e..3b2e27ee 100644 --- a/src/auxval.F90 +++ b/src/auxval.F90 @@ -9,34 +9,21 @@ subroutine auxval use prec_const USE numerics USE geometry - USE parallel, ONLY: init_parallel_var + USE parallel, ONLY: init_parallel_var, my_id, num_procs, num_procs_p, num_procs_z, num_procs_ky, rank_p, rank_ky, rank_z + USE processing, ONLY: init_process IMPLICIT NONE - INTEGER :: i_ + INTEGER :: i_, ierr IF (my_id .EQ. 0) WRITE(*,*) '=== Set auxiliary values ===' - IF (LINEARITY .NE. 'linear') THEN - IF (my_id .EQ. 0) write(*,*) 'FFTW3 y-grid distribution' - CALL init_grid_distr_and_plans(Nx,Ny) - ELSE - CALL init_1Dgrid_distr - IF (my_id .EQ. 
0) write(*,*) 'Manual y-grid distribution' - ENDIF ! Init the grids - CALL set_pgrid ! parallel kin (MPI distributed) - - CALL set_jgrid ! perp kin - - CALL set_kxgrid(shear) ! radial modes (MPI distributed by FFTW) - - CALL set_kygrid ! azymuthal modes - - CALL set_zgrid ! field aligned angle - IF ((my_id .EQ. 0) .AND. SG) WRITE(*,*) '--2 staggered z grids--' + CALL set_grids(shear,Npol) ! radial modes (MPI distributed by FFTW) CALL memory ! Allocate memory for global arrays - CALL init_parallel_var + CALL init_parallel_var(local_np,total_np,local_nky,total_nky,local_nz) + + CALL init_process CALL eval_magnetic_geometry ! precompute coeff for lin equation @@ -64,13 +51,9 @@ subroutine auxval WRITE(*,'(A9,I3,A10,I3,A10,I3,A9,I3)')& 'my_id = ', my_id, ', rank_p = ', rank_p, ', rank_ky = ', rank_ky,', rank_z = ', rank_z WRITE(*,'(A22,I3,A11,I3)')& - ' ips_e = ', ips_e, ', ipe_e = ', ipe_e - WRITE(*,'(A22,I3,A11,I3)')& - ' ijs_e = ', ijs_e, ', ije_e = ', ije_e + ' ips = ', ips, ', ipe = ', ipe WRITE(*,'(A22,I3,A11,I3)')& - ' ips_i = ', ips_i, ', ipe_i = ', ipe_i - WRITE(*,'(A22,I3,A11,I3)')& - ' ijs_i = ', ijs_i, ', ije_i = ', ije_i + ' ijs = ', ijs, ', ije = ', ije WRITE(*,'(A22,I3,A11,I3)')& ' ikxs = ', ikxs , ', ikxe = ', ikxe WRITE(*,'(A22,I3,A11,I3)')& @@ -83,10 +66,7 @@ subroutine auxval ENDDO CALL mpi_barrier(MPI_COMM_WORLD, ierr) - IF((my_id.EQ.0) .AND. (CLOS .EQ. 1)) THEN - IF(KIN_E) & - write(*,*) 'Closure = 1 -> Maximal Nepj degree is min(Pmaxe,2*Jmaxe+1): De = ', dmaxi - write(*,*) 'Closure = 1 -> Maximal Nipj degree is min(Pmaxi,2*Jmaxi+1): Di = ', dmaxi - ENDIF + IF((CLOS .EQ. 1)) & + CALL speak('Closure = 1 -> Maximal Napj degree is min(Pmax,2*Jmax+1): D = '// str(dmax)) END SUBROUTINE auxval diff --git a/src/basic_mod.F90 b/src/basic_mod.F90 index c0ae90b5..9013db29 100644 --- a/src/basic_mod.F90 +++ b/src/basic_mod.F90 @@ -1,76 +1,63 @@ MODULE basic ! Basic module for time dependent problems use, intrinsic :: iso_c_binding - use prec_const + use prec_const, ONLY : dp IMPLICIT none - + PRIVATE ! INCLUDE 'fftw3-mpi.f03' - - INTEGER :: nrun = 1 ! Number of time steps to run - real(dp) :: tmax = 100000.0 ! Maximum simulation time - real(dp) :: dt = 1.0 ! Time step - real(dp) :: time = 0 ! Current simulation time (Init from restart file) - - INTEGER :: comm0 ! Default communicator with a topology - INTEGER :: group0 ! Default group with a topology - INTEGER :: rank_0 ! Ranks in comm0 - ! Communicators for 1-dim cartesian subgrids of comm0 - INTEGER :: comm_p, comm_ky, comm_z - INTEGER :: rank_p, rank_ky, rank_z! Ranks - INTEGER :: comm_pz, rank_pz ! 2D comm for N_a(p,j,z) output (mspfile) - INTEGER :: comm_kyz, rank_kyz ! 2D comm for N_a(p,j,z) output (mspfile) - INTEGER :: comm_ky0, rank_ky0 ! comm along ky with p=0 - INTEGER :: comm_z0, rank_z0 ! comm along z with p=0 - - INTEGER :: group_ky0, group_z0 - - INTEGER :: jobnum = 0 ! Job number - INTEGER :: step = 0 ! Calculation step of this run - INTEGER :: cstep = 0 ! Current step number (Init from restart file) - LOGICAL :: nlend = .FALSE. ! Signal end of run - LOGICAL :: crashed = .FALSE. ! Signal end of crashed run - - INTEGER :: ierr ! flag for MPI error - INTEGER :: my_id ! Rank in COMM_WORLD - INTEGER :: num_procs ! number of MPI processes - INTEGER :: num_procs_p ! Number of processes in p - INTEGER :: num_procs_ky ! Number of processes in r - INTEGER :: num_procs_z ! Number of processes in z - INTEGER :: num_procs_pz ! Number of processes in pz comm - INTEGER :: num_procs_kyz ! 
Number of processes in kyz comm - INTEGER :: nbr_L, nbr_R ! Left and right neighbours (along p) - INTEGER :: nbr_T, nbr_B ! Top and bottom neighbours (along kx) - INTEGER :: nbr_U, nbr_D ! Upstream and downstream neighbours (along z) - - INTEGER :: iframe0d ! counting the number of times 0d datasets are outputed (for diagnose) - INTEGER :: iframe1d ! counting the number of times 1d datasets are outputed (for diagnose) - INTEGER :: iframe2d ! counting the number of times 2d datasets are outputed (for diagnose) - INTEGER :: iframe3d ! counting the number of times 3d datasets are outputed (for diagnose) - INTEGER :: iframe5d ! counting the number of times 5d datasets are outputed (for diagnose) + ! INPUT PARAMETERS + INTEGER, PUBLIC, PROTECTED :: nrun = 1 ! Number of time steps to run + real(dp), PUBLIC, PROTECTED :: tmax = 100000.0 ! Maximum simulation time + real(dp), PUBLIC, PROTECTED :: dt = 1.0 ! Time step + real(dp), PUBLIC, PROTECTED :: maxruntime = 1e9 ! Maximum simulation CPU time + INTEGER, PUBLIC, PROTECTED :: job2load = 99 ! jobnum of the checkpoint to load + ! Auxiliary variables + real(dp), PUBLIC, PROTECTED :: time = 0 ! Current simulation time (Init from restart file) + + INTEGER, PUBLIC, PROTECTED :: jobnum = 0 ! Job number + INTEGER, PUBLIC, PROTECTED :: step = 0 ! Calculation step of this run + INTEGER, PUBLIC, PROTECTED :: cstep = 0 ! Current step number (Init from restart file) + LOGICAL, PUBLIC :: nlend = .FALSE. ! Signal end of run + LOGICAL, PUBLIC :: crashed = .FALSE. ! Signal end of crashed run + + INTEGER, PUBLIC :: iframe0d ! counting the number of times 0d datasets are outputed (for diagnose) + INTEGER, PUBLIC :: iframe1d ! counting the number of times 1d datasets are outputed (for diagnose) + INTEGER, PUBLIC :: iframe2d ! counting the number of times 2d datasets are outputed (for diagnose) + INTEGER, PUBLIC :: iframe3d ! counting the number of times 3d datasets are outputed (for diagnose) + INTEGER, PUBLIC :: iframe5d ! counting the number of times 5d datasets are outputed (for diagnose) ! List of logical file units - INTEGER :: lu_in = 90 ! File duplicated from STDIN - INTEGER :: lu_stop = 91 ! stop file, see subroutine TESEND + INTEGER, PUBLIC, PROTECTED :: lu_in = 90 ! File duplicated from STDIN + INTEGER, PUBLIC, PROTECTED :: lu_stop = 91 ! stop file, see subroutine TESEND ! To measure computation time - real :: start, finish - real(dp) :: t0_rhs, t0_adv_field, t0_poisson, t0_Sapj, t0_diag, t0_checkfield,& - t0_step, t0_clos, t0_ghost, t0_coll, t0_process - real(dp) :: t1_rhs, t1_adv_field, t1_poisson, t1_Sapj, t1_diag, t1_checkfield,& - t1_step, t1_clos, t1_ghost, t1_coll, t1_process - real(dp) :: tc_rhs, tc_adv_field, tc_poisson, tc_Sapj, tc_diag, tc_checkfield,& - tc_step, tc_clos, tc_ghost, tc_coll, tc_process - real(dp) :: maxruntime = 1e9 ! Maximum simulation CPU time + real(dp), PUBLIC :: start, finish + real(dp), PUBLIC :: t0_rhs, t0_adv_field, t0_poisson, t0_Sapj, t0_diag, t0_checkfield,& + t0_step, t0_clos, t0_ghost, t0_coll, t0_process + real(dp), PUBLIC :: t1_rhs, t1_adv_field, t1_poisson, t1_Sapj, t1_diag, t1_checkfield,& + t1_step, t1_clos, t1_ghost, t1_coll, t1_process + real(dp), PUBLIC :: tc_rhs, tc_adv_field, tc_poisson, tc_Sapj, tc_diag, tc_checkfield,& + tc_step, tc_clos, tc_ghost, tc_coll, tc_process + + LOGICAL, PUBLIC, PROTECTED :: GATHERV_OUTPUT = .true. - LOGICAL :: GATHERV_OUTPUT = .true. 
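Most of the state of basic_mod is now declared PUBLIC, PROTECTED: any unit that USEs the module can read it, but writes have to go through the small setters introduced below (increase_step, increase_cstep, increase_time, set_basic_cp). A minimal, self-contained sketch of that pattern (not part of the patch; names are illustrative):

MODULE protected_demo
  ! Illustrates the PUBLIC, PROTECTED + setter pattern adopted in basic_mod
  IMPLICIT NONE
  PRIVATE
  INTEGER, PUBLIC, PROTECTED :: step = 0   ! readable everywhere, writable only inside this module
  PUBLIC :: increase_step
CONTAINS
  SUBROUTINE increase_step
    step = step + 1   ! legal: the assignment happens in the owning module
  END SUBROUTINE increase_step
END MODULE protected_demo

In any other program unit, USE protected_demo gives read access to step and to increase_step, while a direct assignment such as step = 5 is rejected at compile time.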
+ PUBLIC :: allocate_array, basic_outputinputs,basic_data,& + speak, str, increase_step, increase_cstep, increase_time, display_h_min_s,& + set_basic_cp, daytim INTERFACE allocate_array - MODULE PROCEDURE allocate_array_dp1,allocate_array_dp2,allocate_array_dp3,allocate_array_dp4, allocate_array_dp5, allocate_array_dp6 - MODULE PROCEDURE allocate_array_dc1,allocate_array_dc2,allocate_array_dc3,allocate_array_dc4, allocate_array_dc5, allocate_array_dc6 + MODULE PROCEDURE allocate_array_dp1,allocate_array_dp2,allocate_array_dp3, & + allocate_array_dp4, allocate_array_dp5, allocate_array_dp6, allocate_array_dp7 + MODULE PROCEDURE allocate_array_dc1,allocate_array_dc2,allocate_array_dc3, & + allocate_array_dc4, allocate_array_dc5, allocate_array_dc6, allocate_array_dc7 MODULE PROCEDURE allocate_array_i1,allocate_array_i2,allocate_array_i3,allocate_array_i4 MODULE PROCEDURE allocate_array_l1,allocate_array_l2,allocate_array_l3,allocate_array_l4 END INTERFACE allocate_array + INTERFACE str + MODULE PROCEDURE str_dp, str_int + END INTERFACE + CONTAINS !================================================================================ SUBROUTINE basic_data @@ -79,7 +66,7 @@ CONTAINS use prec_const IMPLICIT NONE - NAMELIST /BASIC/ nrun, dt, tmax, maxruntime + NAMELIST /BASIC/ nrun, dt, tmax, maxruntime, job2load CALL find_input_file @@ -100,15 +87,17 @@ CONTAINS END SUBROUTINE basic_data - SUBROUTINE basic_outputinputs(fid, str) + SUBROUTINE basic_outputinputs(fid) ! ! Write the input parameters to the results_xx.h5 file ! USE prec_const - USE futils, ONLY: attach + USE futils, ONLY: attach, creatd IMPLICIT NONE INTEGER, INTENT(in) :: fid - CHARACTER(len=256), INTENT(in) :: str + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/basic' + CALL creatd(fid, 0,(/0/),TRIM(str),'Basic Input') CALL attach(fid, TRIM(str), "start_iframe0d", iframe0d) CALL attach(fid, TRIM(str), "start_iframe2d", iframe2d) CALL attach(fid, TRIM(str), "start_iframe3d", iframe3d) @@ -119,16 +108,42 @@ CONTAINS CALL attach(fid, TRIM(str), "tmax", tmax) CALL attach(fid, TRIM(str), "nrun", nrun) CALL attach(fid, TRIM(str), "cpu_time", -1) - CALL attach(fid, TRIM(str), "Nproc", num_procs) - CALL attach(fid, TRIM(str), "Np_p" , num_procs_p) - CALL attach(fid, TRIM(str), "Np_kx",num_procs_ky) - CALL attach(fid, TRIM(str), "Np_z", num_procs_z) END SUBROUTINE basic_outputinputs + + SUBROUTINE increase_step + IMPLICIT NONE + step = step + 1 + END SUBROUTINE + SUBROUTINE increase_cstep + IMPLICIT NONE + cstep = cstep + 1 + END SUBROUTINE + SUBROUTINE increase_time + IMPLICIT NONE + time = time + dt + END SUBROUTINE + SUBROUTINE set_basic_cp(cstep_cp,time_cp,jobnum_cp) + IMPLICIT NONE + REAL(dp), INTENT(IN) :: time_cp + INTEGER, INTENT(IN) :: cstep_cp, jobnum_cp + cstep = cstep_cp + time = time_cp + jobnum = jobnum_cp+1 + END SUBROUTINE + !================================================================================ + ! routine to speak in the terminal + SUBROUTINE speak(message) + USE parallel, ONLY: my_id + IMPLICIT NONE + CHARACTER(len=*), INTENT(in) :: message + IF(my_id .EQ. 0) write(*,*) message + END SUBROUTINE !================================================================================ SUBROUTINE find_input_file + USE parallel, ONLY: my_id IMPLICIT NONE CHARACTER(len=32) :: str, input_file - INTEGER :: nargs, fileid, l + INTEGER :: nargs, fileid, l, ierr LOGICAL :: mlexist nargs = COMMAND_ARGUMENT_COUNT() IF((nargs .EQ. 1) .OR. (nargs .EQ. 
4)) THEN @@ -149,7 +164,7 @@ CONTAINS !================================================================================ SUBROUTINE daytim(str) ! Print date and time - + USE parallel, ONLY: my_id use prec_const IMPLICIT NONE @@ -160,13 +175,16 @@ CONTAINS CALL DATE_AND_TIME(d,t) dat=d(7:8) // '/' // d(5:6) // '/' // d(1:4) time=t(1:2) // ':' // t(3:4) // ':' // t(5:10) - WRITE(*,'(a,1x,a,1x,a)') str, dat(1:10), time(1:12) + IF (my_id .EQ. 0) & + WRITE(*,'(a,1x,a,1x,a)') str, dat(1:10), time(1:12) ! END SUBROUTINE daytim !================================================================================ SUBROUTINE display_h_min_s(time) - real :: time - integer :: days, hours, mins, secs + USE parallel, ONLY: my_id + IMPLICIT NONE + real(dp) :: time + integer :: days, hours, mins, secs days = FLOOR(time/24./3600.); hours= FLOOR(time/3600.); mins = FLOOR(time/60.); @@ -197,6 +215,22 @@ CONTAINS END SUBROUTINE display_h_min_s !================================================================================ + function str_dp(k) result( str ) + ! "Convert an integer to string." + REAL(dp), intent(in) :: k + character(len=20):: str + write (str, *) k + str = adjustl(str) + end function str_dp + + function str_int(k) result( str ) + ! "Convert an integer to string." + integer, intent(in) :: k + character(len=20) :: str + write (str, *) k + str = adjustl(str) + end function str_int + ! To allocate arrays of doubles, integers, etc. at run time SUBROUTINE allocate_array_dp1(a,is1,ie1) IMPLICIT NONE @@ -245,11 +279,19 @@ CONTAINS ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3,is4:ie4,is5:ie5,is6:ie6)) a=0.0_dp END SUBROUTINE allocate_array_dp6 + + SUBROUTINE allocate_array_dp7(a,is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5,is6,ie6,is7,ie7) + IMPLICIT NONE + REAL(dp), DIMENSION(:,:,:,:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a + INTEGER, INTENT(IN) :: is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5,is6,ie6,is7,ie7 + ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3,is4:ie4,is5:ie5,is6:ie6,is7:ie7)) + a=0.0_dp + END SUBROUTINE allocate_array_dp7 !======================================== SUBROUTINE allocate_array_dc1(a,is1,ie1) IMPLICIT NONE - DOUBLE COMPLEX, DIMENSION(:), ALLOCATABLE, INTENT(INOUT) :: a + COMPLEX(dp), DIMENSION(:), ALLOCATABLE, INTENT(INOUT) :: a INTEGER, INTENT(IN) :: is1,ie1 ALLOCATE(a(is1:ie1)) a=CMPLX(0.0_dp,0.0_dp) @@ -257,7 +299,7 @@ CONTAINS SUBROUTINE allocate_array_dc2(a,is1,ie1,is2,ie2) IMPLICIT NONE - DOUBLE COMPLEX, DIMENSION(:,:), ALLOCATABLE, INTENT(INOUT) :: a + COMPLEX(dp), DIMENSION(:,:), ALLOCATABLE, INTENT(INOUT) :: a INTEGER, INTENT(IN) :: is1,ie1,is2,ie2 ALLOCATE(a(is1:ie1,is2:ie2)) a=CMPLX(0.0_dp,0.0_dp) @@ -265,7 +307,7 @@ CONTAINS SUBROUTINE allocate_array_dc3(a,is1,ie1,is2,ie2,is3,ie3) IMPLICIT NONE - DOUBLE COMPLEX, DIMENSION(:,:,:), ALLOCATABLE, INTENT(INOUT) :: a + COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE, INTENT(INOUT) :: a INTEGER, INTENT(IN) :: is1,ie1,is2,ie2,is3,ie3 ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3)) a=CMPLX(0.0_dp,0.0_dp) @@ -273,7 +315,7 @@ CONTAINS SUBROUTINE allocate_array_dc4(a,is1,ie1,is2,ie2,is3,ie3,is4,ie4) IMPLICIT NONE - DOUBLE COMPLEX, DIMENSION(:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a + COMPLEX(dp), DIMENSION(:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a INTEGER, INTENT(IN) :: is1,ie1,is2,ie2,is3,ie3,is4,ie4 ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3,is4:ie4)) a=CMPLX(0.0_dp,0.0_dp) @@ -281,7 +323,7 @@ CONTAINS SUBROUTINE allocate_array_dc5(a,is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5) IMPLICIT NONE - DOUBLE COMPLEX, DIMENSION(:,:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a + COMPLEX(dp), 
DIMENSION(:,:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a INTEGER, INTENT(IN) :: is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5 ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3,is4:ie4,is5:ie5)) a=CMPLX(0.0_dp,0.0_dp) @@ -289,11 +331,19 @@ CONTAINS SUBROUTINE allocate_array_dc6(a,is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5,is6,ie6) IMPLICIT NONE - DOUBLE COMPLEX, DIMENSION(:,:,:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a INTEGER, INTENT(IN) :: is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5,is6,ie6 ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3,is4:ie4,is5:ie5,is6:ie6)) a=CMPLX(0.0_dp,0.0_dp) END SUBROUTINE allocate_array_dc6 + + SUBROUTINE allocate_array_dc7(a,is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5,is6,ie6,is7,ie7) + IMPLICIT NONE + COMPLEX(dp), DIMENSION(:,:,:,:,:,:,:), ALLOCATABLE, INTENT(INOUT) :: a + INTEGER, INTENT(IN) :: is1,ie1,is2,ie2,is3,ie3,is4,ie4,is5,ie5,is6,ie6,is7,ie7 + ALLOCATE(a(is1:ie1,is2:ie2,is3:ie3,is4:ie4,is5:ie5,is6:ie6,is7:ie7)) + a=CMPLX(0.0_dp,0.0_dp) + END SUBROUTINE allocate_array_dc7 !======================================== SUBROUTINE allocate_array_i1(a,is1,ie1) diff --git a/src/calculus_mod.F90 b/src/calculus_mod.F90 index 72ff5ffa..a3ce639a 100644 --- a/src/calculus_mod.F90 +++ b/src/calculus_mod.F90 @@ -1,88 +1,98 @@ MODULE calculus ! Routine to evaluate gradients, interpolation schemes and integrals - USE basic - USE prec_const - USE grid - USE parallel, ONLY: manual_0D_bcast + USE prec_const, ONLY: dp IMPLICIT NONE REAL(dp), dimension(-2:2) :: dz_usu = & - (/ onetwelfth, -twothird, & - 0._dp, & - twothird, -onetwelfth /) ! fd4 centered stencil + (/ 1._dp/12._dp, -2._dp/3._dp, 0._dp, 2._dp/3._dp, -1._dp/12._dp /) ! fd4 centered stencil REAL(dp), dimension(-2:1) :: dz_o2e = & - (/ onetwentyfourth,-nineeighths, nineeighths,-onetwentyfourth /) ! fd4 odd to even stencil + (/ 1._dp/24._dp,-9._dp/8._dp, 9._dp/8._dp,-1._dp/24._dp /) ! fd4 odd to even stencil REAL(dp), dimension(-1:2) :: dz_e2o = & - (/ onetwentyfourth,-nineeighths, nineeighths,-onetwentyfourth /) ! fd4 odd to even stencil + (/ 1._dp/24._dp,-9._dp/8._dp, 9._dp/8._dp,-1._dp/24._dp /) ! fd4 odd to even stencil REAL(dp), dimension(-2:2) :: dz2_usu = & - (/-1.0_dp/12.0_dp, 4.0_dp/3.0_dp, -5.0_dp/2.0_dp, 4.0_dp/3.0_dp, -1.0_dp/12.0_dp /)! 2th derivative, 4th order (for parallel hypdiff) + (/-1._dp/12._dp, 4._dp/3._dp, -5._dp/2._dp, 4._dp/3._dp, -1._dp/12._dp /)! 2th derivative, 4th order (for parallel hypdiff) REAL(dp), dimension(-2:2) :: dz4_usu = & (/ 1._dp, -4._dp, 6._dp, -4._dp, 1._dp /) ! 4th derivative, 2nd order (for parallel hypdiff) + REAL(dp), dimension(-2:1) :: iz_o2e = & + (/ -1._dp/16._dp, 9._dp/16._dp, 9._dp/16._dp, -1._dp/16._dp /) ! grid interpolation, 4th order, odd to even + REAL(dp), dimension(-1:2) :: iz_e2o = & + (/ -1._dp/16._dp, 9._dp/16._dp, 9._dp/16._dp, -1._dp/16._dp /) ! grid interpolation, 4th order, even to odd PUBLIC :: simpson_rule_z, interp_z, grad_z, grad_z4 CONTAINS -SUBROUTINE grad_z(target,f,ddzf) +SUBROUTINE grad_z(target,local_Nz,Ngz,inv_deltaz,f,ddzf) implicit none ! Compute the periodic boundary condition 4 points centered finite differences ! formula among staggered grid or not. ! not staggered : the derivative results must be on the same grid as the field ! staggered : the derivative is computed from a grid to the other - INTEGER, INTENT(IN) :: target - COMPLEX(dp),dimension(izgs:izge), intent(in) :: f - COMPLEX(dp),dimension( izs:ize ), intent(out) :: ddzf - IF(Nz .GT. 3) THEN ! 
Cannot apply four points stencil on less than four points grid - IF(SG) THEN - IF(TARGET .EQ. 0) THEN - CALL grad_z_o2e(f,ddzf) - ELSE - CALL grad_z_e2o(f,ddzf) - ENDIF - ELSE ! No staggered grid -> usual centered finite differences - DO iz = izs,ize - ddzf(iz) = dz_usu(-2)*f(iz-2) + dz_usu(-1)*f(iz-1) & - +dz_usu( 1)*f(iz+1) + dz_usu( 2)*f(iz+2) + INTEGER, INTENT(IN) :: target, local_Nz, Ngz + REAL(dp), INTENT(IN) :: inv_deltaz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: f + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: ddzf + INTEGER :: iz + IF(Ngz .GT. 3) THEN ! Cannot apply four points stencil on less than four points grid + SELECT CASE(TARGET) + CASE(1) + CALL grad_z_o2e(local_Nz,Ngz,inv_deltaz,f,ddzf) + CASE(2) + CALL grad_z_e2o(local_Nz,Ngz,inv_deltaz,f,ddzf) + CASE DEFAULT ! No staggered grid -> usual centered finite differences + DO iz = Ngz/2+1,Ngz/2+local_Nz + ddzf(iz) = dz_usu(-2)*f(iz ) + dz_usu(-1)*f(iz+1) & + +dz_usu( 0)*f(iz+2) & + +dz_usu( 1)*f(iz+3) + dz_usu( 2)*f(iz+4) ENDDO ddzf(:) = ddzf(:) * inv_deltaz - ENDIF + END SELECT ELSE ddzf = 0._dp ENDIF CONTAINS - SUBROUTINE grad_z_o2e(fo,ddzfe) ! Paruta 2018 eq (27) + SUBROUTINE grad_z_o2e(local_Nz,Ngz,inv_deltaz,fo,ddzfe) ! Paruta 2018 eq (27) ! gives the gradient of a field from the odd grid to the even one implicit none - COMPLEX(dp),dimension(izgs:izge), intent(in) :: fo - COMPLEX(dp),dimension( izs:ize ), intent(out) :: ddzfe ! - DO iz = izs,ize - ddzfe(iz) = dz_o2e(-2)*fo(iz-2) + dz_o2e(-1)*fo(iz-1) & - +dz_o2e( 0)*fo(iz ) + dz_o2e( 1)*fo(iz+1) + INTEGER, INTENT(IN) :: local_Nz, Ngz + REAL(dp), INTENT(IN) :: inv_deltaz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: fo + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: ddzfe ! + INTEGER :: iz + DO iz = 1,local_Nz + ddzfe(iz) = dz_o2e(-2)*fo(iz ) + dz_o2e(-1)*fo(iz+1) & + +dz_o2e( 0)*fo(iz+2) + dz_o2e( 1)*fo(iz+3) ENDDO ddzfe(:) = ddzfe(:) * inv_deltaz END SUBROUTINE grad_z_o2e - SUBROUTINE grad_z_e2o(fe,ddzfo) ! n2v for Paruta 2018 eq (28) + SUBROUTINE grad_z_e2o(local_Nz,Ngz,inv_deltaz,fe,ddzfo) ! n2v for Paruta 2018 eq (28) ! gives the gradient of a field from the even grid to the odd one implicit none - COMPLEX(dp),dimension(izgs:izge), intent(in) :: fe - COMPLEX(dp),dimension( izs:ize ), intent(out) :: ddzfo - DO iz = izs,ize - ddzfo(iz) = dz_e2o(-1)*fe(iz-1) + dz_e2o(0)*fe(iz ) & - +dz_e2o( 1)*fe(iz+1) + dz_e2o(2)*fe(iz+2) + INTEGER, INTENT(IN) :: local_Nz, Ngz + REAL(dp), INTENT(IN) :: inv_deltaz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: fe + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: ddzfo + INTEGER :: iz + DO iz = 1,local_Nz + ddzfo(iz) = dz_e2o(-1)*fe(iz+1) + dz_e2o(0)*fe(iz+2) & + +dz_e2o( 1)*fe(iz+3) + dz_e2o(2)*fe(iz+4) ENDDO ddzfo(:) = ddzfo(:) * inv_deltaz END SUBROUTINE grad_z_e2o END SUBROUTINE grad_z -SUBROUTINE grad_z2(f,ddz2f) - implicit none +SUBROUTINE grad_z2(local_Nz,Ngz,inv_deltaz,f,ddz2f) ! Compute the second order fourth derivative for periodic boundary condition - COMPLEX(dp),dimension(izgs:izge), intent(in) :: f - COMPLEX(dp),dimension( izs:ize ), intent(out) :: ddz2f - IF(Nz .GT. 3) THEN ! 
Cannot apply four points stencil on less than four points grid - DO iz = izs,ize - ddz2f(iz) = dz2_usu(-2)*f(iz-2) + dz2_usu(-1)*f(iz-1) & - +dz2_usu( 0)*f(iz )& - +dz2_usu( 1)*f(iz+1) + dz2_usu( 2)*f(iz+2) + implicit none + INTEGER, INTENT(IN) :: local_Nz, Ngz + REAL(dp), INTENT(IN) :: inv_deltaz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: f + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: ddz2f + INTEGER :: iz + IF(Ngz .GT. 3) THEN ! Cannot apply four points stencil on less than four points grid + DO iz = 1,local_Nz + ddz2f(iz) = dz2_usu(-2)*f(iz ) + dz2_usu(-1)*f(iz+1) & + +dz2_usu( 0)*f(iz+2)& + +dz2_usu( 1)*f(iz+3) + dz2_usu( 2)*f(iz+4) ENDDO ELSE ddz2f = 0._dp @@ -91,16 +101,19 @@ SUBROUTINE grad_z2(f,ddz2f) END SUBROUTINE grad_z2 -SUBROUTINE grad_z4(f,ddz4f) - implicit none +SUBROUTINE grad_z4(local_Nz,Ngz,inv_deltaz,f,ddz4f) ! Compute the second order fourth derivative for periodic boundary condition - COMPLEX(dp),dimension(izgs:izge), intent(in) :: f - COMPLEX(dp),dimension( izs:ize ), intent(out) :: ddz4f - IF(Nz .GT. 3) THEN ! Cannot apply four points stencil on less than four points grid - DO iz = izs,ize - ddz4f(iz) = dz4_usu(-2)*f(iz-2) + dz4_usu(-1)*f(iz-1) & - +dz4_usu( 0)*f(iz)& - +dz4_usu( 1)*f(iz+1) + dz4_usu( 2)*f(iz+2) + implicit none + INTEGER, INTENT(IN) :: local_Nz, Ngz + REAL(dp), INTENT(IN) :: inv_deltaz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: f + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: ddz4f + INTEGER :: iz + IF(Ngz .GT. 3) THEN ! Cannot apply four points stencil on less than four points grid + DO iz = 1,local_Nz + ddz4f(iz) = dz4_usu(-2)*f(iz ) + dz4_usu(-1)*f(iz+1) & + +dz4_usu( 0)*f(iz+2)& + +dz4_usu( 1)*f(iz+3) + dz4_usu( 2)*f(iz+4) ENDDO ELSE ddz4f = 0._dp @@ -109,91 +122,97 @@ SUBROUTINE grad_z4(f,ddz4f) END SUBROUTINE grad_z4 -SUBROUTINE interp_z(target,f_in,f_out) +SUBROUTINE interp_z(target,local_Nz,Ngz,f_in,f_out) ! Function meant to interpolate one field defined on a even/odd z into ! the other odd/even z grid. ! If Staggered Grid flag (SG) is false, returns identity implicit none + INTEGER, INTENT(IN) :: local_Nz, Ngz INTEGER, intent(in) :: target ! target grid : 0 for even grid, 1 for odd - COMPLEX(dp),dimension(izgs:izge), intent(in) :: f_in - COMPLEX(dp),dimension( izs:ize ), intent(out) :: f_out ! - IF(SG) THEN - IF(target .EQ. 0) THEN - CALL interp_o2e_z(f_in,f_out) - ELSE - CALL interp_e2o_z(f_in,f_out) - ENDIF - ELSE ! No staggered grid -> identity - f_out(izs:ize) = f_in(izs:ize) - ENDIF + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: f_in + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: f_out + SELECT CASE(TARGET) + CASE(1) ! output on even grid + CALL interp_o2e_z(local_Nz,Ngz,f_in,f_out) + CASE(2) ! output on odd grid + CALL interp_e2o_z(local_Nz,Ngz,f_in,f_out) + CASE DEFAULT ! No staggered grid -> usual centered finite differences + f_out = f_in + END SELECT CONTAINS - SUBROUTINE interp_o2e_z(fo,fe) + SUBROUTINE interp_o2e_z(local_Nz, Ngz,fo,fe) ! gives the value of a field from the odd grid to the even one implicit none - COMPLEX(dp),dimension(izgs:izge), intent(in) :: fo - COMPLEX(dp),dimension( izs:ize ), intent(out) :: fe ! + INTEGER, INTENT(IN) :: local_Nz, Ngz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: fo + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: fe + INTEGER :: iz ! 
4th order interp - DO iz = izs,ize - fe(iz) = onesixteenth * (-fo(iz-2) + 9._dp*(fo(iz-1) + fo(iz)) - fo(iz+1)) + DO iz = 1,local_Nz + fe(iz) = iz_o2e(-2)*fo(iz ) + iz_o2e(-1)*fo(iz+1) & + + iz_o2e( 0)*fo(iz+2) + iz_o2e( 1)*fo(iz+3) ENDDO END SUBROUTINE interp_o2e_z - SUBROUTINE interp_e2o_z(fe,fo) + SUBROUTINE interp_e2o_z(local_Nz, Ngz,fe,fo) ! gives the value of a field from the even grid to the odd one implicit none - COMPLEX(dp),dimension(izgs:izge), intent(in) :: fe - COMPLEX(dp),dimension( izs:ize ), intent(out) :: fo + INTEGER, INTENT(IN) :: local_Nz, Ngz + COMPLEX(dp),dimension(local_Nz+Ngz), INTENT(IN) :: fe + COMPLEX(dp),dimension(local_Nz), INTENT(OUT) :: fo + INTEGER :: iz ! 4th order interp - DO iz = izs,ize - fo(iz) = onesixteenth * (-fe(iz-1) + 9._dp*(fe(iz) + fe(iz+1)) - fe(iz+2)) + DO iz = 1,local_Nz + fo(iz) = iz_e2o(-1)*fe(iz+1) + iz_e2o( 0)*fe(iz+2) & + + iz_e2o( 1)*fe(iz+3) + iz_e2o( 2)*fe(iz+4) ENDDO END SUBROUTINE interp_e2o_z END SUBROUTINE interp_z -SUBROUTINE simpson_rule_z(f,intf) +SUBROUTINE simpson_rule_z(local_Nz,dz,f,intf) ! integrate f(z) over z using the simpon's rule. Assume periodic boundary conditions (f(ize+1) = f(izs)) !from molix BJ Frei + USE prec_const, ONLY: dp, onethird + USE parallel, ONLY: num_procs_z, rank_z, comm_z, manual_0D_bcast + USE mpi implicit none - complex(dp),dimension(izs:ize), intent(in) :: f + INTEGER, INTENT(IN) :: local_Nz + REAL(dp),INTENT(IN) :: dz + complex(dp),dimension(local_Nz), intent(in) :: f COMPLEX(dp), intent(out) :: intf COMPLEX(dp) :: buffer, local_int - INTEGER :: root, i_ + INTEGER :: root, i_, iz, ierr - IF(Nz .EQ. 1) THEN !2D zpinch simulations - intf = f(izs) - - ELSE !3D fluxtube - IF(mod(Nz,2) .ne. 0 ) THEN - ERROR STOP '>> ERROR << Simpson rule: Nz must be an even number !!!!' - ENDIF - ! Buil local sum using the weights of composite Simpson's rule - local_int = 0._dp - DO iz = izs,ize - local_int = local_int + zweights_SR(iz)*f(iz) - ENDDO - buffer = local_int - root = 0 - !Gather manually among the rank_z=0 processes and perform the sum - intf = 0._dp - IF (num_procs_z .GT. 1) THEN - !! Everyone sends its local_sum to root = 0 - IF (rank_z .NE. root) THEN - CALL MPI_SEND(buffer, 1 , MPI_DOUBLE_COMPLEX, root, 5678, comm_z, ierr) - ELSE - ! Recieve from all the other processes - DO i_ = 0,num_procs_z-1 - IF (i_ .NE. rank_z) & - CALL MPI_RECV(buffer, 1 , MPI_DOUBLE_COMPLEX, i_, 5678, comm_z, MPI_STATUS_IGNORE, ierr) - intf = intf + buffer - ENDDO - ENDIF - CALL manual_0D_bcast(intf) - ELSE - intf = local_int - ENDIF - intf = onethird*deltaz*intf + ! Buil local sum using the weights of composite Simpson's rule + local_int = 0._dp + DO iz = 1,local_Nz + IF(MODULO(iz,2) .EQ. 1) THEN ! odd iz + local_int = local_int + 2._dp*onethird*dz*f(iz) + ELSE ! even iz + local_int = local_int + 4._dp*onethird*dz*f(iz) ENDIF + ENDDO + buffer = local_int + root = 0 + !Gather manually among the rank_z=0 processes and perform the sum + intf = 0._dp + IF (num_procs_z .GT. 1) THEN + !! Everyone sends its local_sum to root = 0 + IF (rank_z .NE. root) THEN + CALL MPI_SEND(buffer, 1 , MPI_DOUBLE_COMPLEX, root, 5678, comm_z, ierr) + ELSE + ! Recieve from all the other processes + DO i_ = 0,num_procs_z-1 + IF (i_ .NE. 
rank_z) & + CALL MPI_RECV(buffer, 1 , MPI_DOUBLE_COMPLEX, i_, 5678, comm_z, MPI_STATUS_IGNORE, ierr) + intf = intf + buffer + ENDDO + ENDIF + CALL manual_0D_bcast(intf) + ELSE + intf = local_int + ENDIF END SUBROUTINE simpson_rule_z diff --git a/src/closure_mod.F90 b/src/closure_mod.F90 index c7d7bb20..c44a5436 100644 --- a/src/closure_mod.F90 +++ b/src/closure_mod.F90 @@ -1,11 +1,5 @@ module closure ! Contains the routines to define closures -USE basic -USE model, ONLY: CLOS, tau_e, tau_i, q_e, q_i, nu, KIN_E -USE grid -USE array, ONLY: kernel_e, kernel_i -USE fields, ONLY: moments_e, moments_i -USE time_integration, ONLY: updatetlevel IMPLICIT NONE PUBLIC :: apply_closure_model @@ -14,132 +8,141 @@ CONTAINS ! Positive Oob indices are approximated with a model SUBROUTINE apply_closure_model + USE basic, ONLY: t0_clos, t1_clos, tc_clos + USE prec_const, ONLY: dp + USE model, ONLY: CLOS + USE grid, ONLY: local_na, local_nky, local_nkx, local_nz,ngz,& + local_nj,ngj, jarray,& + local_np,ngp, parray, dmax + USE fields, ONLY: moments + USE time_integration, ONLY: updatetlevel IMPLICIT NONE - + INTEGER :: iz,ikx,iky,ij,ip,ia CALL cpu_time(t0_clos) IF (CLOS .EQ. 0) THEN ! zero truncation, An+1=0 for n+1>nmax only CALL ghosts_upper_truncation - - ELSEIF (CLOS .EQ. 1) THEN - ! zero truncation, An+1=0 for n+1>nmax only - CALL ghosts_upper_truncation - ! Additional truncation at highest fully represented kinetic moment + ! truncation at highest fully represented kinetic moment ! e.g. Dmax = 3 means ! only Napj s.t. p+2j <= 3 are evolved ! -> (p,j) allowed are (0,0),(1,0),(0,1),(2,0),(1,1),(3,0) ! =>> Dmax = min(Pmax,2*Jmax+1) - IF(KIN_E) THEN - DO ip = ipgs_e,ipge_e - DO ij = ijgs_e,ijge_e - IF ( parray_e(ip)+2*jarray_e(ij) .GT. dmaxe) & - moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDDO - ENDDO - ENDIF - DO ip = ipgs_i,ipge_i - DO ij = ijgs_i,ijge_i - IF ( parray_i(ip)+2*jarray_i(ij) .GT. dmaxi) & - moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDDO - ENDDO - ! + ghosts truncation - CALL ghosts_upper_truncation + z: DO iz = 1,local_nz+ngz + kx:DO ikx= 1,local_nkx + ky:DO iky= 1,local_nky + j: DO ij = 1,local_nj+ngj + p: DO ip = 1,local_np+ngp + IF ( parray(ip)+2*jarray(ij) .GT. dmax) THEN + a:DO ia = 1,local_na + moments(ia,ip,ij,iky,ikx,iz,updatetlevel) = 0._dp + ENDDO a + ENDIF + ENDDO p + ENDDO j + ENDDO ky + ENDDO kx + ENDDO z ELSE ERROR STOP '>> ERROR << Closure scheme not found ' - ENDIF - CALL ghosts_lower_truncation - CALL cpu_time(t1_clos) tc_clos = tc_clos + (t1_clos - t0_clos) END SUBROUTINE apply_closure_model ! Positive Oob indices are approximated with a model SUBROUTINE ghosts_upper_truncation + USE prec_const, ONLY: dp + USE grid, ONLY: local_na, local_np,ngp,Pmax,& + local_nj,ngj,Jmax,& + local_nky,local_nkx,& + local_nz,ngz,& + local_pmax, local_jmax + USE fields, ONLY: moments + USE time_integration, ONLY: updatetlevel IMPLICIT NONE - -! zero truncation, An+1=0 for n+1>nmax - ! Electrons - IF(KIN_E) THEN - ! applies only for the process that has largest j index - IF(ije_e .EQ. Jmaxe+1) THEN - DO ip = ipgs_e,ipge_e - moments_e(ip,ije_e+1,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp + INTEGER :: iz,ikx,iky,ip,ij,ia,ig + ! zero truncation, An+1=0 for n+1>nmax + ! applies only for the processes that evolve the highest moment degree + IF(local_jmax .GE. 
Jmax) THEN + DO iz = 1,local_nz+ngz + DO ikx= 1,local_nkx + DO iky= 1,local_nky + DO ig = 1,ngj/2 + DO ip = 1,local_np+ngp + DO ia = 1,local_na + moments(ia,ip,local_nj+ngj/2+ig,iky,ikx,iz,updatetlevel) = 0._dp + ENDDO + ENDDO + ENDDO + ENDDO + ENDDO ENDDO ENDIF ! applies only for the process that has largest p index - IF(ipe_e .EQ. Pmaxe+1) THEN - DO ij = ijgs_e,ijge_e - moments_e(ipe_e+1,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - IF(deltape .EQ. 1) THEN ! Must truncate the second stencil - moments_e(ipe_e+2,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDIF + IF(local_pmax .GE. Pmax) THEN + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO ij = 1,local_nj+ngj + DO ia = 1,local_na + DO ig = 1,ngp/2 + moments(ia,local_np+ngp/2+ig,ij,iky,ikx,iz,updatetlevel) = 0._dp + ENDDO + ENDDO + ENDDO + ENDDO + ENDDO ENDDO ENDIF - ENDIF - - ! Ions - ! applies only for the process that has largest j index - IF(ije_i .EQ. Jmaxi+1) THEN - DO ip = ipgs_i,ipge_i - moments_i(ip,ije_i+1,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDDO - ENDIF - ! applies only for the process that has largest p index - IF(ipe_i .EQ. Pmaxi+1) THEN - DO ij = ijgs_i,ijge_i - moments_i(ipe_i+1,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - IF(deltape .EQ. 1) THEN ! Must truncate the second stencil - moments_i(ipe_i+2,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDIF - ENDDO - ENDIF - END SUBROUTINE ghosts_upper_truncation ! Negative OoB indices are 0 SUBROUTINE ghosts_lower_truncation + USE prec_const, ONLY: dp + USE grid, ONLY: local_na,local_np,ngp,& + local_nj,ngj,& + local_nky,local_nkx,& + local_nz,ngz,& + local_pmin, local_jmin + USE fields, ONLY: moments + USE time_integration, ONLY: updatetlevel IMPLICIT NONE - + INTEGER :: iz,ikx,iky,ip,ia,ij,ig ! zero truncation, An=0 for n<0 - ! Electrons - IF(KIN_E) THEN - ! applies only for the process that has lowest j index - IF(ijs_e .EQ. 1) THEN - DO ip = ipgs_e,ipge_e - moments_e(ip,ijs_e-1,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp + IF(local_jmin .EQ. 0) THEN + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO ig = 1,ngj/2 + DO ip = 1,local_np+ngp + DO ia = 1,local_na + ! set to zero the first ghosts cells + moments(ia,ip,ig,iky,ikx,iz,updatetlevel) = 0._dp + ENDDO + ENDDO + ENDDO + ENDDO + ENDDO ENDDO ENDIF ! applies only for the process that has lowest p index - IF(ips_e .EQ. 1) THEN - DO ij = ijgs_e,ijge_e - moments_e(ips_e-1,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - IF(deltape .EQ. 1) THEN ! Must truncate the second stencil - moments_e(ips_e-2,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDIF + IF(local_pmin .EQ. 0) THEN + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO ij = 1,local_nj+ngj + DO ig = 1,ngp/2 + DO ia = 1,local_na + moments(ia,ig,ij,iky,ikx,iz,updatetlevel) = 0._dp + ENDDO + ENDDO + ENDDO + ENDDO + ENDDO ENDDO ENDIF - ENDIF - - ! Ions - IF(ijs_i .EQ. 1) THEN - ! applies only for the process that has lowest j index - DO ip = ipgs_i,ipge_i - moments_i(ip,ijs_i-1,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDDO - ENDIF - ! applies only for the process that has lowest p index - IF(ips_i .EQ. 1) THEN - DO ij = ijgs_i,ijge_i - moments_i(ips_i-1,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - IF(deltape .EQ. 1) THEN ! 
Must truncate the second stencil - moments_i(ips_i-2,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) = 0._dp - ENDIF - ENDDO - ENDIF END SUBROUTINE ghosts_lower_truncation diff --git a/src/collision_mod.F90 b/src/collision_mod.F90 index c2286157..702defea 100644 --- a/src/collision_mod.F90 +++ b/src/collision_mod.F90 @@ -1,81 +1,92 @@ module collision -! contains the Hermite-Laguerre collision operators. Solved using COSOlver. -USE array -USE basic -USE fields -USE futils -USE grid -USE model -USE prec_const -USE time_integration -USE utility +! contains the Hermite-Laguerre collision operators without the use of COSOLVER +USE prec_const, ONLY : dp IMPLICIT NONE PRIVATE -! Set the collision model to use -! (Lenard-Bernstein: 'LB', Dougherty: 'DG', Sugama: 'SG', Lorentz: 'LR', Landau: 'LD') -CHARACTER(len=16),PUBLIC,PROTECTED :: collision_model -LOGICAL, PUBLIC, PROTECTED :: gyrokin_CO =.true. ! activates GK effects on CO -LOGICAL, PUBLIC, PROTECTED :: interspecies =.true. ! activates interpecies collision -CHARACTER(len=128), PUBLIC :: mat_file ! COSOlver matrix file names -REAL(dp), PUBLIC, PROTECTED :: collision_kcut = 100.0 -LOGICAL, PUBLIC, PROTECTED :: cosolver_coll ! check if cosolver matrices are used - +CHARACTER(len=32), PUBLIC, PROTECTED :: collision_model ! (Lenard-Bernstein: 'LB', Dougherty: 'DG', Sugama: 'SG', Lorentz: 'LR', Landau: 'LD') +LOGICAL, PUBLIC, PROTECTED :: GK_CO =.true. ! activates GK effects on CO +LOGICAL, PUBLIC, PROTECTED :: INTERSPECIES =.true. ! activates interpecies collision +CHARACTER(len=128), PUBLIC, PROTECTED :: mat_file ! COSOlver matrix file names +REAL(dp), PUBLIC, PROTECTED :: collision_kcut = 100.0 +LOGICAL, PUBLIC, PROTECTED :: cosolver_coll ! check if cosolver matrices are used + +PUBLIC :: init_collision PUBLIC :: collision_readinputs, coll_outputinputs -PUBLIC :: compute_TColl +PUBLIC :: compute_Capj PUBLIC :: compute_lenard_bernstein, compute_dougherty -PUBLIC :: load_COSOlver_mat, compute_cosolver_coll -PUBLIC :: apply_COSOlver_mat_e, apply_COSOlver_mat_i CONTAINS + SUBROUTINE init_collision + USE cosolver_interface, ONLY: load_COSOlver_mat + IMPLICIT NONE + ! Load the COSOlver collision operator coefficients + IF(cosolver_coll) & + CALL load_COSOlver_mat(GK_CO,INTERSPECIES,mat_file,collision_kcut) + END SUBROUTINE init_collision SUBROUTINE collision_readinputs ! Read the input parameters + USE basic, ONLY: lu_in IMPLICIT NONE - NAMELIST /COLLISION_PAR/ collision_model, gyrokin_CO, interspecies, mat_file, collision_kcut + NAMELIST /COLLISION_PAR/ collision_model, GK_CO, INTERSPECIES, mat_file, collision_kcut READ(lu_in,collision_par) SELECT CASE(collision_model) - CASE ('LB') ! Lenhard bernstein - cosolver_coll = .false. - interspecies = .false. - CASE ('DG') ! Dougherty - cosolver_coll = .false. - interspecies = .false. - CASE ('SG') ! Sugama - cosolver_coll = .true. - CASE ('LR') ! Lorentz (Pitch angle) - cosolver_coll = .true. - interspecies = .false. - CASE ('LD') ! Landau - cosolver_coll = .true. + ! Lenhard bernstein + CASE ('LB','lenhard-bernstein','Lenhard-Bernstein') + collision_model = 'LB' + cosolver_coll = .false. + INTERSPECIES = .false. + ! Dougherty + CASE ('DG','dougherty','Dougherty') + collision_model = 'DG' + cosolver_coll = .false. + INTERSPECIES = .false. + ! Sugama + CASE ('SG','sugama','Sugama') + collision_model = 'SG' + cosolver_coll = .true. + ! Lorentz (Pitch angle) + CASE ('LR','lorentz','Lorentz','PA','pitch-angle') + collision_model = 'LR' + cosolver_coll = .true. + INTERSPECIES = .false. + ! 
Landau named also Coulomb or Fokker-Planck + CASE ('LD','landau','Landau','CL','coulomb','Coulomb','FP','fokker-planck','Fokker-Planck') + collision_model = 'LD' + cosolver_coll = .true. CASE ('none','hypcoll','dvpar4') - cosolver_coll = .false. - interspecies = .false. + collision_model = 'NO' + cosolver_coll = .false. + INTERSPECIES = .false. CASE DEFAULT ERROR STOP '>> ERROR << collision model not recognized!!' END SELECT END SUBROUTINE collision_readinputs - SUBROUTINE coll_outputinputs(fidres, str) + SUBROUTINE coll_outputinputs(fidres) ! Write the input parameters to the results_xx.h5 file + USE futils, ONLY: attach, creatd IMPLICIT NONE INTEGER, INTENT(in) :: fidres - CHARACTER(len=256), INTENT(in) :: str - CHARACTER(len=2) :: gkco = 'dk' - CHARACTER(len=2) :: abco = 'aa' - CHARACTER(len=6) :: coname - - IF (gyrokin_CO) gkco = 'GK' - IF (interspecies) abco = 'ab' + CHARACTER(len=256) :: str + CHARACTER(len=2) :: gkco = 'DK' + CHARACTER(len=2) :: abco = 'aa' + CHARACTER(len=6) :: coname + IF (GK_CO) gkco = 'GK' + IF (INTERSPECIES) abco = 'ab' WRITE(coname,'(a2,a2,a2)') collision_model,gkco,abco - - CALL attach(fidres, TRIM(str), "CO", coname) - CALL attach(fidres, TRIM(str), "matfilename", mat_file) + WRITE(str,'(a)') '/data/input/coll' + CALL creatd(fidres, 0,(/0/),TRIM(str),'Collision Input') + CALL attach(fidres, TRIM(str), "CO", coname) + CALL attach(fidres, TRIM(str), "matfilename",mat_file) END SUBROUTINE coll_outputinputs - SUBROUTINE compute_TColl - USE basic - USE model, ONLY : nu + SUBROUTINE compute_Capj + USE basic, ONLY: tc_coll, t0_coll, t1_coll + USE array, ONLY: Capj + USE model, ONLY: nu + USE cosolver_interface, ONLY: compute_cosolver_coll IMPLICIT NONE ! Execution time start CALL cpu_time(t0_coll) @@ -87,716 +98,214 @@ CONTAINS CASE ('DG') CALL compute_dougherty CASE ('SG','LR','LD') - CALL compute_cosolver_coll + CALL compute_cosolver_coll(GK_CO,INTERSPECIES) CASE ('none','hypcoll','dvpar4') - IF(KIN_E) & - TColl_e = 0._dp - TColl_i = 0._dp + Capj = 0._dp CASE DEFAULT ERROR STOP '>> ERROR << collision operator not recognized!!' END SELECT ELSE - IF(KIN_E) & - TColl_e = 0._dp - TColl_i = 0._dp + Capj = 0._dp ENDIF ! Execution time end CALL cpu_time(t1_coll) tc_coll = tc_coll + (t1_coll - t0_coll) - END SUBROUTINE compute_TColl + END SUBROUTINE compute_Capj !******************************************************************************! !! Lenard Bernstein collision operator !******************************************************************************! SUBROUTINE compute_lenard_bernstein + USE grid, ONLY: ias,iae, ips,ipe, ijs,ije, parray, jarray, & + ikys,ikye, ikxs,ikxe, izs,ize, kparray + USE species, ONLY: sigma2_tau_o2, nu_ab + USE time_integration, ONLY: updatetlevel + USE array, ONLY: Capj + USE fields, ONLY: moments IMPLICIT NONE COMPLEX(dp) :: TColl_ - IF (KIN_E) THEN - DO ip = ips_e,ipe_e;DO ij = ijs_e,ije_e - DO ikx = ikxs, ikxe;DO iky = ikys, ikye; DO iz = izs,ize - CALL LenardBernstein_e(ip,ij,iky,ikx,iz,TColl_) - TColl_e(ip,ij,iky,ikx,iz) = TColl_ - ENDDO;ENDDO;ENDDO - ENDDO;ENDDO - ENDIF - DO ip = ips_i,ipe_i;DO ij = ijs_i,ije_i - DO ikx = ikxs, ikxe;DO iky = ikys, ikye; DO iz = izs,ize - CALL LenardBernstein_i(ip,ij,iky,ikx,iz,TColl_) - TColl_i(ip,ij,iky,ikx,iz) = TColl_ - ENDDO;ENDDO;ENDDO - ENDDO;ENDDO - END SUBROUTINE compute_lenard_bernstein - - !******************************************************************************! - !! for electrons - !******************************************************************************! 
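
collision_readinputs now accepts several spellings per operator, folds them onto a two-letter tag, and sets cosolver_coll and INTERSPECIES accordingly; compute_Capj then dispatches on that tag and accumulates the operator's wall time with cpu_time. A minimal sketch of the normalization step is given below, covering only a subset of the accepted spellings; the module and function names are hypothetical.

! Editor's sketch (not part of the patch): normalize a collision-model string to its
! two-letter tag, mirroring (a subset of) the SELECT CASE in collision_readinputs.
MODULE coll_tag_sketch
  IMPLICIT NONE
CONTAINS
  FUNCTION normalize_co(model) RESULT(tag)
    CHARACTER(len=*), INTENT(IN) :: model
    CHARACTER(len=2) :: tag
    SELECT CASE (TRIM(model))
    CASE ('LB','lenard-bernstein');                 tag = 'LB'  ! Lenard-Bernstein
    CASE ('DG','dougherty');                        tag = 'DG'  ! Dougherty
    CASE ('SG','sugama');                           tag = 'SG'  ! Sugama (COSOlver matrices)
    CASE ('LR','lorentz','pitch-angle');            tag = 'LR'  ! Lorentz / pitch angle
    CASE ('LD','landau','coulomb','fokker-planck'); tag = 'LD'  ! Landau (COSOlver matrices)
    CASE ('none','hypcoll','dvpar4');               tag = 'NO'  ! no physical collisions
    CASE DEFAULT
      ERROR STOP '>> ERROR << collision model not recognized!!'
    END SELECT
  END FUNCTION normalize_co
END MODULE coll_tag_sketch

PROGRAM coll_tag_demo
  USE coll_tag_sketch, ONLY: normalize_co
  IMPLICIT NONE
  PRINT *, normalize_co('dougherty')   ! prints DG
END PROGRAM coll_tag_demo
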
- SUBROUTINE LenardBernstein_e(ip_,ij_,iky_,ikx_,iz_,TColl_) - IMPLICIT NONE - INTEGER, INTENT(IN) :: ip_,ij_,iky_,ikx_,iz_ - COMPLEX(dp), INTENT(OUT) :: TColl_ - - REAL(dp) :: j_dp, p_dp, be_2, kp - INTEGER :: eo_ - - !** Auxiliary variables ** - eo_ = MODULO(parray_e(ip_),2) - p_dp = REAL(parray_e(ip_),dp) - j_dp = REAL(jarray_e(ij_),dp) - kp = kparray(iky_,ikx_,iz_,eo_) - be_2 = kp**2 * sigmae2_taue_o2 ! this is (be/2)^2 - eo_ = MODULO(parray_e(ip_),2) - - !** Assembling collison operator ** - ! -nuee (p + 2j) Nepj - TColl_ = -nu_ee * (p_dp + 2._dp*j_dp)*moments_e(ip_,ij_,iky_,ikx_,iz_,updatetlevel) - IF(gyrokin_CO) THEN - TColl_ = TColl_ - nu_ee *2._dp*be_2*moments_e(ip_,ij_,iky_,ikx_,iz_,updatetlevel) - ENDIF - END SUBROUTINE LenardBernstein_e - - !******************************************************************************! - !! for ions - !******************************************************************************! - SUBROUTINE LenardBernstein_i(ip_,ij_,iky_,ikx_,iz_,TColl_) - USE fields, ONLY: moments_i - USE grid, ONLY: parray_i, jarray_i - USE basic - USE model, ONLY: sigmai2_taui_o2, nu_i - USE time_integration, ONLY : updatetlevel - IMPLICIT NONE - INTEGER, INTENT(IN) :: ip_,ij_,iky_,ikx_,iz_ - COMPLEX(dp), INTENT(OUT) :: TColl_ - - REAL(dp) :: j_dp, p_dp, kp, bi_2 - INTEGER :: eo_ - + REAL(dp) :: j_dp, p_dp, ba_2, kp + INTEGER :: iz,ikx,iky,ij,ip,ia,eo + DO iz = izs,ize + DO ikx = ikxs, ikxe; + DO iky = ikys, ikye; + DO ij = ijs,ije + DO ip = ips,ipe; + DO ia = ias, iae !** Auxiliary variables ** - eo_ = MODULO(parray_i(ip_),2) - p_dp = REAL(parray_i(ip_),dp) - j_dp = REAL(jarray_i(ij_),dp) - kp = kparray(iky_,ikx_,iz_,eo_) - bi_2 = kp**2 * sigmai2_taui_o2 ! this is (be/2)^2 + eo = MODULO(parray(ip),2) + p_dp = REAL(parray(ip),dp) + j_dp = REAL(jarray(ij),dp) + kp = kparray(iky,ikx,iz,eo) + ba_2 = kp**2 * sigma2_tau_o2(ia) ! this is (ba/2)^2 !** Assembling collison operator ** - ! -nuii (p + 2j) Nipj - TColl_ = -nu_i * (p_dp + 2._dp*j_dp)*moments_i(ip_,ij_,iky_,ikx_,iz_,updatetlevel) - IF(gyrokin_CO) THEN - TColl_ = TColl_ - nu_i *2._dp*bi_2*moments_i(ip_,ij_,iky_,ikx_,iz_,updatetlevel) + ! -nuee (p + 2j) Nepj + TColl_ = -nu_ab(ia,ia) * (p_dp + 2._dp*j_dp)*moments(ia,ip,ij,iky,ikx,iz,updatetlevel) + IF(GK_CO) THEN + TColl_ = TColl_ - nu_ab(ia,ia) *2._dp*ba_2*moments(ia,ip,ij,iky,ikx,iz,updatetlevel) ENDIF - END SUBROUTINE LenardBernstein_i + Capj(ia,ip,ij,iky,ikx,iz) = TColl_ + ENDDO + ENDDO + ENDDO + ENDDO + ENDDO + ENDDO + END SUBROUTINE compute_lenard_bernstein !******************************************************************************! !! Doughtery collision operator !******************************************************************************! 
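
In the merged species loop the Lenard-Bernstein operator stays diagonal in (p,j): each moment is damped by -nu_aa*(p + 2j), plus an extra -2*nu_aa*(b_a/2)^2 term in the gyrokinetic variant, with (b_a/2)^2 = kperp^2 * sigma2_tau_o2(ia). A small sketch of that prefactor as a standalone function, mirroring the loop body above (function and argument names are illustrative):

! Editor's sketch (not part of the patch): the diagonal Lenard-Bernstein factor
! applied to N_a^{pj} in compute_lenard_bernstein above.
PROGRAM lb_factor_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)
  PRINT *, 'GK LB factor for p=2, j=1, kperp=0.5: ', &
           lb_factor(0.1_dp, 2, 1, 0.5_dp, 0.5_dp, .TRUE.)
CONTAINS
  PURE FUNCTION lb_factor(nu_aa, p, j, kp, sigma2_tau_o2, gk) RESULT(f)
    REAL(dp), INTENT(IN) :: nu_aa, kp, sigma2_tau_o2
    INTEGER,  INTENT(IN) :: p, j
    LOGICAL,  INTENT(IN) :: gk
    REAL(dp) :: f, ba_2
    ba_2 = kp**2 * sigma2_tau_o2                    ! this is (b_a/2)^2
    f = -nu_aa * (REAL(p,dp) + 2._dp*REAL(j,dp))    ! drift-kinetic damping
    IF (gk) f = f - nu_aa*2._dp*ba_2                ! extra gyro-diffusion in the GK variant
  END FUNCTION lb_factor
END PROGRAM lb_factor_sketch
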
SUBROUTINE compute_dougherty IMPLICIT NONE - IF(gyrokin_CO) THEN - if(KIN_E) & - CALL DoughertyGK_aa(ips_e,ipe_e,ijs_e,ije_e,ijgs_e,ijge_e,ip0_e,ip1_e,ip2_e,Jmaxe,& - parray_e(ips_e:ipe_e),jarray_e(ijs_e:ije_e),& - kernel_e(ijgs_e:ijge_e,ikys:ikye,ikxs:ikxe,izs:ize,0:1),& - nadiab_moments_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize),& - nu_ee,sigmae2_taue_o2,sqrt_sigmae2_taue_o2,TColl_e) - CALL DoughertyGK_aa(ips_i,ipe_i,ijs_i,ije_i,ijgs_i,ijge_i,ip0_i,ip1_i,ip2_i,Jmaxi,& - parray_i(ips_i:ipe_i),jarray_i(ijs_i:ije_i),& - kernel_i(ijgs_i:ijge_i,ikys:ikye,ikxs:ikxe,izs:ize,0:1),& - nadiab_moments_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize),& - nu_i,sigmai2_taui_o2,sqrt_sigmai2_taui_o2,TColl_i) + IF(GK_CO) THEN + CALL Dougherty_GK ELSE - if(KIN_E) & - CALL DoughertyDK_aa(ips_e,ipe_e,ijs_e,ije_e,ip0_e,ip1_e,ip2_e,& - parray_e(ips_e:ipe_e),jarray_e(ijs_e:ije_e),& - moments_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel),& - nu_ee,TColl_e) - CALL DoughertyDK_aa(ips_i,ipe_i,ijs_i,ije_i,ip0_i,ip1_i,ip2_i,& - parray_i(ips_i:ipe_i),jarray_i(ijs_i:ije_i),& - moments_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel),& - nu_i,TColl_i) + CALL Dougherty_DK ENDIF END SUBROUTINE compute_dougherty + !******************************************************************************! - !! Doughtery driftkinetic collision operator (like-species) + !! Doughtery drift-kinetic collision operator (species like) !******************************************************************************! - SUBROUTINE DoughertyDK_aa(ips_,ipe_,ijs_,ije_,ip0_,ip1_,ip2_,& - parray_,jarray_,moments_,nu_,Tcoll_) - + SUBROUTINE Dougherty_DK + USE grid, ONLY: ias,iae, ips,ipe, ijs,ije, parray, jarray, & + ip0, ip1, ip2, & + ikys,ikye, ikxs,ikxe, izs,ize + USE species, ONLY: nu_ab + USE time_integration, ONLY: updatetlevel + USE array, ONLY: Capj + USE fields, ONLY: moments + USE prec_const, ONLY: dp, SQRT2, twothird IMPLICIT NONE - !! INPUTS - INTEGER, INTENT(IN) :: ips_, ipe_, ijs_, ije_ - INTEGER, INTENT(IN) :: ip0_, ip1_, ip2_ - INTEGER, DIMENSION(ips_:ipe_), INTENT(IN) :: parray_ - INTEGER, DIMENSION(ijs_:ije_), INTENT(IN) :: jarray_ - COMPLEX(dp), DIMENSION(ips_:ipe_,ijs_:ije_,ikys:ikye,ikxs:ikxe,izs:ize),INTENT(IN) :: moments_ - REAL(dp), INTENT(IN) :: nu_ - !! OUTPUT - COMPLEX(dp), DIMENSION(ips_:ipe_, ijs_:ije_,ikys:ikye,ikxs:ikxe, izs:ize), INTENT(OUT) :: TColl_ - !! Local variables - REAL(dp) :: j_dp, p_dp COMPLEX(dp) :: Tmp - DO iz = izs,ize - DO iky = ikys, ikye; - DO ikx = ikxs, ikxe; - DO ij = ijs_,ije_ - DO ip = ips_,ipe_; - !** Auxiliary variables ** - p_dp = REAL(parray_(ip),dp) - j_dp = REAL(jarray_(ij),dp) - !** Assembling collison operator ** - Tmp = -(p_dp + 2._dp*j_dp)*moments_(ip,ij,iky,ikx,iz) - IF( (p_dp .EQ. 1._dp) .AND. (j_dp .EQ. 0._dp)) THEN !Ce10 - Tmp = Tmp + moments_(ip1_,1,iky,ikx,iz) - ELSEIF( (p_dp .EQ. 2._dp) .AND. (j_dp .EQ. 0._dp)) THEN ! Ce20 - Tmp = Tmp + twothird*moments_(ip2_,1,iky,ikx,iz) & - - SQRT2*twothird*moments_(ip0_,2,iky,ikx,iz) - ELSEIF( (p_dp .EQ. 0._dp) .AND. (j_dp .EQ. 1._dp)) THEN ! 
Ce01 - Tmp = Tmp + 2._dp*twothird*moments_(ip0_,2,iky,ikx,iz) & - - SQRT2*twothird*moments_(ip2_,1,iky,ikx,iz) - ENDIF - TColl_(ip,ij,iky,ikx,iz) = nu_ * Tmp - ENDDO;ENDDO;ENDDO + INTEGER :: iz,ikx,iky,ij,ip,ia + REAL(dp) :: j_dp, p_dp + DO ia = ias,iae + DO iz = izs,ize; DO iky = ikys,ikye; DO ikx = ikxs,ikxe + DO ij = ijs,ije; DO ip = ips,ipe + !** Auxiliary variables ** + p_dp = REAL(parray(ip),dp) + j_dp = REAL(jarray(ij),dp) + !** Assembling collison operator ** + Tmp = -(p_dp + 2._dp*j_dp)*moments(ia,ip,ij,iky,ikx,iz,updatetlevel) + IF( (p_dp .EQ. 1._dp) .AND. (j_dp .EQ. 0._dp)) THEN !Ce10 + Tmp = Tmp + moments(ia,ip1,1,iky,ikx,iz,updatetlevel) + ELSEIF( (p_dp .EQ. 2._dp) .AND. (j_dp .EQ. 0._dp)) THEN ! Ce20 + Tmp = Tmp + twothird*moments(ia,ip2,1,iky,ikx,iz,updatetlevel) & + - SQRT2*twothird*moments(ia,ip0,2,iky,ikx,iz,updatetlevel) + ELSEIF( (p_dp .EQ. 0._dp) .AND. (j_dp .EQ. 1._dp)) THEN ! Ce01 + Tmp = Tmp + 2._dp*twothird*moments(ia,ip0,2,iky,ikx,iz,updatetlevel) & + - SQRT2*twothird*moments(ia,ip2,1,iky,ikx,iz,updatetlevel) + ENDIF + Capj(ia,ip,ij,iky,ikx,iz) = nu_ab(ia,ia) * Tmp ENDDO;ENDDO - END SUBROUTINE DoughertyDK_aa - + ENDDO;ENDDO;ENDDO + ENDDO + END SUBROUTINE Dougherty_DK !******************************************************************************! !! Doughtery gyrokinetic collision operator (species like) !******************************************************************************! - SUBROUTINE DoughertyGK_aa(ips_,ipe_,ijs_,ije_,ijgs_,ijge_,ip0_,ip1_,ip2_,jmax_,& - parray_,jarray_,kernel_,nadiab_moments_,& - nu_,sigmaa2_taua_o2, sqrt_sigmaa2_taua_o2,Tcoll_) + SUBROUTINE Dougherty_GK + USE grid, ONLY: ias,iae, ips,ipe, ijs,ije, parray, jarray, & + ip0, ip1, ip2, jmax, & + ikys,ikye, ikxs,ikxe, izs,ize, kparray + USE species, ONLY: sigma2_tau_o2, sqrt_sigma2_tau_o2, nu_ab + USE array, ONLY: Capj, nadiab_moments, kernel + USE prec_const, ONLY: dp, SQRT2, twothird IMPLICIT NONE - !! INPUTS - INTEGER, INTENT(IN) :: ips_, ipe_, ijs_, ije_, ijgs_, ijge_ - INTEGER, INTENT(IN) :: ip0_, ip1_, ip2_,jmax_ - INTEGER, DIMENSION(ips_:ipe_), INTENT(IN) :: parray_ - INTEGER, DIMENSION(ijs_:ije_), INTENT(IN) :: jarray_ - REAL(dp), DIMENSION(ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge,0:1), INTENT(IN) :: kernel_ - COMPLEX(dp), DIMENSION(ips_:ipe_,ijs_:ije_,ikys:ikye,ikxs:ikxe,izs:ize),INTENT(IN) :: nadiab_moments_ - REAL(dp), INTENT(IN) :: nu_, sigmaa2_taua_o2, sqrt_sigmaa2_taua_o2 - !! OUTPUT - COMPLEX(dp), DIMENSION(ips_:ipe_,ijs_:ije_,ikys:ikye,ikxs:ikxe,izs:ize), INTENT(OUT) :: TColl_ !! Local variables - COMPLEX(dp) :: dens,upar,uperp,Tpar, Tperp, Temp + COMPLEX(dp) :: dens_,upar_,uperp_,Tpar_,Tperp_,Temp_ COMPLEX(dp) :: nadiab_moment_0j, Tmp REAL(dp) :: Knp0, Knp1, Knm1, kp - INTEGER :: in, eo + INTEGER :: iz,ikx,iky,ij,ip,ia,eo,in REAL(dp) :: n_dp, j_dp, p_dp, ba, ba_2 - DO iz = izs,ize - DO iky = ikys, ikye; - DO ikx = ikxs, ikxe; - DO ij = ijs_,ije_ - DO ip = ips_,ipe_; + DO ia = ias,iae + DO iz = izs,ize; DO iky = ikys, ikye; DO ikx = ikxs, ikxe; + DO ij = ijs,ije; DO ip = ips,ipe; !** Auxiliary variables ** - p_dp = REAL(parray_(ip),dp) - eo = MODULO(parray_(ip),2) - j_dp = REAL(jarray_(ij),dp) + p_dp = REAL(parray(ip),dp) + j_dp = REAL(jarray(ij),dp) + eo = MODULO(parray(ip),2) kp = kparray(iky,ikx,iz,eo) - ba_2 = kp**2 * sigmaa2_taua_o2 ! this is (l_a/2)^2 - ba = 2_dp*kp * sqrt_sigmaa2_taua_o2 ! this is l_a - + ba_2 = kp**2 * sigma2_tau_o2(ia) ! this is (l_a/2)^2 + ba = 2_dp*kp * sqrt_sigma2_tau_o2(ia) ! this is l_a !** Assembling collison operator ** ! 
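
Dougherty_DK above applies the same -(p+2j) damping but restores momentum and energy through the three lowest moments: the (p,j)=(1,0) equation gets N^{1,0} back, while (2,0) and (0,1) exchange 2/3-weighted combinations of N^{2,0} and N^{0,1} (the SQRT2 factors come from the Hermite normalization). A compact sketch of just those corrections, with the branch structure copied from the subroutine; N10, N20, N01 are shorthand for the corresponding moments and the input values are toys.

! Editor's sketch (not part of the patch): the conserving corrections added by
! Dougherty_DK on top of the -(p+2j) damping.
PROGRAM dough_dk_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)
  REAL(dp), PARAMETER :: SQRT2 = SQRT(2._dp), twothird = 2._dp/3._dp
  COMPLEX(dp) :: N10, N20, N01
  N10 = (1._dp,0._dp); N20 = (0.5_dp,0._dp); N01 = (0.2_dp,0._dp)
  PRINT *, 'correction to (p,j)=(2,0): ', dk_correction(2, 0, N10, N20, N01)
CONTAINS
  PURE FUNCTION dk_correction(p, j, N10, N20, N01) RESULT(c)
    INTEGER,     INTENT(IN) :: p, j
    COMPLEX(dp), INTENT(IN) :: N10, N20, N01
    COMPLEX(dp) :: c
    c = (0._dp, 0._dp)
    IF     (p == 1 .AND. j == 0) THEN      ! momentum restoring
      c = N10
    ELSEIF (p == 2 .AND. j == 0) THEN      ! parallel energy restoring
      c = twothird*N20 - SQRT2*twothird*N01
    ELSEIF (p == 0 .AND. j == 1) THEN      ! perpendicular energy restoring
      c = 2._dp*twothird*N01 - SQRT2*twothird*N20
    ENDIF
  END FUNCTION dk_correction
END PROGRAM dough_dk_sketch
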
Velocity-space diffusion (similar to Lenard Bernstein) ! -nu (p + 2j + b^2/2) Napj - Tmp = -(p_dp + 2._dp*j_dp + 2._dp*ba_2)*nadiab_moments_(ip,ij,iky,ikx,iz) - + Tmp = -(p_dp + 2._dp*j_dp + 2._dp*ba_2)*nadiab_moments(ia,ip,ij,iky,ikx,iz) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Non zero term for p = 0 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! IF( p_dp .EQ. 0 ) THEN ! Kronecker p0 !** build required fluid moments ** - dens = 0._dp - upar = 0._dp; uperp = 0._dp - Tpar = 0._dp; Tperp = 0._dp - DO in = 1,jmax_+1 + dens_ = 0._dp + upar_ = 0._dp; uperp_ = 0._dp + Tpar_ = 0._dp; Tperp_ = 0._dp + DO in = 1,jmax+1 n_dp = REAL(in-1,dp) ! Store the kernels for sparing readings - Knp0 = Kernel_(in ,iky,ikx,iz,eo) - Knp1 = Kernel_(in+1,iky,ikx,iz,eo) - Knm1 = Kernel_(in-1,iky,ikx,iz,eo) + Knp0 = kernel(ia,in ,iky,ikx,iz,eo) + Knp1 = kernel(ia,in+1,iky,ikx,iz,eo) + Knm1 = kernel(ia,in-1,iky,ikx,iz,eo) ! Nonadiabatic moments (only different from moments when p=0) - nadiab_moment_0j = nadiab_moments_(ip0_,in,iky,ikx,iz) + nadiab_moment_0j = nadiab_moments(ia,ip0,in,iky,ikx,iz) ! Density - dens = dens + Knp0 * nadiab_moment_0j + dens_ = dens_ + Knp0 * nadiab_moment_0j ! Perpendicular velocity - uperp = uperp + ba*0.5_dp*(Knp0 - Knm1) * nadiab_moment_0j + uperp_ = uperp_ + ba*0.5_dp*(Knp0 - Knm1) * nadiab_moment_0j ! Parallel temperature - Tpar = Tpar + Knp0 * (SQRT2*nadiab_moments_(ip2_,in,iky,ikx,iz) + nadiab_moment_0j) + Tpar_ = Tpar_ + Knp0 * (SQRT2*nadiab_moments(ia,ip2,in,iky,ikx,iz) + nadiab_moment_0j) ! Perpendicular temperature - Tperp = Tperp + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j + Tperp_ = Tperp_ + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j ENDDO - Temp = (Tpar + 2._dp*Tperp)/3._dp - dens + Temp_ = (Tpar_ + 2._dp*Tperp_)/3._dp - dens_ ! Add energy restoring term - Tmp = Tmp + Temp* 4._dp * j_dp * Kernel_(ij ,iky,ikx,iz,eo) - Tmp = Tmp - Temp* 2._dp * (j_dp + 1._dp) * Kernel_(ij+1,iky,ikx,iz,eo) - Tmp = Tmp - Temp* 2._dp * j_dp * Kernel_(ij-1,iky,ikx,iz,eo) - Tmp = Tmp + uperp*ba* (Kernel_(ij,iky,ikx,iz,eo) - Kernel_(ij-1,iky,ikx,iz,eo)) + Tmp = Tmp + Temp_* 4._dp * j_dp * kernel(ia,ij ,iky,ikx,iz,eo) + Tmp = Tmp - Temp_* 2._dp * (j_dp + 1._dp) * kernel(ia,ij+1,iky,ikx,iz,eo) + Tmp = Tmp - Temp_* 2._dp * j_dp * kernel(ia,ij-1,iky,ikx,iz,eo) + Tmp = Tmp + uperp_*ba* (kernel(ia,ij,iky,ikx,iz,eo) - kernel(ia,ij-1,iky,ikx,iz,eo)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Non zero term for p = 1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ELSEIF( p_dp .eq. 1 ) THEN ! kronecker p1 !** build required fluid moments ** - upar = 0._dp - DO in = 1,jmax_+1 + upar_ = 0._dp + DO in = 1,jmax+1 ! Parallel velocity - upar = upar + Kernel_(in,iky,ikx,iz,eo) * nadiab_moments_(ip1_,in,iky,ikx,iz) + upar_ = upar_ + kernel(ia,in,iky,ikx,iz,eo) * nadiab_moments(ia,ip1,in,iky,ikx,iz) ENDDO - Tmp = Tmp + upar*Kernel_(ij,iky,ikx,iz,eo) + Tmp = Tmp + upar_*kernel(ia,ij,iky,ikx,iz,eo) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Non zero term for p = 2 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ELSEIF( p_dp .eq. 2 ) THEN ! 
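
For the gyrokinetic Dougherty operator the conserved fields are gyroaveraged, so in the p=0 branch above the density, perpendicular velocity and parallel/perpendicular temperatures are rebuilt as kernel-weighted Laguerre sums over the non-adiabatic N^{0n} (and N^{2n} for Tpar), with T = (Tpar + 2*Tperp)/3 - dens, before the restoring terms are assembled. The sketch below reproduces those sums for a single (ky,kx,z) point, indexing the kernels directly by Laguerre order with a zero pad at n = -1; all names and toy input values are illustrative stand-ins for kernel and nadiab_moments.

! Editor's sketch (not part of the patch): kernel-weighted fluid moments used by
! Dougherty_GK at p=0, for one Fourier/z point.
PROGRAM gk_moments_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)
  INTEGER, PARAMETER :: jmax = 2
  REAL(dp)    :: K(-1:jmax+1)
  COMPLEX(dp) :: N0(0:jmax), N2(0:jmax), dens, uperp, Temp
  K  = (/0._dp, 0.8_dp, 0.15_dp, 0.04_dp, 0.01_dp/)   ! toy kernel values, K(-1)=0 padding
  N0 = (1._dp,0._dp); N2 = (0.1_dp,0._dp)
  CALL gk_fluid_moments(jmax, 0.3_dp, K, N0, N2, dens, uperp, Temp)
  PRINT *, dens, uperp, Temp
CONTAINS
  SUBROUTINE gk_fluid_moments(jmax_, ba, K, N0, N2, dens, uperp, Temp)
    INTEGER,     INTENT(IN)  :: jmax_
    REAL(dp),    INTENT(IN)  :: ba                         ! l_a = 2*kp*sqrt(sigma_a^2 tau_a/2)
    REAL(dp),    INTENT(IN)  :: K(-1:jmax_+1)              ! kernels K_n by Laguerre order
    COMPLEX(dp), INTENT(IN)  :: N0(0:jmax_), N2(0:jmax_)   ! N_a^{0n}, N_a^{2n}
    COMPLEX(dp), INTENT(OUT) :: dens, uperp, Temp
    COMPLEX(dp) :: Tpar, Tperp
    REAL(dp)    :: nr, SQRT2
    INTEGER     :: n
    SQRT2 = SQRT(2._dp)
    dens = 0._dp; uperp = 0._dp; Tpar = 0._dp; Tperp = 0._dp
    DO n = 0, jmax_
      nr    = REAL(n, dp)
      dens  = dens  + K(n)*N0(n)                                   ! gyro-density
      uperp = uperp + ba*0.5_dp*(K(n) - K(n-1))*N0(n)              ! perpendicular velocity
      Tpar  = Tpar  + K(n)*(SQRT2*N2(n) + N0(n))                   ! parallel temperature
      Tperp = Tperp + ((2._dp*nr+1._dp)*K(n) - (nr+1._dp)*K(n+1) &
                        - nr*K(n-1))*N0(n)                         ! perpendicular temperature
    ENDDO
    Temp = (Tpar + 2._dp*Tperp)/3._dp - dens                       ! generalized temperature
  END SUBROUTINE gk_fluid_moments
END PROGRAM gk_moments_sketch
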
kronecker p2 !** build required fluid moments ** - dens = 0._dp - upar = 0._dp; uperp = 0._dp - Tpar = 0._dp; Tperp = 0._dp - DO in = 1,jmax_+1 + dens_ = 0._dp + upar_ = 0._dp; uperp_ = 0._dp + Tpar_ = 0._dp; Tperp_ = 0._dp + DO in = 1,jmax+1 n_dp = REAL(in-1,dp) ! Store the kernels for sparing readings - Knp0 = Kernel_(in ,iky,ikx,iz,eo) - Knp1 = Kernel_(in+1,iky,ikx,iz,eo) - Knm1 = Kernel_(in-1,iky,ikx,iz,eo) + Knp0 = kernel(ia,in ,iky,ikx,iz,eo) + Knp1 = kernel(ia,in+1,iky,ikx,iz,eo) + Knm1 = kernel(ia,in-1,iky,ikx,iz,eo) ! Nonadiabatic moments (only different from moments when p=0) - nadiab_moment_0j = nadiab_moments_(ip0_,in,iky,ikx,iz) + nadiab_moment_0j = nadiab_moments(ia,ip0,in,iky,ikx,iz) ! Density - dens = dens + Knp0 * nadiab_moment_0j + dens_ = dens_ + Knp0 * nadiab_moment_0j ! Parallel temperature - Tpar = Tpar + Knp0 * (SQRT2*nadiab_moments_(ip2_,in,iky,ikx,iz) + nadiab_moment_0j) + Tpar_ = Tpar_ + Knp0 * (SQRT2*nadiab_moments(ia,ip2,in,iky,ikx,iz) + nadiab_moment_0j) ! Perpendicular temperature - Tperp = Tperp + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j + Tperp_ = Tperp_ + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j ENDDO - Temp = (Tpar + 2._dp*Tperp)/3._dp - dens - Tmp = Tmp + Temp*SQRT2*Kernel_(ij,iky,ikx,iz,eo) + Temp_ = (Tpar_ + 2._dp*Tperp_)/3._dp - dens_ + Tmp = Tmp + Temp_*SQRT2*kernel(ia,ij,iky,ikx,iz,eo) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ENDIF ! Multiply by collision parameter - TColl_(ip,ij,iky,ikx,iz) = nu_ * Tmp + Capj(ia,ip,ij,iky,ikx,iz) = nu_ab(ia,ia) * Tmp ENDDO;ENDDO;ENDDO ENDDO;ENDDO - END SUBROUTINE DoughertyGK_aa - !******************************************************************************! - !! compute the collision terms in a (Np x Nj x Nkx x Nky) matrix all at once - !******************************************************************************! - SUBROUTINE compute_cosolver_coll - IMPLICIT NONE - COMPLEX(dp), DIMENSION(1:total_np_e) :: local_sum_e, buffer_e - COMPLEX(dp), DIMENSION(ips_e:ipe_e) :: TColl_distr_e - COMPLEX(dp), DIMENSION(1:total_np_i) :: local_sum_i, buffer_i - COMPLEX(dp), DIMENSION(ips_i:ipe_i) :: TColl_distr_i - COMPLEX(dp) :: TColl - DO iz = izs,ize - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - IF(KIN_E) THEN - DO ij = 1,Jmaxe+1 - ! Electrons - ! Loop over all p to compute sub collision term - DO ip = 1,total_np_e - CALL apply_COSOlver_mat_e(ip,ij,iky,ikx,iz,TColl) - local_sum_e(ip) = TColl - ENDDO - IF (num_procs_p .GT. 1) THEN - ! Sum up all the sub collision terms on root 0 - CALL MPI_REDUCE(local_sum_e, buffer_e, total_np_e, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr) - ! distribute the sum over the process among p - CALL MPI_SCATTERV(buffer_e, rcv_p_e, dsp_p_e, MPI_DOUBLE_COMPLEX,& - TColl_distr_e, local_np_e, MPI_DOUBLE_COMPLEX,& - 0, comm_p, ierr) - ELSE - TColl_distr_e = local_sum_e - ENDIF - ! Write in output variable - DO ip = ips_e,ipe_e - TColl_e(ip,ij,iky,ikx,iz) = TColl_distr_e(ip) - ENDDO - ENDDO - ENDIF - ! Ions - DO ij = 1,Jmaxi+1 - DO ip = 1,total_np_i - CALL apply_COSOlver_mat_i(ip,ij,iky,ikx,iz,TColl) - local_sum_i(ip) = TColl - ENDDO - IF (num_procs_p .GT. 1) THEN - ! Reduce the local_sums to root = 0 - CALL MPI_REDUCE(local_sum_i, buffer_i, total_np_i, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr) - ! buffer contains the entire collision term along p, we scatter it between - ! 
the other processes (use of scatterv since Pmax/Np is not an integer) - CALL MPI_SCATTERV(buffer_i, rcv_p_i, dsp_p_i, MPI_DOUBLE_COMPLEX,& - TColl_distr_i, local_np_i, MPI_DOUBLE_COMPLEX, & - 0, comm_p, ierr) - ELSE - TColl_distr_i = local_sum_i - ENDIF - ! Write in output variable - DO ip = ips_i,ipe_i - TColl_i(ip,ij,iky,ikx,iz) = TColl_distr_i(ip) - ENDDO - ENDDO - ENDDO - ENDDO ENDDO - END SUBROUTINE compute_cosolver_coll - - !******************************************************************************! - !!!!!!! Compute electron collision term - !******************************************************************************! - SUBROUTINE apply_COSOlver_mat_e(ip_,ij_,iky_,ikx_,iz_,TColl_) - IMPLICIT NONE - - INTEGER, INTENT(IN) :: ip_, ij_ ,ikx_, iky_, iz_ - COMPLEX(dp), INTENT(OUT) :: TColl_ - - INTEGER :: ip2,ij2, p_int,j_int, p2_int,j2_int, iky_C, ikx_C, iz_C - p_int = parray_e_full(ip_); j_int = jarray_e_full(ij_); - - IF (gyrokin_CO) THEN ! GK operator (k-dependant) - ikx_C = ikx_; iky_C = iky_; iz_C = iz_ - ELSE ! DK operator (only one mat for every k) - ikx_C = 1; iky_C = 1; iz_C = 1; - ENDIF - - TColl_ = 0._dp ! Initialization of the local sum - - ! sum the electron-self and electron-ion test terms - ploopee: DO ip2 = ips_e,ipe_e - p2_int = parray_e(ip2) - jloopee: DO ij2 = ijs_e,ije_e - j2_int = jarray_e(ij2) - IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxe))& - TColl_ = TColl_ + nadiab_moments_e(ip2,ij2,iky_,ikx_,iz_) & - *( nu_e * CeipjT(bare(p_int,j_int), bare(p2_int,j2_int),iky_C, ikx_C, iz_C) & - +nu_ee * Ceepj (bare(p_int,j_int), bare(p2_int,j2_int),iky_C, ikx_C, iz_C)) - ENDDO jloopee - ENDDO ploopee - - ! sum the electron-ion field terms - ploopei: DO ip2 = ips_i,ipe_i - p2_int = parray_i(ip2) - jloopei: DO ij2 = ijs_i,ije_i - j2_int = jarray_i(ij2) - IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxi))& - TColl_ = TColl_ + nadiab_moments_i(ip2,ij2,iky_,ikx_,iz_) & - *(nu_e * CeipjF(bare(p_int,j_int), bari(p2_int,j2_int),iky_C, ikx_C, iz_C)) - END DO jloopei - ENDDO ploopei - - END SUBROUTINE apply_COSOlver_mat_e - - !******************************************************************************! - !!!!!!! Compute ion collision term - !******************************************************************************! - SUBROUTINE apply_COSOlver_mat_i(ip_,ij_,iky_,ikx_,iz_,TColl_) - IMPLICIT NONE - INTEGER, INTENT(IN) :: ip_, ij_ ,ikx_, iky_, iz_ - COMPLEX(dp), INTENT(OUT) :: TColl_ - - INTEGER :: ip2,ij2, p_int,j_int, p2_int,j2_int, iky_C, ikx_C, iz_C - p_int = parray_i_full(ip_); j_int = jarray_i_full(ij_); - - IF (gyrokin_CO) THEN ! GK operator (k-dependant) - ikx_C = ikx_; iky_C = iky_; iz_C = iz_; - ELSE ! DK operator (only one mat for every k) - ikx_C = 1; iky_C = 1; iz_C = 1; - ENDIF - - TColl_ = 0._dp ! Initialization - ! sum the ion-self and ion-electron test terms - ploopii: DO ip2 = ips_i,ipe_i - p2_int = parray_i(ip2) - jloopii: DO ij2 = ijs_i,ije_i - j2_int = jarray_i(ij2) - IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxi))& - ! Ion-ion collision - TColl_ = TColl_ + nadiab_moments_i(ip2,ij2,iky_,ikx_,iz_) & - * nu_i * Ciipj (bari(p_int,j_int), bari(p2_int,j2_int), iky_C, ikx_C, iz_C) - IF(KIN_E) & ! Ion-electron collision test - TColl_ = TColl_ + nadiab_moments_i(ip2,ij2,iky_,ikx_,iz_) & - * nu_ie * CiepjT(bari(p_int,j_int), bari(p2_int,j2_int), iky_C, ikx_C, iz_C) - ENDDO jloopii - ENDDO ploopii - - IF(KIN_E) THEN ! Ion-electron collision field - ploopie: DO ip2 = ips_e,ipe_e ! 
sum the ion-electron field terms - p2_int = parray_e(ip2) - jloopie: DO ij2 = ijs_e,ije_e - j2_int = jarray_e(ij2) - IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxe))& - TColl_ = TColl_ + nadiab_moments_e(ip2,ij2,iky_,ikx_,iz_) & - *(nu_ie * CiepjF(bari(p_int,j_int), bare(p2_int,j2_int), iky_C, ikx_C, iz_C)) - ENDDO jloopie - ENDDO ploopie - ENDIF - END SUBROUTINE apply_COSOlver_mat_i - - !******************************************************************************! - !!!!!!! Load the collision matrix coefficient table from COSOlver results - !******************************************************************************! - SUBROUTINE load_COSOlver_mat ! Load a sub matrix from iCa files (works for pmaxa,jmaxa<=P_full,J_full) - IMPLICIT NONE - ! Indices for row and columns of the COSOlver matrix (4D compressed 2D matrices) - INTEGER :: irow_sub, irow_full, icol_sub, icol_full - INTEGER :: fid ! file indexation - - INTEGER :: ip_e, ij_e, il_e, ik_e, ikps_C, ikpe_C ! indices for electrons loops - REAL(dp), DIMENSION(2) :: dims_e - INTEGER :: pdime, jdime ! dimensions of the COSOlver matrices - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: Ceepj_full, CeipjT_full ! To load the entire matrix - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: CeipjF_full ! '' - REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: Ceepj__kp, CeipjT_kp ! To store the coeff that will be used along kperp - REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: CeipjF_kp ! '' - INTEGER :: ip_i, ij_i, il_i, ik_i ! same for ions - INTEGER, DIMENSION(2) :: dims_i - INTEGER :: pdimi, jdimi ! dimensions of the COSOlver matrices - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: Ciipj_full, CiepjT_full ! . - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: CiepjF_full ! . - REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: Ciipj__kp, CiepjT_kp ! . - REAL(dp), DIMENSION(:,:,:), ALLOCATABLE :: CiepjF_kp ! . - - REAL(dp), DIMENSION(:), ALLOCATABLE :: kp_grid_mat ! kperp grid of the matrices - INTEGER :: ikp_next, ikp_prev, nkp_mat, ikp_mat - REAL(dp) :: kp_max - REAL(dp) :: kperp_sim, kperp_mat, zerotoone - - CHARACTER(len=128) :: var_name, ikp_string - - !! Some terminal info - SELECT CASE (collision_model) - CASE ('SG') - IF (my_id .EQ. 0) WRITE(*,*) '=== Load Sugama matrix ===' - CASE ('LR') - IF (my_id .EQ. 0) WRITE(*,*) '=== Load Lorentz matrix ===' - CASE ('LD') - IF (my_id .EQ. 0) WRITE(*,*) '=== Load Landau matrix ===' - END SELECT - SELECT CASE (gyrokin_CO) - CASE (.true.) - IF (my_id .EQ. 0) WRITE(*,*) '..gyrokinetic model..' - CASE (.false.) - IF (my_id .EQ. 0) WRITE(*,*) '..driftkinetic model..' - END SELECT - - ! Opening the compiled cosolver matrices results - if(my_id.EQ.0)write(*,*) mat_file - CALL openf(mat_file,fid, 'r', 'D', mpicomm=comm_p); - - ! Get matrices dimensions (polynomials degrees and kperp grid) - CALL getarr(fid, '/dims_e', dims_e) ! Get the electron polynomial degrees - pdime = dims_e(1); jdime = dims_e(2); - CALL getarr(fid, '/dims_i', dims_i) ! Get the ion polynomial degrees - pdimi = dims_i(1); jdimi = dims_i(2); - IF ( ((pdime .LT. pmaxe) .OR. (jdime .LT. jmaxe)) .AND. (my_id .EQ. 0)) ERROR STOP '>> ERROR << Pe,Je Matrix too small' - IF ( ((pdimi .LT. pmaxi) .OR. (jdimi .LT. jmaxi)) .AND. (my_id .EQ. 0)) ERROR STOP '>> ERROR << Pi,Ji Matrix too small' - - CALL getsize(fid, '/coordkperp', nkp_mat) ! Get the dimension kperp grid of the matrices - CALL allocate_array(kp_grid_mat, 1,nkp_mat) - CALL getarr(fid, '/coordkperp', kp_grid_mat) - - kp_max = SQRT(kx_max**2+ky_max**2) - ! check that we have enough kperps mat - IF (LINEARITY .NE. 
'linear') THEN - IF ( (kp_grid_mat(nkp_mat) .LT. 2./3.*kp_max) .AND. (my_id .EQ. 0)) WRITE(*,*) 'warning: Matrix kperp grid too small' - ELSE - IF ( (kp_grid_mat(nkp_mat) .LT. kp_max) .AND. (my_id .EQ. 0)) WRITE(*,*) 'warning: Matrix kperp grid too small !!' - ENDIF - - IF (gyrokin_CO) THEN ! GK operator (k-dependant) - ikps_C = 1; ikpe_C = nkp_mat - ELSE ! DK operator (only one mat for all k) - ikps_C = 1; ikpe_C = 1 - ENDIF - - CALL allocate_array( Ceepj__kp, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikps_C,ikpe_C) - CALL allocate_array( CeipjT_kp, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikps_C,ikpe_C) - CALL allocate_array( CeipjF_kp, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikps_C,ikpe_C) - CALL allocate_array( Ciipj__kp, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikps_C,ikpe_C) - CALL allocate_array( CiepjT_kp, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikps_C,ikpe_C) - CALL allocate_array( CiepjF_kp, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikps_C,ikpe_C) - - DO ikp = ikps_C,ikpe_C ! Loop over everz kperp values - ! Kperp value in string format to select in cosolver hdf5 file - IF (gyrokin_CO) THEN - write(ikp_string,'(i5.5)') ikp-1 - ELSE - write(ikp_string,'(i5.5)') 0 - ENDIF - !!!!!!!!!!!! E-E matrices !!!!!!!!!!!! - ! get the self electron colision matrix - ! Allocate space for storing full collision matrix - CALL allocate_array( Ceepj_full, 1,(pdime+1)*(jdime+1), 1,(pdime+1)*(jdime+1)) - ! Naming of the array to load (kperp dependant) - WRITE(var_name,'(a,a)') TRIM(ADJUSTL(ikp_string)),'/Caapj/Ceepj' - CALL getarr(fid, var_name, Ceepj_full) ! get array (moli format) - ! Fill sub array with the usefull polynmial degrees only - DO ip_e = 0,pmaxe ! Loop over rows - DO ij_e = 0,jmaxe - irow_sub = (jmaxe +1)*ip_e + ij_e +1 - irow_full = (jdime +1)*ip_e + ij_e +1 - DO il_e = 0,pmaxe ! Loop over columns - DO ik_e = 0,jmaxe - icol_sub = (jmaxe +1)*il_e + ik_e +1 - icol_full = (jdime +1)*il_e + ik_e +1 - Ceepj__kp (irow_sub,icol_sub,ikp) = Ceepj_full (irow_full,icol_full) - ENDDO - ENDDO - ENDDO - ENDDO - DEALLOCATE(Ceepj_full) - - !!!!!!!!!!!!!!! I-I matrices !!!!!!!!!!!!!! - ! get the self electron colision matrix - CALL allocate_array( Ciipj_full, 1,(pdimi+1)*(jdimi+1), 1,(pdimi+1)*(jdimi+1)) - WRITE(var_name,'(a,a,a)') TRIM(ADJUSTL(ikp_string)),'/Caapj/Ciipj' - CALL getarr(fid, var_name, Ciipj_full) ! get array (moli format) - ! Fill sub array with only usefull polynmials degree - DO ip_i = 0,Pmaxi ! Loop over rows - DO ij_i = 0,Jmaxi - irow_sub = (Jmaxi +1)*ip_i + ij_i +1 - irow_full = (jdimi +1)*ip_i + ij_i +1 - DO il_i = 0,Pmaxi ! Loop over columns - DO ik_i = 0,Jmaxi - icol_sub = (Jmaxi +1)*il_i + ik_i +1 - icol_full = (jdimi +1)*il_i + ik_i +1 - Ciipj__kp (irow_sub,icol_sub,ikp) = Ciipj_full (irow_full,icol_full) - ENDDO - ENDDO - ENDDO - ENDDO - DEALLOCATE(Ciipj_full) - - IF(interspecies) THEN ! Pitch angle is only applied on like-species - !!!!!!!!!!!!!!! E-I matrices !!!!!!!!!!!!!! - ! Get test and field e-i collision matrices - CALL allocate_array( CeipjT_full, 1,(pdime+1)*(jdime+1), 1,(pdime+1)*(jdime+1)) - CALL allocate_array( CeipjF_full, 1,(pdime+1)*(jdime+1), 1,(pdimi+1)*(jdimi+1)) - WRITE(var_name,'(a,a)') TRIM(ADJUSTL(ikp_string)),'/Ceipj/CeipjT' - CALL getarr(fid, var_name, CeipjT_full) - WRITE(var_name,'(a,a)') TRIM(ADJUSTL(ikp_string)),'/Ceipj/CeipjF' - CALL getarr(fid, var_name, CeipjF_full) - ! Fill sub array with only usefull polynmials degree - DO ip_e = 0,pmaxe ! 
Loop over rows - DO ij_e = 0,jmaxe - irow_sub = (jmaxe +1)*ip_e + ij_e +1 - irow_full = (jdime +1)*ip_e + ij_e +1 - DO il_e = 0,pmaxe ! Loop over columns - DO ik_e = 0,jmaxe - icol_sub = (jmaxe +1)*il_e + ik_e +1 - icol_full = (jdime +1)*il_e + ik_e +1 - CeipjT_kp(irow_sub,icol_sub,ikp) = CeipjT_full(irow_full,icol_full) - ENDDO - ENDDO - DO il_i = 0,pmaxi ! Loop over columns - DO ik_i = 0,jmaxi - icol_sub = (Jmaxi +1)*il_i + ik_i +1 - icol_full = (jdimi +1)*il_i + ik_i +1 - CeipjF_kp(irow_sub,icol_sub,ikp) = CeipjF_full(irow_full,icol_full) - ENDDO - ENDDO - ENDDO - ENDDO - DEALLOCATE(CeipjF_full) - DEALLOCATE(CeipjT_full) - - !!!!!!!!!!!!!!! I-E matrices !!!!!!!!!!!!!! - ! get the Test and Back field electron ion collision matrix - CALL allocate_array( CiepjT_full, 1,(pdimi+1)*(jdimi+1), 1,(pdimi+1)*(jdimi+1)) - CALL allocate_array( CiepjF_full, 1,(pdimi+1)*(jdimi+1), 1,(pdime+1)*(jdime+1)) - WRITE(var_name,'(a,a,a)') TRIM(ADJUSTL(ikp_string)),'/Ciepj/CiepjT' - CALL getarr(fid, var_name, CiepjT_full) - WRITE(var_name,'(a,a,a)') TRIM(ADJUSTL(ikp_string)),'/Ciepj/CiepjF' - CALL getarr(fid, var_name, CiepjF_full) - ! Fill sub array with only usefull polynmials degree - DO ip_i = 0,Pmaxi ! Loop over rows - DO ij_i = 0,Jmaxi - irow_sub = (Jmaxi +1)*ip_i + ij_i +1 - irow_full = (jdimi +1)*ip_i + ij_i +1 - DO il_i = 0,Pmaxi ! Loop over columns - DO ik_i = 0,Jmaxi - icol_sub = (Jmaxi +1)*il_i + ik_i +1 - icol_full = (jdimi +1)*il_i + ik_i +1 - CiepjT_kp(irow_sub,icol_sub,ikp) = CiepjT_full(irow_full,icol_full) - ENDDO - ENDDO - DO il_e = 0,pmaxe ! Loop over columns - DO ik_e = 0,jmaxe - icol_sub = (jmaxe +1)*il_e + ik_e +1 - icol_full = (jdime +1)*il_e + ik_e +1 - CiepjF_kp(irow_sub,icol_sub,ikp) = CiepjF_full(irow_full,icol_full) - ENDDO - ENDDO - ENDDO - ENDDO - DEALLOCATE(CiepjF_full) - DEALLOCATE(CiepjT_full) - ELSE - CeipjT_kp = 0._dp; CeipjF_kp = 0._dp; CiepjT_kp = 0._dp; CiepjF_kp = 0._dp; - ENDIF - ENDDO - CALL closef(fid) - - IF (gyrokin_CO) THEN ! Interpolation of the kperp matrix values on kx ky grid - IF (my_id .EQ. 0 ) WRITE(*,*) '...Interpolation from matrices kperp to simulation kx,ky...' - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - DO iz = izs,ize - ! Check for nonlinear case if we are in the anti aliased domain or the filtered one - kperp_sim = MIN(kparray(iky,ikx,iz,0),collision_kcut) ! current simulation kperp - ! Find the interval in kp grid mat where kperp_sim is contained - ! Loop over the whole kp mat grid to find the smallest kperp that is - ! larger than the current kperp_sim (brute force...) - DO ikp=1,nkp_mat - ikp_mat = ikp ! the first indice of the interval (k0) - kperp_mat = kp_grid_mat(ikp) - IF(kperp_mat .GT. kperp_sim) EXIT ! a matrix with kperp2 > current kx2 + ky2 has been found - ENDDO - ! Interpolation - ! interval boundaries - ikp_next = ikp_mat !index of k1 (larger than kperp_sim thanks to previous loop) - ikp_prev = ikp_mat - 1 !index of k0 (smaller neighbour to interpolate inbetween) - if ( (kp_grid_mat(ikp_prev) .GT. kperp_sim) .OR. (kp_grid_mat(ikp_next) .LT. kperp_sim) ) THEN - ! write(*,*) 'Warning, linear interp of collision matrix failed!! ' - ! write(*,*) kp_grid_mat(ikp_prev), '<', kperp_sim, '<', kp_grid_mat(ikp_next) - ENDIF - ! 0->1 variable for linear interp, i.e. zero2one = (k-k0)/(k1-k0) - zerotoone = MIN(1._dp,(kperp_sim - kp_grid_mat(ikp_prev))/(kp_grid_mat(ikp_next) - kp_grid_mat(ikp_prev))) - ! 
Linear interpolation between previous and next kperp matrix values - Ceepj (:,:,iky,ikx,iz) = (Ceepj__kp(:,:,ikp_next) - Ceepj__kp(:,:,ikp_prev))*zerotoone + Ceepj__kp(:,:,ikp_prev) - Ciipj (:,:,iky,ikx,iz) = (Ciipj__kp(:,:,ikp_next) - Ciipj__kp(:,:,ikp_prev))*zerotoone + Ciipj__kp(:,:,ikp_prev) - IF(interspecies) THEN - CeipjT(:,:,iky,ikx,iz) = (CeipjT_kp(:,:,ikp_next) - CeipjT_kp(:,:,ikp_prev))*zerotoone + CeipjT_kp(:,:,ikp_prev) - CeipjF(:,:,iky,ikx,iz) = (CeipjF_kp(:,:,ikp_next) - CeipjF_kp(:,:,ikp_prev))*zerotoone + CeipjF_kp(:,:,ikp_prev) - CiepjT(:,:,iky,ikx,iz) = (CiepjT_kp(:,:,ikp_next) - CiepjT_kp(:,:,ikp_prev))*zerotoone + CiepjT_kp(:,:,ikp_prev) - CiepjF(:,:,iky,ikx,iz) = (CiepjF_kp(:,:,ikp_next) - CiepjF_kp(:,:,ikp_prev))*zerotoone + CiepjF_kp(:,:,ikp_prev) - ELSE - CeipjT(:,:,iky,ikx,iz) = 0._dp - CeipjF(:,:,iky,ikx,iz) = 0._dp - CiepjT(:,:,iky,ikx,iz) = 0._dp - CiepjF(:,:,iky,ikx,iz) = 0._dp - ENDIF - ENDDO - ENDDO - ENDDO - ELSE ! DK -> No kperp dep, copy simply to final collision matrices - Ceepj (:,:,1,1,1) = Ceepj__kp(:,:,1) - CeipjT(:,:,1,1,1) = CeipjT_kp(:,:,1) - CeipjF(:,:,1,1,1) = CeipjF_kp(:,:,1) - Ciipj (:,:,1,1,1) = Ciipj__kp(:,:,1) - CiepjT(:,:,1,1,1) = CiepjT_kp(:,:,1) - CiepjF(:,:,1,1,1) = CiepjF_kp(:,:,1) - ENDIF - ! Deallocate auxiliary variables - DEALLOCATE (Ceepj__kp); DEALLOCATE (CeipjT_kp); DEALLOCATE (CeipjF_kp) - DEALLOCATE (Ciipj__kp); DEALLOCATE (CiepjT_kp); DEALLOCATE (CiepjF_kp) - - IF( .NOT. interspecies ) THEN - IF(my_id.EQ.0) write(*,*) "--Like Species operator--" - CeipjF = 0._dp; - CeipjT = 0._dp; - CiepjF = 0._dp; - CiepjT = 0._dp; - ENDIF - - IF (my_id .EQ. 0) WRITE(*,*) '============DONE===========' - - END SUBROUTINE load_COSOlver_mat - !******************************************************************************! + END SUBROUTINE Dougherty_GK end module collision diff --git a/src/control.F90 b/src/control.F90 index d69d991c..944daf4c 100644 --- a/src/control.F90 +++ b/src/control.F90 @@ -1,71 +1,72 @@ SUBROUTINE control ! Control the run - use basic - use prec_const + use basic, ONLY: str,daytim,speak,basic_data,start,t0_step,t1_step,tc_step,& + nlend,step,increase_step,increase_time,increase_cstep + use prec_const, ONLY: dp, stdout + USE parallel, ONLY: ppinit + USE mpi IMPLICIT NONE REAL(dp) :: t_init_diag_0, t_init_diag_1 - + INTEGER :: ierr CALL cpu_time(start) !________________________________________________________________________________ ! 1. Prologue ! 1.1 Initialize the parallel environment CALL ppinit - IF (my_id .EQ. 0) WRITE(*,'(a/)') 'MPI initialized' + CALL speak('MPI initialized') CALL daytim('Start at ') ! 1.2 Define data specific to run - IF (my_id .EQ. 0) WRITE(*,*) 'Load basic data...' + CALL speak( 'Load basic data...') CALL basic_data ! CALL mpi_barrier(MPI_COMM_WORLD, ierr) - IF (my_id .EQ. 0) WRITE(*,'(a/)') '...basic data loaded.' + CALL speak('...basic data loaded.') ! 1.3 Read input parameters from input file - IF (my_id .EQ. 0) WRITE(*,*) 'Read input parameters...' + CALL speak('Read input parameters...') CALL readinputs ! CALL mpi_barrier(MPI_COMM_WORLD, ierr) - IF (my_id .EQ. 0) WRITE(*,'(a/)') '...input parameters read' + CALL speak('...input parameters read') ! 1.4 Set auxiliary values (allocate arrays, set grid, ...) - IF (my_id .EQ. 0) WRITE(*,*) 'Calculate auxval...' + CALL speak('Calculate auxval...') CALL auxval ! CALL mpi_barrier(MPI_COMM_WORLD, ierr) - IF (my_id .EQ. 0) WRITE(*,'(a/)') '...auxval calculated' + CALL speak('...auxval calculated') ! 1.5 Initial conditions - IF (my_id .EQ. 
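
control.F90 replaces every 'IF (my_id .EQ. 0) WRITE(*,*)' guard with a speak() call (plus the str() helper to concatenate the timing value into the message). The actual routine lives in basic_mod.F90, which this patch does not show in full; a plausible minimal version, assuming it simply prints on the root rank, would look like the sketch below (module name and stored rank are assumptions).

! Editor's sketch (not part of the patch): a rank-0 logging helper of the kind
! control.F90 now calls as speak(); the real one is defined in basic_mod.F90.
MODULE speak_sketch
  IMPLICIT NONE
  INTEGER :: my_id = 0              ! would be set from MPI_COMM_RANK during ppinit
CONTAINS
  SUBROUTINE speak(message)
    CHARACTER(len=*), INTENT(IN) :: message
    IF (my_id == 0) WRITE(*,'(a)') message   ! only the root rank prints
  END SUBROUTINE speak
END MODULE speak_sketch

PROGRAM speak_demo
  USE speak_sketch, ONLY: speak
  IMPLICIT NONE
  CALL speak('MPI initialized')
END PROGRAM speak_demo
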
0) WRITE(*,*) 'Create initial state...' + CALL speak( 'Create initial state...') CALL inital ! CALL mpi_barrier(MPI_COMM_WORLD, ierr) - IF (my_id .EQ. 0) WRITE(*,'(a/)') '...initial state created' + CALL speak('...initial state created') ! 1.6 Initial diagnostics - IF (my_id .EQ. 0) WRITE(*,*) 'Initial diagnostics...' + CALL speak( 'Initial diagnostics...') CALL cpu_time(t_init_diag_0) ! Measure the time of the init diag CALL diagnose(0) CALL cpu_time(t_init_diag_1) ! CALL mpi_barrier(MPI_COMM_WORLD, ierr) - IF (my_id .EQ. 0) THEN - WRITE(*,'(a)') '...initial diagnostics done' - WRITE(*,'(a,F6.3,a/)') '(',t_init_diag_1-t_init_diag_0,'[s])' - ENDIF + CALL speak('...initial diagnostics done') + CALL speak('('//str(t_init_diag_1-t_init_diag_0)//'[s])') CALL FLUSH(stdout) CALL mpi_barrier(MPI_COMM_WORLD, ierr) !________________________________________________________________________________ - IF (my_id .EQ. 0) WRITE(*,*) 'Time integration loop..' + CALL speak( 'Time integration loop..') !________________________________________________________________________________ ! 2. Main loop DO CALL cpu_time(t0_step) ! Measuring time - step = step + 1 - cstep = cstep + 1 + CALL increase_step + CALL increase_cstep CALL stepon - time = time + dt + CALL increase_time CALL tesend IF( nlend ) EXIT ! exit do loop @@ -77,14 +78,14 @@ SUBROUTINE control END DO - IF (my_id .EQ. 0) WRITE(*,'(a/)') '...time integration done' + CALL speak('...time integration done') !________________________________________________________________________________ ! 9. Epilogue CALL diagnose(-1) CALL endrun - IF (my_id .EQ. 0) CALL daytim('Done at ') + CALL daytim('Done at ') CALL ppexit diff --git a/src/cosolver_interface_mod.F90 b/src/cosolver_interface_mod.F90 new file mode 100644 index 00000000..f617dd74 --- /dev/null +++ b/src/cosolver_interface_mod.F90 @@ -0,0 +1,277 @@ +module cosolver_interface +! contains the Hermite-Laguerre collision operators solved using COSOlver. +USE prec_const, ONLY: dp +IMPLICIT NONE +PRIVATE +PUBLIC :: load_COSOlver_mat, compute_cosolver_coll + +CONTAINS + !******************************************************************************! + !! compute the collision terms in a (Np x Nj x Nkx x Nky) matrix all at once + !******************************************************************************! + SUBROUTINE compute_cosolver_coll(GK_CO,INTERSPECIES) + USE parallel, ONLY: num_procs_p, comm_p,dsp_p,rcv_p + USE grid, ONLY: & + ias,iae, & + ips,ipe,parray,& + local_np,total_np,& + ijs,ije,jarray,jmax, dmax,& + ikxs,ikxe,ikys,ikye,izs,ize, bar + USE array, ONLY: Capj, Caa, Cab_T, Cab_F, nadiab_moments + USE MPI + USE model, ONLY: GK_CO, Na, CLOS + USE species, ONLY: nu_ab + IMPLICIT NONE + LOGICAL, INTENT(IN) :: GK_CO, INTERSPECIES + COMPLEX(dp), DIMENSION(1:total_np) :: local_sum, buffer + COMPLEX(dp), DIMENSION(ips:ipe) :: TColl_distr + COMPLEX(dp) :: Tmp_ + INTEGER :: iz,ikx,iky,ij,ip,ia,ib,iq,il,ikx_C,iky_C,iz_C + INTEGER :: p_int,q_int,j_int,l_int, ierr + z:DO iz = izs,ize + x:DO ikx = ikxs,ikxe + y:DO iky = ikys,ikye + a:DO ia = ias,iae + j:DO ij = 1,Jmax+1 + p:DO ip = 1,total_np + !! Take GK or DK limit + IF (GK_CO) THEN ! GK operator (k-dependant) + ikx_C = ikx; iky_C = iky; iz_C = iz; + ELSE ! DK operator (only one mat for every k) + ikx_C = 1; iky_C = 1; iz_C = 1; + ENDIF + !! Apply the cosolver collision matrix + Tmp_ = 0._dp ! Initialization + ! 
self interaction + Tmp_ = Tmp_ + nadiab_moments(ia,iq,il,iky,ikx,iz) & + * nu_ab(ia,ia)*Caa(ia,bar(p_int,j_int), bar(q_int,l_int), iky_C, ikx_C, iz_C) + ! sum the contribution over the other species + IF (INTERSPECIES) THEN + b:DO ib = 1,Na + q:DO iq = ips,ipe + q_int = parray(iq) + l:DO il = ijs,ije + l_int = jarray(il) + IF((CLOS .NE. 1) .OR. (q_int+2*l_int .LE. dmax)) THEN + ! Test contribution + Tmp_ = Tmp_ + nadiab_moments(ia,iq,il,iky,ikx,iz) & + * nu_ab(ia,ib) * Cab_T(ia,ib,bar(p_int,j_int), bar(q_int,l_int), iky_C, ikx_C, iz_C) + ! Field contribution + Tmp_ = Tmp_ + nadiab_moments(ib,iq,il,iky,ikx,iz) & + * nu_ab(ia,ib) * Cab_F(ia,ib,bar(p_int,j_int), bar(q_int,l_int), iky_C, ikx_C, iz_C) + ENDIF + ENDDO l + ENDDO q + ENDDO b + ENDIF + local_sum(ip) = Tmp_ + ENDDO p + IF (num_procs_p .GT. 1) THEN + ! Reduce the local_sums to root = 0 + CALL MPI_REDUCE(local_sum, buffer, total_np, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr) + ! buffer contains the entire collision term along p, we scatter it between + ! the other processes (use of scatterv since Pmax/Np is not an integer) + CALL MPI_SCATTERV(buffer, rcv_p, dsp_p, MPI_DOUBLE_COMPLEX,& + TColl_distr, local_np, MPI_DOUBLE_COMPLEX, & + 0, comm_p, ierr) + ELSE + TColl_distr = local_sum + ENDIF + ! Write in output variable + DO ip = ips,ipe + Capj(ia,ip,ij,iky,ikx,iz) = TColl_distr(ip) + ENDDO + ENDDO j + ENDDO a + ENDDO y + ENDDO x + ENDDO z + END SUBROUTINE compute_cosolver_coll + + !******************************************************************************! + !!!!!!! Load the collision matrix coefficient table from COSOlver results + !******************************************************************************! + SUBROUTINE load_COSOlver_mat(GK_CO,INTERSPECIES,matfile,collision_kcut) ! Load a sub matrix from iCa files (works for pmax,jmax<=P_full,J_full) + USE basic, ONLY: allocate_array + USE parallel, ONLY: comm_p, my_id + USE grid, ONLY: & + pmax,jmax,& + ikxs,ikxe,& + ikys,ikye, kparray,& + izs,ize,bar + USE array, ONLY: Caa, Cab_T, Cab_F + USE MPI + USE model, ONLY: GK_CO, Na, LINEARITY + USE species, ONLY: name + USE futils + IMPLICIT NONE + ! Input + LOGICAL, INTENT(IN) :: GK_CO, INTERSPECIES + CHARACTER(len=128), INTENT(IN) :: matfile ! COSOlver matrix file names + REAL(dp), INTENT(IN) :: collision_kcut + ! Local variables + REAL(dp), DIMENSION(:,:), ALLOCATABLE :: Caa_full,CabT_full, CabF_full ! To load the self entire matrices + REAL(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: Caa__kp ! To store the coeff that will be used along kperp + REAL(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: CabF_kp,CabT_kp ! '' + REAL(dp), DIMENSION(:), ALLOCATABLE :: kp_grid_mat ! kperp grid of the matrices + REAL(dp), DIMENSION(2) :: dims + ! Indices for row and columns of the COSOlver matrix (4D compressed 2D matrices) + INTEGER :: irow_sub, irow_full, icol_sub, icol_full + INTEGER :: fid ! file indexation + INTEGER :: ip, ij, il, ikx, iky, iz, ik, ikp, ikps_C,ikpe_C,ia,ib ! indices for loops + INTEGER :: pdim, jdim ! dimensions of the COSOlver matrices + INTEGER :: ikp_next, ikp_prev, nkp_mat, ikp_mat + REAL(dp) :: kp_max,kperp_sim, kperp_mat, zerotoone + CHARACTER(len=128) :: var_name, ikp_string, name_a, name_b + CHARACTER(len=1) :: letter_a, letter_b + ! Opening the compiled cosolver matrices results + CALL openf(matfile,fid, 'r', 'D', mpicomm=comm_p); + ! Get matrices dimensions (polynomials degrees and kperp grid) + CALL getarr(fid, '/dims_i', dims) ! 
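
Because the Hermite index p is distributed over comm_p, each rank in compute_cosolver_coll only accumulates a partial sum of T_a^{pj} = sum_b nu_ab * sum_{q,l} [ Cab_T * N_a^{ql} + Cab_F * N_b^{ql} ] over its local (q,l) block; the full-length local_sum is therefore MPI_REDUCEd onto root 0 and immediately redistributed with MPI_SCATTERV following the rcv_p/dsp_p layout (scatterv because total_np need not divide evenly among ranks). A stripped-down sketch of that reduce-then-scatterv exchange, using MPI_COMM_WORLD and a toy block distribution in place of the code's parallel-module variables:

! Editor's sketch (not part of the patch): the reduce-then-scatterv exchange used to
! assemble the p-distributed collision sum (toy block distribution, MPI_COMM_WORLD
! instead of comm_p; assumes num_procs <= total_np).
PROGRAM reduce_scatterv_sketch
  USE mpi
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)
  INTEGER, PARAMETER :: total_np = 8
  COMPLEX(dp) :: local_sum(total_np), buffer(total_np)
  COMPLEX(dp), ALLOCATABLE :: tcoll_distr(:)
  INTEGER, ALLOCATABLE :: rcv(:), dsp(:)
  INTEGER :: ierr, my_id, num_procs, local_np, i
  CALL MPI_INIT(ierr)
  CALL MPI_COMM_RANK(MPI_COMM_WORLD, my_id, ierr)
  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
  ! block distribution of the p index, remainder given to the last rank
  ALLOCATE(rcv(num_procs), dsp(num_procs))
  rcv = total_np/num_procs
  rcv(num_procs) = rcv(num_procs) + MOD(total_np, num_procs)
  dsp(1) = 0
  DO i = 2, num_procs
    dsp(i) = dsp(i-1) + rcv(i-1)
  ENDDO
  local_np = rcv(my_id+1)
  ALLOCATE(tcoll_distr(local_np))
  ! every rank holds a partial sum over its share of the (q,l) indices
  local_sum = CMPLX(my_id+1, 0, dp)
  ! 1) sum the partial contributions of all p-ranks on root 0
  CALL MPI_REDUCE(local_sum, buffer, total_np, MPI_DOUBLE_COMPLEX, MPI_SUM, &
                  0, MPI_COMM_WORLD, ierr)
  ! 2) hand each rank back only the slice of p it owns
  CALL MPI_SCATTERV(buffer, rcv, dsp, MPI_DOUBLE_COMPLEX, &
                    tcoll_distr, local_np, MPI_DOUBLE_COMPLEX, 0, MPI_COMM_WORLD, ierr)
  IF (my_id == 0 .AND. local_np > 0) PRINT *, 'root keeps ', tcoll_distr(1)
  CALL MPI_FINALIZE(ierr)
END PROGRAM reduce_scatterv_sketch
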
Get the ion polynomial degrees (consider same for electrons) + pdim = dims(1); jdim = dims(2); + !! Here we stop if the matrix is too small, we could put zero to these coefficients otherwise? + IF ( ((pdim .LT. pmax) .OR. (jdim .LT. jmax)) .AND. (my_id .EQ. 0)) ERROR STOP '>> ERROR << P,J Matrix too small' + ! Get the dimension kperp grid of the matrices for GK operator + CALL getsize(fid, '/coordkperp', nkp_mat) + CALL allocate_array(kp_grid_mat, 1,nkp_mat) + CALL getarr(fid, '/coordkperp', kp_grid_mat) + kp_max = MAXVAL(kparray) + ! check that we have enough kperps mat, if not we apply the kpmax matrix to all k>kpmax + IF (LINEARITY .NE. 'linear') THEN + IF ( (kp_grid_mat(nkp_mat) .LT. 2./3.*kp_max) .AND. (my_id .EQ. 0)) WRITE(*,*) 'warning: Matrix kperp grid too small' + ELSE + IF ( (kp_grid_mat(nkp_mat) .LT. kp_max) .AND. (my_id .EQ. 0)) WRITE(*,*) 'warning: Matrix kperp grid too small !!' + ENDIF + IF (GK_CO) THEN ! GK operator (k-dependant) + ikps_C = 1; ikpe_C = nkp_mat + ELSE ! DK operator, only the k=0 mat applied to all k + ikps_C = 1; ikpe_C = 1 + ENDIF + ! allocate the temporary matrices + CALL allocate_array( Caa__kp, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), ikps_C,ikpe_C) + CALL allocate_array( CabF_kp, 1,Na, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), ikps_C,ikpe_C) + CALL allocate_array( CabT_kp, 1,Na, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), ikps_C,ikpe_C) + CALL allocate_array( Caa_full,1,(pdim+1)*(jdim+1), 1,(pdim+1)*(jdim+1)) + CALL allocate_array( CabT_full,1,(pdim+1)*(jdim+1), 1,(pdim+1)*(jdim+1)) + CALL allocate_array( CabF_full,1,(pdim+1)*(jdim+1), 1,(pdim+1)*(jdim+1)) + ! Loop over every kperp values that will get the collision operator + kp:DO ikp = ikps_C,ikpe_C + ! Kperp value in string format to select in cosolver hdf5 file + IF (GK_CO) THEN + write(ikp_string,'(i5.5)') ikp-1 + ELSE + write(ikp_string,'(i5.5)') 0 + ENDIF + a:DO ia = 1,Na + name_a = name(ia); letter_a = name_a(1:1) + ! get the self colision matrix + ! Naming of the array to load (kperp dependant) + ! we look for the data stored at e.g. '00001/Caapj/Ceepj' + WRITE(var_name,'(a,a,a,a,a)') TRIM(ADJUSTL(ikp_string)),'/Caapj/',letter_a,letter_a,'pj' + CALL getarr(fid, var_name, Caa_full) ! get array + ! Fill sub array with the usefull polynmial degrees only + DO ip = 0,pmax ! Loop over rows + DO ij = 0,jmax + irow_sub = (jmax +1)*ip + ij +1 + irow_full = (jdim +1)*ip + ij +1 + DO il = 0,pmax ! Loop over columns + DO ik = 0,jmax + icol_sub = (jmax +1)*il + ik +1 + icol_full = (jdim +1)*il + ik +1 + Caa__kp (ia,irow_sub,icol_sub,ikp) = Caa_full(irow_full,icol_full) + ENDDO + ENDDO + ENDDO + ENDDO + b: DO ib = 1,Na + name_b = name(ib); letter_b = name_b(1:1) + IF(INTERSPECIES) THEN ! Pitch angle is only applied on like-species + !!!!!!!!!!!!!!! Test and field matrices !!!!!!!!!!!!!! + ! we look for the data stored at e.g. '00001/Ceipj/CeipjT' + WRITE(var_name,'(a,a,a,a,a,a,a,a)') TRIM(ADJUSTL(ikp_string)),'/C',letter_a,letter_b,'pj/',letter_a,letter_b,'pjT' + CALL getarr(fid, var_name, CabT_full) + ! we look for the data stored at e.g. '00001/Ceipj/CeipjF' + WRITE(var_name,'(a,a,a,a,a,a,a,a)') TRIM(ADJUSTL(ikp_string)),'/C',letter_a,letter_b,'pj/',letter_a,letter_b,'pjF' + CALL getarr(fid, var_name, CabF_full) + ! Fill sub array with only usefull polynmials degree + DO ip = 0,pmax ! Loop over rows + DO ij = 0,jmax + irow_sub = (jmax +1)*ip + ij +1 + irow_full = (jdim +1)*ip + ij +1 + DO il = 0,pmax ! 
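
Both the loader and compute_cosolver_coll address the COSOlver matrices through a compound row/column index, row = (jmax+1)*p + j + 1, which is what grid's bar() helper provides and what the irow_sub/icol_sub assignments above compute explicitly. A tiny sketch of that compression and its inverse; the helper reimplemented here is illustrative, the real one lives in grid_mod.F90 and is not shown in this patch.

! Editor's sketch (not part of the patch): the (p,j) -> flat-row compression used to
! index the COSOlver matrices, and its inverse.
PROGRAM bar_index_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: jmax = 3
  INTEGER :: p, j, row
  p = 2; j = 1
  row = bar(p, j)
  PRINT '(A,I0,A,I0,A,I0)', '(p,j)=(', p, ',', j, ') -> row ', row
  PRINT '(A,I0,A,I0)', 'back to p=', (row-1)/(jmax+1), ' j=', MOD(row-1, jmax+1)
CONTAINS
  PURE INTEGER FUNCTION bar(p_, j_)
    INTEGER, INTENT(IN) :: p_, j_
    bar = (jmax+1)*p_ + j_ + 1     ! rows/columns are 1-based in the stored matrices
  END FUNCTION bar
END PROGRAM bar_index_sketch
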
Loop over columns + DO ik = 0,jmax + icol_sub = (jmax +1)*il + ik +1 + icol_full = (jdim +1)*il + ik +1 + CabT_kp(ia,ib,irow_sub,icol_sub,ikp) = CabT_full(irow_full,icol_full) + CabF_kp(ia,ib,irow_sub,icol_sub,ikp) = CabF_full(irow_full,icol_full) + ENDDO + ENDDO + ENDDO + ENDDO + ELSE + CabT_kp = 0._dp; CabF_kp = 0._dp + ENDIF + ENDDO b + ENDDO a + ENDDO kp + DEALLOCATE(Caa_full) + DEALLOCATE(CabT_full) + DEALLOCATE(CabF_full) + CALL closef(fid) + + IF (GK_CO) THEN ! Interpolation of the kperp matrix values on kx ky grid + IF (my_id .EQ. 0 ) WRITE(*,*) '...Interpolation from matrices kperp to simulation kx,ky...' + DO ikx = ikxs,ikxe + DO iky = ikys,ikye + DO iz = izs,ize + ! Check for nonlinear case if we are in the anti aliased domain or the filtered one + kperp_sim = MIN(kparray(iky,ikx,iz,0),collision_kcut) ! current simulation kperp + ! Find the interval in kp grid mat where kperp_sim is contained + ! Loop over the whole kp mat grid to find the smallest kperp that is + ! larger than the current kperp_sim (brute force...) + DO ikp=1,nkp_mat + ikp_mat = ikp ! the first indice of the interval (k0) + kperp_mat = kp_grid_mat(ikp) + IF(kperp_mat .GT. kperp_sim) EXIT ! a matrix with kperq > current kx2 + ky2 has been found + ENDDO + ! Interpolation + ! interval boundaries + ikp_next = ikp_mat !index of k1 (larger than kperp_sim thanks to previous loop) + ikp_prev = ikp_mat - 1 !index of k0 (smaller neighbour to interpolate inbetween) + if ( (kp_grid_mat(ikp_prev) .GT. kperp_sim) .OR. (kp_grid_mat(ikp_next) .LT. kperp_sim) ) THEN + ! write(*,*) 'Warning, linear interp of collision matrix failed!! ' + ! write(*,*) kp_grid_mat(ikp_prev), '<', kperp_sim, '<', kp_grid_mat(ikp_next) + ENDIF + ! 0->1 variable for linear interp, i.e. zero2one = (k-k0)/(k1-k0) + zerotoone = MIN(1._dp,(kperp_sim - kp_grid_mat(ikp_prev))/(kp_grid_mat(ikp_next) - kp_grid_mat(ikp_prev))) + ! Linear interpolation between previous and next kperp matrix values + Caa (:,:,:,iky,ikx,iz) = (Caa__kp(:,:,:,ikp_next) - Caa__kp(:,:,:,ikp_prev))*zerotoone + Caa__kp(:,:,:,ikp_prev) + IF(INTERSPECIES) THEN + Cab_T(:,:,:,:,iky,ikx,iz) = (CabT_kp(:,:,:,:,ikp_next) - CabT_kp(:,:,:,:,ikp_prev))*zerotoone + CabT_kp(:,:,:,:,ikp_prev) + Cab_F(:,:,:,:,iky,ikx,iz) = (CabF_kp(:,:,:,:,ikp_next) - CabF_kp(:,:,:,:,ikp_prev))*zerotoone + CabF_kp(:,:,:,:,ikp_prev) + ELSE + Cab_T(:,:,:,:,iky,ikx,iz) = 0._dp + Cab_F(:,:,:,:,iky,ikx,iz) = 0._dp + ENDIF + ENDDO + ENDDO + ENDDO + ELSE ! DK -> No kperp dep, copy simply to final collision matrices + Caa (:,:,:,1,1,1) = Caa__kp(:,:,:,1) + Cab_T(:,:,:,:,1,1,1) = CabT_kp(:,:,:,:,1) + Cab_F(:,:,:,:,1,1,1) = CabF_kp(:,:,:,:,1) + ENDIF + ! Deallocate auxiliary variables + DEALLOCATE (Caa__kp); DEALLOCATE (CabT_kp); DEALLOCATE (CabF_kp) + + IF( .NOT. INTERSPECIES ) THEN + IF(my_id.EQ.0) write(*,*) "--Like Species operator--" + Cab_F = 0._dp; + Cab_T = 0._dp; + ENDIF + + IF (my_id .EQ. 0) WRITE(*,*) '============DONE===========' + + END SUBROUTINE load_COSOlver_mat + !******************************************************************************! +END MODULE cosolver_interface diff --git a/src/diagnose.F90 b/src/diagnose.F90 index 77dcdd1d..20872259 100644 --- a/src/diagnose.F90 +++ b/src/diagnose.F90 @@ -1,8 +1,9 @@ SUBROUTINE diagnose(kstep) ! 
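
After loading, the gyrokinetic matrices exist only on the file's kperp grid, so each simulation (ky,kx,z) point is served by a linear blend of the two bracketing grid entries: the loop above finds the first grid kperp larger than the (collision_kcut-capped) simulation kperp, and zerotoone = (k - k0)/(k1 - k0), clipped to 1, weights the interpolation. The sketch below repeats that bracketing and blending on a scalar table standing in for one matrix entry per kperp; grid and coefficient values are toy numbers.

! Editor's sketch (not part of the patch): bracket a target kperp on the matrix grid
! and blend the two neighbouring tables linearly, as in load_COSOlver_mat.
PROGRAM kperp_interp_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)
  INTEGER, PARAMETER :: nkp = 5
  REAL(dp) :: kp_grid(nkp), c_of_kp(nkp), kperp, zerotoone, c_interp
  INTEGER  :: ikp, ikp_prev, ikp_next
  kp_grid = (/0.0_dp, 0.5_dp, 1.0_dp, 1.5_dp, 2.0_dp/)
  c_of_kp = (/1.0_dp, 0.9_dp, 0.7_dp, 0.4_dp, 0.2_dp/)   ! stands in for one matrix entry per kperp
  kperp   = MIN(1.2_dp, 100._dp)                          ! simulation kperp, capped at the cutoff
  ! brute-force search for the first grid point above kperp (same as in the loader)
  DO ikp = 1, nkp
    ikp_next = ikp
    IF (kp_grid(ikp) .GT. kperp) EXIT
  ENDDO
  ikp_prev  = ikp_next - 1
  zerotoone = MIN(1._dp, (kperp - kp_grid(ikp_prev)) / (kp_grid(ikp_next) - kp_grid(ikp_prev)))
  c_interp  = (c_of_kp(ikp_next) - c_of_kp(ikp_prev))*zerotoone + c_of_kp(ikp_prev)
  PRINT *, 'interpolated coefficient at kperp=1.2:', c_interp
END PROGRAM kperp_interp_sketch
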
Diagnostics, writing simulation state to disk - USE basic - USE diagnostics_par - USE processing, ONLY: gflux_ri, hflux_xi + USE basic, ONLY: t0_diag,t1_diag,tc_diag, lu_in, finish, start, cstep, dt, time, tmax, display_h_min_s + USE diagnostics_par, ONLY: input_fname + USE processing, ONLY: pflux_x, hflux_x + USE parallel, ONLY: my_id IMPLICIT NONE INTEGER, INTENT(in) :: kstep @@ -17,26 +18,27 @@ SUBROUTINE diagnose(kstep) IF (kstep .EQ. -1) THEN CALL cpu_time(finish) ! Display computational time cost - IF (my_id .EQ. 0) CALL display_h_min_s(finish-start) + CALL display_h_min_s(finish-start) ! Show last state transport values IF (my_id .EQ. 0) & - WRITE(*,"(A,G10.2,A8,G10.2,A)") 'Final transport values : | Gxi = ',gflux_ri,'| Qxi = ',hflux_xi,'|' + WRITE(*,"(A,G10.2,A8,G10.2,A)") 'Final transport values : | Gxi = ',pflux_x(1),'| Qxi = ',hflux_x(1),'|' END IF !! Specific diagnostic calls CALL diagnose_full(kstep) ! Terminal info IF ((kstep .GT. 0) .AND. (MOD(cstep, INT(1.0/dt)) == 0) .AND. (my_id .EQ. 0)) THEN - WRITE(*,"(A,F6.0,A1,F6.0,A8,G10.2,A8,G10.2,A)")'|t/tmax = ', time,"/",tmax,'| Gxi = ',gflux_ri,'| Qxi = ',hflux_xi,'|' + WRITE(*,"(A,F6.0,A1,F6.0,A8,G10.2,A8,G10.2,A)")'|t/tmax = ', time,"/",tmax,'| Gxi = ',pflux_x(1),'| Qxi = ',hflux_x(1),'|' ENDIF CALL cpu_time(t1_diag); tc_diag = tc_diag + (t1_diag - t0_diag) END SUBROUTINE diagnose SUBROUTINE init_outfile(comm,file0,file,fid) USE diagnostics_par, ONLY : write_doubleprecision, diag_par_outputinputs, input_fname - USE basic, ONLY : my_id, jobnum, basic_outputinputs + USE basic, ONLY : speak, jobnum, basic_outputinputs USE grid, ONLY : grid_outputinputs USE geometry, ONLY : geometry_outputinputs USE model, ONLY : model_outputinputs + USE species, ONLY : species_outputinputs USE collision, ONLY : coll_outputinputs USE initial_par, ONLY : initial_outputinputs USE time_integration,ONLY : time_integration_outputinputs @@ -59,7 +61,7 @@ SUBROUTINE init_outfile(comm,file0,file,fid) ELSE CALL creatf(file, fid, mpicomm=comm) END IF - IF (my_id .EQ. 0) WRITE(*,'(3x,a,a)') TRIM(file), ' created' + CALL speak(TRIM(file)//' created') ! basic data group CALL creatg(fid, "/data", "data") ! File group @@ -67,22 +69,22 @@ SUBROUTINE init_outfile(comm,file0,file,fid) CALL attach(fid, "/files", "jobnum", jobnum) ! Add the code info and parameters to the file - WRITE(str,'(a,i2.2)') "/data/input" - CALL creatd(fid, 0,(/0/),TRIM(str),'Input parameters') + CALL creatg(fid, "/data/input", "input") + CALL creatd(fid, 0,(/0/),"/data/input/codeinfo",'Code Information') CALL attach(fid, TRIM(str), "version", VERSION) !defined in srcinfo.h CALL attach(fid, TRIM(str), "branch", BRANCH) !defined in srcinfo.h CALL attach(fid, TRIM(str), "author", AUTHOR) !defined in srcinfo.h CALL attach(fid, TRIM(str), "execdate", EXECDATE) !defined in srcinfo.h CALL attach(fid, TRIM(str), "host", HOST) !defined in srcinfo.h - - CALL basic_outputinputs(fid,str) - CALL grid_outputinputs(fid, str) - CALL geometry_outputinputs(fid, str) - CALL diag_par_outputinputs(fid, str) - CALL model_outputinputs(fid, str) - CALL coll_outputinputs(fid, str) - CALL initial_outputinputs(fid, str) - CALL time_integration_outputinputs(fid, str) + CALL basic_outputinputs(fid) + CALL grid_outputinputs(fid) + CALL geometry_outputinputs(fid) + CALL diag_par_outputinputs(fid) + CALL model_outputinputs(fid) + CALL species_outputinputs(fid) + CALL coll_outputinputs(fid) + CALL initial_outputinputs(fid) + CALL time_integration_outputinputs(fid) ! Save STDIN (input file) of this run IF(jobnum .LE. 
99) THEN @@ -94,24 +96,32 @@ SUBROUTINE init_outfile(comm,file0,file,fid) END SUBROUTINE init_outfile SUBROUTINE diagnose_full(kstep) - USE basic - USE grid + USE basic, ONLY: speak,& + cstep,iframe0d,iframe2d,iframe3d,iframe5d,& + start,finish,crashed + USE grid, ONLY: & + ias,iae, & + parray_full,pmax,& + ijs,ije,jarray_full,jmax,& + ikys,ikye,kyarray_full,& + ikxs,ikxe,kxarray_full,& + izs,ize,zarray_full USE diagnostics_par - USE futils, ONLY: creatf, creatg, creatd, closef, putarr, putfile, attach, openf, putarrnd + USE futils, ONLY: creatf, creatg, creatd, closef, putarr, putfile, attach, openf, putarrnd USE array - USE model - USE initial_par - USE fields - USE time_integration - USE parallel - USE prec_const - USE collision, ONLY: coll_outputinputs - USE geometry + USE model, ONLY: + USE initial_par, ONLY: + USE fields, ONLY: + USE time_integration,ONLY: + USE parallel, ONLY: my_id, comm0 + USE prec_const, ONLY: + USE collision, ONLY: coll_outputinputs + USE geometry, ONLY: gxx,gxy,gyy,gxz,gyz,gzz,hatR,hatZ,hatB,dBdx,dBdy,dBdz,Jacobian,gradz_coeff,Ckxky IMPLICIT NONE INTEGER, INTENT(in) :: kstep INTEGER, parameter :: BUFSIZE = 2 - INTEGER :: rank = 0 + INTEGER :: rank = 0, ierr INTEGER :: dims(1) = (/0/) !____________________________________________________________________________ ! 1. Initial diagnostics @@ -139,10 +149,8 @@ SUBROUTINE diagnose_full(kstep) CALL putarr(fidres, "/data/grid/coordkx", kxarray_full, "kx*rho_s0", ionode=0) CALL putarr(fidres, "/data/grid/coordky", kyarray_full, "ky*rho_s0", ionode=0) CALL putarr(fidres, "/data/grid/coordz", zarray_full, "z/R", ionode=0) - CALL putarr(fidres, "/data/grid/coordp_e" , parray_e_full, "p_e", ionode=0) - CALL putarr(fidres, "/data/grid/coordj_e" , jarray_e_full, "j_e", ionode=0) - CALL putarr(fidres, "/data/grid/coordp_i" , parray_i_full, "p_i", ionode=0) - CALL putarr(fidres, "/data/grid/coordj_i" , jarray_i_full, "j_i", ionode=0) + CALL putarr(fidres, "/data/grid/coordp" , parray_full, "p", ionode=0) + CALL putarr(fidres, "/data/grid/coordj" , jarray_full, "j", ionode=0) ! Metric info CALL creatg(fidres, "/data/metric", "Metric data") @@ -161,7 +169,7 @@ SUBROUTINE diagnose_full(kstep) CALL putarrnd(fidres, "/data/metric/Jacobian", Jacobian(izs:ize,0:1), (/1, 1, 1/)) CALL putarrnd(fidres, "/data/metric/gradz_coeff", gradz_coeff(izs:ize,0:1), (/1, 1, 1/)) CALL putarrnd(fidres, "/data/metric/Ckxky", Ckxky(ikys:ikye,ikxs:ikxe,izs:ize,0:1), (/1, 1, 3/)) - CALL putarrnd(fidres, "/data/metric/kernel_i", kernel_i(ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize,0:1), (/ 1, 2, 4/)) + CALL putarrnd(fidres, "/data/metric/kernel", kernel(ias:iae,ijs:ije,ikys:ikye,ikxs:ikxe,izs:ize,0:1), (/1, 1, 2, 4/)) ! var0d group (gyro transport) IF (nsave_0d .GT. 
0) THEN @@ -170,18 +178,11 @@ SUBROUTINE diagnose_full(kstep) CALL creatd(fidres, rank, dims, "/data/var0d/cstep", "iteration number") IF (write_gamma) THEN - CALL creatd(fidres, rank, dims, "/data/var0d/gflux_ri", "Radial gyro ion transport") - CALL creatd(fidres, rank, dims, "/data/var0d/pflux_ri", "Radial part ion transport") - IF(KIN_E) THEN - CALL creatd(fidres, rank, dims, "/data/var0d/gflux_re", "Radial gyro electron transport") - CALL creatd(fidres, rank, dims, "/data/var0d/pflux_re", "Radial part electron transport") - ENDIF + CALL creatd(fidres, rank, dims, "/data/var0d/gflux_x", "Radial gyro transport") + CALL creatd(fidres, rank, dims, "/data/var0d/pflux_x", "Radial part transport") ENDIF IF (write_hf) THEN - CALL creatd(fidres, rank, dims, "/data/var0d/hflux_xi", "Radial part ion heat flux") - IF(KIN_E) THEN - CALL creatd(fidres, rank, dims, "/data/var0d/hflux_xe", "Radial part electron heat flux") - ENDIF + CALL creatd(fidres, rank, dims, "/data/var0d/hflux_x", "Radial part ion heat flux") ENDIF IF (cstep==0) THEN iframe0d=0 @@ -211,38 +212,23 @@ SUBROUTINE diagnose_full(kstep) IF (write_phi) CALL creatg(fidres, "/data/var3d/psi", "psi") IF (write_Na00) THEN - IF(KIN_E)& - CALL creatg(fidres, "/data/var3d/Ne00", "Ne00") - CALL creatg(fidres, "/data/var3d/Ni00", "Ni00") - IF(KIN_E)& - CALL creatg(fidres, "/data/var3d/Nepjz", "Nepjz") - CALL creatg(fidres, "/data/var3d/Nipjz", "Nipjz") + CALL creatg(fidres, "/data/var3d/Na00", "Na00") + CALL creatg(fidres, "/data/var3d/Nepjz", "Napjz") ENDIF IF (write_dens) THEN - IF(KIN_E)& - CALL creatg(fidres, "/data/var3d/dens_e", "dens_e") - CALL creatg(fidres, "/data/var3d/dens_i", "dens_i") + CALL creatg(fidres, "/data/var3d/dens", "dens_e") ENDIF IF (write_fvel) THEN - IF(KIN_E) THEN - CALL creatg(fidres, "/data/var3d/upar_e", "upar_e") - CALL creatg(fidres, "/data/var3d/uper_e", "uper_e") - ENDIF - CALL creatg(fidres, "/data/var3d/upar_i", "upar_i") - CALL creatg(fidres, "/data/var3d/uper_i", "uper_i") + CALL creatg(fidres, "/data/var3d/upar", "upar") + CALL creatg(fidres, "/data/var3d/uper", "uper") ENDIF IF (write_temp) THEN - IF(KIN_E) THEN - CALL creatg(fidres, "/data/var3d/Tper_e", "Tper_e") - CALL creatg(fidres, "/data/var3d/Tpar_e", "Tpar_e") - CALL creatg(fidres, "/data/var3d/temp_e", "temp_e") - ENDIF - CALL creatg(fidres, "/data/var3d/Tper_i", "Tper_i") - CALL creatg(fidres, "/data/var3d/Tpar_i", "Tpar_i") - CALL creatg(fidres, "/data/var3d/temp_i", "temp_i") + CALL creatg(fidres, "/data/var3d/Tper_e", "Tper") + CALL creatg(fidres, "/data/var3d/Tpar_e", "Tpar") + CALL creatg(fidres, "/data/var3d/temp_e", "temp") ENDIF IF (cstep==0) THEN @@ -258,15 +244,11 @@ SUBROUTINE diagnose_full(kstep) CALL creatd(fidres, rank, dims, "/data/var5d/cstep", "iteration number") IF (write_Napj) THEN - IF(KIN_E)& - CALL creatg(fidres, "/data/var5d/moments_e", "moments_e") - CALL creatg(fidres, "/data/var5d/moments_i", "moments_i") + CALL creatg(fidres, "/data/var5d/moments", "moments") ENDIF IF (write_Sapj) THEN - IF(KIN_E)& - CALL creatg(fidres, "/data/var5d/Sepj", "Sepj") - CALL creatg(fidres, "/data/var5d/Sipj", "Sipj") + CALL creatg(fidres, "/data/var5d/Sapj", "Sapj") ENDIF IF (cstep==0) THEN @@ -340,7 +322,6 @@ SUBROUTINE diagnose_0d USE diagnostics_par USE prec_const USE processing - USE model, ONLY: KIN_E IMPLICIT NONE ! Time measurement data @@ -364,36 +345,29 @@ SUBROUTINE diagnose_0d CALL attach(fidres,"/data/var0d/" , "frames", iframe0d) ! 
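! ---------------------------------------------------------------------------
! Summary of the output-file layout created by the calls above (editorial
! note, not part of this patch):
!   /data/input/*   code info plus one dataset per input namelist
!   /data/grid/*    coordinate arrays (coordkx, coordky, coordz, coordp, coordj)
!   /data/metric/*  metric coefficients, Jacobian, Ckxky, kernel
!   /data/var0d/*   time traces gflux_x, pflux_x, hflux_x (every nsave_0d steps)
!   /data/var3d/*   phi, psi, Na00 and the fluid moments (every nsave_3d steps)
!   /data/var5d/*   full moments and Sapj (every nsave_5d steps)
! ---------------------------------------------------------------------------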
Ion transport data IF (write_gamma) THEN - CALL compute_radial_ion_transport - CALL append(fidres, "/data/var0d/gflux_ri",gflux_ri,ionode=0) - CALL append(fidres, "/data/var0d/pflux_ri",pflux_ri,ionode=0) - IF(KIN_E) THEN - CALL compute_radial_electron_transport - CALL append(fidres, "/data/var0d/gflux_re",gflux_re,ionode=0) - CALL append(fidres, "/data/var0d/pflux_re",pflux_re,ionode=0) - ENDIF + CALL compute_radial_transport + CALL append(fidres, "/data/var0d/gflux_x",gflux_x(1),ionode=0) + CALL append(fidres, "/data/var0d/pflux_x",pflux_x(1),ionode=0) ENDIF IF (write_hf) THEN - CALL compute_radial_ion_heatflux - CALL append(fidres, "/data/var0d/hflux_xi",hflux_xi,ionode=0) - IF(KIN_E) THEN - CALL compute_radial_electron_heatflux - CALL append(fidres, "/data/var0d/hflux_xe",hflux_xe,ionode=0) - ENDIF + CALL compute_radial_heatflux + CALL append(fidres, "/data/var0d/hflux_x",hflux_x(1),ionode=0) ENDIF END SUBROUTINE diagnose_0d SUBROUTINE diagnose_3d USE basic USE futils, ONLY: append, getatt, attach, putarrnd, putarr - USE fields - USE array - USE grid, ONLY: ikxs,ikxe, ikys,ikye, Nkx, Nky, local_nkx, ikx, iky, ips_e, ips_i - USE time_integration + USE fields, ONLY: phi, psi, moments + USE array, ONLY: Na00,Napjz,dens,upar,uper,Tpar,Tper,temp + USE grid, ONLY: CONTAINSp0, ip0,ij0, & + total_np, total_nj, total_nky, total_nkx, total_nz, & + local_np, local_nj, local_nky, local_nkx, local_nz, & + ngz + USE time_integration, ONLY: updatetlevel USE diagnostics_par USE prec_const - USE processing - USE model, ONLY: KIN_E + USE processing, ONLY: compute_fluid_moments, compute_Napjz_spectrum IMPLICIT NONE CALL append(fidres, "/data/var3d/time", time,ionode=0) @@ -402,23 +376,16 @@ SUBROUTINE diagnose_3d iframe3d=iframe3d+1 CALL attach(fidres,"/data/var3d/" , "frames", iframe3d) - IF (write_phi) CALL write_field3d_kykxz(phi (ikys:ikye,ikxs:ikxe,izs:ize), 'phi') - IF (write_phi) CALL write_field3d_kykxz(psi (ikys:ikye,ikxs:ikxe,izs:ize), 'psi') + IF (write_phi) CALL write_field3d_kykxz(phi (:,:,1+ngz/2:local_nz+ngz/2), 'phi') + IF (write_phi) CALL write_field3d_kykxz(psi (:,:,1+ngz/2:local_nz+ngz/2), 'psi') IF (write_Na00) THEN - IF(KIN_E)THEN - IF (CONTAINS_ip0_e) & - Ne00(ikys:ikye,ikxs:ikxe,izs:ize) = moments_e(ip0_e,ij0_e,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel) - CALL write_field3d_kykxz(Ne00(ikys:ikye,ikxs:ikxe,izs:ize), 'Ne00') - ENDIF - IF (CONTAINS_ip0_i) & - Ni00(ikys:ikye,ikxs:ikxe,izs:ize) = moments_i(ip0_i,ij0_i,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel) - CALL write_field3d_kykxz(Ni00(ikys:ikye,ikxs:ikxe,izs:ize), 'Ni00') + IF (CONTAINSp0) & + Na00(1,:,:,:) = moments(1,ip0,ij0,:,:,1+ngz/2:local_nz+ngz/2,updatetlevel) + CALL write_field3d_kykxz(Na00(1,:,:,:), 'Na00') CALL compute_Napjz_spectrum - IF(KIN_E) & - CALL write_field3d_pjz_e(Nepjz(ips_e:ipe_e,ijs_e:ije_e,izs:ize), 'Nepjz') - CALL write_field3d_pjz_i(Nipjz(ips_i:ipe_i,ijs_i:ije_i,izs:ize), 'Nipjz') + CALL write_field3d_pjz(Napjz(1,:,:,:), 'Napjz') ENDIF !! 
Fuid moments @@ -426,108 +393,78 @@ SUBROUTINE diagnose_3d CALL compute_fluid_moments IF (write_dens) THEN - IF(KIN_E)& - CALL write_field3d_kykxz(dens_e(ikys:ikye,ikxs:ikxe,izs:ize), 'dens_e') - CALL write_field3d_kykxz(dens_i(ikys:ikye,ikxs:ikxe,izs:ize), 'dens_i') + CALL write_field3d_kykxz(dens(1,:,:,:), 'dens') ENDIF IF (write_fvel) THEN - IF(KIN_E)& - CALL write_field3d_kykxz(upar_e(ikys:ikye,ikxs:ikxe,izs:ize), 'upar_e') - CALL write_field3d_kykxz(upar_i(ikys:ikye,ikxs:ikxe,izs:ize), 'upar_i') - IF(KIN_E)& - CALL write_field3d_kykxz(uper_e(ikys:ikye,ikxs:ikxe,izs:ize), 'uper_e') - CALL write_field3d_kykxz(uper_i(ikys:ikye,ikxs:ikxe,izs:ize), 'uper_i') + CALL write_field3d_kykxz(upar(1,:,:,:), 'upar') + CALL write_field3d_kykxz(uper(1,:,:,:), 'uper') ENDIF IF (write_temp) THEN - IF(KIN_E)& - CALL write_field3d_kykxz(Tpar_e(ikys:ikye,ikxs:ikxe,izs:ize), 'Tpar_e') - CALL write_field3d_kykxz(Tpar_i(ikys:ikye,ikxs:ikxe,izs:ize), 'Tpar_i') - IF(KIN_E)& - CALL write_field3d_kykxz(Tper_e(ikys:ikye,ikxs:ikxe,izs:ize), 'Tper_e') - CALL write_field3d_kykxz(Tper_i(ikys:ikye,ikxs:ikxe,izs:ize), 'Tper_i') - IF(KIN_E)& - CALL write_field3d_kykxz(temp_e(ikys:ikye,ikxs:ikxe,izs:ize), 'temp_e') - CALL write_field3d_kykxz(temp_i(ikys:ikye,ikxs:ikxe,izs:ize), 'temp_i') + CALL write_field3d_kykxz(Tpar(1,:,:,:), 'Tpar') + CALL write_field3d_kykxz(Tper(1,:,:,:), 'Tper') + CALL write_field3d_kykxz(temp(1,:,:,:), 'temp') ENDIF CONTAINS - - SUBROUTINE write_field3d_kykxz(field, text) - USE parallel, ONLY : gather_xyz - IMPLICIT NONE - COMPLEX(dp), DIMENSION(ikys:ikye,ikxs:ikxe, izs:ize), INTENT(IN) :: field - CHARACTER(*), INTENT(IN) :: text - COMPLEX(dp), DIMENSION(1:Nky,1:Nkx,1:Nz) :: field_full - CHARACTER(256) :: dset_name - field_full = 0; - WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var3d", TRIM(text), iframe3d - - IF (num_procs .EQ. 1) THEN ! no data distribution - CALL putarr(fidres, dset_name, field(ikys:ikye,ikxs:ikxe, izs:ize), ionode=0) - - ELSEIF(GATHERV_OUTPUT) THEN ! output using one node (gatherv) - CALL gather_xyz(field(ikys:ikye,1:Nkx,izs:ize),field_full(1:Nky,1:Nkx,1:Nz)) - CALL putarr(fidres, dset_name, field_full(1:Nky,1:Nkx,1:Nz), ionode=0) - ELSE ! output using putarrnd (very slow on marconi) - CALL putarrnd(fidres, dset_name, field(ikys:ikye,ikxs:ikxe, izs:ize), (/1, 1, 3/)) - ENDIF - CALL attach(fidres, dset_name, "time", time) - END SUBROUTINE write_field3d_kykxz - - SUBROUTINE write_field3d_pjz_i(field, text) - USE parallel, ONLY : gather_pjz_i - IMPLICIT NONE - REAL(dp), DIMENSION(ips_i:ipe_i,ijs_i:ije_i,izs:ize), INTENT(IN) :: field - REAL(dp), DIMENSION(1:Np_i,1:Nj_i,1:Nz) :: field_full - CHARACTER(*), INTENT(IN) :: text - CHARACTER(LEN=50) :: dset_name - field_full = 0; - WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var3d", TRIM(text), iframe3d - IF (num_procs .EQ. 1) THEN ! 
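! ---------------------------------------------------------------------------
! Illustrative sketch (not part of this patch): write_field3d_kykxz above
! either writes directly (single process), gathers the distributed slabs onto
! one rank before a single putarr call (GATHERV_OUTPUT), or falls back to
! putarrnd. The standalone program below shows the gather-then-single-writer
! idea with plain MPI_GATHER and equal slab sizes; gather_xyz in parallel_mod
! handles the distributed 3D case. All names in the program are made up.
PROGRAM gather_then_write_sketch
  USE mpi
  IMPLICIT NONE
  INTEGER, PARAMETER :: nloc = 3                      ! local slab size (equal on all ranks)
  REAL(8) :: slab(nloc)
  REAL(8), ALLOCATABLE :: full(:)
  INTEGER :: ierr, my_id, num_procs, i
  CALL MPI_INIT(ierr)
  CALL MPI_COMM_RANK(MPI_COMM_WORLD, my_id, ierr)
  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
  slab = (/ (REAL(my_id*nloc + i, 8), i = 1, nloc) /) ! fake local data
  ALLOCATE(full(nloc*num_procs))
  ! every rank sends its slab, rank 0 receives the assembled global array
  CALL MPI_GATHER(slab, nloc, MPI_DOUBLE_PRECISION, &
                  full, nloc, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
  IF (my_id == 0) PRINT *, 'assembled:', full         ! single writer, as with ionode=0
  CALL MPI_FINALIZE(ierr)
END PROGRAM gather_then_write_sketch
! ---------------------------------------------------------------------------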
no data distribution - CALL putarr(fidres, dset_name, field(ips_i:ipe_i,ijs_i:ije_i,izs:ize), ionode=0) - ELSE - CALL gather_pjz_i(field(ips_i:ipe_i,ijs_i:ije_i,izs:ize),field_full(1:Np_i,1:Nj_i,1:Nz)) - CALL putarr(fidres, dset_name, field(1:Np_i,1:Nj_i,1:Nz), ionode=0) - ENDIF - CALL attach(fidres, dset_name, "time", time) - END SUBROUTINE write_field3d_pjz_i - - SUBROUTINE write_field3d_pjz_e(field, text) - USE parallel, ONLY : gather_pjz_e - IMPLICIT NONE - REAL(dp), DIMENSION(ips_e:ipe_e,ijs_e:ije_e,izs:ize), INTENT(IN) :: field - REAL(dp), DIMENSION(1:pmaxe+1,1:jmaxe+1,1:Nz) :: field_full - CHARACTER(*), INTENT(IN) :: text - CHARACTER(LEN=50) :: dset_name - field_full = 0; - WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var3d", TRIM(text), iframe3d - IF (num_procs .EQ. 1) THEN ! no data distribution - CALL putarr(fidres, dset_name, field(ips_e:ipe_e,ijs_e:ije_e,izs:ize), ionode=0) - ELSE - CALL gather_pjz_e(field(ips_e:ipe_e,ijs_e:ije_e,izs:ize),field_full(1:pmaxe+1,1:jmaxe+1,1:Nz)) - CALL putarr(fidres, dset_name, field(1:Np_i,1:Nj_i,1:Nz), ionode=0) - ENDIF - CALL attach(fidres, dset_name, "time", time) - END SUBROUTINE write_field3d_pjz_e + SUBROUTINE write_field3d_kykxz(field, text) + USE basic, ONLY : GATHERV_OUTPUT + USE parallel, ONLY : gather_xyz, num_procs + IMPLICIT NONE + COMPLEX(dp), DIMENSION(local_nky,local_nkx,local_nz), INTENT(IN) :: field + CHARACTER(*), INTENT(IN) :: text + COMPLEX(dp), DIMENSION(total_nky,total_nkx,total_nz) :: field_full + CHARACTER(256) :: dset_name + field_full = 0; + WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var3d", TRIM(text), iframe3d + + IF (num_procs .EQ. 1) THEN ! no data distribution + CALL putarr(fidres, dset_name, field, ionode=0) + + ELSEIF(GATHERV_OUTPUT) THEN ! output using one node (gatherv) + CALL gather_xyz(field,field_full,local_nky,total_nky,total_nkx,local_nz,total_nz) + CALL putarr(fidres, dset_name, field_full, ionode=0) + ELSE ! output using putarrnd (very slow on marconi) + CALL putarrnd(fidres, dset_name, field, (/1, 1, 3/)) + ENDIF + CALL attach(fidres, dset_name, "time", time) + END SUBROUTINE write_field3d_kykxz + + SUBROUTINE write_field3d_pjz(field, text) + USE parallel, ONLY : gather_pjz, num_procs + IMPLICIT NONE + REAL(dp), DIMENSION(local_np,local_nj,local_nz), INTENT(IN) :: field + REAL(dp), DIMENSION(total_np,total_nj,total_nz) :: field_full + CHARACTER(*), INTENT(IN) :: text + CHARACTER(LEN=50) :: dset_name + field_full = 0; + WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var3d", TRIM(text), iframe3d + IF (num_procs .EQ. 1) THEN ! 
no data distribution + CALL putarr(fidres, dset_name, field, ionode=0) + ELSE + CALL gather_pjz(field,field_full,local_np,total_np,total_nj,local_nz,total_nz) + CALL putarr(fidres, dset_name, field_full, ionode=0) + ENDIF + CALL attach(fidres, dset_name, "time", time) + END SUBROUTINE write_field3d_pjz END SUBROUTINE diagnose_3d SUBROUTINE diagnose_5d - USE basic + USE basic, ONLY: time, iframe5d,cstep USE futils, ONLY: append, getatt, attach, putarrnd, putarr - USE fields - USE array!, ONLY: Sepj, Sipj - USE grid, ONLY: ips_e,ipe_e, ips_i, ipe_i, & - ijs_e,ije_e, ijs_i, ije_i, & - Np_i, Nj_i, Np_e, Nj_e, Nky, Nkx, Nz, & + USE fields, ONLY: moments + USE array, ONLY: Sapj + USE grid, ONLY:ips, ipe, ijs, ije, & + total_np, total_nj, total_nky, total_nkx, total_nz, & + local_np, local_nj, local_nky, local_nkx, local_nz, & + ngp, ngj, ngz,& ikxs,ikxe,ikys,ikye,izs,ize - USE time_integration + USE time_integration, ONLY: updatetlevel USE diagnostics_par - USE prec_const - USE model, ONLY: KIN_E + USE prec_const, ONLY: dp IMPLICIT NONE CALL append(fidres, "/data/var5d/time", time,ionode=0) @@ -537,101 +474,67 @@ SUBROUTINE diagnose_5d CALL attach(fidres,"/data/var5d/" , "frames", iframe5d) IF (write_Napj) THEN - IF(KIN_E)& - CALL write_field5d_e(moments_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel), 'moments_e') - CALL write_field5d_i(moments_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel), 'moments_i') + CALL write_field5d(moments(1,1+ngp/2:local_np+ngp/2,1+ngj/2:local_nj+ngj/2,& + :,:,1+ngz/2:local_nz+ngz/2,updatetlevel), 'moments') ENDIF IF (write_Sapj) THEN - IF(KIN_E)& - CALL write_field5d_e(Sepj(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize), 'Sepj') - CALL write_field5d_i(Sipj(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize), 'Sipj') + CALL write_field5d(Sapj(1,:,:,:,:,:), 'Sapj') ENDIF CONTAINS - SUBROUTINE write_field5d_e(field, text) - USE futils, ONLY: attach, putarr, putarrnd - USE parallel, ONLY: gather_pjxyz_e - USE grid, ONLY: ips_e,ipe_e, ijs_e,ije_e, ikxs,ikxe, ikys,ikye, izs,ize - USE prec_const + SUBROUTINE write_field5d(field, text) + USE basic, ONLY: GATHERV_OUTPUT, jobnum, dt + USE futils, ONLY: attach, putarr, putarrnd + USE parallel, ONLY: gather_pjxyz, num_procs + USE prec_const, ONLY: dp IMPLICIT NONE - COMPLEX(dp), DIMENSION(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize), INTENT(IN) :: field + COMPLEX(dp), DIMENSION(local_np,local_nj,local_nky,local_nkx,local_nz), INTENT(IN) :: field CHARACTER(*), INTENT(IN) :: text - COMPLEX(dp), DIMENSION(1:Np_e,1:Nj_e,1:Nky,1:Nkx,1:Nz) :: field_full + COMPLEX(dp), DIMENSION(total_np,total_nj,total_nky,total_nkx,total_nz) :: field_full CHARACTER(LEN=50) :: dset_name field_full = 0; WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var5d", TRIM(text), iframe5d IF (num_procs .EQ. 1) THEN - CALL putarr(fidres, dset_name, field(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize), ionode=0) - ELSEIF(GATHERV_OUTPUT) THEN ! 
output using one node (gatherv) - CALL gather_pjxyz_e(field(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize),& - field_full(1:Np_e,1:Nj_e,1:Nky,1:Nkx,1:Nz)) - CALL putarr(fidres, dset_name, field_full(1:Np_i,1:Nj_i,1:Nky,1:Nkx,1:Nz), ionode=0) - ELSE - CALL putarrnd(fidres, dset_name, field(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize), (/1,3,5/)) - ENDIF - CALL attach(fidres, dset_name, 'cstep', cstep) - CALL attach(fidres, dset_name, 'time', time) - CALL attach(fidres, dset_name, 'jobnum', jobnum) - CALL attach(fidres, dset_name, 'dt', dt) - CALL attach(fidres, dset_name, 'iframe2d', iframe2d) - CALL attach(fidres, dset_name, 'iframe5d', iframe5d) - - END SUBROUTINE write_field5d_e - - SUBROUTINE write_field5d_i(field, text) - USE futils, ONLY: attach, putarr, putarrnd - USE parallel, ONLY: gather_pjxyz_i - USE grid, ONLY: ips_i,ipe_i, ijs_i,ije_i, ikxs,ikxe, ikys,ikye, izs,ize - USE prec_const - IMPLICIT NONE - COMPLEX(dp), DIMENSION(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize), INTENT(IN) :: field - CHARACTER(*), INTENT(IN) :: text - COMPLEX(dp), DIMENSION(1:Np_i,1:Nj_i,1:Nky,1:Nkx,1:Nz) :: field_full - CHARACTER(LEN=50) :: dset_name - field_full = 0; - WRITE(dset_name, "(A, '/', A, '/', i6.6)") "/data/var5d", TRIM(text), iframe5d - IF (num_procs .EQ. 1) THEN - CALL putarr(fidres, dset_name, field(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize), ionode=0) + CALL putarr(fidres, dset_name, field(ips:ipe,ijs:ije,ikys:ikye,ikxs:ikxe,izs:ize), ionode=0) ELSEIF(GATHERV_OUTPUT) THEN ! output using one node (gatherv) - CALL gather_pjxyz_i(field(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize),& - field_full(1:Np_i,1:Nj_i,1:Nky,1:Nkx,1:Nz)) - CALL putarr(fidres, dset_name, field_full(1:Np_i,1:Nj_i,1:Nky,1:Nkx,1:Nz), ionode=0) + CALL gather_pjxyz(field,field_full,local_np,total_np,total_nj,local_nky,total_nky,total_nkx,local_nz,total_nz) + CALL putarr(fidres, dset_name, field_full(1:total_np,1:total_nj,1:total_nky,1:total_nkx,1:total_nz), ionode=0) ELSE - CALL putarrnd(fidres, dset_name, field(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize), (/1,3,5/)) + CALL putarrnd(fidres, dset_name, field(ips:ipe,ijs:ije,ikys:ikye,ikxs:ikxe,izs:ize), (/1,3,5/)) ENDIF CALL attach(fidres, dset_name, 'cstep', cstep) CALL attach(fidres, dset_name, 'time', time) CALL attach(fidres, dset_name, 'jobnum', jobnum) CALL attach(fidres, dset_name, 'dt', dt) - CALL attach(fidres, dset_name, 'iframe2d', iframe2d) CALL attach(fidres, dset_name, 'iframe5d', iframe5d) - - END SUBROUTINE write_field5d_i + END SUBROUTINE write_field5d END SUBROUTINE diagnose_5d SUBROUTINE spit_snapshot_check USE fields, ONLY: phi - USE grid, ONLY: ikxs,ikxe,Nkx,ikys,ikye,Nky,izs,ize,Nz - USE parallel, ONLY: gather_xyz + USE grid, ONLY: total_nkx,total_nky,total_nz,& + local_nky,local_nz, ngz + USE parallel, ONLY: gather_xyz, my_id USE basic + USE prec_const, ONLY: dp IMPLICIT NONE LOGICAL :: file_exist INTEGER :: fid_check, ikx, iky, iz CHARACTER(256) :: check_filename - COMPLEX(dp), DIMENSION(1:Nky,1:Nkx,1:Nz) :: field_to_check + COMPLEX(dp), DIMENSION(total_nky,total_nkx,total_nz) :: field_to_check !! Spit a snapshot of PHI if requested (triggered by creating a file named "check_phi") INQUIRE(file='check_phi', exist=file_exist) IF( file_exist ) THEN IF(my_id.EQ. 0) WRITE(*,*) 'Check file found -> gather phi..' - CALL gather_xyz(phi(ikys:ikye,ikxs:ikxe,izs:ize), field_to_check) + CALL gather_xyz(phi(:,:,1+Ngz/2:local_nz+Ngz/2), field_to_check,local_nky,total_nky,total_nkx,local_nz,total_nz) IF(my_id.EQ. 
0) THEN WRITE(check_filename,'(a16)') 'check_phi.out' OPEN(fid_check, file=check_filename, form='formatted') WRITE(*,*) 'Check file found -> output phi ..' - WRITE(fid_check,*) Nky, Nkx, Nz - DO iky = 1,Nky; DO ikx = 1, Nkx; DO iz = 1,Nz + WRITE(fid_check,*) total_nky, total_nkx, total_nz + DO iky = 1,total_nky; DO ikx = 1, total_nkx; DO iz = 1,total_nz WRITE(fid_check,*) real(field_to_check(iky,ikx,iz)), ',' , imag(field_to_check(iky,ikx,iz)) ENDDO; ENDDO; ENDDO CLOSE(fid_check) diff --git a/src/diagnostics_par_mod.F90 b/src/diagnostics_par_mod.F90 index 9cbe257d..e5093904 100644 --- a/src/diagnostics_par_mod.F90 +++ b/src/diagnostics_par_mod.F90 @@ -11,7 +11,8 @@ MODULE diagnostics_par LOGICAL, PUBLIC, PROTECTED :: write_Napj, write_Sapj LOGICAL, PUBLIC, PROTECTED :: write_dens, write_fvel, write_temp - INTEGER, PUBLIC, PROTECTED :: nsave_0d, nsave_1d, nsave_2d, nsave_3d, nsave_5d + INTEGER, PUBLIC, PROTECTED :: nsave_0d, nsave_1d, nsave_2d, nsave_3d, nsave_5d ! save data every n step + INTEGER, PUBLIC, PROTECTED :: dtsave_0d, dtsave_1d, dtsave_2d, dtsave_3d, dtsave_5d ! save data every dt time unit ! HDF5 file CHARACTER(len=256), PUBLIC :: resfile,resfile0 = "outputs" ! Head of main result file name @@ -23,7 +24,6 @@ MODULE diagnostics_par CHARACTER(len=256), PUBLIC :: prffile,prffile0 = "profiler" ! Head of time traces (gamma_x,Q_x) CHARACTER(len=256), PUBLIC :: input_fname CHARACTER(len=256), PUBLIC :: rstfile ! restart result file - INTEGER, PUBLIC :: job2load ! jobnum of the checkpoint to load INTEGER, PUBLIC :: fidres,fidmsp,fidfld,fidttr ! FID for output INTEGER, PUBLIC :: fidmom,fidggm, fidprf INTEGER, PUBLIC :: fidrst ! FID for restart file @@ -36,31 +36,38 @@ CONTAINS SUBROUTINE diag_par_readinputs ! Read the input parameters - USE basic, ONLY : lu_in + USE basic, ONLY : lu_in, dt USE prec_const IMPLICIT NONE - NAMELIST /OUTPUT_PAR/ nsave_0d, nsave_1d, nsave_2d, nsave_3d, nsave_5d + NAMELIST /OUTPUT_PAR/ dtsave_0d, dtsave_1d, dtsave_2d, dtsave_3d, dtsave_5d NAMELIST /OUTPUT_PAR/ write_doubleprecision, write_gamma, write_hf, write_phi NAMELIST /OUTPUT_PAR/ write_Na00, write_Napj, write_Sapj NAMELIST /OUTPUT_PAR/ write_dens, write_fvel, write_temp - NAMELIST /OUTPUT_PAR/ job2load READ(lu_in,output_par) + ! set nsave variables from dtsave ones (time unit to steps) + nsave_0d = CEILING(dtsave_0d/dt) + nsave_1d = CEILING(dtsave_1d/dt) + nsave_2d = CEILING(dtsave_2d/dt) + nsave_3d = CEILING(dtsave_3d/dt) + nsave_5d = CEILING(dtsave_5d/dt) + END SUBROUTINE diag_par_readinputs - SUBROUTINE diag_par_outputinputs(fid, str) + SUBROUTINE diag_par_outputinputs(fid) ! ! Write the input parameters to the results_xx.h5 file ! USE prec_const - USE futils, ONLY: attach + USE futils, ONLY: attach, creatd IMPLICIT NONE INTEGER, INTENT(in) :: fid - CHARACTER(len=256), INTENT(in) :: str - + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/diag_par' + CALL creatd(fidres, 0,(/0/),TRIM(str),'Diagnostics Parameters Input') CALL attach(fid, TRIM(str), "write_doubleprecision", write_doubleprecision) CALL attach(fid, TRIM(str), "nsave_0d", nsave_0d) CALL attach(fid, TRIM(str), "nsave_1d", nsave_1d) diff --git a/src/endrun.F90 b/src/endrun.F90 index 4667c04e..80413fe6 100644 --- a/src/endrun.F90 +++ b/src/endrun.F90 @@ -9,7 +9,7 @@ SUBROUTINE endrun IF( nlend ) THEN !---------------------------------------------------------------------- ! 1. Normal end of run - IF(my_id .EQ. 
0) WRITE(*,'(/a)') ' Normal exit' + CALL speak(' Normal exit') !---------------------------------------------------------------------- ! 2. Abnormal exit diff --git a/src/fields_mod.F90 b/src/fields_mod.F90 index 728b9fdb..ba3cc5cf 100644 --- a/src/fields_mod.F90 +++ b/src/fields_mod.F90 @@ -3,10 +3,8 @@ MODULE fields use prec_const implicit none !------------------MOMENTS Napj------------------ - ! Hermite-Moments: N_a^pj ! dimensions correspond to: p, j, kx, ky, z, updatetlevel. - COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments_e - COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments_i - + ! Hermite-Moments: N_a^pj ! dimensions correspond to: species (a), p, j, kx, ky, z, updatetlevel. + COMPLEX(dp), DIMENSION(:,:,:,:,:,:,:), ALLOCATABLE :: moments !------------------ELECTROSTATIC POTENTIAL------------------ ! Normalized electric potential: \hat{\phi} ! (kx,ky,z) diff --git a/src/fourier_mod.F90 b/src/fourier_mod.F90 index 3c0484d7..3a23febf 100644 --- a/src/fourier_mod.F90 +++ b/src/fourier_mod.F90 @@ -1,8 +1,6 @@ MODULE fourier - USE prec_const - USE grid - USE basic + USE parallel use, intrinsic :: iso_c_binding implicit none @@ -11,57 +9,52 @@ MODULE fourier PRIVATE - PUBLIC :: init_grid_distr_and_plans, poisson_bracket_and_sum, convolve_and_add, finalize_plans + PUBLIC :: init_grid_distr_and_plans, poisson_bracket_and_sum, finalize_plans real(C_DOUBLE), pointer, PUBLIC :: real_data_f(:,:), real_data_g(:,:), bracket_sum_r(:,:) complex(C_DOUBLE_complex), pointer, PUBLIC :: cmpx_data_f(:,:), cmpx_data_g(:,:), bracket_sum_c(:,:) - type(C_PTR) :: cdatar_f, cdatar_g, cdatar_c - type(C_PTR) :: cdatac_f, cdatac_g, cdatac_c - type(C_PTR) , PUBLIC :: planf, planb - integer(C_INTPTR_T) :: i, ix, iy - integer(C_INTPTR_T), PUBLIC :: alloc_local_1, alloc_local_2 - integer(C_INTPTR_T) :: NX_, NY_, NY_halved - integer :: communicator + type(C_PTR) :: cdatar_f, cdatar_g, cdatar_c + type(C_PTR) :: cdatac_f, cdatac_g, cdatac_c + type(C_PTR) , PUBLIC :: planf, planb + integer(C_INTPTR_T) :: i, ix, iy + integer(C_INTPTR_T), PUBLIC :: alloc_local_1, alloc_local_2 + integer(C_INTPTR_T) :: NX_, NY_, NY_halved ! many plan data variables - integer(C_INTPTR_T) :: howmany=9 ! numer of eleemnt of the tensor + integer(C_INTPTR_T) :: howmany=9 ! numer of element of the tensor integer :: rank=3 ! rank of the transform integer(C_INTPTR_T), dimension(2) :: fft_dims ! array containing data extent CONTAINS - SUBROUTINE init_grid_distr_and_plans(Nx,Ny) + SUBROUTINE init_grid_distr_and_plans(Nx,Ny,communicator,local_nkx_ptr,local_nkx_ptr_offset,local_nky_ptr,local_nky_ptr_offset) IMPLICIT NONE - - INTEGER, INTENT(IN) :: Nx,Ny + INTEGER, INTENT(IN) :: Nx,Ny, communicator + INTEGER(C_INTPTR_T), INTENT(OUT) :: local_nkx_ptr,local_nkx_ptr_offset,local_nky_ptr,local_nky_ptr_offset NX_ = Nx; NY_ = Ny NY_halved = NY_/2 + 1 - - ! communicator = MPI_COMM_WORLD - communicator = comm_ky - !! Complex arrays F, G ! Compute the room to allocate - alloc_local_1 = fftw_mpi_local_size_2d(NY_halved, NX_, communicator, local_nky, local_nky_offset) + alloc_local_1 = fftw_mpi_local_size_2d(NY_halved, NX_, communicator, local_nky_ptr, local_nky_ptr_offset) ! Initalize pointers to this room cdatac_f = fftw_alloc_complex(alloc_local_1) cdatac_g = fftw_alloc_complex(alloc_local_1) cdatac_c = fftw_alloc_complex(alloc_local_1) ! 
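! ---------------------------------------------------------------------------
! Note (editorial, not part of this patch): fftw_mpi_local_size_2d returns the
! number of complex elements each rank must allocate and, through its last two
! arguments, the extent and offset of the slab owned by this rank (exposed
! here as local_nky_ptr / local_nky_ptr_offset, and analogously for the
! kx-distributed real-space arrays further down). The real buffers are
! dimensioned 2*(NY_/2+1) along the fast direction because an r2c transform of
! NY_ real samples yields NY_/2+1 complex coefficients, and FFTW's MPI
! interface requires this padded layout.
! ---------------------------------------------------------------------------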
Initalize the arrays with the rooms pointed - call c_f_pointer(cdatac_f, cmpx_data_f, [NX_ ,local_nky]) - call c_f_pointer(cdatac_g, cmpx_data_g, [NX_ ,local_nky]) - call c_f_pointer(cdatac_c, bracket_sum_c, [NX_ ,local_nky]) + call c_f_pointer(cdatac_f, cmpx_data_f, [NX_ ,local_nky_ptr]) + call c_f_pointer(cdatac_g, cmpx_data_g, [NX_ ,local_nky_ptr]) + call c_f_pointer(cdatac_c, bracket_sum_c, [NX_ ,local_nky_ptr]) !! Real arrays iFFT(F), iFFT(G) ! Compute the room to allocate - alloc_local_2 = fftw_mpi_local_size_2d(NX_, NY_halved, communicator, local_nkx, local_nkx_offset) + alloc_local_2 = fftw_mpi_local_size_2d(NX_, NY_halved, communicator, local_nkx_ptr, local_nkx_ptr_offset) ! Initalize pointers to this room cdatar_f = fftw_alloc_real(2*alloc_local_2) cdatar_g = fftw_alloc_real(2*alloc_local_2) cdatar_c = fftw_alloc_real(2*alloc_local_2) ! Initalize the arrays with the rooms pointed - call c_f_pointer(cdatar_f, real_data_f, [2*(NY_/2 + 1),local_nkx]) - call c_f_pointer(cdatar_g, real_data_g, [2*(NY_/2 + 1),local_nkx]) - call c_f_pointer(cdatar_c, bracket_sum_r, [2*(NY_/2 + 1),local_nkx]) + call c_f_pointer(cdatar_f, real_data_f, [2*(NY_/2 + 1),local_nkx_ptr]) + call c_f_pointer(cdatar_g, real_data_g, [2*(NY_/2 + 1),local_nkx_ptr]) + call c_f_pointer(cdatar_c, bracket_sum_r, [2*(NY_/2 + 1),local_nkx_ptr]) ! Plan Creation (out-of-place forward and backward FFT) planf = fftw_mpi_plan_dft_r2c_2D(NX_, NY_, real_data_f, cmpx_data_f, communicator, ior(FFTW_MEASURE, FFTW_MPI_TRANSPOSED_OUT)) @@ -73,62 +66,46 @@ MODULE fourier END SUBROUTINE init_grid_distr_and_plans - !!! Compute the poisson bracket of [F,G] to real space ! - Compute the convolution using the convolution theorem - SUBROUTINE poisson_bracket_and_sum( F_, G_, sum_real_) + SUBROUTINE poisson_bracket_and_sum(kx_, ky_, inv_Nx, inv_Ny, AA_x, AA_y,& + local_nky_ptr, local_nkx_ptr, F_, G_, sum_real_) IMPLICIT NONE - COMPLEX(C_DOUBLE_COMPLEX), DIMENSION(ikys:ikye,ikxs:ikxe),& - INTENT(IN) :: F_, G_ ! input fields - real(C_DOUBLE), pointer, INTENT(INOUT) :: sum_real_(:,:) + INTEGER(C_INTPTR_T), INTENT(IN) :: local_nkx_ptr,local_nky_ptr + REAL(dp), INTENT(IN) :: inv_Nx, inv_Ny + REAL(dp), DIMENSION(:), INTENT(IN) :: kx_, ky_, AA_x, AA_y + COMPLEX(C_DOUBLE_COMPLEX),INTENT(IN) :: F_(:,:), G_(:,:) + real(C_DOUBLE), pointer, INTENT(INOUT) :: sum_real_(:,:) + INTEGER :: ikx,iky ! First term df/dx x dg/dy - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - cmpx_data_f(ikx,iky-local_nky_offset) = & - imagu*kxarray(ikx)*F_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter - cmpx_data_g(ikx,iky-local_nky_offset) = & - imagu*kyarray(iky)*G_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter + DO ikx = 1,local_nkx_ptr + DO iky = 1,local_nky_ptr + cmpx_data_f(ikx,iky) = & + imagu*kx_(ikx)*F_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter + cmpx_data_g(ikx,iky) = & + imagu*ky_(iky)*G_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter ENDDO ENDDO call fftw_mpi_execute_dft_c2r(planb, cmpx_data_f, real_data_f) call fftw_mpi_execute_dft_c2r(planb, cmpx_data_g, real_data_g) - sum_real_ = sum_real_ + real_data_f * real_data_g*inv_Ny*inv_Nx + sum_real_ = sum_real_ + real_data_f*real_data_g*inv_Ny*inv_Nx ! 
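! ---------------------------------------------------------------------------
! Note (editorial, not part of this patch): poisson_bracket_and_sum
! accumulates the real-space Poisson bracket
!   {F,G} = dF/dx*dG/dy - dF/dy*dG/dx,
! using that d/dx <-> i*kx and d/dy <-> i*ky in Fourier space. Each term is
! built as iFFT(i*k F) * iFFT(i*k G), de-aliased by the AA_x*AA_y masks before
! the backward transforms, and scaled by the inv_Nx*inv_Ny normalization
! factor (FFTW transforms are unnormalized).
! ---------------------------------------------------------------------------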
Second term -df/dy x dg/dx - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - cmpx_data_f(ikx,iky-local_nky_offset) = & - imagu*kyarray(iky)*F_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter - cmpx_data_g(ikx,iky-local_nky_offset) = & - imagu*kxarray(ikx)*G_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter + DO ikx = 1,local_nkx_ptr + DO iky = 1,local_nky_ptr + cmpx_data_f(ikx,iky) = & + imagu*ky_(iky)*F_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter + cmpx_data_g(ikx,iky) = & + imagu*kx_(ikx)*G_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter ENDDO ENDDO call fftw_mpi_execute_dft_c2r(planb, cmpx_data_f, real_data_f) call fftw_mpi_execute_dft_c2r(planb, cmpx_data_g, real_data_g) - sum_real_ = sum_real_ - real_data_f * real_data_g*inv_Ny*inv_Nx + sum_real_ = sum_real_ - real_data_f*real_data_g*inv_Ny*inv_Nx END SUBROUTINE poisson_bracket_and_sum -!!! Compute the poisson bracket of [F,G] to real space -! - Compute the convolution using the convolution theorem -SUBROUTINE convolve_and_add( F_, G_) - IMPLICIT NONE - COMPLEX(C_DOUBLE_COMPLEX), DIMENSION(ikys:ikye,ikxs:ikxe),& - INTENT(IN) :: F_, G_ ! input fields - ! First term df/dx x dg/dy - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - cmpx_data_f(ikx,iky-local_nky_offset) = F_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter - cmpx_data_g(ikx,iky-local_nky_offset) = G_(iky,ikx)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter - ENDDO - ENDDO - call fftw_mpi_execute_dft_c2r(planb, cmpx_data_f, real_data_f) - call fftw_mpi_execute_dft_c2r(planb, cmpx_data_g, real_data_g) - bracket_sum_r = bracket_sum_r + real_data_f * real_data_g*inv_Ny*inv_Nx -END SUBROUTINE convolve_and_add - SUBROUTINE finalize_plans IMPLICIT NONE - IF (my_id .EQ. 0) write(*,*) '..plan Destruction.' call fftw_destroy_plan(planb) call fftw_destroy_plan(planf) diff --git a/src/geometry_mod.F90 b/src/geometry_mod.F90 index 576e8ae1..0af30081 100644 --- a/src/geometry_mod.F90 +++ b/src/geometry_mod.F90 @@ -1,15 +1,7 @@ module geometry ! computes geometrical quantities ! Adapted from B.J.Frei MOLIX code (2021) - - use prec_const - use model - use grid - use array - use fields - use basic - use calculus, ONLY: simpson_rule_z - use miller, ONLY: set_miller_parameters, get_miller + use prec_const, ONLY: dp implicit none PRIVATE ! Geometry input parameters @@ -27,7 +19,8 @@ implicit none REAL(dp), PUBLIC, PROTECTED :: zeta = 0._dp ! squareness REAL(dp), PUBLIC, PROTECTED :: s_zeta = 0._dp ! '' szeta = r dzeta/dr ! to apply shift in the parallel z-BC if shearless - REAL(dp), PUBLIC, PROTECTED :: shift_y = 0._dp ! for Arno + REAL(dp), PUBLIC, PROTECTED :: shift_y = 0._dp ! for Arno <3 + INTEGER, PUBLIC, PROTECTED :: Npol = 1 ! number of poloidal turns ! Chooses the type of parallel BC we use for the unconnected kx modes (active for non-zero shear only) ! 'periodic' : Connect a disconnected kx to a mode on the other cadran ! 'dirichlet' : Connect a disconnected kx to 0 @@ -78,11 +71,12 @@ CONTAINS SUBROUTINE geometry_readinputs + USE basic, ONLY: lu_in, speak ! Read the input parameters IMPLICIT NONE NAMELIST /GEOMETRY/ geom, q0, shear, eps,& kappa, s_kappa,delta, s_delta, zeta, s_zeta,& ! For miller - parallel_bc, shift_y + parallel_bc, shift_y, Npol READ(lu_in,geometry) IF(shear .NE. 0._dp) SHEARED = .true. SELECT CASE(parallel_bc) @@ -94,40 +88,45 @@ CONTAINS CASE DEFAULT ERROR STOP '>> ERROR << Parallel BC not recognized' END SELECT - IF(my_id .EQ. 
0) print*, 'Parallel BC : ', parallel_bc + CALL speak('Parallel BC : '//parallel_bc) END SUBROUTINE geometry_readinputs subroutine eval_magnetic_geometry + USE grid, ONLY: total_nky, total_nz, local_nkx, local_nky, local_nz, Ngz, kxarray, kyarray, set_kparray, Nzgrid, deltaz + USE basic, ONLY: speak + USE miller, ONLY: set_miller_parameters, get_miller + USE calculus, ONLY: simpson_rule_z ! evalute metrix, elementwo_third_kpmaxts, jacobian and gradient implicit none REAL(dp) :: kx,ky - COMPLEX(dp), DIMENSION(izs:ize) :: integrant + COMPLEX(dp), DIMENSION(local_Nz+Ngz) :: integrant real(dp) :: G1,G2,G3,Cx,Cy + INTEGER :: eo,iz,iky,ikx ! Allocate arrays - CALL geometry_allocate_mem + CALL geometry_allocate_mem(local_nky,local_nkx,local_nz,Ngz,Nzgrid) ! - IF( (Ny .EQ. 1) .AND. (Nz .EQ. 1)) THEN !1D perp linear run - IF( my_id .eq. 0 ) WRITE(*,*) '1D perpendicular geometry' + IF( (total_nky .EQ. 1) .AND. (total_nz .EQ. 1)) THEN !1D perp linear run + CALL speak('1D perpendicular geometry') call eval_1D_geometry ELSE SELECT CASE(geom) CASE('s-alpha') - IF( my_id .eq. 0 ) WRITE(*,*) 's-alpha geometry' + CALL speak('s-alpha geometry') call eval_salpha_geometry CASE('Z-pinch','z-pinch','Zpinch','zpinch') - IF( my_id .eq. 0 ) WRITE(*,*) 'Z-pinch geometry' + CALL speak('Z-pinch geometry') call eval_zpinch_geometry SHEARED = .FALSE. shear = 0._dp CASE('miller') - IF( my_id .eq. 0 ) WRITE(*,*) 'Miller geometry' + CALL speak('Miller geometry') call set_miller_parameters(kappa,s_kappa,delta,s_delta,zeta,s_zeta) - call get_miller(eps,major_R,major_Z,q0,shear,alpha_MHD,edge_opt,& - C_y,C_xy,dpdx_pm_geom,gxx,gyy,gzz,gxy,gxz,gyz,& - dBdx,dBdy,hatB,jacobian,dBdz,hatR,hatZ,dxdR,dxdZ,& - Ckxky,gradz_coeff) + call get_miller(eps,major_R,major_Z,q0,shear,Npol,alpha_MHD,edge_opt,& + C_y,C_xy,dpdx_pm_geom,gxx,gyy,gzz,gxy,gxz,gyz,& + dBdx,dBdy,hatB,jacobian,dBdz,hatR,hatZ,dxdR,dxdZ,& + Ckxky,gradz_coeff) CASE DEFAULT ERROR STOP '>> ERROR << geometry not recognized!!' END SELECT @@ -136,31 +135,19 @@ CONTAINS ! Evaluate perpendicular wavenumber ! k_\perp^2 = g^{xx} k_x^2 + 2 g^{xy}k_x k_y + k_y^2 g^{yy} ! normalized to rhos_ - DO eo = 0,1 - DO iz = izgs,izge - DO iky = ikys, ikye - ky = kyarray(iky) - DO ikx = ikxs, ikxe - kx = kxarray(ikx) - ! there is a factor 1/B from the normalization; important to match GENE - ! this factor comes from $b_a$ argument in the Bessel. Kperp is not used otherwise. - kparray(iky, ikx, iz, eo) = & - SQRT( gxx(iz,eo)*kx**2 + 2._dp*gxy(iz,eo)*kx*ky + gyy(iz,eo)*ky**2)/ hatB(iz,eo) - ENDDO - ENDDO - ENDDO + CALL set_kparray(gxx,gxy,gyy,hatB) + DO eo = 1,Nzgrid ! Curvature operator (Frei et al. 2022 eq 2.15) - DO iz = izgs,izge + DO iz = 1,local_Nz+Ngz G1 = gxx(iz,eo)*gyy(iz,eo)-gxy(iz,eo)*gxy(iz,eo) G2 = gxx(iz,eo)*gyz(iz,eo)-gxy(iz,eo)*gxz(iz,eo) G3 = gxy(iz,eo)*gyz(iz,eo)-gyy(iz,eo)*gxz(iz,eo) ! Here we divide by hatB because our equation is formulated with grad(lnB) terms (not gradB like in GENE) Cx =-(dBdy(iz,eo) + G2/G1*dBdz(iz,eo))/hatB(iz,eo) Cy = (dBdx(iz,eo) - G3/G1*dBdz(iz,eo))/hatB(iz,eo) - - DO iky = ikys, ikye + DO iky = 1,local_nky ky = kyarray(iky) - DO ikx= ikxs, ikxe + DO ikx= 1,local_nkx kx = kxarray(ikx) Ckxky(iky, ikx, iz,eo) = (Cx*kx + Cy*ky)/C_xy ENDDO @@ -177,12 +164,10 @@ CONTAINS ! set the mapping for parallel boundary conditions CALL set_ikx_zBC_map - - two_third_kpmax = 2._dp/3._dp * MAXVAL(kparray) ! ! Compute the inverse z integrated Jacobian (useful for flux averaging) - integrant = Jacobian(izs:ize,0) ! 
Convert into complex array - CALL simpson_rule_z(integrant,iInt_Jacobian) + integrant = Jacobian(:,1) ! Convert into complex array + CALL simpson_rule_z(local_nz,deltaz,integrant,iInt_Jacobian) iInt_Jacobian = 1._dp/iInt_Jacobian ! reverse it END SUBROUTINE eval_magnetic_geometry ! @@ -190,13 +175,15 @@ CONTAINS ! SUBROUTINE eval_salpha_geometry + USE grid, ONLY : local_Nz,Ngz,zarray,Nzgrid ! evaluate s-alpha geometry model implicit none REAL(dp) :: z + INTEGER :: iz, eo alpha_MHD = 0._dp - parity: DO eo = 0,1 - zloop: DO iz = izgs,izge + DO eo = 1,Nzgrid + DO iz = 1,local_Nz+Ngz z = zarray(iz,eo) ! metric @@ -232,8 +219,8 @@ CONTAINS ! Curvature factor C_xy = 1._dp - ENDDO zloop - ENDDO parity + ENDDO + ENDDO END SUBROUTINE eval_salpha_geometry ! @@ -241,12 +228,14 @@ CONTAINS ! SUBROUTINE eval_zpinch_geometry + USE grid, ONLY : local_Nz,Ngz,zarray,Nzgrid implicit none REAL(dp) :: z + INTEGER :: iz, eo alpha_MHD = 0._dp - parity: DO eo = 0,1 - zloop: DO iz = izgs,izge + DO eo = 1,Nzgrid + DO iz = 1,local_Nz+Ngz z = zarray(iz,eo) ! metric @@ -279,51 +268,41 @@ CONTAINS dBdy(iz,eo) = 0._dp dBdz(iz,eo) = 0._dp ! Gene put a factor hatB or 1/hatR in this - ENDDO zloop - ENDDO parity + ENDDO + ENDDO ! Curvature factor C_xy = 1._dp END SUBROUTINE eval_zpinch_geometry ! !-------------------------------------------------------------------------------- - ! + ! NOT TESTED subroutine eval_1D_geometry + USE grid, ONLY : local_Nz,Ngz,zarray, Nzgrid ! evaluate 1D perp geometry model implicit none - REAL(dp) :: z, kx, ky + REAL(dp) :: z + INTEGER :: iz, eo + DO eo = 1,Nzgrid + DO iz = 1,local_nz+Ngz + z = zarray(iz,eo) - parity: DO eo = 0,1 - zloop: DO iz = izs,ize - z = zarray(iz,eo) + ! metric + gxx(iz,eo) = 1._dp + gxy(iz,eo) = 0._dp + gyy(iz,eo) = 1._dp - ! metric - gxx(iz,eo) = 1._dp - gxy(iz,eo) = 0._dp - gyy(iz,eo) = 1._dp + ! Relative strengh of radius + hatR(iz,eo) = 1._dp - ! Relative strengh of radius - hatR(iz,eo) = 1._dp + ! Jacobian + Jacobian(iz,eo) = 1._dp - ! Jacobian - Jacobian(iz,eo) = 1._dp - - ! Relative strengh of modulus of B - hatB(iz,eo) = 1._dp - - ! Curvature operator - DO iky = ikys, ikye - ky = kyarray(iky) - DO ikx= ikxs, ikxe - kx = kxarray(ikx) - Ckxky(ikx, iky, iz,eo) = -kx ! .. multiply by hatB to cancel the 1/ hatB factor in moments_eqs_rhs.f90 routine - ENDDO - ENDDO - ! coefficient in the front of parallel derivative - gradz_coeff(iz,eo) = 1._dp - ENDDO zloop - ENDDO parity + ! Relative strengh of modulus of B + hatB(iz,eo) = 1._dp + ENDDO + ENDDO END SUBROUTINE eval_1D_geometry ! @@ -331,186 +310,185 @@ CONTAINS ! SUBROUTINE set_ikx_zBC_map - IMPLICIT NONE - REAL :: shift - - ALLOCATE(ikx_zBC_L(ikys:ikye,ikxs:ikxe)) - ALLOCATE(ikx_zBC_R(ikys:ikye,ikxs:ikxe)) - ALLOCATE(pb_phase_L(ikys:ikye)) - ALLOCATE(pb_phase_R(ikys:ikye)) - !! No shear case (simple id mapping) or not at the end of the z domain - !3 | 1 2 3 4 5 6 | ky = 3 dky - !2 ky | 1 2 3 4 5 6 | ky = 2 dky - !1 A | 1 2 3 4 5 6 | ky = 1 dky - !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky - !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=free) - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ikx_zBC_L(iky,ikx) = ikx ! connect to itself per default - ikx_zBC_R(iky,ikx) = ikx - ENDDO - pb_phase_L(iky) = 1._dp ! no phase change per default - pb_phase_R(iky) = 1._dp - ENDDO - ! Parallel boundary are not trivial for sheared case and if - ! the user does not ask explicitly for shearless bc - IF(SHEARED .AND. (parallel_bc .NE. 'shearless')) THEN - !!!!!!!!!! LEFT PARALLEL BOUNDARY - ! 
Modify connection map only at border of z (matters for MPI z-parallelization) - IF(contains_zmin) THEN ! Check if the process is at the start of the fluxtube - DO iky = ikys,ikye - ! Formula for the shift due to shear after Npol turns - shift = 2._dp*PI*shear*kyarray(iky)*Npol - DO ikx = ikxs,ikxe - ! Usual formula for shifting indices using that dkx = 2pi*shear*dky/Nexc - ikx_zBC_L(iky,ikx) = ikx-(iky-1)*Nexc - ! Check if it points out of the kx domain - ! IF( (kxarray(ikx) - shift) .LT. kx_min ) THEN - IF( (ikx-(iky-1)*Nexc) .LT. 1 ) THEN ! outside of the frequ domain - SELECT CASE(parallel_bc) - CASE ('dirichlet')! connected to 0 - ikx_zBC_L(iky,ikx) = -99 - CASE ('periodic') - ikx_zBC_L(iky,ikx) = ikx - CASE ('cyclic')! reroute it by cycling through modes - ikx_zBC_L(iky,ikx) = MODULO(ikx_zBC_L(iky,ikx)-1,Nkx)+1 - END SELECT - ENDIF - ENDDO - ! phase present in GENE from a shift of the x origin by Lx/2 (useless?) - ! We also put the user defined shift in the y direction (see Volcokas et al. 2022) - pb_phase_L(iky) = (-1._dp)**(Nexc*(iky-1))*EXP(imagu*REAL(iky-1,dp)*2._dp*pi*shift_y) + USE grid, ONLY: local_nky,Nkx, contains_zmin,contains_zmax, Nexc + USE prec_const, ONLY: imagu, pi + IMPLICIT NONE + ! REAL :: shift + INTEGER :: ikx,iky + ALLOCATE(ikx_zBC_L(local_nky,Nkx)) + ALLOCATE(ikx_zBC_R(local_nky,Nkx)) + ALLOCATE(pb_phase_L(local_nky)) + ALLOCATE(pb_phase_R(local_nky)) + !! No shear case (simple id mapping) or not at the end of the z domain + !3 | 1 2 3 4 5 6 | ky = 3 dky + !2 ky | 1 2 3 4 5 6 | ky = 2 dky + !1 A | 1 2 3 4 5 6 | ky = 1 dky + !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky + !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=free) + DO iky = 1,local_nky + DO ikx = 1,Nkx + ikx_zBC_L(iky,ikx) = ikx ! connect to itself per default + ikx_zBC_R(iky,ikx) = ikx ENDDO - ENDIF - ! Option for disconnecting every modes, viz. connecting all boundary to 0 - IF(parallel_bc .EQ. 'disconnected') ikx_zBC_L = -99 - !!!!!!!!!! RIGHT PARALLEL BOUNDARY - IF(contains_zmax) THEN ! Check if the process is at the end of the flux-tube - DO iky = ikys,ikye - ! Formula for the shift due to shear after Npol - shift = 2._dp*PI*shear*kyarray(iky)*Npol - DO ikx = ikxs,ikxe - ! Usual formula for shifting indices - ikx_zBC_R(iky,ikx) = ikx+(iky-1)*Nexc - ! Check if it points out of the kx domain - ! IF( (kxarray(ikx) + shift) .GT. kx_max ) THEN ! outside of the frequ domain - IF( (ikx+(iky-1)*Nexc) .GT. Nkx ) THEN ! outside of the frequ domain - SELECT CASE(parallel_bc) - CASE ('dirichlet') ! connected to 0 - ikx_zBC_R(iky,ikx) = -99 - CASE ('periodic') ! connected to itself as for shearless - ikx_zBC_R(iky,ikx) = ikx - CASE ('cyclic') - ! write(*,*) 'check',ikx,iky, kxarray(ikx) + shift, '>', kx_max - ikx_zBC_R(iky,ikx) = MODULO(ikx_zBC_R(iky,ikx)-1,Nkx)+1 - END SELECT - ENDIF - ENDDO - ! phase present in GENE from a shift ofthe x origin by Lx/2 (useless?) - ! We also put the user defined shift in the y direction (see Volcokas et al. 2022) - pb_phase_R(iky) = (-1._dp)**(Nexc*(iky-1))*EXP(-imagu*REAL(iky-1,dp)*2._dp*pi*shift_y) - ENDDO - ENDIF - ! Option for disconnecting every modes, viz. connecting all boundary to 0 - IF(parallel_bc .EQ. 'disconnected') ikx_zBC_R = -99 - ENDIF - ! write(*,*) kxarray - ! write(*,*) kyarray - ! write(*,*) 'ikx_zBC_L :-----------' - ! DO iky = ikys,ikye - ! print*, ikx_zBC_L(iky,:) - ! enddo - ! print*, pb_phase_L - ! write(*,*) 'ikx_zBC_R :-----------' - ! DO iky = ikys,ikye - ! print*, ikx_zBC_R(iky,:) - ! enddo - ! print*, pb_phase_R - ! print*, shift_y - ! 
stop - !!!!!!! Example of maps ('x' means connected to 0 value, in the table it is -99) - ! dirichlet connection map BC of the RIGHT boundary (z=pi*Npol-dz) - !3 | 4 x x x 2 3 | ky = 3 dky - !2 ky | 3 4 x x 1 2 | ky = 2 dky - !1 A | 2 3 4 x 6 1 | ky = 1 dky - !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky - !kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) - - ! periodic connection map BC of the RIGHT boundary (z=pi*Npol-dz) - !3 | 4 2 3 4 2 3 | ky = 3 dky - !2 ky | 3 4 3 4 1 2 | ky = 2 dky - !1 A | 2 3 4 4 6 1 | ky = 1 dky - !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky - !kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) - - ! cyclic connection map BC of the LEFT boundary (z=-pi*Npol) - !3 | 4 5 6 1 2 3 | ky = 3 dky - !2 ky | 5 6 1 2 3 4 | ky = 2 dky - !1 A | 6 1 2 3 4 5 | ky = 1 dky - !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky - !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) - - ! shearless connection map BC of the LEFT/RIGHT boundary (z=+/-pi*Npol) - !3 | 1 2 3 4 5 6 | ky = 3 dky - !2 ky | 1 2 3 4 5 6 | ky = 2 dky - !1 A | 1 2 3 4 5 6 | ky = 1 dky - !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky - !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) - - ! disconnected connection map BC of the LEFT/RIGHT boundary (z=+/-pi*Npol) - !3 | x x x x x x | ky = 3 dky - !2 ky | x x x x x x | ky = 2 dky - !1 A | x x x x x x | ky = 1 dky - !0 | -> kx | x____x____x____x____x____x | ky = 0 dky - !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) + pb_phase_L(iky) = 1._dp ! no phase change per default + pb_phase_R(iky) = 1._dp + ENDDO + ! Parallel boundary are not trivial for sheared case and if + ! the user does not ask explicitly for shearless bc + IF(SHEARED .AND. (parallel_bc .NE. 'shearless')) THEN + !!!!!!!!!! LEFT PARALLEL BOUNDARY + ! Modify connection map only at border of z (matters for MPI z-parallelization) + IF(contains_zmin) THEN ! Check if the process is at the start of the fluxtube + DO iky = 1,local_nky + ! Formula for the shift due to shear after Npol turns + ! shift = 2._dp*PI*shear*kyarray(iky)*Npol + DO ikx = 1,Nkx + ! Usual formula for shifting indices using that dkx = 2pi*shear*dky/Nexc + ikx_zBC_L(iky,ikx) = ikx-(iky-1)*Nexc + ! Check if it points out of the kx domain + ! IF( (kxarray(ikx) - shift) .LT. kx_min ) THEN + IF( (ikx-(iky-1)*Nexc) .LT. 1 ) THEN ! outside of the frequ domain + SELECT CASE(parallel_bc) + CASE ('dirichlet')! connected to 0 + ikx_zBC_L(iky,ikx) = -99 + CASE ('periodic') + ikx_zBC_L(iky,ikx) = ikx + CASE ('cyclic')! reroute it by cycling through modes + ikx_zBC_L(iky,ikx) = MODULO(ikx_zBC_L(iky,ikx)-1,Nkx)+1 + END SELECT + ENDIF + ENDDO + ! phase present in GENE from a shift of the x origin by Lx/2 (useless?) + ! We also put the user defined shift in the y direction (see Volcokas et al. 2022) + pb_phase_L(iky) = (-1._dp)**(Nexc*(iky-1))*EXP(imagu*REAL(iky-1,dp)*2._dp*pi*shift_y) + ENDDO + ENDIF + ! Option for disconnecting every modes, viz. connecting all boundary to 0 + IF(parallel_bc .EQ. 'disconnected') ikx_zBC_L = -99 + !!!!!!!!!! RIGHT PARALLEL BOUNDARY + IF(contains_zmax) THEN ! Check if the process is at the end of the flux-tube + DO iky = 1,local_nky + ! Formula for the shift due to shear after Npol + ! shift = 2._dp*PI*shear*kyarray(iky)*Npol + DO ikx = 1,Nkx + ! Usual formula for shifting indices + ikx_zBC_R(iky,ikx) = ikx+(iky-1)*Nexc + ! Check if it points out of the kx domain + ! IF( (kxarray(ikx) + shift) .GT. kx_max ) THEN ! 
outside of the frequ domain + IF( (ikx+(iky-1)*Nexc) .GT. Nkx ) THEN ! outside of the frequ domain + SELECT CASE(parallel_bc) + CASE ('dirichlet') ! connected to 0 + ikx_zBC_R(iky,ikx) = -99 + CASE ('periodic') ! connected to itself as for shearless + ikx_zBC_R(iky,ikx) = ikx + CASE ('cyclic') + ! write(*,*) 'check',ikx,iky, kxarray(ikx) + shift, '>', kx_max + ikx_zBC_R(iky,ikx) = MODULO(ikx_zBC_R(iky,ikx)-1,Nkx)+1 + END SELECT + ENDIF + ENDDO + ! phase present in GENE from a shift ofthe x origin by Lx/2 (useless?) + ! We also put the user defined shift in the y direction (see Volcokas et al. 2022) + pb_phase_R(iky) = (-1._dp)**(Nexc*(iky-1))*EXP(-imagu*REAL(iky-1,dp)*2._dp*pi*shift_y) + ENDDO + ENDIF + ! Option for disconnecting every modes, viz. connecting all boundary to 0 + IF(parallel_bc .EQ. 'disconnected') ikx_zBC_R = -99 + ENDIF + ! write(*,*) kxarray + ! write(*,*) kyarray + ! write(*,*) 'ikx_zBC_L :-----------' + ! DO iky = ikys,ikye + ! print*, ikx_zBC_L(iky,:) + ! enddo + ! print*, pb_phase_L + ! write(*,*) 'ikx_zBC_R :-----------' + ! DO iky = ikys,ikye + ! print*, ikx_zBC_R(iky,:) + ! enddo + ! print*, pb_phase_R + ! print*, shift_y + ! stop + !!!!!!! Example of maps ('x' means connected to 0 value, in the table it is -99) + ! dirichlet connection map BC of the RIGHT boundary (z=pi*Npol-dz) + !3 | 4 x x x 2 3 | ky = 3 dky + !2 ky | 3 4 x x 1 2 | ky = 2 dky + !1 A | 2 3 4 x 6 1 | ky = 1 dky + !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky + !kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) + + ! periodic connection map BC of the RIGHT boundary (z=pi*Npol-dz) + !3 | 4 2 3 4 2 3 | ky = 3 dky + !2 ky | 3 4 3 4 1 2 | ky = 2 dky + !1 A | 2 3 4 4 6 1 | ky = 1 dky + !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky + !kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) + + ! cyclic connection map BC of the LEFT boundary (z=-pi*Npol) + !3 | 4 5 6 1 2 3 | ky = 3 dky + !2 ky | 5 6 1 2 3 4 | ky = 2 dky + !1 A | 6 1 2 3 4 5 | ky = 1 dky + !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky + !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) + + ! shearless connection map BC of the LEFT/RIGHT boundary (z=+/-pi*Npol) + !3 | 1 2 3 4 5 6 | ky = 3 dky + !2 ky | 1 2 3 4 5 6 | ky = 2 dky + !1 A | 1 2 3 4 5 6 | ky = 1 dky + !0 | -> kx | 1____2____3____4____5____6 | ky = 0 dky + !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) + + ! disconnected connection map BC of the LEFT/RIGHT boundary (z=+/-pi*Npol) + !3 | x x x x x x | ky = 3 dky + !2 ky | x x x x x x | ky = 2 dky + !1 A | x x x x x x | ky = 1 dky + !0 | -> kx | x____x____x____x____x____x | ky = 0 dky + !(e.g.) kx = 0 0.1 0.2 0.3 -0.2 -0.1 (dkx=2pi*shear*npol*dky) END SUBROUTINE set_ikx_zBC_map ! !-------------------------------------------------------------------------------- ! - SUBROUTINE geometry_allocate_mem - + SUBROUTINE geometry_allocate_mem(local_nky,local_nkx,local_nz,Ngz,Nzgrid) + INTEGER, INTENT(IN) :: local_nky,local_nkx,local_nz,Ngz,Nzgrid ! 
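! ---------------------------------------------------------------------------
! Illustrative sketch (not part of this patch): the index arithmetic used in
! set_ikx_zBC_map above, ikx +/- (iky-1)*Nexc with a MODULO wrap for the
! 'cyclic' option, can be checked in isolation. The tiny program below prints
! the right-boundary cyclic map for made-up values of Nkx, Nky and Nexc,
! analogous to the 'cyclic' comment table above.
PROGRAM zbc_map_sketch
  IMPLICIT NONE
  INTEGER, PARAMETER :: Nkx = 6, Nky = 4, Nexc = 1   ! made-up grid sizes
  INTEGER :: iky, ikx, map(Nky,Nkx)
  DO iky = 1,Nky
    DO ikx = 1,Nkx
      ! shift by (iky-1)*Nexc and cycle back into 1..Nkx ('cyclic' BC)
      map(iky,ikx) = MODULO(ikx + (iky-1)*Nexc - 1, Nkx) + 1
    END DO
    WRITE(*,'(*(I3))') map(iky,:)
  END DO
END PROGRAM zbc_map_sketch
! ---------------------------------------------------------------------------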
Curvature and geometry - CALL allocate_array( Ckxky, ikys,ikye, ikxs,ikxe,izgs,izge,0,1) - CALL allocate_array( Jacobian,izgs,izge, 0,1) - CALL allocate_array( gxx,izgs,izge, 0,1) - CALL allocate_array( gxy,izgs,izge, 0,1) - CALL allocate_array( gxz,izgs,izge, 0,1) - CALL allocate_array( gyy,izgs,izge, 0,1) - CALL allocate_array( gyz,izgs,izge, 0,1) - CALL allocate_array( gzz,izgs,izge, 0,1) - CALL allocate_array( dBdx,izgs,izge, 0,1) - CALL allocate_array( dBdy,izgs,izge, 0,1) - CALL allocate_array( dBdz,izgs,izge, 0,1) - CALL allocate_array( dlnBdz,izgs,izge, 0,1) - CALL allocate_array( hatB,izgs,izge, 0,1) - ! CALL allocate_array(Gamma_phipar,izgs,izge, 0,1) (not implemented) - CALL allocate_array( hatR,izgs,izge, 0,1) - CALL allocate_array( hatZ,izgs,izge, 0,1) - CALL allocate_array( Rc,izgs,izge, 0,1) - CALL allocate_array( phic,izgs,izge, 0,1) - CALL allocate_array( Zc,izgs,izge, 0,1) - CALL allocate_array( dxdR,izgs,izge, 0,1) - CALL allocate_array( dxdZ,izgs,izge, 0,1) - call allocate_array(gradz_coeff,izgs,izge, 0,1) - CALL allocate_array( kparray, ikys,ikye, ikxs,ikxe,izgs,izge,0,1) + ALLOCATE( Ckxky(local_nky,local_nkx,local_nz+Ngz,Nzgrid)) + ALLOCATE( Jacobian(local_nz+Ngz,Nzgrid)) + ALLOCATE( gxx(local_nz+Ngz,Nzgrid)) + ALLOCATE( gxy(local_nz+Ngz,Nzgrid)) + ALLOCATE( gxz(local_nz+Ngz,Nzgrid)) + ALLOCATE( gyy(local_nz+Ngz,Nzgrid)) + ALLOCATE( gyz(local_nz+Ngz,Nzgrid)) + ALLOCATE( gzz(local_nz+Ngz,Nzgrid)) + ALLOCATE( dBdx(local_nz+Ngz,Nzgrid)) + ALLOCATE( dBdy(local_nz+Ngz,Nzgrid)) + ALLOCATE( dBdz(local_nz+Ngz,Nzgrid)) + ALLOCATE( dlnBdz(local_nz+Ngz,Nzgrid)) + ALLOCATE( hatB(local_nz+Ngz,Nzgrid)) + ! ALLOCATE(Gamma_phipar,(local_nz+Ngz,Nzgrid)) (not implemented) + ALLOCATE( hatR(local_nz+Ngz,Nzgrid)) + ALLOCATE( hatZ(local_nz+Ngz,Nzgrid)) + ALLOCATE( Rc(local_nz+Ngz,Nzgrid)) + ALLOCATE( phic(local_nz+Ngz,Nzgrid)) + ALLOCATE( Zc(local_nz+Ngz,Nzgrid)) + ALLOCATE( dxdR(local_nz+Ngz,Nzgrid)) + ALLOCATE( dxdZ(local_nz+Ngz,Nzgrid)) + ALLOCATE(gradz_coeff(local_nz+Ngz,Nzgrid)) END SUBROUTINE geometry_allocate_mem - SUBROUTINE geometry_outputinputs(fidres, str) + SUBROUTINE geometry_outputinputs(fid) ! 
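! ---------------------------------------------------------------------------
! Note (editorial, not part of this patch): the allocation block above
! switches from allocate_array with explicit bounds (izgs:izge, 0:1) to plain
! 1-based ALLOCATE statements. The z index now runs over 1:local_nz+Ngz, i.e.
! ghost cells included, and the second index over 1:Nzgrid, which apparently
! replaces the former 0:1 even/odd staggered-grid (eo) index, as the
! DO eo = 0,1 -> DO eo = 1,Nzgrid loop rewrites elsewhere in this patch
! suggest.
! ---------------------------------------------------------------------------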
Write the input parameters to the results_xx.h5 file - - USE futils, ONLY: attach - - USE prec_const + USE futils, ONLY: attach, creatd IMPLICIT NONE - - INTEGER, INTENT(in) :: fidres - CHARACTER(len=256), INTENT(in) :: str - CALL attach(fidres, TRIM(str),"geometry", geom) - CALL attach(fidres, TRIM(str), "q0", q0) - CALL attach(fidres, TRIM(str), "shear", shear) - CALL attach(fidres, TRIM(str), "eps", eps) + INTEGER, INTENT(in) :: fid + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/geometry' + CALL creatd(fid, 0,(/0/),TRIM(str),'Geometry Input') + CALL attach(fid, TRIM(str),"geometry", geom) + CALL attach(fid, TRIM(str), "q0", q0) + CALL attach(fid, TRIM(str), "shear", shear) + CALL attach(fid, TRIM(str), "eps", eps) END SUBROUTINE geometry_outputinputs end module geometry diff --git a/src/ghosts_mod.F90 b/src/ghosts_mod.F90 index bd23ad92..b68c5c03 100644 --- a/src/ghosts_mod.F90 +++ b/src/ghosts_mod.F90 @@ -1,9 +1,6 @@ module ghosts -USE basic -USE grid -USE time_integration -USE model, ONLY : KIN_E, beta -USE geometry, ONLY : SHEARED, ikx_zBC_L, ikx_zBC_R, pb_phase_L, pb_phase_R +USE mpi +USE prec_const, ONLY: dp IMPLICIT NONE INTEGER :: status(MPI_STATUS_SIZE), source, dest, count, ipg @@ -13,33 +10,33 @@ PUBLIC :: update_ghosts_moments, update_ghosts_EM CONTAINS SUBROUTINE update_ghosts_moments + USE grid, ONLY: total_nz + USE parallel, ONLY: num_procs_p + USE basic, ONLY: t0_ghost,t1_ghost,tc_ghost + IMPLICIT NONE CALL cpu_time(t0_ghost) - IF (num_procs_p .GT. 1) THEN ! Do it only if we share the p - IF(KIN_E)& - CALL update_ghosts_p_e - CALL update_ghosts_p_i + CALL update_ghosts_p_mom ENDIF - - IF(Nz .GT. 1) THEN - IF(KIN_E) & - CALL update_ghosts_z_e - CALL update_ghosts_z_i + IF(total_nz .GT. 1) THEN + CALL update_ghosts_z_mom ENDIF - CALL cpu_time(t1_ghost) tc_ghost = tc_ghost + (t1_ghost - t0_ghost) END SUBROUTINE update_ghosts_moments SUBROUTINE update_ghosts_EM + USE model, ONLY : beta + USE grid, ONLY: total_nz + USE fields, ONLY: phi, psi + USE basic, ONLY: t0_ghost,t1_ghost,tc_ghost + IMPLICIT NONE CALL cpu_time(t0_ghost) - - IF(Nz .GT. 1) THEN - CALL update_ghosts_z_phi + IF(total_nz .GT. 1) THEN + CALL update_ghosts_z_3D(phi) IF(beta .GT. 0._dp) & - CALL update_ghosts_z_psi + CALL update_ghosts_z_3D(psi) ENDIF - CALL cpu_time(t1_ghost) tc_ghost = tc_ghost + (t1_ghost - t0_ghost) END SUBROUTINE update_ghosts_EM @@ -57,59 +54,32 @@ END SUBROUTINE update_ghosts_EM !proc 3: [11 12|13 14 15 16|17 18] ! ^ ^ !Closure by zero truncation : 0 0 -SUBROUTINE update_ghosts_p_e - USE fields, ONLY : moments_e - IMPLICIT NONE - - count = (ijge_e-ijgs_e+1)*(ikye-ikys+1)*(ikxe-ikxs+1)*(izge-izgs+1) - - !!!!!!!!!!! Send ghost to right neighbour !!!!!!!!!!!!!!!!!!!!!! - ! Send the last local moment to fill the -1 neighbour ghost - CALL mpi_sendrecv(moments_e(ipe_e ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 10, & ! Send to right - moments_e(ips_e-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 10, & ! Recieve from left - comm0, status, ierr) - IF (deltape .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil - CALL mpi_sendrecv(moments_e(ipe_e-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 11, & ! Send to right - moments_e(ips_e-2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 11, & ! Recieve from left - comm0, status, ierr) - - !!!!!!!!!!! Send ghost to left neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(moments_e(ips_e ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 12, & ! 
Send to left - moments_e(ipe_e+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 12, & ! Recieve from right - comm0, status, ierr) - IF (deltape .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil - CALL mpi_sendrecv(moments_e(ips_e+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 13, & ! Send to left - moments_e(ipe_e+2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 13, & ! Recieve from right - comm0, status, ierr) - -END SUBROUTINE update_ghosts_p_e !Communicate p+1, p+2 moments to left neighboor and p-1, p-2 moments to right one -SUBROUTINE update_ghosts_p_i - USE fields, ONLY : moments_i - IMPLICIT NONE - - count = (ijge_i-ijgs_i+1)*(ikye-ikys+1)*(ikxe-ikxs+1)*(izge-izgs+1) ! Number of elements sent - - !!!!!!!!!!! Send ghost to right neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(moments_i(ipe_i ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 14, & - moments_i(ips_i-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 14, & - comm0, status, ierr) - IF (deltapi .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil - CALL mpi_sendrecv(moments_i(ipe_i-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 15, & - moments_i(ips_i-2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 15, & - comm0, status, ierr) - - !!!!!!!!!!! Send ghost to left neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(moments_i(ips_i ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 16, & - moments_i(ipe_i+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 16, & - comm0, status, ierr) - IF (deltapi .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil - CALL mpi_sendrecv(moments_i(ips_i+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 17, & - moments_i(ipe_i+2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 17, & +SUBROUTINE update_ghosts_p_mom + USE time_integration, ONLY: updatetlevel + USE fields, ONLY: moments + USE parallel, ONLY: nbr_R,nbr_L,comm0 + USE grid, ONLY: local_na,local_np,local_nj,local_nky,local_nkx,local_nz,& + ngp,ngj,ngz + IMPLICIT NONE + INTEGER :: ierr, first, last, ig + first = 1 + ngp/2 + last = local_np + ngp/2 + count = local_na*(local_nj+ngj)*local_nky*local_nkx*(local_nz+ngz) ! Number of elements to send + !!!!!!!!!!! Send ghost to right neighbour !!!!!!!!!!!!!!!!!!!!!! + DO ig = 1,ngp/2 + CALL mpi_sendrecv(moments(:,last-(ig-1),:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 14+ig, & + moments(:,first-ig ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 14+ig, & comm0, status, ierr) - -END SUBROUTINE update_ghosts_p_i + ENDDO + !!!!!!!!!!! Send ghost to left neighbour !!!!!!!!!!!!!!!!!!!!!! + DO ig = 1,ngp/2 + CALL mpi_sendrecv(moments(:,first+(ig-1),:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 16+ig, & + moments(:,last + ig ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 16+ig, & + comm0, status, ierr) + ENDDO +END SUBROUTINE update_ghosts_p_mom !Communicate z+1, z+2 moments to left neighboor and z-1, z-2 moments to right one ! [a b|C D|e f] : proc n has moments a to f where a,b,e,f are ghosts @@ -123,245 +93,130 @@ END SUBROUTINE update_ghosts_p_i !proc 3: [11 12|13 14 15 16|17 18] ! ^ ^ !Periodic: 0 1 -SUBROUTINE update_ghosts_z_e - USE parallel, ONLY : buff_pjxy_zBC_e - USE fields, ONLY : moments_e - IMPLICIT NONE - INTEGER :: ikxBC_L, ikxBC_R - IF(Nz .GT. 1) THEN - IF (num_procs_z .GT. 
1) THEN - CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) - count = (ipge_e-ipgs_e+1)*(ijge_e-ijgs_e+1)*(ikye-ikys+1)*(ikxe-ikxs+1) - !!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!! - ! Send the last local moment to fill the -1 neighbour ghost - CALL mpi_sendrecv(moments_e(:,:,:,:,ize ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 20, & ! Send to Up the last - buff_pjxy_zBC_e(:,:,:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 20, & ! Recieve from Down the first-1 - comm0, status, ierr) - CALL mpi_sendrecv(moments_e(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 21, & ! Send to Up the last - buff_pjxy_zBC_e(:,:,:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 21, & ! Recieve from Down the first-1 - comm0, status, ierr) - !!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(moments_e(:,:,:,:,izs ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 22, & ! Send to Up the last - buff_pjxy_zBC_e(:,:,:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 22, & ! Recieve from Down the first-1 +SUBROUTINE update_ghosts_z_mom + USE geometry, ONLY: ikx_zBC_L, ikx_zBC_R, pb_phase_L, pb_phase_R + USE time_integration, ONLY: updatetlevel + USE parallel, ONLY: comm0,nbr_U,nbr_D,num_procs_z + USE fields, ONLY: moments + USE grid, ONLY: local_na,local_np,local_nj,local_nky,local_nkx,local_nz,& + ngp,ngj,ngz + IMPLICIT NONE + !! buffer for data exchanges + COMPLEX(dp),DIMENSION(local_na,local_np+ngp,local_nj+ngj,local_nky,local_nkx,-Ngz/2:Ngz/2) :: buff_pjxy_zBC + INTEGER :: ikxBC_L, ikxBC_R, ikx, iky, first, last, ig, ierr + first = 1 + ngz/2 + last = local_nz + ngz/2 + count = local_na*(local_np+ngp)*(local_nj+ngj)*local_nky*local_nkx + IF (num_procs_z .GT. 1) THEN + CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) + !!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!! + ! Send the last local moment to fill the -1 neighbour ghost + DO ig=1,ngz + CALL mpi_sendrecv(moments(:,:,:,:,:,last-(ig-1),updatetlevel),count,MPI_DOUBLE_COMPLEX,nbr_U,24+ig, & ! Send to Up the last + buff_pjxy_zBC(:,:,:,:,:,-ig),count,MPI_DOUBLE_COMPLEX,nbr_D,24+ig, & ! Recieve from Down the first-1 comm0, status, ierr) - CALL mpi_sendrecv(moments_e(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 23, & ! Send to Up the last - buff_pjxy_zBC_e(:,:,:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 23, & ! Recieve from Down the first-1 + ENDDO + !!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!! + DO ig=1,ngz + CALL mpi_sendrecv(moments(:,:,:,:,:,first+(ig-1),updatetlevel),count,MPI_DOUBLE_COMPLEX,nbr_D,26+ig, & ! Send to Up the last + buff_pjxy_zBC(:,:,:,:,:,ig),count,MPI_DOUBLE_COMPLEX,nbr_U,26+ig, & ! Recieve from Down the first-1 comm0, status, ierr) - ELSE !No parallel (copy) - buff_pjxy_zBC_e(:,:,:,:,-1) = moments_e(:,:,:,:,ize ,updatetlevel) - buff_pjxy_zBC_e(:,:,:,:,-2) = moments_e(:,:,:,:,ize-1,updatetlevel) - buff_pjxy_zBC_e(:,:,:,:,+1) = moments_e(:,:,:,:,izs ,updatetlevel) - buff_pjxy_zBC_e(:,:,:,:,+2) = moments_e(:,:,:,:,izs+1,updatetlevel) - ENDIF - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ikxBC_L = ikx_zBC_L(iky,ikx); - IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! first-1 gets last - moments_e(:,:,iky,ikx,izs-1,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-1) - ! 
first-2 gets last-1 - moments_e(:,:,iky,ikx,izs-2,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-2) - ELSE - moments_e(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp - moments_e(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp - ENDIF - ikxBC_R = ikx_zBC_R(iky,ikx); - IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! last+1 gets first - moments_e(:,:,iky,ikx,ize+1,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+1) - ! last+2 gets first+1 - moments_e(:,:,iky,ikx,ize+2,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+2) - ELSE - moments_e(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp - moments_e(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp - ENDIF - ENDDO + ENDDO + ELSE !No parallel (just copy) + DO ig=1,ngz + buff_pjxy_zBC(:,:,:,:,:,-ig) = moments(:,:,:,:,:, last-(ig-1),updatetlevel) + buff_pjxy_zBC(:,:,:,:,:, ig) = moments(:,:,:,:,:,first+(ig-1),updatetlevel) ENDDO ENDIF -END SUBROUTINE update_ghosts_z_e + DO iky = 1,local_nky + DO ikx = 1,local_nkx + ikxBC_L = ikx_zBC_L(iky,ikx); + ! Exchanging the modes that have a periodic pair (from sheared BC) + IF (ikxBC_L .NE. -99) THEN + ! Fill the lower ghosts cells with the buffer value (upper cells from LEFT process) + DO ig=1,ngz + moments(:,:,:,iky,ikx,first-ig,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC(:,:,:,iky,ikxBC_L,-ig) + ENDDO + ELSE + DO ig=1,ngz + moments(:,:,:,iky,ikx,first-ig,updatetlevel) = 0._dp + ENDDO + ENDIF + ikxBC_R = ikx_zBC_R(iky,ikx); + ! Exchanging the modes that have a periodic pair (from sheared BC) + IF (ikxBC_R .NE. -99) THEN + ! Fill the upper ghosts cells with the buffer value (lower cells from RIGHT process) + DO ig=1,ngz + moments(:,:,:,iky,ikx,last+ig,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC(:,:,:,iky,ikxBC_R,ig) + ENDDO + ELSE + DO ig=1,ngz + moments(:,:,:,iky,ikx,last+ig,updatetlevel) = 0._dp + ENDDO + ENDIF + ENDDO + ENDDO +END SUBROUTINE update_ghosts_z_mom -SUBROUTINE update_ghosts_z_i - USE parallel, ONLY : buff_pjxy_zBC_i - USE fields, ONLY : moments_i +SUBROUTINE update_ghosts_z_3D(field) + USE geometry, ONLY: ikx_zBC_L, ikx_zBC_R, pb_phase_L, pb_phase_R + USE parallel, ONLY: nbr_U,nbr_D,comm0,num_procs_z + USE grid, ONLY: local_nky,local_nkx,local_nz,ngz IMPLICIT NONE - INTEGER :: ikxBC_L, ikxBC_R - IF(Nz .GT. 1) THEN - IF (num_procs_z .GT. 1) THEN - CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) - count = (ipge_i-ipgs_i+1)*(ijge_i-ijgs_i+1)*(ikye-ikys+1)*(ikxe-ikxs+1) - + !! buffer for data exchanges + COMPLEX(dp),DIMENSION(local_nky,local_nkx,-ngz/2:ngz/2) :: buff_xy_zBC + COMPLEX(dp), INTENT(INOUT) :: field(local_nky,local_nkx,local_nz+ngz) + INTEGER :: ikxBC_L, ikxBC_R, ikx, iky, first, last, ig, ierr + first = 1 + ngz/2 + last = local_nz + ngz/2 + count = local_nky * local_nkx + IF (num_procs_z .GT. 1) THEN + CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) !!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!! - ! Send the last local moment to fill the -1 neighbour ghost - CALL mpi_sendrecv(moments_i(:,:,:,:,ize ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 24, & ! Send to Up the last - buff_pjxy_zBC_i(:,:,:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 24, & ! Recieve from Down the first-1 - comm0, status, ierr) - CALL mpi_sendrecv(moments_i(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 25, & ! Send to Up the last - buff_pjxy_zBC_i(:,:,:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 25, & ! Recieve from Down the first-1 + DO ig = 1,ngz + CALL mpi_sendrecv( field(:,:,last-(ig-1)), count, MPI_DOUBLE_COMPLEX, nbr_U, 30+ig, & ! 
Send to Up the last + buff_xy_zBC(:,:,-ig), count, MPI_DOUBLE_COMPLEX, nbr_D, 30+ig, & ! Receive from Down the first-1 comm0, status, ierr) + ENDDO !!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(moments_i(:,:,:,:,izs ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 26, & ! Send to Up the last - buff_pjxy_zBC_i(:,:,:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 26, & ! Recieve from Down the first-1 + DO ig = 1,ngz + CALL mpi_sendrecv( field(:,:,first+(ig-1)), count, MPI_DOUBLE_COMPLEX, nbr_D, 32+ig, & ! Send to Down the first + buff_xy_zBC(:,:,ig), count, MPI_DOUBLE_COMPLEX, nbr_U, 32+ig, & ! Recieve from Up the last+1 comm0, status, ierr) - CALL mpi_sendrecv(moments_i(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 27, & ! Send to Up the last - buff_pjxy_zBC_i(:,:,:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 27, & ! Recieve from Down the first-1 - comm0, status, ierr) - ELSE !No parallel (copy) - buff_pjxy_zBC_i(:,:,:,:,-1) = moments_i(:,:,:,:,ize ,updatetlevel) - buff_pjxy_zBC_i(:,:,:,:,-2) = moments_i(:,:,:,:,ize-1,updatetlevel) - buff_pjxy_zBC_i(:,:,:,:,+1) = moments_i(:,:,:,:,izs ,updatetlevel) - buff_pjxy_zBC_i(:,:,:,:,+2) = moments_i(:,:,:,:,izs+1,updatetlevel) - ENDIF - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ikxBC_L = ikx_zBC_L(iky,ikx); - IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! first-1 gets last - moments_i(:,:,iky,ikx,izs-1,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-1) - ! first-2 gets last-1 - moments_i(:,:,iky,ikx,izs-2,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-2) - ELSE - moments_i(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp - moments_i(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp - ENDIF - ikxBC_R = ikx_zBC_R(iky,ikx); - IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! last+1 gets first - moments_i(:,:,iky,ikx,ize+1,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+1) - ! last+2 gets first+1 - moments_i(:,:,iky,ikx,ize+2,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+2) - ELSE - moments_i(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp - moments_i(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp - ENDIF - ENDDO - ENDDO - ENDIF -END SUBROUTINE update_ghosts_z_i - -SUBROUTINE update_ghosts_z_phi - USE parallel, ONLY : buff_xy_zBC - USE fields, ONLY : phi - IMPLICIT NONE - INTEGER :: ikxBC_L, ikxBC_R - CALL cpu_time(t1_ghost) - IF(Nz .GT. 1) THEN - IF (num_procs_z .GT. 1) THEN - CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) - count = (ikye-ikys+1) * (ikxe-ikxs+1) - !!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv( phi(:,:,ize ), count, MPI_DOUBLE_COMPLEX, nbr_U, 30, & ! Send to Up the last - buff_xy_zBC(:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 30, & ! Receive from Down the first-1 - comm0, status, ierr) - - CALL mpi_sendrecv( phi(:,:,ize-1), count, MPI_DOUBLE_COMPLEX, nbr_U, 31, & ! Send to Up the last - buff_xy_zBC(:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 31, & ! Receive from Down the first-2 - comm0, status, ierr) - - !!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv( phi(:,:,izs ), count, MPI_DOUBLE_COMPLEX, nbr_D, 32, & ! Send to Down the first - buff_xy_zBC(:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 32, & ! Recieve from Up the last+1 - comm0, status, ierr) - - CALL mpi_sendrecv( phi(:,:,izs+1), count, MPI_DOUBLE_COMPLEX, nbr_D, 33, & ! Send to Down the first - buff_xy_zBC(:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 33, & ! 
Recieve from Up the last+2 - comm0, status, ierr) - ELSE - buff_xy_zBC(:,:,-1) = phi(:,:,ize ) - buff_xy_zBC(:,:,-2) = phi(:,:,ize-1) - buff_xy_zBC(:,:,+1) = phi(:,:,izs ) - buff_xy_zBC(:,:,+2) = phi(:,:,izs+1) - ENDIF - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ikxBC_L = ikx_zBC_L(iky,ikx); - IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! first-1 gets last - phi(iky,ikx,izs-1) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-1) - ! first-2 gets last-1 - phi(iky,ikx,izs-2) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-2) - ELSE - phi(iky,ikx,izs-1) = 0._dp - phi(iky,ikx,izs-2) = 0._dp - ENDIF - ikxBC_R = ikx_zBC_R(iky,ikx); - IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! last+1 gets first - phi(iky,ikx,ize+1) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+1) - ! last+2 gets first+1 - phi(iky,ikx,ize+2) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+2) - ELSE - phi(iky,ikx,ize+1) = 0._dp - phi(iky,ikx,ize+2) = 0._dp - ENDIF ENDDO + ELSE + ! no parallelization so just copy last cell into first ghosts and vice versa + DO ig = 1,ngz + buff_xy_zBC(:,:,-ig) = field(:,:,last-(ig-1)) + buff_xy_zBC(:,:, ig) = field(:,:,first+(ig-1)) + ENDDO + ENDIF + DO iky = 1,local_nky + DO ikx = 1,local_nkx + ikxBC_L = ikx_zBC_L(iky,ikx); + IF (ikxBC_L .GT. 0) THEN ! Exchanging the modes that have a periodic pair (a) + DO ig = 1,ngz + field(iky,ikx,first-ig) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-ig) + ENDDO + ELSE + DO ig = 1,ngz + field(iky,ikx,first-ig) = 0._dp + ENDDO + ENDIF + ikxBC_R = ikx_zBC_R(iky,ikx); + IF (ikxBC_R .GT. 0) THEN ! Exchanging the modes that have a periodic pair (a) + ! last+1 gets first + DO ig = 1,ngz + field(iky,ikx,last+ig) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,ig) + ENDDO + ELSE + DO ig = 1,ngz + field(iky,ikx,last+ig) = 0._dp + ENDDO + ENDIF ENDDO - ENDIF - CALL cpu_time(t1_ghost) - tc_ghost = tc_ghost + (t1_ghost - t0_ghost) -END SUBROUTINE update_ghosts_z_phi - -SUBROUTINE update_ghosts_z_psi - USE parallel, ONLY : buff_xy_zBC - USE fields, ONLY : psi - IMPLICIT NONE - INTEGER :: ikxBC_L, ikxBC_R - CALL cpu_time(t1_ghost) - IF(Nz .GT. 1) THEN - IF (num_procs_z .GT. 1) THEN - CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) - count = (ikye-ikys+1) * (ikxe-ikxs+1) - !!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv( psi(:,:,ize ), count, MPI_DOUBLE_COMPLEX, nbr_U, 40, & ! Send to Up the last - buff_xy_zBC(:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 40, & ! Receive from Down the first-1 - comm0, status, ierr) - - CALL mpi_sendrecv( psi(:,:,ize-1), count, MPI_DOUBLE_COMPLEX, nbr_U, 41, & ! Send to Up the last - buff_xy_zBC(:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 41, & ! Receive from Down the first-2 - comm0, status, ierr) - - !!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv( psi(:,:,izs ), count, MPI_DOUBLE_COMPLEX, nbr_D, 42, & ! Send to Down the first - buff_xy_zBC(:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 42, & ! Recieve from Up the last+1 - comm0, status, ierr) - - CALL mpi_sendrecv( psi(:,:,izs+1), count, MPI_DOUBLE_COMPLEX, nbr_D, 43, & ! Send to Down the first - buff_xy_zBC(:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 43, & ! Recieve from Up the last+2 - comm0, status, ierr) - ELSE - buff_xy_zBC(:,:,-1) = psi(:,:,ize ) - buff_xy_zBC(:,:,-2) = psi(:,:,ize-1) - buff_xy_zBC(:,:,+1) = psi(:,:,izs ) - buff_xy_zBC(:,:,+2) = psi(:,:,izs+1) - ENDIF - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ikxBC_L = ikx_zBC_L(iky,ikx); - IF (ikxBC_L .NE. -99) THEN ! 
Exchanging the modes that have a periodic pair (a) - ! first-1 gets last - psi(iky,ikx,izs-1) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-1) - ! first-2 gets last-1 - psi(iky,ikx,izs-2) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-2) - ELSE - psi(iky,ikx,izs-1) = 0._dp - psi(iky,ikx,izs-2) = 0._dp - ENDIF - ikxBC_R = ikx_zBC_R(iky,ikx); - IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a) - ! last+1 gets first - psi(iky,ikx,ize+1) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+1) - ! last+2 gets first+1 - psi(iky,ikx,ize+2) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+2) - ELSE - psi(iky,ikx,ize+1) = 0._dp - psi(iky,ikx,ize+2) = 0._dp - ENDIF - ENDDO - ENDDO - ENDIF - CALL cpu_time(t1_ghost) - tc_ghost = tc_ghost + (t1_ghost - t0_ghost) -END SUBROUTINE update_ghosts_z_psi - + ENDDO +END SUBROUTINE update_ghosts_z_3D END MODULE ghosts diff --git a/src/grid_mod.F90 b/src/grid_mod.F90 index cbd94555..ed59684e 100644 --- a/src/grid_mod.F90 +++ b/src/grid_mod.F90 @@ -2,84 +2,89 @@ MODULE grid ! Grid module for spatial discretization USE prec_const USE basic - + USE parallel, ONLY: my_id, comm_ky + USE iso_c_binding IMPLICIT NONE PRIVATE - ! GRID Namelist - INTEGER, PUBLIC, PROTECTED :: pmaxe = 1 ! The maximal electron Hermite-moment computed - INTEGER, PUBLIC, PROTECTED :: jmaxe = 1 ! The maximal electron Laguerre-moment computed - INTEGER, PUBLIC, PROTECTED :: pmaxi = 1 ! The maximal ion Hermite-moment computed - INTEGER, PUBLIC, PROTECTED :: jmaxi = 1 ! The maximal ion Laguerre-moment computed - INTEGER, PUBLIC, PROTECTED :: maxj = 1 ! The maximal Laguerre-moment - INTEGER, PUBLIC, PROTECTED :: dmaxe = 1 ! The maximal full GF set of e-moments v^dmax - INTEGER, PUBLIC, PROTECTED :: dmaxi = 1 ! The maximal full GF set of i-moments v^dmax + ! GRID Input + INTEGER, PUBLIC, PROTECTED :: pmax = 1 ! The maximal Hermite-moment computed + INTEGER, PUBLIC, PROTECTED :: jmax = 1 ! The maximal Laguerre-moment computed + INTEGER, PUBLIC, PROTECTED :: maxj = 1 ! The maximal Laguerre-moment + INTEGER, PUBLIC, PROTECTED :: dmax = 1 ! The maximal full GF set of i-moments v^dmax INTEGER, PUBLIC, PROTECTED :: Nx = 4 ! Number of total internal grid points in x REAL(dp), PUBLIC, PROTECTED :: Lx = 120_dp ! horizontal length of the spatial box INTEGER, PUBLIC, PROTECTED :: Nexc = 1 ! factor to increase Lx when shear>0 (Lx = Nexc/kymin/shear) INTEGER, PUBLIC, PROTECTED :: Ny = 4 ! Number of total internal grid points in y REAL(dp), PUBLIC, PROTECTED :: Ly = 120_dp ! vertical length of the spatial box INTEGER, PUBLIC, PROTECTED :: Nz = 4 ! Number of total perpendicular planes - INTEGER, PUBLIC, PROTECTED :: Npol = 1 ! number of poloidal turns INTEGER, PUBLIC, PROTECTED :: Odz = 4 ! order of z interp and derivative schemes INTEGER, PUBLIC, PROTECTED :: Nkx = 4 ! Number of total internal grid points in kx INTEGER, PUBLIC, PROTECTED :: Nky = 4 ! Number of total internal grid points in ky REAL(dp), PUBLIC, PROTECTED :: kpar = 0_dp ! parallel wave vector component - ! For Orszag filter - REAL(dp), PUBLIC, PROTECTED :: two_third_kxmax - REAL(dp), PUBLIC, PROTECTED :: two_third_kymax - REAL(dp), PUBLIC :: two_third_kpmax - - ! 1D Antialiasing arrays (2/3 rule) - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: AA_x - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: AA_y - - ! Grids containing position in physical space - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: xarray - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: yarray - ! 
Local and global z grids, 2D since it has to store odd and even grids - REAL(dp), DIMENSION(:,:), ALLOCATABLE, PUBLIC :: zarray - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: zarray_full + ! Grid arrays + INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: parray, parray_full + INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: jarray, jarray_full + REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: kxarray, kxarray_full + REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: kyarray, kyarray_full + REAL(dp), DIMENSION(:,:), ALLOCATABLE, PUBLIC,PROTECTED :: zarray + REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: zarray_full + REAL(dp), DIMENSION(:,:,:,:), ALLOCATABLE, PUBLIC,PROTECTED :: kparray !kperp + ! Indexation variables + INTEGER, PUBLIC, PROTECTED :: ias ,iae ! species index + INTEGER, PUBLIC, PROTECTED :: ips ,ipe ! Hermite + INTEGER, PUBLIC, PROTECTED :: ijs ,ije ! Laguerre + INTEGER, PUBLIC, PROTECTED :: ikys,ikye ! Fourier y mode + INTEGER, PUBLIC, PROTECTED :: ikxs,ikxe ! Fourier x mode + INTEGER, PUBLIC, PROTECTED :: izs ,ize ! z-grid + INTEGER, PUBLIC, PROTECTED :: ieven, iodd ! indices for the staggered grids + INTEGER, PUBLIC, PROTECTED :: ip0, ip1, ip2, ip3, ij0 + INTEGER, PUBLIC, PROTECTED :: ikx0, iky0, ikx_max, iky_max ! Indices of k-grid origin and max + ! Total numbers of points for Hermite and Laguerre + INTEGER, PUBLIC, PROTECTED :: total_na + INTEGER, PUBLIC, PROTECTED :: total_np + INTEGER, PUBLIC, PROTECTED :: total_nj + INTEGER, PUBLIC, PROTECTED :: total_nky + INTEGER, PUBLIC, PROTECTED :: total_nkx + INTEGER, PUBLIC, PROTECTED :: total_nz + ! Local numbers of points (without ghosts) + INTEGER, PUBLIC, PROTECTED :: local_Na + INTEGER, PUBLIC, PROTECTED :: local_Np + INTEGER, PUBLIC, PROTECTED :: local_Nj + INTEGER, PUBLIC, PROTECTED :: local_Nky + INTEGER, PUBLIC, PROTECTED :: local_Nkx + INTEGER, PUBLIC, PROTECTED :: local_Nz + INTEGER, PUBLIC, PROTECTED :: local_Nkp + INTEGER, PUBLIC, PROTECTED :: Ngp, Ngj, Ngx, Ngy, Ngz ! number of ghosts points + INTEGER, PUBLIC, PROTECTED :: Nzgrid ! one or two depending on the staggered grid option + ! Local offsets + INTEGER, PUBLIC, PROTECTED :: local_na_offset + INTEGER, PUBLIC, PROTECTED :: local_np_offset + INTEGER, PUBLIC, PROTECTED :: local_nj_offset + INTEGER, PUBLIC, PROTECTED :: local_nky_offset + INTEGER, PUBLIC, PROTECTED :: local_nkx_offset + INTEGER, PUBLIC, PROTECTED :: local_nz_offset + ! C-pointer type for FFTW3 + integer(C_INTPTR_T), PUBLIC,PROTECTED :: local_nkx_ptr, local_nky_ptr + integer(C_INTPTR_T), PUBLIC,PROTECTED :: local_nkx_ptr_offset, local_nky_ptr_offset + ! Grid spacing and limits + REAL(dp), PUBLIC, PROTECTED :: deltap, pp2, deltaz, inv_deltaz + REAL(dp), PUBLIC, PROTECTED :: deltakx, deltaky, kx_max, ky_max, kx_min, ky_min!, kp_max + REAL(dp), PUBLIC, PROTECTED :: local_pmin, local_pmax + REAL(dp), PUBLIC, PROTECTED :: local_jmin, local_jmax + REAL(dp), PUBLIC, PROTECTED :: local_kymin, local_kymax + REAL(dp), PUBLIC, PROTECTED :: local_kxmin, local_kxmax + REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC, PROTECTED :: local_zmin, local_zmax ! local z weights for computing simpson rule - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: zweights_SR - REAL(dp), PUBLIC, PROTECTED :: deltax, deltay, deltaz, inv_deltaz - REAL(dp), PUBLIC, PROTECTED :: diff_pe_coeff, diff_je_coeff - REAL(dp), PUBLIC, PROTECTED :: diff_pi_coeff, diff_ji_coeff + INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: zweights_SR + ! 
Numerical diffusion scaling + REAL(dp), PUBLIC, PROTECTED :: diff_p_coeff, diff_j_coeff REAL(dp), PUBLIC, PROTECTED :: diff_kx_coeff, diff_ky_coeff, diff_dz_coeff - INTEGER, PUBLIC, PROTECTED :: ixs, ixe, iys, iye, izs, ize - INTEGER, PUBLIC, PROTECTED :: izgs, izge ! ghosts LOGICAL, PUBLIC, PROTECTED :: SG = .true.! shifted grid flag - INTEGER, PUBLIC :: ir,iz ! counters - ! Data about parallel distribution for ky.kx - integer(C_INTPTR_T), PUBLIC :: local_nkx, local_nky - integer(C_INTPTR_T), PUBLIC :: local_nkx_offset, local_nky_offset - INTEGER, PUBLIC :: local_nkp - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: counts_nkx, counts_nky - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: displs_nkx, displs_nky - ! "" for p - INTEGER, PUBLIC :: local_np_e, local_np_i - INTEGER, PUBLIC :: total_np_e, total_np_i, Np_e, Np_i - integer(C_INTPTR_T), PUBLIC :: local_np_e_offset, local_np_i_offset - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: rcv_p_e, rcv_p_i - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: dsp_p_e, dsp_p_i - ! "" for z - INTEGER, PUBLIC :: local_nz - INTEGER, PUBLIC :: total_nz - integer(C_INTPTR_T), PUBLIC :: local_nz_offset - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: counts_nz - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: displs_nz - ! "" for j (not parallelized) - INTEGER, PUBLIC :: local_nj_e, local_nj_i, Nj_e, Nj_i - ! Grids containing position in fourier space - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: kxarray, kxarray_full - REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: kyarray, kyarray_full + ! Array to know the distribution of data among all processes (for MPI comm) + INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: counts_nkx, counts_nky, counts_nz + INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: displs_nkx, displs_nky, displs_nz ! Kperp array depends on kx, ky, z (geometry), eo (even or odd zgrid) - REAL(dp), DIMENSION(:,:,:,:), ALLOCATABLE, PUBLIC :: kparray - REAL(dp), PUBLIC, PROTECTED :: deltakx, deltaky, kx_max, ky_max, kx_min, ky_min!, kp_max - REAL(dp), PUBLIC, PROTECTED :: local_kxmax, local_kymax - INTEGER, PUBLIC, PROTECTED :: ikxs, ikxe, ikys, ikye!, ikps, ikpe - INTEGER, PUBLIC, PROTECTED :: ikx_0, iky_0, ikx_max, iky_max ! Indices of k-grid origin and max - INTEGER, PUBLIC :: ikx, iky, ip, ij, ikp, pp2, eo ! counters LOGICAL, PUBLIC, PROTECTED :: contains_kx0 = .false. ! flag if the proc contains kx=0 index LOGICAL, PUBLIC, PROTECTED :: contains_ky0 = .false. ! flag if the proc contains ky=0 index LOGICAL, PUBLIC, PROTECTED :: contains_kymax = .false. ! flag if the proc contains kx=kxmax index @@ -87,34 +92,26 @@ MODULE grid LOGICAL, PUBLIC, PROTECTED :: contains_zmin = .false. ! flag if the proc contains z=-pi index LOGICAL, PUBLIC, PROTECTED :: SINGLE_KY = .false. ! to check if it is a single non 0 ky simulation ! Grid containing the polynomials degrees - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_e, parray_e_full - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_i, parray_i_full - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray_e, jarray_e_full - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray_i, jarray_i_full - INTEGER, PUBLIC, PROTECTED :: ips_e,ipe_e, ijs_e,ije_e ! Start and end indices for pol. deg. - INTEGER, PUBLIC, PROTECTED :: ips_i,ipe_i, ijs_i,ije_i - INTEGER, PUBLIC, PROTECTED :: ipgs_e,ipge_e, ijgs_e,ijge_e ! Ghosts start and end indices - INTEGER, PUBLIC, PROTECTED :: ipgs_i,ipge_i, ijgs_i,ijge_i - INTEGER, PUBLIC, PROTECTED :: deltape, ip0_e, ip1_e, ip2_e, ip3_e ! 
Pgrid spacing and moment 0,1,2 index - INTEGER, PUBLIC, PROTECTED :: deltapi, ip0_i, ip1_i, ip2_i, ip3_i - LOGICAL, PUBLIC, PROTECTED :: CONTAINS_ip0_e, CONTAINS_ip0_i - LOGICAL, PUBLIC, PROTECTED :: CONTAINS_ip1_e, CONTAINS_ip1_i - LOGICAL, PUBLIC, PROTECTED :: CONTAINS_ip2_e, CONTAINS_ip2_i - LOGICAL, PUBLIC, PROTECTED :: CONTAINS_ip3_e, CONTAINS_ip3_i + LOGICAL, PUBLIC, PROTECTED :: CONTAINSp0, CONTAINSp1, CONTAINSp2, CONTAINSp3 LOGICAL, PUBLIC, PROTECTED :: SOLVE_POISSON, SOLVE_AMPERE - INTEGER, PUBLIC, PROTECTED :: ij0_i, ij0_e -! Usefull inverse numbers + ! Usefull inverse numbers REAL(dp), PUBLIC, PROTECTED :: inv_Nx, inv_Ny, inv_Nz + ! For Orszag filter + REAL(dp), PUBLIC, PROTECTED :: two_third_kxmax + REAL(dp), PUBLIC, PROTECTED :: two_third_kymax + REAL(dp), PUBLIC, PROTECTED :: two_third_kpmax + ! 1D Antialiasing arrays (2/3 rule) + REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: AA_x + REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC,PROTECTED :: AA_y ! Public Functions PUBLIC :: init_1Dgrid_distr - PUBLIC :: set_pgrid, set_jgrid - PUBLIC :: set_kxgrid, set_kygrid, set_zgrid + PUBLIC :: set_grids, set_kparray PUBLIC :: grid_readinputs, grid_outputinputs - PUBLIC :: bare, bari + PUBLIC :: bar ! Precomputations - real(dp), PUBLIC, PROTECTED :: pmaxe_dp, pmaxi_dp, jmaxe_dp,jmaxi_dp + real(dp), PUBLIC, PROTECTED :: pmax_dp, jmax_dp CONTAINS @@ -123,40 +120,72 @@ CONTAINS ! Read the input parameters USE prec_const IMPLICIT NONE - INTEGER :: lu_in = 90 ! File duplicated from STDIN - - NAMELIST /GRID/ pmaxe, jmaxe, pmaxi, jmaxi, & - Nx, Lx, Ny, Ly, Nz, Npol, SG, Nexc - READ(lu_in,grid) - + INTEGER :: lun = 90 ! File duplicated from STDIN + NAMELIST /GRID/ pmax, jmax, Nx, Lx, Ny, Ly, Nz, SG, Nexc + READ(lun,grid) IF(Nz .EQ. 1) & ! overwrite SG option if Nz = 1 for safety of use SG = .FALSE. - !! Compute the maximal degree of full GF moments set ! i.e. : all moments N_a^pj s.t. p+2j<=d are simulated (see GF closure) - dmaxe = min(pmaxe,2*jmaxe+1) - dmaxi = min(pmaxi,2*jmaxi+1) - + dmax = min(pmax,2*jmax+1) ! Usefull precomputations inv_Nx = 1._dp/REAL(Nx,dp) inv_Ny = 1._dp/REAL(Ny,dp) - END SUBROUTINE grid_readinputs + !!!! GRID REPRESENTATION + ! We define the grids that contain ghosts (p,j,z) with indexing from 1 to Nlocal + nghost + ! e.g. nghost = 4, nlocal = 4 + ! |x|x|_|_|_|_|x|x| + ! 1 2 3 4 5 6 7 8 (x are ghosts) + ! the interior points are contained between 1+Ng/2 and Nlocal+Ng/2 + ! The other grids are simply + ! |_|_|_|_| + ! 1 2 3 4 + SUBROUTINE set_grids(shear,Npol) + USE model, ONLY: LINEARITY + USE fourier, ONLY: init_grid_distr_and_plans + REAL(dp), INTENT(IN) :: shear + INTEGER, INTENT(IN) :: Npol + CALL set_agrid + CALL set_pgrid + !! Parallel distribution of kx ky grid + IF (LINEARITY .NE. 'linear') THEN + IF (my_id .EQ. 0) write(*,*) 'FFTW3 y-grid distribution' + CALL init_grid_distr_and_plans(Nx,Ny,comm_ky,local_nkx_ptr,local_nkx_ptr_offset,local_nky_ptr,local_nky_ptr_offset) + ELSE + CALL init_1Dgrid_distr + IF (my_id .EQ. 0) write(*,*) 'Manual y-grid distribution' + ENDIF + CALL set_kygrid + CALL set_kxgrid(shear,Npol) + CALL set_zgrid (Npol) + END SUBROUTINE set_grids SUBROUTINE init_1Dgrid_distr + USE parallel, ONLY: num_procs_ky, rank_ky ! write(*,*) Nx - local_nky = (Ny/2+1)/num_procs_ky - ! write(*,*) local_nkx - local_nky_offset = rank_ky*local_nky - if (rank_ky .EQ. num_procs_ky-1) local_nky = (Ny/2+1)-local_nky_offset + local_nky_ptr = (Ny/2+1)/num_procs_ky + ! write(*,*) local_nkx_ptr + local_nky_ptr_offset = rank_ky*local_nky_ptr + if (rank_ky .EQ. 
num_procs_ky-1) local_nky_ptr = (Ny/2+1)-local_nky_ptr_offset END SUBROUTINE init_1Dgrid_distr + SUBROUTINE set_agrid ! you're a sorcerer harry + USE model, ONLY: Na + IMPLICIT NONE + ias = 1 + iae = Na + total_Na = Na + local_Na = Na + local_Na_offset = ias - 1 + END SUBROUTINE + SUBROUTINE set_pgrid USE prec_const - USE model, ONLY: beta ! To know if we solve ampere or not and put odd p moments + USE model, ONLY: beta ! To know if we solve ampere or not and put odd p moments + USE parallel, ONLY: num_procs_p, rank_p IMPLICIT NONE - INTEGER :: ip, istart, iend, in - + INTEGER :: ip, ig ! If no parallel dim (Nz=1) and no EM effects (beta=0), the moment hierarchy !! is separable between odds and even P and since the energy is injected in !! P=0 and P=2 for density/temperature gradients there is no need of @@ -164,88 +193,58 @@ CONTAINS !! We define in this case a grid Parray = 0,2,4,...,Pmax i.e. deltap = 2 !! instead of 1 to spare computation IF((Nz .EQ. 1) .AND. (beta .EQ. 0._dp)) THEN - deltape = 2; deltapi = 2; - pp2 = 1; ! index p+2 is ip+1 + deltap = 2 + Ngp = 2 ! two ghosts cells for p+/-2 only + pp2 = 1 ! index p+2 is ip+1 ELSE - deltape = 1; deltapi = 1; - pp2 = 2; ! index p+2 is ip+2 + deltap = 1 + Ngp = 4 ! four ghosts cells for p+/-1 and p+/-2 terms + pp2 = 2 ! index p+2 is ip+2 ENDIF - ! Total number of Hermite polynomials we will evolve - total_np_e = (Pmaxe/deltape) + 1 - total_np_i = (Pmaxi/deltapi) + 1 - Np_e = total_np_e ! Reduced names (redundant) - Np_i = total_np_i + total_np = (Pmax/deltap) + 1 ! Build the full grids on process 0 to diagnose it without comm - ALLOCATE(parray_e_full(1:total_np_e)) - ALLOCATE(parray_i_full(1:total_np_i)) + ALLOCATE(parray_full(total_np)) ! P - DO ip = 1,total_np_e; parray_e_full(ip) = (ip-1)*deltape; END DO - DO ip = 1,total_np_i; parray_i_full(ip) = (ip-1)*deltapi; END DO + DO ip = 1,total_np; parray_full(ip) = (ip-1)*deltap; END DO !! Parallel data distribution ! Local data distribution - CALL decomp1D(total_np_e, num_procs_p, rank_p, ips_e, ipe_e) - CALL decomp1D(total_np_i, num_procs_p, rank_p, ips_i, ipe_i) - local_np_e = ipe_e - ips_e + 1 - local_np_i = ipe_i - ips_i + 1 - ! Ghosts boundaries - ipgs_e = ips_e - 2/deltape; ipge_e = ipe_e + 2/deltape; - ipgs_i = ips_i - 2/deltapi; ipge_i = ipe_i + 2/deltapi; - ! List of shift and local numbers between the different processes (used in scatterv and gatherv) - ALLOCATE(rcv_p_e (1:num_procs_p)) - ALLOCATE(rcv_p_i (1:num_procs_p)) - ALLOCATE(dsp_p_e (1:num_procs_p)) - ALLOCATE(dsp_p_i (1:num_procs_p)) - DO in = 0,num_procs_p-1 - CALL decomp1D(total_np_e, num_procs_p, in, istart, iend) - rcv_p_e(in+1) = iend-istart+1 - dsp_p_e(in+1) = istart-1 - CALL decomp1D(total_np_i, num_procs_p, in, istart, iend) - rcv_p_i(in+1) = iend-istart+1 - dsp_p_i(in+1) = istart-1 - ENDDO - + CALL decomp1D(total_np, num_procs_p, rank_p, ips, ipe) + local_np = ipe - ips + 1 + local_np_offset = ips - 1 !! local grid computations ! Flag to avoid unnecessary logical operations - CONTAINS_ip0_e = .FALSE.; CONTAINS_ip1_e = .FALSE. - CONTAINS_ip2_e = .FALSE.; CONTAINS_ip3_e = .FALSE. - CONTAINS_ip0_i = .FALSE.; CONTAINS_ip1_i = .FALSE. - CONTAINS_ip2_i = .FALSE.; CONTAINS_ip3_i = .FALSE. + CONTAINSp0 = .FALSE.; CONTAINSp1 = .FALSE. + CONTAINSp2 = .FALSE.; CONTAINSp3 = .FALSE. SOLVE_POISSON = .FALSE.; SOLVE_AMPERE = .FALSE. - ALLOCATE(parray_e(ipgs_e:ipge_e)) - ALLOCATE(parray_i(ipgs_i:ipge_i)) - DO ip = ipgs_e,ipge_e - parray_e(ip) = (ip-1)*deltape + ALLOCATE(parray(local_np+ngp)) + ! 
Fill the interior (no ghosts) + DO ip = 1+ngp/2,local_np+ngp/2 + parray(ip) = (ip-1-ngp/2+local_np_offset)*deltap ! Storing indices of particular degrees for fluid moments computations - SELECT CASE (parray_e(ip)) - CASE(0); ip0_e = ip; CONTAINS_ip0_e = .TRUE. - CASE(1); ip1_e = ip; CONTAINS_ip1_e = .TRUE. - CASE(2); ip2_e = ip; CONTAINS_ip2_e = .TRUE. - CASE(3); ip3_e = ip; CONTAINS_ip3_e = .TRUE. + SELECT CASE (parray(ip)) + CASE(0); ip0 = ip; CONTAINSp0 = .TRUE. + CASE(1); ip1 = ip; CONTAINSp1 = .TRUE. + CASE(2); ip2 = ip; CONTAINSp2 = .TRUE. + CASE(3); ip3 = ip; CONTAINSp3 = .TRUE. END SELECT END DO - DO ip = ipgs_i,ipge_i - parray_i(ip) = (ip-1)*deltapi - ! Storing indices of particular degrees for fluid moments computations - SELECT CASE (parray_i(ip)) - CASE(0); ip0_i = ip; CONTAINS_ip0_i = .TRUE. - CASE(1); ip1_i = ip; CONTAINS_ip1_i = .TRUE. - CASE(2); ip2_i = ip; CONTAINS_ip2_i = .TRUE. - CASE(3); ip3_i = ip; CONTAINS_ip3_i = .TRUE. - END SELECT - END DO - IF(CONTAINS_ip0_e .AND. CONTAINS_ip0_i) SOLVE_POISSON = .TRUE. - IF(CONTAINS_ip1_e .AND. CONTAINS_ip1_i) SOLVE_AMPERE = .TRUE. + local_pmax = parray(local_np+ngp/2) + local_pmin = parray(1+ngp/2) + ! Fill the ghosts + DO ig = 1,ngp/2 + parray(ig) = local_pmin-ngp/2+(ig-1) + parray(local_np+ig) = local_pmax+ig + ENDDO + IF(CONTAINSp0) SOLVE_POISSON = .TRUE. + IF(CONTAINSp1) SOLVE_AMPERE = .TRUE. !DGGK operator uses moments at index p=2 (ip=3) for the p=0 term so the ! process that contains ip=1 MUST contain ip=3 as well for both e and i. - IF(((ips_e .EQ. ip0_e) .OR. (ips_i .EQ. ip0_e)) .AND. ((ipe_e .LT. ip2_e) .OR. (ipe_i .LT. ip2_i)))& + IF(CONTAINSp0 .AND. .NOT. (CONTAINSp2))& WRITE(*,*) "Warning : distribution along p may not work with DGGK" ! Precomputations - pmaxe_dp = real(pmaxe,dp) - pmaxi_dp = real(pmaxi,dp) - diff_pe_coeff = pmaxe_dp*(1._dp/pmaxe_dp)**6 - diff_pi_coeff = pmaxi_dp*(1._dp/pmaxi_dp)**6 - + pmax_dp = real(pmax,dp) + diff_p_coeff = pmax_dp*(1._dp/pmax_dp)**6 ! Overwrite SOLVE_AMPERE flag if beta is zero IF(beta .EQ. 0._dp) THEN SOLVE_AMPERE = .FALSE. @@ -255,47 +254,45 @@ CONTAINS SUBROUTINE set_jgrid USE prec_const IMPLICIT NONE - INTEGER :: ij + INTEGER :: ij, ig ! Total number of J degrees - Nj_e = jmaxe+1 - Nj_i = jmaxi+1 + total_nj = jmax+1 + local_jmax = jmax + Ngj= 2 ! 2-points ghosts for j+\-1 terms ! Build the full grids on process 0 to diagnose it without comm - ALLOCATE(jarray_e_full(1:jmaxe+1)) - ALLOCATE(jarray_i_full(1:jmaxi+1)) + ALLOCATE(jarray_full(total_nj)) ! J - DO ij = 1,jmaxe+1; jarray_e_full(ij) = (ij-1); END DO - DO ij = 1,jmaxi+1; jarray_i_full(ij) = (ij-1); END DO - ! Local data - ijs_e = 1; ije_e = jmaxe + 1 - ijs_i = 1; ije_i = jmaxi + 1 - ! Ghosts boundaries - ijgs_e = ijs_e - 1; ijge_e = ije_e + 1; - ijgs_i = ijs_i - 1; ijge_i = ije_i + 1; + DO ij = 1,jmax+1; jarray_full(ij) = (ij-1); END DO + ! Indices of local data + ijs = 1; ije = jmax + 1 ! Local number of J - local_nj_e = ijge_e - ijgs_e + 1 - local_nj_i = ijge_i - ijgs_i + 1 - ALLOCATE(jarray_e(ijgs_e:ijge_e)) - ALLOCATE(jarray_i(ijgs_i:ijge_i)) - DO ij = ijgs_e,ijge_e; jarray_e(ij) = ij-1; END DO - DO ij = ijgs_i,ijge_i; jarray_i(ij) = ij-1; END DO + local_nj = ije - ijs + 1 + local_nj_offset = ijs - 1 + ALLOCATE(jarray(local_nj+ngj)) + DO ij = 1+ngj/2,local_nj+ngj/2 + jarray(ij) = ij-1-ngj/2+local_nj_offset + END DO + local_jmax = jarray(local_np+ngp/2) + local_jmin = jarray(1+ngp/2) + ! Fill the ghosts + DO ig = 1,ngj/2 + jarray(ig) = local_jmin-ngj/2+(ig-1) + jarray(local_nj+ig) = local_jmax+ig + ENDDO ! 
Precomputations - maxj = MAX(jmaxi, jmaxe) - jmaxe_dp = real(jmaxe,dp) - jmaxi_dp = real(jmaxi,dp) - diff_je_coeff = jmaxe_dp*(1._dp/jmaxe_dp)**6 - diff_ji_coeff = jmaxi_dp*(1._dp/jmaxi_dp)**6 - + jmax_dp = real(jmax,dp) + diff_j_coeff = jmax_dp*(1._dp/jmax_dp)**6 ! j=0 indices - DO ij = ijs_e,ije_e; IF(jarray_e(ij) .EQ. 0) ij0_e = ij; END DO - DO ij = ijs_i,ije_i; IF(jarray_i(ij) .EQ. 0) ij0_i = ij; END DO + DO ij = 1,local_nj + IF(jarray(ij) .EQ. 0) ij0 = ij + END DO END SUBROUTINE set_jgrid - SUBROUTINE set_kygrid USE prec_const USE model, ONLY: LINEARITY, N_HD IMPLICIT NONE - INTEGER :: in, istart, iend + INTEGER :: iky, ikyo Nky = Ny/2+1 ! Defined only on positive kx since fields are real ! Grid spacings IF (Ny .EQ. 1) THEN @@ -307,72 +304,71 @@ CONTAINS ky_max = (Nky-1)*deltaky ky_min = deltaky ENDIF + Ngy = 0 ! no ghosts cells in ky ! Build the full grids on process 0 to diagnose it without comm - ALLOCATE(kyarray_full(1:Nky)) + ALLOCATE(kyarray_full(Nky)) DO iky = 1,Nky kyarray_full(iky) = REAL(iky-1,dp) * deltaky END DO - !! Parallel distribution - ikys = local_nky_offset + 1 - ikye = ikys + local_nky - 1 - ALLOCATE(kyarray(ikys:ikye)) + ikys = local_nky_ptr_offset + 1 + ikye = ikys + local_nky_ptr - 1 + local_nky = ikye - ikys + 1 + local_nky_offset = ikys - 1 + ALLOCATE(kyarray(local_nky)) local_kymax = 0._dp - ! List of shift and local numbers between the different processes (used in scatterv and gatherv) - ALLOCATE(counts_nky (1:num_procs_ky)) - ALLOCATE(displs_nky (1:num_procs_ky)) - DO in = 0,num_procs_ky-1 - CALL decomp1D(Nky, num_procs_ky, in, istart, iend) - counts_nky(in+1) = iend-istart+1 - displs_nky(in+1) = istart-1 - ENDDO ! Creating a grid ordered as dk*(0 1 2 3) + ! We loop over the natural iky numbers (|1 2 3||4 5 6||... Nky|) DO iky = ikys,ikye + ! We shift the natural iky index by the offset to obtain the mpi dependent + ! indexation (|1 2 3||1 2 3|... local_Nky|) + ikyo = iky - local_nky_offset IF(Ny .EQ. 1) THEN kyarray(iky) = deltaky kyarray_full(iky) = deltaky SINGLE_KY = .TRUE. ELSE - kyarray(iky) = REAL(iky-1,dp) * deltaky + kyarray(ikyo) = REAL(iky,dp) * deltaky ENDIF ! Finding kx=0 - IF (kyarray(iky) .EQ. 0) THEN - iky_0 = iky + IF (kyarray(ikyo) .EQ. 0) THEN + iky0 = ikyo contains_ky0 = .true. ENDIF ! Finding local kxmax value - IF (ABS(kyarray(iky)) .GT. local_kymax) THEN - local_kymax = ABS(kyarray(iky)) + IF (ABS(kyarray(ikyo)) .GT. local_kymax) THEN + local_kymax = ABS(kyarray(ikyo)) ENDIF ! Finding kxmax idx - IF (kyarray(iky) .EQ. ky_max) THEN - iky_max = iky + IF (kyarray(ikyo) .EQ. ky_max) THEN + iky_max = ikyo contains_kymax = .true. ENDIF END DO ! Orszag 2/3 filter two_third_kymax = 2._dp/3._dp*deltaky*(Nky-1) - ! For hyperdiffusion - IF(LINEARITY.EQ.'linear') THEN - diff_ky_coeff= (1._dp/ky_max)**N_HD - ELSE - diff_ky_coeff= (1._dp/two_third_kymax)**N_HD - ENDIF - - ALLOCATE(AA_y(ikys:ikye)) - DO iky = ikys,ikye + ALLOCATE(AA_y(local_Nky)) + DO iky = 1,local_Nky IF ( (kyarray(iky) .LT. two_third_kymax) .OR. (LINEARITY .EQ. 'linear')) THEN AA_y(iky) = 1._dp; ELSE AA_y(iky) = 0._dp; ENDIF END DO + ! For hyperdiffusion + IF(LINEARITY.EQ.'linear') THEN + diff_ky_coeff= (1._dp/ky_max)**N_HD + ELSE + diff_ky_coeff= (1._dp/two_third_kymax)**N_HD + ENDIF END SUBROUTINE set_kygrid - SUBROUTINE set_kxgrid(shear) + SUBROUTINE set_kxgrid(shear,Npol) USE prec_const USE model, ONLY: LINEARITY, N_HD IMPLICIT NONE REAL(dp), INTENT(IN) :: shear + INTEGER, INTENT(IN) :: Npol + INTEGER :: ikx, ikxo REAL :: Lx_adapted IF(shear .GT. 
0) THEN IF(my_id.EQ.0) write(*,*) 'Magnetic shear detected: set up sheared kx grid..' @@ -386,18 +382,21 @@ CONTAINS ! x length is adapted Lx = Lx_adapted*Nexc ENDIF - Nkx = Nx; + Nkx = Nx; + total_nkx = Nx ! Local data ! Start and END indices of grid ikxs = 1 ikxe = Nkx - local_nkx = ikxe - ikxs + 1 - ALLOCATE(kxarray(ikxs:ikxe)) - ALLOCATE(kxarray_full(1:Nkx)) + local_nkx_ptr = ikxe - ikxs + 1 + local_nkx = ikxe - ikxs + 1 + local_nky_offset = ikxs - 1 + ALLOCATE(kxarray(local_nkx)) + ALLOCATE(kxarray_full(1:total_nkx)) IF (Nx .EQ. 1) THEN deltakx = 1._dp kxarray(1) = 0._dp - ikx_0 = 1 + ikx0 = 1 contains_kx0 = .true. kx_max = 0._dp ikx_max = 1 @@ -412,19 +411,20 @@ CONTAINS ! Creating a grid ordered as dk*(0 1 2 3 -2 -1) local_kxmax = 0._dp DO ikx = ikxs,ikxe - kxarray(ikx) = deltakx*(MODULO(ikx-1,Nkx/2)-Nkx/2*FLOOR(2.*real(ikx-1)/real(Nkx))) - if (ikx .EQ. Nx/2+1) kxarray(ikx) = -kxarray(ikx) + ikxo = ikx - local_nkx_offset + kxarray(ikxo) = deltakx*(MODULO(ikx-1,Nkx/2)-Nkx/2*FLOOR(2.*real(ikx-1)/real(Nkx))) + if (ikx .EQ. Nx/2+1) kxarray(ikxo) = -kxarray(ikxo) ! Finding kx=0 - IF (kxarray(ikx) .EQ. 0) THEN - ikx_0 = ikx + IF (kxarray(ikxo) .EQ. 0) THEN + ikx0 = ikxo contains_kx0 = .true. ENDIF ! Finding local kxmax - IF (ABS(kxarray(ikx)) .GT. local_kxmax) THEN - local_kxmax = ABS(kxarray(ikx)) + IF (ABS(kxarray(ikxo)) .GT. local_kxmax) THEN + local_kxmax = ABS(kxarray(ikxo)) ENDIF ! Finding kxmax - IF (kxarray(ikx) .EQ. kx_max) ikx_max = ikx + IF (kxarray(ikxo) .EQ. kx_max) ikx_max = ikxo END DO ! Build the full grids on process 0 to diagnose it without comm ! kx @@ -438,22 +438,23 @@ CONTAINS ! Creating a grid ordered as dk*(0 1 2 -2 -1) local_kxmax = 0._dp DO ikx = ikxs,ikxe + ikxo = ikx - local_nkx_offset IF(ikx .LE. (Nkx-1)/2+1) THEN - kxarray(ikx) = deltakx*(ikx-1) + kxarray(ikxo) = deltakx*(ikx-1) ELSE - kxarray(ikx) = deltakx*(ikx-Nkx-1) + kxarray(ikxo) = deltakx*(ikx-Nkx-1) ENDIF ! Finding kx=0 - IF (kxarray(ikx) .EQ. 0) THEN - ikx_0 = ikx + IF (kxarray(ikxo) .EQ. 0) THEN + ikx0 = ikxo contains_kx0 = .true. ENDIF ! Finding local kxmax - IF (ABS(kxarray(ikx)) .GT. local_kxmax) THEN - local_kxmax = ABS(kxarray(ikx)) + IF (ABS(kxarray(ikxo)) .GT. local_kxmax) THEN + local_kxmax = ABS(kxarray(ikxo)) ENDIF ! Finding kxmax - IF (kxarray(ikx) .EQ. kx_max) ikx_max = ikx + IF (kxarray(ikxo) .EQ. kx_max) ikx_max = ikxo END DO ! Build the full grids on process 0 to diagnose it without comm ! kx @@ -468,17 +469,9 @@ CONTAINS ENDIF ! Orszag 2/3 filter two_third_kxmax = 2._dp/3._dp*kx_max; - - ! For hyperdiffusion - IF(LINEARITY.EQ.'linear') THEN - diff_kx_coeff= (1._dp/kx_max)**N_HD - ELSE - diff_kx_coeff= (1._dp/two_third_kxmax)**N_HD - ENDIF - ! Antialiasing filter - ALLOCATE(AA_x(ikxs:ikxe)) - DO ikx = ikxs,ikxe + ALLOCATE(AA_x(local_nkx)) + DO ikx = 1,local_nkx IF ( ((kxarray(ikx) .GT. -two_third_kxmax) .AND. & (kxarray(ikx) .LT. two_third_kxmax)) .OR. (LINEARITY .EQ. 'linear')) THEN AA_x(ikx) = 1._dp; @@ -486,15 +479,21 @@ CONTAINS AA_x(ikx) = 0._dp; ENDIF END DO + ! For hyperdiffusion + IF(LINEARITY.EQ.'linear') THEN + diff_kx_coeff= (1._dp/kx_max)**N_HD + ELSE + diff_kx_coeff= (1._dp/two_third_kxmax)**N_HD + ENDIF END SUBROUTINE set_kxgrid - - SUBROUTINE set_zgrid + SUBROUTINE set_zgrid(Npol) USE prec_const USE model, ONLY: mu_z + USE parallel, ONLY: num_procs_z, rank_z IMPLICIT NONE REAL :: grid_shift, Lz, zmax, zmin - INTEGER :: istart, iend, in + INTEGER :: istart, iend, in, Npol, iz, ig, eo, izo total_nz = Nz ! 
Length of the flux tube (in ballooning angle) Lz = 2_dp*pi*Npol @@ -508,12 +507,18 @@ CONTAINS diff_dz_coeff = -1._dp ! non adaptive (negative sign to compensate mu_z neg) ENDIF IF (SG) THEN + CALL speak('--2 staggered z grids--') grid_shift = deltaz/2._dp + ! indices for even p and odd p grids (used in kernel, jacobian, gij etc.) + ieven = 1; iodd = 2 + Nzgrid = 2 ELSE grid_shift = 0._dp + ieven = 1; iodd = 1 + Nzgrid = 1 ENDIF ! Build the full grids on process 0 to diagnose it without comm - ALLOCATE(zarray_full(1:Nz)) + ALLOCATE(zarray_full(total_nz)) IF (Nz .EQ. 1) Npol = 0 zmax = 0; zmin = 0; DO iz = 1,total_nz ! z in [-pi pi-dz] x Npol @@ -526,108 +531,117 @@ CONTAINS ERROR STOP '>> ERROR << Cannot have multiple core in z-direction (Nz = 1)' ! Local data distribution CALL decomp1D(total_nz, num_procs_z, rank_z, izs, ize) - local_nz = ize - izs + 1 + local_nz = ize - izs + 1 + local_nz_offset = izs - 1 ! Ghosts boundaries (depend on the order of z operators) IF(Nz .EQ. 1) THEN - izgs = izs; izge = ize; - zarray_full(izs) = 0; + Ngz = 0 + zarray_full(izs) = 0 ELSEIF(Nz .GE. 4) THEN - izgs = izs - 2; izge = ize + 2; + Ngz =4 ELSE ERROR STOP '>> ERROR << Nz is not appropriate!!' ENDIF ! List of shift and local numbers between the different processes (used in scatterv and gatherv) - ALLOCATE(counts_nz (1:num_procs_z)) - ALLOCATE(displs_nz (1:num_procs_z)) + ALLOCATE(counts_nz (num_procs_z)) + ALLOCATE(displs_nz (num_procs_z)) DO in = 0,num_procs_z-1 CALL decomp1D(total_nz, num_procs_z, in, istart, iend) counts_nz(in+1) = iend-istart+1 displs_nz(in+1) = istart-1 ENDDO ! Local z array - ALLOCATE(zarray(izgs:izge,0:1)) - DO iz = izgs,izge - IF(iz .EQ. 0) THEN - zarray(iz,0) = zarray_full(total_nz) - zarray(iz,1) = zarray_full(total_nz) + grid_shift - ELSEIF(iz .EQ. -1) THEN - zarray(iz,0) = zarray_full(total_nz-1) - zarray(iz,1) = zarray_full(total_nz-1) + grid_shift - ELSEIF(iz .EQ. total_nz + 1) THEN - zarray(iz,0) = zarray_full(1) - zarray(iz,1) = zarray_full(1) + grid_shift - ELSEIF(iz .EQ. total_nz + 2) THEN - zarray(iz,0) = zarray_full(2) - zarray(iz,1) = zarray_full(2) + grid_shift - ELSE - zarray(iz,0) = zarray_full(iz) - zarray(iz,1) = zarray_full(iz) + grid_shift - ENDIF - ENDDO - IF(abs(zarray(izs,0) - zmin) .LT. EPSILON(zmin)) & - contains_zmin = .TRUE. - IF(abs(zarray(ize,0) - zmax) .LT. EPSILON(zmax)) & - contains_zmax = .TRUE. - ! Weights for Simpson rule - ALLOCATE(zweights_SR(izs:ize)) + ALLOCATE(zarray(local_nz+Ngz,Nzgrid)) + !! interior point loop DO iz = izs,ize - IF(MODULO(iz,2) .EQ. 1) THEN ! odd iz - zweights_SR(iz) = 2._dp - ELSE ! even iz - zweights_SR(iz) = 4._dp - ENDIF + izo = iz - local_nz_offset + DO eo = 1,Nzgrid + zarray(izo,eo) = zarray_full(iz) + (eo-1)*grid_shift + ENDDO ENDDO + ALLOCATE(local_zmax(Nzgrid),local_zmin(Nzgrid)) + DO eo = 1,Nzgrid + ! Find local extrema + local_zmax(eo) = zarray(local_nz+ngz/2,eo) + local_zmin(eo) = zarray(1+ngz/2,eo) + ! Fill the ghosts + DO ig = 1,ngj/2 + zarray(ig,eo) = local_zmin(eo)-(ngz/2+(ig-1))*deltaz + zarray(local_nz+ig,eo) = local_zmax(eo)+ig*deltaz + ENDDO + ! Set up the flags to know if the process contains the tip and/or the tail + ! of the z domain (important for z-boundary condition) + IF(abs(local_zmin(eo) - (zmin+(eo-1)*grid_shift)) .LT. EPSILON(zmin)) & + contains_zmin = .TRUE. + IF(abs(local_zmax(eo) - (zmax+(eo-1)*grid_shift)) .LT. EPSILON(zmax)) & + contains_zmax = .TRUE. + ENDDO + IF(mod(Nz,2) .NE. 0 ) THEN + ERROR STOP '>> ERROR << Nz must be an even number for Simpson integration rule !!!!' 
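  ! Why Nz must be even: the z-integration uses a composite Simpson rule on the
  ! periodic ballooning-angle grid, which needs an even number of sub-intervals.
  ! On such a grid the weights simply alternate between 2 and 4 (the pre-refactor
  ! zweights_SR set 2 for odd iz and 4 for even iz), i.e. the usual
  ! 1,4,2,4,...,2,4,1 pattern with the two endpoint weights merged by periodicity;
  ! the deltaz/3 prefactor is presumably applied where zweights_SR is consumed.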
+ ENDIF END SUBROUTINE set_zgrid - SUBROUTINE grid_outputinputs(fidres, str) - ! Write the input parameters to the results_xx.h5 file - - USE futils, ONLY: attach + SUBROUTINE set_kparray(gxx, gxy, gyy,hatB) + REAL(dp), DIMENSION(:,:), INTENT(IN) :: gxx,gxy,gyy,hatB + INTEGER :: eo,iz,iky,ikx + REAL(dp) :: kx, ky + CALL allocate_array( kparray, 1,local_nky, 1,local_nkx, 1,local_nz+Ngz, 1,2) + DO eo = 1,Nzgrid + DO iz = 1,local_Nz+Ngz + DO iky = 1,local_nky + ky = kyarray(iky) + DO ikx = 1,local_nkx + kx = kxarray(ikx) + ! there is a factor 1/B from the normalization; important to match GENE + ! this factor comes from $b_a$ argument in the Bessel. Kperp is not used otherwise. + kparray(iky, ikx, iz, eo) = & + SQRT( gxx(iz,eo)*kx**2 + 2._dp*gxy(iz,eo)*kx*ky + gyy(iz,eo)*ky**2)/ hatB(iz,eo) + ENDDO + ENDDO + ENDDO + ENDDO + two_third_kpmax = 2._dp/3._dp * MAXVAL(kparray) + END SUBROUTINE - USE prec_const + SUBROUTINE grid_outputinputs(fid) + ! Write the input parameters to the results_xx.h5 file + USE futils, ONLY: attach, creatd IMPLICIT NONE - - INTEGER, INTENT(in) :: fidres - CHARACTER(len=256), INTENT(in) :: str - CALL attach(fidres, TRIM(str), "pmaxe", pmaxe) - CALL attach(fidres, TRIM(str), "jmaxe", jmaxe) - CALL attach(fidres, TRIM(str), "pmaxi", pmaxi) - CALL attach(fidres, TRIM(str), "jmaxi", jmaxi) - CALL attach(fidres, TRIM(str), "Nx", Nx) - CALL attach(fidres, TRIM(str), "Lx", Lx) - CALL attach(fidres, TRIM(str), "Nexc", Nexc) - CALL attach(fidres, TRIM(str), "Ny", Ny) - CALL attach(fidres, TRIM(str), "Ly", Ly) - CALL attach(fidres, TRIM(str), "Nz", Nz) - CALL attach(fidres, TRIM(str), "Nkx", Nkx) - CALL attach(fidres, TRIM(str), "Nky", Nky) - CALL attach(fidres, TRIM(str), "SG", SG) + INTEGER, INTENT(in) :: fid + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/grid' + CALL creatd(fid, 0,(/0/),TRIM(str),'Grid Input') + CALL attach(fid, TRIM(str), "pmax", pmax) + CALL attach(fid, TRIM(str), "jmax", jmax) + CALL attach(fid, TRIM(str), "Nx", Nx) + CALL attach(fid, TRIM(str), "Lx", Lx) + CALL attach(fid, TRIM(str), "Nexc", Nexc) + CALL attach(fid, TRIM(str), "Ny", Ny) + CALL attach(fid, TRIM(str), "Ly", Ly) + CALL attach(fid, TRIM(str), "Nz", Nz) + CALL attach(fid, TRIM(str), "Nkx", Nkx) + CALL attach(fid, TRIM(str), "Nky", Nky) + CALL attach(fid, TRIM(str), "SG", SG) END SUBROUTINE grid_outputinputs - FUNCTION bare(p_,j_) - IMPLICIT NONE - INTEGER :: bare, p_, j_ - bare = (jmaxe+1)*p_ + j_ + 1 - END FUNCTION - - FUNCTION bari(p_,j_) + FUNCTION bar(p_,j_) IMPLICIT NONE - INTEGER :: bari, p_, j_ - bari = (jmaxi+1)*p_ + j_ + 1 + INTEGER :: bar, p_, j_ + bar = (jmax+1)*p_ + j_ + 1 END FUNCTION - SUBROUTINE decomp1D( n, numprocs, myid, s, e ) - INTEGER :: n, numprocs, myid, s, e + SUBROUTINE decomp1D( n, numprocs, myid, is, ie ) + INTEGER :: n, numprocs, myid, is, ie INTEGER :: nlocal INTEGER :: deficit nlocal = n / numprocs - s = myid * nlocal + 1 + is = myid * nlocal + 1 deficit = MOD(n,numprocs) - s = s + MIN(myid,deficit) + is = is + MIN(myid,deficit) IF (myid .LT. deficit) nlocal = nlocal + 1 - e = s + nlocal - 1 - IF (e .GT. n .OR. myid .EQ. numprocs-1) e = n + ie = is + nlocal - 1 + IF ((ie .GT. n) .OR. (myid .EQ. numprocs-1)) ie = n END SUBROUTINE decomp1D END MODULE grid diff --git a/src/inital.F90 b/src/inital.F90 index f7206588..11f6433f 100644 --- a/src/inital.F90 +++ b/src/inital.F90 @@ -3,15 +3,15 @@ !******************************************************************************! 
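! Rough outline of the initialization sequence implemented below (names as used
! in this file):
!   1. fill the moments: load a checkpoint if job2load >= 0, otherwise follow
!      INIT_OPT ('phi', 'phi_ppj', 'mom00', 'allmom', 'blob', 'ppj')
!   2. update_ghosts_moments, solve_EM_fields, update_ghosts_EM
!   3. apply_closure_model, followed by a last ghost exchange
!   4. init_collision (replaces the explicit load_COSOlver_mat call)
!   5. compute_fluid_moments and nonlinear_init before the first RHS evaluation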
SUBROUTINE inital - USE basic, ONLY: my_id + USE basic, ONLY: speak USE initial_par, ONLY: INIT_OPT USE time_integration, ONLY: set_updatetlevel - USE collision, ONLY: load_COSOlver_mat, cosolver_coll + USE collision, ONLY: init_collision USE closure, ONLY: apply_closure_model USE ghosts, ONLY: update_ghosts_moments, update_ghosts_EM USE restarts, ONLY: load_moments, job2load USE processing, ONLY: compute_fluid_moments - USE model, ONLY: KIN_E, LINEARITY + USE model, ONLY: LINEARITY USE nonlinear, ONLY: compute_Sapj, nonlinear_init IMPLICIT NONE @@ -20,7 +20,7 @@ SUBROUTINE inital !!!!!! Set the moments arrays Nepj, Nipj and phi!!!!!! ! through loading a previous state IF ( job2load .GE. 0 ) THEN - IF (my_id .EQ. 0) WRITE(*,*) 'Load moments' + CALL speak('Load moments') CALL load_moments ! get N_0 CALL update_ghosts_moments CALL solve_EM_fields ! compute phi_0=phi(N_0) @@ -30,37 +30,37 @@ SUBROUTINE inital SELECT CASE (INIT_OPT) ! set phi with noise and set moments to 0 CASE ('phi') - IF (my_id .EQ. 0) WRITE(*,*) 'Init noisy phi' + CALL speak('Init noisy phi') CALL init_phi CALL update_ghosts_EM CASE ('phi_ppj') - IF (my_id .EQ. 0) WRITE(*,*) 'Init noisy phi' + CALL speak('Init noisy phi') CALL init_phi_ppj CALL update_ghosts_EM ! set moments_00 (GC density) with noise and compute phi afterwards CASE('mom00') - IF (my_id .EQ. 0) WRITE(*,*) 'Init noisy gyrocenter density' + CALL speak('Init noisy gyrocenter density') CALL init_gyrodens ! init only gyrocenter density CALL update_ghosts_moments CALL solve_EM_fields CALL update_ghosts_EM ! init all moments randomly (unadvised) CASE('allmom') - IF (my_id .EQ. 0) WRITE(*,*) 'Init noisy moments' + CALL speak('Init noisy moments') CALL init_moments ! init all moments CALL update_ghosts_moments CALL solve_EM_fields CALL update_ghosts_EM ! init a gaussian blob in gyrodens CASE('blob') - IF (my_id .EQ. 0) WRITE(*,*) '--init a blob' + CALL speak('--init a blob') CALL initialize_blob CALL update_ghosts_moments CALL solve_EM_fields CALL update_ghosts_EM ! init moments 00 with a power law similarly to GENE CASE('ppj') - IF (my_id .EQ. 0) WRITE(*,*) 'ppj init ~ GENE' + CALL speak('ppj init ~ GENE') call init_ppj CALL update_ghosts_moments CALL solve_EM_fields @@ -68,21 +68,20 @@ SUBROUTINE inital END SELECT ENDIF ! closure of j>J, p>P and j<0, p<0 moments - IF (my_id .EQ. 0) WRITE(*,*) 'Apply closure' + CALL speak('Apply closure') CALL apply_closure_model ! ghosts for p parallelization - IF (my_id .EQ. 0) WRITE(*,*) 'Ghosts communication' + CALL speak('Ghosts communication') CALL update_ghosts_moments CALL update_ghosts_EM !! End of phi and moments initialization - ! Load the COSOlver collision operator coefficients - IF(cosolver_coll) & - CALL load_COSOlver_mat + ! Init collision operator + CALL init_collision !! Preparing auxiliary arrays at initial state ! particle density, fluid velocity and temperature (used in diagnose) - IF (my_id .EQ. 0) WRITE(*,*) 'Computing fluid moments' + CALL speak('Computing fluid moments') CALL compute_fluid_moments ! init auxval for nonlinear @@ -97,86 +96,58 @@ END SUBROUTINE inital !!!!!!! Initialize all the moments randomly !******************************************************************************! 
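! Indexing reminder for the merged moments array (cf. the sketch in grid_mod):
! moments(ia,ip,ij,iky,ikx,iz,:) now carries all species in one array, with the
! last dimension being the time-integration level, and the p, j and z dimensions
! include the ghost cells, so the loops below run over 1..local_n+ng. The noise
! is centred on init_background with spread init_noiselvl, the ky=0 row is
! symmetrised in kx, and the 2/3-rule masks AA_x, AA_y are applied when the run
! is nonlinear.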
SUBROUTINE init_moments - USE basic - USE grid + USE grid, ONLY: local_na, local_np, local_nj, total_nkx, local_nky, local_nz,& + ngp, ngj, ngz, iky0, contains_ky0, AA_x, AA_y USE initial_par,ONLY: iseed, init_noiselvl, init_background - USE fields, ONLY: moments_e, moments_i + USE fields, ONLY: moments USE prec_const, ONLY: dp USE utility, ONLY: checkfield - USE model, ONLY : LINEARITY, KIN_E + USE model, ONLY: LINEARITY + USE parallel, ONLY: my_id IMPLICIT NONE REAL(dp) :: noise INTEGER, DIMENSION(12) :: iseedarr + INTEGER :: ia,ip,ij,ikx,iky,iz ! Seed random number generator iseedarr(:)=iseed CALL RANDOM_SEED(PUT=iseedarr+my_id) !**** Broad noise initialization ******************************************* - ! Electron init - IF(KIN_E) THEN - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize + DO ia=1,local_na + DO ip=1,local_np + ngp + DO ij=1,local_nj + ngj + DO ikx=1,total_nkx + DO iky=1,local_nky + DO iz=1,local_nz +ngz CALL RANDOM_NUMBER(noise) - moments_e(ip,ij,iky,ikx,iz,:) = (init_background + init_noiselvl*(noise-0.5_dp)) + moments(ia,ip,ij,iky,ikx,iz,:) = (init_background + init_noiselvl*(noise-0.5_dp)) END DO END DO END DO IF ( contains_ky0 ) THEN - DO iky=2,Nky/2 !symmetry at ky = 0 for all z - moments_e(ip,ij,iky_0,ikx,:,:) = moments_e( ip,ij,iky_0,Nkx+2-ikx,:, :) + DO ikx=2,total_nkx/2 !symmetry at ky = 0 for all z + moments(ia,ip,ij,iky0,ikx,:,:) = moments(ia,ip,ij,iky0,total_nkx+2-ikx,:,:) END DO ENDIF END DO END DO - ENDIF - ! Ion init - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize - CALL RANDOM_NUMBER(noise) - moments_i(ip,ij,iky,ikx,iz,:) = (init_background + init_noiselvl*(noise-0.5_dp)) - END DO - END DO - END DO - - IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 !symmetry at ky = 0 for all z - moments_i( ip,ij,iky_0,ikx,:,:) = moments_i( ip,ij,iky_0,Nkx+2-ikx,:,:) - END DO - ENDIF - - END DO - END DO - ! Putting to zero modes that are not in the 2/3 Orszag rule IF (LINEARITY .NE. 'linear') THEN - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize - IF(KIN_E) THEN - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - moments_e( ip,ij,iky,ikx,iz, :) = moments_e( ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) - ENDDO + DO ikx=1,total_nkx + DO iky=1,local_nky + DO iz=1,local_nz + ngz + DO ip=1,local_np + ngp + DO ij=1,local_nj + ngj + moments(ia,ip,ij,iky,ikx,iz, :) = moments(ia, ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) + ENDDO + ENDDO + ENDDO ENDDO - ENDIF - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - moments_i( ip,ij,iky,ikx,iz, :) = moments_i( ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) - ENDDO - ENDDO - ENDDO - ENDDO ENDDO ENDIF + ENDDO END SUBROUTINE init_moments !******************************************************************************! @@ -184,16 +155,18 @@ END SUBROUTINE init_moments !!!!!!! Initialize the gyrocenter density randomly !******************************************************************************! 
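! Same noise initialisation as init_moments, but only the gyrocenter density
! (the lowest Hermite/Laguerre moment) is seeded; every other (p,j) moment
! starts at zero. Note that the selection below tests parray(ip) and jarray(ij)
! against 1, whereas the pre-refactor code picked the first index, i.e. degree 0;
! worth double-checking when the new version is tested.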
SUBROUTINE init_gyrodens - USE basic - USE grid - USE fields - USE prec_const - USE utility, ONLY: checkfield - USE initial_par - USE model, ONLY: KIN_E, LINEARITY + USE grid, ONLY: local_na, local_np, local_nj, total_nkx, local_nky, local_nz,& + ngp, ngj, ngz, iky0, parray, jarray, contains_ky0, AA_x, AA_y + USE fields, ONLY: moments + USE prec_const, ONLY: dp + USE utility, ONLY: checkfield + USE initial_par,ONLY: iseed, init_noiselvl, init_background + USE model, ONLY: LINEARITY + USE parallel, ONLY: my_id IMPLICIT NONE REAL(dp) :: noise + INTEGER :: ia,ip,ij,ikx,iky,iz INTEGER, DIMENSION(12) :: iseedarr ! Seed random number generator @@ -201,73 +174,43 @@ SUBROUTINE init_gyrodens CALL RANDOM_SEED(PUT=iseedarr+my_id) !**** Broad noise initialization ******************************************* - IF(KIN_E) THEN - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize - CALL RANDOM_NUMBER(noise) - IF ( (ip .EQ. 1) .AND. (ij .EQ. 1) ) THEN - moments_e(ip,ij,iky,ikx,iz,:) = (init_background + init_noiselvl*(noise-0.5_dp)) - ELSE - moments_e(ip,ij,iky,ikx,iz,:) = 0._dp - ENDIF - END DO - END DO - END DO - IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 !symmetry at ky = 0 for all z - moments_e(ip,ij,iky_0,ikx,:,:) = moments_e(ip,ij,iky_0,Nkx+2-ikx,:,:) - END DO - ENDIF - END DO - END DO - ENDIF - - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize + DO ia=1,local_na + DO ip=1,local_np+ngp + DO ij=1,local_nj+ngj + DO ikx=1,total_nkx + DO iky=1,local_nky + DO iz=1,local_nz+ngz CALL RANDOM_NUMBER(noise) - IF ( (ip .EQ. 1) .AND. (ij .EQ. 1) ) THEN - moments_i(ip,ij,iky,ikx,iz,:) = (init_background + init_noiselvl*(noise-0.5_dp)) + IF ( (parray(ip) .EQ. 1) .AND. (jarray(ij) .EQ. 1) ) THEN + moments(ia,ip,ij,iky,ikx,iz,:) = (init_background + init_noiselvl*(noise-0.5_dp)) ELSE - moments_i(ip,ij,iky,ikx,iz,:) = 0._dp + moments(ia,ip,ij,iky,ikx,iz,:) = 0._dp ENDIF END DO END DO END DO IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 !symmetry at ky = 0 for all z - moments_i( ip,ij,iky_0,ikx,:,:) = moments_i( ip,ij,iky_0,Nkx+2-ikx,:,:) + DO ikx=2,total_nkx/2 !symmetry at ky = 0 for all z + moments(ia, ip,ij,iky0,ikx,:,:) = moments(ia, ip,ij,iky0,total_nkx+2-ikx,:,:) END DO ENDIF END DO END DO - ! Putting to zero modes that are not in the 2/3 Orszag rule IF (LINEARITY .NE. 'linear') THEN - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize - IF(KIN_E) THEN - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - moments_e( ip,ij,iky,ikx,iz, :) = moments_e( ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) - ENDDO - ENDDO - ENDIF - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - moments_i( ip,ij,iky,ikx,iz, :) = moments_i( ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) - ENDDO + DO ikx=1,total_nkx + DO iky=1,local_nky + DO iz=1,local_nz+ngz + DO ip=1,local_np+ngp + DO ij=1,local_nj+ngj + moments(ia, ip,ij,iky,ikx,iz, :) = moments(ia, ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) + ENDDO + ENDDO + ENDDO ENDDO ENDDO - ENDDO - ENDDO ENDIF + ENDDO END SUBROUTINE init_gyrodens !******************************************************************************! @@ -275,57 +218,53 @@ END SUBROUTINE init_gyrodens !!!!!!! Initialize a noisy ES potential and cancel the moments !******************************************************************************! 
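! Fills phi with uniform noise on every local (ky,kx,z) point and zeroes all
! moments. On the ky=0 row the kx modes are symmetrised and the k-origin is
! forced to be real so that the inverse transform of the initial potential is
! real; the zonal-flow seeding block is kept here commented out.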
SUBROUTINE init_phi - USE basic - USE grid - USE fields - USE prec_const - USE initial_par - USE model, ONLY: KIN_E, LINEARITY + USE grid, ONLY: total_nkx, local_nky, local_nz,& + ngz, iky0, ikx0, contains_ky0 + USE fields, ONLY: phi, moments + USE prec_const, ONLY: dp + USE initial_par,ONLY: iseed, init_noiselvl, init_background + USE model, ONLY: LINEARITY + USE parallel, ONLY: my_id IMPLICIT NONE - REAL(dp) :: noise INTEGER, DIMENSION(12) :: iseedarr - + INTEGER :: iky,ikx,iz ! Seed random number generator iseedarr(:)=iseed CALL RANDOM_SEED(PUT=iseedarr+my_id) - - !**** noise initialization ******************************************* - - DO ikx=ikxs,ikxe - DO iky=ikys,ikye + !**** noise initialization ******************************************* + DO ikx=1,total_nkx + DO iky=1,local_nky + DO iz=1,local_nz+ngz CALL RANDOM_NUMBER(noise) - DO iz=izs,ize - phi(iky,ikx,iz) = (init_background + init_noiselvl*(noise-0.5_dp))!*AA_x(ikx)*AA_y(iky) - ENDDO - END DO + phi(iky,ikx,iz) = (init_background + init_noiselvl*(noise-0.5_dp))!*AA_x(ikx)*AA_y(iky) + ENDDO END DO - - !symmetry at ky = 0 to keep real inverse transform - IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 - phi(iky_0,ikx,izs:ize) = phi(iky_0,Nkx+2-ikx,izs:ize) - END DO - phi(iky_0,ikx_0,izs:ize) = REAL(phi(iky_0,ikx_0,izs:ize)) !origin must be real - ENDIF - - !**** ensure no previous moments initialization - IF(KIN_E) moments_e = 0._dp - moments_i = 0._dp - - !**** Zonal Flow initialization ******************************************* - ! put a mode at ikx = mode number + 1, symmetry is already included since kx>=0 - IF(INIT_ZF .GT. 0) THEN - IF (my_id .EQ. 0) WRITE(*,*) 'Init ZF phi' - IF( (INIT_ZF+1 .GT. ikxs) .AND. (INIT_ZF+1 .LT. ikxe) ) THEN - DO iz = izs,ize - phi(iky_0,INIT_ZF+1,iz) = ZF_AMP*(2._dp*PI)**2/deltakx/deltaky/2._dp * COS((iz-1)/Nz*2._dp*PI) - moments_i(1,1,iky_0,INIT_ZF+1,iz,:) = kxarray(INIT_ZF+1)**2*phi(iky_0,INIT_ZF+1,iz)* COS((iz-1)/Nz*2._dp*PI) - IF(KIN_E) moments_e(1,1,iky_0,INIT_ZF+1,iz,:) = 0._dp - ENDDO - ENDIF - ENDIF - + END DO + !symmetry at ky = 0 to keep real inverse transform + IF ( contains_ky0 ) THEN + DO iz=1,local_nz+ngz + DO ikx=2,total_nkx/2 + phi(iky0,ikx,iz) = phi(iky0,total_nkx+2-ikx,iz) + ENDDO + phi(iky0,ikx0,iz) = REAL(phi(iky0,ikx0,iz)) !origin must be real + END DO + ENDIF + !**** ensure no previous moments initialization + moments = 0._dp + !**** Zonal Flow initialization ******************************************* + ! put a mode at ikx = mode number + 1, symmetry is already included since kx>=0 + ! IF(INIT_ZF .GT. 0) THEN + ! IF (my_id .EQ. 0) WRITE(*,*) 'Init ZF phi' + ! IF( (INIT_ZF+1 .GT. ikxs) .AND. (INIT_ZF+1 .LT. ikxe) ) THEN + ! DO ia=1,local_na + ! DO iz = 1,local_nz+ngz + ! phi(iky0,INIT_ZF+1,iz) = ZF_AMP*(2._dp*PI)**2/deltakx/deltaky/2._dp * COS((iz-1)/Nz*2._dp*PI) + ! moments(ia,ip0,ij0,iky0,INIT_ZF+1,iz,:) = kxarray(INIT_ZF+1)**2*phi(iky0,INIT_ZF+1,iz)* COS((iz-1)/Nz*2._dp*PI) + ! ENDDO + ! ENDDO + ! ENDIF + ! ENDIF END SUBROUTINE init_phi !******************************************************************************! @@ -333,23 +272,24 @@ END SUBROUTINE init_phi !!!!!!! Initialize a ppj ES potential and cancel the moments !******************************************************************************! 
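The "symmetry at ky = 0" blocks in init_phi above (and in the moment initializers) address the reality condition of a real-to-complex Fourier representation. Assuming that indices ikx and total_nkx+2-ikx store the +k_x and -k_x partners of the kx grid, the condition for a real field reads

  \phi(x,y,z) \in \mathbb{R} \;\Longrightarrow\; \hat{\phi}(-k_x, k_y\!=\!0, z) = \hat{\phi}^{*}(k_x, k_y\!=\!0, z), \qquad \hat{\phi}(k_x\!=\!0, k_y\!=\!0, z) \in \mathbb{R},

which is why these loops pair ikx with total_nkx+2-ikx on the iky0 row and force the kx = ky = 0 origin to be real via REAL(...).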
SUBROUTINE init_phi_ppj - USE basic - USE grid - USE fields - USE prec_const - USE initial_par - USE model, ONLY: KIN_E, LINEARITY - USE geometry, ONLY: Jacobian, iInt_Jacobian + USE grid, ONLY: total_nkx, local_nky, local_nz,& + ngz, iky0, ikx0, contains_ky0, ieven, kxarray, kyarray, zarray, deltakx + USE fields, ONLY: phi, moments + USE prec_const, ONLY: dp + USE initial_par,ONLY: iseed, init_noiselvl, init_background + USE model, ONLY: LINEARITY + USE geometry, ONLY: Jacobian, iInt_Jacobian IMPLICIT NONE REAL(dp) :: kx, ky, z, amp + INTEGER :: ikx, iky, iz amp = 1.0_dp !**** ppj initialization ******************************************* - DO ikx=ikxs,ikxe + DO ikx=1,total_nkx kx = kxarray(ikx) - DO iky=ikys,ikye + DO iky=1,local_nky ky = kyarray(iky) - DO iz=izs,ize - z = zarray(iz,0) + DO iz=1,local_nz+ngz + z = zarray(iz,ieven) IF (ky .NE. 0) THEN phi(iky,ikx,iz) = 0._dp ELSE @@ -357,23 +297,21 @@ SUBROUTINE init_phi_ppj ENDIF ! z-dep and noise phi(iky,ikx,iz) = phi(iky,ikx,iz) * & - (Jacobian(iz,0)*iInt_Jacobian)**2 + (Jacobian(iz,ieven)*iInt_Jacobian)**2 END DO END DO END DO - !symmetry at ky = 0 to keep real inverse transform IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 - phi(iky_0,ikx,izs:ize) = phi(iky_0,Nkx+2-ikx,izs:ize) + DO iz=1,local_nz+ngz + DO ikx=2,total_nkx/2 + phi(iky0,ikx,iz) = phi(iky0,total_nkx+2-ikx,iz) + ENDDO + phi(iky0,ikx0,iz) = REAL(phi(iky0,ikx0,iz)) !origin must be real END DO - phi(iky_0,ikx_0,izs:ize) = REAL(phi(iky_0,ikx_0,izs:ize)) !origin must be real ENDIF - !**** ensure no previous moments initialization - IF(KIN_E) moments_e = 0._dp - moments_i = 0._dp - + moments = 0._dp END SUBROUTINE init_phi_ppj !******************************************************************************! @@ -382,184 +320,117 @@ END SUBROUTINE init_phi_ppj !!!!!!! Initialize an ionic Gaussian blob on top of the preexisting modes !******************************************************************************! SUBROUTINE initialize_blob - USE fields - USE grid - USE geometry, ONLY: shear, Jacobian, iInt_Jacobian - USE model, ONLY: KIN_E, LINEARITY + USE grid, ONLY: local_na, local_np, local_nj, total_nkx, local_nky, local_nz,& + AA_x, AA_y,& + ngp,ngj,ngz, iky0, ieven, kxarray, kyarray, zarray + USE fields, ONLY: moments + USE prec_const, ONLY: dp + USE initial_par,ONLY: iseed, init_noiselvl, init_background + USE model, ONLY: LINEARITY + USE geometry, ONLY: Jacobian, iInt_Jacobian, shear IMPLICIT NONE REAL(dp) ::kx, ky, z, sigma_x, sigma_y, gain + INTEGER :: ia,iky,ikx,iz,ip,ij sigma_y = 1.0 sigma_x = sigma_y gain = 10.0 - - - DO iky=ikys,ikye - ky = kyarray(iky) - DO iz=izs,ize - z = zarray(iz,0) - DO ikx=ikxs,ikxe - kx = kxarray(ikx) + z*shear*ky - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - IF( (iky .NE. iky_0) .AND. (ip .EQ. 1) .AND. (ij .EQ. 1)) THEN - moments_i( ip,ij,iky,ikx,iz, :) = moments_i( ip,ij,iky,ikx,iz, :) & - + gain * exp(-((kx/sigma_x)**2+(ky/sigma_y)**2)) & - * AA_x(ikx)*AA_y(iky)* & - (Jacobian(iz,0)*iInt_Jacobian)**2!& - ! * exp(sigmai2_taui_o2*(kx**2+ky**2)) - ENDIF - ENDDO - ENDDO - IF(KIN_E) THEN - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - IF( (iky .NE. iky_0) .AND. (ip .EQ. 1) .AND. (ij .EQ. 1)) THEN - moments_e( ip,ij,iky,ikx,iz, :) = moments_e( ip,ij,iky,ikx,iz, :) & - + gain * exp(-((kx/sigma_x)**2+(ky/sigma_y)**2)) & - * AA_x(ikx)*AA_y(iky)* & - (Jacobian(iz,0)*iInt_Jacobian)**2!& - ! 
* exp(sigmai2_taui_o2*(kx**2+ky**2)) - ENDIF - ENDDO + DO ia=1,local_na + DO iky=1,local_nky + ky = kyarray(iky) + DO iz=1,local_nz+ngz + z = zarray(iz,ieven) + DO ikx=1,total_nkx + kx = kxarray(ikx) + z*shear*ky + DO ip=1,local_np+ngp + DO ij=1,local_nj+ngj + IF( (iky .NE. iky0) .AND. (ip .EQ. 1) .AND. (ij .EQ. 1)) THEN + moments(ia,ip,ij,iky,ikx,iz, :) = moments(ia,ip,ij,iky,ikx,iz, :) & + + gain * exp(-((kx/sigma_x)**2+(ky/sigma_y)**2)) & + * AA_x(ikx)*AA_y(iky)* & + (Jacobian(iz,ieven)*iInt_Jacobian)**2!& + ! * exp(sigmai2_taui_o2*(kx**2+ky**2)) + ENDIF + ENDDO + ENDDO + ENDDO + ENDDO ENDDO - ENDIF - ENDDO - ENDDO ENDDO END SUBROUTINE initialize_blob !******************************************************************************! - - !******************************************************************************! !!!!!!! Initialize the gyrocenter in a ppj gene manner (power law) !******************************************************************************! SUBROUTINE init_ppj - USE basic - USE grid - USE fields, ONLY: moments_e, moments_i - USE array, ONLY: kernel_e, kernel_i - USE prec_const - USE utility, ONLY: checkfield - USE initial_par - USE model, ONLY: KIN_E, LINEARITY - USE geometry, ONLY: Jacobian, iInt_Jacobian - + USE grid, ONLY: local_na, local_np, local_nj, total_nkx, local_nky, local_nz,& + AA_x, AA_y, deltakx, deltaky,contains_ky0,& + ngp,ngj,ngz, iky0, ieven, kxarray, kyarray, zarray + USE fields, ONLY: moments + USE prec_const, ONLY: dp, pi + USE initial_par,ONLY: iseed, init_noiselvl, init_background + USE model, ONLY: LINEARITY + USE geometry, ONLY: Jacobian, iInt_Jacobian, shear IMPLICIT NONE - REAL(dp) :: kx, ky, sigma_z, amp, z - + INTEGER :: ia,iky,ikx,iz,ip,ij sigma_z = pi/4._dp amp = 1.0_dp - !**** Broad noise initialization ******************************************* - ! Electrons - IF (KIN_E) THEN - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - IF ( (ip .EQ. 1) .AND. (ij .EQ. 1) ) THEN - DO ikx=ikxs,ikxe - kx = kxarray(ikx) - DO iky=ikys,ikye - ky = kyarray(iky) - DO iz=izs,ize - z = zarray(iz,0) - IF (kx .EQ. 0) THEN - IF(ky .EQ. 0) THEN - moments_e(ip,ij,iky,ikx,iz,:) = 0._dp - ELSE - moments_e(ip,ij,iky,ikx,iz,:) = 0.5_dp * ky_min/(ABS(ky)+ky_min) - ENDIF - ELSE - IF(ky .GT. 0) THEN - moments_e(ip,ij,iky,ikx,iz,:) = (deltakx/(ABS(kx)+deltakx))*(ky_min/(ABS(ky)+ky_min)) + DO ia=1,local_na + DO ip=1,local_np+ngp + DO ij=1,local_nj+ngj + IF ( (ip .EQ. 1) .AND. (ij .EQ. 1) ) THEN + DO ikx=1,total_nkx + kx = kxarray(ikx) + DO iky=1,local_nky + ky = kyarray(iky) + DO iz=1,local_nz+ngz + z = zarray(iz,ieven) + IF (kx .EQ. 0) THEN + IF(ky .EQ. 0) THEN + moments(ia,ip,ij,iky,ikx,iz,:) = 0._dp + ELSE + moments(ia,ip,ij,iky,ikx,iz,:) = 0.5_dp * deltaky/(ABS(ky)+deltaky) + ENDIF ELSE - moments_e(ip,ij,iky,ikx,iz,:) = 0.5_dp*amp*(deltakx/(ABS(kx)+deltakx)) + IF(ky .GT. 0) THEN + moments(ia,ip,ij,iky,ikx,iz,:) = (deltakx/(ABS(kx)+deltakx))*(deltaky/(ABS(ky)+deltaky)) + ELSE + moments(ia,ip,ij,iky,ikx,iz,:) = 0.5_dp*amp*(deltakx/(ABS(kx)+deltakx)) + ENDIF ENDIF - ENDIF - ! z-dep and noise - moments_e(ip,ij,iky,ikx,iz,:) = moments_e(ip,ij,iky,ikx,iz,:) * & - (Jacobian(iz,0)*iInt_Jacobian)**2 - - ! divide by kernel_0 to adjust to particle density (n = kernel_0 N00) - ! moments_e(ip,ij,iky,ikx,iz,:) = moments_e(ip,ij,iky,ikx,iz,:)/kernel_e(ij,iky,ikx,iz,0) + ! z-dep and noise + moments(ia,ip,ij,iky,ikx,iz,:) = moments(ia,ip,ij,iky,ikx,iz,:) * & + (Jacobian(iz,ieven)*iInt_Jacobian)**2 + ! divide by kernel_0 to adjust to particle density (n = kernel_0 N00) + ! 
moments(ia,ip,ij,iky,ikx,iz,:) = moments(ia,ip,ij,iky,ikx,iz,:)/kernel(ia,ij,iky,ikx,iz,0) + END DO END DO END DO - END DO - - IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 !symmetry at kx = 0 for all z - moments_e(ip,ij,iky_0,ikx,:,:) = moments_e( ip,ij,iky_0,Nkx+2-ikx,:, :) - END DO - ENDIF - ELSE - moments_e(ip,ij,:,:,:,:) = 0._dp - ENDIF - END DO - END DO - ENDIF - - ! Ions - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - IF ( (ip .EQ. 1) .AND. (ij .EQ. 1) ) THEN - DO ikx=ikxs,ikxe - kx = kxarray(ikx) - DO iky=ikys,ikye - ky = kyarray(iky) - DO iz=izs,ize - z = zarray(iz,0) - IF (kx .EQ. 0) THEN - IF(ky .EQ. 0) THEN - moments_i(ip,ij,iky,ikx,iz,:) = 0._dp - ELSE - moments_i(ip,ij,iky,ikx,iz,:) = 0.5_dp * ky_min/(ABS(ky)+ky_min) - ENDIF - ELSE - IF(ky .GT. 0) THEN - moments_i(ip,ij,iky,ikx,iz,:) = (deltakx/(ABS(kx)+deltakx))*(ky_min/(ABS(ky)+ky_min)) - ELSE - moments_i(ip,ij,iky,ikx,iz,:) = 0.5_dp*amp*(deltakx/(ABS(kx)+deltakx)) - ENDIF - ENDIF - ! z-dep and noise - moments_i(ip,ij,iky,ikx,iz,:) = moments_i(ip,ij,iky,ikx,iz,:) * & - (Jacobian(iz,0)*iInt_Jacobian)**2 - ! divide by kernel_0 to adjust to particle density (n = kernel_0 N00) - ! moments_i(ip,ij,iky,ikx,iz,:) = moments_i(ip,ij,iky,ikx,iz,:)/kernel_i(ij,iky,ikx,iz,0) + IF ( contains_ky0 ) THEN + DO ikx=2,total_nkx/2 !symmetry at kx = 0 for all z + moments(ia,ip,ij,iky0,ikx,:,:) = moments(ia, ip,ij,iky0,total_nkx+2-ikx,:, :) END DO - END DO - END DO - - IF ( contains_ky0 ) THEN - DO ikx=2,Nkx/2 !symmetry at kx = 0 for all z - moments_i(ip,ij,iky_0,ikx,:,:) = moments_i( ip,ij,iky_0,Nkx+2-ikx,:, :) - END DO - ENDIF + ENDIF ELSE - moments_i(ip,ij,:,:,:,:) = 0._dp + moments(ia,ip,ij,:,:,:,:) = 0._dp ENDIF END DO END DO - ! Putting to zero modes that are not in the 2/3 Orszag rule IF (LINEARITY .NE. 'linear') THEN - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz=izs,ize - DO ip=ips_e,ipe_e - DO ij=ijs_e,ije_e - moments_e( ip,ij,iky,ikx,iz, :) = moments_e( ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) - ENDDO - ENDDO - DO ip=ips_i,ipe_i - DO ij=ijs_i,ije_i - moments_i( ip,ij,iky,ikx,iz, :) = moments_i( ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) + DO ikx=1,total_nkx + DO iky=1,local_nky + DO iz=1,local_nz+ngz + DO ip=1,local_np+ngp + DO ij=1,local_nj+ngj + moments(ia, ip,ij,iky,ikx,iz, :) = moments(ia, ip,ij,iky,ikx,iz, :)*AA_x(ikx)*AA_y(iky) ENDDO ENDDO ENDDO ENDDO ENDDO ENDIF - + ENDDO END SUBROUTINE init_ppj !******************************************************************************! diff --git a/src/initial_par_mod.F90 b/src/initial_par_mod.F90 index a9d9714f..0b95194f 100644 --- a/src/initial_par_mod.F90 +++ b/src/initial_par_mod.F90 @@ -43,23 +43,18 @@ CONTAINS END SUBROUTINE initial_readinputs - SUBROUTINE initial_outputinputs(fidres, str) + SUBROUTINE initial_outputinputs(fid) ! 
Write the input parameters to the results_xx.h5 file - - USE futils, ONLY: attach - USE prec_const + USE futils, ONLY: attach, creatd IMPLICIT NONE - INTEGER, INTENT(in) :: fidres - CHARACTER(len=256), INTENT(in) :: str - - CALL attach(fidres, TRIM(str), "INIT_OPT", INIT_OPT) - - CALL attach(fidres, TRIM(str), "init_background", init_background) - - CALL attach(fidres, TRIM(str), "init_noiselvl", init_noiselvl) - - CALL attach(fidres, TRIM(str), "iseed", iseed) - + INTEGER, INTENT(in) :: fid + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/intial' + CALL creatd(fid, 0,(/0/),TRIM(str),'Initial Input') + CALL attach(fid, TRIM(str), "INIT_OPT", INIT_OPT) + CALL attach(fid, TRIM(str), "init_background", init_background) + CALL attach(fid, TRIM(str), "init_noiselvl", init_noiselvl) + CALL attach(fid, TRIM(str), "iseed", iseed) END SUBROUTINE initial_outputinputs END MODULE initial_par diff --git a/src/memory.F90 b/src/memory.F90 index 4e737e77..4263680f 100644 --- a/src/memory.F90 +++ b/src/memory.F90 @@ -4,138 +4,77 @@ SUBROUTINE memory USE array USE basic, ONLY: allocate_array USE fields - USE grid - USE time_integration - USE model, ONLY: LINEARITY, KIN_E + USE grid, ONLY: local_Na, local_Np,Ngp ,local_Nj,Ngj, local_nky, local_nkx,local_Nz,Ngz, jmax, pmax USE collision - + USE time_integration, ONLY: ntimelevel USE prec_const + USE model, ONLY: Na IMPLICIT NONE ! Electrostatic potential - CALL allocate_array( phi, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( psi, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( phi_ZF, ikxs,ikxe, izs,ize) - CALL allocate_array( phi_NZ, ikys,ikye, izs,ize) - CALL allocate_array(inv_poisson_op, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( inv_ampere_op, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( inv_pol_ion, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array(HF_phi_correction_operator, ikys,ikye, ikxs,ikxe, izs,ize) - - !Electrons arrays - IF(KIN_E) THEN - CALL allocate_array( Ne00, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( dens_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( upar_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( uper_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Tpar_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Tper_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( temp_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Kernel_e, ijgs_e,ijge_e, ikys,ikye, ikxs,ikxe, izgs,izge, 0,1) - CALL allocate_array( moments_e, ipgs_e,ipge_e, ijgs_e,ijge_e, ikys,ikye, ikxs,ikxe, izgs,izge, 1,ntimelevel ) - CALL allocate_array( Nepjz, ips_e,ipe_e, ijs_e,ije_e, izs,ize) - CALL allocate_array( moments_rhs_e, ips_e,ipe_e, ijs_e,ije_e, ikys,ikye, ikxs,ikxe, izs,ize, 1,ntimelevel ) - CALL allocate_array( nadiab_moments_e, ipgs_e,ipge_e, ijgs_e,ijge_e, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( ddz_nepj, ipgs_e,ipge_e, ijgs_e,ijge_e, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( ddzND_Nepj, ipgs_e,ipge_e, ijgs_e,ijge_e, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( interp_nepj, ipgs_e,ipge_e, ijgs_e,ijge_e, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( moments_e_ZF, ipgs_e,ipge_e, ijgs_e,ijge_e, ikxs,ikxe, izs,ize) - CALL allocate_array( moments_e_NZ, ipgs_e,ipge_e, ijgs_e,ijge_e, ikys,ikye, izs,ize) - CALL allocate_array( TColl_e, ips_e,ipe_e, ijs_e,ije_e , ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Sepj, ips_e,ipe_e, ijs_e,ije_e, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( xnepj, 
ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( xnepp2j, ips_e,ipe_e) - CALL allocate_array( xnepp1j, ips_e,ipe_e) - CALL allocate_array( xnepm1j, ips_e,ipe_e) - CALL allocate_array( xnepm2j, ips_e,ipe_e) - CALL allocate_array( xnepjp1, ijs_e,ije_e) - CALL allocate_array( xnepjm1, ijs_e,ije_e) - CALL allocate_array( ynepp1j, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( ynepm1j, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( ynepp1jm1, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( ynepm1jm1, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( zNepm1j, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( zNepm1jp1, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( zNepm1jm1, ips_e,ipe_e, ijs_e,ije_e) - ENDIF + CALL allocate_array( phi, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz) + CALL allocate_array( psi, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz) + CALL allocate_array(inv_poisson_op, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( inv_ampere_op, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( inv_pol_ion, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array(HF_phi_correction_operator, 1,local_nky, 1,local_nkx, 1,local_Nz) - !Ions arrays - CALL allocate_array( Ni00, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( dens_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( upar_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( uper_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Tpar_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Tper_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( temp_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Kernel_i, ijgs_i,ijge_i, ikys,ikye, ikxs,ikxe, izgs,izge, 0,1) - CALL allocate_array( moments_i, ipgs_i,ipge_i, ijgs_i,ijge_i, ikys,ikye, ikxs,ikxe, izgs,izge, 1,ntimelevel ) - CALL allocate_array( Nipjz, ips_i,ipe_i, ijs_i,ije_i, izs,ize) - CALL allocate_array( moments_rhs_i, ips_i,ipe_i, ijs_i,ije_i, ikys,ikye, ikxs,ikxe, izs,ize, 1,ntimelevel ) - CALL allocate_array( nadiab_moments_i, ipgs_i,ipge_i, ijgs_i,ijge_i, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( ddz_nipj, ipgs_i,ipge_i, ijgs_i,ijge_i, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( ddzND_Nipj, ipgs_i,ipge_i, ijgs_i,ijge_i, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( interp_nipj, ipgs_i,ipge_i, ijgs_i,ijge_i, ikys,ikye, ikxs,ikxe, izgs,izge) - CALL allocate_array( moments_i_ZF, ipgs_i,ipge_i, ijgs_i,ijge_i, ikxs,ikxe, izs,ize) - CALL allocate_array( moments_i_NZ, ipgs_i,ipge_i, ijgs_i,ijge_i, ikys,ikye, izs,ize) - CALL allocate_array( TColl_i, ips_i,ipe_i, ijs_i,ije_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( Sipj, ips_i,ipe_i, ijs_i,ije_i, ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( xnipj, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( xnipp2j, ips_i,ipe_i) - CALL allocate_array( xnipp1j, ips_i,ipe_i) - CALL allocate_array( xnipm1j, ips_i,ipe_i) - CALL allocate_array( xnipm2j, ips_i,ipe_i) - CALL allocate_array( xnipjp1, ijs_i,ije_i) - CALL allocate_array( xnipjm1, ijs_i,ije_i) - CALL allocate_array( ynipp1j, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( ynipm1j, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( ynipp1jm1, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( ynipm1jm1, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( zNipm1j, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( zNipm1jp1, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( zNipm1jm1, ips_i,ipe_i, ijs_i,ije_i) + !Moments related arrays + CALL allocate_array( Na00, 1,local_Na, 
1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( dens, 1,local_Na, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( upar, 1,local_Na, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( uper, 1,local_Na, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( Tpar, 1,local_Na, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( Tper, 1,local_Na, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( temp, 1,local_Na, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( Kernel, 1,local_Na, 1,local_Nj+Ngj, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz, 1,2) + CALL allocate_array( moments, 1,local_Na, 1,local_Np+Ngp, 1,local_Nj+Ngj, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz, 1,ntimelevel ) + CALL allocate_array( Napjz, 1,local_Na, 1,local_Np, 1,local_Nj, 1,local_Nz) + CALL allocate_array( moments_rhs, 1,local_Na, 1,local_Np, 1,local_Nj, 1,local_nky, 1,local_nkx, 1,local_Nz, 1,ntimelevel ) + CALL allocate_array( nadiab_moments, 1,local_Na, 1,local_Np+Ngp, 1,local_Nj+Ngj, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz) + CALL allocate_array( ddz_napj, 1,local_Na, 1,local_Np+Ngp, 1,local_Nj+Ngj, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz) + CALL allocate_array( ddzND_Napj, 1,local_Na, 1,local_Np+Ngp, 1,local_Nj+Ngj, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz) + CALL allocate_array( interp_napj, 1,local_Na, 1,local_Np+Ngp, 1,local_Nj+Ngj, 1,local_nky, 1,local_nkx, 1,local_Nz+Ngz) + CALL allocate_array( Capj, 1,local_Na, 1,local_Np, 1,local_Nj, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( Sapj, 1,local_Na, 1,local_Np, 1,local_Nj, 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( xnapj, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( xnapp2j, 1,local_Na, 1,local_Np) + CALL allocate_array( xnapp1j, 1,local_Na, 1,local_Np) + CALL allocate_array( xnapm1j, 1,local_Na, 1,local_Np) + CALL allocate_array( xnapm2j, 1,local_Na, 1,local_Np) + CALL allocate_array( xnapjp1, 1,local_Na, 1,local_Nj) + CALL allocate_array( xnapjm1, 1,local_Na, 1,local_Nj) + CALL allocate_array( ynapp1j, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( ynapm1j, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( ynapp1jm1, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( ynapm1jm1, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( zNapm1j, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( zNapm1jp1, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( zNapm1jm1, 1,local_Na, 1,local_Np, 1,local_Nj) ! Non linear terms and dnjs table - CALL allocate_array( dnjs, 1,maxj+1, 1,maxj+1, 1,maxj+1) + CALL allocate_array( dnjs, 1,jmax+1, 1,jmax+1, 1,jmax+1) ! Hermite fourth derivative coeff table 4*sqrt(p!/(p-4)!) - CALL allocate_array( dv4_Hp_coeff, -2, MAX(pmaxe,pmaxi)) + CALL allocate_array( dv4_Hp_coeff, -2, pmax) - ! elect. pot. 
linear terms - IF (KIN_E) THEN - CALL allocate_array( xphij_e, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( xphijp1_e, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( xphijm1_e, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( xpsij_e, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( xpsijp1_e, ips_e,ipe_e, ijs_e,ije_e) - CALL allocate_array( xpsijm1_e, ips_e,ipe_e, ijs_e,ije_e) - ENDIF - CALL allocate_array( xphij_i, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( xphijp1_i, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( xphijm1_i, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( xpsij_i, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( xpsijp1_i, ips_i,ipe_i, ijs_i,ije_i) - CALL allocate_array( xpsijm1_i, ips_i,ipe_i, ijs_i,ije_i) + CALL allocate_array( xphij, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( xphijp1, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( xphijm1, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( xpsij, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( xpsijp1, 1,local_Na, 1,local_Np, 1,local_Nj) + CALL allocate_array( xpsijm1, 1,local_Na, 1,local_Np, 1,local_Nj) !___________________ 2x5D ARRAYS __________________________ !! Collision matrices - IF (gyrokin_CO) THEN !GK collision matrices (one for each kperp) - IF (KIN_E) THEN - CALL allocate_array( Ceepj, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( CeipjT, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( CeipjF, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxi+1)*(jmaxi+1), ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( CiepjT, 1,(pmaxi+1)*(jmaxi+1), 1,(pmaxi+1)*(jmaxi+1), ikys,ikye, ikxs,ikxe, izs,ize) - CALL allocate_array( CiepjF, 1,(pmaxi+1)*(jmaxi+1), 1,(pmaxe+1)*(jmaxe+1), ikys,ikye, ikxs,ikxe, izs,ize) - ENDIF - CALL allocate_array( Ciipj, 1,(pmaxi+1)*(jmaxi+1), 1,(pmaxi+1)*(jmaxi+1), ikys,ikye, ikxs,ikxe, izs,ize) + IF (GK_CO) THEN !GK collision matrices (one for each kperp) + CALL allocate_array( Cab_F, 1,Na, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( Cab_T, 1,Na, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), 1,local_nky, 1,local_nkx, 1,local_Nz) + CALL allocate_array( Caa, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), 1,local_nky, 1,local_nkx, 1,local_Nz) ELSE !DK collision matrix (same for every k) - IF (KIN_E) THEN - CALL allocate_array( Ceepj, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), 1,1, 1,1, 1,1) - CALL allocate_array( CeipjT, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxe+1)*(jmaxe+1), 1,1, 1,1, 1,1) - CALL allocate_array( CeipjF, 1,(pmaxe+1)*(jmaxe+1), 1,(pmaxi+1)*(jmaxi+1), 1,1, 1,1, 1,1) - CALL allocate_array( CiepjT, 1,(pmaxi+1)*(jmaxi+1), 1,(pmaxi+1)*(jmaxi+1), 1,1, 1,1, 1,1) - CALL allocate_array( CiepjF, 1,(pmaxi+1)*(jmaxi+1), 1,(pmaxe+1)*(jmaxe+1), 1,1, 1,1, 1,1) - ENDIF - CALL allocate_array( Ciipj, 1,(pmaxi+1)*(jmaxi+1), 1,(pmaxi+1)*(jmaxi+1), 1,1, 1,1, 1,1) - ENDIF + CALL allocate_array( Cab_F, 1,Na, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), 1,1, 1,1, 1,1) + CALL allocate_array( Cab_T, 1,Na, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), 1,1, 1,1, 1,1) + CALL allocate_array( Caa, 1,Na, 1,(pmax+1)*(jmax+1), 1,(pmax+1)*(jmax+1), 1,1, 1,1, 1,1) +ENDIF END SUBROUTINE memory diff --git a/src/miller_mod.F90 b/src/miller_mod.F90 index 34a175cd..688561e3 100644 --- a/src/miller_mod.F90 +++ b/src/miller_mod.F90 @@ -4,8 +4,9 @@ MODULE miller USE prec_const USE basic + USE parallel, ONLY: 
my_id, num_procs_z, nbr_U, nbr_D, comm0 ! use coordinates,only: gcoor, get_dzprimedz - USE grid + USE grid, ONLY: local_Nky, local_Nkx, local_Nz, Ngz, kyarray, kxarray, zarray, Nz, local_nz_offset ! use discretization USE lagrange_interpolation ! use par_in, only: beta, sign_Ip_CW, sign_Bt_CW, Npol @@ -22,7 +23,7 @@ MODULE miller real(dp) :: rho, kappa, delta, s_kappa, s_delta, drR, drZ, zeta, s_zeta real(dp) :: thetaShift real(dp) :: thetak, thetad - + INTEGER :: ierr CONTAINS !>Set defaults for miller parameters @@ -44,7 +45,7 @@ CONTAINS end subroutine set_miller_parameters !>Get Miller metric, magnetic field, jacobian etc. - subroutine get_miller(trpeps,major_R,major_Z,q0,shat,amhd,edge_opt,& + subroutine get_miller(trpeps,major_R,major_Z,q0,shat,Npol,amhd,edge_opt,& C_y,C_xy,dpdx_pm_geom,gxx_,gyy_,gzz_,gxy_,gxz_,gyz_,dBdx_,dBdy_,& Bfield_,jacobian_,dBdz_,R_hat_,Z_hat_,dxdR_,dxdZ_,Ckxky_,gradz_coeff_) !!!!!!!!!!!!!!!! GYACOMO INTERFACE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -53,17 +54,18 @@ CONTAINS real(dp), INTENT(INOUT) :: major_Z ! major Z real(dp), INTENT(INOUT) :: q0 ! safetyfactor real(dp), INTENT(INOUT) :: shat ! safetyfactor + INTEGER, INTENT(IN) :: Npol ! number of poloidal turns real(dp), INTENT(INOUT) :: amhd ! alpha mhd real(dp), INTENT(INOUT) :: edge_opt ! alpha mhd real(dp), INTENT(INOUT) :: dpdx_pm_geom ! amplitude mag. eq. pressure grad. real(dp), INTENT(INOUT) :: C_y, C_xy - - real(dp), dimension(izgs:izge,0:1), INTENT(INOUT) :: & + real(dp), dimension(1:local_Nz+Ngz,1:2), INTENT(INOUT) :: & gxx_,gyy_,gzz_,gxy_,gxz_,gyz_,& dBdx_,dBdy_,Bfield_,jacobian_,& dBdz_,R_hat_,Z_hat_,dxdR_,dxdZ_, & gradz_coeff_ - real(dp), dimension(ikys:ikye,ikxs:ikxe,izgs:izge,0:1), INTENT(INOUT) :: Ckxky_ + real(dp), dimension(1:local_Nky,1:local_Nkx,1:local_Nz+Ngz,1:2), INTENT(INOUT) :: Ckxky_ + INTEGER :: iz, ikx, iky, eo ! No parameter in gyacomo yet real(dp) :: sign_Ip_CW=1 ! current sign (only normal current) real(dp) :: sign_Bt_CW=1 ! current sign (only normal current) @@ -438,7 +440,7 @@ CONTAINS !new parallel coordinate chi_out==zprime !see also tracer_aux.F90 if (Npol>1) ERROR STOP '>> ERROR << Npol>1 has not been implemented for edge_opt=\=0.0' - do k=izs,ize + do k=1,Nz chi_out(k)=sinh((-pi+k*2.*pi/Nz)*log(edge_opt*pi+sqrt(edge_opt**2*pi**2+1))/pi)/edge_opt enddo !transform metrics according to chain rule @@ -473,24 +475,25 @@ CONTAINS call lag3interp(Z_s,chi_s,np_s,Z_out,chi_out,Nz) call lag3interp(dxdR_s,chi_s,np_s,dxdR_out,chi_out,Nz) call lag3interp(dxdZ_s,chi_s,np_s,dxdZ_out,chi_out,Nz) - ! Fill the geom arrays with the results - do eo=0,1 - gxx_(izs:ize,eo) =gxx_out(izs:ize) - gyy_(izs:ize,eo) =gyy_out(izs:ize) - gxz_(izs:ize,eo) =gxz_out(izs:ize) - gyz_(izs:ize,eo) =gyz_out(izs:ize) - dBdx_(izs:ize,eo) =dBdx_out(izs:ize) - dBdy_(izs:ize,eo) =0. - gxy_(izs:ize,eo) =gxy_out(izs:ize) - gzz_(izs:ize,eo) =gzz_out(izs:ize) - Bfield_(izs:ize,eo) =Bfield_out(izs:ize) - jacobian_(izs:ize,eo) =jacobian_out(izs:ize) - dBdz_(izs:ize,eo) =dBdz_out(izs:ize) - R_hat_(izs:ize,eo) =R_out(izs:ize) - Z_hat_(izs:ize,eo) =Z_out(izs:ize) - dxdR_(izs:ize,eo) = dxdR_out(izs:ize) - dxdZ_(izs:ize,eo) = dxdZ_out(izs:ize) - + ! Fill the interior of the geom arrays with the results + do eo=1,2 + DO iz = 1,local_Nz + gxx_(iz+Ngz/2,eo) = gxx_out(iz-local_nz_offset) + gyy_(iz+Ngz/2,eo) = gyy_out(iz-local_nz_offset) + gxz_(iz+Ngz/2,eo) = gxz_out(iz-local_nz_offset) + gyz_(iz+Ngz/2,eo) = gyz_out(iz-local_nz_offset) + dBdx_(iz+Ngz/2,eo) = dBdx_out(iz-local_nz_offset) + dBdy_(iz+Ngz/2,eo) = 0. 
+ gxy_(iz+Ngz/2,eo) = gxy_out(iz-local_nz_offset) + gzz_(iz+Ngz/2,eo) = gzz_out(iz-local_nz_offset) + Bfield_(iz+Ngz/2,eo) = Bfield_out(iz-local_nz_offset) + jacobian_(iz+Ngz/2,eo) = jacobian_out(iz-local_nz_offset) + dBdz_(iz+Ngz/2,eo) = dBdz_out(iz-local_nz_offset) + R_hat_(iz+Ngz/2,eo) = R_out(iz-local_nz_offset) + Z_hat_(iz+Ngz/2,eo) = Z_out(iz-local_nz_offset) + dxdR_(iz+Ngz/2,eo) = dxdR_out(iz-local_nz_offset) + dxdZ_(iz+Ngz/2,eo) = dxdZ_out(iz-local_nz_offset) + ENDDO !! UPDATE GHOSTS VALUES (since the miller function in GENE does not) CALL update_ghosts_z(gxx_(:,eo)) CALL update_ghosts_z(gyy_(:,eo)) @@ -508,16 +511,16 @@ CONTAINS CALL update_ghosts_z(dxdZ_(:,eo)) ! Curvature operator (Frei et al. 2022 eq 2.15) - DO iz = izgs,izge + DO iz = 1,local_Nz+Ngz G1 = gxy_(iz,eo)*gxy_(iz,eo)-gxx_(iz,eo)*gyy_(iz,eo) G2 = gxy_(iz,eo)*gxz_(iz,eo)-gxx_(iz,eo)*gyz_(iz,eo) G3 = gyy_(iz,eo)*gxz_(iz,eo)-gxy_(iz,eo)*gyz_(iz,eo) Cx = (G1*dBdy_(iz,eo) + G2*dBdz_(iz,eo))/Bfield_(iz,eo) Cy = (G3*dBdz_(iz,eo) - G1*dBdx_(iz,eo))/Bfield_(iz,eo) - DO iky = ikys, ikye + DO iky = 1,local_Nky ky = kyarray(iky) - DO ikx= ikxs, ikxe + DO ikx= 1,local_Nkx kx = kxarray(ikx) Ckxky_(iky, ikx, iz,eo) = (Cx*kx + Cy*ky) ENDDO @@ -533,41 +536,42 @@ CONTAINS SUBROUTINE update_ghosts_z(fz_) IMPLICIT NONE ! INTEGER, INTENT(IN) :: nztot_ - REAL(dp), DIMENSION(izgs:izge), INTENT(INOUT) :: fz_ + REAL(dp), DIMENSION(1:local_Nz+Ngz), INTENT(INOUT) :: fz_ REAL(dp), DIMENSION(-2:2) :: buff - INTEGER :: status(MPI_STATUS_SIZE), count - + INTEGER :: status(MPI_STATUS_SIZE), count, last, first + last = local_Nz+Ngz/2 + first= 1 + Ngz/2 IF(Nz .GT. 1) THEN IF (num_procs_z .GT. 1) THEN CALL MPI_BARRIER(MPI_COMM_WORLD,ierr) count = 1 ! one point to exchange !!!!!!!!!!! Send ghost to up neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(fz_(ize), count, MPI_DOUBLE, nbr_U, 0, & ! Send to Up the last - buff(-1), count, MPI_DOUBLE, nbr_D, 0, & ! Receive from Down the first-1 + CALL mpi_sendrecv(fz_(last), count, MPI_DOUBLE, nbr_U, 0, & ! Send to Up the last + buff(-1), count, MPI_DOUBLE, nbr_D, 0, & ! Receive from Down the first-1 comm0, status, ierr) - CALL mpi_sendrecv(fz_(ize-1), count, MPI_DOUBLE, nbr_U, 0, & ! Send to Up the last - buff(-2), count, MPI_DOUBLE, nbr_D, 0, & ! Receive from Down the first-2 + CALL mpi_sendrecv(fz_(last-1), count, MPI_DOUBLE, nbr_U, 0, & ! Send to Up the last + buff(-2), count, MPI_DOUBLE, nbr_D, 0, & ! Receive from Down the first-2 comm0, status, ierr) !!!!!!!!!!! Send ghost to down neighbour !!!!!!!!!!!!!!!!!!!!!! - CALL mpi_sendrecv(fz_(izs), count, MPI_DOUBLE, nbr_D, 0, & ! Send to Down the first - buff(+1), count, MPI_DOUBLE, nbr_U, 0, & ! Recieve from Up the last+1 + CALL mpi_sendrecv(fz_(first), count, MPI_DOUBLE, nbr_D, 0, & ! Send to Down the first + buff(+1), count, MPI_DOUBLE, nbr_U, 0, & ! Recieve from Up the last+1 comm0, status, ierr) - CALL mpi_sendrecv(fz_(izs+1), count, MPI_DOUBLE, nbr_D, 0, & ! Send to Down the first - buff(+2), count, MPI_DOUBLE, nbr_U, 0, & ! Recieve from Up the last+2 + CALL mpi_sendrecv(fz_(first+1), count, MPI_DOUBLE, nbr_D, 0, & ! Send to Down the first + buff(+2), count, MPI_DOUBLE, nbr_U, 0, & ! 
Recieve from Up the last+2 comm0, status, ierr) ELSE - buff(-1) = fz_(ize ) - buff(-2) = fz_(ize-1) - buff(+1) = fz_(izs ) - buff(+2) = fz_(izs+1) + buff(-1) = fz_(last ) + buff(-2) = fz_(last-1) + buff(+1) = fz_(first ) + buff(+2) = fz_(first+1) ENDIF - fz_(ize+1) = buff(+1) - fz_(ize+2) = buff(+2) - fz_(izs-1) = buff(-1) - fz_(izs-2) = buff(-2) + fz_(last +1) = buff(+1) + fz_(last +2) = buff(+2) + fz_(first-1) = buff(-1) + fz_(first-2) = buff(-2) ENDIF END SUBROUTINE update_ghosts_z diff --git a/src/model_mod.F90 b/src/model_mod.F90 index 400ec152..ae9df467 100644 --- a/src/model_mod.F90 +++ b/src/model_mod.F90 @@ -3,72 +3,45 @@ MODULE model USE prec_const IMPLICIT NONE PRIVATE - + ! INPUTS INTEGER, PUBLIC, PROTECTED :: CLOS = 0 ! linear truncation method INTEGER, PUBLIC, PROTECTED :: NL_CLOS = 0 ! nonlinear truncation method INTEGER, PUBLIC, PROTECTED :: KERN = 0 ! Kernel model CHARACTER(len=16), & PUBLIC, PROTECTED ::LINEARITY= 'linear' ! To turn on non linear bracket term - LOGICAL, PUBLIC, PROTECTED :: KIN_E = .true. ! Simulate kinetic electron (adiabatic otherwise) - INTEGER, PUBLIC, PROTECTED :: N_HD = 4 ! order of numerical spatial diffusion REAL(dp), PUBLIC, PROTECTED :: mu_x = 0._dp ! spatial x-Hyperdiffusivity coefficient (for num. stability) REAL(dp), PUBLIC, PROTECTED :: mu_y = 0._dp ! spatial y-Hyperdiffusivity coefficient (for num. stability) + INTEGER, PUBLIC, PROTECTED :: N_HD = 4 ! order of numerical spatial diffusion LOGICAL, PUBLIC, PROTECTED :: HDz_h = .false. ! to apply z-hyperdiffusion on non adiab part REAL(dp), PUBLIC, PROTECTED :: mu_z = 0._dp ! spatial z-Hyperdiffusivity coefficient (for num. stability) - CHARACTER(len=16), & - PUBLIC, PROTECTED :: HYP_V = 'hypcoll' ! hyperdiffusion model for velocity space ('none','hypcoll','dvpar4') REAL(dp), PUBLIC, PROTECTED :: mu_p = 0._dp ! kinetic para hyperdiffusivity coefficient (for num. stability) REAL(dp), PUBLIC, PROTECTED :: mu_j = 0._dp ! kinetic perp hyperdiffusivity coefficient (for num. stability) - REAL(dp), PUBLIC, PROTECTED :: nu = 0._dp ! Collision frequency - REAL(dp), PUBLIC, PROTECTED :: tau_e = 1._dp ! Temperature - REAL(dp), PUBLIC, PROTECTED :: tau_i = 1._dp ! - REAL(dp), PUBLIC, PROTECTED :: sigma_e = 0.023338_dp! sqrt of electron ion mass ratio - REAL(dp), PUBLIC, PROTECTED :: sigma_i = 1._dp ! - REAL(dp), PUBLIC, PROTECTED :: q_e = -1._dp ! Charge - REAL(dp), PUBLIC, PROTECTED :: q_i = 1._dp ! - REAL(dp), PUBLIC, PROTECTED :: k_Ni = 0._dp ! Ion density drive (L_ref/L_Ni) - REAL(dp), PUBLIC, PROTECTED :: k_Ne = 0._dp ! Ele '' - REAL(dp), PUBLIC, PROTECTED :: k_Ti = 0._dp ! Ion temperature drive (L_ref/L_Ti) - REAL(dp), PUBLIC, PROTECTED :: k_Te = 0._dp ! Ele '' - REAL(dp), PUBLIC, PROTECTED :: K_E = 0._dp ! Backg. electric field drive (L_ref/L_E) - REAL(dp), PUBLIC, PROTECTED :: k_gB = 1._dp ! Magnetic gradient drive (L_ref/L_gB) - REAL(dp), PUBLIC, PROTECTED :: k_cB = 1._dp ! Magnetic curvature drive (L_ref/L_cB) + CHARACTER(len=16), & + PUBLIC, PROTECTED :: HYP_V = 'hypcoll' ! hyperdiffusion model for velocity space ('none','hypcoll','dvpar4') + INTEGER, PUBLIC, PROTECTED :: Na = 1 ! number of evolved species + REAL(dp), PUBLIC, PROTECTED :: nu = 0._dp ! collision frequency parameter + REAL(dp), PUBLIC, PROTECTED :: k_gB = 1._dp ! Magnetic gradient strength (L_ref/L_gB) + REAL(dp), PUBLIC, PROTECTED :: k_cB = 1._dp ! Magnetic curvature strength (L_ref/L_cB) REAL(dp), PUBLIC, PROTECTED :: lambdaD = 0._dp ! Debye length REAL(dp), PUBLIC, PROTECTED :: beta = 0._dp ! 
electron plasma Beta (8piNT_e/B0^2) - + LOGICAL, PUBLIC :: ADIAB_E = .false. ! adiabatic electron model + REAL(dp), PUBLIC, PROTECTED :: tau_e = 1.0 ! electron temperature ratio for adiabatic electrons + ! Auxiliary variable LOGICAL, PUBLIC, PROTECTED :: EM = .false. ! Electromagnetic effects flag - REAL(dp), PUBLIC, PROTECTED :: taue_qe ! factor of the magnetic moment coupling - REAL(dp), PUBLIC, PROTECTED :: taui_qi ! - REAL(dp), PUBLIC, PROTECTED :: qi_taui ! - REAL(dp), PUBLIC, PROTECTED :: qe_taue ! - REAL(dp), PUBLIC, PROTECTED :: sqrtTaue_qe ! factor of parallel moment term - REAL(dp), PUBLIC, PROTECTED :: sqrtTaui_qi ! - REAL(dp), PUBLIC, PROTECTED :: qe_sigmae_sqrtTaue ! factor of parallel phi term - REAL(dp), PUBLIC, PROTECTED :: qi_sigmai_sqrtTaui ! - REAL(dp), PUBLIC, PROTECTED :: sigmae2_taue_o2 ! factor of the Kernel argument - REAL(dp), PUBLIC, PROTECTED :: sigmai2_taui_o2 ! - REAL(dp), PUBLIC, PROTECTED :: sqrt_sigmae2_taue_o2 ! factor of the Kernel argument - REAL(dp), PUBLIC, PROTECTED :: sqrt_sigmai2_taui_o2 - REAL(dp), PUBLIC, PROTECTED :: nu_e, nu_i ! electron-ion, ion-ion collision frequency - REAL(dp), PUBLIC, PROTECTED :: nu_ee, nu_ie ! e-e, i-e coll. frequ. - REAL(dp), PUBLIC, PROTECTED :: qe2_taue, qi2_taui ! factor of the gammaD sum - REAL(dp), PUBLIC, PROTECTED :: q_o_sqrt_tau_sigma_e, q_o_sqrt_tau_sigma_i - REAL(dp), PUBLIC, PROTECTED :: sqrt_tau_o_sigma_e, sqrt_tau_o_sigma_i - REAL(dp), PUBLIC, PROTECTED :: dpdx = 0 ! radial pressure gradient PUBLIC :: model_readinputs, model_outputinputs CONTAINS SUBROUTINE model_readinputs ! Read the input parameters - USE basic, ONLY : lu_in, my_id, num_procs_p + USE basic, ONLY: lu_in + USE parallel, ONLY: my_id,num_procs_p USE prec_const IMPLICIT NONE - NAMELIST /MODEL_PAR/ CLOS, NL_CLOS, KERN, LINEARITY, KIN_E, & - mu_x, mu_y, N_HD, HDz_h, mu_z, mu_p, mu_j, HYP_V, nu,& - tau_e, tau_i, sigma_e, sigma_i, q_e, q_i,& - k_Ne, k_Ni, k_Te, k_Ti, k_gB, k_cB, lambdaD, beta + NAMELIST /MODEL_PAR/ CLOS, NL_CLOS, KERN, LINEARITY, & + mu_x, mu_y, N_HD, HDz_h, mu_z, mu_p, mu_j, HYP_V, Na,& + nu, k_gB, k_cB, lambdaD, beta, ADIAB_E, tau_e READ(lu_in,model_par) @@ -76,7 +49,7 @@ CONTAINS ERROR STOP '>> ERROR << dvpar4 velocity dissipation is not compatible with current p parallelization' ENDIF - IF(.NOT. KIN_E) THEN + IF(Na .EQ. 1) THEN IF(my_id.EQ.0) print*, 'Adiabatic electron model -> beta = 0' beta = 0._dp ENDIF @@ -84,77 +57,39 @@ CONTAINS IF(beta .GT. 0) THEN IF(my_id.EQ.0) print*, 'Electromagnetic effects are included' EM = .TRUE. - dpdx = -(tau_i*(k_Ni + k_Ti) + tau_e*(k_Ne + k_Te)) ENDIF - taue_qe = tau_e/q_e ! factor of the magnetic moment coupling - taui_qi = tau_i/q_i ! factor of the magnetic moment coupling - qe_taue = q_e/tau_e - qi_taui = q_i/tau_i - sqrtTaue_qe = sqrt(tau_e)/q_e ! factor of parallel moment term - sqrtTaui_qi = sqrt(tau_i)/q_i ! factor of parallel moment term - qe_sigmae_sqrtTaue = q_e/sigma_e/SQRT(tau_e) ! factor of parallel phi term - qi_sigmai_sqrtTaui = q_i/sigma_i/SQRT(tau_i) - qe2_taue = (q_e**2)/tau_e ! factor of the gammaD sum - qi2_taui = (q_i**2)/tau_i - sigmae2_taue_o2 = sigma_e**2 * tau_e/2._dp ! factor of the Kernel argument - sigmai2_taui_o2 = sigma_i**2 * tau_i/2._dp - sqrt_sigmae2_taue_o2 = SQRT(sigma_e**2 * tau_e/2._dp) ! to avoid multiple SQRT eval - sqrt_sigmai2_taui_o2 = SQRT(sigma_i**2 * tau_i/2._dp) - q_o_sqrt_tau_sigma_e = q_e/SQRT(tau_e)/sigma_e ! For psi field terms - q_o_sqrt_tau_sigma_i = q_i/SQRT(tau_i)/sigma_i ! For psi field terms - sqrt_tau_o_sigma_e = SQRT(tau_e)/sigma_e ! 
For Ampere eq - sqrt_tau_o_sigma_i = SQRT(tau_i)/sigma_i - !! We use the ion-ion collision as normalization with definition - ! nu_ii = 4 sqrt(pi)/3 T_i^(-3/2) m_i^(-1/2) q^4 n_i0 ln(Lambda) - ! - nu_e = nu/sigma_e * (tau_e)**(3._dp/2._dp) ! electron-ion collision frequency (where already multiplied by 0.532) - nu_i = nu ! ion-ion collision frequ. - nu_ee = nu_e ! e-e coll. frequ. - nu_ie = nu_i ! i-e coll. frequ. - - ! Old normalization (MOLI Jorge/Frei) - ! nu_e = 0.532_dp*nu ! electron-ion collision frequency (where already multiplied by 0.532) - ! nu_i = 0.532_dp*nu*sigma_e*tau_e**(-3._dp/2._dp)/SQRT2 ! ion-ion collision frequ. - ! nu_ee = nu_e/SQRT2 ! e-e coll. frequ. - ! nu_ie = 0.532_dp*nu*sigma_e**2 ! i-e coll. frequ. - END SUBROUTINE model_readinputs - SUBROUTINE model_outputinputs(fidres, str) - ! Write the input parameters to the results_xx.h5 file - - USE futils, ONLY: attach - USE prec_const + SUBROUTINE model_outputinputs(fid) + ! Write the input parameters to the results_xx.h5 file + USE futils, ONLY: attach, creatd IMPLICIT NONE - - INTEGER, INTENT(in) :: fidres - CHARACTER(len=256), INTENT(in) :: str - CALL attach(fidres, TRIM(str), "CLOS", CLOS) - CALL attach(fidres, TRIM(str), "KERN", KERN) - CALL attach(fidres, TRIM(str), "LINEARITY", LINEARITY) - CALL attach(fidres, TRIM(str), "KIN_E", KIN_E) - CALL attach(fidres, TRIM(str), "nu", nu) - CALL attach(fidres, TRIM(str), "mu", 0) - CALL attach(fidres, TRIM(str), "mu_x", mu_x) - CALL attach(fidres, TRIM(str), "mu_y", mu_y) - CALL attach(fidres, TRIM(str), "mu_z", mu_z) - CALL attach(fidres, TRIM(str), "mu_p", mu_p) - CALL attach(fidres, TRIM(str), "mu_j", mu_j) - CALL attach(fidres, TRIM(str), "tau_e", tau_e) - CALL attach(fidres, TRIM(str), "tau_i", tau_i) - CALL attach(fidres, TRIM(str), "sigma_e", sigma_e) - CALL attach(fidres, TRIM(str), "sigma_i", sigma_i) - CALL attach(fidres, TRIM(str), "q_e", q_e) - CALL attach(fidres, TRIM(str), "q_i", q_i) - CALL attach(fidres, TRIM(str), "k_Ne", k_Ne) - CALL attach(fidres, TRIM(str), "k_Ni", k_Ni) - CALL attach(fidres, TRIM(str), "k_Te", k_Te) - CALL attach(fidres, TRIM(str), "k_Ti", k_Ti) - CALL attach(fidres, TRIM(str), "K_E", K_E) - CALL attach(fidres, TRIM(str), "lambdaD", lambdaD) - CALL attach(fidres, TRIM(str), "beta", beta) + INTEGER, INTENT(in) :: fid + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/model' + CALL creatd(fid, 0,(/0/),TRIM(str),'Model Input') + CALL attach(fid, TRIM(str), "CLOS", CLOS) + CALL attach(fid, TRIM(str), "NL_CLOS", NL_CLOS) + CALL attach(fid, TRIM(str), "KERN", KERN) + CALL attach(fid, TRIM(str), "LINEARITY", LINEARITY) + CALL attach(fid, TRIM(str), "mu_x", mu_x) + CALL attach(fid, TRIM(str), "mu_y", mu_y) + CALL attach(fid, TRIM(str), "N_HD", N_HD) + CALL attach(fid, TRIM(str), "mu_z", mu_z) + CALL attach(fid, TRIM(str), "HDz_h", HDz_h) + CALL attach(fid, TRIM(str), "mu_p", mu_p) + CALL attach(fid, TRIM(str), "mu_j", mu_j) + CALL attach(fid, TRIM(str), "HYP_V", HYP_V) + CALL attach(fid, TRIM(str), "Na", Na) + CALL attach(fid, TRIM(str), "nu", nu) + CALL attach(fid, TRIM(str), "k_gB", k_gB) + CALL attach(fid, TRIM(str), "k_cB", k_cB) + CALL attach(fid, TRIM(str), "lambdaD", lambdaD) + CALL attach(fid, TRIM(str), "beta", beta) + CALL attach(fid, TRIM(str), "ADIAB_E", ADIAB_E) + CALL attach(fid, TRIM(str), "tau_e", tau_e) END SUBROUTINE model_outputinputs END MODULE model diff --git a/src/moments_eq_rhs_mod.F90 b/src/moments_eq_rhs_mod.F90 index 4e2b7de1..599fe2c4 100644 --- a/src/moments_eq_rhs_mod.F90 +++ b/src/moments_eq_rhs_mod.F90 
@@ -7,326 +7,213 @@ SUBROUTINE compute_moments_eq_rhs USE model USE array USE fields - USE grid + USE grid, ONLY: local_na, local_np, local_nj, local_nkx, local_nky, local_nz,& + nzgrid,pp2,ngp,ngj,ngz,dmax,& + diff_dz_coeff,diff_kx_coeff,diff_ky_coeff,diff_p_coeff,diff_j_coeff,& + parray,jarray,kxarray, kyarray, kparray USE basic USE prec_const USE collision USE time_integration USE geometry, ONLY: gradz_coeff, dlnBdz, Ckxky!, Gamma_phipar - USE calculus, ONLY : interp_z, grad_z, grad_z2 + USE calculus, ONLY: interp_z, grad_z, grad_z2 + USE species, ONLY: dpdx IMPLICIT NONE - - !compute ion moments_eq_rhs - CALL moments_eq_rhs(ips_i,ipe_i,ipgs_i,ipge_i,ijs_i,ije_i,ijgs_i,ijge_i,jarray_i,parray_i,& - xnipj, xnipp2j, xnipm2j, xnipjp1, xnipjm1, xnipp1j, xnipm1j,& - ynipp1j, ynipp1jm1, ynipm1j, ynipm1jm1, & - znipm1j, znipm1jp1, znipm1jm1, & - xphij_i, xphijp1_i, xphijm1_i, xpsij_i, xpsijp1_i, xpsijm1_i,& - kernel_i, nadiab_moments_i, ddz_nipj, interp_nipj, Sipj,& - moments_i(ipgs_i:ipge_i,ijgs_i:ijge_i,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel),& - TColl_i, ddzND_Nipj, diff_pi_coeff, diff_ji_coeff,& - moments_rhs_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel)) - - !compute ion moments_eq_rhs - IF(KIN_E) & - CALL moments_eq_rhs(ips_e,ipe_e,ipgs_e,ipge_e,ijs_e,ije_e,ijgs_e,ijge_e,jarray_e,parray_e,& - xnepj, xnepp2j, xnepm2j, xnepjp1, xnepjm1, xnepp1j, xnepm1j,& - ynepp1j, ynepp1jm1, ynepm1j, ynepm1jm1, & - znepm1j, znepm1jp1, znepm1jm1, & - xphij_e, xphijp1_e, xphijm1_e, xpsij_e, xpsijp1_e, xpsijm1_e,& - kernel_e, nadiab_moments_e, ddz_nepj, interp_nepj, Sepj,& - moments_e(ipgs_e:ipge_e,ijgs_e:ijge_e,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel),& - TColl_e, ddzND_Nepj, diff_pe_coeff, diff_je_coeff,& - moments_rhs_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel)) - - CONTAINS - !_____________________________________________________________________________! - !_____________________________________________________________________________! - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !!! moments_ RHS computation !!!!!!! - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! This routine assemble the RHS of the moment hierarchy equations. It uses - ! linear coefficients that are stored in arrays (xn*, yn* and zn*) computed in - ! numerics_mod.F90. Otherwise it simply adds the collision term TColl_ that is - ! computed in collision_mod.F90 and the nonlinear term Sapj_ computed in - ! nonlinear_mod.F90. - ! All arguments of the subroutines are inputs only except the last one, - ! moments_rhs_ that will contain the sum of every terms in the RHS. - !_____________________________________________________________________________! - SUBROUTINE moments_eq_rhs(ips_,ipe_,ipgs_,ipge_,ijs_,ije_,ijgs_,ijge_,jarray_,parray_,& - xnapj_, xnapp2j_, xnapm2j_, xnapjp1_, xnapjm1_, xnapp1j_, xnapm1j_,& - ynapp1j_, ynapp1jm1_, ynapm1j_, ynapm1jm1_, & - znapm1j_, znapm1jp1_, znapm1jm1_, & - xphij_, xphijp1_, xphijm1_, xpsij_, xpsijp1_, xpsijm1_,& - kernel_, nadiab_moments_, ddz_napj_, interp_napj_, Sapj_,& - moments_, TColl_, ddzND_napj_, diff_p_coeff_, diff_j_coeff_, moments_rhs_) - - IMPLICIT NONE - !! 
INPUTS - INTEGER, INTENT(IN) :: ips_, ipe_, ipgs_, ipge_, ijs_, ije_, ijgs_, ijge_ - INTEGER, DIMENSION(ips_:ipe_), INTENT(IN) :: parray_ - INTEGER, DIMENSION(ijs_:ije_), INTENT(IN) :: jarray_ - REAL(dp), DIMENSION(ips_:ipe_,ijs_:ije_), INTENT(IN) :: xnapj_ - REAL(dp), DIMENSION(ips_:ipe_), INTENT(IN) :: xnapp2j_, xnapm2j_ - REAL(dp), DIMENSION(ijs_:ije_), INTENT(IN) :: xnapjp1_, xnapjm1_ - REAL(dp), DIMENSION(ips_:ipe_), INTENT(IN) :: xnapp1j_, xnapm1j_ - REAL(dp), DIMENSION(ips_:ipe_,ijs_:ije_), INTENT(IN) :: ynapp1j_, ynapp1jm1_, ynapm1j_, ynapm1jm1_ - REAL(dp), DIMENSION(ips_:ipe_,ijs_:ije_), INTENT(IN) :: znapm1j_, znapm1jp1_, znapm1jm1_ - REAL(dp), DIMENSION(ips_:ipe_,ijs_:ije_), INTENT(IN) :: xphij_, xphijp1_, xphijm1_ - REAL(dp), DIMENSION(ips_:ipe_,ijs_:ije_), INTENT(IN) :: xpsij_, xpsijp1_, xpsijm1_ - - REAL(dp), DIMENSION(ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge,0:1),INTENT(IN) :: kernel_ - - COMPLEX(dp), DIMENSION(ipgs_:ipge_,ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge),INTENT(IN) :: nadiab_moments_ - COMPLEX(dp), DIMENSION(ipgs_:ipge_,ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge),INTENT(IN) :: ddz_napj_ - COMPLEX(dp), DIMENSION(ipgs_:ipge_,ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge),INTENT(IN) :: interp_napj_ - COMPLEX(dp), DIMENSION( ips_:ipe_, ijs_:ije_, ikys:ikye,ikxs:ikxe, izs:ize), INTENT(IN) :: Sapj_ - COMPLEX(dp), DIMENSION(ipgs_:ipge_,ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge),INTENT(IN) :: moments_ - COMPLEX(dp), DIMENSION( ips_:ipe_, ijs_:ije_, ikys:ikye,ikxs:ikxe, izs:ize), INTENT(IN) :: TColl_ - COMPLEX(dp), DIMENSION(ipgs_:ipge_,ijgs_:ijge_,ikys:ikye,ikxs:ikxe,izgs:izge),INTENT(IN) :: ddzND_napj_ - REAL(dp), INTENT(IN) :: diff_p_coeff_, diff_j_coeff_ - !! OUTPUT - COMPLEX(dp), DIMENSION( ips_:ipe_, ijs_:ije_, ikys:ikye,ikxs:ikxe, izs:ize),INTENT(OUT) :: moments_rhs_ - - INTEGER :: p_int, j_int ! loops indices and polynom. degrees - REAL(dp) :: kx, ky, kperp2 - COMPLEX(dp) :: Tnapj, Tnapp2j, Tnapm2j, Tnapjp1, Tnapjm1 ! Terms from b x gradB and drives - COMPLEX(dp) :: Tnapp1j, Tnapm1j, Tnapp1jm1, Tnapm1jm1 ! Terms from mirror force with non adiab moments_ - COMPLEX(dp) :: Tpar, Tmir, Tphi, Tpsi - COMPLEX(dp) :: Mperp, Mpara, Dphi, Dpsi - COMPLEX(dp) :: Unapm1j, Unapm1jp1, Unapm1jm1 ! Terms from mirror force with adiab moments_ - COMPLEX(dp) :: i_kx,i_ky,phikykxz, psikykxz - - ! Measuring execution time - CALL cpu_time(t0_rhs) - - ! Spatial loops - zloop : DO iz = izs,ize - kxloop : DO ikx = ikxs,ikxe - kx = kxarray(ikx) ! radial wavevector - i_kx = imagu * kx ! radial derivative - - kyloop : DO iky = ikys,ikye - ky = kyarray(iky) ! binormal wavevector - i_ky = imagu * ky ! binormal derivative - psikykxz = psi(iky,ikx,iz)! tmp psi value - - ! Kinetic loops - jloop : DO ij = ijs_, ije_ ! This loop is from 1 to jmaxi+1 - j_int = jarray_(ij) - - ploop : DO ip = ips_, ipe_ ! Hermite loop - p_int = parray_(ip) ! Hermite degree - eo = MODULO(p_int,2) ! Indicates if we are on odd or even z grid - kperp2= kparray(iky,ikx,iz,eo)**2 - - IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmaxe)) THEN ! for the closure scheme - !! Compute moments_ mixing terms - ! Perpendicular dynamic - ! term propto n^{p,j} - Tnapj = xnapj_(ip,ij)* nadiab_moments_(ip,ij,iky,ikx,iz) - ! term propto n^{p+2,j} - Tnapp2j = xnapp2j_(ip) * nadiab_moments_(ip+pp2,ij,iky,ikx,iz) - ! term propto n^{p-2,j} - Tnapm2j = xnapm2j_(ip) * nadiab_moments_(ip-pp2,ij,iky,ikx,iz) - ! term propto n^{p,j+1} - Tnapjp1 = xnapjp1_(ij) * nadiab_moments_(ip,ij+1,iky,ikx,iz) - ! 
term propto n^{p,j-1} - Tnapjm1 = xnapjm1_(ij) * nadiab_moments_(ip,ij-1,iky,ikx,iz) - ! Perpendicular magnetic term (curvature and gradient drifts) - Mperp = imagu*Ckxky(iky,ikx,iz,eo)*(Tnapj + Tnapp2j + Tnapm2j + Tnapjp1 + Tnapjm1) - - ! Parallel dynamic - ! ddz derivative for Landau damping term - Tpar = xnapp1j_(ip) * ddz_napj_(ip+1,ij,iky,ikx,iz) & - + xnapm1j_(ip) * ddz_napj_(ip-1,ij,iky,ikx,iz) - ! Mirror terms - Tnapp1j = ynapp1j_ (ip,ij) * interp_napj_(ip+1,ij ,iky,ikx,iz) - Tnapp1jm1 = ynapp1jm1_(ip,ij) * interp_napj_(ip+1,ij-1,iky,ikx,iz) - Tnapm1j = ynapm1j_ (ip,ij) * interp_napj_(ip-1,ij ,iky,ikx,iz) - Tnapm1jm1 = ynapm1jm1_(ip,ij) * interp_napj_(ip-1,ij-1,iky,ikx,iz) - ! Trapping terms - Unapm1j = znapm1j_ (ip,ij) * interp_napj_(ip-1,ij ,iky,ikx,iz) - Unapm1jp1 = znapm1jp1_(ip,ij) * interp_napj_(ip-1,ij+1,iky,ikx,iz) - Unapm1jm1 = znapm1jm1_(ip,ij) * interp_napj_(ip-1,ij-1,iky,ikx,iz) - - Tmir = dlnBdz(iz,eo)*(Tnapp1j + Tnapp1jm1 + Tnapm1j + Tnapm1jm1 +& - Unapm1j + Unapm1jp1 + Unapm1jm1) - ! Parallel magnetic term (Landau damping and the mirror force) - Mpara = gradz_coeff(iz,eo)*(Tpar + Tmir) - !! Electrical potential term - IF ( p_int .LE. 2 ) THEN ! kronecker p0 p1 p2 - Dphi =i_ky*( xphij_ (ip,ij)*kernel_(ij ,iky,ikx,iz,eo) & - +xphijp1_(ip,ij)*kernel_(ij+1,iky,ikx,iz,eo) & - +xphijm1_(ip,ij)*kernel_(ij-1,iky,ikx,iz,eo) )*phi(iky,ikx,iz) + INTEGER :: ia, iz, iky, ikx, ip ,ij, eo ! counters + INTEGER :: izi,ipi,iji ! interior points counters + INTEGER :: p_int, j_int ! loops indices and polynom. degrees + REAL(dp) :: kx, ky, kperp2 + COMPLEX(dp) :: Tnapj, Tnapp2j, Tnapm2j, Tnapjp1, Tnapjm1 ! Terms from b x gradB and drives + COMPLEX(dp) :: Tnapp1j, Tnapm1j, Tnapp1jm1, Tnapm1jm1 ! Terms from mirror force with non adiab moments_ + COMPLEX(dp) :: Ldamp, Fmir + COMPLEX(dp) :: Mperp, Mpara, Dphi, Dpsi + COMPLEX(dp) :: Unapm1j, Unapm1jp1, Unapm1jm1 ! Terms from mirror force with adiab moments_ + COMPLEX(dp) :: i_kx,i_ky + COMPLEX(dp) :: Napj, RHS + ! Measuring execution time + CALL cpu_time(t0_rhs) + + ! Spatial loops + z:DO iz = 1,local_nz + izi = iz + ngz/2 + x:DO ikx = 1,local_nkx + kx = kxarray(ikx) ! radial wavevector + i_kx = imagu * kx ! radial derivative + y:DO iky = 1,local_nky + ky = kyarray(iky) ! binormal wavevector + i_ky = imagu * ky ! binormal derivative + ! Kinetic loops + j:DO ij = 1, local_nj ! This loop is from 1 to jmaxi+1 + iji = ij+ngj/2 + j_int = jarray(iji) + p:DO ip = 1, local_np ! Hermite loop + ipi = ip+ngp/2 + p_int = parray(ipi) ! Hermite degree + eo = min(nzgrid,MODULO(p_int,2)+1) ! Indicates if we are on odd or even z grid + kperp2= kparray(iky,ikx,izi,eo)**2 + Napj = moments(ia,ipi,iji,iky,ikx,izi,updatetlevel) + RHS = 0._dp + ! Species loop + a:DO ia = 1,local_na + IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmax)) THEN ! for the closure scheme + !! Compute moments_ mixing terms + ! Perpendicular dynamic + ! term propto n^{p,j} + Tnapj = xnapj(ia,ip,ij)* nadiab_moments(ia,ipi, iji, iky,ikx,izi) + ! term propto n^{p+2,j} + Tnapp2j = xnapp2j(ia,ip) * nadiab_moments(ia,ipi+pp2,iji, iky,ikx,izi) + ! term propto n^{p-2,j} + Tnapm2j = xnapm2j(ia,ip) * nadiab_moments(ia,ipi-pp2,iji, iky,ikx,izi) + ! term propto n^{p,j+1} + Tnapjp1 = xnapjp1(ia,ij) * nadiab_moments(ia,ipi, iji+1,iky,ikx,izi) + ! term propto n^{p,j-1} + Tnapjm1 = xnapjm1(ia,ij) * nadiab_moments(ia,ipi, iji-1,iky,ikx,izi) + ! Perpendicular magnetic term (curvature and gradient drifts) + Mperp = imagu*Ckxky(iky,ikx,iz,eo)*(Tnapj + Tnapp2j + Tnapm2j + Tnapjp1 + Tnapjm1) + ! 
Parallel dynamic + ! ddz derivative for Landau damping term + Ldamp = xnapp1j(ia,ip) * ddz_napj(ia,ipi+1,ij,iky,ikx,izi) & + + xnapm1j(ia,ip) * ddz_napj(ia,ipi-1,ij,iky,ikx,izi) + ! Mirror terms + Tnapp1j = ynapp1j (ia,ip,ij) * interp_napj(ia,ipi+1,ij ,iky,ikx,izi) + Tnapp1jm1 = ynapp1jm1(ia,ip,ij) * interp_napj(ia,ipi+1,ij-1,iky,ikx,izi) + Tnapm1j = ynapm1j (ia,ip,ij) * interp_napj(ia,ipi-1,ij ,iky,ikx,izi) + Tnapm1jm1 = ynapm1jm1(ia,ip,ij) * interp_napj(ia,ipi-1,ij-1,iky,ikx,izi) + ! Trapping terms + Unapm1j = znapm1j (ia,ip,ij) * interp_napj(ia,ipi-1,ij ,iky,ikx,izi) + Unapm1jp1 = znapm1jp1(ia,ip,ij) * interp_napj(ia,ipi-1,ij+1,iky,ikx,izi) + Unapm1jm1 = znapm1jm1(ia,ip,ij) * interp_napj(ia,ipi-1,ij-1,iky,ikx,izi) + ! sum the parallel forces + Fmir = dlnBdz(iz,eo)*(Tnapp1j + Tnapp1jm1 + Tnapm1j + Tnapm1jm1 +& + Unapm1j + Unapm1jp1 + Unapm1jm1) + ! Parallel magnetic term (Landau damping and the mirror force) + Mpara = gradz_coeff(iz,eo)*(Ldamp + Fmir) + !! Electrical potential term + IF ( p_int .LE. 2 ) THEN ! kronecker p0 p1 p2 + Dphi =i_ky*( xphij (ia,ip,ij)*kernel(ia,iji ,iky,ikx,izi,eo) & + +xphijp1(ia,ip,ij)*kernel(ia,iji+1,iky,ikx,izi,eo) & + +xphijm1(ia,ip,ij)*kernel(ia,iji-1,iky,ikx,izi,eo) )*phi(iky,ikx,izi) + ELSE + Dphi = 0._dp + ENDIF + !! Vector potential term + IF ( (p_int .LE. 3) .AND. (p_int .GE. 1) ) THEN ! Kronecker p1 or p3 + Dpsi =-i_ky*( xpsij (ia,ip,ij)*kernel(ia,iji ,iky,ikx,izi,eo) & + +xpsijp1(ia,ip,ij)*kernel(ia,iji+1,iky,ikx,izi,eo) & + +xpsijm1(ia,ip,ij)*kernel(ia,iji-1,iky,ikx,izi,eo))*psi(iky,ikx,izi) + ELSE + Dpsi = 0._dp + ENDIF + !! Sum of all RHS terms + RHS = & + ! Nonlinear term Sapj_ = {phi,f} + - Sapj(ia,ip,ij,iky,ikx,iz) & + ! Perpendicular magnetic term + - Mperp & + ! Parallel magnetic term + - Mpara & + ! Drives (density + temperature gradients) + - (Dphi + Dpsi) & + ! Collision term + + Capj(ia,ip,ij,iky,ikx,iz) & + ! Perpendicular pressure effects (electromagnetic term) (TO CHECK) + - i_ky*beta*dpdx(ia) * (Tnapj + Tnapp2j + Tnapm2j + Tnapjp1 + Tnapjm1)& + ! Parallel drive term (should be negligible, to test) + ! -Gamma_phipar(iz,eo)*Tphi*ddz_phi(iky,ikx,iz) & + ! Numerical perpendicular hyperdiffusion + -mu_x*diff_kx_coeff*kx**N_HD*Napj & + -mu_y*diff_ky_coeff*ky**N_HD*Napj & + ! Numerical parallel hyperdiffusion "mu_z*ddz**4" see Pueschel 2010 (eq 25) + -mu_z*diff_dz_coeff*ddzND_napj(ia,ipi,iji,iky,ikx,izi) + !! Velocity space dissipation (should be implemented somewhere else) + SELECT CASE(HYP_V) + CASE('hypcoll') ! GX like Hermite hypercollisions see Mandell et al. 2023 (eq 3.23), unadvised to use it + IF (p_int .GT. 2) & + RHS = RHS - mu_p*diff_p_coeff*p_int**6*Napj + IF (j_int .GT. 1) & + RHS = RHS - mu_j*diff_j_coeff*j_int**6*Napj + CASE('dvpar4') + ! fourth order numerical diffusion in vpar + IF(ip-4 .GT. 0) & + ! Numerical parallel velocity hyperdiffusion "+ dvpar4 g_a" see Pueschel 2010 (eq 33) + ! (not used often so not parallelized) + RHS = RHS + mu_p*dv4_Hp_coeff(p_int)*moments(ia,ipi-4,iji,iky,ikx,izi,updatetlevel) + ! + dummy Laguerre diff + IF (j_int .GT. 1) & + RHS = RHS - mu_j*diff_j_coeff*j_int**6*Napj + CASE DEFAULT + END SELECT ELSE - Tphi = 0._dp + RHS = 0._dp ENDIF - - !! Vector potential term - IF ( (p_int .LE. 3) .AND. (p_int .GE. 1) ) THEN ! Kronecker p1 or p3 - Dpsi =-i_ky*( xpsij_ (ip,ij)*kernel_(ij ,iky,ikx,iz,eo) & - +xpsijp1_(ip,ij)*kernel_(ij+1,iky,ikx,iz,eo) & - +xpsijm1_(ip,ij)*kernel_(ij-1,iky,ikx,iz,eo))*psi(iky,ikx,iz) - ELSE - Dpsi = 0._dp - ENDIF - - !! 
Sum of all RHS terms - moments_rhs_(ip,ij,iky,ikx,iz) = & - ! Nonlinear term Sapj_ = {phi,f} - - Sapj_(ip,ij,iky,ikx,iz) & - ! Perpendicular magnetic term - - Mperp & - ! Parallel magnetic term - - Mpara & - ! Drives (density + temperature gradients) - - (Dphi + Dpsi) & - ! Collision term - + TColl_(ip,ij,iky,ikx,iz) & - ! Perpendicular pressure effects (electromagnetic term) (TO CHECK) - - i_ky*beta*dpdx * (Tnapj + Tnapp2j + Tnapm2j + Tnapjp1 + Tnapjm1)& - ! Parallel drive term (should be negligible, to test) - ! -Gamma_phipar(iz,eo)*Tphi*ddz_phi(iky,ikx,iz) & - ! Numerical perpendicular hyperdiffusion - -mu_x*diff_kx_coeff*kx**N_HD*moments_(ip,ij,iky,ikx,iz) & - -mu_y*diff_ky_coeff*ky**N_HD*moments_(ip,ij,iky,ikx,iz) & - ! Numerical parallel hyperdiffusion "mu_z*ddz**4" see Pueschel 2010 (eq 25) - -mu_z*diff_dz_coeff*ddzND_napj_(ip,ij,iky,ikx,iz) - - !! Velocity space dissipation (should be implemented somewhere else) - SELECT CASE(HYP_V) - CASE('hypcoll') ! GX like Hermite hypercollisions see Mandell et al. 2023 (eq 3.23), unadvised to use it - IF (p_int .GT. 2) & - moments_rhs_(ip,ij,iky,ikx,iz) = & - moments_rhs_(ip,ij,iky,ikx,iz) - mu_p*diff_pe_coeff*p_int**6*moments_(ip,ij,iky,ikx,iz) - IF (j_int .GT. 1) & - moments_rhs_(ip,ij,iky,ikx,iz) = & - moments_rhs_(ip,ij,iky,ikx,iz) - mu_j*diff_je_coeff*j_int**6*moments_(ip,ij,iky,ikx,iz) - CASE('dvpar4') - ! fourth order numerical diffusion in vpar - IF(ip-4 .GT. 0) & - ! Numerical parallel velocity hyperdiffusion "+ dvpar4 g_a" see Pueschel 2010 (eq 33) - ! (not used often so not parallelized) - moments_rhs_(ip,ij,iky,ikx,iz) = & - moments_rhs_(ip,ij,iky,ikx,iz) & - + mu_p*dv4_Hp_coeff(p_int)*moments_(ip-4,ij,iky,ikx,iz) - ! + dummy Laguerre diff - IF (j_int .GT. 1) & - moments_rhs_(ip,ij,iky,ikx,iz) = & - moments_rhs_(ip,ij,iky,ikx,iz) - mu_j*diff_je_coeff*j_int**6*moments_(ip,ij,iky,ikx,iz) - CASE DEFAULT - END SELECT - ELSE - moments_rhs_(ip,ij,iky,ikx,iz) = 0._dp - ENDIF - END DO ploop - END DO jloop - END DO kyloop - END DO kxloop - END DO zloop - ! Execution time end - CALL cpu_time(t1_rhs) - tc_rhs = tc_rhs + (t1_rhs-t0_rhs) - - END SUBROUTINE moments_eq_rhs - !_____________________________________________________________________________! - !_____________________________________________________________________________! - + !! Put RHS in the array + moments_rhs(ia,ip,ij,iky,ikx,iz,updatetlevel) = RHS + END DO a + END DO p + END DO j + END DO y + END DO x + END DO z + ! Execution time end + CALL cpu_time(t1_rhs) + tc_rhs = tc_rhs + (t1_rhs-t0_rhs) END SUBROUTINE compute_moments_eq_rhs -SUBROUTINE add_Maxwellian_background_terms - ! This routine is meant to add the terms rising from the magnetic operator, - ! i.e. (B x k_gB) Grad, applied on the background Maxwellian distribution - ! (x_a + spar^2)(b x k_gB) GradFaM - ! It gives birth to kx=ky=0 sources terms (averages) that hit moments_ 00, 20, - ! 40, 01,02, 21 with background gradient dependences. - USE prec_const - USE time_integration, ONLY : updatetlevel - USE model, ONLY: taue_qe, taui_qi, k_Ni, k_Ne, k_Ti, k_Te, KIN_E - USE array, ONLY: moments_rhs_e, moments_rhs_i - USE grid, ONLY: contains_kx0, contains_ky0, ikx_0, iky_0,& - ips_e,ipe_e,ijs_e,ije_e,ips_i,ipe_i,ijs_i,ije_i,& - zarray, izs,ize,& - ip,ij - IMPLICIT NONE - real(dp), DIMENSION(izs:ize) :: sinz - - sinz(izs:ize) = SIN(zarray(izs:ize,0)) - - IF(contains_kx0 .AND. contains_ky0) THEN - IF(KIN_E) THEN - DO ip = ips_e,ipe_e - DO ij = ijs_e,ije_e - SELECT CASE(ij-1) - CASE(0) ! j = 0 - SELECT CASE (ip-1) - CASE(0) ! 
Na00 term - moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taue_qe * sinz(izs:ize) * (1.5_dp*k_Ne - 1.125_dp*k_Te) - CASE(2) ! Na20 term - moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taue_qe * sinz(izs:ize) * (SQRT2*0.5_dp*k_Ne - 2.75_dp*k_Te) - CASE(4) ! Na40 term - moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taue_qe * sinz(izs:ize) * SQRT6*0.75_dp*k_Te - END SELECT - CASE(1) ! j = 1 - SELECT CASE (ip-1) - CASE(0) ! Na01 term - moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - -taue_qe * sinz(izs:ize) * (k_Ne + 3.5_dp*k_Te) - CASE(2) ! Na21 term - moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - -taue_qe * sinz(izs:ize) * SQRT2*k_Te - END SELECT - CASE(2) ! j = 2 - SELECT CASE (ip-1) - CASE(0) ! Na02 term - moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_e(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taue_qe * sinz(izs:ize) * 2._dp*k_Te - END SELECT - END SELECT - ENDDO - ENDDO - ENDIF - - DO ip = ips_i,ipe_i - DO ij = ijs_i,ije_i - SELECT CASE(ij-1) - CASE(0) ! j = 0 - SELECT CASE (ip-1) - CASE(0) ! Na00 term - moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taui_qi * sinz(izs:ize) * (1.5_dp*k_Ni - 1.125_dp*k_Ti) - CASE(2) ! Na20 term - moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taui_qi * sinz(izs:ize) * (SQRT2*0.5_dp*k_Ni - 2.75_dp*k_Ti) - CASE(4) ! Na40 term - moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taui_qi * sinz(izs:ize) * SQRT6*0.75_dp*k_Ti - END SELECT - CASE(1) ! j = 1 - SELECT CASE (ip-1) - CASE(0) ! Na01 term - moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - -taui_qi * sinz(izs:ize) * (k_Ni + 3.5_dp*k_Ti) - CASE(2) ! Na21 term - moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - -taui_qi * sinz(izs:ize) * SQRT2*k_Ti - END SELECT - CASE(2) ! j = 2 - SELECT CASE (ip-1) - CASE(0) ! Na02 term - moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel) = moments_rhs_i(ip,ij,iky_0,ikx_0,izs:ize,updatetlevel)& - +taui_qi * sinz(izs:ize) * 2._dp*k_Ti - END SELECT - END SELECT - ENDDO - ENDDO - - ENDIF - -END SUBROUTINE +! SUBROUTINE add_Maxwellian_background_terms +! ! This routine is meant to add the terms rising from the magnetic operator, +! ! i.e. (B x k_gB) Grad, applied on the background Maxwellian distribution +! ! (x_a + spar^2)(b x k_gB) GradFaM +! ! It gives birth to kx=ky=0 sources terms (averages) that hit moments_ 00, 20, +! ! 40, 01,02, 21 with background gradient dependences. +! USE prec_const +! USE time_integration, ONLY : updatetlevel +! USE species, ONLY: tau_q, k_N, k_T +! USE array, ONLY: moments_rhs +! USE grid, ONLY: contains_kx0, contains_ky0, ikx0, iky0,& +! ia,ias,iae,ip,ips,ipe, ij,ijs,ije, zarray,izs,ize +! IMPLICIT NONE +! real(dp), DIMENSION(izs:ize) :: sinz +! +! sinz(izs:ize) = SIN(zarray(izs:ize,0)) +! +! IF(contains_kx0 .AND. contains_ky0) THEN +! DO ia = ias, iae +! DO ip = ips,ipe +! DO ij = ijs,ije +! SELECT CASE(ij-1) +! CASE(0) ! j = 0 +! SELECT CASE (ip-1) +! CASE(0) ! 
Na00 term +! moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel) = moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel)& +! +tau_q(ia) * sinz(izs:ize) * (1.5_dp*k_N(ia) - 1.125_dp*k_T(ia)) +! CASE(2) ! Na20 term +! moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel) = moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel)& +! +tau_q(ia) * sinz(izs:ize) * (SQRT2*0.5_dp*k_N(ia) - 2.75_dp*k_T(ia)) +! CASE(4) ! Na40 term +! moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel) = moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel)& +! +tau_q(ia) * sinz(izs:ize) * SQRT6*0.75_dp*k_T(ia) +! END SELECT +! CASE(1) ! j = 1 +! SELECT CASE (ip-1) +! CASE(0) ! Na01 term +! moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel) = moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel)& +! -tau_q(ia) * sinz(izs:ize) * (k_N(ia) + 3.5_dp*k_T(ia)) +! CASE(2) ! Na21 term +! moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel) = moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel)& +! -tau_q(ia) * sinz(izs:ize) * SQRT2*k_T(ia) +! END SELECT +! CASE(2) ! j = 2 +! SELECT CASE (ip-1) +! CASE(0) ! Na02 term +! moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel) = moments_rhs(ia,ip,ij,iky0,ikx0,izs:ize,updatetlevel)& +! +tau_q(ia) * sinz(izs:ize) * 2._dp*k_T(ia) +! END SELECT +! END SELECT +! ENDDO +! ENDDO +! ENDDO +! ENDIF +! +! END SUBROUTINE END MODULE moments_eq_rhs diff --git a/src/nonlinear_mod.F90 b/src/nonlinear_mod.F90 index 1e0a8bca..7155fc24 100644 --- a/src/nonlinear_mod.F90 +++ b/src/nonlinear_mod.F90 @@ -1,41 +1,50 @@ MODULE nonlinear - USE array, ONLY : dnjs, Sepj, Sipj, kernel_i, kernel_e,& - moments_e_ZF, moments_i_ZF, phi_ZF + USE array, ONLY : dnjs, Sapj, kernel USE initial_par, ONLY : ACT_ON_MODES - USE basic - USE fourier - USE fields, ONLY : phi, psi, moments_e, moments_i - USE grid - USE model - USE prec_const + USE basic, ONLY : t0_Sapj, t1_Sapj, tc_Sapj + USE fourier, ONLY : bracket_sum_r, bracket_sum_c, planf, planb, poisson_bracket_and_sum + USE fields, ONLY : phi, psi, moments + USE grid, ONLY: local_na, & + local_np,ngp,parray,pmax,& + local_nj,ngj,jarray,jmax, local_nj_offset, dmax,& + kyarray, AA_y, local_nky_ptr, local_nky_ptr_offset,inv_Ny,& + local_nkx_ptr,kxarray, AA_x, inv_Nx,& + local_nz,ngz,zarray,nzgrid + USE model, ONLY : LINEARITY, CLOS, NL_CLOS, EM + USE prec_const, ONLY : dp + USE species, ONLY : sqrt_tau_o_sigma USE time_integration, ONLY : updatetlevel + use, intrinsic :: iso_c_binding + IMPLICIT NONE + INCLUDE 'fftw3-mpi.f03' COMPLEX(dp), DIMENSION(:,:), ALLOCATABLE :: F_cmpx, G_cmpx COMPLEX(dp), DIMENSION(:,:), ALLOCATABLE :: Fx_cmpx, Gy_cmpx COMPLEX(dp), DIMENSION(:,:), ALLOCATABLE :: Fy_cmpx, Gx_cmpx, F_conv_G - - INTEGER :: in, is, p_int, j_int - INTEGER :: nmax, smax ! 
Upper bound of the sums - REAL(dp):: kx, ky, kerneln, sqrt_p, sqrt_pp1 - PUBLIC :: compute_Sapj, nonlinear_init + INTEGER :: in, is, p_int, j_int, n_int + INTEGER :: nmax, smax + REAL(dp):: sqrt_p, sqrt_pp1 + PUBLIC :: compute_Sapj, nonlinear_init CONTAINS SUBROUTINE nonlinear_init - ALLOCATE( F_cmpx(ikys:ikye,ikxs:ikxe)) - ALLOCATE( G_cmpx(ikys:ikye,ikxs:ikxe)) + IMPLICIT NONE + ALLOCATE( F_cmpx(local_nky_ptr,local_nkx_ptr)) + ALLOCATE( G_cmpx(local_nky_ptr,local_nkx_ptr)) - ALLOCATE(Fx_cmpx(ikys:ikye,ikxs:ikxe)) - ALLOCATE(Gy_cmpx(ikys:ikye,ikxs:ikxe)) - ALLOCATE(Fy_cmpx(ikys:ikye,ikxs:ikxe)) - ALLOCATE(Gx_cmpx(ikys:ikye,ikxs:ikxe)) + ALLOCATE(Fx_cmpx(local_nky_ptr,local_nkx_ptr)) + ALLOCATE(Gy_cmpx(local_nky_ptr,local_nkx_ptr)) + ALLOCATE(Fy_cmpx(local_nky_ptr,local_nkx_ptr)) + ALLOCATE(Gx_cmpx(local_nky_ptr,local_nkx_ptr)) - ALLOCATE(F_conv_G(ikys:ikye,ikxs:ikxe)) + ALLOCATE(F_conv_G(local_nky_ptr,local_nkx_ptr)) END SUBROUTINE nonlinear_init SUBROUTINE compute_Sapj + IMPLICIT NONE ! This routine is meant to compute the non linear term for each specie and degree !! In real space Sapj ~ b*(grad(phi) x grad(g)) which in moments in fourier becomes !! Sapj = Sum_n (ikx Kn phi)#(iky Sum_s d_njs Naps) - (iky Kn phi)#(ikx Sum_s d_njs Naps) @@ -47,12 +56,8 @@ SUBROUTINE compute_Sapj SELECT CASE(LINEARITY) CASE ('nonlinear') CALL compute_nonlinear - CASE ('ZF_semilin') - CALL compute_semi_linear_ZF - CASE ('NZ_semilin') - CALL compute_semi_linear_NZ CASE ('linear') - Sepj = 0._dp; Sipj = 0._dp + Sapj = 0._dp CASE DEFAULT ERROR STOP '>> ERROR << Linearity not recognized ' END SELECT @@ -65,398 +70,74 @@ END SUBROUTINE compute_Sapj SUBROUTINE compute_nonlinear IMPLICIT NONE - !!!!!!!!!!!!!!!!!!!! ELECTRON non linear term computation (Sepj)!!!!!!!!!! - IF(KIN_E) THEN - zloope: DO iz = izs,ize - - ploope: DO ip = ips_e,ipe_e ! Loop over Hermite moments - eo = MODULO(parray_e(ip),2) - p_int = parray_e(ip) - sqrt_p = SQRT(REAL(p_int,dp)); sqrt_pp1 = SQRT(REAL(p_int,dp)+1._dp); - - jloope: DO ij = ijs_e, ije_e ! Loop over Laguerre moments - j_int=jarray_e(ij) - IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmaxe)) THEN !compute - ! Set non linear sum truncation - IF (NL_CLOS .EQ. -2) THEN - nmax = Jmaxe - ELSEIF (NL_CLOS .EQ. -1) THEN - nmax = Jmaxe-j_int - ELSE - nmax = min(NL_CLOS,Jmaxe-j_int) - ENDIF - bracket_sum_r = 0._dp ! initialize sum over real nonlinear term - - nloope: DO in = 1,nmax+1 ! Loop over laguerre for the sum -!-----------!! ELECTROSTATIC CONTRIBUTION {Kernel phi, Sum_s dnjs Neps} - ! First convolution terms - F_cmpx(ikys:ikye,ikxs:ikxe) = phi(ikys:ikye,ikxs:ikxe,iz) * kernel_e(in, ikys:ikye,ikxs:ikxe, iz, eo) - ! Second convolution terms - G_cmpx(ikys:ikye,ikxs:ikxe) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), Jmaxe ); - DO is = 1, smax+1 ! sum truncation on number of moments - G_cmpx(ikys:ikye,ikxs:ikxe) = G_cmpx(ikys:ikye,ikxs:ikxe) + & - dnjs(in,ij,is) * moments_e(ip,is,ikys:ikye,ikxs:ikxe,iz,updatetlevel) - ENDDO - ! this function add its result to bracket_sum_r - CALL poisson_bracket_and_sum(F_cmpx,G_cmpx,bracket_sum_r) - -!-----------!! ELECTROMAGNETIC CONTRIBUTION -sqrt(tau)/sigma*{Sum_s dnjs [sqrt(p+1)Nap+1s + sqrt(p)Nap-1s], Kernel psi} - IF(EM) THEN - ! First convolution terms - F_cmpx(ikys:ikye,ikxs:ikxe) = -sqrt_tau_o_sigma_e * psi(ikys:ikye,ikxs:ikxe,iz) * kernel_e(in, ikys:ikye,ikxs:ikxe, iz, eo) - ! Second convolution terms - G_cmpx(ikys:ikye,ikxs:ikxe) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), Jmaxe ); - DO is = 1, smax+1 ! 
sum truncation on number of moments - G_cmpx(ikys:ikye,ikxs:ikxe) = G_cmpx(ikys:ikye,ikxs:ikxe) + & - dnjs(in,ij,is) * (sqrt_pp1*moments_e(ip+1,is,ikys:ikye,ikxs:ikxe,iz,updatetlevel)& - +sqrt_p *moments_e(ip-1,is,ikys:ikye,ikxs:ikxe,iz,updatetlevel)) - ENDDO - ! this function add its result to bracket_sum_r - CALL poisson_bracket_and_sum(F_cmpx,G_cmpx,bracket_sum_r) - ENDIF - ENDDO nloope - -!---------! Put back the real nonlinear product into k-space - call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) - ! Retrieve convolution in input format - DO iky = ikys, ikye - Sepj(ip,ij,iky,ikxs:ikxe,iz) = bracket_sum_c(ikxs:ikxe,iky-local_nky_offset)*AA_x(ikxs:ikxe)*AA_y(iky) !Anti aliasing filter - ENDDO - ELSE - Sepj(ip,ij,:,:,iz) = 0._dp - ENDIF - ENDDO jloope - ENDDO ploope - ENDDO zloope -ENDIF - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!! ION non linear term computation (Sipj)!!!!!!!!!! - zloopi: DO iz = izs,ize - ploopi: DO ip = ips_i,ipe_i ! Loop over Hermite moments - p_int = parray_i(ip) - eo = MODULO(parray_i(ip),2) - jloopi: DO ij = ijs_i, ije_i ! Loop over Laguerre moments - j_int=jarray_i(ij) - IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmaxi)) THEN !compute for every moments except for closure 1 - ! Set non linear sum truncation - IF (NL_CLOS .EQ. -2) THEN - nmax = Jmaxi - ELSEIF (NL_CLOS .EQ. -1) THEN - nmax = Jmaxi-j_int - ELSE - nmax = min(NL_CLOS,Jmaxi-j_int) - ENDIF - bracket_sum_r = 0._dp ! initialize sum over real nonlinear term - nloopi: DO in = 1,nmax+1 ! Loop over laguerre for the sum -!-----------!! ELECTROSTATIC CONTRIBUTION - ! First convolution terms - F_cmpx(ikys:ikye,ikxs:ikxe) = phi(ikys:ikye,ikxs:ikxe,iz) * kernel_i(in, ikys:ikye,ikxs:ikxe, iz, eo) - ! Second convolution terms - G_cmpx(ikys:ikye,ikxs:ikxe) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), jmaxi ); - DO is = 1, smax+1 ! sum truncation on number of moments - G_cmpx(ikys:ikye,ikxs:ikxe) = G_cmpx(ikys:ikye,ikxs:ikxe) + & - dnjs(in,ij,is) * moments_i(ip,is,ikys:ikye,ikxs:ikxe,iz,updatetlevel) - ENDDO - ! this function add its result to bracket_sum_r - CALL poisson_bracket_and_sum(F_cmpx,G_cmpx,bracket_sum_r) -!-----------!! ELECTROMAGNETIC CONTRIBUTION -sqrt(tau)/sigma*{Sum_s dnjs [sqrt(p+1)Nap+1s + sqrt(p)Nap-1s], Kernel psi} - IF(EM) THEN - ! First convolution terms - F_cmpx(ikys:ikye,ikxs:ikxe) = -sqrt_tau_o_sigma_i * psi(ikys:ikye,ikxs:ikxe,iz) * kernel_i(in, ikys:ikye,ikxs:ikxe, iz, eo) - ! Second convolution terms - G_cmpx(ikys:ikye,ikxs:ikxe) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), Jmaxi ); - DO is = 1, smax+1 ! sum truncation on number of moments - G_cmpx(ikys:ikye,ikxs:ikxe) = G_cmpx(ikys:ikye,ikxs:ikxe) + & - dnjs(in,ij,is) * (sqrt_pp1*moments_i(ip+1,is,ikys:ikye,ikxs:ikxe,iz,updatetlevel)& - +sqrt_p *moments_i(ip-1,is,ikys:ikye,ikxs:ikxe,iz,updatetlevel)) - ENDDO - ! this function add its result to bracket_sum_r - CALL poisson_bracket_and_sum(F_cmpx,G_cmpx,bracket_sum_r) + INTEGER :: iz,ij,ip,eo,ia,ikx,iky + DO iz = 1,local_nz + DO ij = 1,local_nj ! Loop over Laguerre moments + j_int=jarray(ij+ngj/2) + DO ip = 1,local_np ! Loop over Hermite moments + p_int = parray(ip+ngp/2) + sqrt_p = SQRT(REAL(p_int,dp)) + sqrt_pp1 = SQRT(REAL(p_int,dp) + 1._dp) + eo = min(nzgrid,MODULO(parray(ip),2)+1) + DO ia = 1,local_na + IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmax)) THEN !compute for every moments except for closure 1 + ! Set non linear sum truncation + IF (NL_CLOS .EQ. 
-2) THEN + nmax = local_nj + ELSEIF (NL_CLOS .EQ. -1) THEN + nmax = (Jmax-j_int)+1+ngj/2-local_nj_offset + ELSE + nmax = NL_CLOS+1+ngj/2-local_nj_offset ENDIF - ENDDO nloopi - ! Put the real nonlinear product into k-space - call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) - ! Retrieve convolution in input format - DO iky = ikys, ikye - Sipj(ip,ij,iky,ikxs:ikxe,iz) = bracket_sum_c(ikxs:ikxe,iky-local_nky_offset)*AA_x(ikxs:ikxe)*AA_y(iky) - ENDDO - ELSE - Sipj(ip,ij,:,:,iz) = 0._dp - ENDIF - ENDDO jloopi - ENDDO ploopi - ENDDO zloopi - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -END SUBROUTINE compute_nonlinear -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! Semi linear computation : Only NZ-ZF convolutions are kept -SUBROUTINE compute_semi_linear_ZF - IMPLICIT NONE - !!!!!!!!!!!!!!!!!!!! ELECTRON semi linear term computation (Sepj)!!!!!!!!!! - IF(KIN_E) THEN - zloope: DO iz = izs,ize - ploope: DO ip = ips_e,ipe_e ! Loop over Hermite moments - eo = MODULO(parray_e(ip),2) - jloope: DO ij = ijs_e, ije_e ! Loop over Laguerre moments - j_int=jarray_e(ij) - ! Set non linear sum truncation - IF (NL_CLOS .EQ. -2) THEN - nmax = Jmaxe - ELSEIF (NL_CLOS .EQ. -1) THEN - nmax = Jmaxe-(ij-1) - ELSE - nmax = NL_CLOS - ENDIF - bracket_sum_r = 0._dp ! initialize sum over real nonlinear term - nloope: DO in = 1,nmax+1 ! Loop over laguerre for the sum - ! Build the terms to convolve - kxloope: DO ikx = ikxs,ikxe ! Loop over kx - kyloope: DO iky = ikys,ikye ! Loop over ky - kx = kxarray(ikx) - ky = kyarray(iky) - kerneln = kernel_e(in, ikx, iky, iz, eo) - ! Zonal terms (=0 for all ky not 0) - Fx_cmpx(iky,ikx) = 0._dp - Gx_cmpx(iky,ikx) = 0._dp - IF(iky .EQ. iky_0) THEN - Fx_cmpx(iky,ikx) = imagu*kx* phi(iky,ikx,iz) * kerneln - smax = MIN( (in-1)+(ij-1), jmaxe ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gx_cmpx(iky,ikx) = Gx_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_e(ip,is,iky,ikx,iz,updatetlevel) + bracket_sum_r = 0._dp ! initialize sum over real nonlinear term + DO in = 1,nmax ! Loop over laguerre for the sum + n_int = parray(in+ngp/2) + !-----------!! ELECTROSTATIC CONTRIBUTION + ! First convolution terms + F_cmpx(:,:) = phi(:,:,iz+ngz/2) * kernel(ia,in+ngj/2,:,:,iz+ngz/2,eo) + ! Second convolution terms + G_cmpx = 0._dp ! initialization of the sum + smax = (n_int+j_int)+1+ngj/2-local_nj_offset + DO is = 1, MIN(smax,local_nj) ! sum truncation on number of moments + G_cmpx(:,:) = G_cmpx(:,:) + & + dnjs(in,ij,is) * moments(ia,ip,is+ngj/2,:,:,iz+ngz/2,updatetlevel) ENDDO - Gx_cmpx(iky,ikx) = imagu*kx*Gx_cmpx(iky,ikx) - ENDIF - ! NZ terms - Fy_cmpx(iky,ikx) = imagu*ky* phi(iky,ikx,iz) * kerneln - Gy_cmpx(iky,ikx) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), jmaxe ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gy_cmpx(iky,ikx) = Gy_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_e(ip,is,iky,ikx,iz,updatetlevel) + ! this function add its result to bracket_sum_r + CALL poisson_bracket_and_sum(kyarray,kxarray,inv_Ny,inv_Nx,AA_y,AA_x,local_nky_ptr,local_nkx_ptr,F_cmpx,G_cmpx,bracket_sum_r) + !-----------!! 
ELECTROMAGNETIC CONTRIBUTION -sqrt(tau)/sigma*{Sum_s dnjs [sqrt(p+1)Nap+1s + sqrt(p)Nap-1s], Kernel psi} + IF(EM) THEN + ! First convolution terms + F_cmpx(:,:) = -sqrt_tau_o_sigma(ia) * psi(:,:,iz+ngz/2) * kernel(ia,in+ngj/2,:,:,iz+ngz/2,eo) + ! Second convolution terms + G_cmpx = 0._dp ! initialization of the sum + smax = (n_int+j_int)+1+ngj/2-local_nj_offset + DO is = 1, MIN(smax,local_nj) ! sum truncation on number of moments + G_cmpx(:,:) = G_cmpx(:,:) + & + dnjs(in,ij,is) * (sqrt_pp1*moments(ia,ip+1+ngj/2,is,:,:,iz+ngz/2,updatetlevel)& + +sqrt_p *moments(ia,ip-1+ngj/2,is,:,:,iz+ngz/2,updatetlevel)) + ENDDO + ! this function add its result to bracket_sum_r + CALL poisson_bracket_and_sum(kyarray,kxarray,inv_Ny,inv_Nx,AA_y,AA_x,local_nky_ptr,local_nkx_ptr,F_cmpx,G_cmpx,bracket_sum_r) + ENDIF ENDDO - Gy_cmpx(iky,ikx) = imagu*ky*Gy_cmpx(iky,ikx) - ENDDO kyloope - ENDDO kxloope - ! First term df/dx x dg/dy - CALL convolve_and_add(Fx_cmpx,Gy_cmpx) - ! Second term -df/dy x dg/dx - CALL convolve_and_add(-Fy_cmpx,Gx_cmpx) - ENDDO nloope - ! Put the real nonlinear product into k-space - call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) - ! Retrieve convolution in input format - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - Sepj(ip,ij,iky,ikx,iz) = bracket_sum_c(ikx,iky-local_nky_offset)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter - ENDDO - ENDDO - ENDDO jloope - ENDDO ploope -ENDDO zloope -ENDIF - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!! ION non linear term computation (Sipj)!!!!!!!!!! -zloopi: DO iz = izs,ize - ploopi: DO ip = ips_i,ipe_i ! Loop over Hermite moments - eo = MODULO(parray_i(ip),2) - jloopi: DO ij = ijs_i, ije_i ! Loop over Laguerre moments - j_int=jarray_i(ij) - ! Set non linear sum truncation - IF (NL_CLOS .EQ. -2) THEN - nmax = Jmaxi - ELSEIF (NL_CLOS .EQ. -1) THEN - nmax = Jmaxi-(ij-1) - ELSE - nmax = NL_CLOS - ENDIF - bracket_sum_r = 0._dp ! initialize sum over real nonlinear term - nloopi: DO in = 1,nmax+1 ! Loop over laguerre for the sum - kxloopi: DO ikx = ikxs,ikxe ! Loop over kx - kyloopi: DO iky = ikys,ikye ! Loop over ky - ! Zonal terms (=0 for all ky not 0) - Fx_cmpx(iky,ikx) = 0._dp - Gx_cmpx(iky,ikx) = 0._dp - IF(iky .EQ. iky_0) THEN - Fx_cmpx(iky,ikx) = imagu*kx* phi(iky,ikx,iz) * kerneln - smax = MIN( (in-1)+(ij-1), jmaxi ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gx_cmpx(iky,ikx) = Gx_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_i(ip,is,iky,ikx,iz,updatetlevel) + ! Put the real nonlinear product into k-space + call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) + ! Retrieve convolution in input format and apply anti aliasing + DO ikx = 1,local_nkx_ptr + DO iky = 1,local_nky_ptr + Sapj(ia,ip,ij,iky,ikx,iz) = bracket_sum_c(ikx,iky)*AA_x(ikx)*AA_y(iky) ENDDO - Gx_cmpx(iky,ikx) = imagu*kx*Gx_cmpx(iky,ikx) - ENDIF - - ! NZ terms - Fy_cmpx(iky,ikx) = imagu*ky* phi(iky,ikx,iz) * kerneln - Gy_cmpx(iky,ikx) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), jmaxi ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gy_cmpx(iky,ikx) = Gy_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_i(ip,is,iky,ikx,iz,updatetlevel) ENDDO - Gy_cmpx(iky,ikx) = imagu*ky*Gy_cmpx(iky,ikx) - ENDDO kyloopi - ENDDO kxloopi - ! First term drphi x dzf - CALL convolve_and_add(Fy_cmpx,Gx_cmpx) - ! Second term -dzphi x drf - CALL convolve_and_add(Fy_cmpx,Gx_cmpx) - ENDDO nloopi - ! 
Put the real nonlinear product into k-space - call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) - ! Retrieve convolution in input format - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - Sipj(ip,ij,iky,ikx,iz) = bracket_sum_c(ikx,iky-local_nky_offset)*AA_x(ikx)*AA_y(iky) + ELSE + Sapj(ia,ip,ij,:,:,iz) = 0._dp + ENDIF ENDDO ENDDO - ENDDO jloopi - ENDDO ploopi -ENDDO zloopi -END SUBROUTINE compute_semi_linear_ZF - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ENDDO + ENDDO + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +END SUBROUTINE compute_nonlinear !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Semi linear computation : Only kx=0*all convolutions are kept -SUBROUTINE compute_semi_linear_NZ - IMPLICIT NONE - !!!!!!!!!!!!!!!!!!!! ELECTRON semi linear term computation (Sepj)!!!!!!!!!! - IF(KIN_E) THEN - zloope: DO iz = izs,ize - ploope: DO ip = ips_e,ipe_e ! Loop over Hermite moments - eo = MODULO(parray_e(ip),2) - jloope: DO ij = ijs_e, ije_e ! Loop over Laguerre moments - j_int=jarray_e(ij) - ! Set non linear sum truncation - IF (NL_CLOS .EQ. -2) THEN - nmax = Jmaxe - ELSEIF (NL_CLOS .EQ. -1) THEN - nmax = Jmaxe-(ij-1) - ELSE - nmax = NL_CLOS - ENDIF - bracket_sum_r = 0._dp ! initialize sum over real nonlinear term - nloope: DO in = 1,nmax+1 ! Loop over laguerre for the sum - ! Build the terms to convolve - kxloope: DO ikx = ikxs,ikxe ! Loop over kx - kyloope: DO iky = ikys,ikye ! Loop over ky - kx = kxarray(ikx) - ky = kyarray(iky) - kerneln = kernel_e(in, ikx, iky, iz, eo) - ! All terms - Fx_cmpx(iky,ikx) = imagu*kx* phi(iky,ikx,iz) * kerneln - smax = MIN( (in-1)+(ij-1), jmaxe ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gx_cmpx(iky,ikx) = Gx_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_e(ip,is,iky,ikx,iz,updatetlevel) - ENDDO - Gx_cmpx(iky,ikx) = imagu*kx*Gx_cmpx(iky,ikx) - ! Kx = 0 terms - Fy_cmpx(iky,ikx) = 0._dp - Gy_cmpx(iky,ikx) = 0._dp - IF (ikx .EQ. ikx_0) THEN - Fy_cmpx(iky,ikx) = imagu*ky* phi(iky,ikx,iz) * kerneln - Gy_cmpx(iky,ikx) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), jmaxe ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gy_cmpx(iky,ikx) = Gy_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_e(ip,is,iky,ikx,iz,updatetlevel) - ENDDO - Gy_cmpx(iky,ikx) = imagu*ky*Gy_cmpx(iky,ikx) - ENDIF - ENDDO kyloope - ENDDO kxloope - ! First term df/dx x dg/dy - CALL convolve_and_add(Fx_cmpx,Gy_cmpx) - ! Second term -df/dy x dg/dx - CALL convolve_and_add(-Fy_cmpx,Gx_cmpx) - ENDDO nloope - ! Put the real nonlinear product into k-space - call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) - ! Retrieve convolution in input format - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - Sepj(ip,ij,iky,ikx,iz) = bracket_sum_c(ikx,iky-local_nky_offset)*AA_x(ikx)*AA_y(iky) !Anti aliasing filter - ENDDO - ENDDO - ENDDO jloope - ENDDO ploope -ENDDO zloope -ENDIF - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!! ION non linear term computation (Sipj)!!!!!!!!!! -zloopi: DO iz = izs,ize - ploopi: DO ip = ips_i,ipe_i ! Loop over Hermite moments - eo = MODULO(parray_i(ip),2) - jloopi: DO ij = ijs_i, ije_i ! Loop over Laguerre moments - j_int=jarray_i(ij) - ! Set non linear sum truncation - IF (NL_CLOS .EQ. -2) THEN - nmax = Jmaxi - ELSEIF (NL_CLOS .EQ. 
-1) THEN - nmax = Jmaxi-(ij-1) - ELSE - nmax = NL_CLOS - ENDIF - bracket_sum_r = 0._dp ! initialize sum over real nonlinear term - nloopi: DO in = 1,nmax+1 ! Loop over laguerre for the sum - kxloopi: DO ikx = ikxs,ikxe ! Loop over kx - kyloopi: DO iky = ikys,ikye ! Loop over ky - ! Zonal terms (=0 for all ky not 0) - Fx_cmpx(iky,ikx) = imagu*kx* phi(iky,ikx,iz) * kerneln - smax = MIN( (in-1)+(ij-1), jmaxi ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gx_cmpx(iky,ikx) = Gx_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_i(ip,is,iky,ikx,iz,updatetlevel) - ENDDO - Gx_cmpx(iky,ikx) = imagu*kx*Gx_cmpx(iky,ikx) - - ! Kx = 0 terms - Fy_cmpx(iky,ikx) = 0._dp - Gy_cmpx(iky,ikx) = 0._dp - IF (ikx .EQ. ikx_0) THEN - Fy_cmpx(iky,ikx) = imagu*ky* phi(iky,ikx,iz) * kerneln - Gy_cmpx(iky,ikx) = 0._dp ! initialization of the sum - smax = MIN( (in-1)+(ij-1), jmaxi ); - DO is = 1, smax+1 ! sum truncation on number of moments - Gy_cmpx(iky,ikx) = Gy_cmpx(iky,ikx) + & - dnjs(in,ij,is) * moments_i(ip,is,iky,ikx,iz,updatetlevel) - ENDDO - Gy_cmpx(iky,ikx) = imagu*ky*Gy_cmpx(iky,ikx) - ENDIF - ENDDO kyloopi - ENDDO kxloopi - ! First term drphi x dzf - CALL convolve_and_add(Fy_cmpx,Gx_cmpx) - ! Second term -dzphi x drf - CALL convolve_and_add(Fy_cmpx,Gx_cmpx) - ENDDO nloopi - ! Put the real nonlinear product into k-space - call fftw_mpi_execute_dft_r2c(planf, bracket_sum_r, bracket_sum_c) - ! Retrieve convolution in input format - DO ikx = ikxs, ikxe - DO iky = ikys, ikye - Sipj(ip,ij,iky,ikx,iz) = bracket_sum_c(ikx,iky-local_nky_offset)*AA_x(ikx)*AA_y(iky) - ENDDO - ENDDO - ENDDO jloopi - ENDDO ploopi -ENDDO zloopi -END SUBROUTINE compute_semi_linear_NZ - END MODULE nonlinear diff --git a/src/numerical_experiments_mod.F90 b/src/numerical_experiments_mod.F90 deleted file mode 100644 index c316a123..00000000 --- a/src/numerical_experiments_mod.F90 +++ /dev/null @@ -1,113 +0,0 @@ -!! The numerical_experiments module contains routines to "play" with the fourier -! modes in order to understand mechanisms. These routines are not integrated in -! the main code anymore because they are not used. This file serves as an archive. -MODULE numerical_experiments -USE basic -USE prec_const -USE grid -USE utility - -implicit none - -PUBLIC :: play_with_modes, save_EM_ZF_modes - -CONTAINS -!******************************************************************************! -!!!!!!! Routine that can artificially increase or wipe modes -!******************************************************************************! -SUBROUTINE save_EM_ZF_modes - USE fields - USE array, ONLY : moments_e_ZF, moments_i_ZF, phi_ZF, moments_e_NZ,moments_i_NZ,phi_NZ - USE grid - USE time_integration, ONLY: updatetlevel - USE model, ONLY: KIN_E - IMPLICIT NONE - ! 
Store Zonal and entropy modes - IF(contains_ky0) THEN - IF(KIN_E) & - moments_e_ZF(ips_e:ipe_e,ijs_e:ije_e,ikxs:ikxe,izs:ize) = moments_e(ips_e:ipe_e,ijs_e:ije_e,iky_0,ikxs:ikxe,izs:ize,updatetlevel) - moments_i_ZF(ips_i:ipe_i,ijs_i:ije_i,ikxs:ikxe,izs:ize) = moments_i(ips_i:ipe_i,ijs_i:ije_i,iky_0,ikxs:ikxe,izs:ize,updatetlevel) - phi_ZF(ikxs:ikxe,izs:ize) = phi(iky_0,ikxs:ikxe,izs:ize) - ELSE - IF(KIN_E) & - moments_e_ZF(ips_e:ipe_e,ijs_e:ije_e,ikxs:ikxe,izs:ize) = 0._dp - moments_i_ZF(ips_i:ipe_i,ijs_i:ije_i,ikxs:ikxe,izs:ize) = 0._dp - phi_ZF(ikxs:ikxe,izs:ize) = 0._dp - ENDIF - IF(contains_kx0) THEN - IF(KIN_E) & - moments_e_NZ(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,izs:ize) = moments_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikx_0,izs:ize,updatetlevel) - moments_i_NZ(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,izs:ize) = moments_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikx_0,izs:ize,updatetlevel) - phi_NZ(ikys:ikye,izs:ize) = phi(ikys:ikye,ikx_0,izs:ize) - ELSE - IF(KIN_E) & - moments_e_NZ(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,izs:ize) = 0._dp - moments_i_NZ(ips_e:ipe_e,ijs_i:ije_i,ikys:ikye,izs:ize) = 0._dp - phi_NZ(ikys:ikye,izs:ize) = 0._dp - ENDIF -END SUBROUTINE - -SUBROUTINE play_with_modes - USE fields - USE array, ONLY : moments_e_ZF, moments_i_ZF, phi_ZF, moments_e_NZ,moments_i_NZ,phi_NZ - USE grid - USE time_integration, ONLY: updatetlevel - USE initial_par, ONLY: ACT_ON_MODES - USE model, ONLY: KIN_E - IMPLICIT NONE - REAL(dp) :: AMP = 1.5_dp - - SELECT CASE(ACT_ON_MODES) - CASE('wipe_zonal') ! Errase the zonal flow - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,iky_0,ikxs:ikxe,izs:ize,updatetlevel) = 0._dp - moments_i(ips_i:ipe_i,ijs_i:ije_i,iky_0,ikxs:ikxe,izs:ize,updatetlevel) = 0._dp - phi(iky_0,ikxs:ikxe,izs:ize) = 0._dp - CASE('wipe_entropymode') - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikx_0,izs:ize,updatetlevel) = 0._dp - moments_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikx_0,izs:ize,updatetlevel) = 0._dp - phi(ikys:ikye,ikx_0,izs:ize) = 0._dp - CASE('wipe_turbulence') - DO ikx = ikxs,ikxe - DO iky = ikys, ikye - IF ( (ikx .NE. ikx_0) .AND. (iky .NE. iky_0) ) THEN - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,iky,ikx,izs:ize,updatetlevel) = 0._dp - moments_i(ips_i:ipe_i,ijs_i:ije_i,iky,ikx,izs:ize,updatetlevel) = 0._dp - phi(iky,ikx,izs:ize) = 0._dp - ENDIF - ENDDO - ENDDO - CASE('wipe_nonzonal') - DO ikx = ikxs,ikxe - DO iky = ikys, ikye - IF ( (ikx .NE. 
ikx_0) ) THEN - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,iky,ikx,izs:ize,updatetlevel) = 0._dp - moments_i(ips_i:ipe_i,ijs_i:ije_i,iky,ikx,izs:ize,updatetlevel) = 0._dp - phi(iky,ikx,izs:ize) = 0._dp - ENDIF - ENDDO - ENDDO - CASE('freeze_zonal') - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,iky_0,ikxs:ikxe,izs:ize,updatetlevel) = moments_e_ZF(ips_e:ipe_e,ijs_e:ije_e,ikxs:ikxe,izs:ize) - moments_i(ips_i:ipe_i,ijs_i:ije_i,iky_0,ikxs:ikxe,izs:ize,updatetlevel) = moments_i_ZF(ips_i:ipe_i,ijs_i:ije_i,ikxs:ikxe,izs:ize) - phi(iky_0,ikxs:ikxe,izs:ize) = phi_ZF(ikxs:ikxe,izs:ize) - CASE('freeze_entropymode') - IF(contains_kx0) THEN - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,ikx_0,izs:ize,updatetlevel) = moments_e_NZ(ips_e:ipe_e,ijs_e:ije_e,ikys:ikye,izs:ize) - moments_i(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,ikx_0,izs:ize,updatetlevel) = moments_i_NZ(ips_i:ipe_i,ijs_i:ije_i,ikys:ikye,izs:ize) - phi(ikys:ikye,ikx_0,izs:ize) = phi_NZ(ikys:ikye,izs:ize) - ENDIF - CASE('amplify_zonal') - IF(KIN_E) & - moments_e(ips_e:ipe_e,ijs_e:ije_e,iky_0,ikxs:ikxe,izs:ize,updatetlevel) = AMP*moments_e_ZF(ips_e:ipe_e,ijs_e:ije_e,ikxs:ikxe,izs:ize) - moments_i(ips_i:ipe_i,ijs_i:ije_i,iky_0,ikxs:ikxe,izs:ize,updatetlevel) = AMP*moments_i_ZF(ips_i:ipe_i,ijs_i:ije_i,ikxs:ikxe,izs:ize) - phi(iky_0,ikxs:ikxe,izs:ize) = AMP*phi_ZF(ikxs:ikxe,izs:ize) - END SELECT -END SUBROUTINE - -END MODULE numerical experiments diff --git a/src/numerics_mod.F90 b/src/numerics_mod.F90 index bacd92dd..e62df880 100644 --- a/src/numerics_mod.F90 +++ b/src/numerics_mod.F90 @@ -1,12 +1,7 @@ !! MODULE NUMERICS ! The module numerics contains a set of routines that are called only once at -! the begining of a run. These routines do not need to be optimzed +! the beginng of a run. These routines do not need to be optimzed MODULE numerics - USE basic - USE prec_const - USE grid - USE utility - implicit none PUBLIC :: build_dnjs_table, evaluate_kernels, evaluate_EM_op @@ -21,14 +16,15 @@ SUBROUTINE build_dnjs_table USE array, ONLY : dnjs USE FMZM, ONLY : TO_DP USE coeff, ONLY : ALL2L + USE grid, ONLY : jmax IMPLICIT NONE INTEGER :: in, ij, is, J INTEGER :: n_, j_, s_ - J = max(jmaxe,jmaxi) + J = jmax - DO in = 1,J+1 ! Nested dependent loops to make benefit from dnjs symmetry + DO in = 1,J+1 ! Nested dependent loops to make benefit from dnjs symmetrys n_ = in - 1 DO ij = in,J+1 j_ = ij - 1 @@ -51,12 +47,12 @@ END SUBROUTINE build_dnjs_table !!!!!!! Build the fourth derivative Hermite coefficient table !******************************************************************************! SUBROUTINE build_dv4Hp_table - USE array, ONLY: dv4_Hp_coeff - USE grid, ONLY: pmaxi, pmaxe + USE array, ONLY: dv4_Hp_coeff + USE grid, ONLY: pmax + USE prec_const, ONLY: dp, PI IMPLICIT NONE - INTEGER :: p_, pmax_ - pmax_ = MAX(pmaxi,pmaxe) - DO p_ = -2,pmax_ + INTEGER :: p_ + DO p_ = -2,pmax if (p_ < 4) THEN dv4_Hp_coeff(p_) = 0._dp ELSE @@ -65,8 +61,8 @@ SUBROUTINE build_dv4Hp_table ENDDO !we scale it w.r.t. to the max degree since !D_4^{v}\sim (\Delta v/2)^4 and \Delta v \sim 2pi/kvpar = pi/\sqrt{2P} - ! dv4_Hp_coeff = dv4_Hp_coeff*(1._dp/2._dp/SQRT(REAL(pmax_,dp)))**4 - dv4_Hp_coeff = dv4_Hp_coeff*(PI/2._dp/SQRT(2._dp*REAL(pmax_,dp)))**4 + ! dv4_Hp_coeff = dv4_Hp_coeff*(1._dp/2._dp/SQRT(REAL(pmax,dp)))**4 + dv4_Hp_coeff = dv4_Hp_coeff*(PI/2._dp/SQRT(2._dp*REAL(pmax,dp)))**4 END SUBROUTINE build_dv4Hp_table !******************************************************************************! 
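The scaling applied just above can be checked in isolation: with Delta v ~ pi/sqrt(2*pmax), the weight on dv4_Hp_coeff is (Delta v/2)**4. Below is a minimal standalone sketch, assuming an illustrative pmax = 4; the program name and the value are placeholders and are not part of the source tree.

PROGRAM dv4_scaling_check
  IMPLICIT NONE
  INTEGER, PARAMETER  :: dp = KIND(1.d0)
  REAL(dp), PARAMETER :: PI = 3.141592653589793_dp
  INTEGER  :: pmax
  REAL(dp) :: dv, prefactor
  pmax      = 4                             ! assumed maximum Hermite degree (illustrative only)
  dv        = PI/SQRT(2._dp*REAL(pmax,dp))  ! estimated vpar grid spacing, Delta v ~ pi/sqrt(2P)
  prefactor = (dv/2._dp)**4                 ! same factor as applied to dv4_Hp_coeff above
  PRINT *, 'Delta v ~', dv, '  (Delta v/2)**4 ~', prefactor  ! ~1.11 and ~0.095 for pmax = 4
END PROGRAM dv4_scaling_check

For pmax = 16 the prefactor drops to roughly 6.e-3, so the parallel-velocity hyperdiffusion weight shrinks quickly as the Hermite resolution increases.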
@@ -75,64 +71,50 @@ END SUBROUTINE build_dv4Hp_table !******************************************************************************! SUBROUTINE evaluate_kernels USE basic - USE array, Only : kernel_e, kernel_i, HF_phi_correction_operator - USE grid - USE model, ONLY : sigmae2_taue_o2, sigmai2_taui_o2, KIN_E + USE array, ONLY : kernel, HF_phi_correction_operator + USE grid, ONLY : local_Na, local_Nj,Ngj, local_nkx, local_nky, local_nz, Ngz, jarray, kparray + USE species, ONLY : sigma2_tau_o2 + USE prec_const, ONLY: dp IMPLICIT NONE - INTEGER :: j_int + INTEGER :: j_int, ia, eo, ikx, iky, iz, ij REAL(dp) :: j_dp, y_, factj -DO eo = 0,1 -DO ikx = ikxs,ikxe -DO iky = ikys,ikye -DO iz = izgs,izge - !!!!! Electron kernels !!!!! - IF(KIN_E) THEN - DO ij = ijgs_e, ijge_e - j_int = jarray_e(ij) - j_dp = REAL(j_int,dp) - y_ = sigmae2_taue_o2 * kparray(iky,ikx,iz,eo)**2 - IF(j_int .LT. 0) THEN - kernel_e(ij,iky,ikx,iz,eo) = 0._dp - ELSE - factj = GAMMA(j_dp+1._dp) - kernel_e(ij,iky,ikx,iz,eo) = y_**j_int*EXP(-y_)/factj - ENDIF +DO ia = 1,local_Na + DO eo = 1,2 + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO iz = 1,local_nz + Ngz + DO ij = 1, local_Nj + Ngj + j_int = jarray(ij) + j_dp = REAL(j_int,dp) + y_ = sigma2_tau_o2(ia) * kparray(iky,ikx,iz,eo)**2 + IF(j_int .LT. 0) THEN + kernel(ia,ij,iky,ikx,iz,eo) = 0._dp + ELSE + factj = GAMMA(j_dp+1._dp) + kernel(ia,ij,iky,ikx,iz,eo) = y_**j_int*EXP(-y_)/factj + ENDIF + ENDDO + ! IF (ijs .EQ. 1) & ! if ijs is 1, the ghost kernel has negative index + ! kernel(ia,ijgs,iky,ikx,iz,eo) = 0._dp ENDDO - IF (ijs_e .EQ. 1) & - kernel_e(ijgs_e,iky,ikx,iz,eo) = 0._dp - ENDIF - !!!!! Ion kernels !!!!! - DO ij = ijgs_i, ijge_i - j_int = jarray_i(ij) - j_dp = REAL(j_int,dp) - y_ = sigmai2_taui_o2 * kparray(iky,ikx,iz,eo)**2 - IF(j_int .LT. 0) THEN - kernel_i(ij,iky,ikx,iz,eo) = 0._dp - ELSE - factj = GAMMA(j_dp+1._dp) - kernel_i(ij,iky,ikx,iz,eo) = y_**j_int*EXP(-y_)/factj - ENDIF ENDDO - IF (ijs_i .EQ. 1) & - kernel_i(ijgs_i,iky,ikx,iz,eo) = 0._dp -ENDDO -ENDDO -ENDDO -ENDDO -!! Correction term for the evaluation of the heat flux -HF_phi_correction_operator(ikys:ikye,ikxs:ikxe,izs:ize) = & - 2._dp * Kernel_i(1,ikys:ikye,ikxs:ikxe,izs:ize,0) & - -1._dp * Kernel_i(2,ikys:ikye,ikxs:ikxe,izs:ize,0) + ENDDO + ENDDO + !! Correction term for the evaluation of the heat flux + HF_phi_correction_operator(:,:,:) = & + 2._dp * Kernel(ia,1,:,:,:,1) & + -1._dp * Kernel(ia,2,:,:,:,1) -DO ij = ijs_i, ije_i - j_int = jarray_i(ij) - j_dp = REAL(j_int,dp) - HF_phi_correction_operator(ikys:ikye,ikxs:ikxe,izs:ize) = HF_phi_correction_operator(ikys:ikye,ikxs:ikxe,izs:ize) & - - Kernel_i(ij,ikys:ikye,ikxs:ikxe,izs:ize,0) * (& - 2._dp*(j_dp+1.5_dp) * Kernel_i(ij ,ikys:ikye,ikxs:ikxe,izs:ize,0) & - - (j_dp+1.0_dp) * Kernel_i(ij+1,ikys:ikye,ikxs:ikxe,izs:ize,0) & - - j_dp * Kernel_i(ij-1,ikys:ikye,ikxs:ikxe,izs:ize,0)) + DO ij = 1, local_Nj + j_int = jarray(ij) + j_dp = REAL(j_int,dp) + HF_phi_correction_operator(:,:,:) = HF_phi_correction_operator(:,:,:) & + - Kernel(ia,ij,:,:,:,1) * (& + 2._dp*(j_dp+1.5_dp) * Kernel(ia,ij ,:,:,:,1) & + - (j_dp+1.0_dp) * Kernel(ia,ij+1,:,:,:,1) & + - j_dp * Kernel(ia,ij-1,:,:,:,1)) + ENDDO ENDDO END SUBROUTINE evaluate_kernels @@ -150,39 +132,40 @@ END SUBROUTINE evaluate_EM_op !******************************************************************************! 
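The kernels assembled in the hunk above follow K_j(y) = y**j * EXP(-y)/j!, with y = sigma2_tau_o2(ia)*kperp**2 and the factorial evaluated through GAMMA(j+1). A minimal standalone sketch of that formula, assuming illustrative values j = 2 and y = 0.5 (the program name and numbers are placeholders, not part of the source tree):

PROGRAM kernel_check
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)
  INTEGER  :: j_int
  REAL(dp) :: y_, j_dp, factj, Kj
  j_int = 2                           ! Laguerre index (assumed)
  y_    = 0.5_dp                      ! y = sigma^2*tau/2 * kperp^2 (assumed value)
  j_dp  = REAL(j_int,dp)
  factj = GAMMA(j_dp + 1._dp)         ! j! via the Gamma function, as in evaluate_kernels
  Kj    = y_**j_int * EXP(-y_)/factj  ! K_j(y) = y^j exp(-y) / j!
  PRINT *, 'K_j(y) =', Kj             ! ~0.0758 for j = 2, y = 0.5
END PROGRAM kernel_check

Because the field operators below use the kernels only through sums of K_j**2, and K_j decays rapidly with j at fixed y, truncating those sums at jmax+1 in the following hunks is consistent.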
SUBROUTINE evaluate_poisson_op USE basic - USE array, Only : kernel_e, kernel_i, inv_poisson_op, inv_pol_ion - USE grid - USE model, ONLY : qe2_taue, qi2_taui, KIN_E + USE array, ONLY : kernel, inv_poisson_op, inv_pol_ion + USE grid, ONLY : local_Na, local_nkx, local_nky, local_nz,& + kxarray, kyarray, jmax + USE species, ONLY : q2_tau + USE model, ONLY : ADIAB_E, tau_e + USE prec_const, ONLY: dp IMPLICIT NONE - REAL(dp) :: pol_i, pol_e ! (Z_a^2/tau_a (1-sum_n kernel_na^2)) - INTEGER :: ini,ine + REAL(dp) :: pol_ion, pol_tot, operator, operator_ion ! (Z^2/tau (1-sum_n kernel_na^2)) + INTEGER :: in,ikx,iky,iz,ia ! This term has no staggered grid dependence. It is evalued for the ! even z grid since poisson uses p=0 moments and phi only. - kxloop: DO ikx = ikxs,ikxe - kyloop: DO iky = ikys,ikye - zloop: DO iz = izs,ize + kxloop: DO ikx = 1,local_nkx + kyloop: DO iky = 1,local_nky + zloop: DO iz = 1,local_nz IF( (kxarray(ikx).EQ.0._dp) .AND. (kyarray(iky).EQ.0._dp) ) THEN inv_poisson_op(iky, ikx, iz) = 0._dp ELSE - !!!!!!!!!!!!!!!!! Ion contribution - ! loop over n only if the max polynomial degree - pol_i = 0._dp - DO ini=1,jmaxi+1 - pol_i = pol_i + qi2_taui*kernel_i(ini,iky,ikx,iz,0)**2 ! ... sum recursively ... - END DO - !!!!!!!!!!!!! Electron contribution - pol_e = 0._dp - IF (KIN_E) THEN ! Kinetic model - ! loop over n only if the max polynomial degree - DO ine=1,jmaxe+1 ! ine = n+1 - pol_e = pol_e + qe2_taue*kernel_e(ine,iky,ikx,iz,0)**2 ! ... sum recursively ... - END DO - ELSE ! Adiabatic model - pol_e = qe2_taue - 1._dp + operator = 0._dp + operator_ion = 0._dp + DO ia = 1,local_na ! sum over species + pol_tot = 0._dp ! total polarisation term + pol_ion = 0._dp ! sum of ion polarisation term + ! loop over n only up to the max polynomial degree + DO in=1,jmax+1 + pol_tot = pol_tot + q2_tau(ia)*kernel(ia,in,iky,ikx,iz,1)**2 ! ... sum recursively ... + pol_ion = pol_ion + q2_tau(ia)*kernel(ia,in,iky,ikx,iz,1)**2 ! + END DO + operator = operator + q2_tau(ia) - pol_tot + operator_ion = operator_ion + q2_tau(ia) - pol_ion ! kinetic (ion) polarisation only, without the adiabatic electron response + ENDDO + IF(ADIAB_E) THEN ! Adiabatic electron model + operator = operator + 1._dp/tau_e - 1._dp ENDIF - inv_poisson_op(iky, ikx, iz) = 1._dp/(qi2_taui - pol_i + qe2_taue - pol_e) - inv_pol_ion (iky, ikx, iz) = 1._dp/(qi2_taui - pol_i) + inv_poisson_op(iky, ikx, iz) = 1._dp/operator + inv_pol_ion (iky, ikx, iz) = 1._dp/operator_ion ENDIF END DO zloop END DO kyloop @@ -195,188 +178,140 @@ END SUBROUTINE evaluate_poisson_op !******************************************************************************! !!!!!!! Evaluate inverse polarisation operator for Poisson equation !******************************************************************************! SUBROUTINE evaluate_ampere_op - USE basic - USE array, Only : kernel_e, kernel_i, inv_ampere_op - USE grid - USE model, ONLY : q_e, q_i, beta, sigma_e, sigma_i + USE prec_const, ONLY : dp + USE array, ONLY : kernel, inv_ampere_op + USE grid, ONLY : local_Na, local_nkx, local_nky, local_nz, & + jmax, kparray, kxarray, kyarray, SOLVE_AMPERE + USE model, ONLY : beta + USE species, ONLY : q, sigma USE geometry, ONLY : hatB + USE prec_const, ONLY: dp IMPLICIT NONE - REAL(dp) :: pol_i, pol_e, kperp2 ! (Z_a^2/tau_a (1-sum_n kernel_na^2)) - INTEGER :: ini,ine - + REAL(dp) :: pol_tot, kperp2 ! (Z^2/tau (1-sum_n kernel_na^2)) + INTEGER :: in,ikx,iky,iz,ia ! We do not solve Ampere if beta = 0 to spare waste of ressources IF(SOLVE_AMPERE) THEN - ! This term has no staggered grid dependence. It is evalued for the - ! even z grid since poisson uses p=0 moments and phi only. 
- kxloop: DO ikx = ikxs,ikxe - kyloop: DO iky = ikys,ikye - zloop: DO iz = izs,ize - kperp2 = kparray(iky,ikx,iz,0)**2 + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO iz = 1,local_nz + kperp2 = kparray(iky,ikx,iz,1)**2 IF( (kxarray(ikx).EQ.0._dp) .AND. (kyarray(iky).EQ.0._dp) ) THEN inv_ampere_op(iky, ikx, iz) = 0._dp ELSE !!!!!!!!!!!!!!!!! Ion contribution - pol_i = 0._dp - ! loop over n only up to the max polynomial degree - DO ini=1,jmaxi+1 - pol_i = pol_i + kernel_i(ini,iky,ikx,iz,0)**2 ! ... sum recursively ... - END DO - pol_i = q_i**2/(sigma_i**2) * pol_i - !!!!!!!!!!!!! Electron contribution - pol_e = 0._dp - ! loop over n only up to the max polynomial degree - DO ine=1,jmaxe+1 ! ine = n+1 - pol_e = pol_e + kernel_e(ine,iky,ikx,iz,0)**2 ! ... sum recursively ... + pol_tot = 0._dp + DO ia = 1,local_na + ! loop over n only up to the max polynomial degree + DO in=1,jmax+1 + pol_tot = pol_tot + q(ia)**2/(sigma(ia)**2)*kernel(ia,in,iky,ikx,iz,1)**2 ! ... sum recursively ... + END DO END DO - pol_e = q_e**2/(sigma_e**2) * pol_e - inv_ampere_op(iky, ikx, iz) = 1._dp/(2._dp*kperp2*hatB(iz,0)**2 + beta*(pol_i + pol_e)) + inv_ampere_op(iky, ikx, iz) = 1._dp/(2._dp*kperp2*hatB(iz,0)**2 + beta*pol_tot) ENDIF - END DO zloop - END DO kyloop - END DO kxloop + END DO + END DO + END DO ENDIF - END SUBROUTINE evaluate_ampere_op !******************************************************************************! SUBROUTINE compute_lin_coeff - USE array, ONLY: xnepj, & - ynepp1j, ynepm1j, ynepp1jm1, ynepm1jm1,& - zNepm1j, zNepm1jp1, zNepm1jm1,& - xnepp1j, xnepm1j, xnepp2j, xnepm2j,& - xnepjp1, xnepjm1,& - xphij_e, xphijp1_e, xphijm1_e,& - xpsij_e, xpsijp1_e, xpsijm1_e,& - xnipj, & - ynipp1j, ynipm1j, ynipp1jm1, ynipm1jm1,& - zNipm1j, zNipm1jp1, zNipm1jm1,& - xnipp1j, xnipm1j, xnipp2j, xnipm2j,& - xnipjp1, xnipjm1,& - xphij_i, xphijp1_i, xphijm1_i,& - xpsij_i, xpsijp1_i, xpsijm1_i - USE model, ONLY: k_Te, k_Ti, k_Ne, k_Ni, k_cB, k_gB, KIN_E,& - tau_e, tau_i, sigma_e, sigma_i, q_e, q_i - USE prec_const - USE grid, ONLY: parray_e, parray_i, jarray_e, jarray_i, & - ip,ij, ips_e,ipe_e, ips_i,ipe_i, ijs_e,ije_e, ijs_i,ije_i + USE array, ONLY: xnapj, & + ynapp1j, ynapm1j, ynapp1jm1, ynapm1jm1,& + zNapm1j, zNapm1jp1, zNapm1jm1,& + xnapj, xnapjp1, xnapjm1,& + xnapp1j, xnapm1j, xnapp2j, xnapm2j,& + xphij, xphijp1, xphijm1,& + xpsij, xpsijp1, xpsijm1 + USE species, ONLY: k_T, k_N, tau, q, sqrtTau_q, tau_q + USE model, ONLY: k_cB, k_gB + USE prec_const, ONLY: dp, SQRT2, SQRT3 + USE grid, ONLY: parray, jarray, local_na, local_np, local_nj + INTEGER :: ia,ip,ij,p_int, j_int ! polynom. 
dagrees + REAL(dp) :: p_dp, j_dp - IF(KIN_E) THEN - CALL lin_coeff(k_Te,k_Ne,k_cB,k_gB,tau_e,q_e,sigma_e,& - parray_e(ips_e:ipe_e),jarray_e(ijs_e:ije_e),ips_e,ipe_e,ijs_e,ije_e,& - xnepj,xnepp1j,xnepm1j,xnepp2j,xnepm2j,xnepjp1,xnepjm1,& - ynepp1j,ynepm1j,ynepp1jm1,ynepm1jm1,zNepm1j,zNepm1jp1,zNepm1jm1,& - xphij_e,xphijp1_e,xphijm1_e,xpsij_e,xpsijp1_e,xpsijm1_e) - ENDIF - - CALL lin_coeff(k_Ti,k_Ni,k_cB,k_gB,tau_i,q_i,sigma_i,& - parray_i(ips_i:ipe_i),jarray_i(ijs_i:ije_i),ips_i,ipe_i,ijs_i,ije_i,& - xnipj,xnipp1j,xnipm1j,xnipp2j,xnipm2j,xnipjp1,xnipjm1,& - ynipp1j,ynipm1j,ynipp1jm1,ynipm1jm1,zNipm1j,zNipm1jp1,zNipm1jm1,& - xphij_i,xphijp1_i,xphijm1_i,xpsij_i,xpsijp1_i,xpsijm1_i) - - CONTAINS - SUBROUTINE lin_coeff(k_Ta,k_Na,k_cB,k_gB,tau_a,q_a,sigma_a,& - parray_a,jarray_a,ips_a,ipe_a,ijs_a,ije_a,& - xnapj,xnapp1j,xnapm1j,xnapp2j,xnapm2j,xnapjp1,xnapjm1,& - ynapp1j,ynapm1j,ynapp1jm1,ynapm1jm1,zNapm1j,zNapm1jp1,zNapm1jm1,& - xphij_a,xphijp1_a,xphijm1_a,xpsij_a,xpsijp1_a,xpsijm1_a) - IMPLICIT NONE - ! INPUTS - REAL(dp), INTENT(IN) :: k_Ta,k_Na,k_cB,k_gB,tau_a,q_a,sigma_a - INTEGER, DIMENSION(ips_a:ipe_a), INTENT(IN) :: parray_a - INTEGER, DIMENSION(ijs_a:ije_a), INTENT(IN) :: jarray_a - INTEGER, INTENT(IN) :: ips_a,ipe_a,ijs_a,ije_a - ! OUTPUTS (linear coefficients used in moment_eq_rhs_mod.F90) - REAL(dp), DIMENSION(ips_a:ipe_a,ijs_a:ije_a), INTENT(OUT) :: xnapj - REAL(dp), DIMENSION(ips_a:ipe_a), INTENT(OUT) :: xnapp1j, xnapm1j, xnapp2j, xnapm2j - REAL(dp), DIMENSION(ijs_a:ije_a), INTENT(OUT) :: xnapjp1, xnapjm1 - REAL(dp), DIMENSION(ips_a:ipe_a,ijs_a:ije_a), INTENT(OUT) :: ynapp1j, ynapm1j, ynapp1jm1, ynapm1jm1 - REAL(dp), DIMENSION(ips_a:ipe_a,ijs_a:ije_a), INTENT(OUT) :: zNapm1j, zNapm1jp1, zNapm1jm1 - REAL(dp), DIMENSION(ips_a:ipe_a,ijs_a:ije_a), INTENT(OUT) :: xphij_a, xphijp1_a, xphijm1_a - REAL(dp), DIMENSION(ips_a:ipe_a,ijs_a:ije_a), INTENT(OUT) :: xpsij_a, xpsijp1_a, xpsijm1_a - INTEGER :: p_int, j_int ! polynom. dagrees - REAL(dp) :: p_dp, j_dp - !! linear coefficients for moment RHS !!!!!!!!!! - DO ip = ips_a, ipe_a - p_int= parray_a(ip) ! Hermite degree - p_dp = REAL(p_int,dp) ! REAL of Hermite degree - DO ij = ijs_a, ije_a - j_int= jarray_a(ij) ! Laguerre degree - j_dp = REAL(j_int,dp) ! REAL of Laguerre degree - ! All Napj terms - xnapj(ip,ij) = tau_a/q_a*(k_cB*(2._dp*p_dp + 1._dp) & - +k_gB*(2._dp*j_dp + 1._dp)) - ! Mirror force terms - ynapp1j (ip,ij) = -SQRT(tau_a)/sigma_a * (j_dp+1._dp)*SQRT(p_dp+1._dp) - ynapm1j (ip,ij) = -SQRT(tau_a)/sigma_a * (j_dp+1._dp)*SQRT(p_dp) - ynapp1jm1(ip,ij) = +SQRT(tau_a)/sigma_a * j_dp*SQRT(p_dp+1._dp) - ynapm1jm1(ip,ij) = +SQRT(tau_a)/sigma_a * j_dp*SQRT(p_dp) - ! Trapping terms - zNapm1j (ip,ij) = +SQRT(tau_a)/sigma_a *(2._dp*j_dp+1._dp)*SQRT(p_dp) - zNapm1jp1(ip,ij) = -SQRT(tau_a)/sigma_a * (j_dp+1._dp)*SQRT(p_dp) - zNapm1jm1(ip,ij) = -SQRT(tau_a)/sigma_a * j_dp*SQRT(p_dp) - ENDDO - ENDDO - DO ip = ips_a, ipe_a - p_int= parray_a(ip) ! Hermite degree - p_dp = REAL(p_int,dp) ! REAL of Hermite degree - ! Landau damping coefficients (ddz napj term) - xnapp1j(ip) = SQRT(tau_a)/sigma_a * SQRT(p_dp+1._dp) - xnapm1j(ip) = SQRT(tau_a)/sigma_a * SQRT(p_dp) - ! Magnetic curvature term - xnapp2j(ip) = tau_a/q_a * k_cB * SQRT((p_dp+1._dp)*(p_dp + 2._dp)) - xnapm2j(ip) = tau_a/q_a * k_cB * SQRT( p_dp *(p_dp - 1._dp)) - ENDDO - DO ij = ijs_a, ije_a - j_int= jarray_a(ij) ! Laguerre degree + !! linear coefficients for moment RHS !!!!!!!!!! + DO ia = 1,local_na + DO ip = 1, local_np + p_int= parray(ip) ! Hermite degree + p_dp = REAL(p_int,dp) ! 
REAL of Hermite degree + DO ij = 1, local_nj + j_int= jarray(ij) ! Laguerre degree j_dp = REAL(j_int,dp) ! REAL of Laguerre degree - ! Magnetic gradient term - xnapjp1(ij) = -tau_a/q_a * k_gB * (j_dp + 1._dp) - xnapjm1(ij) = -tau_a/q_a * k_gB * j_dp + ! All Napj terms + xnapj(ia,ip,ij) = tau(ia)/q(ia)*(k_cB*(2._dp*p_dp + 1._dp) & + +k_gB*(2._dp*j_dp + 1._dp)) + ! Mirror force terms + ynapp1j (ia,ip,ij) = -sqrtTau_q(ia) * (j_dp+1._dp)*SQRT(p_dp+1._dp) + ynapm1j (ia,ip,ij) = -sqrtTau_q(ia) * (j_dp+1._dp)*SQRT(p_dp) + ynapp1jm1(ia,ip,ij) = +sqrtTau_q(ia) * j_dp*SQRT(p_dp+1._dp) + ynapm1jm1(ia,ip,ij) = +sqrtTau_q(ia) * j_dp*SQRT(p_dp) + ! Trapping terms + zNapm1j (ia,ip,ij) = +sqrtTau_q(ia) *(2._dp*j_dp+1._dp)*SQRT(p_dp) + zNapm1jp1(ia,ip,ij) = -sqrtTau_q(ia) * (j_dp+1._dp)*SQRT(p_dp) + zNapm1jm1(ia,ip,ij) = -sqrtTau_q(ia) * j_dp*SQRT(p_dp) ENDDO - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !! ES linear coefficients for moment RHS !!!!!!!!!! - DO ip = ips_a, ipe_a - p_int= parray_a(ip) ! Hermite degree - DO ij = ijs_a, ije_a - j_int= jarray_a(ij) ! REALof Laguerre degree - j_dp = REAL(j_int,dp) ! REALof Laguerre degree - !! Electrostatic potential pj terms - IF (p_int .EQ. 0) THEN ! kronecker p0 - xphij_a(ip,ij) = +k_Na + 2._dp*j_dp*k_Ta - xphijp1_a(ip,ij) = -k_Ta*(j_dp+1._dp) - xphijm1_a(ip,ij) = -k_Ta* j_dp - ELSE IF (p_int .EQ. 2) THEN ! kronecker p2 - xphij_a(ip,ij) = +k_Ta/SQRT2 - xphijp1_a(ip,ij) = 0._dp; xphijm1_a(ip,ij) = 0._dp; - ELSE - xphij_a(ip,ij) = 0._dp; xphijp1_a(ip,ij) = 0._dp - xphijm1_a(ip,ij) = 0._dp; - ENDIF - ENDDO + ENDDO + DO ip = 1, local_np + p_int= parray(ip) ! Hermite degree + p_dp = REAL(p_int,dp) ! REAL of Hermite degree + ! Landau damping coefficients (ddz napj term) + xnapp1j(ia,ip) = sqrtTau_q(ia) * SQRT(p_dp+1._dp) + xnapm1j(ia,ip) = sqrtTau_q(ia) * SQRT(p_dp) + ! Magnetic curvature term + xnapp2j(ia,ip) = tau_q(ia) * k_cB * SQRT((p_dp+1._dp)*(p_dp + 2._dp)) + xnapm2j(ia,ip) = tau_q(ia) * k_cB * SQRT( p_dp *(p_dp - 1._dp)) + ENDDO + DO ij = 1, local_nj + j_int= jarray(ij) ! Laguerre degree + j_dp = REAL(j_int,dp) ! REAL of Laguerre degree + ! Magnetic gradient term + xnapjp1(ia,ij) = -tau_q(ia) * k_gB * (j_dp + 1._dp) + xnapjm1(ia,ij) = -tau_q(ia) * k_gB * j_dp + ENDDO + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! ES linear coefficients for moment RHS !!!!!!!!!! + DO ip = 1, local_np + p_int= parray(ip) ! Hermite degree + DO ij = 1, local_nj + j_int= jarray(ij) ! REALof Laguerre degree + j_dp = REAL(j_int,dp) ! REALof Laguerre degree + !! Electrostatic potential pj terms + IF (p_int .EQ. 0) THEN ! kronecker p0 + xphij (ia,ip,ij) = +k_N(ia) + 2._dp*j_dp*k_T(ia) + xphijp1(ia,ip,ij) = -k_T(ia)*(j_dp+1._dp) + xphijm1(ia,ip,ij) = -k_T(ia)* j_dp + ELSE IF (p_int .EQ. 2) THEN ! kronecker p2 + xphij(ia,ip,ij) = +k_T(ia)/SQRT2 + xphijp1(ia,ip,ij) = 0._dp; xphijm1(ia,ip,ij) = 0._dp; + ELSE + xphij (ia,ip,ij) = 0._dp; xphijp1(ia,ip,ij) = 0._dp + xphijm1(ia,ip,ij) = 0._dp; + ENDIF ENDDO - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !! Electromagnatic linear coefficients for moment RHS !!!!!!!!!! - DO ip = ips_a, ipe_a - p_int= parray_a(ip) ! Hermite degree - DO ij = ijs_a, ije_a - j_int= jarray_a(ij) ! REALof Laguerre degree - j_dp = REAL(j_int,dp) ! REALof Laguerre degree - IF (p_int .EQ. 1) THEN ! kronecker p1 - xpsij_a (ip,ij) = +(k_Na + (2._dp*j_dp+1._dp)*k_Ta)* SQRT(tau_a)/sigma_a - xpsijp1_a(ip,ij) = - k_Ta*(j_dp+1._dp) * SQRT(tau_a)/sigma_a - xpsijm1_a(ip,ij) = - k_Ta* j_dp * SQRT(tau_a)/sigma_a - ELSE IF (p_int .EQ. 
3) THEN ! kronecker p3 - xpsij_a (ip,ij) = + k_Ta*SQRT3/SQRT2 * SQRT(tau_a)/sigma_a - xpsijp1_a(ip,ij) = 0._dp; xpsijm1_a(ip,ij) = 0._dp; - ELSE - xpsij_a (ip,ij) = 0._dp; xpsijp1_a(ip,ij) = 0._dp - xpsijm1_a(ip,ij) = 0._dp; - ENDIF - ENDDO + ENDDO + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! Electromagnatic linear coefficients for moment RHS !!!!!!!!!! + DO ip = 1, local_np + p_int= parray(ip) ! Hermite degree + DO ij = 1, local_nj + j_int= jarray(ij) ! REALof Laguerre degree + j_dp = REAL(j_int,dp) ! REALof Laguerre degree + IF (p_int .EQ. 1) THEN ! kronecker p1 + xpsij (ia,ip,ij) = +(k_N(ia) + (2._dp*j_dp+1._dp)*k_T(ia))* sqrtTau_q(ia) + xpsijp1(ia,ip,ij) = - k_T(ia)*(j_dp+1._dp) * sqrtTau_q(ia) + xpsijm1(ia,ip,ij) = - k_T(ia)* j_dp * sqrtTau_q(ia) + ELSE IF (p_int .EQ. 3) THEN ! kronecker p3 + xpsij (ia,ip,ij) = + k_T(ia)*SQRT3/SQRT2 * sqrtTau_q(ia) + xpsijp1(ia,ip,ij) = 0._dp; xpsijm1(ia,ip,ij) = 0._dp; + ELSE + xpsij (ia,ip,ij) = 0._dp; xpsijp1(ia,ip,ij) = 0._dp + xpsijm1(ia,ip,ij) = 0._dp; + ENDIF ENDDO - END SUBROUTINE lin_coeff + ENDDO + ENDDO END SUBROUTINE compute_lin_coeff END MODULE numerics diff --git a/src/parallel_mod.F90 b/src/parallel_mod.F90 index 3a048fed..6e4fc9c6 100644 --- a/src/parallel_mod.F90 +++ b/src/parallel_mod.F90 @@ -1,37 +1,134 @@ MODULE parallel - USE basic - USE grid - use prec_const - USE model, ONLY: KIN_E + use prec_const, ONLY : dp + USE mpi IMPLICIT NONE - ! recieve and displacement counts for gatherv - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_y, dsp_y, rcv_zy, dsp_zy - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zp_e, dsp_zp_e - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_yp_e, dsp_yp_e - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zyp_e, dsp_zyp_e - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zp_i, dsp_zp_i - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_yp_i, dsp_yp_i - INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zyp_i, dsp_zyp_i + ! Auxiliary variables + INTEGER, PUBLIC, PROTECTED :: comm0 ! Default communicator with a topology + INTEGER, PUBLIC, PROTECTED :: group0 ! Default group with a topology + INTEGER, PUBLIC, PROTECTED :: rank_0 ! Ranks in comm0 + ! Communicators for 1-dim cartesian subgrids of comm0 + INTEGER, PUBLIC, PROTECTED :: comm_p, comm_ky, comm_z + INTEGER, PUBLIC, PROTECTED :: rank_p, rank_ky, rank_z! Ranks + INTEGER, PUBLIC, PROTECTED :: comm_pz, rank_pz ! 2D comm for N_a(p,j,z) ouptut (mspfile) + INTEGER, PUBLIC, PROTECTED :: comm_kyz, rank_kyz ! 2D comm for N_a(p,j,z) ouptut (mspfile) + INTEGER, PUBLIC, PROTECTED :: comm_ky0, rank_ky0 ! comm along ky with p=0 + INTEGER, PUBLIC, PROTECTED :: comm_z0, rank_z0 ! comm along z with p=0 + INTEGER, PUBLIC, PROTECTED :: group_ky0, group_z0 + INTEGER, PUBLIC, PROTECTED :: ierr ! flag for MPI error + INTEGER, PUBLIC, PROTECTED :: my_id ! Rank in COMM_WORLD + INTEGER, PUBLIC, PROTECTED :: num_procs ! number of MPI processes + INTEGER, PUBLIC, PROTECTED :: num_procs_p ! Number of processes in p + INTEGER, PUBLIC, PROTECTED :: num_procs_ky ! Number of processes in r + INTEGER, PUBLIC, PROTECTED :: num_procs_z ! Number of processes in z + INTEGER, PUBLIC, PROTECTED :: num_procs_pz ! Number of processes in pz comm + INTEGER, PUBLIC, PROTECTED :: num_procs_kyz ! Number of processes in kyz comm + INTEGER, PUBLIC, PROTECTED :: nbr_L, nbr_R ! Left and right neighbours (along p) + INTEGER, PUBLIC, PROTECTED :: nbr_T, nbr_B ! Top and bottom neighbours (along kx) + INTEGER, PUBLIC, PROTECTED :: nbr_U, nbr_D ! Upstream and downstream neighbours (along z) + - ! 
Various buffers used - COMPLEX(dp), ALLOCATABLE, DIMENSION(:,:,:) :: buff_xy_zBC - COMPLEX(dp), ALLOCATABLE, DIMENSION(:,:,:,:,:) :: buff_pjxy_zBC_e - COMPLEX(dp), ALLOCATABLE, DIMENSION(:,:,:,:,:) :: buff_pjxy_zBC_i + ! recieve and displacement counts for gatherv + INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_p, dsp_p, rcv_y, dsp_y, rcv_zy, dsp_zy + INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zp, dsp_zp + INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_yp, dsp_yp + INTEGER, DIMENSION(:), ALLOCATABLE :: rcv_zyp, dsp_zyp - PUBLIC :: manual_0D_bcast, manual_3D_bcast, init_parallel_var, & - gather_xyz, gather_pjz_i, gather_pjxyz_e, gather_pjxyz_i + PUBLIC :: ppinit, manual_0D_bcast, manual_3D_bcast, init_parallel_var, & + gather_xyz, gather_pjz, gather_pjxyz CONTAINS - SUBROUTINE init_parallel_var + SUBROUTINE ppinit + ! Init the parallel environment + IMPLICIT NONE + ! Variables for cartesian domain decomposition + INTEGER, PARAMETER :: ndims=3 ! p, kx and z + INTEGER, DIMENSION(ndims) :: dims=0, coords=0 + LOGICAL :: periods(ndims) = .FALSE., reorder=.FALSE. + CHARACTER(len=32) :: str + INTEGER :: nargs, i, l + + CALL MPI_INIT(ierr) + + CALL MPI_COMM_RANK (MPI_COMM_WORLD, my_id, ierr) + CALL MPI_COMM_SIZE (MPI_COMM_WORLD, num_procs, ierr) + + nargs = COMMAND_ARGUMENT_COUNT() + ! + IF( nargs .GT. 1 ) THEN + DO i=1,ndims + CALL GET_COMMAND_ARGUMENT(i, str, l, ierr) + READ(str(1:l),'(i3)') dims(i) + END DO + IF( PRODUCT(dims) .NE. num_procs ) THEN + IF(my_id .EQ. 0) WRITE(*, '(a,i4,a,i4)') 'Product of dims: ', PRODUCT(dims), " is not consistent WITH NPROCS=",num_procs + CALL MPI_ABORT(MPI_COMM_WORLD, -2, ierr) + END IF + ELSE + ! CALL MPI_DIMS_CREATE(num_procs, ndims, dims, ierr) + dims(1) = 1 + dims(2) = num_procs + dims(3) = 1 + END IF + num_procs_p = dims(1) ! Number of processes along p + num_procs_ky = dims(2) ! Number of processes along kx + num_procs_z = dims(3) ! Number of processes along z + ! + !periodiciyt in p + periods(1)=.FALSE. + !periodiciyt in ky + periods(2)=.FALSE. + !periodiciyt in z + periods(3)=.TRUE. + CALL MPI_CART_CREATE(MPI_COMM_WORLD, ndims, dims, periods, reorder, comm0, ierr) + CALL MPI_COMM_GROUP(comm0,group0, ierr) + CALL MPI_COMM_RANK(comm0, rank_0, ierr) + CALL MPI_CART_COORDS(comm0,rank_0,ndims,coords,ierr) + ! + ! Partitions 3-dim cartesian topology of comm0 into 1-dim cartesian subgrids + ! + CALL MPI_CART_SUB (comm0, (/.TRUE.,.FALSE.,.FALSE./), comm_p, ierr) + CALL MPI_CART_SUB (comm0, (/.FALSE.,.TRUE.,.FALSE./), comm_ky, ierr) + CALL MPI_CART_SUB (comm0, (/.FALSE.,.FALSE.,.TRUE./), comm_z, ierr) + ! Find id inside the 1d-sub communicators + CALL MPI_COMM_RANK(comm_p, rank_p, ierr) + CALL MPI_COMM_RANK(comm_ky, rank_ky, ierr) + CALL MPI_COMM_RANK(comm_z, rank_z, ierr) + ! 2D communicator + CALL MPI_CART_SUB (comm0, (/.TRUE.,.FALSE.,.TRUE./), comm_pz, ierr) + CALL MPI_CART_SUB (comm0, (/.FALSE.,.TRUE.,.TRUE./), comm_kyz, ierr) + ! Count the number of processes in 2D comms + CALL MPI_COMM_SIZE(comm_pz, num_procs_pz, ierr) + CALL MPI_COMM_SIZE(comm_kyz,num_procs_kyz,ierr) + ! Find id inside the 1d-sub communicators + CALL MPI_COMM_RANK(comm_pz, rank_pz, ierr) + CALL MPI_COMM_RANK(comm_kyz, rank_kyz, ierr) + ! 
Find neighbours + CALL MPI_CART_SHIFT(comm0, 0, 1, nbr_L, nbr_R, ierr) !left right neighbours + CALL MPI_CART_SHIFT(comm0, 1, 1, nbr_B, nbr_T, ierr) !bottom top neighbours + CALL MPI_CART_SHIFT(comm0, 2, 1, nbr_D, nbr_U, ierr) !down up neighbours + END SUBROUTINE ppinit + + SUBROUTINE init_parallel_var(np_loc,np_tot,nky_loc,nky_tot,nz_loc) + IMPLICIT NONE + INTEGER, INTENT(IN) :: np_loc,np_tot,nky_loc,nky_tot,nz_loc INTEGER :: i_ + !! P reduction at constant x,y,z,j + ALLOCATE(rcv_p(0:num_procs_p-1),dsp_p(0:num_procs_p-1)) !Displacement sizes for balance diagnostic + ! all processes share their local number of points + CALL MPI_ALLGATHER(np_loc,1,MPI_INTEGER,rcv_p,1,MPI_INTEGER,comm_p,ierr) + ! the displacement array can be build from each np_loc as + dsp_p(0)=0 + DO i_=1,num_procs_p-1 + dsp_p(i_) =dsp_p(i_-1) + rcv_p(i_-1) + END DO !!!!!! XYZ gather variables !! Y reduction at constant x and z ! number of points recieved and displacement for the y reduction ALLOCATE(rcv_y(0:num_procs_ky-1),dsp_y(0:num_procs_ky-1)) !Displacement sizes for balance diagnostic ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nky,1,MPI_INTEGER,rcv_y,1,MPI_INTEGER,comm_ky,ierr) - ! the displacement array can be build from each local_np as + CALL MPI_ALLGATHER(nky_loc,1,MPI_INTEGER,rcv_y,1,MPI_INTEGER,comm_ky,ierr) + ! the displacement array can be build from each np_loc as dsp_y(0)=0 DO i_=1,num_procs_ky-1 dsp_y(i_) =dsp_y(i_-1) + rcv_y(i_-1) @@ -40,314 +137,202 @@ CONTAINS ! number of points recieved and displacement for the z reduction ALLOCATE(rcv_zy(0:num_procs_z-1),dsp_zy(0:num_procs_z-1)) !Displacement sizes for balance diagnostic ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nz*Nky,1,MPI_INTEGER,rcv_zy,1,MPI_INTEGER,comm_z,ierr) - ! the displacement array can be build from each local_np as + CALL MPI_ALLGATHER(nz_loc*nky_tot,1,MPI_INTEGER,rcv_zy,1,MPI_INTEGER,comm_z,ierr) + ! the displacement array can be build from each np_loc as dsp_zy(0)=0 DO i_=1,num_procs_z-1 dsp_zy(i_) =dsp_zy(i_-1) + rcv_zy(i_-1) END DO - !!!!! PJZ gather variables - ! IONS !! P reduction at constant j and z is already done in module GRID !! Z reduction for full slices of p data but constant j ! number of points recieved and displacement for the z reduction - ALLOCATE(rcv_zp_i(0:num_procs_z-1),dsp_zp_i(0:num_procs_z-1)) !Displacement sizes for balance diagnostic + ALLOCATE(rcv_zp(0:num_procs_z-1),dsp_zp(0:num_procs_z-1)) !Displacement sizes for balance diagnostic ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nz*Np_i,1,MPI_INTEGER,rcv_zp_i,1,MPI_INTEGER,comm_z,ierr) - ! the displacement array can be build from each local_np as - dsp_zp_i(0)=0 + CALL MPI_ALLGATHER(nz_loc*np_tot,1,MPI_INTEGER,rcv_zp,1,MPI_INTEGER,comm_z,ierr) + ! the displacement array can be build from each np_loc as + dsp_zp(0)=0 DO i_=1,num_procs_z-1 - dsp_zp_i(i_) =dsp_zp_i(i_-1) + rcv_zp_i(i_-1) + dsp_zp(i_) =dsp_zp(i_-1) + rcv_zp(i_-1) END DO - !!!!! PJXYZ gather variables !! Y reduction for full slices of p data but constant j ! number of points recieved and displacement for the y reduction - ALLOCATE(rcv_yp_i(0:num_procs_ky-1),dsp_yp_i(0:num_procs_ky-1)) !Displacement sizes for balance diagnostic + ALLOCATE(rcv_yp(0:num_procs_ky-1),dsp_yp(0:num_procs_ky-1)) !Displacement sizes for balance diagnostic ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nky*Np_i,1,MPI_INTEGER,rcv_yp_i,1,MPI_INTEGER,comm_ky,ierr) - ! 
the displacement array can be build from each local_np as - dsp_yp_i(0)=0 + CALL MPI_ALLGATHER(nky_loc*np_tot,1,MPI_INTEGER,rcv_yp,1,MPI_INTEGER,comm_ky,ierr) + ! the displacement array can be build from each np_loc as + dsp_yp(0)=0 DO i_=1,num_procs_ky-1 - dsp_yp_i(i_) =dsp_yp_i(i_-1) + rcv_yp_i(i_-1) + dsp_yp(i_) =dsp_yp(i_-1) + rcv_yp(i_-1) END DO !! Z reduction for full slices of py data but constant j ! number of points recieved and displacement for the z reduction - ALLOCATE(rcv_zyp_i(0:num_procs_z-1),dsp_zyp_i(0:num_procs_z-1)) !Displacement sizes for balance diagnostic - ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nz*Np_i*Nky,1,MPI_INTEGER,rcv_zyp_i,1,MPI_INTEGER,comm_z,ierr) - ! the displacement array can be build from each local_np as - dsp_zyp_i(0)=0 - DO i_=1,num_procs_z-1 - dsp_zyp_i(i_) =dsp_zyp_i(i_-1) + rcv_zyp_i(i_-1) - END DO - - ! ELECTONS - !! Z reduction for full slices of p data but constant j - ! number of points recieved and displacement for the z reduction - ALLOCATE(rcv_zp_e(0:num_procs_z-1),dsp_zp_e(0:num_procs_z-1)) !Displacement sizes for balance diagnostic + ALLOCATE(rcv_zyp(0:num_procs_z-1),dsp_zyp(0:num_procs_z-1)) !Displacement sizes for balance diagnostic ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nz*Np_e,1,MPI_INTEGER,rcv_zp_e,1,MPI_INTEGER,comm_z,ierr) - ! the displacement array can be build from each local_np as - dsp_zp_e(0)=0 + CALL MPI_ALLGATHER(nz_loc*np_tot*nky_tot,1,MPI_INTEGER,rcv_zyp,1,MPI_INTEGER,comm_z,ierr) + ! the displacement array can be build from each np_loc as + dsp_zyp(0)=0 DO i_=1,num_procs_z-1 - dsp_zp_e(i_) =dsp_zp_e(i_-1) + rcv_zp_e(i_-1) + dsp_zyp(i_) =dsp_zyp(i_-1) + rcv_zyp(i_-1) END DO - - !!!!! PJXYZ gather variables - !! Y reduction for full slices of p data but constant j - ! number of points recieved and displacement for the y reduction - ALLOCATE(rcv_yp_e(0:num_procs_ky-1),dsp_yp_e(0:num_procs_ky-1)) !Displacement sizes for balance diagnostic - ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nky*Np_e,1,MPI_INTEGER,rcv_yp_e,1,MPI_INTEGER,comm_ky,ierr) - ! the displacement array can be build from each local_np as - dsp_yp_e(0)=0 - DO i_=1,num_procs_ky-1 - dsp_yp_e(i_) =dsp_yp_e(i_-1) + rcv_yp_e(i_-1) - END DO - !! Z reduction for full slices of py data but constant j - ! number of points recieved and displacement for the z reduction - ALLOCATE(rcv_zyp_e(0:num_procs_z-1),dsp_zyp_e(0:num_procs_z-1)) !Displacement sizes for balance diagnostic - ! all processes share their local number of points - CALL MPI_ALLGATHER(local_nz*Np_e*Nky,1,MPI_INTEGER,rcv_zyp_e,1,MPI_INTEGER,comm_z,ierr) - ! the displacement array can be build from each local_np as - dsp_zyp_e(0)=0 - DO i_=1,num_procs_z-1 - dsp_zyp_e(i_) =dsp_zyp_e(i_-1) + rcv_zyp_e(i_-1) - END DO - - !! Allocate some buffers - ALLOCATE(buff_xy_zBC(ikys:ikye,ikxs:ikxe,-2:2)) - IF(KIN_E) & - ALLOCATE(buff_pjxy_zBC_e(ipgs_e:ipge_e,ijgs_e:ijge_e,ikys:ikye,ikxs:ikxe,-2:2)) - ALLOCATE(buff_pjxy_zBC_i(ipgs_i:ipge_i,ijgs_i:ijge_i,ikys:ikye,ikxs:ikxe,-2:2)) END SUBROUTINE init_parallel_var + SUBROUTINE parallel_ouptutinputs(fid) + ! + ! Write the input parameters to the results_xx.h5 file + ! 
+ USE futils, ONLY: attach, creatd + IMPLICIT NONE + INTEGER, INTENT(in) :: fid + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/parallel' + CALL creatd(fid, 0,(/0/),TRIM(str),'Parallel Input') + CALL attach(fid, TRIM(str), "Nproc", num_procs) + CALL attach(fid, TRIM(str), "Np_p" , num_procs_p) + CALL attach(fid, TRIM(str), "Np_kx",num_procs_ky) + CALL attach(fid, TRIM(str), "Np_z", num_procs_z) + END SUBROUTINE parallel_ouptutinputs + !!!!! Gather a field in spatial coordinates on rank 0 !!!!! - SUBROUTINE gather_xyz(field_sub,field_full) - COMPLEX(dp), DIMENSION(ikys:ikye, 1:Nkx, izs:ize), INTENT(IN) :: field_sub - COMPLEX(dp), DIMENSION( 1:Nky, 1:Nkx, 1:Nz), INTENT(OUT) :: field_full - COMPLEX(dp), DIMENSION(ikys:ikye) :: buffer_ly_cz !local y, constant z - COMPLEX(dp), DIMENSION( 1:Nky ) :: buffer_fy_cz !full y, constant z - COMPLEX(dp), DIMENSION( 1:Nky, izs:ize ) :: buffer_fy_lz !full y, local z - COMPLEX(dp), DIMENSION( 1:Nky, 1:Nz ) :: buffer_fy_fz !full y, full z + SUBROUTINE gather_xyz(field_loc,field_tot,nky_loc,nky_tot,nkx_tot,nz_loc,nz_tot) + IMPLICIT NONE + INTEGER, INTENT(IN) :: nky_loc,nky_tot,nkx_tot,nz_loc,nz_tot + COMPLEX(dp), DIMENSION(nky_loc,nkx_tot,nz_loc), INTENT(IN) :: field_loc + COMPLEX(dp), DIMENSION(nky_tot,nkx_tot,nz_tot), INTENT(OUT) :: field_tot + COMPLEX(dp), DIMENSION(nky_tot,nz_loc) :: buffer_yt_zl !full y, local z + COMPLEX(dp), DIMENSION(nky_tot,nz_tot) :: buffer_yt_zt !full y, full z + COMPLEX(dp), DIMENSION(nz_loc) :: buffer_yl_zc !local y, constant z + COMPLEX(dp), DIMENSION(nky_tot):: buffer_yt_zc !full y, constant z INTEGER :: snd_y, snd_z, root_p, root_z, root_ky, ix, iz - snd_y = local_nky ! Number of points to send along y (per z) - snd_z = Nky*local_nz ! Number of points to send along z (full y) + snd_y = nky_loc ! Number of points to send along y (per z) + snd_z = nky_tot*nz_loc ! Number of points to send along z (full y) root_p = 0; root_z = 0; root_ky = 0 IF(rank_p .EQ. root_p) THEN - DO ix = 1,Nkx - DO iz = izs,ize + DO ix = 1,nkx_tot + DO iz = 1,nz_loc ! fill a buffer to contain a slice of data at constant kx and z - buffer_ly_cz(ikys:ikye) = field_sub(ikys:ikye,ix,iz) - CALL MPI_GATHERV(buffer_ly_cz, snd_y, MPI_DOUBLE_COMPLEX, & - buffer_fy_cz, rcv_y, dsp_y, MPI_DOUBLE_COMPLEX, & + buffer_yl_zc(1:nky_loc) = field_loc(1:nky_loc,ix,iz) + CALL MPI_GATHERV(buffer_yl_zc, snd_y, MPI_DOUBLE_COMPLEX, & + buffer_yt_zc, rcv_y, dsp_y, MPI_DOUBLE_COMPLEX, & root_ky, comm_ky, ierr) - buffer_fy_lz(1:Nky,iz) = buffer_fy_cz(1:Nky) + buffer_yt_zl(1:nky_tot,iz) = buffer_yt_zc(1:nky_tot) ENDDO ! send the full line on y contained by root_kyas IF(rank_ky .EQ. 0) THEN - CALL MPI_GATHERV(buffer_fy_lz, snd_z, MPI_DOUBLE_COMPLEX, & - buffer_fy_fz, rcv_zy, dsp_zy, MPI_DOUBLE_COMPLEX, & + CALL MPI_GATHERV(buffer_yt_zl, snd_z, MPI_DOUBLE_COMPLEX, & + buffer_yt_zt, rcv_zy, dsp_zy, MPI_DOUBLE_COMPLEX, & root_z, comm_z, ierr) ENDIF - ! ID 0 (the one who output) rebuild the whole array + ! ID 0 (the one who ouptut) rebuild the whole array IF(my_id .EQ. 0) & - field_full(1:Nky,ix,1:Nz) = buffer_fy_fz(1:Nky,1:Nz) + field_tot(1:nky_tot,ix,1:nz_tot) = buffer_yt_zt(1:nky_tot,1:nz_tot) ENDDO ENDIF END SUBROUTINE gather_xyz !!!!! Gather a field in kinetic + z coordinates on rank 0 !!!!! 
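! --------------------------------------------------------------------------
! Illustration (not part of the patch hunks above or below): every gather_*
! routine in this module follows the same three-step MPI_GATHERV recipe:
! (1) share the local point count with MPI_ALLGATHER, (2) build the
! displacement array as a running sum of those counts, (3) collect the data
! on the root rank with MPI_GATHERV. A minimal self-contained sketch of that
! recipe is given below; the subroutine and argument names (gatherv_1d_demo,
! field_loc, field_tot, ...) are illustrative assumptions, not identifiers
! from this code base.
SUBROUTINE gatherv_1d_demo(field_loc, n_loc, field_tot, n_tot, comm)
  USE mpi
  IMPLICIT NONE
  INTEGER, PARAMETER :: dp = KIND(1.d0)          ! local stand-in for prec_const's dp
  INTEGER, INTENT(IN) :: n_loc, n_tot, comm
  COMPLEX(dp), INTENT(IN)  :: field_loc(n_loc)   ! locally owned slice
  COMPLEX(dp), INTENT(OUT) :: field_tot(n_tot)   ! full field, valid on rank 0
  INTEGER, ALLOCATABLE :: rcv(:), dsp(:)
  INTEGER :: nproc, i, ierr
  CALL MPI_COMM_SIZE(comm, nproc, ierr)
  ALLOCATE(rcv(0:nproc-1), dsp(0:nproc-1))
  ! (1) every rank shares how many points it owns
  CALL MPI_ALLGATHER(n_loc, 1, MPI_INTEGER, rcv, 1, MPI_INTEGER, comm, ierr)
  ! (2) displacements are the prefix sum of the receive counts
  dsp(0) = 0
  DO i = 1, nproc-1
    dsp(i) = dsp(i-1) + rcv(i-1)
  END DO
  ! (3) variable-length gather of the slices onto rank 0
  CALL MPI_GATHERV(field_loc, n_loc, MPI_DOUBLE_COMPLEX, &
                   field_tot, rcv, dsp, MPI_DOUBLE_COMPLEX, 0, comm, ierr)
  DEALLOCATE(rcv, dsp)
END SUBROUTINE gatherv_1d_demo
! --------------------------------------------------------------------------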
- SUBROUTINE gather_pjz_i(field_sub,field_full) - REAL(dp), DIMENSION(ips_i:ipe_i, 1:Nj_i, izs:ize), INTENT(IN) :: field_sub - REAL(dp), DIMENSION( 1:Np_i, 1:Nj_i, 1:Nz), INTENT(OUT) :: field_full - REAL(dp), DIMENSION(ips_i:ipe_i) :: buffer_lp_cz !local p, constant z - REAL(dp), DIMENSION( 1:Np_i ) :: buffer_fp_cz !full p, constant z - REAL(dp), DIMENSION( 1:Np_i, izs:ize ) :: buffer_fp_lz !full p, local z - REAL(dp), DIMENSION( 1:Np_i, 1:Nz ) :: buffer_fp_fz !full p, full z - INTEGER :: snd_p, snd_z, root_p, root_z, root_ky, ij, iz - - snd_p = local_np_i ! Number of points to send along p (per z) - snd_z = Np_i*local_nz ! Number of points to send along z (full p) - - root_p = 0; root_z = 0; root_ky = 0 - IF(rank_ky .EQ. root_ky) THEN - DO ij = 1,Nj_i - DO iz = izs,ize - ! fill a buffer to contain a slice of data at constant j and z - buffer_lp_cz(ips_i:ipe_i) = field_sub(ips_i:ipe_i,ij,iz) - CALL MPI_GATHERV(buffer_lp_cz, snd_p, MPI_DOUBLE_PRECISION, & - buffer_fp_cz, rcv_p_i, dsp_p_i, MPI_DOUBLE_PRECISION, & - root_p, comm_p, ierr) - buffer_fp_lz(1:Np_i,iz) = buffer_fp_cz(1:Np_i) - ENDDO - - ! send the full line on y contained by root_kyas - IF(rank_p .EQ. 0) THEN - CALL MPI_GATHERV(buffer_fp_lz, snd_z, MPI_DOUBLE_PRECISION, & - buffer_fp_fz, rcv_zp_i, dsp_zp_i, MPI_DOUBLE_PRECISION, & - root_z, comm_z, ierr) - ENDIF - ! ID 0 (the one who output) rebuild the whole array - IF(my_id .EQ. 0) & - field_full(1:Np_i,ij,1:Nz) = buffer_fp_fz(1:Np_i,1:Nz) - ENDDO - ENDIF - END SUBROUTINE gather_pjz_i - - SUBROUTINE gather_pjz_e(field_sub,field_full) - REAL(dp), DIMENSION(ips_e:ipe_e, 1:jmaxe+1, izs:ize), INTENT(IN) :: field_sub - REAL(dp), DIMENSION( 1:pmaxe+1, 1:jmaxe+1, 1:Nz), INTENT(OUT) :: field_full - REAL(dp), DIMENSION(ips_e:ipe_e) :: buffer_lp_cz !local p, constant z - REAL(dp), DIMENSION( 1:pmaxe+1 ) :: buffer_fp_cz !full p, constant z - REAL(dp), DIMENSION( 1:pmaxe+1, izs:ize ) :: buffer_fp_lz !full p, local z - REAL(dp), DIMENSION( 1:pmaxe+1, 1:Nz ) :: buffer_fp_fz !full p, full z + SUBROUTINE gather_pjz(field_loc,field_tot,np_loc,np_tot,nj_tot,nz_loc,nz_tot) + IMPLICIT NONE + INTEGER, INTENT(IN) :: np_loc,np_tot, nj_tot, nz_loc,nz_tot + REAL(dp), DIMENSION(np_loc,nj_tot,nz_loc), INTENT(IN) :: field_loc + REAL(dp), DIMENSION(np_tot,nj_tot,nz_tot), INTENT(OUT) :: field_tot + REAL(dp), DIMENSION(np_tot,nz_loc) :: buffer_pt_zl !full p, local z + REAL(dp), DIMENSION(np_tot,nz_tot) :: buffer_pt_zt !full p, full z + REAL(dp), DIMENSION(np_loc) :: buffer_pl_zc !local p, constant z + REAL(dp), DIMENSION(np_tot) :: buffer_pt_zc !full p, constant z INTEGER :: snd_p, snd_z, root_p, root_z, root_ky, ij, iz - snd_p = local_np_e ! Number of points to send along p (per z) - snd_z = Np_e*local_nz ! Number of points to send along z (full p) + snd_p = np_loc ! Number of points to send along p (per z) + snd_z = np_tot*nz_loc ! Number of points to send along z (full p) root_p = 0; root_z = 0; root_ky = 0 IF(rank_ky .EQ. root_ky) THEN - DO ij = 1,Nj_i - DO iz = izs,ize + DO ij = 1,nj_tot + DO iz = 1,nz_loc ! 
fill a buffer to contain a slice of data at constant j and z - buffer_lp_cz(ips_e:ipe_e) = field_sub(ips_e:ipe_e,ij,iz) - CALL MPI_GATHERV(buffer_lp_cz, snd_p, MPI_DOUBLE_PRECISION, & - buffer_fp_cz, rcv_p_e, dsp_p_e, MPI_DOUBLE_PRECISION, & + buffer_pl_zc(1:np_loc) = field_loc(1:np_loc,ij,iz) + CALL MPI_GATHERV(buffer_pl_zc, snd_p, MPI_DOUBLE_PRECISION, & + buffer_pt_zc, rcv_p, dsp_p, MPI_DOUBLE_PRECISION, & root_p, comm_p, ierr) - buffer_fp_lz(1:Np_e,iz) = buffer_fp_cz(1:Np_e) + buffer_pt_zl(1:np_tot,iz) = buffer_pt_zc(1:np_tot) ENDDO ! send the full line on y contained by root_kyas IF(rank_p .EQ. 0) THEN - CALL MPI_GATHERV(buffer_fp_lz, snd_z, MPI_DOUBLE_PRECISION, & - buffer_fp_fz, rcv_zp_e, dsp_zp_e, MPI_DOUBLE_PRECISION, & + CALL MPI_GATHERV(buffer_pt_zl, snd_z, MPI_DOUBLE_PRECISION, & + buffer_pt_zt, rcv_zp, dsp_zp, MPI_DOUBLE_PRECISION, & root_z, comm_z, ierr) ENDIF - ! ID 0 (the one who output) rebuild the whole array + ! ID 0 (the one who ouptut) rebuild the whole array IF(my_id .EQ. 0) & - field_full(1:Np_e,ij,1:Nz) = buffer_fp_fz(1:Np_e,1:Nz) + field_tot(1:np_tot,ij,1:nz_tot) = buffer_pt_zt(1:np_tot,1:nz_tot) ENDDO ENDIF - END SUBROUTINE gather_pjz_e + END SUBROUTINE gather_pjz !!!!! Gather a field in kinetic + spatial coordinates on rank 0 !!!!! !!!!! Gather a field in spatial coordinates on rank 0 !!!!! - SUBROUTINE gather_pjxyz_i(field_sub,field_full) - COMPLEX(dp), DIMENSION( ips_i:ipe_i, 1:Nj_i, ikys:ikye, 1:Nkx, izs:ize), INTENT(IN) :: field_sub - COMPLEX(dp), DIMENSION( 1:Np_i, 1:Nj_i, 1:Nky, 1:Nkx, 1:Nz), INTENT(OUT) :: field_full - COMPLEX(dp), DIMENSION(ips_i:ipe_i) :: buffer_lp_cy_cz !local p, constant y, constant z - COMPLEX(dp), DIMENSION(1:Np_i) :: buffer_fp_cy_cz ! full p, constant y, constant z - COMPLEX(dp), DIMENSION(1:Np_i, ikys:ikye) :: buffer_fp_ly_cz ! full p, local y, constant z - COMPLEX(dp), DIMENSION(1:Np_i, 1:Nky ) :: buffer_fp_fy_cz ! full p, full y, constant z - COMPLEX(dp), DIMENSION(1:Np_i, 1:Nky, izs:ize ) :: buffer_fp_fy_lz ! full p, full y, local z - COMPLEX(dp), DIMENSION(1:Np_i, 1:Nky, 1:Nz ) :: buffer_fp_fy_fz ! full p, full y, full z - INTEGER :: snd_p, snd_y, snd_z, root_p, root_z, root_ky, iy, ix, iz, ij - - snd_p = local_np_i ! Number of points to send along p (per z,y) - snd_y = Np_i*local_nky ! Number of points to send along y (per z, full p) - snd_z = Np_i*Nky*local_nz ! Number of points to send along z (full y, full p) - - root_p = 0; root_z = 0; root_ky = 0 - - j: DO ij = 1,Nj_i - x: DO ix = 1,Nkx - z: DO iz = izs,ize - y: DO iy = ikys,ikye - ! fill a buffer to contain a slice of p data at constant j, ky, kx and z - buffer_lp_cy_cz(ips_i:ipe_i) = field_sub(ips_i:ipe_i,ij,iy,ix,iz) - CALL MPI_GATHERV(buffer_lp_cy_cz, snd_p, MPI_DOUBLE_COMPLEX, & - buffer_fp_cy_cz, rcv_p_i, dsp_p_i, MPI_DOUBLE_COMPLEX, & - root_p, comm_p, ierr) - buffer_fp_ly_cz(1:Np_i,iy) = buffer_fp_cy_cz(1:Np_i) - ENDDO y - ! send the full line on p contained by root_p - IF(rank_p .EQ. 0) THEN - CALL MPI_GATHERV(buffer_fp_ly_cz, snd_y, MPI_DOUBLE_COMPLEX, & - buffer_fp_fy_cz, rcv_yp_i, dsp_yp_i, MPI_DOUBLE_COMPLEX, & - root_ky, comm_ky, ierr) - buffer_fp_fy_lz(1:Np_i,1:Nky,iz) = buffer_fp_fy_cz(1:Np_i,1:Nky) - ENDIF - ENDDO z - ! send the full line on y contained by root_kyas - IF(rank_ky .EQ. 0) THEN - CALL MPI_GATHERV(buffer_fp_fy_lz, snd_z, MPI_DOUBLE_COMPLEX, & - buffer_fp_fy_fz, rcv_zyp_i, dsp_zyp_i, MPI_DOUBLE_COMPLEX, & - root_z, comm_z, ierr) - ENDIF - ! ID 0 (the one who output) rebuild the whole array - IF(my_id .EQ. 
0) & - field_full(1:Np_i,ij,1:Nky,ix,1:Nz) = buffer_fp_fy_fz(1:Np_i,1:Nky,1:Nz) - ENDDO x - ENDDO j - - END SUBROUTINE gather_pjxyz_i - - SUBROUTINE gather_pjxyz_e(field_sub,field_full) - COMPLEX(dp), DIMENSION( ips_e:ipe_e, 1:Nj_e, ikys:ikye, 1:Nkx, izs:ize), INTENT(IN) :: field_sub - COMPLEX(dp), DIMENSION( 1:Np_e, 1:Nj_e, 1:Nky, 1:Nkx, 1:Nz), INTENT(OUT) :: field_full - COMPLEX(dp), DIMENSION(ips_e:ipe_e) :: buffer_lp_cy_cz !local p, constant y, constant z - COMPLEX(dp), DIMENSION(1:Np_e) :: buffer_fp_cy_cz ! full p, constant y, constant z - COMPLEX(dp), DIMENSION(1:Np_e, ikys:ikye) :: buffer_fp_ly_cz ! full p, local y, constant z - COMPLEX(dp), DIMENSION(1:Np_e, 1:Nky ) :: buffer_fp_fy_cz ! full p, full y, constant z - COMPLEX(dp), DIMENSION(1:Np_e, 1:Nky, izs:ize ) :: buffer_fp_fy_lz ! full p, full y, local z - COMPLEX(dp), DIMENSION(1:Np_e, 1:Nky, 1:Nz ) :: buffer_fp_fy_fz ! full p, full y, full z + SUBROUTINE gather_pjxyz(field_loc,field_tot,np_loc,np_tot,nj_tot,nky_loc,nky_tot,nkx_tot,nz_loc,nz_tot) + IMPLICIT NONE + INTEGER, INTENT(IN) :: np_loc,np_tot,nj_tot,nky_loc,nky_tot,nkx_tot,nz_loc,nz_tot + COMPLEX(dp), DIMENSION(np_loc,nj_tot,nky_loc,nkx_tot,nz_loc), INTENT(IN) :: field_loc + COMPLEX(dp), DIMENSION(np_tot,nj_tot,nky_tot,nkx_tot,nz_tot), INTENT(OUT) :: field_tot + COMPLEX(dp), DIMENSION(np_tot,nky_tot,nz_loc) :: buffer_pt_yt_zl ! full p, full y, local z + COMPLEX(dp), DIMENSION(np_tot,nky_tot,nz_tot) :: buffer_pt_yt_zt ! full p, full y, full z + COMPLEX(dp), DIMENSION(np_tot,nky_loc) :: buffer_pt_yl_zc ! full p, local y, constant z + COMPLEX(dp), DIMENSION(np_tot,nky_tot) :: buffer_pt_yt_zc ! full p, full y, constant z + COMPLEX(dp), DIMENSION(np_loc) :: buffer_pl_cy_zc !local p, constant y, constant z + COMPLEX(dp), DIMENSION(np_tot) :: buffer_pt_cy_zc ! full p, constant y, constant z INTEGER :: snd_p, snd_y, snd_z, root_p, root_z, root_ky, iy, ix, iz, ij - - snd_p = local_np_e ! Number of points to send along p (per z,y) - snd_y = Np_e*local_nky ! Number of points to send along y (per z, full p) - snd_z = Np_e*Nky*local_nz ! Number of points to send along z (full y, full p) - + snd_p = np_loc ! Number of points to send along p (per z,y) + snd_y = np_tot*nky_loc ! Number of points to send along y (per z, full p) + snd_z = np_tot*nky_tot*nz_loc ! Number of points to send along z (full y, full p) root_p = 0; root_z = 0; root_ky = 0 - - j: DO ij = 1,Nj_e - x: DO ix = 1,Nkx - z: DO iz = izs,ize - y: DO iy = ikys,ikye + j: DO ij = 1,nj_tot + x: DO ix = 1,nkx_tot + z: DO iz = 1,nz_loc + y: DO iy = 1,nky_loc ! fill a buffer to contain a slice of p data at constant j, ky, kx and z - buffer_lp_cy_cz(ips_e:ipe_e) = field_sub(ips_e:ipe_e,ij,iy,ix,iz) - CALL MPI_GATHERV(buffer_lp_cy_cz, snd_p, MPI_DOUBLE_COMPLEX, & - buffer_fp_cy_cz, rcv_p_e, dsp_p_e, MPI_DOUBLE_COMPLEX, & + buffer_pl_cy_zc(1:np_loc) = field_loc(1:np_loc,ij,iy,ix,iz) + CALL MPI_GATHERV(buffer_pl_cy_zc, snd_p, MPI_DOUBLE_COMPLEX, & + buffer_pt_cy_zc, rcv_p, dsp_p, MPI_DOUBLE_COMPLEX, & root_p, comm_p, ierr) - buffer_fp_ly_cz(1:Np_e,iy) = buffer_fp_cy_cz(1:Np_e) + buffer_pt_yl_zc(1:np_tot,iy) = buffer_pt_cy_zc(1:np_tot) ENDDO y ! send the full line on p contained by root_p IF(rank_p .EQ. 
0) THEN - CALL MPI_GATHERV(buffer_fp_ly_cz, snd_y, MPI_DOUBLE_COMPLEX, & - buffer_fp_fy_cz, rcv_yp_e, dsp_yp_e, MPI_DOUBLE_COMPLEX, & + CALL MPI_GATHERV(buffer_pt_yl_zc, snd_y, MPI_DOUBLE_COMPLEX, & + buffer_pt_yt_zc, rcv_yp, dsp_yp, MPI_DOUBLE_COMPLEX, & root_ky, comm_ky, ierr) - buffer_fp_fy_lz(1:Np_e,1:Nky,iz) = buffer_fp_fy_cz(1:Np_e,1:Nky) + buffer_pt_yt_zl(1:np_tot,1:nky_tot,iz) = buffer_pt_yt_zc(1:np_tot,1:nky_tot) ENDIF ENDDO z ! send the full line on y contained by root_kyas IF(rank_ky .EQ. 0) THEN - CALL MPI_GATHERV(buffer_fp_fy_lz, snd_z, MPI_DOUBLE_COMPLEX, & - buffer_fp_fy_fz, rcv_zyp_e, dsp_zyp_e, MPI_DOUBLE_COMPLEX, & + CALL MPI_GATHERV(buffer_pt_yt_zl, snd_z, MPI_DOUBLE_COMPLEX, & + buffer_pt_yt_zt, rcv_zyp, dsp_zyp, MPI_DOUBLE_COMPLEX, & root_z, comm_z, ierr) ENDIF - ! ID 0 (the one who output) rebuild the whole array + ! ID 0 (the one who ouptut) rebuild the whole array IF(my_id .EQ. 0) & - field_full(1:Np_e,ij,1:Nky,ix,1:Nz) = buffer_fp_fy_fz(1:Np_e,1:Nky,1:Nz) + field_tot(1:np_tot,ij,1:nky_tot,ix,1:nz_tot) = buffer_pt_yt_zt(1:np_tot,1:nky_tot,1:nz_tot) ENDDO x ENDDO j - - END SUBROUTINE gather_pjxyz_e + END SUBROUTINE gather_pjxyz !!!!! This is a manual way to do MPI_BCAST !!!!!!!!!!! - SUBROUTINE manual_3D_bcast(field_) - USE grid + SUBROUTINE manual_3D_bcast(field_,n1,n2,n3) IMPLICIT NONE - COMPLEX(dp), INTENT(INOUT) :: field_(ikys:ikye,ikxs:ikxe,izs:ize) - COMPLEX(dp) :: buffer(ikys:ikye,ikxs:ikxe,izs:ize) - INTEGER :: i_, root, world_rank, world_size, count + INTEGER, INTENT(IN) :: n1,n2,n3 + COMPLEX(dp), DIMENSION(n1,n2,n3), INTENT(INOUT) :: field_ + COMPLEX(dp) :: buffer(n1,n2,n3) + INTEGER :: i_, root, world_rank, world_size, count, i1,i2,i3 root = 0; - count = (ikye-ikys+1) * (ikxe-ikxs+1) * (ize-izs+1); + count = n1*n2*n3; CALL MPI_COMM_RANK(comm_p,world_rank,ierr) CALL MPI_COMM_SIZE(comm_p,world_size,ierr) @@ -355,10 +340,10 @@ CONTAINS !! Broadcast phi to the other processes on the same k range (communicator along p) IF (world_rank .EQ. root) THEN ! Fill the buffer - DO iz = izs,ize - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - buffer(iky,ikx,iz) = field_(iky,ikx,iz) + DO i3 = 1,n3 + DO i2 = 1,n2 + DO i1 = 1,n1 + buffer(i1,i2,i3) = field_(i1,i2,i3) ENDDO ENDDO ENDDO @@ -371,10 +356,10 @@ CONTAINS ! Recieve buffer from root CALL MPI_RECV(buffer, count, MPI_DOUBLE_COMPLEX, root, 0, comm_p, MPI_STATUS_IGNORE, ierr) ! Write it in phi - DO iz = izs,ize - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - field_(iky,ikx,iz) = buffer(iky,ikx,iz) + DO i3 = 1,n3 + DO i2 = 1,n2 + DO i1 = 1,n1 + field_(i1,i2,i3) = buffer(i1,i2,i3) ENDDO ENDDO ENDDO @@ -384,7 +369,6 @@ CONTAINS !!!!! This is a manual way to do MPI_BCAST !!!!!!!!!!! SUBROUTINE manual_0D_bcast(v) - USE grid IMPLICIT NONE COMPLEX(dp), INTENT(INOUT) :: v COMPLEX(dp) :: buffer diff --git a/src/ppexit.F90 b/src/ppexit.F90 index 0e557085..dc68c0b7 100644 --- a/src/ppexit.F90 +++ b/src/ppexit.F90 @@ -6,7 +6,7 @@ SUBROUTINE ppexit use prec_const IMPLICIT NONE - + INTEGER :: ierr CALL finalize_plans CALL MPI_BARRIER(MPI_COMM_WORLD, ierr) diff --git a/src/prec_const_mod.F90 b/src/prec_const_mod.F90 index a9ecaff6..ba8a8dd7 100644 --- a/src/prec_const_mod.F90 +++ b/src/prec_const_mod.F90 @@ -6,7 +6,6 @@ MODULE prec_const stdin=>input_unit, & stdout=>output_unit, & stderr=>error_unit - ! 
Precision for real and complex INTEGER, PARAMETER :: sp = REAL32 !Single precision, should not be used INTEGER, PARAMETER :: dp = REAL64 !real(dp), enforced through the code diff --git a/src/processing_mod.F90 b/src/processing_mod.F90 index e5ccd88c..49aaf22e 100644 --- a/src/processing_mod.F90 +++ b/src/processing_mod.F90 @@ -1,384 +1,213 @@ MODULE processing - USE basic - USE prec_const - USE grid - implicit none - - REAL(dp), PUBLIC, PROTECTED :: pflux_ri, gflux_ri, pflux_re, gflux_re - REAL(dp), PUBLIC, PROTECTED :: hflux_xi, hflux_xe - - PUBLIC :: compute_nadiab_moments_z_gradients_and_interp - PUBLIC :: compute_density, compute_upar, compute_uperp - PUBLIC :: compute_Tpar, compute_Tperp, compute_fluid_moments - PUBLIC :: compute_radial_ion_transport, compute_radial_electron_transport - PUBLIC :: compute_radial_ion_heatflux, compute_radial_electron_heatflux - PUBLIC :: compute_Napjz_spectrum + USE prec_const, ONLY: dp, imagu, SQRT2, SQRT3 + USE grid, ONLY: & + local_na, local_np, local_nj, local_nky, local_nkx, local_nz, Ngz,Ngj,Ngp, & + parray,pmax,ip0,& + CONTAINSp0,ip1,CONTAINSp1,ip2,CONTAINSp2,ip3,CONTAINSp3,& + jarray,jmax,ij0, dmax,& + kyarray, AA_y,& + kxarray, AA_x,& + zarray, deltaz, ieven, iodd, inv_deltaz + USE fields, ONLY: moments, phi, psi + USE array, ONLY : kernel, nadiab_moments, & + ddz_napj, ddzND_Napj, interp_napj,& + dens, upar, uper, Tpar, Tper, temp + USE geometry, ONLY: Jacobian, iInt_Jacobian + USE time_integration, ONLY: updatetlevel + USE calculus, ONLY: simpson_rule_z, grad_z, grad_z2, grad_z4, interp_z + USE model, ONLY: EM, CLOS, beta, HDz_h + USE species, ONLY: tau,q_tau,q_o_sqrt_tau_sigma,sqrt_tau_o_sigma + USE basic, ONLY: t0_process, t1_process, tc_process + USE parallel, ONLY: num_procs_ky, rank_ky, comm_ky + USE mpi + implicit none + + REAL(dp), PUBLIC, ALLOCATABLE, DIMENSION(:), PROTECTED :: pflux_x, gflux_x + REAL(dp), PUBLIC, ALLOCATABLE, DIMENSION(:), PROTECTED :: hflux_x + INTEGER :: ierr + PUBLIC :: init_process + PUBLIC :: compute_nadiab_moments_z_gradients_and_interp + PUBLIC :: compute_density, compute_upar, compute_uperp + PUBLIC :: compute_Tpar, compute_Tperp, compute_fluid_moments + PUBLIC :: compute_radial_transport + PUBLIC :: compute_radial_heatflux + PUBLIC :: compute_Napjz_spectrum CONTAINS -! 1D diagnostic to compute the average radial particle transport <n_i v_ExB_x>_xyz -SUBROUTINE compute_radial_ion_transport - USE fields, ONLY : moments_i, phi, psi - USE array, ONLY : kernel_i - USE geometry, ONLY : Jacobian, iInt_Jacobian - USE time_integration, ONLY : updatetlevel - USE calculus, ONLY : simpson_rule_z - USE model, ONLY : sqrt_tau_o_sigma_i, EM +SUBROUTINE init_process + USE grid, ONLY: local_na IMPLICIT NONE - COMPLEX(dp) :: pflux_local, gflux_local, integral - REAL(dp) :: ky_, buffer(1:2) - INTEGER :: i_, root - COMPLEX(dp), DIMENSION(izgs:izge) :: integrant - - pflux_local = 0._dp ! particle flux - gflux_local = 0._dp ! gyrocenter flux - integrant = 0._dp ! auxiliary variable for z integration - !!---------- Gyro center flux (drift kinetic) ------------ - ! Electrostatic part - IF(CONTAINS_ip0_i) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - integrant(izgs:izge) = integrant(izgs:izge) & - +moments_i(ip0_i,ij0_i,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*CONJG(phi(iky,ikx,izgs:izge)) - ENDDO - ENDDO - ENDIF - ! Electromagnetic part - IF( EM .AND. 
CONTAINS_ip1_i ) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - integrant(izgs:izge) = integrant(izgs:izge)& - -sqrt_tau_o_sigma_i*moments_i(ip1_i,ij0_i,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*CONJG(psi(iky,ikx,izgs:izge)) - ENDDO - ENDDO - ENDIF - ! Integrate over z - integrant(izgs:izge) = Jacobian(izgs:izge,0)*integrant(izgs:izge) - call simpson_rule_z(integrant,integral) - ! Get process local gyrocenter flux - gflux_local = integral*iInt_Jacobian + ALLOCATE( pflux_x(local_na)) + ALLOCATE( gflux_x(local_na)) + ALLOCATE( hflux_x(local_na)) +END SUBROUTINE init_process - ! - integrant = 0._dp ! reset auxiliary variable - !!---------- Particle flux (gyrokinetic) ------------ - ! Electrostatic part - IF(CONTAINS_ip0_i) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - DO ij = ijs_i, ije_i - integrant(izgs:izge) = integrant(izgs:izge)& - +moments_i(ip0_i,ij,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*kernel_i(ij,iky,ikx,izgs:izge,0)*CONJG(phi(iky,ikx,izgs:izge)) +! 1D diagnostic to compute the average radial particle transport <n_a v_ExB_x>_xyz +SUBROUTINE compute_radial_transport + IMPLICIT NONE + COMPLEX(dp) :: pflux_local, gflux_local, integral + REAL(dp) :: buffer(2) + INTEGER :: i_, root, iky, ikx, ia, iz, in, iodd, ieven + COMPLEX(dp), DIMENSION(local_nz+Ngz) :: integrant + DO ia = 1,local_na + pflux_local = 0._dp ! particle flux + gflux_local = 0._dp ! gyrocenter flux + integrant = 0._dp ! auxiliary variable for z integration + !!---------- Gyro center flux (drift kinetic) ------------ + ! Electrostatic part + IF(CONTAINSp0) THEN + DO iz = 1,local_nz+ngz ! we include ghost for integration + DO ikx = 1,local_nkx + DO iky = 1,local_nky + integrant(iz) = integrant(iz) & + +Jacobian(iz,ieven)*moments(ia,ip0,ij0,iky,ikx,iz,updatetlevel)& + *imagu*kyarray(iky)*CONJG(phi(iky,ikx,iz)) ENDDO - ENDDO - ENDDO - ENDIF - ! Electromagnetic part - IF( EM .AND. CONTAINS_ip1_i ) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - integrant = 0._dp ! auxiliary variable for z integration - DO ij = ijs_i, ije_i - integrant(izgs:izge) = integrant(izgs:izge)& - -sqrt_tau_o_sigma_i*moments_i(ip1_i,ij,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*kernel_i(ij,iky,ikx,izgs:izge,0)*CONJG(psi(iky,ikx,izgs:izge)) ENDDO ENDDO - ENDDO - ENDIF - ! Integrate over z - integrant(izgs:izge) = Jacobian(izgs:izge,0)*integrant(izgs:izge) - call simpson_rule_z(integrant,integral) - ! Get process local particle flux - pflux_local = integral*iInt_Jacobian - - !!!!---------- Sum over all processes ---------- - buffer(1) = 2._dp*REAL(gflux_local) - buffer(2) = 2._dp*REAL(pflux_local) - root = 0 - !Gather manually among the rank_p=0 processes and perform the sum - gflux_ri = 0 - pflux_ri = 0 - IF (num_procs_ky .GT. 1) THEN - !! Everyone sends its local_sum to root = 0 - IF (rank_ky .NE. root) THEN - CALL MPI_SEND(buffer, 2 , MPI_DOUBLE_PRECISION, root, 1234, comm_ky, ierr) - ELSE - ! Recieve from all the other processes - DO i_ = 0,num_procs_ky-1 - IF (i_ .NE. rank_ky) & - CALL MPI_RECV(buffer, 2 , MPI_DOUBLE_PRECISION, i_, 1234, comm_ky, MPI_STATUS_IGNORE, ierr) - gflux_ri = gflux_ri + buffer(1) - pflux_ri = pflux_ri + buffer(2) + ENDIF + ! Electromagnetic part + IF( EM .AND. 
CONTAINSp1 ) THEN + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + integrant(iz) = integrant(iz)& + -Jacobian(iz,iodd)*sqrt_tau_o_sigma(ia)*moments(ia,ip1,ij0,iky,ikx,iz,updatetlevel)& + *imagu*kyarray(iky)*CONJG(psi(iky,ikx,iz)) ENDDO - ENDIF - ELSE - gflux_ri = gflux_local - pflux_ri = pflux_local - ENDIF - ! if(my_id .eq. 0) write(*,*) 'pflux_ri = ',pflux_ri -END SUBROUTINE compute_radial_ion_transport - -! 1D diagnostic to compute the average radial particle transport <n_e v_ExB_x>_xyz -SUBROUTINE compute_radial_electron_transport - USE fields, ONLY : moments_e, phi, psi - USE array, ONLY : kernel_e - USE geometry, ONLY : Jacobian, iInt_Jacobian - USE time_integration, ONLY : updatetlevel - USE calculus, ONLY : simpson_rule_z - USE model, ONLY : sqrt_tau_o_sigma_e, EM - IMPLICIT NONE - COMPLEX(dp) :: pflux_local, gflux_local, integral - REAL(dp) :: ky_, buffer(1:2) - INTEGER :: i_, root - COMPLEX(dp), DIMENSION(izgs:izge) :: integrant - - pflux_local = 0._dp ! particle flux - gflux_local = 0._dp ! gyrocenter flux - integrant = 0._dp ! auxiliary variable for z integration - !!---------- Gyro center flux (drift kinetic) ------------ - ! Electrostatic part - IF(CONTAINS_ip0_e) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - integrant(izgs:izge) = integrant(izgs:izge) & - +moments_e(ip0_e,ij0_e,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*CONJG(phi(iky,ikx,izgs:izge)) + ENDDO ENDDO - ENDDO - ENDIF - ! Electromagnetic part - IF( EM .AND. CONTAINS_ip1_e ) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - integrant(izgs:izge) = integrant(izgs:izge)& - -sqrt_tau_o_sigma_e*moments_e(ip1_e,ij0_e,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*CONJG(psi(iky,ikx,izgs:izge)) + ENDIF + ! Integrate over z + call simpson_rule_z(local_nz,deltaz,integrant,integral) + ! Get process local gyrocenter flux with a factor two to account for the negative ky modes + gflux_local = 2._dp*integral*iInt_Jacobian + + ! + integrant = 0._dp ! reset auxiliary variable + !!---------- Particle flux (gyrokinetic) ------------ + ! Electrostatic part + IF(CONTAINSp0) THEN + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO in = 1+ngj/2, local_nj+ngj/2 ! only interior points + integrant(iz) = integrant(iz)+ & + Jacobian(iz,ieven)*moments(ia,ip0,in,iky,ikx,iz,updatetlevel)& + *imagu*kyarray(iky)*kernel(ia,in,iky,ikx,iz,ieven)*CONJG(phi(iky,ikx,iz)) + ENDDO + ENDDO + ENDDO ENDDO - ENDDO - ENDIF - ! Integrate over z - integrant(izgs:izge) = Jacobian(izgs:izge,0)*integrant(izgs:izge) - call simpson_rule_z(integrant,integral) - ! Get process local gyrocenter flux - gflux_local = integral*iInt_Jacobian - ! - integrant = 0._dp ! reset auxiliary variable - !!---------- Particle flux (gyrokinetic) ------------ - ! Electrostatic part - IF(CONTAINS_ip0_e) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - DO ij = ijs_e, ije_e - integrant(izgs:izge) = integrant(izgs:izge)& - +moments_e(ip0_e,ij,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*kernel_e(ij,iky,ikx,izgs:izge,0)*CONJG(phi(iky,ikx,izgs:izge)) + ENDIF + ! Electromagnetic part + IF( EM .AND. CONTAINSp1 ) THEN + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO in = 1+ngj/2, local_nj+ngj/2 ! only interior points + integrant(iz) = integrant(iz) - & + Jacobian(iz,iodd)*sqrt_tau_o_sigma(ia)*moments(ia,ip1,in,iky,ikx,iz,updatetlevel)& + *imagu*kyarray(iky)*kernel(ia,in,iky,ikx,iz,iodd)*CONJG(psi(iky,ikx,iz)) + ENDDO ENDDO - ENDDO - ENDDO - ENDIF - ! 
Electromagnetic part - IF( EM .AND. CONTAINS_ip1_e ) THEN - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - integrant = 0._dp ! auxiliary variable for z integration - DO ij = ijs_e, ije_e - integrant(izgs:izge) = integrant(izgs:izge)& - -sqrt_tau_o_sigma_e*moments_e(ip1_e,ij,iky,ikx,izgs:izge,updatetlevel)& - *imagu*ky_*kernel_e(ij,iky,ikx,izgs:izge,0)*CONJG(psi(iky,ikx,izgs:izge)) ENDDO ENDDO - ENDDO - ENDIF - ! Integrate over z - integrant(izgs:izge) = Jacobian(izgs:izge,0)*integrant(izgs:izge) - call simpson_rule_z(integrant,integral) - ! Get process local particle flux - pflux_local = integral*iInt_Jacobian - - !!!!---------- Sum over all processes ---------- - buffer(1) = 2._dp*REAL(gflux_local) - buffer(2) = 2._dp*REAL(pflux_local) - root = 0 - !Gather manually among the rank_p=0 processes and perform the sum - gflux_re = 0 - pflux_re = 0 - IF (num_procs_ky .GT. 1) THEN - !! Everyone sends its local_sum to root = 0 - IF (rank_ky .NE. root) THEN - CALL MPI_SEND(buffer, 2 , MPI_DOUBLE_PRECISION, root, 1234, comm_ky, ierr) - ELSE - ! Recieve from all the other processes - DO i_ = 0,num_procs_ky-1 - IF (i_ .NE. rank_ky) & - CALL MPI_RECV(buffer, 2 , MPI_DOUBLE_PRECISION, i_, 1234, comm_ky, MPI_STATUS_IGNORE, ierr) - gflux_re = gflux_re + buffer(1) - pflux_re = pflux_re + buffer(2) - ENDDO - ENDIF - ELSE - gflux_re = gflux_local - pflux_re = pflux_local - ENDIF -END SUBROUTINE compute_radial_electron_transport + ENDIF + ! Integrate over z + call simpson_rule_z(local_nz,deltaz,integrant,integral) + ! Get process local particle flux with a factor two to account for the negative ky modes + pflux_local = 2._dp*integral*iInt_Jacobian + + !!!!---------- Sum over all processes ---------- + buffer(1) = REAL(gflux_local) + buffer(2) = REAL(pflux_local) + root = 0 + !Gather manually among the rank_p=0 processes and perform the sum + gflux_x(ia) = 0 + pflux_x(ia) = 0 + IF (num_procs_ky .GT. 1) THEN + !! Everyone sends its local_sum to root = 0 + IF (rank_ky .NE. root) THEN + CALL MPI_SEND(buffer, 2 , MPI_DOUBLE_PRECISION, root, 1234, comm_ky, ierr) + ELSE + ! Recieve from all the other processes + DO i_ = 0,num_procs_ky-1 + IF (i_ .NE. rank_ky) & + CALL MPI_RECV(buffer, 2 , MPI_DOUBLE_PRECISION, i_, 1234, comm_ky, MPI_STATUS_IGNORE, ierr) + gflux_x(ia) = gflux_x(ia) + buffer(1) + pflux_x(ia) = pflux_x(ia) + buffer(2) + ENDDO + ENDIF + ELSE + gflux_x(ia) = gflux_local + pflux_x(ia) = pflux_local + ENDIF + ENDDO + ! if(my_id .eq. 0) write(*,*) 'pflux_ri = ',pflux_ri +END SUBROUTINE compute_radial_transport ! 1D diagnostic to compute the average radial particle transport <T_i v_ExB_x>_xyz -SUBROUTINE compute_radial_ion_heatflux - USE fields, ONLY : moments_i, phi, psi - USE array, ONLY : kernel_i!, HF_phi_correction_operator - USE geometry, ONLY : Jacobian, iInt_Jacobian - USE time_integration, ONLY : updatetlevel - USE calculus, ONLY : simpson_rule_z - USE model, ONLY : tau_i, sqrt_tau_o_sigma_i, EM +SUBROUTINE compute_radial_heatflux IMPLICIT NONE COMPLEX(dp) :: hflux_local, integral - REAL(dp) :: ky_, buffer(1:2), n_dp - INTEGER :: i_, root, in - COMPLEX(dp), DIMENSION(izgs:izge) :: integrant ! charge density q_a n_a - + REAL(dp) :: buffer(2), n_dp + INTEGER :: i_, root, in, ia, iky, ikx, iz + COMPLEX(dp), DIMENSION(local_nz+ngz) :: integrant ! charge density q_a n_a + DO ia = 1,local_na hflux_local = 0._dp ! heat flux integrant = 0._dp ! z integration auxiliary variable !!----------------ELECTROSTATIC CONTRIBUTION--------------------------- - IF(CONTAINS_ip0_i .AND. 
CONTAINS_ip2_i) THEN - ! Loop to compute gamma_kx = sum_ky sum_j -i k_z Kernel_j Ni00 * phi - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - DO in = ijs_i, ije_i - n_dp = jarray_i(in) - integrant(izgs:izge) = integrant(izgs:izge) + tau_i*imagu*ky_*CONJG(phi(iky,ikx,izgs:izge))& - *kernel_i(in,iky,ikx,izgs:izge,0)*(& - 0.5_dp*SQRT2*moments_i(ip2_i,in ,iky,ikx,izgs:izge,updatetlevel)& - +(2._dp*n_dp + 1.5_dp)*moments_i(ip0_i,in ,iky,ikx,izgs:izge,updatetlevel)& - -(n_dp+1._dp)*moments_i(ip0_i,in+1,iky,ikx,izgs:izge,updatetlevel)& - -n_dp*moments_i(ip0_i,in-1,iky,ikx,izgs:izge,updatetlevel)) - ENDDO - ENDDO - ENDDO - ENDIF - IF(EM .AND. CONTAINS_ip1_i .AND. CONTAINS_ip3_i) THEN - !!----------------ELECTROMAGNETIC CONTRIBUTION-------------------- - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - DO in = ijs_i, ije_i - n_dp = jarray_i(in) - integrant(izgs:izge) = integrant(izgs:izge) & - +tau_i*sqrt_tau_o_sigma_i*imagu*ky_*CONJG(psi(iky,ikx,izgs:izge))& - *kernel_i(in,iky,ikx,izgs:izge,0)*(& - 0.5_dp*SQRT2*SQRT3*moments_i(ip3_i,in ,iky,ikx,izgs:izge,updatetlevel)& - +1.5_dp*moments_i(ip1_i,in ,iky,ikx,izgs:izge,updatetlevel)& - +(2._dp*n_dp+1._dp)*moments_i(ip1_i,in ,iky,ikx,izgs:izge,updatetlevel)& - -(n_dp+1._dp)*moments_i(ip1_i,in+1,iky,ikx,izgs:izge,updatetlevel)& - -n_dp*moments_i(ip1_i,in-1,iky,ikx,izgs:izge,updatetlevel)) + IF(CONTAINSp0 .AND. CONTAINSp2) THEN + ! Loop to compute gamma_kx = sum_ky sum_j -i k_z Kernel_j Na00 * phi + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO in = 1+ngj/2, local_nj+ngj/2 ! only interior points + n_dp = jarray(in) + integrant(iz) = integrant(iz) & + +Jacobian(iz,ieven)*tau(ia)*imagu*kyarray(iky)*CONJG(phi(iky,ikx,iz))& + *kernel(ia,in,iky,ikx,iz,ieven)*(& + 0.5_dp*SQRT2*moments(ia,ip2,in ,iky,ikx,iz,updatetlevel)& + +(2._dp*n_dp + 1.5_dp)*moments(ia,ip0,in ,iky,ikx,iz,updatetlevel)& + -(n_dp+1._dp)*moments(ia,ip0,in+1,iky,ikx,iz,updatetlevel)& + -n_dp*moments(ia,ip0,in-1,iky,ikx,iz,updatetlevel)) ENDDO ENDDO ENDDO - ENDIF - ! Add polarisation contribution - ! integrant(izgs:izge) = integrant(izgs:izge) + tau_i*imagu*ky_& - ! *CONJG(phi(iky,ikx,izgs:izge))*phi(iky,ikx,izgs:izge) * HF_phi_correction_operator(iky,ikx,izgs:izge) - ! Integrate over z - integrant(izgs:izge) = Jacobian(izgs:izge,0)*integrant(izgs:izge) - call simpson_rule_z(integrant,integral) - hflux_local = hflux_local + integral*iInt_Jacobian - ! Double it for taking into account the other half plane - buffer(2) = 2._dp*REAL(hflux_local) - root = 0 - !Gather manually among the rank_p=0 processes and perform the sum - hflux_xi = 0 - IF (num_procs_ky .GT. 1) THEN - !! Everyone sends its local_sum to root = 0 - IF (rank_ky .NE. root) THEN - CALL MPI_SEND(buffer, 2 , MPI_DOUBLE_PRECISION, root, 1234, comm_ky, ierr) - ELSE - ! Recieve from all the other processes - DO i_ = 0,num_procs_ky-1 - IF (i_ .NE. rank_ky) & - CALL MPI_RECV(buffer, 2 , MPI_DOUBLE_PRECISION, i_, 1234, comm_ky, MPI_STATUS_IGNORE, ierr) - hflux_xi = hflux_xi + buffer(2) - ENDDO - ENDIF - ELSE - hflux_xi = hflux_local - ENDIF -END SUBROUTINE compute_radial_ion_heatflux - - -! 
1D diagnostic to compute the average radial particle transport <T_e v_ExB_x>_xyz -SUBROUTINE compute_radial_electron_heatflux - USE fields, ONLY : moments_e, phi, psi - USE array, ONLY : kernel_e!, HF_phi_correction_operator - USE geometry, ONLY : Jacobian, iInt_Jacobian - USE time_integration, ONLY : updatetlevel - USE calculus, ONLY : simpson_rule_z - USE model, ONLY : tau_e, sqrt_tau_o_sigma_e, EM - IMPLICIT NONE - COMPLEX(dp) :: hflux_local, integral - REAL(dp) :: ky_, buffer(1:2), n_dp - INTEGER :: i_, root, in - COMPLEX(dp), DIMENSION(izgs:izge) :: integrant ! charge density q_a n_a - - hflux_local = 0._dp ! heat flux - integrant = 0._dp ! z integration auxiliary variable - !!----------------ELECTROSTATIC CONTRIBUTION--------------------------- - IF(CONTAINS_ip0_e .AND. CONTAINS_ip2_e) THEN - ! Loop to compute gamma_kx = sum_ky sum_j -i k_z Kernel_j Ni00 * phi - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - DO in = ijs_e, ije_e - n_dp = jarray_e(in) - integrant(izgs:izge) = integrant(izgs:izge) + tau_e*imagu*ky_*CONJG(phi(iky,ikx,izgs:izge))& - *kernel_e(in,iky,ikx,izgs:izge,0)*(& - 0.5_dp*SQRT2*moments_e(ip2_e,in ,iky,ikx,izgs:izge,updatetlevel)& - +(2._dp*n_dp + 1.5_dp)*moments_e(ip0_e,in ,iky,ikx,izgs:izge,updatetlevel)& - -(n_dp+1._dp)*moments_e(ip0_e,in+1,iky,ikx,izgs:izge,updatetlevel)& - -n_dp*moments_e(ip0_e,in-1,iky,ikx,izgs:izge,updatetlevel)) - ENDDO - ENDDO ENDDO - ENDIF - IF(EM .AND. CONTAINS_ip1_e .AND. CONTAINS_ip3_e) THEN + ENDIF + IF(EM .AND. CONTAINSp1 .AND. CONTAINSp3) THEN !!----------------ELECTROMAGNETIC CONTRIBUTION-------------------- - DO iky = ikys,ikye - ky_ = kyarray(iky) - DO ikx = ikxs,ikxe - DO in = ijs_e, ije_e - n_dp = jarray_e(in) - integrant(izgs:izge) = integrant(izgs:izge) & - +tau_e*sqrt_tau_o_sigma_e*imagu*ky_*CONJG(psi(iky,ikx,izgs:izge))& - *kernel_e(in,iky,ikx,izgs:izge,0)*(& - 0.5_dp*SQRT2*SQRT3*moments_e(ip3_e,in ,iky,ikx,izgs:izge,updatetlevel)& - +1.5_dp*CONJG(moments_e(ip1_e,in ,iky,ikx,izgs:izge,updatetlevel))& !????? - +(2._dp*n_dp+1._dp)*moments_e(ip1_e,in ,iky,ikx,izgs:izge,updatetlevel)& - -(n_dp+1._dp)*moments_e(ip1_e,in+1,iky,ikx,izgs:izge,updatetlevel)& - -n_dp*moments_e(ip1_e,in-1,iky,ikx,izgs:izge,updatetlevel)) + DO iz = 1,local_nz+ngz + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO in = 1+ngj/2, local_nj+ngj/2 ! only interior points + n_dp = jarray(in) + integrant(iz) = integrant(iz) & + +Jacobian(iz,iodd)*tau(ia)*sqrt_tau_o_sigma(ia)*imagu*kyarray(iky)*CONJG(psi(iky,ikx,iz))& + *kernel(ia,in,iky,ikx,iz,iodd)*(& + 0.5_dp*SQRT2*SQRT3*moments(ia,ip3,in ,iky,ikx,iz,updatetlevel)& + +1.5_dp*moments(ia,ip1,in ,iky,ikx,iz,updatetlevel)& + +(2._dp*n_dp+1._dp)*moments(ia,ip1,in ,iky,ikx,iz,updatetlevel)& + -(n_dp+1._dp)*moments(ia,ip1,in+1,iky,ikx,iz,updatetlevel)& + -n_dp*moments(ia,ip1,in-1,iky,ikx,iz,updatetlevel)) ENDDO ENDDO ENDDO + ENDDO ENDIF ! Add polarisation contribution - ! integrant(izs:ize) = integrant(izs:ize) + tau_e*imagu*ky_& - ! *CONJG(phi(iky,ikx,izs:ize))*phi(iky,ikx,izs:ize) * HF_phi_correction_operator(iky,ikx,izs:ize) + ! integrant(iz) = integrant(iz) + tau_i*imagu*ky_& + ! *CONJG(phi(iky,ikx,iz))*phi(iky,ikx,iz) * HF_phi_correction_operator(iky,ikx,iz) ! Integrate over z - integrant(izgs:izge) = Jacobian(izgs:izge,0)*integrant(izgs:izge) - call simpson_rule_z(integrant,integral) - hflux_local = hflux_local + integral*iInt_Jacobian + call simpson_rule_z(local_nz,deltaz,integrant,integral) ! 
Double it for taking into account the other half plane - buffer(2) = 2._dp*REAL(hflux_local) + hflux_local = 2._dp*integral*iInt_Jacobian + buffer(2) = REAL(hflux_local) root = 0 !Gather manually among the rank_p=0 processes and perform the sum - hflux_xe = 0 + hflux_x(ia) = 0 IF (num_procs_ky .GT. 1) THEN !! Everyone sends its local_sum to root = 0 IF (rank_ky .NE. root) THEN @@ -388,147 +217,91 @@ SUBROUTINE compute_radial_electron_heatflux DO i_ = 0,num_procs_ky-1 IF (i_ .NE. rank_ky) & CALL MPI_RECV(buffer, 2 , MPI_DOUBLE_PRECISION, i_, 1234, comm_ky, MPI_STATUS_IGNORE, ierr) - hflux_xe = hflux_xe + buffer(2) + hflux_x(ia) = hflux_x(ia) + buffer(2) ENDDO ENDIF ELSE - hflux_xe = hflux_local + hflux_x(ia) = hflux_local ENDIF -END SUBROUTINE compute_radial_electron_heatflux - - + ENDDO +END SUBROUTINE compute_radial_heatflux SUBROUTINE compute_nadiab_moments_z_gradients_and_interp ! evaluate the non-adiabatique ion moments ! ! n_{pi} = N^{pj} + kernel(j) /tau_i phi delta_p0 ! - USE fields, ONLY : moments_i, moments_e, phi, psi - USE array, ONLY : kernel_e, kernel_i, nadiab_moments_e, nadiab_moments_i, & - ddz_nepj, ddzND_Nepj, interp_nepj,& - ddz_nipj, ddzND_Nipj, interp_nipj!, ddz_phi - USE time_integration, ONLY : updatetlevel - USE model, ONLY : qe_taue, qi_taui,q_o_sqrt_tau_sigma_e, q_o_sqrt_tau_sigma_i, & - KIN_E, CLOS, beta, HDz_h - USE calculus, ONLY : grad_z, grad_z2, grad_z4, interp_z IMPLICIT NONE - INTEGER :: eo, p_int, j_int + INTEGER :: eo, p_int, j_int, ia,ip,ij,iky,ikx,iz CALL cpu_time(t0_process) - ! Electron non adiab moments - - IF(KIN_E) THEN - DO ip=ipgs_e,ipge_e - IF(parray_e(ip) .EQ. 0) THEN - DO ij=ijgs_e,ijge_e - nadiab_moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge) = moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) & - + qe_taue*kernel_e(ij,ikys:ikye,ikxs:ikxe,izgs:izge,0)*phi(ikys:ikye,ikxs:ikxe,izgs:izge) - ENDDO - ELSEIF( (parray_e(ip) .EQ. 1) .AND. (beta .GT. 0) ) THEN - DO ij=ijgs_e,ijge_e - nadiab_moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge) = moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) & - - q_o_sqrt_tau_sigma_e*kernel_e(ij,ikys:ikye,ikxs:ikxe,izgs:izge,0)*psi(ikys:ikye,ikxs:ikxe,izgs:izge) - ENDDO - ELSE - DO ij=ijgs_e,ijge_e - nadiab_moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge) = moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) - ENDDO - ENDIF - ENDDO - ENDIF - ! Ions non adiab moments - DO ip=ipgs_i,ipge_i - IF(parray_i(ip) .EQ. 0) THEN - DO ij=ijgs_i,ijge_i - nadiab_moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge) = moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) & - + qi_taui*kernel_i(ij,ikys:ikye,ikxs:ikxe,izgs:izge,0)*phi(ikys:ikye,ikxs:ikxe,izgs:izge) - ENDDO - ELSEIF( (parray_i(ip) .EQ. 1) .AND. (beta .GT. 0) ) THEN - DO ij=ijgs_i,ijge_i - nadiab_moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge) = moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) & - - q_o_sqrt_tau_sigma_i*kernel_i(ij,ikys:ikye,ikxs:ikxe,izgs:izge,0)*psi(ikys:ikye,ikxs:ikxe,izgs:izge) - ENDDO - ELSE - DO ij=ijgs_i,ijge_i - nadiab_moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge) = moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel) - ENDDO - ENDIF - ENDDO - - !! Ensure to kill the moments too high if closue option is set to 1 - IF(CLOS .EQ. 1) THEN - IF(KIN_E) THEN - DO ip=ipgs_e,ipge_e - p_int = parray_e(ip) - DO ij=ijgs_e,ijge_e - j_int = jarray_e(ij) - IF(p_int+2*j_int .GT. 
dmaxe) & - nadiab_moments_e(ip,ij,:,:,:) = 0._dp + !non adiab moments + DO iz=1,local_nz+ngz + DO ikx=1,local_nkx + DO iky=1,local_nky + DO ij=1,local_nj+ngj + DO ip=1,local_np+ngp + DO ia = 1,local_na + IF(parray(ip) .EQ. 0) THEN + nadiab_moments(ia,ip,ij,iky,ikx,iz) = moments(ia,ip,ij,iky,ikx,iz,updatetlevel) & + + q_tau(ia)*kernel(ia,ij,iky,ikx,iz,ieven)*phi(iky,ikx,iz) + ELSEIF( (parray(ip) .EQ. 1) .AND. (beta .GT. 0) ) THEN + nadiab_moments(ia,ip,ij,iky,ikx,iz) = moments(ia,ip,ij,iky,ikx,iz,updatetlevel) & + - q_o_sqrt_tau_sigma(ia)*kernel(ia,ij,iky,ikx,iz,ieven)*psi(iky,ikx,iz) + ELSE + nadiab_moments(ia,ip,ij,iky,ikx,iz) = moments(ia,ip,ij,iky,ikx,iz,updatetlevel) + ENDIF ENDDO - ENDDO - ENDIF - DO ip=ipgs_i,ipge_i - p_int = parray_i(ip) - DO ij=ijgs_i,ijge_i - j_int = jarray_i(ij) - IF(p_int+2*j_int .GT. dmaxi) & - nadiab_moments_i(ip,ij,:,:,:) = 0._dp ENDDO + ENDDO ENDDO - ENDIF - - !------------- INTERP AND GRADIENTS ALONG Z ---------------------------------- + ENDDO + ENDDO - IF (KIN_E) THEN - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - DO ij = ijgs_e,ijge_e - DO ip = ipgs_e,ipge_e - p_int = parray_e(ip) - eo = MODULO(p_int,2) ! Indicates if we are on even or odd z grid - ! Compute z derivatives - CALL grad_z(eo,nadiab_moments_e(ip,ij,iky,ikx,izgs:izge), ddz_nepj(ip,ij,iky,ikx,izs:ize)) - ! Parallel hyperdiffusion - IF (HDz_h) THEN - CALL grad_z4(nadiab_moments_e(ip,ij,iky,ikx,izgs:izge),ddzND_Nepj(ip,ij,iky,ikx,izs:ize)) - ELSE - CALL grad_z4(moments_e(ip,ij,iky,ikx,izgs:izge,updatetlevel),ddzND_Nepj(ip,ij,iky,ikx,izs:ize)) - ENDIF - ! Compute even odd grids interpolation - CALL interp_z(eo,nadiab_moments_e(ip,ij,iky,ikx,izgs:izge), interp_nepj(ip,ij,iky,ikx,izs:ize)) + !! Ensure to kill the moments too high if closue option is set to 1 + IF(CLOS .EQ. 1) THEN + DO ij=1,local_nj+ngj + j_int = jarray(ij) + DO ip=1,local_np+ngp + p_int = parray(ip) + DO ia = 1,local_na + IF(p_int+2*j_int .GT. dmax) & + nadiab_moments(ia,ip,ij,:,:,:) = 0._dp ENDDO ENDDO ENDDO - ENDDO ENDIF - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - DO ij = ijgs_i,ijge_i - DO ip = ipgs_i,ipge_i - p_int = parray_i(ip) - eo = MODULO(p_int,2) ! Indicates if we are on even or odd z grid - ! Compute z first derivative - CALL grad_z(eo,nadiab_moments_i(ip,ij,iky,ikx,izgs:izge), ddz_nipj(ip,ij,iky,ikx,izs:ize)) - ! Parallel numerical diffusion - IF (HDz_h) THEN - CALL grad_z4(nadiab_moments_i(ip,ij,iky,ikx,izgs:izge),ddzND_Nipj(ip,ij,iky,ikx,izs:ize)) - ELSE - CALL grad_z4(moments_i(ip,ij,iky,ikx,izgs:izge,updatetlevel),ddzND_Nipj(ip,ij,iky,ikx,izs:ize)) - ENDIF - ! Compute even odd grids interpolation - CALL interp_z(eo,nadiab_moments_i(ip,ij,iky,ikx,izgs:izge), interp_nipj(ip,ij,iky,ikx,izs:ize)) + !------------- INTERP AND GRADIENTS ALONG Z ---------------------------------- + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO ij = 1,local_nj+ngj + DO ip = 1,local_np+ngp + DO ia = 1,local_na + p_int = parray(ip) + eo = MODULO(p_int,2)+1 ! Indicates if we are on even or odd z grid + ! Compute z first derivative + CALL grad_z(eo,local_nz,ngz,inv_deltaz,nadiab_moments(ia,ip,ij,iky,ikx,:),ddz_napj(ia,ip,ij,iky,ikx,:)) + ! Parallel numerical diffusion + IF (HDz_h) THEN + CALL grad_z4(local_nz,ngz,inv_deltaz,nadiab_moments(ia,ip,ij,iky,ikx,:),ddzND_Napj(ia,ip,ij,iky,ikx,:)) + ELSE + CALL grad_z4(local_nz,ngz,inv_deltaz,moments(ia,ip,ij,iky,ikx,:,updatetlevel),ddzND_Napj(ia,ip,ij,iky,ikx,:)) + ENDIF + ! 
Compute even odd grids interpolation + CALL interp_z(eo,local_nz,ngz,nadiab_moments(ia,ip,ij,iky,ikx,1:local_nz+ngz), interp_napj(ia,ip,ij,iky,ikx,1:local_nz)) + ENDDO + ENDDO ENDDO ENDDO ENDDO - ENDDO - ! Phi parallel gradient (not implemented fully, should be negligible) - ! DO ikx = ikxs,ikxe - ! DO iky = ikys,ikye - ! CALL grad_z(0,phi(iky,ikx,izgs:izge), ddz_phi(iky,ikx,izs:ize)) - ! ENDDO - ! ENDDO + ! Phi parallel gradient (not implemented fully, should be negligible) + ! DO ikx = 1,local_nkx + ! DO iky = 1,local_nky + ! CALL grad_z(0,phi(iky,ikx,1:local_nz+ngz), ddz_phi(iky,ikx,1:local_nz)) + ! ENDDO + ! ENDDO ! Execution time end CALL cpu_time(t1_process) @@ -536,279 +309,181 @@ SUBROUTINE compute_nadiab_moments_z_gradients_and_interp END SUBROUTINE compute_nadiab_moments_z_gradients_and_interp SUBROUTINE compute_Napjz_spectrum - USE fields, ONLY : moments_e, moments_i - USE model, ONLY : KIN_E - USE array, ONLY : Nipjz, Nepjz + USE fields, ONLY : moments + USE array, ONLY : Napjz USE time_integration, ONLY : updatetlevel IMPLICIT NONE - REAL(dp), DIMENSION(ips_e:ipe_e,ijs_e:ije_e,izs:ize) :: local_sum_e,global_sum_e, buffer_e - REAL(dp), DIMENSION(ips_i:ipe_i,ijs_i:ije_i,izs:ize) :: local_sum_i,global_sum_i, buffer_i - INTEGER :: i_, root, count + REAL(dp), DIMENSION(local_np,local_nj,local_nz) :: local_sum,global_sum, buffer + INTEGER :: i_, root, count, ia, ip, ij, iky, ikx, iz root = 0 - ! Electron moments spectrum - IF (KIN_E) THEN + DO ia=1,local_na + ! z-moment spectrum ! build local sum - local_sum_e = 0._dp - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - local_sum_e(ips_e:ipe_e,ijs_e:ije_e,izs:ize) = local_sum_e(ips_e:ipe_e,ijs_e:ije_e,izs:ize) + & - REAL(REAL(moments_e(ips_e:ipe_e,ijs_e:ije_e,iky,ikx,izs:ize,updatetlevel)& - * CONJG(moments_e(ips_e:ipe_e,ijs_e:ije_e,iky,ikx,izs:ize,updatetlevel)))) + local_sum = 0._dp + DO ikx = 1,local_nkx + DO iky = 1,local_nky + DO ij = 1,local_nj + DO ip = 1,local_np + local_sum(ip,ij,iz) = local_sum(ip,ij,iz) + & + (moments(ia,ip+Ngp/2,ij+Ngj/2,iky,ikx,iz+Ngz/2,updatetlevel) & + * CONJG(moments(ia,ip+Ngp/2,ij+Ngj/2,iky,ikx,iz+Ngz/2,updatetlevel))) + ENDDO + ENDDO ENDDO ENDDO ! sum reduction - buffer_e = local_sum_e - global_sum_e = 0._dp - count = (ipe_e-ips_e+1)*(ije_e-ijs_e+1)*(ize-izs+1) + buffer = local_sum + global_sum = 0._dp + count = local_np*local_nj*local_nz IF (num_procs_ky .GT. 1) THEN !! Everyone sends its local_sum to root = 0 IF (rank_ky .NE. root) THEN - CALL MPI_SEND(buffer_e, count , MPI_DOUBLE_COMPLEX, root, 1234, comm_ky, ierr) + CALL MPI_SEND(buffer, count , MPI_DOUBLE_PRECISION, root, 5678, comm_ky, ierr) ELSE ! Recieve from all the other processes DO i_ = 0,num_procs_ky-1 IF (i_ .NE. rank_ky) & - CALL MPI_RECV(buffer_e, count , MPI_DOUBLE_COMPLEX, i_, 1234, comm_ky, MPI_STATUS_IGNORE, ierr) - global_sum_e = global_sum_e + buffer_e + CALL MPI_RECV(buffer, count , MPI_DOUBLE_PRECISION, i_, 5678, comm_ky, MPI_STATUS_IGNORE, ierr) + global_sum = global_sum + buffer ENDDO ENDIF ELSE - global_sum_e = local_sum_e + global_sum = local_sum ENDIF - Nepjz = global_sum_e - ENDIF - ! Ion moment spectrum - ! build local sum - local_sum_i = 0._dp - DO ikx = ikxs,ikxe - DO iky = ikys,ikye - local_sum_i(ips_i:ipe_i,ijs_i:ije_i,izs:ize) = local_sum_i(ips_i:ipe_i,ijs_i:ije_i,izs:ize) + & - (moments_i(ips_i:ipe_i,ijs_i:ije_i,iky,ikx,izs:ize,updatetlevel) & - * CONJG(moments_i(ips_i:ipe_i,ijs_i:ije_i,iky,ikx,izs:ize,updatetlevel))) - ENDDO + Napjz(ia,:,:,:) = global_sum ENDDO - ! 
sum reduction - buffer_i = local_sum_i - global_sum_i = 0._dp - count = (ipe_i-ips_i+1)*(ije_i-ijs_i+1)*(ize-izs+1) - IF (num_procs_ky .GT. 1) THEN - !! Everyone sends its local_sum to root = 0 - IF (rank_ky .NE. root) THEN - CALL MPI_SEND(buffer_i, count , MPI_DOUBLE_PRECISION, root, 5678, comm_ky, ierr) - ELSE - ! Recieve from all the other processes - DO i_ = 0,num_procs_ky-1 - IF (i_ .NE. rank_ky) & - CALL MPI_RECV(buffer_i, count , MPI_DOUBLE_PRECISION, i_, 5678, comm_ky, MPI_STATUS_IGNORE, ierr) - global_sum_i = global_sum_i + buffer_i - ENDDO - ENDIF - ELSE - global_sum_i = local_sum_i - ENDIF - Nipjz = global_sum_i END SUBROUTINE compute_Napjz_spectrum !_____________________________________________________________________________! !!!!! FLUID MOMENTS COMPUTATIONS !!!!! ! Compute the 2D particle density for electron and ions (sum over Laguerre) SUBROUTINE compute_density - USE array, ONLY : dens_e, dens_i, kernel_e, kernel_i - USE model, ONLY : KIN_E - USE fields, ONLY : moments_e, moments_i - USE time_integration, ONLY : updatetlevel IMPLICIT NONE - COMPLEX(dp) :: dens - - IF ( CONTAINS_ip0_e .AND. CONTAINS_ip0_i ) THEN + COMPLEX(dp) :: dens_ + INTEGER :: ia, iz, iky, ikx, ij + DO ia=1,local_na + IF ( CONTAINSp0 ) THEN ! Loop to compute dens_i = sum_j kernel_j Ni0j at each k - DO iz = izs,ize - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - - IF(KIN_E) THEN - ! electron density - dens = 0._dp - DO ij = ijs_e, ije_e - dens = dens + kernel_e(ij,iky,ikx,iz,0) * moments_e(ip0_e,ij,iky,ikx,iz,updatetlevel) - ENDDO - dens_e(iky,ikx,iz) = dens - ENDIF - ! ion density - dens = 0._dp - DO ij = ijs_i, ije_i - dens = dens + kernel_i(ij,iky,ikx,iz,0) * moments_i(ip0_e,ij,iky,ikx,iz,updatetlevel) + DO iz = 1,local_nz + DO iky = 1,local_nky + DO ikx = 1,local_nkx + dens_ = 0._dp + DO ij = 1, local_nj + dens_ = dens_ + kernel(ia,ij+ngj/2,iky,ikx,iz+ngz/2,0) * moments(ia,ip0,ij+ngj/2,iky,ikx,iz+ngz/2,updatetlevel) ENDDO - dens_i(iky,ikx,iz) = dens + dens(ia,iky,ikx,iz) = dens_ ENDDO ENDDO ENDDO - ENDIF + ENDIF + ENDDO END SUBROUTINE compute_density ! Compute the 2D particle fluid perp velocity for electron and ions (sum over Laguerre) SUBROUTINE compute_uperp - USE array, ONLY : uper_e, uper_i, kernel_e, kernel_i - USE model, ONLY : KIN_E - USE fields, ONLY : moments_e, moments_i - USE time_integration, ONLY : updatetlevel IMPLICIT NONE - COMPLEX(dp) :: uperp - - IF ( CONTAINS_ip0_e .AND. CONTAINS_ip0_i ) THEN - DO iz = izs,ize - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - - IF(KIN_E) THEN - ! electron - uperp = 0._dp - DO ij = ijs_e, ije_e - uperp = uperp + kernel_e(ij,iky,ikx,iz,0) *& - 0.5_dp*(moments_e(ip0_e,ij,iky,ikx,iz,updatetlevel) - moments_e(ip0_e,ij-1,iky,ikx,iz,updatetlevel)) + COMPLEX(dp) :: uperp_ + INTEGER :: ia, iz, iky, ikx, ij + DO ia=1,local_na + IF ( CONTAINSp0 ) THEN + DO iz = 1,local_nz + DO iky = 1,local_nky + DO ikx = 1,local_nkx + uperp_ = 0._dp + DO ij = 1, local_nj + uperp_ = uperp_ + kernel(ia,ij+ngj/2,iky,ikx,iz+ngz/2,0) *& + 0.5_dp*(moments(ia,ip0,ij+ngj/2,iky,ikx,iz+ngz/2,updatetlevel) - moments(ia,ip0,ij-1+ngj/2,iky,ikx,iz+ngz/2,updatetlevel)) + ENDDO + uper(ia,iky,ikx,iz) = uperp_ ENDDO - uper_e(iky,ikx,iz) = uperp - ENDIF - ! ion - uperp = 0._dp - DO ij = ijs_i, ije_i - uperp = uperp + kernel_i(ij,iky,ikx,iz,0) *& - 0.5_dp*(moments_i(ip0_i,ij,iky,ikx,iz,updatetlevel) - moments_i(ip0_i,ij-1,iky,ikx,iz,updatetlevel)) - ENDDO - uper_i(iky,ikx,iz) = uperp ENDDO ENDDO - ENDDO - ENDIF + ENDIF + ENDDO END SUBROUTINE compute_uperp ! 
Compute the 2D particle fluid par velocity for electron and ions (sum over Laguerre) SUBROUTINE compute_upar - USE array, ONLY : upar_e, upar_i, kernel_e, kernel_i - USE model, ONLY : KIN_E - USE fields, ONLY : moments_e, moments_i - USE time_integration, ONLY : updatetlevel IMPLICIT NONE - COMPLEX(dp) :: upar - - IF ( CONTAINS_ip1_e .AND. CONTAINS_ip1_i ) THEN - DO iz = izs,ize - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - IF(KIN_E) THEN - ! electron - upar = 0._dp - DO ij = ijs_e, ije_e - upar = upar + kernel_e(ij,iky,ikx,iz,1)*moments_e(ip1_e,ij,iky,ikx,iz,updatetlevel) + INTEGER :: ia, iz, iky, ikx, ij + COMPLEX(dp) :: upar_ + DO ia=1,local_na + IF ( CONTAINSp1 ) THEN + DO iz = 1,local_nz + DO iky = 1,local_nky + DO ikx = 1,local_nkx + upar_ = 0._dp + DO ij = 1, local_nj + upar_ = upar_ + kernel(ia,ij+ngj/2,iky,ikx,iz+ngz/2,1)*moments(ia,ip1,ij+ngj/2,iky,ikx,iz+ngz/2,updatetlevel) + ENDDO + upar(ia,iky,ikx,iz) = upar_ ENDDO - upar_e(iky,ikx,iz) = upar - ENDIF - ! ion - upar = 0._dp - DO ij = ijs_i, ije_i - upar = upar + kernel_i(ij,iky,ikx,iz,1)*moments_i(ip1_i,ij,iky,ikx,iz,updatetlevel) - ENDDO - upar_i(iky,ikx,iz) = upar ENDDO ENDDO - ENDDO - ELSE - IF(KIN_E)& - upar_e = 0 - upar_i = 0 - ENDIF + ENDIF + ENDDO END SUBROUTINE compute_upar ! Compute the 2D particle temperature for electron and ions (sum over Laguerre) SUBROUTINE compute_tperp - USE array, ONLY : Tper_e, Tper_i, kernel_e, kernel_i - USE model, ONLY : KIN_E - USE fields, ONLY : moments_e, moments_i USE time_integration, ONLY : updatetlevel IMPLICIT NONE REAL(dp) :: j_dp - COMPLEX(dp) :: Tperp - - IF ( CONTAINS_ip0_e .AND. CONTAINS_ip0_i .AND. & - CONTAINS_ip2_e .AND. CONTAINS_ip2_i ) THEN - ! Loop to compute T = 1/3*(Tpar + 2Tperp) - DO iz = izs,ize - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ! electron temperature - IF(KIN_E) THEN - Tperp = 0._dp - DO ij = ijs_e, ije_e - j_dp = REAL(ij-1,dp) - Tperp = Tperp + kernel_e(ij,iky,ikx,iz,0)*& - ((2_dp*j_dp+1)*moments_e(ip0_e,ij ,iky,ikx,iz,updatetlevel)& - -j_dp *moments_e(ip0_e,ij-1,iky,ikx,iz,updatetlevel)& - -j_dp+1 *moments_e(ip0_e,ij+1,iky,ikx,iz,updatetlevel)) - ENDDO - Tper_e(iky,ikx,iz) = Tperp - ENDIF - ! ion temperature - Tperp = 0._dp - DO ij = ijs_i, ije_i - j_dp = REAL(ij-1,dp) - Tperp = Tperp + kernel_i(ij,iky,ikx,iz,0)*& - ((2_dp*j_dp+1)*moments_i(ip0_i,ij ,iky,ikx,iz,updatetlevel)& - -j_dp *moments_i(ip0_i,ij-1,iky,ikx,iz,updatetlevel)& - -j_dp+1 *moments_i(ip0_i,ij+1,iky,ikx,iz,updatetlevel)) + COMPLEX(dp) :: Tperp_ + INTEGER :: ia, iz, iky, ikx, ij + DO ia=1,local_na + IF ( CONTAINSp0 .AND. CONTAINSp2 ) THEN + ! Loop to compute T = 1/3*(Tpar + 2Tperp) + DO iz = 1,local_nz + DO iky = 1,local_nky + DO ikx = 1,local_nkx + Tperp_ = 0._dp + DO ij = 1, local_nj + j_dp = REAL(ij-1,dp) + Tperp_ = Tperp_ + kernel(ia,ij,iky,ikx,iz,0)*& + ((2_dp*j_dp+1)*moments(ia,ip0,ij +ngj/2,iky,ikx,iz+ngz/2,updatetlevel)& + -j_dp *moments(ia,ip0,ij-1+ngj/2,iky,ikx,iz+ngz/2,updatetlevel)& + -j_dp+1 *moments(ia,ip0,ij+1+ngj/2,iky,ikx,iz+ngz/2,updatetlevel)) + ENDDO + Tper(ia,iky,ikx,iz) = Tperp_ ENDDO - Tper_i(iky,ikx,iz) = Tperp ENDDO ENDDO - ENDDO - ENDIF + ENDIF + ENDDO END SUBROUTINE compute_Tperp ! 
Compute the 2D particle temperature for electron and ions (sum over Laguerre) SUBROUTINE compute_Tpar - USE array, ONLY : Tpar_e, Tpar_i, kernel_e, kernel_i - USE model, ONLY : KIN_E - USE fields, ONLY : moments_e, moments_i USE time_integration, ONLY : updatetlevel IMPLICIT NONE REAL(dp) :: j_dp - COMPLEX(dp) :: tpar + COMPLEX(dp) :: Tpar_ + INTEGER :: ia, iz, iky, ikx, ij - IF ( CONTAINS_ip0_e .AND. CONTAINS_ip0_i .AND. & - CONTAINS_ip2_e .AND. CONTAINS_ip2_i ) THEN + DO ia=1,local_na + IF ( CONTAINSp0 .AND. CONTAINSp0 ) THEN ! Loop to compute T = 1/3*(Tpar + 2Tperp) - DO iz = izs,ize - DO iky = ikys,ikye - DO ikx = ikxs,ikxe - ! electron temperature - IF(KIN_E) THEN - Tpar = 0._dp - DO ij = ijs_e, ije_e - j_dp = REAL(ij-1,dp) - Tpar = Tpar + kernel_e(ij,iky,ikx,iz,0)*& - (SQRT2 * moments_e(ip2_e,ij,iky,ikx,iz,updatetlevel) & - + moments_e(ip0_e,ij,iky,ikx,iz,updatetlevel)) - ENDDO - Tpar_e(iky,ikx,iz) = Tpar - ENDIF - ! ion temperature - Tpar = 0._dp - DO ij = ijs_i, ije_i + DO iz = 1,local_nz + DO iky = 1,local_nky + DO ikx = 1,local_nkx + Tpar_ = 0._dp + DO ij = 1, local_nj j_dp = REAL(ij-1,dp) - Tpar = Tpar + kernel_i(ij,iky,ikx,iz,0)*& - (SQRT2 * moments_i(ip2_i,ij,iky,ikx,iz,updatetlevel) & - + moments_i(ip0_i,ij,iky,ikx,iz,updatetlevel)) + Tpar_ = Tpar_ + kernel(ia,ij+ngj/2,iky,ikx,iz+ngz/2,0)*& + (SQRT2 * moments(ia,ip2,ij+ngj/2,iky,ikx,iz+ngz/2,updatetlevel) & + + moments(ia,ip0,ij+ngj/2,iky,ikx,iz+ngz/2,updatetlevel)) ENDDO - Tpar_i(iky,ikx,iz) = Tpar + Tpar(ia,iky,ikx,iz) = Tpar_ ENDDO ENDDO ENDDO - ENDIF + ENDIF + ENDDO END SUBROUTINE compute_Tpar ! Compute the 2D particle fluid moments for electron and ions (sum over Laguerre) SUBROUTINE compute_fluid_moments - USE array, ONLY : dens_e, Tpar_e, Tper_e, dens_i, Tpar_i, Tper_i, temp_e, temp_i - USE model, ONLY : KIN_E IMPLICIT NONE CALL compute_density CALL compute_upar @@ -816,9 +491,7 @@ SUBROUTINE compute_fluid_moments CALL compute_Tpar CALL compute_Tperp ! Temperature - IF(KIN_E)& - temp_e = (Tpar_e - 2._dp * Tper_e)/3._dp - dens_e - temp_i = (Tpar_i - 2._dp * Tper_i)/3._dp - dens_i + temp = (Tpar - 2._dp * Tper)/3._dp - dens END SUBROUTINE compute_fluid_moments END MODULE processing diff --git a/src/readinputs.F90 b/src/readinputs.F90 index 56cdca06..dcc9978b 100644 --- a/src/readinputs.F90 +++ b/src/readinputs.F90 @@ -5,6 +5,7 @@ SUBROUTINE readinputs USE diagnostics_par, ONLY: diag_par_readinputs USE collision, ONLY: collision_readinputs USE model, ONLY: model_readinputs + USE species, ONLY: species_readinputs USE initial_par, ONLY: initial_readinputs USE time_integration, ONLY: time_integration_readinputs USE geometry, ONLY: geometry_readinputs @@ -28,6 +29,9 @@ SUBROUTINE readinputs ! Load model parameters from input file CALL model_readinputs + ! Load model parameters from input file + CALL species_readinputs + ! 
Load collision parameters from input file CALL collision_readinputs diff --git a/src/restarts_mod.F90 b/src/restarts_mod.F90 index 9628c2ef..bf1d7fee 100644 --- a/src/restarts_mod.F90 +++ b/src/restarts_mod.F90 @@ -6,249 +6,102 @@ USE grid USE fields USE diagnostics_par USE time_integration -USE model, ONLY: KIN_E IMPLICIT NONE -INTEGER :: rank, sz_, n_ -INTEGER :: dims(1) = (/0/) -CHARACTER(LEN=50) :: dset_name -INTEGER :: pmaxe_cp, jmaxe_cp, pmaxi_cp, jmaxi_cp, n0, Nkx_cp, Nky_cp, Nz_cp -COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: moments_e_cp -COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: moments_i_cp - PUBLIC :: load_moments!, write_restart CONTAINS - !******************************************************************************! - !!!!!!! Load moments from a previous output file - !******************************************************************************! - SUBROUTINE load_moments - IMPLICIT NONE - REAL :: timer_tot_1, timer_find_CP_1, timer_load_mom_1 - REAL :: timer_tot_2, timer_find_CP_2, timer_load_mom_2 - INTEGER :: deltap = 1 - CALL cpu_time(timer_tot_1) - - ! Checkpoint filename - WRITE(rstfile,'(a,a1,i2.2,a3)') TRIM(resfile0),'_',job2load,'.h5' - - IF (my_id .EQ. 0) WRITE(*,'(3x,a)') "Resume from ", rstfile - ! Open file - CALL openf(rstfile, fidrst,mpicomm=comm0) - ! Get the checkpoint moments degrees to allocate memory - CALL getatt(fidrst,"/data/input/" , "Nkx", Nkx_cp) - CALL getatt(fidrst,"/data/input/" , "Nky", Nky_cp) - CALL getatt(fidrst,"/data/input/" , "Nz", Nz_cp) - IF(Nz .EQ. 1) deltap = 2 - IF (KIN_E) THEN - CALL getatt(fidrst,"/data/input/" , "pmaxe", pmaxe_cp) - CALL getatt(fidrst,"/data/input/" , "jmaxe", jmaxe_cp) - ENDIF - CALL getatt(fidrst,"/data/input/" , "pmaxi", pmaxi_cp) - CALL getatt(fidrst,"/data/input/" , "jmaxi", jmaxi_cp) - CALL getatt(fidrst,"/data/input/" , "start_iframe5d", n0) - - IF ((KIN_E .AND. ((pmaxe_cp .NE. pmaxe) .OR. (jmaxe_cp .NE. jmaxe))) .OR.& - (pmaxi_cp .NE. pmaxi) .OR. (jmaxi_cp .NE. jmaxi)) THEN - IF(my_id.EQ.0) WRITE(*,*) '! Extending the polynomials basis !' - CALL load_output_adapt_pj - ELSE - - CALL cpu_time(timer_find_CP_1) - - ! Find the last results of the checkpoint file by iteration - n_ = n0+1 - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ ! start with moments_e/000001 - DO WHILE (isdataset(fidrst, dset_name)) ! If n_ is not a file we stop the loop - n_ = n_ + 1 - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ ! updtate file number - ENDDO - n_ = n_ - 1 ! n_ is not a file so take the previous one n_-1 - - ! Read time dependent attributes to continue simulation - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ - CALL getatt(fidrst, dset_name, 'cstep', cstep) - CALL getatt(fidrst, dset_name, 'time', time) - CALL getatt(fidrst, dset_name, 'jobnum', jobnum) - jobnum = jobnum+1 - CALL getatt(fidrst, dset_name, 'iframe2d',iframe2d) - CALL getatt(fidrst, dset_name, 'iframe5d',iframe5d) - iframe2d = iframe2d-1; iframe5d = iframe5d-1 - IF(my_id.EQ.0) WRITE(*,*) '.. restart from t = ', time - - CALL cpu_time(timer_find_CP_2) - IF(my_id.EQ.0) WRITE(*,*) '** Time find CP : ', timer_find_CP_2 - timer_find_CP_1, ' **' - - CALL cpu_time(timer_load_mom_1) - ! Read state of system from checkpoint file - - ! Super slow futils routine in Marconi.... but spare RAM - ! IF (KIN_E) THEN - ! WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_e", n_ - ! CALL getarrnd(fidrst, dset_name, moments_e(ips_e:ipe_e, ijs_e:ije_e, ikys:ikye, ikxs:ikxe, izs:ize, 1),(/1,3,5/)) - ! 
ENDIF - ! WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ - ! CALL getarrnd(fidrst, dset_name, moments_i(ips_i:ipe_i, ijs_i:ije_i, ikys:ikye, ikxs:ikxe, izs:ize, 1),(/1,3,5/)) - - ! Brute force loading: load the full moments and take what is needed (RAM dangerous...) - IF (KIN_E) THEN - CALL allocate_array(moments_e_cp, 1,pmaxe_cp/deltap+1, 1,jmaxe_cp+1, 1,Nky_cp, 1,Nkx_cp, 1,Nz_cp) - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_e", n_ - CALL getarr(fidrst, dset_name, moments_e_cp(:,:,:,:,:)) - moments_e(ips_e:ipe_e, ijs_e:ije_e, ikys:ikye, ikxs:ikxe, izs:ize, 1) & - = moments_e_cp(ips_e:ipe_e, ijs_e:ije_e, ikys:ikye, ikxs:ikxe, izs:ize) - DEALLOCATE(moments_e_cp) - ENDIF - CALL allocate_array(moments_i_cp, 1,pmaxi_cp/deltap+1, 1,jmaxi_cp+1, 1,Nky_cp, 1,Nkx_cp, 1,Nz_cp) - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ - CALL getarr(fidrst, dset_name, moments_i_cp(:,:,:,:,:)) - moments_i(ips_i:ipe_i, ijs_i:ije_i, ikys:ikye, ikxs:ikxe, izs:ize, 1) & - = moments_i_cp(ips_i:ipe_i, ijs_i:ije_i, ikys:ikye, ikxs:ikxe, izs:ize) - DEALLOCATE(moments_i_cp) - - CALL closef(fidrst) - - IF (my_id .EQ. 0) WRITE(*,'(3x,a)') "Reading from restart file "//TRIM(rstfile)//" completed!" - ENDIF - CALL cpu_time(timer_load_mom_2) - CALL cpu_time(timer_tot_2) - - CALL mpi_barrier(MPI_COMM_WORLD, ierr) - - IF(my_id.EQ.0) WRITE(*,*) '** Time load mom : ', timer_load_mom_2 - timer_load_mom_1, ' **' - IF(my_id.EQ.0) WRITE(*,*) '** Total load time : ', timer_tot_2 - timer_tot_1, ' **' - - END SUBROUTINE load_moments - !******************************************************************************! - - - !******************************************************************************! - !!!!!!! Load moments from a previous output file with possible different PJ - !******************************************************************************! - SUBROUTINE load_output_adapt_pj - IMPLICIT NONE - INTEGER :: pmaxloop_e, pmaxloop_i, jmaxloop_e, jmaxloop_i, Nkx_cp, Nky_cp, Nz_cp - INTEGER :: deltap = 1 - - ! Checkpoint filename - WRITE(rstfile,'(a,a1,i2.2,a3)') TRIM(resfile0),'_',job2load,'.h5' - - IF (my_id .EQ. 0) WRITE(*,'(3x,a)') "Resume from ", rstfile - ! Open file - CALL openf(rstfile, fidrst,mpicomm=comm0) - ! Get grid info - CALL getatt(fidrst,"/data/input/" , "Nkx", Nkx_cp) - CALL getatt(fidrst,"/data/input/" , "Nky", Nky_cp) - CALL getatt(fidrst,"/data/input/" , "Nz", Nz_cp) - IF(Nz .EQ. 1) deltap = 2 - !!!!!!!!! Load electron moments - IF (KIN_E) THEN - ! Get the checkpoint moments degrees to allocate memory - CALL getatt(fidrst,"/data/input/" , "pmaxe", pmaxe_cp) - CALL getatt(fidrst,"/data/input/" , "jmaxe", jmaxe_cp) - IF (my_id .EQ. 0) WRITE(*,*) "Pe_cp = ", pmaxe_cp - IF (my_id .EQ. 0) WRITE(*,*) "Je_cp = ", jmaxe_cp - CALL getatt(fidrst,"/data/input/" , "start_iframe5d", n0) - ! Allocate the required size to load checkpoints moments - ! CALL allocate_array(moments_e_cp, 1,pmaxe_cp+1, 1,jmaxe_cp+1, ikxs,ikxe, ikys,ikye, izs,ize) - CALL allocate_array(moments_e_cp, 1,pmaxe_cp/deltap+1, 1,jmaxe_cp+1, 1,Nky_cp, 1,Nkx_cp, 1,Nz_cp) - ! Find the last results of the checkpoint file by iteration - n_ = n0+1 - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_e", n_ ! start with moments_e/000001 - DO WHILE (isdataset(fidrst, dset_name)) ! If n_ is not a file we stop the loop - n_ = n_ + 1 - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_e", n_ ! updtate file number - ENDDO - n_ = n_ - 1 ! n_ is not a file so take the previous one n_-1 - ! 
Read state of system from checkpoint file and load every moment to change the distribution - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_e", n_ - ! CALL getarrnd(fidrst, dset_name, moments_e_cp(1:pmaxe_cp+1, 1:jmaxe_cp+1, ikys:ikye, ikxs:ikxe, izs:ize),(/1,3/)) - CALL getarr(fidrst, dset_name, moments_e_cp(1:pmaxe_cp/deltap+1, 1:jmaxe_cp+1, 1:Nky_cp, 1:Nkx_cp, 1:Nz_cp)) - ! Initialize simulation moments array with checkpoints ones - ! (they may have a larger number of polynomials, set to 0 at the begining) - moments_e = 0._dp; - pmaxloop_e = min(ipe_e,pmaxe_cp/deltap+1) - jmaxloop_e = min(ije_e,jmaxe_cp+1) - IF (ips_e .LE. pmaxe_cp/deltap+1) THEN - DO ip=ips_e,pmaxloop_e - IF (ijs_e .LE. jmaxe_cp+1) THEN - DO ij=ijs_e,jmaxloop_e - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz = izs,ize - moments_e(ip,ij,iky,ikx,iz,:) = moments_e_cp(ip,ij,iky,ikx,iz) - ENDDO - ENDDO - ENDDO - ENDDO - ENDIF - ENDDO - ENDIF - ! Deallocate checkpoint arrays - DEALLOCATE(moments_e_cp) - ENDIF - !!!!!!! Load ion moments - ! Get the checkpoint moments degrees to allocate memory - CALL getatt(fidrst,"/data/input/" , "pmaxi", pmaxi_cp) - CALL getatt(fidrst,"/data/input/" , "jmaxi", jmaxi_cp) - IF (my_id .EQ. 0) WRITE(*,*) "Pi_cp = ", pmaxi_cp - IF (my_id .EQ. 0) WRITE(*,*) "Ji_cp = ", jmaxi_cp - CALL getatt(fidrst,"/data/input/" , "start_iframe5d", n0) - ! Allocate the required size to load checkpoints moments - ! CALL allocate_array(moments_i_cp, 1,pmaxi_cp+1, 1,jmaxi_cp+1, ikxs,ikxe, ikys,ikye, izs,ize) - CALL allocate_array(moments_i_cp, 1,pmaxi_cp/deltap+1, 1,jmaxi_cp+1, 1,Nky_cp, 1,Nkx_cp, 1,Nz_cp) - ! Find the last results of the checkpoint file by iteration - n_ = n0+1 - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ ! start with moments_e/000001 - DO WHILE (isdataset(fidrst, dset_name)) ! If n_ is not a file we stop the loop - n_ = n_ + 1 - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ ! updtate file number - ENDDO - n_ = n_ - 1 ! n_ is not a file so take the previous one n_-1 - - ! Read state of system from checkpoint file and load every moment to change the distribution - WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments_i", n_ - ! CALL getarrnd(fidrst, dset_name, moments_i_cp(1:pmaxi_cp+1, 1:jmaxi_cp+1, ikys:ikye, ikxs:ikxe, izs:ize),(/1,3/)) - CALL getarr(fidrst, dset_name, moments_i_cp(1:pmaxi_cp/deltap+1, 1:jmaxi_cp+1, 1:Nky_cp, 1:Nkx_cp, 1:Nz_cp)) - - ! Initialize simulation moments array with checkpoints ones - ! (they may have a larger number of polynomials, set to 0 at the begining) - moments_i = 0._dp; - pmaxloop_i = min(ipe_i,pmaxi_cp/deltap+1) - jmaxloop_i = min(ije_i,jmaxi_cp+1) - IF (ips_i .LE. pmaxi_cp/deltap+1) THEN - DO ip=ips_i,pmaxloop_i - IF (ijs_i .LE. jmaxi_cp+1) THEN - DO ij=ijs_i,jmaxloop_i - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO iz = izs,ize - moments_i(ip,ij,iky,ikx,iz,:) = moments_i_cp(ip,ij,iky,ikx,iz) - ENDDO - ENDDO - ENDDO - ENDDO - ENDIF - ENDDO - ENDIF - ! Deallocate checkpoint arrays - DEALLOCATE(moments_i_cp) - - ! Read time dependent attributes to continue simulation - CALL getatt(fidrst, dset_name, 'cstep', cstep) - CALL getatt(fidrst, dset_name, 'time', time) - CALL getatt(fidrst, dset_name, 'jobnum', jobnum) - jobnum = jobnum+1 - CALL getatt(fidrst, dset_name, 'iframe2d',iframe2d) - CALL getatt(fidrst, dset_name, 'iframe5d',iframe5d) - iframe2d = iframe2d-1; iframe5d = iframe5d-1 - - CALL closef(fidrst) - - IF (my_id .EQ. 0) WRITE(*,'(3x,a)') "Reading from restart file "//TRIM(rstfile)//" completed!" 
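! Both the reader above and the new load_moments below locate the last saved
! frame the same way: probe "/data/var5d/<name>/NNNNNN" with an increasing
! index until isdataset() fails, then step back by one. A minimal sketch of
! that pattern as a standalone helper (assuming only the futils isdataset
! interface already used in this module; the name find_last_frame is a
! hypothetical helper, not part of the code base):
      INTEGER FUNCTION find_last_frame(fid, base, n0) RESULT(n_)
        ! Return the index of the last existing "<base>/NNNNNN" dataset,
        ! starting the search at n0+1 (n0 read from the start frame attribute).
        USE futils, ONLY: isdataset
        IMPLICIT NONE
        INTEGER,          INTENT(IN) :: fid, n0
        CHARACTER(LEN=*), INTENT(IN) :: base      ! e.g. "/data/var5d/moments"
        CHARACTER(LEN=50)            :: dset_name
        n_ = n0 + 1
        WRITE(dset_name, "(A, '/', i6.6)") base, n_
        DO WHILE (isdataset(fid, dset_name))      ! stop at the first missing frame
          n_ = n_ + 1
          WRITE(dset_name, "(A, '/', i6.6)") base, n_
        ENDDO
        n_ = n_ - 1                               ! last frame that actually exists
      END FUNCTION find_last_frame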
- - END SUBROUTINE load_output_adapt_pj - !******************************************************************************! + !******************************************************************************! + !!!!!!! Fill initial moments value using the final state of a previous simulation + !******************************************************************************! + SUBROUTINE load_moments + USE parallel, ONLY: comm0 + IMPLICIT NONE + CHARACTER(LEN=50) :: dset_name + REAL(dp):: time_cp + INTEGER :: cstep_cp, jobnum_cp + INTEGER :: n_ + INTEGER :: deltap_cp + INTEGER :: pmax_cp, jmax_cp, n0, Nkx_cp, Nky_cp, Nz_cp, Na_cp, Np_cp, Nj_cp + INTEGER :: ia,ip,ij,iky,ikx,iz, iacp,ipcp,ijcp,iycp,ixcp,izcp, ierr + REAL(dp):: timer_tot_1,timer_tot_2 + COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments_cp + CALL cpu_time(timer_tot_1) + ! Checkpoint filename + WRITE(rstfile,'(a,a1,i2.2,a3)') TRIM(resfile0),'_',job2load,'.h5' + CALL speak("Resume from "//rstfile) + ! Open file + CALL openf(rstfile, fidrst,mpicomm=comm0) + ! Get the checkpoint moments degrees to allocate memory + CALL getatt(fidrst,"/data/input/grid" , "Nkx", Nkx_cp) + CALL getatt(fidrst,"/data/input/grid" , "Nky", Nky_cp) + CALL getatt(fidrst,"/data/input/grid" , "Nz", Nz_cp) + IF(Nz_cp .NE. Nz) & + ERROR STOP "!! cannot change Nz in a restart, interp or reduction not implemented !!" + CALL getatt(fidrst,"/data/input/grid" ,"deltap",deltap_cp) + IF(deltap_cp .NE. deltap) & + ERROR STOP "!! cannot change deltap in a restart, not implemented !!" + CALL getatt(fidrst,"/data/input/grid" , "pmax", pmax_cp) + Np_cp = pmax_cp/deltap_cp+1 + CALL getatt(fidrst,"/data/input/grid" , "jmax", jmax_cp) + Nj_cp = jmax_cp+1 + CALL getatt(fidrst,"/data/input/model", "Na", Na_cp) + CALL getatt(fidrst,"/data/input/basic" , "startframe5d", n0) + ! Find the last results of the checkpoint file by iteration + n_ = n0+1 + WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments", n_ ! start with moments/000001 + DO WHILE (isdataset(fidrst, dset_name)) ! If n_ is not a file we stop the loop + n_ = n_ + 1 + WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments", n_ ! updtate file number + ENDDO + n_ = n_ - 1 ! n_ is not a file so take the previous one n_-1 + WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments", n_ + ! Read time dependent attributes to continue simulation + CALL getatt(fidrst, dset_name, 'cstep', cstep_cp) + CALL getatt(fidrst, dset_name, 'time', time_cp) + CALL getatt(fidrst, dset_name, 'jobnum', jobnum_cp) + ! Set the cstep, step, time and iframnd variables in basic from the checkpoint + CALL set_basic_cp(cstep_cp,time_cp,jobnum_cp) + CALL speak('.. restart from t = '//str(time)) + ! Read state of system from checkpoint file + ! Brute force loading: load the full moments and take what is needed (RAM dangerous...) + ! 
other possibility is to loop over slices + CALL allocate_array(moments_cp, 1,Na_cp, 1,Np_cp, 1,Nj_cp, 1,Nky_cp, 1,Nkx_cp, 1,Nz_cp) + WRITE(dset_name, "(A, '/', i6.6)") "/data/var5d/moments", n_ + CALL getarr(fidrst, dset_name, moments_cp(:,:,:,:,:,:)) + + moments = 0._dp; + z: DO iz = 1,local_nz + izcp = iz+local_nz_offset + x: DO ikx=1,local_nkx + ixcp = ikx+local_nkx_offset + y: DO iky=1,local_nky + iycp = iky + local_nkx_offset + j: DO ij=1,local_nj + ijcp = ij + local_nj_offset + p: DO ip=1,local_np + ipcp = ip + local_np_offset + a: DO ia=1,Na_cp + iacp = ia + local_na_offset + IF((iacp.LE.Na_cp).AND.(ipcp.LE.Np_cp).AND.(ijcp.LE.Nj_cp).AND.(iycp.LE.Nky_cp).AND.(ixcp.LE.Nkx_cp).AND.(izcp.LE.Nz_cp)) & + moments(ia,ip,ij,iky,ikx,iz,1) = moments_cp(ia,ip,ij,iky,ikx,iz) + ENDDO a + ENDDO p + ENDDO j + ENDDO y + ENDDO x + ENDDO z + !! deallocate the full moment variable + DEALLOCATE(moments_cp) + CALL closef(fidrst) + CALL speak("Reading from restart file "//TRIM(rstfile)//" completed!") + CALL mpi_barrier(MPI_COMM_WORLD, ierr) + ! stop time measurement + CALL cpu_time(timer_tot_2) + CALL speak('** Total load time : '// str(timer_tot_2 - timer_tot_1)//' **') + + END SUBROUTINE load_moments + !******************************************************************************! END MODULE restarts diff --git a/src/solve_EM_fields.F90 b/src/solve_EM_fields.F90 index 3f355e59..5e61c5dd 100644 --- a/src/solve_EM_fields.F90 +++ b/src/solve_EM_fields.F90 @@ -15,66 +15,69 @@ CONTAINS SUBROUTINE poisson ! Solve poisson equation to get phi USE time_integration, ONLY: updatetlevel - USE array, ONLY: kernel_e, kernel_i, inv_poisson_op, inv_pol_ion - USE fields, ONLY: phi, moments_e, moments_i - USE grid - USE calculus, ONLY : simpson_rule_z - USE parallel, ONLY : manual_3D_bcast - use model, ONLY : q_e, q_i, lambdaD, KIN_E, sigma_e, sigma_i - USE processing, ONLY : compute_density - USE geometry, ONLY : iInt_Jacobian, Jacobian + USE array, ONLY: kernel, inv_poisson_op, inv_pol_ion + USE fields, ONLY: phi, moments + USE grid, ONLY: local_na, local_nky, local_nkx, local_nz,ngz, SOLVE_POISSON,& + kyarray, contains_kx0, contains_ky0,ikx0,iky0, deltaz, ieven,& + ip0, total_nj, ngj + USE calculus, ONLY: simpson_rule_z + USE parallel, ONLY: manual_3D_bcast + USE model, ONLY: lambdaD, ADIAB_E + use species, ONLY: q + USE processing, ONLY: compute_density + USE geometry, ONLY: iInt_Jacobian, Jacobian IMPLICIT NONE - INTEGER :: ini,ine + INTEGER :: in, ia, ikx, iky, iz COMPLEX(dp) :: fsa_phi, intf_ ! current flux averaged phi - COMPLEX(dp), DIMENSION(izs:ize) :: rho_i, rho_e, integrant ! charge density q_a n_a and aux var + COMPLEX(dp), DIMENSION(local_nz) :: rho, integrant ! charge density q_a n_a and aux var ! Execution time start CALL cpu_time(t0_poisson) - !! Poisson can be solved only for process containing p=0 + !! Poisson can be solved only for process containng p=0 IF ( SOLVE_POISSON ) THEN - kxloop: DO ikx = ikxs,ikxe - kyloop: DO iky = ikys,ikye - phi(iky,ikx,izs:ize) = 0._dp - !!!! Compute ion particle charge density q_i n_i - rho_i = 0._dp - DO ini=1,jmaxi+1 - rho_i(izs:ize) = rho_i(izs:ize) & - +q_i*kernel_i(ini,iky,ikx,izs:ize,0)*moments_i(ip0_i,ini,iky,ikx,izs:ize,updatetlevel) - END DO - !!!! Compute electron particle charge density q_e n_e - rho_e = 0._dp - IF (KIN_E) THEN ! Kinetic electrons - DO ine=1,jmaxe+1 - rho_e(izs:ize) = rho_e(izs:ize) & - +q_e*kernel_e(ine,iky,ikx,izs:ize,0)*moments_e(ip0_e,ine,iky,ikx,izs:ize,updatetlevel) - END DO - ELSE ! 
Adiabatic electrons + x:DO ikx = 1,local_nky + y:DO iky = 1,local_nkx + !!!!!!!!!!!!!!! Compute particle charge density q_a n_a for each evolved species + DO iz = 1,local_Nz + rho(iz) = 0._dp + DO in=1,total_nj + DO ia = 1,local_na + rho(iz) = rho(iz) & + +q(ia)*kernel(ia,in+ngj/2,iky,ikx,iz+ngz/2,ieven)*moments(ia,ip0,in+ngj/2,iky,ikx,iz+ngz/2,updatetlevel) + END DO + END DO + END DO + !!!!!!!!!!!!!!! adiabatic electron contribution if asked + IF (ADIAB_E) THEN ! Adiabatic charge density (linked to flux surface averaged phi) ! We compute the flux surface average solving a flux surface averaged ! Poisson equation, i.e. ! [qi^2(1-sum_j K_j^2)/tau_i] <phi>_psi = <q_i n_i >_psi ! inv_pol_ion^-1 fsa_phi = simpson(Jacobian rho_i ) * iInt_Jacobian - fsa_phi = 0._dp - IF(kyarray(iky).EQ.0._dp) THEN ! take ky=0 mode (y-average) - ! Prepare integrant for z-average - integrant(izs:ize) = Jacobian(izs:ize,0)*rho_i(izs:ize)*inv_pol_ion(iky,ikx,izs:ize) - call simpson_rule_z(integrant(izs:ize),intf_) ! get the flux averaged phi - fsa_phi = intf_ * iInt_Jacobian !Normalize by 1/int(Jxyz)dz + fsa_phi = 0._dp + IF(kyarray(iky).EQ.0._dp) THEN ! take ky=0 mode (y-average) + ! Prepare integrant for z-average + integrant(iz) = Jacobian(iz+ngz/2,ieven)*rho(iz)*inv_pol_ion(iky,ikx,iz) + call simpson_rule_z(local_Nz,deltaz,integrant,intf_) ! get the flux averaged phi + fsa_phi = intf_ * iInt_Jacobian !Normalize by 1/int(Jxyz)dz + ENDIF + rho(iz) = rho(iz) + fsa_phi ENDIF - rho_e(izs:ize) = fsa_phi - ENDIF - !!!!!!!!!!!!!!! Inverting the poisson equation !!!!!!!!!!!!!!!!!!!!!!!!!! - phi(iky,ikx,izs:ize) = (rho_e(izs:ize) + rho_i(izs:ize))*inv_poisson_op(iky,ikx,izs:ize) - END DO kyloop - END DO kxloop + !!!!!!!!!!!!!!! adiabatic ions ? + ! IF (ADIAB_I) THEN + ! ENDIF + !!!!!!!!!!!!!!! Inverting the poisson equation + DO iz = 1,local_Nz + phi(iky,ikx,iz+ngz/2) = inv_poisson_op(iky,ikx,iz)*rho(iz) + ENDDO + ENDDO y + ENDDO x ! Cancel origin singularity - IF (contains_kx0 .AND. contains_ky0) phi(iky_0,ikx_0,:) = 0._dp + IF (contains_kx0 .AND. contains_ky0) phi(iky0,ikx0,:) = 0._dp ENDIF - ! Transfer phi to all the others process along p - CALL manual_3D_bcast(phi(ikys:ikye,ikxs:ikxe,izs:ize)) - + CALL manual_3D_bcast(phi,local_nky,local_nkx,local_nz+ngz) ! Execution time end CALL cpu_time(t1_poisson) tc_poisson = tc_poisson + (t1_poisson - t0_poisson) @@ -83,40 +86,42 @@ CONTAINS SUBROUTINE ampere ! Solve ampere equation to get psi USE time_integration, ONLY: updatetlevel - USE array, ONLY: kernel_e, kernel_i, inv_ampere_op - USE fields, ONLY: moments_i, moments_e, psi - USE grid - USE parallel, ONLY : manual_3D_bcast - use model, ONLY : sqrt_tau_o_sigma_e, sqrt_tau_o_sigma_i, q_e, q_i, beta, KIN_E + USE array, ONLY: kernel, inv_ampere_op + USE fields, ONLY: moments, psi + USE grid, ONLY: local_na, local_nky, local_nkx, local_nz,ngz, SOLVE_AMPERE,& + contains_kx0, contains_ky0,ikx0,iky0, iodd,& + ip1, total_nj, ngj + USE parallel, ONLY: manual_3D_bcast + use species, ONLY: sqrt_tau_o_sigma, q + use model, ONLY: beta IMPLICIT NONE - - INTEGER :: ini,ine - + COMPLEX(dp) :: iota ! current density + INTEGER :: in, ia, ikx, iky, iz ! Execution time start CALL cpu_time(t0_poisson) - !! Ampere can be solved only with beta > 0 and for process containing p=1 moments + !! Ampere can be solved only with beta > 0 and for process containng p=1 moments IF ( SOLVE_AMPERE ) THEN - psi(ikys:ikye,ikxs:ikxe,izs:ize) = 0._dp - !!!! 
ion particle current density contribution "q_i u_i" - DO ini=1,jmaxi+1 - psi(ikys:ikye,ikxs:ikxe,izs:ize) = psi(ikys:ikye,ikxs:ikxe,izs:ize) & - +q_i*sqrt_tau_o_sigma_i*kernel_i(ini,ikys:ikye,ikxs:ikxe,izs:ize,0)*moments_i(ip1_i,ini,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel) - END DO - !!!! electron particle current density contribution "q_e u_e" - DO ine=1,jmaxe+1 - psi(ikys:ikye,ikxs:ikxe,izs:ize) = psi(ikys:ikye,ikxs:ikxe,izs:ize) & - +q_e*sqrt_tau_o_sigma_e*kernel_e(ine,ikys:ikye,ikxs:ikxe,izs:ize,0)*moments_e(ip1_e,ine,ikys:ikye,ikxs:ikxe,izs:ize,updatetlevel) - END DO - !!!!!!!!!!!!!!! Inverting the poisson equation !!!!!!!!!!!!!!!!!!!!!!!!!! - psi(ikys:ikye,ikxs:ikxe,izs:ize) = beta*psi(ikys:ikye,ikxs:ikxe,izs:ize)*inv_ampere_op(ikys:ikye,ikxs:ikxe,izs:ize) - - ! Cancel origin singularity - IF (contains_kx0 .AND. contains_ky0) psi(iky_0,ikx_0,:) = 0._dp + z:DO iz = 1,local_nz + x:DO ikx = 1,local_nky + y:DO iky = 1,local_nkx + !!!!!!!!!!!!!!! compute current density contribution "iota = q_a u_a" for each species + iota = 0._dp + n:DO in=1,total_nj + a:DO ia = 1,local_na + iota = iota & + +q(ia)*sqrt_tau_o_sigma(ia)*kernel(ia,in+ngj/2,iky,ikx,iz+ngz/2,iodd)*moments(ia,ip1,in,iky,ikx,iz+ngz/2,updatetlevel) + ENDDO a + ENDDO n + !!!!!!!!!!!!!!! Inverting the Ampere equation + psi(iky,ikx,iz+ngz/2) = beta*inv_ampere_op(iky,ikx,iz)*iota + ENDDO y + ENDDO x + ENDDO z ENDIF - + ! Cancel origin singularity + IF (contains_kx0 .AND. contains_ky0) psi(iky0,ikx0,:) = 0._dp ! Transfer phi to all the others process along p - CALL manual_3D_bcast(psi(ikys:ikye,ikxs:ikxe,izs:ize)) - + CALL manual_3D_bcast(psi,local_nky,local_nkx,local_nz+ngz) ! Execution time end CALL cpu_time(t1_poisson) tc_poisson = tc_poisson + (t1_poisson - t0_poisson) diff --git a/src/species_mod.F90 b/src/species_mod.F90 index 426ae36e..6b40dab8 100644 --- a/src/species_mod.F90 +++ b/src/species_mod.F90 @@ -1,56 +1,156 @@ -MODULE species_mod - USE :: basic +MODULE species + ! Module for diagnostic parameters + USE prec_const IMPLICIT NONE PRIVATE + !! Input parameters + CHARACTER(len=32) :: name_ ! name of the species + REAL(dp) :: tau_ ! Temperature + REAL(dp) :: sigma_ ! sqrt mass ratio + REAL(dp) :: q_ ! Charge + REAL(dp) :: k_N_ ! density drive (L_ref/L_Ni) + REAL(dp) :: k_T_ ! temperature drive (L_ref/L_Ti) + !! Arrays to store all species features + CHARACTER(len=32),& + ALLOCATABLE, DIMENSION(:), PUBLIC, PROTECTED :: name ! name of the species + REAL(dp), ALLOCATABLE, DIMENSION(:), PUBLIC, PROTECTED :: tau ! Temperature + REAL(dp), ALLOCATABLE, DIMENSION(:), PUBLIC, PROTECTED :: sigma ! sqrt mass ratio + REAL(dp), ALLOCATABLE, DIMENSION(:), PUBLIC, PROTECTED :: q ! Charge + REAL(dp), ALLOCATABLE, DIMENSION(:), PUBLIC, PROTECTED :: k_N ! density drive (L_ref/L_Ni) + REAL(dp), ALLOCATABLE, DIMENSION(:), PUBLIC, PROTECTED :: k_T ! temperature drive (L_ref/L_Ti) + REAL(dp), ALLOCATABLE, DIMENSION(:,:),PUBLIC, PROTECTED :: nu_ab ! Collision frequency tensor + !! Auxiliary variables to store precomputation + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: tau_q ! factor of the magnetic moment coupling + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: q_tau ! + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: sqrtTau_q ! factor of parallel moment term + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: q_sigma_sqrtTau ! factor of parallel phi term + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: sigma2_tau_o2 ! 
factor of the Kernel argument + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: sqrt_sigma2_tau_o2 ! to avoid multiple SQRT eval + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: q2_tau ! factor of the gammaD sum + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: q_o_sqrt_tau_sigma ! For psi field terms + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: sqrt_tau_o_sigma ! For Ampere eq + REAL(dp), ALLOCATABLE, DIMENSION(:),PUBLIC, PROTECTED :: dpdx ! radial pressure gradient + !! Accessible routines + PUBLIC :: species_readinputs, species_outputinputs +CONTAINS - ! Classe that encapsulate all atributes and methods for one arbitrary species - TYPE, PUBLIC :: species_class - REAL(dp), PUBLIC :: q !charge - REAL(dp), PUBLIC :: sigma !sqrt masse ratio w.r.t. ion mass - REAL(dp), PUBLIC :: tau !temperatrue ratio w.r.t. electron temp. - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: parray ! Hermite degrees - INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray ! Laguerre degrees - ! Hermite-Moments: N_a^pj ! DIMENSIONs correspond to: p, j, kx, ky, z, updatetlevel. - COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments - ! Arrays to store the rhs, for time integration (ip,ij,ikx,iky,iz,updatetlevel) - COMPLEX(dp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: moments_rhs - ! Non linear term array (ip,ij,ikx,iky,iz) - COMPLEX(dp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: Sapj - ! lin rhs p,j coefficient storage (ip,ij) - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: xnapj - REAL(dp), DIMENSION(:), ALLOCATABLE :: xnapp1j, xnapm1j, xnapp2j, xnapm2j, xnapjp1, xnapjm1 - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: ynapp1j, ynapm1j, ynapp1jm1, ynapm1jm1 ! mirror lin coeff for non adiab mom - REAL(dp), DIMENSION(:,:), ALLOCATABLE :: zNapm1j, zNapm1jp1, zNapm1jm1 ! mirror lin coeff for adiab mom - ! Kernel function evaluation (ij,ikx,iky,iz) - REAL(dp), DIMENSION(:,:,:,:), ALLOCATABLE :: kernel - !! Diagnostics - ! Gyrocenter density (ikx,iky,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: Na00 + SUBROUTINE species_readinputs + ! Read the input parameters + USE basic, ONLY : lu_in + USE model, ONLY : Na, nu, ADIAB_E + USE prec_const + IMPLICIT NONE + INTEGER :: ia,ib + ! expected namelist in the input file + NAMELIST /SPECIES/ & + name_, tau_, sigma_, q_, k_N_, k_T_ + ! allocate the arrays of species parameters + CALL species_allocate + ! loop over the species namelists in the input file + DO ia = 1,Na + ! default parameters + name_ = 'ions' + tau_ = 1._dp + sigma_ = 1._dp + q_ = 1._dp + k_N_ = 2.22_dp + k_T_ = 6.96_dp + ! read input + READ(lu_in,species) + ! place values found in the arrays + name(ia) = name_ + tau(ia) = tau_ + sigma(ia) = sigma_ + q(ia) = q_ + k_N(ia) = k_N_ + k_T(ia) = k_T_ + tau_q(ia) = tau_/q_ + ! precompute factors + q_tau(ia) = q_/tau_ + sqrtTau_q(ia) = sqrt(tau_)/q_ + q_sigma_sqrtTau(ia) = q_/sigma_/SQRT(tau_) + sigma2_tau_o2(ia) = sigma_**2 * tau_/2._dp + sqrt_sigma2_tau_o2(ia) = SQRT(sigma_**2 * tau_/2._dp) + q2_tau(ia) = (q_**2)/tau_ + q_o_sqrt_tau_sigma(ia) = q_/SQRT(tau_)/sigma_ + sqrt_tau_o_sigma(ia) = SQRT(tau_)/sigma_ + dpdx(ia) = 0._dp !not implemented yet + ! We remove the adiabatic electron flag if electrons are included + SELECT CASE (name_) + CASE ('electrons','e','electron') + ADIAB_E = .FALSE. + END SELECT + ENDDO + !! Set collision frequency tensor + IF (nu .EQ. 0) THEN + nu_ab = 0 + ELSE + DO ia = 1,Na + DO ib = 1,Na + !! We use the ion-ion collision as normalization with definition + ! 
nu_ii = 4 sqrt(pi)/3 T_i^(-3/2) m_i^(-1/2) q^4 n_i0 ln(Lambda) + SELECT CASE (name(ia)) + CASE ('electrons','e','electron') ! e-e and e-i collision + nu_ab(ia,ib) = nu/sigma(ia) * (tau(ia))**(3._dp/2._dp) ! (where already multiplied by 0.532) + CASE ('ions','ion','i') ! i-e and i-i collision + nu_ab(ia,ib) = nu + CASE DEFAULT + ERROR STOP "!! No collision model for these species interactions" + END SELECT + ! I think we can just write + ! nu_ab(ia,ib) = nu/sigma(ia) * (tau(ia))**(3._dp/2._dp) + ENDDO + ENDDO + ENDIF + ! nu_e = nu/sigma_e * (tau_e)**(3._dp/2._dp) ! electron-ion collision frequency (where already multiplied by 0.532) + ! nu_i = nu ! ion-ion collision frequ. + ! nu_ee = nu_e ! e-e coll. frequ. + ! nu_ie = nu_i ! i-e coll. frequ. - ! particle density (ikx,iky,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: density + END SUBROUTINE species_readinputs - ! particle temperature for electron and ions (ikx,iky,iz) - COMPLEX(dp), DIMENSION(:,:,:), ALLOCATABLE :: temperature - CONTAINS - ! Initialization procedures - PROCEDURE, PUBLIC :: init => species_init - PROCEDURE, PUBLIC :: setup_arrays => species_setup_arrays - PROCEDURE, PUBLIC :: evaluate_kernels => species_evaluate_kernels - ! Diagnostics - PROCEDURE, PUBLIC :: compute_density => species_compute_density - PROCEDURE, PUBLIC :: compute_temperature => species_compute_temperature - END TYPE species_class + SUBROUTINE species_outputinputs(fid) + ! Write the input parameters to the results_xx.h5 file + USE futils, ONLY: attach, creatd + USE model, ONLY: Na + IMPLICIT NONE + INTEGER, INTENT(in) :: fid + INTEGER :: ia + CHARACTER(len=256) :: str + DO ia = 1,Na + WRITE(str,'(a,a)') '/data/input/', name(ia) + CALL creatd(fid, 0,(/0/),TRIM(str),'Species Input') + CALL attach(fid, TRIM(str), "name", name(ia)) + CALL attach(fid, TRIM(str), "tau", tau(ia)) + CALL attach(fid, TRIM(str), "sigma", sigma(ia)) + CALL attach(fid, TRIM(str), "q", q(ia)) + CALL attach(fid, TRIM(str), "k_N", k_N(ia)) + CALL attach(fid, TRIM(str), "k_T", k_T(ia)) + ENDDO + END SUBROUTINE species_outputinputs - ! Routines that every species may use - CONTAINS - SUBROUTINE species_setup_arrays(this) + SUBROUTINE species_allocate + USE model, ONLY : Na + IMPLICIT NONE + !! Allocate the arrays + ALLOCATE( name(1:Na)) + ALLOCATE( nu_ab(1:Na,1:Na)) + ALLOCATE( tau(1:Na)) + ALLOCATE( sigma(1:Na)) + ALLOCATE( q(1:Na)) + ALLOCATE( k_N(1:Na)) + ALLOCATE( k_T(1:Na)) + ALLOCATE( tau_q(1:Na)) + ALLOCATE( q_tau(1:Na)) + ALLOCATE( sqrtTau_q(1:Na)) + ALLOCATE( q_sigma_sqrtTau(1:Na)) + ALLOCATE( sigma2_tau_o2(1:Na)) + ALLOCATE(sqrt_sigma2_tau_o2(1:Na)) + ALLOCATE( q2_tau(1:Na)) + ALLOCATE(q_o_sqrt_tau_sigma(1:Na)) + ALLOCATE( sqrt_tau_o_sigma(1:Na)) + ALLOCATE( dpdx(1:Na)) + END SUBROUTINE species_allocate - END SUBROUTINE - - SUBROUTINE species_compute_density(this) - - END SUBROUTINE - -END MODULE +END MODULE species diff --git a/src/stepon.F90 b/src/stepon.F90 index b2fec775..f8835396 100644 --- a/src/stepon.F90 +++ b/src/stepon.F90 @@ -1,14 +1,14 @@ SUBROUTINE stepon ! Advance one time step, (num_step=4 for Runge Kutta 4 scheme) USE advance_field_routine, ONLY: advance_time_level, advance_moments - USE basic, ONLY: nlend, ierr + USE basic, ONLY: nlend USE closure, ONLY: apply_closure_model USE ghosts, ONLY: update_ghosts_moments, update_ghosts_EM use mpi, ONLY: MPI_COMM_WORLD USE time_integration, ONLY: ntimelevel IMPLICIT NONE - INTEGER :: num_step + INTEGER :: num_step, ierr LOGICAL :: mlend DO num_step=1,ntimelevel ! 
eg RK4 compute successively k1, k2, k3, k4 @@ -46,7 +46,7 @@ SUBROUTINE stepon !!!! Basic structure to simplify stepon SUBROUTINE assemble_RHS USE moments_eq_rhs, ONLY: compute_moments_eq_rhs - USE collision, ONLY: compute_TColl + USE collision, ONLY: compute_Capj USE nonlinear, ONLY: compute_Sapj USE processing, ONLY: compute_nadiab_moments_z_gradients_and_interp IMPLICIT NONE @@ -55,22 +55,23 @@ SUBROUTINE stepon ! compute nonlinear term ("if linear" is included inside) CALL compute_Sapj ! compute collision term ("if coll, if nu >0" is included inside) - CALL compute_TColl + CALL compute_Capj ! compute the moments equation rhs CALL compute_moments_eq_rhs END SUBROUTINE assemble_RHS SUBROUTINE checkfield_all ! Check all the fields for inf or nan - USE utility,ONLY: checkfield + USE utility,ONLY: checkelem USE basic, ONLY: t0_checkfield, t1_checkfield, tc_checkfield - USE fields, ONLY: phi, moments_e, moments_i - USE grid, ONLY: ipgs_e,ipge_e,ijgs_e,ijge_e,ipgs_i,ipge_i,ijgs_i,ijge_i,& - ikys,ikye,ikxs,ikxe,izgs,izge,ij,ip + USE fields, ONLY: phi, moments + USE grid, ONLY: local_na,local_np,local_nj,local_nky,local_nkx,local_nz,& + ngp,ngj,ngz USE MPI USE time_integration, ONLY: updatetlevel - USE model, ONLY: LINEARITY, KIN_E + USE model, ONLY: LINEARITY IMPLICIT NONE LOGICAL :: checkf_ + INTEGER :: ia, ip, ij, iky, ikx, iz ! Execution time start CALL cpu_time(t0_checkfield) @@ -79,109 +80,83 @@ SUBROUTINE stepon mlend=.FALSE. IF(.NOT.nlend) THEN - checkf_ = checkfield(phi,' phi') - mlend= (mlend .or. checkf_) - IF(KIN_E) THEN - DO ij=ijgs_e,ijge_e - DO ip=ipgs_e,ipge_e - checkf_ = checkfield(moments_e(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel),' moments_e') - mlend = (mlend .or. checkf_) - ENDDO - ENDDO - ENDIF - DO ij=ijgs_i,ijge_i - DO ip=ipgs_i,ipge_i - checkf_ = checkfield(moments_i(ip,ij,ikys:ikye,ikxs:ikxe,izgs:izge,updatetlevel),' moments_i') - mlend = (mlend .or. checkf_) - ! print*, 'should be an error' - ! stop - ENDDO - ENDDO + z: DO iz = 1,local_nz+ngz + kx:DO ikx= 1,local_nkx + ky:DO iky=1,local_nky + checkf_ = checkelem(phi(iky,ikx,iz),' phi') + mlend= (mlend .or. checkf_) + j: DO ij=1,local_nj+ngj + p: DO ip=1,local_np+ngp + a: DO ia=1,local_na + checkf_ = checkelem(moments(ia,ip,ij,iky,ikx,iz,updatetlevel),' moments') + mlend = (mlend .or. checkf_) + ENDDO a + ENDDO p + ENDDO j + ENDDO ky + ENDDO kx + ENDDO z CALL MPI_ALLREDUCE(mlend, nlend, 1, MPI_LOGICAL, MPI_LOR, MPI_COMM_WORLD, ierr) ENDIF - ! 
Execution time end CALL cpu_time(t1_checkfield) tc_checkfield = tc_checkfield + (t1_checkfield - t0_checkfield) END SUBROUTINE checkfield_all SUBROUTINE anti_aliasing - USE fields, ONLY: moments_e, moments_i - USE grid, ONLY: ipgs_i,ipge_i,ijgs_i,ijge_i,ipgs_e,ipge_e,ijgs_e,ijge_e,& - ikys,ikye,ikxs,ikxe,izgs,izge,AA_x,AA_y,iz,ikx,iky,ij,ip - USE model, ONLY: KIN_E + USE fields, ONLY: moments + USE grid, ONLY: local_na,local_np,local_nj,local_nky,local_nkx,local_nz,& + ngp,ngj,ngz, AA_x, AA_y IMPLICIT NONE - IF(KIN_E)THEN - DO iz=izgs,izge - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO ij=ijgs_e,ijge_e - DO ip=ipgs_e,ipge_e - moments_e( ip,ij,iky,ikx,iz,:) = AA_x(ikx)* AA_y(iky) * moments_e( ip,ij,iky,ikx,iz,:) - END DO - END DO - END DO - END DO - END DO - ENDIF - DO iz=izgs,izge - DO ikx=ikxs,ikxe - DO iky=ikys,ikye - DO ij=ijgs_i,ijge_i - DO ip=ipgs_i,ipge_i - moments_i( ip,ij,iky,ikx,iz,:) = AA_x(ikx)* AA_y(iky) * moments_i( ip,ij,iky,ikx,iz,:) - END DO - END DO - END DO - END DO - END DO + INTEGER :: ia, ip, ij, iky, ikx, iz + z: DO iz = 1,local_nz+ngz + kx:DO ikx= 1,local_nkx + ky:DO iky=1,local_nky + j: DO ij=1,local_nj+ngj + p: DO ip=1,local_np+ngp + a: DO ia=1,local_na + moments(ia,ip,ij,iky,ikx,iz,:) = AA_x(ikx)* AA_y(iky) * moments(ia,ip,ij,iky,ikx,iz,:) + ENDDO a + ENDDO p + ENDDO j + ENDDO ky + ENDDO kx + ENDDO z END SUBROUTINE anti_aliasing SUBROUTINE enforce_symmetry ! Force X(k) = X(N-k)* complex conjugate symmetry - USE fields, ONLY: phi, psi, moments_e, moments_i - USE grid, ONLY: ipgs_i,ipge_i,ijgs_i,ijge_i,ipgs_e,ipge_e,ijgs_e,ijge_e,& - izgs,izge,iz,ikx,ij,ip,Nkx, iky_0, ikx_0, contains_ky0 - USE model, ONLY: KIN_E + USE fields, ONLY: phi, psi, moments + USE grid, ONLY: local_na,local_np,local_nj,total_nkx,local_nz,& + ngp,ngj,ngz, ikx0,iky0, contains_ky0 IMPLICIT NONE + INTEGER :: ia, ip, ij, ikx, iz IF ( contains_ky0 ) THEN - ! Electron moments - IF(KIN_E) THEN - DO iz=izgs,izge - DO ij=ijgs_e,ijge_e - DO ip=ipgs_e,ipge_e - DO ikx=2,Nkx/2 !symmetry at ky = 0 - moments_e( ip,ij,iky_0,ikx,iz, :) = CONJG(moments_e( ip,ij,iky_0,Nkx+2-ikx,iz, :)) - END DO - ! must be real at origin - moments_e(ip,ij, iky_0,ikx_0,iz, :) = REAL(moments_e(ip,ij, iky_0,ikx_0,iz, :)) - END DO - END DO - END DO - ENDIF - ! Ion moments - DO iz=izgs,izge - DO ij=ijgs_i,ijge_i - DO ip=ipgs_i,ipge_i - DO ikx=2,Nkx/2 !symmetry at ky = 0 - moments_i( ip,ij,iky_0,ikx,iz, :) = CONJG(moments_i( ip,ij,iky_0,Nkx+2-ikx,iz, :)) + ! moments + z: DO iz = 1,local_nz+ngz + j: DO ij=1,local_nj+ngj + p: DO ip=1,local_np+ngp + a: DO ia=1,local_na + DO ikx=2,total_nkx/2 !symmetry at ky = 0 + moments(ia,ip,ij,iky0,ikx,iz,:) = CONJG(moments(ia,ip,ij,iky0,total_nkx+2-ikx,iz,:)) END DO ! must be real at origin and top right - moments_i(ip,ij, iky_0,ikx_0,iz, :) = REAL(moments_i(ip,ij, iky_0,ikx_0,iz, :)) - END DO - END DO - END DO + moments(ia,ip,ij, iky0,ikx0,iz,:) = REAL(moments(ia,ip,ij, iky0,ikx0,iz,:)) + ENDDO a + ENDDO p + ENDDO j + ENDDO z ! Phi - DO ikx=2,Nkx/2 !symmetry at ky = 0 - phi(iky_0,ikx,izgs:izge) = phi(iky_0,Nkx+2-ikx,izgs:izge) + DO ikx=2,total_nkx/2 !symmetry at ky = 0 + phi(iky0,ikx,:) = phi(iky0,total_nkx+2-ikx,:) END DO ! must be real at origin - phi(iky_0,ikx_0,izgs:izge) = REAL(phi(iky_0,ikx_0,izgs:izge)) + phi(iky0,ikx0,:) = REAL(phi(iky0,ikx0,:)) ! Psi - DO ikx=2,Nkx/2 !symmetry at ky = 0 - psi(iky_0,ikx,izgs:izge) = psi(iky_0,Nkx+2-ikx,izgs:izge) + DO ikx=2,total_nkx/2 !symmetry at ky = 0 + psi(iky0,ikx,:) = psi(iky0,total_nkx+2-ikx,:) END DO ! 
must be real at origin - psi(iky_0,ikx_0,izgs:izge) = REAL(psi(iky_0,ikx_0,izgs:izge)) + psi(iky0,ikx0,:) = REAL(psi(iky0,ikx0,:)) ENDIF END SUBROUTINE enforce_symmetry diff --git a/src/tesend.F90 b/src/tesend.F90 index e908f7e7..ebbdbf39 100644 --- a/src/tesend.F90 +++ b/src/tesend.F90 @@ -2,12 +2,13 @@ SUBROUTINE tesend ! Test for run completion USE basic - + USE mpi use prec_const + USE parallel, ONLY: my_id IMPLICIT NONE LOGICAL :: mlend, mlexist REAL :: tnow - INTEGER :: ncheck_stop = 100 + INTEGER :: ncheck_stop = 100, ierr CHARACTER(len=*), PARAMETER :: stop_file = 'mystop' !________________________________________________________________________________ @@ -16,8 +17,8 @@ SUBROUTINE tesend IF( mlend ) THEN nlend = .TRUE. crashed = .TRUE. - IF (my_id .EQ. 0) WRITE(*,'(/a)') 'rhs are NaN/Inf' - IF (my_id .EQ. 0) WRITE(*,*) 'Run terminated at cstep=',cstep + CALL speak('rhs are NaN/Inf') + CALL speak('Run terminated at cstep='//str(cstep)) RETURN END IF @@ -25,7 +26,7 @@ SUBROUTINE tesend ! 2. Test on NRUN nlend = step .GT. nrun IF ( nlend ) THEN - WRITE(*,'(/a)') 'NRUN steps done' + CALL speak('NRUN steps done') RETURN END IF @@ -34,7 +35,7 @@ SUBROUTINE tesend ! 3. Test on TMAX nlend = time .GT. tmax IF ( nlend ) THEN - IF (my_id .EQ. 0) WRITE(*,'(/a)') 'TMAX reached' + CALL speak('TMAX reached') RETURN END IF ! @@ -48,7 +49,7 @@ SUBROUTINE tesend CALL mpi_allreduce(mlend, nlend, 1, MPI_LOGICAL, MPI_LOR, MPI_COMM_WORLD, ierr) IF ( nlend ) THEN - IF(my_id.EQ.0) WRITE(*,'(/a)') 'Max run time reached' + CALL speak('Max run time reached') RETURN END IF !________________________________________________________________________________ @@ -59,7 +60,7 @@ SUBROUTINE tesend IF( mlexist ) THEN OPEN(lu_stop, file=stop_file) mlend = mlexist ! Send stop status asa the file exists - WRITE(*,'(/a,i4,a)') 'Stop file found -> finishing..' + CALL speak('Stop file found -> finishing..') CLOSE(lu_stop, status='delete') END IF END IF diff --git a/src/time_integration_mod.F90 b/src/time_integration_mod.F90 index 02ba5bf9..7f937147 100644 --- a/src/time_integration_mod.F90 +++ b/src/time_integration_mod.F90 @@ -24,41 +24,30 @@ CONTAINS SUBROUTINE time_integration_readinputs ! Read the input parameters - USE prec_const USE basic, ONLY : lu_in IMPLICIT NONE - NAMELIST /TIME_INTEGRATION_PAR/ numerical_scheme - READ(lu_in,time_integration_par) - CALL set_numerical_scheme - END SUBROUTINE time_integration_readinputs - SUBROUTINE time_integration_outputinputs(fidres, str) + SUBROUTINE time_integration_outputinputs(fid) ! Write the input parameters to the results_xx.h5 file - - USE prec_const - USE futils, ONLY: attach + USE futils, ONLY: attach, creatd IMPLICIT NONE - INTEGER, INTENT(in) :: fidres - CHARACTER(len=256), INTENT(in) :: str - - CALL attach(fidres, TRIM(str), "numerical_scheme", numerical_scheme) - + INTEGER, INTENT(in) :: fid + CHARACTER(len=256) :: str + WRITE(str,'(a)') '/data/input/time_integration' + CALL creatd(fid, 0,(/0/),TRIM(str),'Time Integration Input') + CALL attach(fid, TRIM(str), "numerical_scheme", numerical_scheme) END SUBROUTINE time_integration_outputinputs - - SUBROUTINE set_numerical_scheme - ! Initialize Butcher coefficient of numerical_scheme - - use basic + ! Initialize Butcher coefficient of set_numerical_scheme + use parallel, ONLY: my_id IMPLICIT NONE - SELECT CASE (numerical_scheme) ! Order 2 methods CASE ('RK2') @@ -86,7 +75,6 @@ CONTAINS IF (my_id .EQ. 0) WRITE(*,*) 'Cannot initialize time integration scheme. Name invalid.' END SELECT IF (my_id .EQ. 
0) WRITE(*,*) " Time integration with ", numerical_scheme - END SUBROUTINE set_numerical_scheme !!! second order time schemes @@ -96,9 +84,9 @@ CONTAINS USE prec_const IMPLICIT NONE INTEGER,PARAMETER :: nbstep = 2 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 2 c_E(1) = 0.0_dp c_E(2) = 1.0_dp @@ -118,9 +106,9 @@ CONTAINS REAL(dp) :: alpha, beta alpha = 1._dp/SQRT(2._dp) beta = SQRT(2._dp) - 1._dp - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 2 c_E(1) = 0.0_dp c_E(2) = 1.0_dp/2.0_dp @@ -139,9 +127,9 @@ CONTAINS USE prec_const IMPLICIT NONE INTEGER,PARAMETER :: nbstep = 3 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 3 c_E(1) = 0.0_dp c_E(2) = 1.0_dp/2.0_dp @@ -173,9 +161,9 @@ CONTAINS ! w2 = 0.3769892220587804931852815570891834795475_dp ! (6^(2/3)-1-sqrt(9-2*6^(2/3)))/2 w3 = 1._dp/a1 - w2 * (1._dp + w1) ! w3 = 1.3368459739528868457369981115334667265415_dp - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 3 c_E(1) = 0.0_dp c_E(2) = 1.0_dp/2.0_dp @@ -195,9 +183,9 @@ CONTAINS USE prec_const IMPLICIT NONE INTEGER,PARAMETER :: nbstep = 3 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 3 c_E(1) = 0._dp c_E(2) = 0.711664700366941_dp @@ -217,9 +205,9 @@ CONTAINS USE prec_const IMPLICIT NONE INTEGER,PARAMETER :: nbstep = 3 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 3 c_E(1) = 0._dp c_E(2) = 2._dp*(1._dp - 1._dp/SQRT2) @@ -238,9 +226,9 @@ CONTAINS USE prec_const IMPLICIT NONE INTEGER,PARAMETER :: nbstep = 3 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 3 c_E(1) = 0.0_dp c_E(2) = 1.0_dp @@ -260,9 +248,9 @@ CONTAINS USE prec_const IMPLICIT NONE INTEGER,PARAMETER :: nbstep = 4 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 4 c_E(1) = 0.0_dp c_E(2) = 1.0_dp/2.0_dp @@ -285,9 +273,9 @@ CONTAINS USE basic IMPLICIT NONE INTEGER,PARAMETER :: nbstep =7 - CALL allocate_array_dp1(c_E,1,nbstep) - CALL allocate_array_dp1(b_E,1,nbstep) - CALL 
allocate_array_dp2(A_E,1,nbstep,1,nbstep) + CALL allocate_array(c_E,1,nbstep) + CALL allocate_array(b_E,1,nbstep) + CALL allocate_array(A_E,1,nbstep,1,nbstep) ntimelevel = 7 c_E(1) = 0._dp c_E(2) = 1.0_dp/5.0_dp diff --git a/src/utility_mod.F90 b/src/utility_mod.F90 index 028d514e..6433db45 100644 --- a/src/utility_mod.F90 +++ b/src/utility_mod.F90 @@ -4,9 +4,9 @@ MODULE utility CONTAINS FUNCTION is_nan(x,str) RESULT(isn) - USE basic, ONLY: cstep, stdout + USE basic, ONLY: cstep USE time_integration, ONLY: updatetlevel - USE prec_const, ONLY: dp + USE prec_const, ONLY: dp, stdout IMPLICIT NONE real(dp), INTENT(IN) :: x @@ -25,8 +25,7 @@ CONTAINS FUNCTION is_inf(x,str) RESULT(isi) - USE basic, ONLY: stdout - USE prec_const, ONLY: dp + USE prec_const, ONLY: dp, stdout IMPLICIT NONE real(dp), INTENT(IN) :: x @@ -44,15 +43,15 @@ CONTAINS END FUNCTION is_inf - FUNCTION checkfield(field,str) RESULT(mlend) - USE grid, ONLY: ikys,ikye,ikxs,ikxe,izgs,izge + FUNCTION checkfield(n1,n2,n3,field,str) RESULT(mlend) use prec_const, ONLY: dp IMPLICIT NONE !! BUG found (or feature?) ! if one put the commented first line (commented) instead of the second one, ! no error will be risen by the compiler even if the rank of the array is not matching (should be 3D!) ! COMPLEX(dp), DIMENSION(ikys:ikye,ikxs:ikxe), INTENT(IN) :: field - COMPLEX(dp), DIMENSION(ikys:ikye,ikxs:ikxe,izgs:izge), INTENT(IN) :: field + INTEGER, INTENT(in) :: n1,n2,n3 + COMPLEX(dp), DIMENSION(n1,n2,n3), INTENT(IN) :: field CHARACTER(LEN=*), INTENT(IN) :: str LOGICAL :: mlend COMPLEX(dp) :: sumfield diff --git a/testcases/cyclone_example/fort_00.90 b/testcases/cyclone_example/fort_00.90 index fd926cba..6ca55404 100644 --- a/testcases/cyclone_example/fort_00.90 +++ b/testcases/cyclone_example/fort_00.90 @@ -65,8 +65,8 @@ / &COLLISION_PAR collision_model = 'DG' - gyrokin_CO = .false. - interspecies = .true. + GK_CO = .false. + INTERSPECIES = .true. 
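! The renamed flags above (gyrokin_CO -> GK_CO, interspecies -> INTERSPECIES)
! only take effect because namelist input is matched by member name, so the
! reader's NAMELIST declaration must use the same identifiers. A minimal,
! self-contained sketch of such a read, assuming only the flag names that
! appear in these input files (the real collision_readinputs declares its own
! group and may differ in detail):
      PROGRAM read_collision_par
        IMPLICIT NONE
        CHARACTER(len=32) :: collision_model = 'none'
        CHARACTER(len=64) :: mat_file = 'null'
        LOGICAL :: GK_CO = .FALSE., INTERSPECIES = .TRUE.
        INTEGER :: lu = 90
        NAMELIST /COLLISION_PAR/ collision_model, GK_CO, INTERSPECIES, mat_file
        OPEN(lu, status='scratch')                 ! stand-in for the fort_xx.90 file
        WRITE(lu,'(a)') "&COLLISION_PAR"
        WRITE(lu,'(a)') "  collision_model = 'DG'"
        WRITE(lu,'(a)') "  GK_CO        = .false."
        WRITE(lu,'(a)') "  INTERSPECIES = .true."
        WRITE(lu,'(a)') "/"
        REWIND(lu)
        READ(lu, COLLISION_PAR)                    ! fills only the members named in the file
        PRINT *, TRIM(collision_model), GK_CO, INTERSPECIES, TRIM(mat_file)
        CLOSE(lu)
      END PROGRAM read_collision_par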
mat_file = 'null' / &INITIAL_CON diff --git a/testcases/matlab_testscripts/Hallenbert.m b/testcases/matlab_testscripts/Hallenbert.m index 5ded4b11..38fa23d7 100644 --- a/testcases/matlab_testscripts/Hallenbert.m +++ b/testcases/matlab_testscripts/Hallenbert.m @@ -41,7 +41,7 @@ JOB2LOAD= -1; % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'LD'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions NL_CLOS = -1; % nonlinear closure model (-2: nmax = jmax, -1: nmax = jmax-j, >=0 : nmax = NL_CLOS) SIMID = 'Hallenbert_nu_1e-01'; % Name of the simulation % SIMID = 'debug'; % Name of the simulation diff --git a/testcases/matlab_testscripts/linear_1D_entropy_mode.m b/testcases/matlab_testscripts/linear_1D_entropy_mode.m index 02de8d1a..77ef9c1d 100644 --- a/testcases/matlab_testscripts/linear_1D_entropy_mode.m +++ b/testcases/matlab_testscripts/linear_1D_entropy_mode.m @@ -47,7 +47,7 @@ KIN_E = 1; % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'LR'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: gyrofluid closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/testcases/matlab_testscripts/linear_damping.m b/testcases/matlab_testscripts/linear_damping.m index 46b6c4e8..494470f7 100644 --- a/testcases/matlab_testscripts/linear_damping.m +++ b/testcases/matlab_testscripts/linear_damping.m @@ -39,7 +39,7 @@ KIN_E = 1; % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: gyrofluid closure (p+2j<=Pmax)) NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/testcases/smallest_problem/fort.90 b/testcases/smallest_problem/fort.90 new file mode 100644 index 00000000..532f1db0 --- /dev/null +++ b/testcases/smallest_problem/fort.90 @@ -0,0 +1,101 @@ +&BASIC + nrun = 10 + dt = 0.01 + tmax = 1 + maxruntime = 356400 + job2load = -1 +/ +&GRID + pmax = 2 + jmax = 1 + Nx = 2 + Lx = 200 + Ny = 2 + Ly = 60 + Nz = 4 + SG = .f. + Nexc = 1 +/ +&GEOMETRY + geom = 's-alpha' + q0 = 1.4 + shear = 0.8 + eps = 0.18 + kappa = 1.0 + s_kappa= 0.0 + delta = 0.0 + s_delta= 0.0 + zeta = 0.0 + s_zeta = 0.0 + parallel_bc = 'dirichlet' + shift_y= 0.0 +/ +&OUTPUT_PAR + dtsave_0d = 0.01 + dtsave_1d = -1 + dtsave_2d = -1 + dtsave_3d = 0.01 + dtsave_5d = 0.01 + write_doubleprecision = .f. + write_gamma = .t. + write_hf = .t. + write_phi = .t. + write_Na00 = .f. + write_Napj = .t. + write_Sapj = .f. + write_dens = .t. + write_fvel = .t. + write_temp = .t. +/ +&MODEL_PAR + ! Collisionality + CLOS = 0 + NL_CLOS = -1 + LINEARITY = 'linear' + Na = 2 ! number of species + mu_x = 0.0 + mu_y = 0.0 + N_HD = 4 + mu_z = 0.1 + mu_p = 0 + mu_j = 0 + nu = 1 + beta = 0 + ADIAB_E = .f. + tau_e = 1.0 +/ +&SPECIES + ! ions + name_ = 'ions' + tau_ = 1.0 + sigma_= 1.0 + q_ = 1.0 + k_N_ = 2.22 + k_T_ = 6.96 +/ +&SPECIES + ! electrons + name_ = 'electrons' + tau_ = 1.0 + sigma_= 0.023338 + q_ = 1.0 + k_N_ = 2.22 + k_T_ = 6.96 +/ + +&COLLISION_PAR + collision_model = 'DG' !DG/SG/PA/LD (dougherty, sugama, pitch angle, landau) + GK_CO = .f. + INTERSPECIES = .true. 
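! The two &SPECIES groups above are consumed in file order: species_readinputs
! (introduced in this patch) loops DO ia = 1,Na and issues one READ(lu_in,species)
! per iteration, and each namelist READ picks up the next group with that name.
! A minimal, self-contained sketch of this mechanism with the member names used
! here (values copied from this file; the real reader also fills the per-species
! arrays and the precomputed factors):
      PROGRAM read_two_species
        IMPLICIT NONE
        INTEGER, PARAMETER :: Na = 2
        CHARACTER(len=32)  :: name_
        REAL               :: tau_, sigma_, q_, k_N_, k_T_
        INTEGER            :: lu = 91, ia
        NAMELIST /SPECIES/ name_, tau_, sigma_, q_, k_N_, k_T_
        OPEN(lu, status='scratch')                 ! stand-in for this fort.90 file
        WRITE(lu,'(a)') "&SPECIES name_='ions'      tau_=1.0 sigma_=1.0      q_=1.0 k_N_=2.22 k_T_=6.96 /"
        WRITE(lu,'(a)') "&SPECIES name_='electrons' tau_=1.0 sigma_=0.023338 q_=1.0 k_N_=2.22 k_T_=6.96 /"
        REWIND(lu)
        DO ia = 1,Na
          name_='ions'; tau_=1.0; sigma_=1.0; q_=1.0; k_N_=2.22; k_T_=6.96   ! defaults, as in species_readinputs
          READ(lu, SPECIES)                        ! reads the ia-th &SPECIES group
          PRINT *, ia, ' ', TRIM(name_), tau_, sigma_, q_
        ENDDO
        CLOSE(lu)
      END PROGRAM read_two_species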
+ !mat_file = 'gk_sugama_P_20_J_10_N_150_kpm_8.0.h5' +/ +&INITIAL_CON + INIT_OPT = 'phi' + ACT_ON_MODES = 'donothing' + init_background = 0 + init_noiselvl = 0.005 + iseed = 42 +/ +&TIME_INTEGRATION_PAR + numerical_scheme = 'RK4' +/ diff --git a/testcases/smallest_problem/fort_00.90 b/testcases/smallest_problem/fort_00.90 index 04e32a7f..7c7eec40 100644 --- a/testcases/smallest_problem/fort_00.90 +++ b/testcases/smallest_problem/fort_00.90 @@ -67,8 +67,8 @@ / &COLLISION_PAR collision_model = 'DG' !DG/SG/PA/LD (dougherty, sugama, pitch angle, landau) - gyrokin_CO = .f. - interspecies = .true. + GK_CO = .f. + INTERSPECIES = .true. !mat_file = 'gk_sugama_P_20_J_10_N_150_kpm_8.0.h5' / &INITIAL_CON diff --git a/testcases/smallest_problem/fort_01.90 b/testcases/smallest_problem/fort_01.90 index f5a7d560..ecaa8711 100644 --- a/testcases/smallest_problem/fort_01.90 +++ b/testcases/smallest_problem/fort_01.90 @@ -67,8 +67,8 @@ / &COLLISION_PAR collision_model = 'DG' !DG/SG/PA/LD (dougherty, sugama, pitch angle, landau) - gyrokin_CO = .t. - interspecies = .true. + GK_CO = .t. + INTERSPECIES = .true. !mat_file = 'gk_sugama_P_20_J_10_N_150_kpm_8.0.h5' / &INITIAL_CON diff --git a/testcases/zpinch_example/fort_00.90 b/testcases/zpinch_example/fort_00.90 index 67312e54..5864634f 100644 --- a/testcases/zpinch_example/fort_00.90 +++ b/testcases/zpinch_example/fort_00.90 @@ -67,8 +67,8 @@ / &COLLISION_PAR collision_model = 'DG' !DG/SG/PA/LD (dougherty, sugama, pitch angle, landau) - gyrokin_CO = .true. - interspecies = .true. + GK_CO = .true. + INTERSPECIES = .true. !mat_file = 'gk_sugama_P_20_J_10_N_150_kpm_8.0.h5' / &INITIAL_CON diff --git a/wk/CBC_P_J_scan.m b/wk/CBC_P_J_scan.m index 1b2a32ea..a4dc8261 100644 --- a/wk/CBC_P_J_scan.m +++ b/wk/CBC_P_J_scan.m @@ -91,7 +91,7 @@ for J = J_a %% OPTIONS LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % Collision operator - ABCO = 1; % interspecies collisions + ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/CBC_hypcoll_PJ_scan.m b/wk/CBC_hypcoll_PJ_scan.m index 1375c493..1392bc32 100644 --- a/wk/CBC_hypcoll_PJ_scan.m +++ b/wk/CBC_hypcoll_PJ_scan.m @@ -92,7 +92,7 @@ for NU_ = NU_a %% OPTIONS LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % Collision operator - ABCO = 1; % interspecies collisions + ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/CBC_kT_PJ_scan.m b/wk/CBC_kT_PJ_scan.m index 66912c27..4fe617ec 100644 --- a/wk/CBC_kT_PJ_scan.m +++ b/wk/CBC_kT_PJ_scan.m @@ -94,7 +94,7 @@ for KT = KT_a %% OPTIONS LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % Collision operator - ABCO = 1; % interspecies collisions + ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/CBC_kT_nu_scan.m b/wk/CBC_kT_nu_scan.m index 5bd21bfd..fca6c554 100644 --- a/wk/CBC_kT_nu_scan.m +++ b/wk/CBC_kT_nu_scan.m @@ -90,7 +90,7 @@ for KT = KT_a %% OPTIONS LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % 
Collision operator - ABCO = 1; % interspecies collisions + ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/CBC_nu_PJ_scan.m b/wk/CBC_nu_PJ_scan.m index 8a28aca6..f20276b1 100644 --- a/wk/CBC_nu_PJ_scan.m +++ b/wk/CBC_nu_PJ_scan.m @@ -89,7 +89,7 @@ for NU = NU_a %% OPTIONS LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % Collision operator - ABCO = 1; % interspecies collisions + ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/benchmark scripts/Ajay_scan_CH4_lin_ITG.m b/wk/benchmark scripts/Ajay_scan_CH4_lin_ITG.m index 5836570d..db2e4d8e 100644 --- a/wk/benchmark scripts/Ajay_scan_CH4_lin_ITG.m +++ b/wk/benchmark scripts/Ajay_scan_CH4_lin_ITG.m @@ -87,7 +87,7 @@ for NU = NU_a %% OPTIONS LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % Collision operator - ABCO = 1; % interspecies collisions + ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_3D_Zpinch.m b/wk/lin_3D_Zpinch.m index e6ca8d59..fa650e0a 100644 --- a/wk/lin_3D_Zpinch.m +++ b/wk/lin_3D_Zpinch.m @@ -68,7 +68,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_ETPY.m b/wk/lin_ETPY.m index ed2d0e85..5c5f4725 100644 --- a/wk/lin_ETPY.m +++ b/wk/lin_ETPY.m @@ -72,7 +72,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_ITG.m b/wk/lin_ITG.m index 64bfc888..1750f20e 100644 --- a/wk/lin_ITG.m +++ b/wk/lin_ITG.m @@ -75,7 +75,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 0; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_KBM.m b/wk/lin_KBM.m index f4f03f62..7a8c6b78 100644 --- a/wk/lin_KBM.m +++ b/wk/lin_KBM.m @@ -64,7 +64,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 
0; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_MTM.m b/wk/lin_MTM.m index 3330f265..b7f2885b 100644 --- a/wk/lin_MTM.m +++ b/wk/lin_MTM.m @@ -64,7 +64,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 0; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_RHT.m b/wk/lin_RHT.m index 9fe1bcce..61a2c101 100644 --- a/wk/lin_RHT.m +++ b/wk/lin_RHT.m @@ -66,7 +66,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 0; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/lin_TEM.m b/wk/lin_TEM.m index 70e5f26a..563d90f0 100644 --- a/wk/lin_TEM.m +++ b/wk/lin_TEM.m @@ -64,7 +64,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 0; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) diff --git a/wk/local_run.m b/wk/local_run.m index 7031cacb..21aca732 100644 --- a/wk/local_run.m +++ b/wk/local_run.m @@ -40,7 +40,7 @@ JOB2LOAD= -1; % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'SG'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions NL_CLOS = 0; % nonlinear closure model (-2: nmax = jmax, -1: nmax = jmax-j, >=0 : nmax = NL_CLOS) SIMID = 'kobayashi_2015_fig1'; % Name of the simulation % SIMID = 'debug'; % Name of the simulation diff --git a/wk/marconi_run.m b/wk/marconi_run.m index 4f78af0a..00ee0405 100644 --- a/wk/marconi_run.m +++ b/wk/marconi_run.m @@ -48,7 +48,7 @@ JOB2LOAD= -1; % start from t=0 if <0, else restart from outputs_$job2load % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 1; % gyrokinetic operator -ABCO = 1; % interspecies collisions +ABCO = 1; % INTERSPECIES collisions CLOS = 0; % Closure model (0: =0 truncation) NL_CLOS = -1; % nonlinear closure model (-2: nmax = jmax, -1: nmax = jmax-j, >=0 : nmax = NL_CLOS) LINEARITY = 'nonlinear'; % activate non-linearity (is cancelled if KXEQ0 = 1) diff --git a/wk/quick_run.m b/wk/quick_run.m index dffb3b2a..9eb6073b 100644 --- a/wk/quick_run.m +++ b/wk/quick_run.m @@ -64,7 +64,7 @@ LINEARITY = 'linear'; % activate non-linearity (is cancelled if KXEQ0 = 1) % (LB:L.Bernstein, DG:Dougherty, SG:Sugama, LR: Lorentz, LD: Landau) CO = 'DG'; GKCO = 0; % gyrokinetic operator -ABCO = 1; % 
interspecies collisions +ABCO = 1; % INTERSPECIES collisions INIT_ZF = 0; ZF_AMP = 0.0; CLOS = 0; % Closure model (0: =0 truncation, 1: v^Nmax closure (p+2j<=Pmax))s NL_CLOS = 0; % nonlinear closure model (-2:nmax=jmax; -1:nmax=jmax-j; >=0:nmax=NL_CLOS) -- GitLab
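
Reviewer note on the input format touched by this patch: the renamed &COLLISION_PAR flags (gyrokin_CO -> GK_CO, interspecies -> INTERSPECIES) and the repeated &SPECIES groups in testcases/smallest_problem/fort.90 are plain Fortran namelists. The short program below is only an illustrative sketch of how such a file can be read with standard namelist I/O; the program name, defaults, and the assumption that fort.90 sits in the working directory are hypothetical, and this is not the project's actual readinputs/species_mod code.

! Sketch only (assumed names, not GYACOMO's reader): parse the two &SPECIES
! blocks and the renamed &COLLISION_PAR flags from fort.90 via namelist I/O.
program read_fort90_sketch
  implicit none
  integer, parameter :: dp = kind(1.0d0)
  ! &SPECIES entries (one group instance per species, listed in file order)
  character(len=32) :: name_  = 'ions'
  real(dp)          :: tau_   = 1.0_dp, sigma_ = 1.0_dp, q_ = 1.0_dp
  real(dp)          :: k_N_   = 0.0_dp, k_T_   = 0.0_dp
  ! &COLLISION_PAR entries after the rename
  character(len=8)   :: collision_model = 'DG'
  logical            :: GK_CO = .false., INTERSPECIES = .true.
  character(len=128) :: mat_file = 'null'
  integer :: lun, ios, ia

  namelist /SPECIES/       name_, tau_, sigma_, q_, k_N_, k_T_
  namelist /COLLISION_PAR/ collision_model, GK_CO, INTERSPECIES, mat_file

  open(newunit=lun, file='fort.90', status='old', action='read')
  ! Namelist input skips records until the requested group header is found
  ! (behavior relied on here, supported by gfortran/ifort); each READ of
  ! /SPECIES/ then consumes the next &SPECIES ... / block, so looping twice
  ! picks up the ion and electron entries in order.
  do ia = 1, 2
    read(lun, nml=SPECIES, iostat=ios)
    if (ios /= 0) exit
    print *, 'species ', ia, ': ', trim(name_), ' sigma_ =', sigma_, ' k_T_ =', k_T_
  end do
  ! &COLLISION_PAR follows the species blocks in the file, so a sequential
  ! read on the same unit finds it next.
  read(lun, nml=COLLISION_PAR)
  print *, 'collision_model = ', trim(collision_model), &
           ' GK_CO =', GK_CO, ' INTERSPECIES =', INTERSPECIES
  close(lun)
end program read_fort90_sketch

Compiled with, e.g., gfortran and run inside testcases/smallest_problem, this sketch would print the two species and the collision flags, which is a quick way to check that the renamed namelist entries in fort_00.90/fort_01.90 stay consistent with the new fort.90.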