diff --git a/src/auxval.F90 b/src/auxval.F90
index d044790125f00c8b361a01e09452db2f7a3347b3..547b697ab60c84f3ed99ba4f1445241f9cfca170 100644
--- a/src/auxval.F90
+++ b/src/auxval.F90
@@ -40,6 +40,7 @@ subroutine auxval
   CALL build_dv4Hp_table ! precompute the hermite fourth derivative table
 
   !! Display parallel settings
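+  ! synchronize, then let each rank report in turn (the in-loop barrier serializes the output by my_id)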
+  CALL mpi_barrier(MPI_COMM_WORLD, ierr)
   DO i_ = 0,num_procs-1
     CALL mpi_barrier(MPI_COMM_WORLD, ierr)
     IF (my_id .EQ. i_) THEN
@@ -51,9 +52,9 @@ subroutine auxval
       WRITE(*,'(A9,I3,A10,I3,A10,I3,A9,I3)')&
        'my_id  = ', my_id, ', rank_p  = ', rank_p, ', rank_ky  = ', rank_ky,', rank_z  = ', rank_z
        WRITE(*,'(A22,I3,A11,I3)')&
-       '              ips = ', ips, ', ipe  = ', ipe
+       '              ips   = ', ips  , ', ipe    = ', ipe
        WRITE(*,'(A22,I3,A11,I3)')&
-       '              ijs = ', ijs, ', ije  = ', ije
+       '              ijs   = ', ijs  , ', ije    = ', ije
        WRITE(*,'(A22,I3,A11,I3)')&
        '              ikxs  = ', ikxs , ', ikxe   = ', ikxe
        WRITE(*,'(A22,I3,A11,I3)')&
diff --git a/src/control.F90 b/src/control.F90
index 20b0239b72e6b12894dbfc72f56515b7cb9b5021..d4a965bcdad38429c1d643c7d0aabfe88c7c86c9 100644
--- a/src/control.F90
+++ b/src/control.F90
@@ -15,7 +15,7 @@ SUBROUTINE control
   !                   1.1     Initialize the parallel environment
   CALL ppinit
   CALL speak('MPI initialized')
-
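+  ! make sure every rank is through MPI setup before the start time is stamped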
+  CALL mpi_barrier(MPI_COMM_WORLD, ierr)
 
   CALL daytim('Start at ')
   
diff --git a/src/grid_mod.F90 b/src/grid_mod.F90
index e46d4b039c4961675574f0ed50a07a03473c3bbd..513fd0a2a01591a699f494544a62c12872df9292 100644
--- a/src/grid_mod.F90
+++ b/src/grid_mod.F90
@@ -56,7 +56,7 @@ MODULE grid
   INTEGER, PUBLIC, PROTECTED :: local_nz
   INTEGER, PUBLIC, PROTECTED :: local_nkp
  INTEGER, PUBLIC, PROTECTED :: ngp, ngj, ngx, ngy, ngz ! number of ghost points
-  INTEGER, PUBLIC, PROTECTED :: Nzgrid  ! one or two depending on the staggered grid option
+  INTEGER, PUBLIC, PROTECTED :: nzgrid  ! one or two depending on the staggered grid option
   ! Local offsets
   INTEGER, PUBLIC, PROTECTED :: local_na_offset
   INTEGER, PUBLIC, PROTECTED :: local_np_offset
@@ -233,7 +233,7 @@ CONTAINS
     SOLVE_POISSON  = .FALSE.; SOLVE_AMPERE   = .FALSE.
     ALLOCATE(parray(local_np+ngp))
-    ! Fill the interior (no ghosts)
-    DO ip = 1+ngp/2,local_np+ngp/2
+    ! Fill interior and ghost values in a single pass (ghosts keep the deltap spacing)
+    DO ip = 1,local_np+ngp
       parray(ip) = (ip-1-ngp/2+local_np_offset)*deltap
       ! Storing indices of particular degrees for fluid moments computations
       SELECT CASE (parray(ip))
@@ -245,11 +245,6 @@ CONTAINS
     END DO
     local_pmax = parray(local_np+ngp/2)
     local_pmin = parray(1+ngp/2)
-    ! Fill the ghosts
-    DO ig = 1,ngp/2
-      parray(ig)                = local_pmin-ngp/2+(ig-1)
-      parray(local_np+ngp/2+ig) = local_pmax+ig
-    ENDDO
     IF(CONTAINSp0) SOLVE_POISSON = .TRUE.
     IF(CONTAINSp1) SOLVE_AMPERE  = .TRUE.
     !DGGK operator uses moments at index p=2 (ip=3) for the p=0 term so the
@@ -283,16 +278,11 @@ CONTAINS
     local_nj        = ije - ijs + 1
     local_nj_offset = ijs - 1
     ALLOCATE(jarray(local_nj+ngj))
-    DO ij = 1+ngj/2,local_nj+ngj/2
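+    ! fill interior and ghost values in a single pass (unit spacing in j)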
+    DO ij = 1,local_nj+ngj
       jarray(ij) = ij-1-ngj/2+local_nj_offset
     END DO
     local_jmax = jarray(local_nj+ngj/2)
     local_jmin = jarray(1+ngj/2)
-    ! Fill the ghosts
-    DO ig = 1,ngj/2
-      jarray(ig)                = local_jmin-ngj/2+(ig-1)
-      jarray(local_nj+ngj/2+ig) = local_jmax+ig
-    ENDDO
     ! Precomputations
     jmax_dp      = real(jmax,dp)
     diff_j_coeff = jmax_dp*(1._dp/jmax_dp)**6
@@ -489,12 +479,12 @@ CONTAINS
       ! indices for even p and odd p grids (used in kernel, jacobian, gij etc.)
       ieven  = 1
       iodd   = 2
-      Nzgrid = 2
+      nzgrid = 2
     ELSE
       grid_shift = 0._dp
       ieven  = 1
       iodd   = 1
-      Nzgrid = 1
+      nzgrid = 1
     ENDIF
     ! Build the full grids on process 0 to diagnose it without comm
     ALLOCATE(zarray_full(total_nz))
@@ -530,15 +520,16 @@ CONTAINS
       displs_nz(in+1) = istart-1
     ENDDO
     ! Local z array
-    ALLOCATE(zarray(local_nz+Ngz,Nzgrid))
+    ALLOCATE(zarray(local_nz+ngz,nzgrid))
     !! interior point loop
     DO iz = 1,total_nz
-      DO eo = 1,Nzgrid
+      DO eo = 1,nzgrid
         zarray(iz+ngz/2,eo) = zarray_full(iz) + REAL(eo-1,dp)*grid_shift
       ENDDO
     ENDDO
-    ALLOCATE(local_zmax(Nzgrid),local_zmin(Nzgrid))
-    DO eo = 1,Nzgrid
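+    ! use the allocate_array utility (bounds 1:nzgrid) instead of a bare ALLOCATE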
+    CALL allocate_array(local_zmax,1,nzgrid)
+    CALL allocate_array(local_zmin,1,nzgrid)
+    DO eo = 1,nzgrid
       ! Find local extrema
       local_zmax(eo) = zarray(local_nz+ngz/2,eo)
       local_zmin(eo) = zarray(1+ngz/2,eo)
diff --git a/src/moments_eq_rhs_mod.F90 b/src/moments_eq_rhs_mod.F90
index 74d1615b26e922471e225ce385481f095445d5fc..29c658c5025c31f2a0506f2491d7b21b43ba2003 100644
--- a/src/moments_eq_rhs_mod.F90
+++ b/src/moments_eq_rhs_mod.F90
@@ -15,6 +15,7 @@ SUBROUTINE compute_moments_eq_rhs
   USE prec_const
   USE collision
   USE time_integration
+  USE species, ONLY: dpdx
   USE geometry, ONLY: gradz_coeff, dlnBdz, Ckxky!, Gamma_phipar
   USE calculus, ONLY: interp_z, grad_z, grad_z2
-  ! USE species,  ONLY: dpdx
@@ -53,6 +54,7 @@ SUBROUTINE compute_moments_eq_rhs
             ! Species loop
             a:DO ia = 1,local_na
               Napj = moments(ia,ipi,iji,iky,ikx,izi,updatetlevel)
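+              ! zero the RHS up front: the closure test below can leave it unset otherwise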
+              RHS = 0._dp
               IF((CLOS .NE. 1) .OR. (p_int+2*j_int .LE. dmax)) THEN ! for the closure scheme
                 !! Compute moments_ mixing terms
                 ! Perpendicular dynamic
@@ -88,13 +90,21 @@ SUBROUTINE compute_moments_eq_rhs
                 ! Parallel magnetic term (Landau damping and the mirror force)
                 Mpara = gradz_coeff(izi,eo)*(Ldamp + Fmir)
                 !! Electrical potential term
-                Dphi =i_ky*( xphij  (ia,ip,ij)*kernel(ia,iji  ,iky,ikx,izi,eo) &
+                IF ( p_int .LE. 2 ) THEN ! kronecker p0 p1 p2
+                  Dphi =i_ky*( xphij  (ia,ip,ij)*kernel(ia,iji  ,iky,ikx,izi,eo) &
                             +xphijp1(ia,ip,ij)*kernel(ia,iji+1,iky,ikx,izi,eo) &
                             +xphijm1(ia,ip,ij)*kernel(ia,iji-1,iky,ikx,izi,eo) )*phi(iky,ikx,izi)
+                ELSE
+                  Dphi = 0._dp
+                ENDIF
                 !! Vector potential term
-                Dpsi =-i_ky*( xpsij  (ia,ip,ij)*kernel(ia,iji  ,iky,ikx,izi,eo) &
+                IF ( (p_int .LE. 3) .AND. (p_int .GE. 1) ) THEN ! kronecker p1 p3 (the guard spans p=1..3; the deltas select p=1 and p=3)
+                  Dpsi =-i_ky*( xpsij  (ia,ip,ij)*kernel(ia,iji  ,iky,ikx,izi,eo) &
                               +xpsijp1(ia,ip,ij)*kernel(ia,iji+1,iky,ikx,izi,eo) &
                               +xpsijm1(ia,ip,ij)*kernel(ia,iji-1,iky,ikx,izi,eo))*psi(iky,ikx,izi)
+                ELSE
+                  Dpsi = 0._dp
+                ENDIF
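+                ! both guards skip kernel evaluations that the kronecker deltas in the
+                ! xphij/xpsij prefactors would zero out anyway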
                 !! Sum of all RHS terms
                 RHS = &
                     ! Nonlinear term Sapj_ = {phi,f}
@@ -149,21 +159,21 @@ SUBROUTINE compute_moments_eq_rhs
   CALL cpu_time(t1_rhs)
   tc_rhs = tc_rhs + (t1_rhs-t0_rhs)
 
-      print*,'sumabs moments i', SUM(ABS(moments(1,:,:,:,:,:,updatetlevel)))
-      print*,'moment rhs i 221', moments_rhs(1,1,1,2,2,1,updatetlevel)
-      print*,'sum real nadiabe', SUM(REAL(nadiab_moments(2,:,:,:,:,:)))
-      print*,'sumreal momrhs i', SUM(REAL(moments_rhs(1,:,:,:,:,:,:)))
-      print*,'sumimag momrhs i', SUM(IMAG(moments_rhs(1,:,:,:,:,:,:)))
-      print*,'sumreal phi     ', SUM(REAL(phi(:,:,(1+ngz/2):(local_nz+ngz/2))))
-      print*,'sumimag phi     ', SUM(IMAG(phi(:,:,(1+ngz/2):(local_nz+ngz/2))))
-      print*,'phi(2,2,1)      ', phi(2,2,1+ngz/2)
-      print*,'sumreal ddznipj ', SUM(REAL(ddz_napj(1,:,:,:,:,:)))
-      print*,'sumimag ddznipj ', SUM(IMAG(ddz_napj(1,:,:,:,:,:)))
-      print*,'        ddznipj  ',(ddz_napj(1,2+ngp/2,2+ngj/2,2,2,1))
-      print*,'sumreal Capj    ', SUM(REAL(Capj(1,:,:,:,:,:)))
-      print*,'---'
-      IF(updatetlevel .EQ. 4) stop
-  stop
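+  ! debug dumps, kept commented out for troubleshooting the RHS assembly: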
+  !     print*,'sumabs moments i', SUM(ABS(moments(1,:,:,:,:,:,updatetlevel)))
+  !     print*,'moment rhs i 221', moments_rhs(1,1,1,2,2,1,updatetlevel)
+  !     print*,'sum real nadiabe', SUM(REAL(nadiab_moments(2,:,:,:,:,:)))
+  !     print*,'sumreal momrhs i', SUM(REAL(moments_rhs(1,:,:,:,:,:,:)))
+  !     print*,'sumimag momrhs i', SUM(IMAG(moments_rhs(1,:,:,:,:,:,:)))
+  !     print*,'sumreal phi     ', SUM(REAL(phi(:,:,(1+ngz/2):(local_nz+ngz/2))))
+  !     print*,'sumimag phi     ', SUM(IMAG(phi(:,:,(1+ngz/2):(local_nz+ngz/2))))
+  !     print*,'phi(2,2,1)      ', phi(2,2,1+ngz/2)
+  !     print*,'sumreal ddznipj ', SUM(REAL(ddz_napj(1,:,:,:,:,:)))
+  !     print*,'sumimag ddznipj ', SUM(IMAG(ddz_napj(1,:,:,:,:,:)))
+  !     print*,'        ddznipj  ',(ddz_napj(1,2+ngp/2,2+ngj/2,2,2,1))
+  !     print*,'sumreal Capj    ', SUM(REAL(Capj(1,:,:,:,:,:)))
+  !     print*,'---'
+  !     IF(updatetlevel .EQ. 4) stop
+  !     stop
 
 END SUBROUTINE compute_moments_eq_rhs
 
diff --git a/src/parallel_mod.F90 b/src/parallel_mod.F90
index f437d332e11b0357ebc7dce5b4be35ee7e6fc0db..a9f3df3474b748e9c957fcbd35d20419fe8337dc 100644
--- a/src/parallel_mod.F90
+++ b/src/parallel_mod.F90
@@ -50,12 +50,9 @@ CONTAINS
     LOGICAL :: com_log(ndims) = .FALSE.
     CHARACTER(len=32) :: str
     INTEGER :: nargs, i, l
-
     CALL MPI_INIT(ierr)
-
     CALL MPI_COMM_RANK (MPI_COMM_WORLD,     my_id, ierr)
     CALL MPI_COMM_SIZE (MPI_COMM_WORLD, num_procs, ierr)
-
     nargs = COMMAND_ARGUMENT_COUNT()
     !
     IF( nargs .GT. 1 ) THEN
@@ -68,7 +65,6 @@ CONTAINS
           CALL MPI_ABORT(MPI_COMM_WORLD, -2, ierr)
        END IF
     ELSE
-      !  CALL MPI_DIMS_CREATE(num_procs, ndims, dims, ierr)
       dims(1) = 1
       dims(2) = num_procs
       dims(3) = 1
@@ -90,21 +86,16 @@ CONTAINS
     !
     !  Partitions 3-dim cartesian topology of comm0 into 1-dim cartesian subgrids
     !
-    com_log = (/.TRUE.,.FALSE.,.FALSE./)
-    CALL MPI_CART_SUB (comm0, com_log,  comm_p, ierr)
-    com_log = (/.FALSE.,.TRUE.,.FALSE./)
-    CALL MPI_CART_SUB (comm0, com_log, comm_ky, ierr)
-    com_log = (/.FALSE.,.FALSE.,.TRUE./)
-    CALL MPI_CART_SUB (comm0, com_log,  comm_z, ierr)
+    CALL MPI_CART_SUB (comm0, (/.TRUE.,.FALSE.,.FALSE./),  comm_p, ierr)
+    CALL MPI_CART_SUB (comm0, (/.FALSE.,.TRUE.,.FALSE./), comm_ky, ierr)
+    CALL MPI_CART_SUB (comm0, (/.FALSE.,.FALSE.,.TRUE./),  comm_z, ierr)
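+    ! (the inline logical masks are MPI_CART_SUB's remain_dims: .TRUE. keeps that axis in the subgrid)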
     ! Find id inside the 1d-sub communicators
     CALL MPI_COMM_RANK(comm_p,  rank_p,  ierr)
     CALL MPI_COMM_RANK(comm_ky, rank_ky, ierr)
     CALL MPI_COMM_RANK(comm_z,  rank_z,  ierr)
     ! 2D communicator
-    com_log = (/.TRUE.,.FALSE.,.TRUE./)
-    CALL MPI_CART_SUB (comm0, com_log,  comm_pz,  ierr)
-    com_log = (/.FALSE.,.TRUE.,.TRUE./)
-    CALL MPI_CART_SUB (comm0, com_log,  comm_kyz, ierr)
+    CALL MPI_CART_SUB (comm0, (/.TRUE.,.FALSE.,.TRUE./),  comm_pz,  ierr)
+    CALL MPI_CART_SUB (comm0, (/.FALSE.,.TRUE.,.TRUE./),  comm_kyz, ierr)
     ! Count the number of processes in 2D comms
     CALL MPI_COMM_SIZE(comm_pz, num_procs_pz, ierr)
     CALL MPI_COMM_SIZE(comm_kyz,num_procs_kyz,ierr)
diff --git a/src/ppinit.F90 b/src/ppinit.F90
deleted file mode 100644
index 3eb59a8f4f44eb45ed0ae9a5d6e4127f5e773aae..0000000000000000000000000000000000000000
--- a/src/ppinit.F90
+++ /dev/null
@@ -1,90 +0,0 @@
-SUBROUTINE ppinit
-  !   Parallel environment
-
-  USE basic
-  use prec_const
-  IMPLICIT NONE
-
-  ! Variables for cartesian domain decomposition
-  INTEGER, PARAMETER :: ndims=3 ! p, kx and z
-  INTEGER, DIMENSION(ndims) :: dims=0, coords=0
-  LOGICAL :: periods(ndims) = .FALSE., reorder=.FALSE.
-  CHARACTER(len=32) :: str
-  INTEGER :: nargs, i, l
-
-  CALL MPI_INIT(ierr)
-
-  CALL MPI_COMM_RANK (MPI_COMM_WORLD,     my_id, ierr)
-  CALL MPI_COMM_SIZE (MPI_COMM_WORLD, num_procs, ierr)
-
-  nargs = COMMAND_ARGUMENT_COUNT()
-  !
-  IF( nargs .GT. 1 ) THEN
-     DO i=1,ndims
-        CALL GET_COMMAND_ARGUMENT(i, str, l, ierr)
-        READ(str(1:l),'(i3)')  dims(i)
-     END DO
-     IF( PRODUCT(dims) .NE. num_procs ) THEN
-      IF(my_id .EQ. 0) WRITE(*, '(a,i4,a,i4)') 'Product of dims: ', PRODUCT(dims), " is not consistent WITH NPROCS=",num_procs
-        CALL MPI_ABORT(MPI_COMM_WORLD, -2, ierr)
-     END IF
-  ELSE
-    !  CALL MPI_DIMS_CREATE(num_procs, ndims, dims, ierr)
-    dims(1) = 1
-    dims(2) = num_procs
-    dims(3) = 1
-  END IF
-
-  num_procs_p  = dims(1) ! Number of processes along p
-  num_procs_ky = dims(2) ! Number of processes along kx
-  num_procs_z  = dims(3) ! Number of processes along z
-
-  !
-  !periodicity in p
-  periods(1)=.FALSE.
-  !periodicity in ky
-  periods(2)=.FALSE.
-  !periodicity in z
-  periods(3)=.TRUE.
-
-  CALL MPI_CART_CREATE(MPI_COMM_WORLD, ndims, dims, periods, reorder, comm0, ierr)
-  CALL MPI_COMM_GROUP(comm0,group0, ierr)
-  CALL MPI_COMM_RANK(comm0, rank_0,  ierr)
-  CALL MPI_CART_COORDS(comm0,rank_0,ndims,coords,ierr)
-
-  !
-  !  Partitions 3-dim cartesian topology of comm0 into 1-dim cartesian subgrids
-  !
-  CALL MPI_CART_SUB (comm0, (/.TRUE.,.FALSE.,.FALSE./),  comm_p, ierr)
-  CALL MPI_CART_SUB (comm0, (/.FALSE.,.TRUE.,.FALSE./), comm_ky, ierr)
-  CALL MPI_CART_SUB (comm0, (/.FALSE.,.FALSE.,.TRUE./),  comm_z, ierr)
-  ! Find id inside the 1d-sub communicators
-  CALL MPI_COMM_RANK(comm_p,  rank_p,  ierr)
-  CALL MPI_COMM_RANK(comm_ky, rank_ky, ierr)
-  CALL MPI_COMM_RANK(comm_z,  rank_z,  ierr)
-
-  ! 2D communicator
-  CALL MPI_CART_SUB (comm0, (/.TRUE.,.FALSE.,.TRUE./),  comm_pz,  ierr)
-  CALL MPI_CART_SUB (comm0, (/.FALSE.,.TRUE.,.TRUE./),  comm_kyz, ierr)
-  ! Count the number of processes in 2D comms
-  CALL MPI_COMM_SIZE(comm_pz, num_procs_pz, ierr)
-  CALL MPI_COMM_SIZE(comm_kyz,num_procs_kyz,ierr)
-  ! Find id inside the 1d-sub communicators
-  CALL MPI_COMM_RANK(comm_pz,  rank_pz,   ierr)
-  CALL MPI_COMM_RANK(comm_kyz, rank_kyz,  ierr)
-
-  ! Find neighbours
-  CALL MPI_CART_SHIFT(comm0, 0, 1, nbr_L, nbr_R, ierr) !left   right neighbours
-  CALL MPI_CART_SHIFT(comm0, 1, 1, nbr_B, nbr_T, ierr) !bottom top   neighbours
-  CALL MPI_CART_SHIFT(comm0, 2, 1, nbr_D, nbr_U, ierr) !down   up    neighbours
-
-  ! Create the communicator for groups used in gatherv
-  ! CALL MPI_COMM_GROUP(comm0,group_ky0)
-  ! ALLOCATE(rank2include(0:num_procs_ky))
-  ! DO r_ = 0,rank_0
-  !   IF(rank_y .EQ. 0) &
-  !     rank2exclude
-  ! ENDDO
-  ! CALL MPI_COMM_CREATE_GROUPE(comm0, group_p0, comm_ky0)
-
-END SUBROUTINE ppinit
diff --git a/src/processing_mod.F90 b/src/processing_mod.F90
index b38d9cd13ef5d4ed18e9e73e64c09c95c917c488..65bfea95ecd27d04286c2b8649cc64b318a4c219 100644
--- a/src/processing_mod.F90
+++ b/src/processing_mod.F90
@@ -181,6 +181,8 @@ CONTAINS
                   ENDDO
                ENDDO
             ENDDO
+         ELSEIF(CONTAINSp0) THEN
+            ERROR STOP "Degrees p=0 and p=2 should be owned by the same process"
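+            ! (this reconstruction needs the p=0 and p=2 moments on the same rank,
+            !  so a p-decomposition that splits them is rejected here)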
          ENDIF
          IF(EM .AND. CONTAINSp1 .AND. CONTAINSp3) THEN
             !!----------------ELECTROMAGNETIC CONTRIBUTION--------------------
diff --git a/testcases/smallest_problem/fort_11.90 b/testcases/smallest_problem/fort_11.90
new file mode 100644
index 0000000000000000000000000000000000000000..f6a8807c823adec78718240e0ca647988b504acd
--- /dev/null
+++ b/testcases/smallest_problem/fort_11.90
@@ -0,0 +1,101 @@
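+! minimal two-species electromagnetic test case: Nx=2, Ny=4, Nz=6, pmax=4, jmax=1, RK4, GK Dougherty collisions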
+&BASIC
+  nrun       = 99999999
+  dt         = 0.01
+  tmax       = 5
+  maxruntime = 356400
+  job2load   = -1
+/
+&GRID
+  pmax   = 4
+  jmax   = 1
+  Nx     = 2
+  Lx     = 200
+  Ny     = 4
+  Ly     = 60
+  Nz     = 6
+  SG     = .f.
+  Nexc   = 1
+/
+&GEOMETRY
+  geom   = 's-alpha'
+  q0     = 1.4
+  shear  = 0.0
+  eps    = 0.18
+  kappa  = 1.0
+  s_kappa= 0.0
+  delta  = 0.0
+  s_delta= 0.0
+  zeta   = 0.0
+  s_zeta = 0.0
+  parallel_bc = 'dirichlet'
+  shift_y= 0.0
+/
+&OUTPUT_PAR
+  dtsave_0d = 0.5
+  dtsave_1d = -1
+  dtsave_2d = -1
+  dtsave_3d = 1
+  dtsave_5d = 5
+  write_doubleprecision = .f.
+  write_gamma = .t.
+  write_hf    = .t.
+  write_phi   = .t.
+  write_Na00  = .t.
+  write_Napj  = .t.
+  write_dens  = .t.
+  write_fvel  = .t.
+  write_temp  = .t.
+/
+&MODEL_PAR
+  ! Collisionality
+  CLOS    = 0
+  NL_CLOS = -1
+  LINEARITY = 'linear'
+  Na      = 2 ! number of species
+  mu_x    = 0.2
+  mu_y    = 0.4
+  N_HD    = 4
+  mu_z    = 0.6
+  HYP_V   = 'hypcoll'
+  mu_p    = 0.1
+  mu_j    = 0.5
+  nu      = 1.0
+  beta    = 0.1
+  ADIAB_E = .f.
+  tau_e   = 1.0
+/
+&SPECIES
+ ! ions
+ name_ = 'ions'
+ tau_  = 1.0
+ sigma_= 1.0
+ q_    = 1.0
+ k_N_  = 3.0!2.22
+ k_T_  = 4.0!6.96
+/
+&SPECIES
+ ! electrons
+ name_ = 'electrons'
+ tau_  = 1.0
+ sigma_= 0.023338
+ q_    = -1.0
+ k_N_  = 1.0!2.22
+ k_T_  = 2.0!6.96
+/
+
+&COLLISION_PAR
+  collision_model = 'DG' !DG/SG/PA/LD (dougherty, sugama, pitch angle, landau)
+  GK_CO           = .t.
+  INTERSPECIES    = .true.
+  mat_file        = 'gk_sugama_P_20_J_10_N_150_kpm_8.0.h5'
+/
+&INITIAL_CON
+  INIT_OPT         = 'allmom'
+  ACT_ON_MODES     = 'donothing'
+  init_background  = 1.0
+  init_noiselvl    = 0.0
+  iseed            = 42
+/
+&TIME_INTEGRATION_PAR
+  numerical_scheme = 'RK4'
+/