diff --git a/src/collision_mod.F90 b/src/collision_mod.F90
index 89315fcae1db0c9fd3ccc96e5d3d58591d4ba8e9..960d7356dfcbd490622478500c91609fea0241b6 100644
--- a/src/collision_mod.F90
+++ b/src/collision_mod.F90
@@ -67,7 +67,7 @@ CONTAINS
   !******************************************************************************!
   SUBROUTINE DoughertyGK_e(ip_,ij_,ikx_,iky_,iz_,TColl_)
     USE fields, ONLY: moments_e, phi
-    USE grid,   ONLY: parray_e, jarray_e, kxarray, kyarray, Jmaxe
+    USE grid,   ONLY: parray_e, jarray_e, kxarray, kyarray, Jmaxe, ip0_e, ip1_e, ip2_e
     USE array,  ONLY: kernel_e
     USE basic
     USE model,  ONLY: sigmae2_taue_o2, qe_taue, nu_ee
@@ -108,13 +108,13 @@ CONTAINS
           Knp1 =  Kernel_e(in_+1,ikx_,iky_,iz_)
           Knm1 =  Kernel_e(in_-1,ikx_,iky_,iz_)
           ! Nonadiabatic moments (only different from moments when p=0)
-          nadiab_moment_0j   = moments_e(1,in_  ,ikx_,iky_,iz_,updatetlevel) + qe_taue*Knp0*phi(ikx_,iky_,iz_)
+          nadiab_moment_0j   = moments_e(ip0_e,in_  ,ikx_,iky_,iz_,updatetlevel) + qe_taue*Knp0*phi(ikx_,iky_,iz_)
           ! Density
           n_     = n_     + Knp0 * nadiab_moment_0j
           ! Perpendicular velocity
           uperp_ = uperp_ + be_*0.5_dp*(Knp0 - Knm1) * nadiab_moment_0j
           ! Parallel temperature
-          Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_e(3,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
+          Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_e(ip2_e,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
           ! Perpendicular temperature
           Tperp_ = Tperp_ + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j
         ENDDO
@@ -132,7 +132,7 @@ CONTAINS
       upar_  = 0._dp
       DO in_ = 1,jmaxe+1
         ! Parallel velocity
-         upar_  = upar_  + Kernel_e(in_,ikx_,iky_,iz_) * moments_e(2,in_,ikx_,iky_,iz_,updatetlevel)
+         upar_  = upar_  + Kernel_e(in_,ikx_,iky_,iz_) * moments_e(ip1_e,in_,ikx_,iky_,iz_,updatetlevel)
       ENDDO
       TColl_ = TColl_ + upar_*Kernel_e(ij_,ikx_,iky_,iz_)
       !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -150,11 +150,11 @@ CONTAINS
         Knp1 =  Kernel_e(in_+1,ikx_,iky_,iz_)
         Knm1 =  Kernel_e(in_-1,ikx_,iky_,iz_)
         ! Nonadiabatic moments (only different from moments when p=0)
-        nadiab_moment_0j = moments_e(1,in_,ikx_,iky_,iz_,updatetlevel) + qe_taue*Knp0*phi(ikx_,iky_,iz_)
+        nadiab_moment_0j = moments_e(ip0_e,in_,ikx_,iky_,iz_,updatetlevel) + qe_taue*Knp0*phi(ikx_,iky_,iz_)
         ! Density
         n_     = n_     + Knp0 * nadiab_moment_0j
         ! Parallel temperature
-        Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_e(3,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
+        Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_e(ip2_e,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
         ! Perpendicular temperature
         Tperp_ = Tperp_ + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j
       ENDDO
@@ -172,7 +172,7 @@ CONTAINS
   !******************************************************************************!
   SUBROUTINE DoughertyGK_i(ip_,ij_,ikx_,iky_,iz_,TColl_)
     USE fields, ONLY: moments_i, phi
-    USE grid,   ONLY: parray_i, jarray_i, kxarray, kyarray, Jmaxi
+    USE grid,   ONLY: parray_i, jarray_i, kxarray, kyarray, Jmaxi, ip0_i, ip1_i, ip2_i
     USE array,  ONLY: kernel_i
     USE basic
     USE model,  ONLY: sigmai2_taui_o2, qi_taui, nu_i
@@ -214,13 +214,13 @@ CONTAINS
           Knp1 =  Kernel_i(in_+1,ikx_,iky_,iz_)
           Knm1 =  Kernel_i(in_-1,ikx_,iky_,iz_)
           ! Nonadiabatic moments (only different from moments when p=0)
-          nadiab_moment_0j   = moments_i(1,in_  ,ikx_,iky_,iz_,updatetlevel) + qi_taui*Knp0*phi(ikx_,iky_,iz_)
+          nadiab_moment_0j   = moments_i(ip0_i,in_  ,ikx_,iky_,iz_,updatetlevel) + qi_taui*Knp0*phi(ikx_,iky_,iz_)
           ! Density
           n_     = n_     + Knp0 * nadiab_moment_0j
           ! Perpendicular velocity
           uperp_ = uperp_ + bi_*0.5_dp*(Knp0 - Knm1) * nadiab_moment_0j
           ! Parallel temperature
-          Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_i(3,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
+          Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_i(ip2_i,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
           ! Perpendicular temperature
           Tperp_ = Tperp_ + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j
         ENDDO
@@ -238,7 +238,7 @@ CONTAINS
       upar_  = 0._dp
       DO in_ = 1,jmaxi+1
         ! Parallel velocity
-         upar_  = upar_  + Kernel_i(in_,ikx_,iky_,iz_) * moments_i(2,in_,ikx_,iky_,iz_,updatetlevel)
+         upar_  = upar_  + Kernel_i(in_,ikx_,iky_,iz_) * moments_i(ip1_i,in_,ikx_,iky_,iz_,updatetlevel)
       ENDDO
       TColl_ = TColl_ + upar_*Kernel_i(ij_,ikx_,iky_,iz_)
       !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -256,11 +256,11 @@ CONTAINS
         Knp1 =  Kernel_i(in_+1,ikx_,iky_,iz_)
         Knm1 =  Kernel_i(in_-1,ikx_,iky_,iz_)
         ! Nonadiabatic moments (only different from moments when p=0)
-        nadiab_moment_0j = moments_i(1,in_,ikx_,iky_,iz_,updatetlevel) + qi_taui*Knp0*phi(ikx_,iky_,iz_)
+        nadiab_moment_0j = moments_i(ip0_i,in_,ikx_,iky_,iz_,updatetlevel) + qi_taui*Knp0*phi(ikx_,iky_,iz_)
         ! Density
         n_     = n_     + Knp0 * nadiab_moment_0j
         ! Parallel temperature
-        Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_i(3,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
+        Tpar_  = Tpar_  + Knp0 * (SQRT2*moments_i(ip2_i,in_,ikx_,iky_,iz_,updatetlevel) + nadiab_moment_0j)
         ! Perpendicular temperature
         Tperp_ = Tperp_ + ((2._dp*n_dp+1._dp)*Knp0 - (n_dp+1._dp)*Knp1 - n_dp*Knm1)*nadiab_moment_0j
       ENDDO
@@ -287,9 +287,9 @@ CONTAINS
     USE model
     USE utility
     IMPLICIT NONE
-    COMPLEX(dp), DIMENSION(1:pmaxe+1)   :: local_sum_e, buffer_e, total_sum_e
+    COMPLEX(dp), DIMENSION(1:total_np_e) :: local_sum_e, buffer_e, total_sum_e
     COMPLEX(dp), DIMENSION(ips_e:ipe_e) :: TColl_distr_e
-    COMPLEX(dp), DIMENSION(1:pmaxi+1)   :: local_sum_i, buffer_i, total_sum_i
+    COMPLEX(dp), DIMENSION(1:total_np_i) :: local_sum_i, buffer_i, total_sum_i
     COMPLEX(dp), DIMENSION(ips_i:ipe_i) :: TColl_distr_i
     COMPLEX(dp) :: TColl
     INTEGER :: ikxs_C, ikxe_C, ikys_C, ikye_C
@@ -298,20 +298,19 @@ CONTAINS
     CALL cpu_time(t0_coll)
 
     IF (ABS(CO) .GE. 2) THEN !compute only if COSOlver matrices are used
-
       DO ikx = ikxs,ikxe
         DO iky = ikys,ikye
           DO iz = izs,ize
             ! Electrons
             DO ij = 1,Jmaxe+1
               ! Loop over all p to compute sub collision term
-              DO ip = 1,Pmaxe+1
+              DO ip = 1,total_np_e
                 CALL apply_COSOlver_mat_e(ip,ij,ikx,iky,iz,TColl)
                 local_sum_e(ip) = TColl
               ENDDO
               IF (num_procs_p .GT. 1) THEN
                 ! Sum up all the sub collision terms on root 0
-                CALL MPI_REDUCE(local_sum_e, buffer_e, pmaxe+1, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr)
+                CALL MPI_REDUCE(local_sum_e, buffer_e, total_np_e, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr)
                 ! distribute the sum over the process among p
                 CALL MPI_SCATTERV(buffer_e, counts_np_e, displs_np_e, MPI_DOUBLE_COMPLEX,&
                                   TColl_distr_e, local_np_e, MPI_DOUBLE_COMPLEX,&
@@ -326,13 +325,13 @@ CONTAINS
             ENDDO
             ! Ions
             DO ij = 1,Jmaxi+1
-              DO ip = 1,Pmaxi+1
+              DO ip = 1,total_np_i
                 CALL apply_COSOlver_mat_i(ip,ij,ikx,iky,iz,TColl)
                 local_sum_i(ip) = TColl
               ENDDO
               IF (num_procs_p .GT. 1) THEN
                 ! Reduce the local_sums to root = 0
-                CALL MPI_REDUCE(local_sum_i, buffer_i, pmaxi+1, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr)
+                CALL MPI_REDUCE(local_sum_i, buffer_i, total_np_i, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm_p, ierr)
                 ! buffer contains the entire collision term along p, we scatter it between
                 ! the other processes (use of scatterv since Pmax/Np is not an integer)
                 CALL MPI_SCATTERV(buffer_i, counts_np_i, displs_np_i, MPI_DOUBLE_COMPLEX,&
@@ -366,14 +365,14 @@ CONTAINS
     USE basic
     USE time_integration, ONLY: updatetlevel
     USE utility
-    USE model, ONLY: CO, nu_e, nu_ee
+    USE model, ONLY: CO, nu_e, nu_ee, CLOS
     IMPLICIT NONE
 
     INTEGER,     INTENT(IN)  :: ip_, ij_ ,ikx_, iky_, iz_
     COMPLEX(dp), INTENT(OUT) :: TColl_
 
     INTEGER     :: ip2,ij2, p_int,j_int, p2_int,j2_int, ikx_C, iky_C
-    p_int = ip_-1; j_int = ij_-1
+    p_int = parray_e_full(ip_); j_int = jarray_e_full(ij_);
 
     IF (CO .GT. 0) THEN ! GK operator (k-dependant)
       ikx_C = ikx_; iky_C = iky_
@@ -388,6 +387,7 @@ CONTAINS
       p2_int = parray_e(ip2)
       jloopee: DO ij2 = ijs_e,ije_e
         j2_int = jarray_e(ij2)
+        IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxe))&
         TColl_ = TColl_ + moments_e(ip2,ij2,ikx_,iky_,iz_,updatetlevel) &
            *( nu_e  * CeipjT(bare(p_int,j_int), bare(p2_int,j2_int),ikx_C, iky_C) &
              +nu_ee * Ceepj (bare(p_int,j_int), bare(p2_int,j2_int),ikx_C, iky_C))
@@ -399,6 +399,7 @@ CONTAINS
       p2_int = parray_i(ip2)
       jloopei: DO ij2 = ijs_i,ije_i
         j2_int = jarray_i(ij2)
+        IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxi))&
         TColl_ = TColl_ + moments_i(ip2,ij2,ikx_,iky_,iz_,updatetlevel) &
           *(nu_e * CeipjF(bare(p_int,j_int), bari(p2_int,j2_int),ikx_C, iky_C))
       END DO jloopei
@@ -416,13 +417,13 @@ CONTAINS
     USE basic
     USE time_integration, ONLY : updatetlevel
     USE utility
-    USE model, ONLY: CO, nu_i, nu_ie
+    USE model, ONLY: CO, nu_i, nu_ie, CLOS
     IMPLICIT NONE
     INTEGER,     INTENT(IN)    :: ip_, ij_ ,ikx_, iky_, iz_
     COMPLEX(dp), INTENT(OUT)   :: TColl_
 
     INTEGER     :: ip2,ij2, p_int,j_int, p2_int,j2_int, ikx_C, iky_C
-    p_int = ip_-1; j_int = ij_-1
+    p_int = parray_i_full(ip_); j_int = jarray_i_full(ij_);
 
     IF (CO .GT. 0) THEN ! GK operator (k-dependant)
       ikx_C = ikx_; iky_C = iky_
@@ -436,6 +437,7 @@ CONTAINS
       p2_int = parray_i(ip2)
       jloopii: DO ij2 = ijs_i,ije_i
         j2_int = jarray_i(ij2)
+        IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxi))&
         TColl_ = TColl_ + moments_i(ip2,ij2,ikx_,iky_,iz_,updatetlevel) &
             *( nu_ie * CiepjT(bari(p_int,j_int), bari(p2_int,j2_int), ikx_C, iky_C) &
               +nu_i  * Ciipj (bari(p_int,j_int), bari(p2_int,j2_int), ikx_C, iky_C))
@@ -446,6 +448,7 @@ CONTAINS
       p2_int = parray_e(ip2)
       jloopie: DO ij2 = ijs_e,ije_e
         j2_int = jarray_e(ij2)
+        IF((CLOS .NE. 1) .OR. (p2_int+2*j2_int .LE. dmaxe))&
         TColl_ = TColl_ + moments_e(ip2,ij2,ikx_,iky_,iz_,updatetlevel) &
           *(nu_ie * CiepjF(bari(p_int,j_int), bare(p2_int,j2_int), ikx_C, iky_C))
       ENDDO jloopie
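
Note on the CLOS guards added above: with CLOS = 1 only moments belonging to
the full GF set, i.e. those with p+2j <= dmax, enter the collision sums. A
worked enumeration under an assumed dmax (illustrative, not part of the patch):

    ! For dmax = 4, the (p,j) pairs retained by p + 2*j <= dmax are
    !   (0,0) (0,1) (0,2)
    !   (1,0) (1,1)
    !   (2,0) (2,1)
    !   (3,0)
    !   (4,0)
    keep = (CLOS .NE. 1) .OR. (p2_int + 2*j2_int .LE. dmax)  ! LOGICAL :: keep
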
@@ -688,46 +691,22 @@ CONTAINS
               kperp_mat = kp_grid_mat(ikp)
               IF(kperp_mat .GT. kperp_sim) EXIT ! a matrix with kperp2 > current kx2 + ky2 has been found
             ENDDO
-            IF((abs(CO) .eq. 4) .AND. (kperp_sim .GT. 3.0)) THEN ! hybrid CO for Coulomb collisions
-              ! write(*,*) kp_grid_mat(ikp_mat), '<', kperp_sim
-              !! Fill non existing matrices with diagonal Dougherty damping
-              DO ip_e = 0,Pmaxe ! Loop over rows
-              DO ij_e = 0,Jmaxe
-                irow_sub  = (Jmaxe +1)*ip_e + ij_e +1
-                be_2      = (kxarray(ikx)**2 + kyarray(iky)**2) * sigmae2_taue_o2
-                Ceepj (irow_sub,irow_sub,ikx,iky) = -(real(ip_e,dp) + 2._dp*real(ij_e,dp) + 2._dp*be_2)!Ceepj__kp(:,:,nkp_mat)
-              ENDDO
-              ENDDO
-              DO ip_i = 0,Pmaxi ! Loop over rows
-              DO ij_i = 0,Jmaxi
-                irow_sub  = (Jmaxi +1)*ip_i + ij_i +1
-                bi_2      = (kxarray(ikx)**2 + kyarray(iky)**2) * sigmai2_taui_o2
-                Ciipj (irow_sub,irow_sub,ikx,iky) = -(real(ip_i,dp) + 2._dp*real(ij_i,dp) + 2._dp*bi_2)!0*Ciipj__kp(:,:,nkp_mat)
-              ENDDO
-              ENDDO
-              ! Ceepj (:,:,ikx,iky) = 0*Ceepj__kp(:,:,nkp_mat)
-              ! Ciipj (:,:,ikx,iky) = 0*Ciipj__kp(:,:,nkp_mat)
-              CeipjT(:,:,ikx,iky) = 0*CeipjT_kp(:,:,nkp_mat)
-              CeipjF(:,:,ikx,iky) = 0*CeipjF_kp(:,:,nkp_mat)
-              CiepjT(:,:,ikx,iky) = 0*CiepjT_kp(:,:,nkp_mat)
-              CiepjF(:,:,ikx,iky) = 0*CiepjF_kp(:,:,nkp_mat)
-            ELSE ! Interpolation
-              ! interval boundaries
-              ikp_next  = ikp_mat     !index of k1 (larger than kperp_sim thanks to previous loop)
-              ikp_prev  = ikp_mat - 1 !index of k0 (smaller neighbour to interpolate inbetween)
-              ! write(*,*) kp_grid_mat(ikp_prev), '<', kperp_sim, '<', kp_grid_mat(ikp_next)
-
-              ! 0->1 variable for linear interp, i.e. zero2one = (k-k0)/(k1-k0)
-              zerotoone = (kperp_sim - kp_grid_mat(ikp_prev))/(kp_grid_mat(ikp_next) - kp_grid_mat(ikp_prev))
-
-              ! Linear interpolation between previous and next kperp matrix values
-              Ceepj (:,:,ikx,iky) = (Ceepj__kp(:,:,ikp_next) - Ceepj__kp(:,:,ikp_prev))*zerotoone + Ceepj__kp(:,:,ikp_prev)
-              CeipjT(:,:,ikx,iky) = (CeipjT_kp(:,:,ikp_next) - CeipjT_kp(:,:,ikp_prev))*zerotoone + CeipjT_kp(:,:,ikp_prev)
-              CeipjF(:,:,ikx,iky) = (CeipjF_kp(:,:,ikp_next) - CeipjF_kp(:,:,ikp_prev))*zerotoone + CeipjF_kp(:,:,ikp_prev)
-              Ciipj (:,:,ikx,iky) = (Ciipj__kp(:,:,ikp_next) - Ciipj__kp(:,:,ikp_prev))*zerotoone + Ciipj__kp(:,:,ikp_prev)
-              CiepjT(:,:,ikx,iky) = (CiepjT_kp(:,:,ikp_next) - CiepjT_kp(:,:,ikp_prev))*zerotoone + CiepjT_kp(:,:,ikp_prev)
-              CiepjF(:,:,ikx,iky) = (CiepjF_kp(:,:,ikp_next) - CiepjF_kp(:,:,ikp_prev))*zerotoone + CiepjF_kp(:,:,ikp_prev)
-            ENDIF
+            ! Interpolation
+            ! interval boundaries
+            ikp_next  = ikp_mat     !index of k1 (larger than kperp_sim thanks to previous loop)
+            ikp_prev  = ikp_mat - 1 !index of k0 (smaller neighbour to interpolate inbetween)
+            ! write(*,*) kp_grid_mat(ikp_prev), '<', kperp_sim, '<', kp_grid_mat(ikp_next)
+
+            ! 0->1 variable for linear interp, i.e. zero2one = (k-k0)/(k1-k0)
+            zerotoone = (kperp_sim - kp_grid_mat(ikp_prev))/(kp_grid_mat(ikp_next) - kp_grid_mat(ikp_prev))
+
+            ! Linear interpolation between previous and next kperp matrix values
+            Ceepj (:,:,ikx,iky) = (Ceepj__kp(:,:,ikp_next) - Ceepj__kp(:,:,ikp_prev))*zerotoone + Ceepj__kp(:,:,ikp_prev)
+            CeipjT(:,:,ikx,iky) = (CeipjT_kp(:,:,ikp_next) - CeipjT_kp(:,:,ikp_prev))*zerotoone + CeipjT_kp(:,:,ikp_prev)
+            CeipjF(:,:,ikx,iky) = (CeipjF_kp(:,:,ikp_next) - CeipjF_kp(:,:,ikp_prev))*zerotoone + CeipjF_kp(:,:,ikp_prev)
+            Ciipj (:,:,ikx,iky) = (Ciipj__kp(:,:,ikp_next) - Ciipj__kp(:,:,ikp_prev))*zerotoone + Ciipj__kp(:,:,ikp_prev)
+            CiepjT(:,:,ikx,iky) = (CiepjT_kp(:,:,ikp_next) - CiepjT_kp(:,:,ikp_prev))*zerotoone + CiepjT_kp(:,:,ikp_prev)
+            CiepjF(:,:,ikx,iky) = (CiepjF_kp(:,:,ikp_next) - CiepjF_kp(:,:,ikp_prev))*zerotoone + CiepjF_kp(:,:,ikp_prev)
           ENDDO
         ENDDO
       ELSE ! DK -> No kperp dep, copy simply to final collision matrices
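
The compute_TColl loops above use a reduce-then-scatter pattern to assemble the
collision term along the p-distributed direction. A minimal standalone sketch of
that pattern (total_np, local_np, counts_np, displs_np and comm_p stand in for
the per-species variables of the patch; the rest is illustrative):

    ! Each rank fills local_sum for the p rows it owns (zeros elsewhere);
    ! root 0 accumulates the full column, then returns to each rank its slice.
    COMPLEX(dp), DIMENSION(1:total_np) :: local_sum, buffer
    COMPLEX(dp), DIMENSION(ips:ipe)    :: TColl_distr
    CALL MPI_REDUCE(local_sum, buffer, total_np, MPI_DOUBLE_COMPLEX, &
                    MPI_SUM, 0, comm_p, ierr)
    ! SCATTERV rather than SCATTER because total_np/num_procs_p is in
    ! general not an integer, so the slices have unequal lengths
    CALL MPI_SCATTERV(buffer, counts_np, displs_np, MPI_DOUBLE_COMPLEX, &
                      TColl_distr, local_np, MPI_DOUBLE_COMPLEX, &
                      0, comm_p, ierr)
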
diff --git a/src/diagnose.F90 b/src/diagnose.F90
index f9b879fdda9e6bfa92845c154fd8a1d6236c380a..4ab9b96142955695cff335cbf24cdcff602cc44a 100644
--- a/src/diagnose.F90
+++ b/src/diagnose.F90
@@ -21,11 +21,6 @@ SUBROUTINE diagnose(kstep)
   INTEGER :: dims(1) = (/0/)
   INTEGER :: cp_counter = 0
   CHARACTER(len=256) :: str, fname,test_
-  ! putarr(...,pardim=1) does not work for 2D domain decomposition
-  ! so we need to gather non 5D data on one proc to output it
-  INTEGER     :: parray_e_full(1:pmaxe+1), parray_i_full(1:pmaxi+1)
-  INTEGER     :: jarray_e_full(1:jmaxe+1), jarray_i_full(1:jmaxi+1)
-  REAL(dp)    :: kxarray_full(1:Nkx),  kyarray_full(1:Nky), zarray_full(1:Nz)
 
   CALL cpu_time(t0_diag) ! Measuring time
   !_____________________________________________________________________________
@@ -64,46 +59,15 @@ SUBROUTINE diagnose(kstep)
      CALL creatd(fidres, 0, dims, "/profiler/Tc_step",       "cumulative total step computation time")
      CALL creatd(fidres, 0, dims, "/profiler/time",          "current simulation time")
 
-     ! Build the full grids on process 0 to diagnose it without comm
-     IF (my_id .EQ. 0) THEN
-       ! P
-       DO ip = 1,pmaxe+1; parray_e_full(ip) = (ip-1); END DO
-       DO ip = 1,pmaxi+1; parray_i_full(ip) = (ip-1); END DO
-       ! J
-       DO ij = 1,jmaxe+1; jarray_e_full(ij) = (ij-1); END DO
-       DO ij = 1,jmaxi+1; jarray_i_full(ij) = (ij-1); END DO
-       ! kx
-       DO ikx = 1,Nkx
-         kxarray_full(ikx) = REAL(ikx-1,dp) * deltakx
-       END DO
-       ! ky
-       IF (Nky .GT. 1) THEN
-        DO iky = 1,Nky
-          kyarray_full(iky) = deltaky*(MODULO(iky-1,Nky/2)-Nky/2*FLOOR(2.*real(iky-1)/real(Nky)))
-          if (iky .EQ. Ny/2+1)     kyarray(iky) = -kyarray(iky)
-        END DO
-       ELSE
-        kyarray_full(1) =  0
-       endif
-      ! z
-      IF (Nz .GT. 1) THEN
-       DO iz = 1,Nz
-         zarray_full(iz) = deltaz*(iz-1)
-       END DO
-      ELSE
-       zarray_full(1) =  0
-      endif
-     ENDIF
-
      ! Grid info
      CALL creatg(fidres, "/data/grid", "Grid data")
-     CALL putarr(fidres, "/data/grid/coordkx", kxarray_full(1:Nkx),"kx*rho_s0", ionode=0)
-     CALL putarr(fidres, "/data/grid/coordky", kyarray_full(1:Nky),"ky*rho_s0", ionode=0)
-     CALL putarr(fidres, "/data/grid/coordz",   zarray_full(1:Nz) ,"z/R", ionode=0)
-     CALL putarr(fidres, "/data/grid/coordp_e" , parray_e_full(1:pmaxe+1), "p_e", ionode=0)
-     CALL putarr(fidres, "/data/grid/coordj_e" , jarray_e_full(1:jmaxe+1), "j_e", ionode=0)
-     CALL putarr(fidres, "/data/grid/coordp_i" , parray_i_full(1:pmaxi+1), "p_i", ionode=0)
-     CALL putarr(fidres, "/data/grid/coordj_i" , jarray_i_full(1:jmaxi+1), "j_i", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordkx",   kxarray_full,  "kx*rho_s0", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordky",   kyarray_full,  "ky*rho_s0", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordz",    zarray_full,   "z/R", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordp_e" , parray_e_full, "p_e", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordj_e" , jarray_e_full, "j_e", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordp_i" , parray_i_full, "p_i", ionode=0)
+     CALL putarr(fidres, "/data/grid/coordj_i" , jarray_i_full, "j_i", ionode=0)
 
      !  var0d group (gyro transport)
      IF (nsave_0d .GT. 0) THEN
@@ -224,7 +188,7 @@ SUBROUTINE diagnose(kstep)
 
      CALL grid_outputinputs(fidres, str)
 
-     CALL output_par_outputinputs(fidres, str)
+     CALL diag_par_outputinputs(fidres, str)
 
      CALL model_outputinputs(fidres, str)
 
diff --git a/src/ghosts_mod.F90 b/src/ghosts_mod.F90
index 1e42882206ac8b128c44a3a9f850cacb9fabd728..053a1097e8ddac72e09392e34665b474883b9200 100644
--- a/src/ghosts_mod.F90
+++ b/src/ghosts_mod.F90
@@ -29,25 +29,39 @@ END SUBROUTINE update_ghosts
 
 
 !Communicate p+1, p+2 moments to left neighboor and p-1, p-2 moments to right one
+! [a b|C D|e f] : proc n has moments a to f where a,b,e,f are ghosts
+!
+!proc 0: [0 1 2 3 4|5 6]
+!               V V ^ ^ 
+!proc 1:       [3 4|5 6 7 8|9 10]
+!                       V V ^  ^
+!proc 2:               [7 8|9 10 11 12|13 14]
+!                                 V  V  ^  ^        
+!proc 3:                        [11 12|13 14 15 16|17 18]
+!                                                   ^  ^
+!Closure by zero truncation :                       0  0
 SUBROUTINE update_ghosts_p_e
 
     IMPLICIT NONE
     count = (ijeg_e-ijsg_e+1)*(ikxe-ikxs+1)*(ikye-ikys+1)*(ize-izs+1)
 
     !!!!!!!!!!! Send ghost to right neighbour !!!!!!!!!!!!!!!!!!!!!!
-    CALL mpi_sendrecv(moments_e(ipe_e  ,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 10, &
-                      moments_e(ips_e-1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 10, &
+    ! Send the last local moment to fill the -1 neighbour ghost
+    CALL mpi_sendrecv(moments_e(ipe_e  ,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 10, & ! Send to right
+                      moments_e(ips_e-1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 10, & ! Receive from left
                       comm0, status, ierr)
-    CALL mpi_sendrecv(moments_e(ipe_e-1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 11, &
-                      moments_e(ips_e-2,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 11, &
+    IF (deltape .EQ. 1) & ! If we have odd Hermite degrees, we need a 2nd-order stencil
+    CALL mpi_sendrecv(moments_e(ipe_e-1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 11, & ! Send to right
+                      moments_e(ips_e-2,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 11, & ! Receive from left
                       comm0, status, ierr)
 
     !!!!!!!!!!! Send ghost to left neighbour !!!!!!!!!!!!!!!!!!!!!!
-    CALL mpi_sendrecv(moments_e(ips_e  ,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 12, &
-                      moments_e(ipe_e+1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 12, &
+    CALL mpi_sendrecv(moments_e(ips_e  ,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 12, & ! Send to left
+                      moments_e(ipe_e+1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 12, & ! Receive from right
                       comm0, status, ierr)
-    CALL mpi_sendrecv(moments_e(ips_e+1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 13, &
-                      moments_e(ipe_e+2,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 13, &
+    IF (deltape .EQ. 1) & ! If we have odd Hermite degrees, we need a 2nd-order stencil
+    CALL mpi_sendrecv(moments_e(ips_e+1,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 13, & ! Send to left
+                      moments_e(ipe_e+2,ijsg_e:ijeg_e,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 13, & ! Receive from right
                       comm0, status, ierr)
 
 END SUBROUTINE update_ghosts_p_e
@@ -63,6 +77,7 @@ SUBROUTINE update_ghosts_p_i
     CALL mpi_sendrecv(moments_i(ipe_i  ,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 14, &
                       moments_i(ips_i-1,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 14, &
                       comm0, status, ierr)
+    IF (deltapi .EQ. 1) & ! If we have odd Hermite degrees, we need a 2nd-order stencil
     CALL mpi_sendrecv(moments_i(ipe_i-1,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 15, &
                       moments_i(ips_i-2,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 15, &
                       comm0, status, ierr)
@@ -72,6 +87,7 @@ SUBROUTINE update_ghosts_p_i
     CALL mpi_sendrecv(moments_i(ips_i  ,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 16, &
                       moments_i(ipe_i+1,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 16, &
                       comm0, status, ierr)
+    IF (deltapi .EQ. 1) & ! If we have odd Hermite degrees, we need a 2nd-order stencil
     CALL mpi_sendrecv(moments_i(ips_i+1,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 17, &
                       moments_i(ipe_i+2,ijsg_i:ijeg_i,ikxs:ikxe,ikys:ikye,izs:ize,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 17, &
                       comm0, status, ierr)
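
The deltap guards above skip the second exchange in each direction when only
even Hermite degrees are evolved. A worked example, assuming the index map
parray(ip) = (ip-1)*deltap set in grid_mod:

    ! The parallel dynamics couple N^p to N^(p+/-2); on the index grid this
    ! is an offset of 2/deltap, which is also the ghost width of set_pgrid:
    !   deltap = 1 : p = 0,1,2,...  p+/-2 <-> ip+/-2  -> two sendrecv pairs
    !   deltap = 2 : p = 0,2,4,...  p+/-2 <-> ip+/-1  -> one sendrecv pair
    p_ghost_width = 2/deltape  ! INTEGER, matches ipsg_e = ips_e - 2/deltape
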
diff --git a/src/grid_mod.F90 b/src/grid_mod.F90
index 13ebf0e2b91e6166d67924d4c5480ec5529ba3e0..312b52d0dee2367599db28a50fc8b16a5c7ab135 100644
--- a/src/grid_mod.F90
+++ b/src/grid_mod.F90
@@ -11,7 +11,9 @@ MODULE grid
   INTEGER,  PUBLIC, PROTECTED :: jmaxe = 1      ! The maximal electron Laguerre-moment computed
   INTEGER,  PUBLIC, PROTECTED :: pmaxi = 1      ! The maximal ion Hermite-moment computed
   INTEGER,  PUBLIC, PROTECTED :: jmaxi = 1      ! The maximal ion Laguerre-moment computed
-  INTEGER,  PUBLIC, PROTECTED :: maxj  = 1      ! The maximal ion Laguerre-moment computed
+  INTEGER,  PUBLIC, PROTECTED :: maxj  = 1      ! The maximal Laguerre-moment
+  INTEGER,  PUBLIC, PROTECTED :: dmaxe = 1      ! Maximal degree of the full GF e-moment set (p+2j <= dmaxe)
+  INTEGER,  PUBLIC, PROTECTED :: dmaxi = 1      ! Maximal degree of the full GF i-moment set (p+2j <= dmaxi)
   INTEGER,  PUBLIC, PROTECTED :: Nx    = 16     ! Number of total internal grid points in x
   REAL(dp), PUBLIC, PROTECTED :: Lx    = 1._dp  ! horizontal length of the spatial box
   INTEGER,  PUBLIC, PROTECTED :: Ny    = 16     ! Number of total internal grid points in y
@@ -37,7 +39,7 @@ MODULE grid
   ! Grids containing position in physical space
   REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: xarray
   REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: yarray
-  REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: zarray
+  REAL(dp), DIMENSION(:), ALLOCATABLE, PUBLIC :: zarray, zarray_full
   REAL(dp), PUBLIC, PROTECTED ::  deltax,  deltay, deltaz
   INTEGER,  PUBLIC, PROTECTED  ::  ixs,  ixe,  iys,  iye, izs, ize
   INTEGER,  PUBLIC :: ir,iz ! counters
@@ -45,13 +47,14 @@ MODULE grid
   integer(C_INTPTR_T), PUBLIC :: local_nkx_offset, local_nky_offset
   INTEGER,             PUBLIC :: local_nkp
   INTEGER,             PUBLIC :: local_np_e, local_np_i
+  INTEGER,             PUBLIC :: total_np_e, total_np_i
   integer(C_INTPTR_T), PUBLIC :: local_np_e_offset, local_np_i_offset
   INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: counts_np_e, counts_np_i
   INTEGER, DIMENSION(:), ALLOCATABLE, PUBLIC :: displs_np_e, displs_np_i
 
   ! Grids containing position in fourier space
-  REAL(dp), DIMENSION(:),   ALLOCATABLE, PUBLIC :: kxarray
-  REAL(dp), DIMENSION(:),   ALLOCATABLE, PUBLIC :: kyarray
+  REAL(dp), DIMENSION(:),   ALLOCATABLE, PUBLIC :: kxarray, kxarray_full
+  REAL(dp), DIMENSION(:),   ALLOCATABLE, PUBLIC :: kyarray, kyarray_full
   REAL(dp), DIMENSION(:),   ALLOCATABLE, PUBLIC :: kparray     ! kperp array
   REAL(dp), PUBLIC, PROTECTED ::  deltakx, deltaky, kx_max, ky_max, kp_max
   INTEGER,  PUBLIC, PROTECTED ::  ikxs, ikxe, ikys, ikye, ikps, ikpe
@@ -61,14 +64,16 @@ MODULE grid
   LOGICAL,  PUBLIC, PROTECTED :: contains_kxmax = .false. ! rank of the proc containing kx=max indices
 
   ! Grid containing the polynomials degrees
-  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_e
-  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_i
-  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray_e
-  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray_i
+  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_e, parray_e_full
+  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: parray_i, parray_i_full
+  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray_e, jarray_e_full
+  INTEGER,  DIMENSION(:), ALLOCATABLE, PUBLIC :: jarray_i, jarray_i_full
   INTEGER, PUBLIC, PROTECTED ::  ips_e,ipe_e, ijs_e,ije_e ! Start and end indices for pol. deg.
   INTEGER, PUBLIC, PROTECTED ::  ips_i,ipe_i, ijs_i,ije_i
   INTEGER, PUBLIC, PROTECTED ::  ipsg_e,ipeg_e, ijsg_e,ijeg_e ! Ghosts start and end indices
   INTEGER, PUBLIC, PROTECTED ::  ipsg_i,ipeg_i, ijsg_i,ijeg_i
+  INTEGER, PUBLIC, PROTECTED ::  deltape, ip0_e, ip1_e, ip2_e ! Pgrid spacing and moment 0,1,2 index
+  INTEGER, PUBLIC, PROTECTED ::  deltapi, ip0_i, ip1_i, ip2_i
 
   ! Public Functions
   PUBLIC :: init_1Dgrid_distr
@@ -93,17 +98,30 @@ CONTAINS
                     Nx,  Lx,  Ny,  Ly, Nz, q0, shear, eps
     READ(lu_in,grid)
 
+    !! Compute the maximal degree of the full GF moment set,
+    !!  i.e. all moments N_a^pj s.t. p+2j <= d are simulated (see GF closure)
+    dmaxe = min(pmaxe,2*jmaxe+1)
+    dmaxi = min(pmaxi,2*jmaxi+1)
+
+    !! If there is no parallel dimension (Nz=1), the moment hierarchy is separable
+    !! between odd and even p. Since the energy is injected at p=0 and p=2 by the
+    !! density and temperature gradients, there is no need to simulate the odd p,
+    !! which would only be damped. In this case we define the grid
+    !! Parray = 0,2,4,...,Pmax, i.e. deltap = 2 instead of 1, to save computation
+    IF(Nz .EQ. 1) THEN
+      deltape = 2; deltapi = 2;
+    ELSE
+      deltape = 1; deltapi = 1;
+    ENDIF
+
   END SUBROUTINE grid_readinputs
 
   SUBROUTINE init_1Dgrid_distr
-
     ! write(*,*) Nx
     local_nkx        = (Nx/2+1)/num_procs_kx
     ! write(*,*) local_nkx
     local_nkx_offset = rank_kx*local_nkx
-
     if (rank_kx .EQ. num_procs_kx-1) local_nkx = (Nx/2+1)-local_nkx_offset
-
   END SUBROUTINE init_1Dgrid_distr
 
   SUBROUTINE set_pgrid
@@ -111,9 +129,19 @@ CONTAINS
     IMPLICIT NONE
     INTEGER :: ip, istart, iend, in
 
+    ! Total number of Hermite polynomials we will evolve
+    total_np_e = (Pmaxe/deltape) + 1
+    total_np_i = (Pmaxi/deltapi) + 1
+    ! Build the full grids, replicated on every process, to diagnose them without comm
+    ALLOCATE(parray_e_full(1:total_np_e))
+    ALLOCATE(parray_i_full(1:total_np_i))
+    ! P
+    DO ip = 1,total_np_e; parray_e_full(ip) = (ip-1)*deltape; END DO
+    DO ip = 1,total_np_i; parray_i_full(ip) = (ip-1)*deltapi; END DO
+    !! Parallel data distribution
     ! Local data distribution
-    CALL decomp1D(pmaxe+1, num_procs_p, rank_p, ips_e, ipe_e)
-    CALL decomp1D(pmaxi+1, num_procs_p, rank_p, ips_i, ipe_i)
+    CALL decomp1D(total_np_e, num_procs_p, rank_p, ips_e, ipe_e)
+    CALL decomp1D(total_np_i, num_procs_p, rank_p, ips_i, ipe_i)
     local_np_e = ipe_e - ips_e + 1
     local_np_i = ipe_i - ips_i + 1
     ! List of shift and local numbers between the different processes (used in scatterv and gatherv)
@@ -122,27 +150,38 @@ CONTAINS
     ALLOCATE(displs_np_e (1:num_procs_p))
     ALLOCATE(displs_np_i (1:num_procs_p))
     DO in = 0,num_procs_p-1
-      CALL decomp1D(pmaxe+1, num_procs_p, in, istart, iend)
+      CALL decomp1D(total_np_e, num_procs_p, in, istart, iend)
       counts_np_e(in+1) = iend-istart+1
       displs_np_e(in+1) = istart-1
-      CALL decomp1D(pmaxi+1, num_procs_p, in, istart, iend)
+      CALL decomp1D(total_np_i, num_procs_p, in, istart, iend)
       counts_np_i(in+1) = iend-istart+1
       displs_np_i(in+1) = istart-1
-    !DGGK operator uses moments at index p=2 (ip=3) for the p=0 term so the
-    ! process that contains ip=1 MUST contain ip=3 as well for both e and i.
-    IF(((ips_e .EQ. 1) .OR. (ips_i .EQ. 1)) .AND. ((ipe_e .LT. 3) .OR. (ipe_i .LT. 3)))&
-     WRITE(*,*) "Warning : distribution along p may not work with DGGK"
     ENDDO
 
     ! local grid computation
     ALLOCATE(parray_e(ips_e:ipe_e))
     ALLOCATE(parray_i(ips_i:ipe_i))
-    DO ip = ips_e,ipe_e; parray_e(ip) = (ip-1); END DO
-    DO ip = ips_i,ipe_i; parray_i(ip) = (ip-1); END DO
+    DO ip = ips_e,ipe_e
+      parray_e(ip) = (ip-1)*deltape
+      ! Storing indices of particular degrees for DG and fluid moments computations
+      IF(parray_e(ip) .EQ. 0) ip0_e = ip
+      IF(parray_e(ip) .EQ. 1) ip1_e = ip
+      IF(parray_e(ip) .EQ. 2) ip2_e = ip
+    END DO
+    DO ip = ips_i,ipe_i
+      parray_i(ip) = (ip-1)*deltapi
+      IF(parray_i(ip) .EQ. 0) ip0_i = ip
+      IF(parray_i(ip) .EQ. 1) ip1_i = ip
+      IF(parray_i(ip) .EQ. 2) ip2_i = ip
+    END DO
+    !DGGK operator uses moments at p=2 for the p=0 term, so the process
+    ! that contains p=0 MUST contain p=2 as well, for both e and i.
+    IF(((ips_e .EQ. ip0_e) .OR. (ips_i .EQ. ip0_i)) .AND. ((ipe_e .LT. ip2_e) .OR. (ipe_i .LT. ip2_i)))&
+     WRITE(*,*) "Warning : distribution along p may not work with DGGK"
 
     ! Ghosts boundaries
-    ipsg_e = ips_e - 2; ipeg_e = ipe_e + 2;
-    ipsg_i = ips_i - 2; ipeg_i = ipe_i + 2;
+    ipsg_e = ips_e - 2/deltape; ipeg_e = ipe_e + 2/deltape;
+    ipsg_i = ips_i - 2/deltapi; ipeg_i = ipe_i + 2/deltapi;
     ! Precomputations
     pmaxe_dp   = real(pmaxe,dp)
     pmaxi_dp   = real(pmaxi,dp)
@@ -153,6 +192,13 @@ CONTAINS
     IMPLICIT NONE
     INTEGER :: ij
 
+    ! Build the full grids, replicated on every process, to diagnose them without comm
+    ALLOCATE(jarray_e_full(1:jmaxe+1))
+    ALLOCATE(jarray_i_full(1:jmaxi+1))
+    ! J
+    DO ij = 1,jmaxe+1; jarray_e_full(ij) = (ij-1); END DO
+    DO ij = 1,jmaxi+1; jarray_i_full(ij) = (ij-1); END DO
+    ! Local data
     ijs_e = 1; ije_e = jmaxe + 1
     ijs_i = 1; ije_i = jmaxi + 1
     ALLOCATE(jarray_e(ijs_e:ije_e))
@@ -178,20 +224,27 @@ CONTAINS
     INTEGER :: i_
 
     Nkx = Nx/2+1 ! Defined only on positive kx since fields are real
-    ! Start and END indices of grid
-    ikxs = local_nkx_offset + 1
-    ikxe = ikxs + local_nkx - 1
-    ALLOCATE(kxarray(ikxs:ikxe))
-
     ! Grid spacings
     IF (Lx .EQ. 0) THEN
       deltakx = 1._dp
       kx_max  = 0._dp
     ELSE
       deltakx = 2._dp*PI/Lx
-      kx_max  = (Nx/2+1)*deltakx
+      kx_max  = Nkx*deltakx
     ENDIF
 
+    ! Build the full kx grid, replicated on every process, to diagnose it without comm
+    ALLOCATE(kxarray_full(1:Nkx))
+    DO ikx = 1,Nkx
+     kxarray_full(ikx) = REAL(ikx-1,dp) * deltakx
+    END DO
+
+    !! Parallel distribution
+    ! Start and END indices of grid
+    ikxs = local_nkx_offset + 1
+    ikxe = ikxs + local_nkx - 1
+    ALLOCATE(kxarray(ikxs:ikxe))
+
     ! Creating a grid ordered as dk*(0 1 2 3)
     DO ikx = ikxs,ikxe
       kxarray(ikx) = REAL(ikx-1,dp) * deltakx
@@ -226,11 +279,12 @@ CONTAINS
     INTEGER :: i_, counter
 
     Nky = Ny;
+    ALLOCATE(kyarray_full(1:Nky))
+    ! Local data
     ! Start and END indices of grid
     ikys = 1
     ikye = Nky
     ALLOCATE(kyarray(ikys:ikye))
-
     IF (Ly .EQ. 0) THEN ! 1D linear case
       deltaky    = 1._dp
       kyarray(1) = 0
@@ -249,7 +303,16 @@ CONTAINS
         IF (kyarray(ikx) .EQ. ky_max) ikx_max = ikx
       END DO
     ENDIF
-
+    ! Build the full ky grid, replicated on every process, to diagnose it without comm
+    ! ky
+    IF (Nky .GT. 1) THEN
+     DO iky = 1,Nky
+       kyarray_full(iky) = deltaky*(MODULO(iky-1,Nky/2)-Nky/2*FLOOR(2.*real(iky-1)/real(Nky)))
+       IF (iky .EQ. Ny/2+1) kyarray_full(iky) = -kyarray_full(iky)
+     END DO
+    ELSE
+     kyarray_full(1) =  0
+    ENDIF
     ! Orszag 2/3 filter
     two_third_kymax = 2._dp/3._dp*deltaky*(Nky/2-1);
     ALLOCATE(AA_y(ikys:ikye))
@@ -288,6 +351,16 @@ CONTAINS
     USE prec_const
     IMPLICIT NONE
     INTEGER :: i_
+    ! Build the full z grid, replicated on every process, to diagnose it without comm
+    ALLOCATE(zarray_full(1:Nz))
+    ! z
+    IF (Nz .GT. 1) THEN
+      DO iz = 1,Nz
+        zarray_full(iz) = deltaz*(iz-1)
+      END DO
+    ELSE
+      zarray_full(1) =  0
+    ENDIF
     ! Start and END indices of grid
     izs = 1
     ize = Nz
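
To make the strided p-grid concrete, a worked example with assumed inputs
(illustrative values, not from the patch):

    ! Pmax = 4, Jmax = 2, Nz = 1:
    !   deltap      = 2                       (no parallel dim, odd p dropped)
    !   total_np    = Pmax/deltap + 1     = 3
    !   parray_full = (/ 0, 2, 4 /)
    !   ip0 = 1, ip2 = 2                      (ip1 is assigned only when
    !                                          deltap = 1, i.e. when p=1 exists)
    !   dmax        = min(Pmax, 2*Jmax+1) = min(4, 5) = 4
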
diff --git a/src/poisson.F90 b/src/poisson.F90
index 00ad9de7ad31981141db5a8fcefc4431590d5275..f3bd42d03a5f634d53fcd4131fe4d6cf1c336342 100644
--- a/src/poisson.F90
+++ b/src/poisson.F90
@@ -23,7 +23,7 @@ SUBROUTINE poisson
   COMPLEX(dp) :: buffer(ikxs:ikxe,ikys:ikye)
 
   !! Poisson can be solved only for process containing ip=1
-  IF ( (ips_e .EQ. 1) .AND. (ips_i .EQ. 1) ) THEN
+  IF ( (ips_e .EQ. ip0_e) .AND. (ips_i .EQ. ip0_i) ) THEN
 
     ! Execution time start
     CALL cpu_time(t0_poisson)
@@ -38,7 +38,7 @@ SUBROUTINE poisson
           ! loop over n only if the max polynomial degree
           DO ine=1,jmaxe+1 ! ine = n+1
             Kne = kernel_e(ine,ikx,iky,iz)
-            sum_kernel_mom_e  = sum_kernel_mom_e  + Kne * moments_e(1, ine, ikx, iky, iz, updatetlevel)
+            sum_kernel_mom_e  = sum_kernel_mom_e  + Kne * moments_e(ip0_e, ine, ikx, iky, iz, updatetlevel)
             sum_kernel2_e     = sum_kernel2_e     + Kne**2 ! ... sum recursively ...
           END DO
 
@@ -48,7 +48,7 @@ SUBROUTINE poisson
             ! loop over n only if the max polynomial degree
             DO ini=1,jmaxi+1
               Kni = kernel_i(ini,ikx,iky,iz)
-              sum_kernel_mom_i  = sum_kernel_mom_i  + Kni * moments_i(1, ini, ikx, iky, iz, updatetlevel)
+              sum_kernel_mom_i  = sum_kernel_mom_i  + Kni * moments_i(ip0_i, ini, ikx, iky, iz, updatetlevel)
               sum_kernel2_i     = sum_kernel2_i     + Kni**2 ! ... sum recursively ...
             END DO
 
diff --git a/src/processing_mod.F90 b/src/processing_mod.F90
index c7b2927e8abd7de72de4e1c102dba54645d46d09..09f164bd9865ba2620f1e1c999d2ab486243522e 100644
--- a/src/processing_mod.F90
+++ b/src/processing_mod.F90
@@ -32,10 +32,10 @@ SUBROUTINE compute_radial_ion_transport
               ky_ = kyarray(iky)
               DO ikx = ikxs,ikxe
                   gflux_local = gflux_local + &
-                      imagu * ky_ * moments_i(1,1,ikx,iky,iz,updatetlevel) * CONJG(phi(ikx,iky,iz))
+                      imagu * ky_ * moments_i(ip0_i,1,ikx,iky,iz,updatetlevel) * CONJG(phi(ikx,iky,iz))
                   DO ij = ijs_i, ije_i
                       pflux_local = pflux_local + &
-                          imagu * ky_ * kernel_i(ij,ikx,iky,iz) * moments_i(1,ij,ikx,iky,iz,updatetlevel) * CONJG(phi(ikx,iky,iz))
+                          imagu * ky_ * kernel_i(ij,ikx,iky,iz) * moments_i(ip0_i,ij,ikx,iky,iz,updatetlevel) * CONJG(phi(ikx,iky,iz))
                   ENDDO
               ENDDO
           ENDDO
@@ -86,17 +86,20 @@ SUBROUTINE compute_radial_heatflux
             DO ikx = ikxs,ikxe
               DO ij = ijs_i, ije_i
                 j_dp = REAL(ij-1,dp)
-                hflux_local = hflux_local + imagu*ky_*CONJG(phi(ikx,iky,iz))&
+                hflux_local = hflux_local - imagu*ky_*CONJG(phi(ikx,iky,iz))&
                  *(2._dp/3._dp * (2._dp*j_dp*kernel_i(ij,ikx,iky,iz) - (j_dp+1)*kernel_i(ij+1,ikx,iky,iz) - j_dp*kernel_i(ij-1,ikx,iky,iz))&
-                 * (moments_i(1,ij,ikx,iky,iz,updatetlevel)+q_i/tau_i*kernel_i(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
-                + SQRT2/3._dp * kernel_i(ij,ikx,iky,iz) * moments_i(3,ij,ikx,iky,iz,updatetlevel))
+                 * (moments_i(ip0_i,ij,ikx,iky,iz,updatetlevel)+q_i/tau_i*kernel_i(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
+                + SQRT2/3._dp * kernel_i(ij,ikx,iky,iz) * moments_i(ip2_i,ij,ikx,iky,iz,updatetlevel))
               ENDDO
               DO ij = ijs_e, ije_e
                 j_dp = REAL(ij-1,dp)
-                hflux_local = hflux_local + imagu*ky_*CONJG(phi(ikx,iky,iz))&
-                 *(2._dp/3._dp * (2._dp*j_dp*kernel_e(ij,ikx,iky,iz) - (j_dp+1)*kernel_e(ij+1,ikx,iky,iz) - j_dp*kernel_e(ij-1,ikx,iky,iz))&
-                 * (moments_e(1,ij,ikx,iky,iz,updatetlevel)+q_e/tau_e*kernel_e(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
-                + SQRT2/3._dp * kernel_e(ij,ikx,iky,iz) * moments_e(3,ij,ikx,iky,iz,updatetlevel))
+                hflux_local = hflux_local - imagu*ky_*CONJG(phi(ikx,iky,iz))&
+                 *(2._dp/3._dp *(2._dp*j_dp * kernel_e(  ij,ikx,iky,iz) &
+                                  -(j_dp+1)  * kernel_e(ij+1,ikx,iky,iz) &
+                                  -j_dp      * kernel_e(ij-1,ikx,iky,iz))&
+                               *(moments_e(ip0_e,ij,ikx,iky,iz,updatetlevel)&
+                                  +q_e/tau_e * kernel_e(  ij,ikx,iky,iz) * phi(ikx,iky,iz)) &
+                  +SQRT2/3._dp * kernel_e(ij,ikx,iky,iz) * moments_e(ip2_e,ij,ikx,iky,iz,updatetlevel))
               ENDDO
             ENDDO
           ENDDO
@@ -140,13 +143,13 @@ SUBROUTINE compute_density
             dens_e(ikx,iky,iz) = 0._dp
             DO ij = ijs_e, ije_e
                 dens_e(ikx,iky,iz) = dens_e(ikx,iky,iz) + kernel_e(ij,ikx,iky,iz) * &
-                 (moments_e(1,ij,ikx,iky,iz,updatetlevel)+q_e/tau_e*kernel_e(ij,ikx,iky,iz)*phi(ikx,iky,iz))
+                 (moments_e(ip0_e,ij,ikx,iky,iz,updatetlevel)+q_e/tau_e*kernel_e(ij,ikx,iky,iz)*phi(ikx,iky,iz))
             ENDDO
             ! ion density
             dens_i(ikx,iky,iz) = 0._dp
             DO ij = ijs_i, ije_i
                 dens_i(ikx,iky,iz) = dens_i(ikx,iky,iz) + kernel_i(ij,ikx,iky,iz) * &
-                (moments_i(1,ij,ikx,iky,iz,updatetlevel)+q_i/tau_i*kernel_i(ij,ikx,iky,iz)*phi(ikx,iky,iz))
+                (moments_i(ip0_i,ij,ikx,iky,iz,updatetlevel)+q_i/tau_i*kernel_i(ij,ikx,iky,iz)*phi(ikx,iky,iz))
             ENDDO
           ENDDO
         ENDDO
@@ -177,8 +180,8 @@ SUBROUTINE compute_temperature
               j_dp = REAL(ij-1,dp)
               temp_e(ikx,iky,iz) = temp_e(ikx,iky,iz) + &
                 2._dp/3._dp * (2._dp*j_dp*kernel_e(ij,ikx,iky,iz) - (j_dp+1)*kernel_e(ij+1,ikx,iky,iz) - j_dp*kernel_e(ij-1,ikx,iky,iz))&
-                 * (moments_e(1,ij,ikx,iky,iz,updatetlevel)+q_e/tau_e*kernel_e(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
-                + SQRT2/3._dp * kernel_e(ij,ikx,iky,iz) * moments_e(3,ij,ikx,iky,iz,updatetlevel)
+                 * (moments_e(ip0_e,ij,ikx,iky,iz,updatetlevel)+q_e/tau_e*kernel_e(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
+                + SQRT2/3._dp * kernel_e(ij,ikx,iky,iz) * moments_e(ip2_e,ij,ikx,iky,iz,updatetlevel)
             ENDDO
 
             ! ion temperature
@@ -187,8 +190,8 @@ SUBROUTINE compute_temperature
               j_dp = REAL(ij-1,dp)
               temp_i(ikx,iky,iz) = temp_i(ikx,iky,iz) + &
                 2._dp/3._dp * (2._dp*j_dp*kernel_i(ij,ikx,iky,iz) - (j_dp+1)*kernel_i(ij+1,ikx,iky,iz) - j_dp*kernel_i(ij-1,ikx,iky,iz))&
-                 * (moments_i(1,ij,ikx,iky,iz,updatetlevel)+q_i/tau_i*kernel_i(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
-                + SQRT2/3._dp * kernel_i(ij,ikx,iky,iz) * moments_i(3,ij,ikx,iky,iz,updatetlevel)
+                 * (moments_i(ip0_i,ij,ikx,iky,iz,updatetlevel)+q_i/tau_i*kernel_i(ij,ikx,iky,iz)*phi(ikx,iky,iz)) &
+                + SQRT2/3._dp * kernel_i(ij,ikx,iky,iz) * moments_i(ip2_i,ij,ikx,iky,iz,updatetlevel)
             ENDDO
           ENDDO
         ENDDO
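
Throughout poisson.F90 and processing_mod.F90, the hard-coded first indices
(1 for p=0, 2 for p=1, 3 for p=2) are replaced by ip0_*/ip1_*/ip2_* because the
position of a Hermite degree on the grid now depends on deltap. A sketch of the
underlying relation, assuming parray(ip) = (ip-1)*deltap from set_pgrid:

    ! Inverting parray(ip) = (ip-1)*deltap gives  ip = p/deltap + 1, so
    !   deltap = 1 : p = 0,1,2 live at ip = 1,2,3  (the old literals)
    !   deltap = 2 : p = 0,2   live at ip = 1,2    (literal 3 would be p=4)
    ip0 = 0/deltap + 1
    ip2 = 2/deltap + 1
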