xref: /petsc/src/ts/utils/dmplexlandau/plexland.c (revision b3f4e4a4e369e0a32051200ee43ca889ec91523f)
1 #include <../src/mat/impls/aij/seq/aij.h>
2 #include <petsc/private/dmpleximpl.h>   /*I "petscdmplex.h" I*/
3 #include <petsclandau.h>                /*I "petsclandau.h"   I*/
4 #include <petscts.h>
5 #include <petscdmforest.h>
6 #include <petscdmcomposite.h>
7 
8 /* Landau collision operator */
9 
10 /* relativistic terms */
11 #if defined(PETSC_USE_REAL_SINGLE)
12 #define SPEED_OF_LIGHT 2.99792458e8F
13 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
14 #else
15 #define SPEED_OF_LIGHT 2.99792458e8
16 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
17 #endif
18 
19 #define PETSC_THREAD_SYNC
20 #include "land_tensors.h"
21 
22 #if defined(PETSC_HAVE_OPENMP)
23 #include <omp.h>
24 #endif
25 
26 /* vector padding not supported */
27 #define LANDAU_VL  1
28 
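/*
  LandauMatMult - MatMult wrapper: when a "LandauCtx" container is attached to A (presumably when
  ctx->jacobian_field_major_order is set), scatter x through ctx->plex_batch, apply the cached
  SeqAIJ MatMult in that ordering, and scatter the result back; otherwise fall back to MatMult().
*/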
29 static PetscErrorCode LandauMatMult(Mat A, Vec x, Vec y)
30 {
31   PetscErrorCode  ierr;
32   LandauCtx       *ctx;
33   PetscContainer  container;
34 
35   PetscFunctionBegin;
36   ierr = PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);CHKERRQ(ierr);
37   if (container) {
38     ierr = PetscContainerGetPointer(container, (void **) &ctx);CHKERRQ(ierr);
39     ierr = VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
40     ierr = VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
41     ierr = (*ctx->seqaij_mult)(A,ctx->work_vec,y);CHKERRQ(ierr);
42     ierr = VecCopy(y, ctx->work_vec);CHKERRQ(ierr);
43     ierr = VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
44     ierr = VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
45     PetscFunctionReturn(0);
46   }
47   ierr = MatMult(A,x,y);CHKERRQ(ierr);
48   PetscFunctionReturn(0);
49 }
50 
51 // Computes v3 = v2 + A * v1.
52 static PetscErrorCode LandauMatMultAdd(Mat A,Vec v1,Vec v2,Vec v3)
53 {
54   PetscErrorCode  ierr;
55 
56   PetscFunctionBegin;
57   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "LandauMatMultAdd should not be called for the Landau operator");
58   ierr = LandauMatMult(A,v1,v3);CHKERRQ(ierr);
59   ierr = VecAYPX(v3,1,v2);CHKERRQ(ierr);
60   PetscFunctionReturn(0);
61 }
62 
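/* Transpose and GetDiagonal variants of the wrapper above; they use the same
   scatter-through-ctx->plex_batch pattern around the cached SeqAIJ kernels. */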
63 static PetscErrorCode LandauMatMultTranspose(Mat A, Vec x, Vec y)
64 {
65   PetscErrorCode  ierr;
66   LandauCtx       *ctx;
67   PetscContainer  container;
68 
69   PetscFunctionBegin;
70   ierr = PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);CHKERRQ(ierr);
71   if (container) {
72     ierr = PetscContainerGetPointer(container, (void **) &ctx);CHKERRQ(ierr);
73     ierr = VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
74     ierr = VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
75     ierr = (*ctx->seqaij_multtranspose)(A,ctx->work_vec,y);CHKERRQ(ierr);
76     ierr = VecCopy(y, ctx->work_vec);CHKERRQ(ierr);
77     ierr = VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
78     ierr = VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
79     PetscFunctionReturn(0);
80   }
81   ierr = MatMultTranspose(A,x,y);CHKERRQ(ierr);
82   PetscFunctionReturn(0);
83 }
84 
85 static PetscErrorCode LandauMatGetDiagonal(Mat A,Vec x)
86 {
87   PetscErrorCode  ierr;
88   LandauCtx       *ctx;
89   PetscContainer  container;
90 
91   PetscFunctionBegin;
92   ierr = PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);CHKERRQ(ierr);
93   if (container) {
94     ierr = PetscContainerGetPointer(container, (void **) &ctx);CHKERRQ(ierr);
95     ierr = (*ctx->seqaij_getdiagonal)(A,ctx->work_vec);CHKERRQ(ierr);
96     ierr = VecScatterBegin(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
97     ierr = VecScatterEnd(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
98     PetscFunctionReturn(0);
99   }
100   ierr = MatGetDiagonal(A, x);CHKERRQ(ierr);
101   PetscFunctionReturn(0);
102 }
103 
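/* Destructor for the "assembly_maps" PetscContainer: releases the device copies of the element
   assembly maps (Kokkos or CUDA builds), then the host-side c_maps and gIdx arrays. */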
104 static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
105 {
106   P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
107   PetscErrorCode  ierr;
108   PetscFunctionBegin;
109   // free device data
110   if (maps[0].deviceType != LANDAU_CPU) {
111 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
112     if (maps[0].deviceType == LANDAU_KOKKOS) {
113       ierr = LandauKokkosDestroyMatMaps(maps,  maps[0].numgrids);CHKERRQ(ierr); // Kokkos frees the device copies
114     } // else could be CUDA
115 #elif defined(PETSC_HAVE_CUDA)
116     if (maps[0].deviceType == LANDAU_CUDA) {
117       ierr = LandauCUDADestroyMatMaps(maps, maps[0].numgrids);CHKERRQ(ierr);
118     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unsupported maps->deviceType %" PetscInt_FMT,maps->deviceType);
119 #endif
120   }
121   // free host data
122   for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
123     ierr = PetscFree(maps[grid].c_maps);CHKERRQ(ierr);
124     ierr = PetscFree(maps[grid].gIdx);CHKERRQ(ierr);
125   }
126   ierr = PetscFree(maps);CHKERRQ(ierr);
127 
128   PetscFunctionReturn(0);
129 }
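/* Pointwise integrands (assumed to be used for moment/diagnostic evaluation elsewhere in this file):
   energy_f returns v^2/2; gamma_m1_f returns a well-conditioned gamma-1 for relativistic runs. */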
130 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
131 {
132   PetscReal     v2 = 0;
133   PetscFunctionBegin;
134   /* compute v^2 / 2 */
135   for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
136   /* return the kinetic energy, v^2/2 */
137   u[0] = v2/2;
138   PetscFunctionReturn(0);
139 }
140 
141 /* needs double */
142 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
143 {
144   PetscReal     *c2_0_arr = ((PetscReal*)actx);
145   double        u2 = 0, c02 = (double)*c2_0_arr, xx;
146 
147   PetscFunctionBegin;
148   /* compute u^2 */
149   for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
150   /* gamma - 1 = g_eps, for conditioning; only derivatives of this field are used */
151   xx = u2/c02;
152 #if defined(PETSC_USE_DEBUG)
153   u[0] = PetscSqrtReal(1. + xx);
154 #else
155   u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the -1 offset may further help conditioning and is harmless since only derivatives are used
156 #endif
157   PetscFunctionReturn(0);
158 }
159 
160 /*
161  LandauFormJacobian_Internal - Evaluates Jacobian matrix.
162 
163  Input Parameters:
164  +  a_X - input vector
165  .  a_ctx - optional user-defined context
166  .  dim - dimension
167  -  shift - mass-matrix shift (0 builds the Jacobian, nonzero builds the scaled mass matrix)
168  Output Parameter:
169  .  JacP - Jacobian matrix, filled here but not created
170  */
171 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
172 {
173   LandauCtx         *ctx = (LandauCtx*)a_ctx;
174   PetscErrorCode    ierr;
175   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nb;
176   PetscQuadrature   quad;
177   PetscReal         Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
178   PetscScalar       *cellClosure=NULL;
179   const PetscScalar *xdata=NULL;
180   PetscDS           prob;
181   PetscContainer    container;
182   P4estVertexMaps   *maps;
183   Mat               subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ];
184 
185   PetscFunctionBegin;
186   PetscValidHeaderSpecific(a_X,VEC_CLASSID,1);
187   PetscValidHeaderSpecific(JacP,MAT_CLASSID,2);
188   PetscValidPointer(ctx,5);
189   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
190   PetscCheckFalse(ctx->plex[0] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
191   ierr = PetscLogEventBegin(ctx->events[10],0,0,0,0);CHKERRQ(ierr);
192   ierr = DMGetDS(ctx->plex[0], &prob);CHKERRQ(ierr); // same DS for all grids
193   ierr = PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);CHKERRQ(ierr);
194   if (container) {
195     PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly");
196     ierr = PetscContainerGetPointer(container, (void **) &maps);CHKERRQ(ierr);
197     PetscCheck(maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
198     for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL;
199   } else {
200     PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly");
201     for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) {
202       for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
203         ierr = DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ]);CHKERRQ(ierr);
204       }
205     }
206     maps = NULL;
207   }
208   // gather dynamic data (Eq_m is recomputed here to support quench and the Spitzer test) for CPU assembly, and the raw solution array for GPU Jacobian assembly; also get host copies of numCells[] and Nq
209   ierr = PetscFEGetQuadrature(ctx->fe[0], &quad);CHKERRQ(ierr);
210   ierr = PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);CHKERRQ(ierr); Nb = Nq;
211   PetscCheckFalse(Nq >LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ);
212   // get metadata for collecting dynamic data
213   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
214     PetscInt cStart, cEnd;
215     PetscCheckFalse(ctx->plex[grid] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
216     ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
217     numCells[grid] = cEnd - cStart; // grids can have different topology
218   }
219   ierr = PetscLogEventEnd(ctx->events[10],0,0,0,0);CHKERRQ(ierr);
220   if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
221     DM pack;
222     ierr = VecGetDM(a_X, &pack);CHKERRQ(ierr);
223     PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
224     ierr = PetscLogEventBegin(ctx->events[1],0,0,0,0);CHKERRQ(ierr);
225     ierr = MatZeroEntries(JacP);CHKERRQ(ierr);
226     for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
227       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
228       if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
229     }
230     if (!ctx->gpu_assembly) {
231       Vec          *locXArray,*globXArray;
232       PetscScalar  *cellClosure_it;
233       PetscInt     cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS];
234       PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
235       for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
236         ierr = DMGetLocalSection(ctx->plex[grid], &section[grid]);CHKERRQ(ierr);
237         ierr = DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);CHKERRQ(ierr);
238         ierr = PetscSectionGetNumFields(section[grid], &Nf[grid]);CHKERRQ(ierr);
239       }
240       /* count cellClosure size */
241       ierr = DMCompositeGetNumberDM(pack,&nDMs);CHKERRQ(ierr);
242       for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
243       ierr = PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure);CHKERRQ(ierr);
244       cellClosure_it = cellClosure;
245       ierr = PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray);CHKERRQ(ierr);
246       ierr = PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray);CHKERRQ(ierr);
247       ierr = DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);CHKERRQ(ierr);
248       ierr = DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray);CHKERRQ(ierr);
249       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once)
250         for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
251           Vec         locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2;
252           PetscInt    cStart, cEnd, ei;
253           ierr = VecDuplicate(locX,&locX2);CHKERRQ(ierr);
254           ierr = DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2);CHKERRQ(ierr);
255           ierr = DMGlobalToLocalEnd  (ctx->plex[grid], globX, INSERT_VALUES, locX2);CHKERRQ(ierr);
256           ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
257           for (ei = cStart ; ei < cEnd; ++ei) {
258             PetscScalar *coef = NULL;
259             ierr = DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);CHKERRQ(ierr);
260             ierr = PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it));CHKERRQ(ierr); /* change if LandauIPReal != PetscScalar */
261             ierr = DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);CHKERRQ(ierr);
262             cellClosure_it += Nb*Nf[grid];
263           }
264           ierr = VecDestroy(&locX2);CHKERRQ(ierr);
265         }
266       }
267       PetscCheck(cellClosure_it-cellClosure == cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscInt_FMT " != cellClosure_sz = %" PetscInt_FMT,cellClosure_it-cellClosure,cellClosure_sz*ctx->batch_sz);
268       ierr = DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);CHKERRQ(ierr);
269       ierr = DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray);CHKERRQ(ierr);
270       ierr = PetscFree(locXArray);CHKERRQ(ierr);
271       ierr = PetscFree(globXArray);CHKERRQ(ierr);
272       xdata = NULL;
273     } else {
274       PetscMemType mtype;
275       if (ctx->jacobian_field_major_order) { // get data in batch ordering
276         ierr = VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
277         ierr = VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
278         ierr = VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype);CHKERRQ(ierr);
279       } else {
280         ierr = VecGetArrayReadAndMemType(a_X,&xdata,&mtype);CHKERRQ(ierr);
281       }
282       if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
283         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
284       }
285       cellClosure = NULL;
286     }
287     ierr = PetscLogEventEnd(ctx->events[1],0,0,0,0);CHKERRQ(ierr);
288   } else xdata = cellClosure = NULL;
289 
290   /* evaluate the Landau operator: device back-ends (CUDA/Kokkos) or the CPU reference path below */
291   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
292     if (ctx->deviceType == LANDAU_CUDA) {
293 #if defined(PETSC_HAVE_CUDA)
294       ierr = LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP);CHKERRQ(ierr);
295 #else
296       SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
297 #endif
298     } else if (ctx->deviceType == LANDAU_KOKKOS) {
299 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
300       ierr = LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP);CHKERRQ(ierr);
301 #else
302       SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
303 #endif
304     }
305   } else {   /* CPU version */
306     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
307     PetscInt        ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS];
308     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
309     PetscReal       Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
310     PetscSection    section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
311     PetscScalar     *coo_vals=NULL;
312     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
313       ierr = DMGetLocalSection(ctx->plex[grid], &section[grid]);CHKERRQ(ierr);
314       ierr = DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);CHKERRQ(ierr);
315       ierr = PetscSectionGetNumFields(section[grid], &Nf[grid]);CHKERRQ(ierr);
316     }
317     /* count IPf size, etc */
318     ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr); // Bf, &Df same for all grids
319     const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
320     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
321     for (PetscInt grid=0 ; grid<num_grids ; grid++) {
322       PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
323       elem_offset[grid+1] = elem_offset[grid] + numCells[grid];
324       ip_offset[grid+1]   = ip_offset[grid]   + numCells[grid]*Nq;
325       ipf_offset[grid+1]  = ipf_offset[grid]  + Nq*nfloc*numCells[grid];
326     }
327     IPf_sz_glb = ipf_offset[num_grids];
328     IPf_sz_tot = IPf_sz_glb*ctx->batch_sz;
329     // prep COO
330     if (ctx->coo_assembly) {
331       ierr = PetscMalloc1(ctx->SData_d.coo_size,&coo_vals);CHKERRQ(ierr); // allocate every time?
332       ierr = PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",ctx->SData_d.coo_size);CHKERRQ(ierr);
333     }
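    // when COO assembly is enabled, per-element values are accumulated into coo_vals below and
    // handed to the matrix in a single MatSetValuesCOO() call after the element loop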
334     if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
335 #if defined(PETSC_HAVE_THREADSAFETY)
336       double         starttime, endtime;
337       starttime = MPI_Wtime();
338 #endif
339       ierr = PetscLogEventBegin(ctx->events[8],0,0,0,0);CHKERRQ(ierr);
340       for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
341         invMass[fieldA]  = ctx->m_0/ctx->masses[fieldA];
342         Eq_m[fieldA]     = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
343         if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
344         nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA];
345         nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
346       }
347       ierr = PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz);CHKERRQ(ierr);
348       // F df/dx
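      /* ff/dudx/dudy(/dudz) hold f and its velocity-space gradient at every quadrature point,
         indexed as b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi,
         i.e. [batch][grid][field][cell][quadrature point]; filled once per Jacobian evaluation */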
349       for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
350         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch
351         // find my grid:
352         PetscInt       grid = 0;
353         while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck search for grid
354         {
355           const PetscInt     loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
356           const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
357           PetscScalar        *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
358           PetscReal          *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // invJ is static data, stored once (batch 0) and shared by all batch members
359           PetscInt           b,f,q;
360           if (cellClosure) {
361             coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const
362           } else {
363             coef = coef_buff;
364             for (f = 0; f < loc_Nf; ++f) {
365               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
366               for (b = 0; b < Nb; ++b) {
367                 PetscInt idx = Idxs[b];
368                 if (idx >= 0) {
369                   coef[f*Nb+b] = xdata[idx+moffset];
370                 } else {
371                   idx = -idx - 1;
372                   coef[f*Nb+b] = 0;
373                   for (q = 0; q < maps[grid].num_face; q++) {
374                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
375                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
376                     coef[f*Nb+b] += scale*xdata[id+moffset];
377                   }
378                 }
379               }
380             }
381           }
382           /* get f and df */
383           for (PetscInt qi = 0; qi < Nq; qi++) {
384             const PetscReal  *invJ = &invJe[qi*dim*dim];
385             const PetscReal  *Bq   = &BB[qi*Nb];
386             const PetscReal  *Dq   = &DD[qi*Nb*dim];
387             PetscReal        u_x[LANDAU_DIM];
388             /* get f & df */
389             for (f = 0; f < loc_Nf; ++f) {
390               const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi;
391               PetscInt       b, e;
392               PetscReal      refSpaceDer[LANDAU_DIM];
393               ff[idx] = 0.0;
394               for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
395               for (b = 0; b < Nb; ++b) {
396                 const PetscInt    cidx = b;
397                 ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
398                 for (int d = 0; d < dim; ++d) {
399                   refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
400                 }
401               }
402               for (int d = 0; d < LANDAU_DIM; ++d) {
403                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) {
404                   u_x[d] += invJ[e*dim+d]*refSpaceDer[e];
405                 }
406               }
407               dudx[idx] = u_x[0];
408               dudy[idx] = u_x[1];
409 #if LANDAU_DIM==3
410               dudz[idx] = u_x[2];
411 #endif
412             }
413           } // q
414         } // grid
415       } // grid*batch
416       ierr = PetscLogEventEnd(ctx->events[8],0,0,0,0);CHKERRQ(ierr);
417 #if defined(PETSC_HAVE_THREADSAFETY)
418       endtime = MPI_Wtime();
419       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
420 #endif
421     } // Jacobian setup
422     // assemble the element matrices: Landau Jacobian when shift==0, scaled mass matrix otherwise
423     for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
424       const PetscInt b_Nelem      = elem_offset[num_grids];
425       const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem;
426       PetscInt       grid         = 0;
427 #if defined(PETSC_HAVE_THREADSAFETY)
428       double         starttime, endtime;
429       starttime                   = MPI_Wtime();
430 #endif
431       while (glb_elem_idx >= elem_offset[grid+1]) grid++;
432       {
433         const PetscInt     loc_Nf  = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
434         const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim;
435         PetscScalar        *elemMat;
436         const PetscReal    *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim];
437         ierr = PetscMalloc1(elemMatSize, &elemMat);CHKERRQ(ierr);
438         ierr = PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat));CHKERRQ(ierr);
439         if (shift==0.0) { // Jacobian
440           ierr = PetscLogEventBegin(ctx->events[4],0,0,0,0);CHKERRQ(ierr);
441         } else {          // mass
442           ierr = PetscLogEventBegin(ctx->events[16],0,0,0,0);CHKERRQ(ierr);
443         }
444         for (PetscInt qj = 0; qj < Nq; ++qj) {
445           const PetscInt   jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
446           PetscReal        g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
447           PetscInt         d,d2,dp,d3,IPf_idx;
448           if (shift==0.0) { // Jacobian
449             const PetscReal * const invJj = &invJe[qj*dim*dim];
450             PetscReal               gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
451             const PetscReal         vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
452             // create g2 & g3
453             for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K
454               gg2_temp[d] = 0;
455               for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0;
456             }
457             /* inner beta reduction */
458             IPf_idx = 0;
459             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
460               PetscInt  nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
461               for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
462                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
463                   const PetscReal wi       = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
464                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
465 #if LANDAU_DIM==2
466                   PetscReal       Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
467                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
468 #else
469                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
470                   if (ctx->use_relativistic_corrections) {
471                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
472                   } else {
473                     LandauTensor3D(vj, x, y, z, U, mask);
474                   }
475 #endif
476                   for (int f = 0; f < Nfloc_r ; ++f) {
477                     const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi;  // IPf_idx + f*nip_loc_r + loc_fdf_idx;
478                     temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
479                     temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
480 #if LANDAU_DIM==3
481                     temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
482 #endif
483                     temp2    += ff[idx]*nu_beta[f+f_off];
484                   }
485                   temp1[0] *= wi;
486                   temp1[1] *= wi;
487 #if LANDAU_DIM==3
488                   temp1[2] *= wi;
489 #endif
490                   temp2    *= wi;
491 #if LANDAU_DIM==2
492                   for (d2 = 0; d2 < 2; d2++) {
493                     for (d3 = 0; d3 < 2; ++d3) {
494                       /* K = U * grad(f): g2=e: i,A */
495                       gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
496                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
497                       gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
498                     }
499                   }
500 #else
501                   for (d2 = 0; d2 < 3; ++d2) {
502                     for (d3 = 0; d3 < 3; ++d3) {
503                       /* K = U * grad(f): g2 = e: i,A */
504                       gg2_temp[d2] += U[d2][d3]*temp1[d3];
505                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
506                       gg3_temp[d2][d3] += U[d2][d3]*temp2;
507                     }
508                   }
509 #endif
510                 } // qi
511               } // ei_r
512               IPf_idx += nip_loc_r*Nfloc_r;
513             } /* grid_r - IPs */
514             PetscCheck(IPf_idx == IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT,IPf_idx,IPf_sz_glb);
515             // add alpha and put in gg2/3
516             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
517               for (d2 = 0; d2 < dim; d2++) {
518                 gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
519                 for (d3 = 0; d3 < dim; d3++) {
520                   gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
521                 }
522               }
523             }
524             /* add electric field term once per IP */
525             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) {
526               gg2[fieldA][dim-1] += Eq_m[fieldA+f_off];
527             }
528             /* Jacobian transform - g2, g3 */
529             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
530               for (d = 0; d < dim; ++d) {
531                 g2[fieldA][d] = 0.0;
532                 for (d2 = 0; d2 < dim; ++d2) {
533                   g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
534                   g3[fieldA][d][d2] = 0.0;
535                   for (d3 = 0; d3 < dim; ++d3) {
536                     for (dp = 0; dp < dim; ++dp) {
537                       g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
538                     }
539                   }
540                   g3[fieldA][d][d2] *= wj;
541                 }
542                 g2[fieldA][d] *= wj;
543               }
544             }
545           } else { // mass
546             PetscReal wj = ww[jpidx_glb];
547             /* Jacobian transform - g0 */
548             for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) {
549               if (dim==2) {
550                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
551               } else {
552                 g0[fieldA] = wj * shift; // move this to below and remove g0
553               }
554             }
555           }
556           /* FE matrix construction */
557           {
558             PetscInt  fieldA,d,f,d2,g;
559             const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
560             /* assemble - on the diagonal (I,I) */
561             for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
562               for (f = 0; f < Nb ; f++) {
563                 const PetscInt i = fieldA*Nb + f; /* Element matrix row */
564                 for (g = 0; g < Nb; ++g) {
565                   const PetscInt j    = fieldA*Nb + g; /* Element matrix column */
566                   const PetscInt fOff = i*totDim + j;
567                   if (shift==0.0) {
568                     for (d = 0; d < dim; ++d) {
569                       elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
570                       for (d2 = 0; d2 < dim; ++d2) {
571                         elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
572                       }
573                     }
574                   } else { // mass
575                     elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
576                   }
577                 }
578               }
579             }
580           }
581         } /* qj loop */
582         if (shift==0.0) { // Jacobian
583           ierr = PetscLogEventEnd(ctx->events[4],0,0,0,0);CHKERRQ(ierr);
584         } else {
585           ierr = PetscLogEventEnd(ctx->events[16],0,0,0,0);CHKERRQ(ierr);
586         }
587 #if defined(PETSC_HAVE_THREADSAFETY)
588         endtime = MPI_Wtime();
589         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
590 #endif
591         /* assemble matrix */
592         if (!container) {
593           PetscInt cStart;
594           ierr = PetscLogEventBegin(ctx->events[6],0,0,0,0);CHKERRQ(ierr);
595           ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL);CHKERRQ(ierr);
596           ierr = DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES);CHKERRQ(ierr);
597           ierr = PetscLogEventEnd(ctx->events[6],0,0,0,0);CHKERRQ(ierr);
598         } else {  // GPU like assembly for debugging
599           PetscInt      fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
600           PetscScalar   vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0};
601           LandauIdx     *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
602           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
603           for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
604             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
605             for (f = 0; f < Nb ; f++) {
606               PetscInt idx = Idxs[f];
607               if (idx >= 0) {
608                 nr           = 1;
609                 rows0[0]     = idx;
610                 row_scale[0] = 1.;
611               } else {
612                 idx = -idx - 1;
613                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
614                   if (maps[grid].c_maps[idx][q].gid < 0) break;
615                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
616                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
617                 }
618               }
619               for (g = 0; g < Nb; ++g) {
620                 idx = Idxs[g];
621                 if (idx >= 0) {
622                   nc = 1;
623                   cols0[0]     = idx;
624                   col_scale[0] = 1.;
625                 } else {
626                   idx = -idx - 1;
627                   nc = maps[grid].num_face;
628                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
629                     if (maps[grid].c_maps[idx][q].gid < 0) break;
630                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
631                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
632                   }
633                 }
634                 const PetscInt    i   = fieldA*Nb + f; /* Element matrix row */
635                 const PetscInt    j   = fieldA*Nb + g; /* Element matrix column */
636                 const PetscScalar Aij = elemMat[i*totDim + j];
637                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
638                   const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
639                   const int idx0   = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
640                   for (int q = 0, idx2 = idx0; q < nr; q++) {
641                     for (int d = 0; d < nc; d++, idx2++) {
642                       coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij;
643                     }
644                   }
645                 } else {
646                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
647                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
648                   for (q = 0; q < nr; q++) {
649                     for (d = 0; d < nc; d++) {
650                       vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
651                     }
652                   }
653                   ierr = MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES);CHKERRQ(ierr);
654                 }
655               }
656             }
657           }
658         }
659         if (loc_elem==-1) {
660           PetscErrorCode    ierr2;
661           ierr2 = PetscPrintf(ctx->comm,"CPU Element matrix\n");CHKERRQ(ierr2);
662           for (int d = 0; d < totDim; ++d) {
663             for (int f = 0; f < totDim; ++f) {ierr2 = PetscPrintf(ctx->comm," %12.5e",  PetscRealPart(elemMat[d*totDim + f]));CHKERRQ(ierr2);}
664             ierr2 = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr2);
665           }
666           exit(12);
667         }
668         ierr = PetscFree(elemMat);CHKERRQ(ierr);
669       } /* grid */
670     } /* outer element & batch loop */
671     if (shift==0.0) { // Jacobian: free the f/df work arrays allocated above
672       ierr = PetscFree4(ff, dudx, dudy, dudz);CHKERRQ(ierr);
673     }
674     if (!container) {   // 'CPU' assembly move nest matrix to global JacP
675       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP
676         for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
677           const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
678           PetscInt          nloc, nzl, colbuf[1024], row;
679           const PetscInt    *cols;
680           const PetscScalar *vals;
681           Mat               B = subJ[ LAND_PACK_IDX(b_id,grid) ];
682           ierr = MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
683           ierr = MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
684           ierr = MatGetSize(B, &nloc, NULL);CHKERRQ(ierr);
685           for (int i=0 ; i<nloc ; i++) {
686             ierr = MatGetRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
687             PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
688             for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j];
689             row  = moffset + i;
690             ierr = MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES);CHKERRQ(ierr);
691             ierr = MatRestoreRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
692           }
693           ierr = MatDestroy(&B);CHKERRQ(ierr);
694         }
695       }
696     }
697     if (coo_vals) {
698       ierr = MatSetValuesCOO(JacP,coo_vals,ADD_VALUES);CHKERRQ(ierr);
699       ierr = PetscFree(coo_vals);CHKERRQ(ierr);
700     }
701   } /* CPU version */
702   ierr = MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
703   ierr = MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
704   /* clean up */
705   if (cellClosure) {
706     ierr = PetscFree(cellClosure);CHKERRQ(ierr);
707   }
708   if (xdata) {
709     ierr = VecRestoreArrayReadAndMemType(a_X,&xdata);CHKERRQ(ierr);
710   }
711   PetscFunctionReturn(0);
712 }
713 
714 #if defined(LANDAU_ADD_BCS)
715 static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
716                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
717                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
718                     PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
719 {
720   uexact[0] = 0;
721 }
722 #endif
723 
724 #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
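/* CircleInflate - map a point (x,y) of the rectangular mesh onto graded circular sections: the
   domain is split into num_sections angular pieces and the radius is graded between the inner
   (ion) radius r1, the electron radius r2 and the outer radius r0. Used by GeometryDMLandau()
   below when ctx->inflate is set. */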
725 static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
726                           PetscReal *outX, PetscReal *outY)
727 {
728   PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
729   if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
730     *outX = x; *outY = y;
731   } else {
732     const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
733     PetscReal       cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
734     if (num_sections==2) {
735       rotcos  = 0.70710678118654;
736       outfact = 1.5; efact = 2.5;
737       /* rotate normalized vector into [-pi/4,pi/4) */
738       if (sinphi >= 0.) {         /* top cell, rotate by -pi/4 */
739         cth = 0.707106781186548; sth = -0.707106781186548;
740       } else {                    /* bottom cell, rotate by +pi/4 */
741         cth = 0.707106781186548; sth = .707106781186548;
742       }
743     } else if (num_sections==3) {
744       rotcos  = 0.86602540378443;
745       outfact = 1.5; efact = 2.5;
746       /* rotate normalized vector into [-pi/6,pi/6) */
747       if (sinphi >= 0.5) {         /* top cell, -pi/3 */
748         cth = 0.5; sth = -0.866025403784439;
749       } else if (sinphi >= -.5) {  /* mid cell 0 */
750         cth = 1.; sth = .0;
751       } else { /* bottom cell +pi/3 */
752         cth = 0.5; sth = 0.866025403784439;
753       }
754     } else if (num_sections==4) {
755       rotcos  = 0.9238795325112;
756       outfact = 1.5; efact = 3;
757       /* rotate normalized vector into [-pi/8,pi/8) */
758       if (sinphi >= 0.707106781186548) {         /* top cell, -3pi/8 */
759         cth = 0.38268343236509;  sth = -0.923879532511287;
760       } else if (sinphi >= 0.) {                 /* mid top cell -pi/8 */
761         cth = 0.923879532511287; sth = -.38268343236509;
762       } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
763         cth = 0.923879532511287; sth = 0.38268343236509;
764       } else {                                   /* bottom cell + 3pi/8 */
765         cth = 0.38268343236509;  sth = .923879532511287;
766       }
767     } else {
768       cth = 0.; sth = 0.; rotcos = 0; outfact = efact = 0; /* unsupported num_sections; avoid using outfact uninitialized */
769     }
770     Rth[0][0] = cth; Rth[0][1] =-sth;
771     Rth[1][0] = sth; Rth[1][1] = cth;
772     MATVEC2(Rth,xy,xyprime);
773     if (num_sections==2) {
774       newrr = xyprime[0]/rotcos;
775     } else {
776       PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
777       PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
778       newrr = rin + routfrac*nroutmax;
779     }
780     *outX = cosphi*newrr; *outY = sinphi*newrr;
781     /* grade */
782     PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
783     if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
784     else {         rs = r1; re = r2; fact = efact;} /* electron zone */
785     tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
786     *outX *= tt;
787     *outY *= tt;
788   }
789 }
790 
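/* GeometryDMLandau - base coordinate mapping handed to DMForestSetBaseCoordinateMapping();
   optionally inflates the box coordinates onto circular sections via CircleInflate(). */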
791 static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
792 {
793   LandauCtx   *ctx = (LandauCtx*)a_ctx;
794   PetscReal   r = abc[0], z = abc[1];
795   if (ctx->inflate) {
796     PetscReal absR, absZ;
797     absR = PetscAbs(r);
798     absZ = PetscAbs(z);
799     CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
800     r = (r > 0) ? absR : -absR;
801     z = (z > 0) ? absZ : -absZ;
802   }
803   xyz[0] = r;
804   xyz[1] = z;
805   if (dim==3) xyz[2] = abc[2];
806 
807   PetscFunctionReturn(0);
808 }
809 
810 /* create DMComposite of meshes for each species group */
811 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
812 {
813   PetscErrorCode ierr;
814 
815   PetscFunctionBegin;
816   { /* p4est, quads */
817     /* Create plex mesh of Landau domain */
818     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
819       PetscReal radius = ctx->radius[grid];
820       if (!ctx->sphere) {
821         PetscInt       cells[] = {2,2,2};
822         PetscReal      lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
823         DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
824         if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; }
825         ierr = DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid]);CHKERRQ(ierr); // todo: make composite and create dm[grid] here
826         ierr = DMLocalizeCoordinates(ctx->plex[grid]);CHKERRQ(ierr); /* needed for periodic */
827         if (dim==3) {ierr = PetscObjectSetName((PetscObject) ctx->plex[grid], "cube");CHKERRQ(ierr);}
828         else {ierr = PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane");CHKERRQ(ierr);}
829       } else if (dim==2) { // sphere is all wrong. should just have one inner radius
830         PetscInt       numCells,cells[16][4],i,j;
831         PetscInt       numVerts;
832         PetscReal      inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
833         PetscReal      *flatCoords   = NULL;
834         PetscInt       *flatCells    = NULL, *pcell;
835         if (ctx->num_sections==2) {
836 #if 1
837           numCells = 5;
838           numVerts = 10;
839           int cells2[][4] = { {0,1,4,3},
840                               {1,2,5,4},
841                               {3,4,7,6},
842                               {4,5,8,7},
843                               {6,7,8,9} };
844           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
845           ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
846           {
847             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
848             for (j = 0; j < numVerts-1; j++) {
849               PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
850               PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
851               z = rad * PetscSinReal(theta);
852               coords[j][1] = z;
853               r = rad * PetscCosReal(theta);
854               coords[j][0] = r;
855             }
856             coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
857           }
858 #else
859           numCells = 4;
860           numVerts = 8;
861           static int     cells2[][4] = {{0,1,2,3},
862                                         {4,5,1,0},
863                                         {5,6,2,1},
864                                         {6,7,3,2}};
865           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
866           ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
867           {
868             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
869             PetscInt j;
870             for (j = 0; j < 8; j++) {
871               PetscReal z, r;
872               PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
873               PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
874               z = rad * PetscSinReal(theta);
875               coords[j][1] = z;
876               r = rad * PetscCosReal(theta);
877               coords[j][0] = r;
878             }
879           }
880 #endif
881         } else if (ctx->num_sections==3) {
882           numCells = 7;
883           numVerts = 12;
884           int cells2[][4] = { {0,1,5,4},
885                               {1,2,6,5},
886                               {2,3,7,6},
887                               {4,5,9,8},
888                               {5,6,10,9},
889                               {6,7,11,10},
890                               {8,9,10,11} };
891           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
892           ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
893           {
894             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
895             for (j = 0; j < numVerts; j++) {
896               PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
897               PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
898               z = rad * PetscSinReal(theta);
899               coords[j][1] = z;
900               r = rad * PetscCosReal(theta);
901               coords[j][0] = r;
902             }
903           }
904         } else if (ctx->num_sections==4) {
905           numCells = 10;
906           numVerts = 16;
907           int cells2[][4] = { {0,1,6,5},
908                               {1,2,7,6},
909                               {2,3,8,7},
910                               {3,4,9,8},
911                               {5,6,11,10},
912                               {6,7,12,11},
913                               {7,8,13,12},
914                               {8,9,14,13},
915                               {10,11,12,15},
916                               {12,13,14,15}};
917           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
918           ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
919           {
920             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
921             for (j = 0; j < numVerts-1; j++) {
922               PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
923               PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
924               z = rad * PetscSinReal(theta);
925               coords[j][1] = z;
926               r = rad * PetscCosReal(theta);
927               coords[j][0] = r;
928             }
929             coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
930           }
931         } else {
932           numCells = 0;
933           numVerts = 0;
934         }
935         for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
936           pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
937           pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
938         }
939         ierr = DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]);CHKERRQ(ierr);
940         ierr = PetscFree2(flatCoords,flatCells);CHKERRQ(ierr);
941         ierr = PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle");CHKERRQ(ierr);
942       } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support the cubed sphere");
943 
944       ierr = DMSetFromOptions(ctx->plex[grid]);CHKERRQ(ierr);
945     } // grid loop
946     ierr = PetscObjectSetOptionsPrefix((PetscObject)pack,prefix);CHKERRQ(ierr);
947     ierr = DMSetFromOptions(pack);CHKERRQ(ierr);
948 
949     { /* convert to p4est (or whatever), wait for discretization to create pack */
950       char      convType[256];
951       PetscBool flg;
952       ierr = PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");CHKERRQ(ierr);
953       ierr = PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg);CHKERRQ(ierr);
954       ierr = PetscOptionsEnd();CHKERRQ(ierr);
955       if (flg) {
956         ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
957         for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
958           DM dmforest;
959           ierr = DMConvert(ctx->plex[grid],convType,&dmforest);CHKERRQ(ierr);
960           if (dmforest) {
961             PetscBool isForest;
962             ierr = PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix);CHKERRQ(ierr);
963             ierr = DMIsForest(dmforest,&isForest);CHKERRQ(ierr);
964             if (isForest) {
965               if (ctx->sphere && ctx->inflate) {
966                 ierr = DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx);CHKERRQ(ierr);
967               }
968               ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
969               ctx->plex[grid] = dmforest; // Forest for adaptivity
970             } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
971           } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
972         }
973       } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
974     }
975   } /* non-file */
976   ierr = DMSetDimension(pack, dim);CHKERRQ(ierr);
977   ierr = PetscObjectSetName((PetscObject) pack, "Mesh");CHKERRQ(ierr);
978   ierr = DMSetApplicationContext(pack, ctx);CHKERRQ(ierr);
979 
980   PetscFunctionReturn(0);
981 }
982 
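/* SetupDS - create a default PetscFE for each species that lives on this grid, attach the FEs as
   fields of ctx->plex[grid] (named "e", "i1", ...), build the DS, and name the section components
   ("se", "si1", ...). */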
983 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
984 {
985   PetscErrorCode  ierr;
986   PetscInt        ii,i0;
987   char            buf[256];
988   PetscSection    section;
989 
990   PetscFunctionBegin;
991   for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
992     if (ii==0) {ierr = PetscSNPrintf(buf, sizeof(buf), "e");CHKERRQ(ierr);}
993     else {ierr = PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii);CHKERRQ(ierr);}
994     /* Setup Discretization - FEM */
995     ierr = PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);CHKERRQ(ierr);
996     ierr = PetscObjectSetName((PetscObject) ctx->fe[ii], buf);CHKERRQ(ierr);
997     ierr = DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]);CHKERRQ(ierr);
998   }
999   ierr = DMCreateDS(ctx->plex[grid]);CHKERRQ(ierr);
1000   ierr = DMGetSection(ctx->plex[grid], &section);CHKERRQ(ierr);
1001   for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1002     if (ii==0) {ierr = PetscSNPrintf(buf, sizeof(buf), "se");CHKERRQ(ierr);}
1003     else {ierr = PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii);CHKERRQ(ierr);}
1004     ierr = PetscSectionSetComponentName(section, i0, 0, buf);CHKERRQ(ierr);
1005   }
1006   PetscFunctionReturn(0);
1007 }
1008 
1009 /* Define a Maxwellian function for testing out the operator. */
1010 
1011 /* Using cartesian velocity space coordinates, the particle */
1012 /* density, [1/m^3], is defined according to */
1013 
1014 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
1015 
1016 /* Using some constant, c, we normalize the velocity vector into a */
1017 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
1018 
1019 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
1020 
1021 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
1022 /* for finding the particle within the interval in a box dx^3 around x is */
1023 
1024 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
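/* With v = v_0*x and theta = 2*k*T/(m*v_0^2), this is f(x) = n*(pi*theta)^(-3/2)*exp(-x^2/theta), */
/* which is what maxwellian() below evaluates (plus an optional copy shifted by electronShift).    */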
1025 
1026 typedef struct {
1027   PetscReal v_0;
1028   PetscReal kT_m;
1029   PetscReal n;
1030   PetscReal shift;
1031 } MaxwellianCtx;
1032 
1033 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
1034 {
1035   MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
1036   PetscInt      i;
1037   PetscReal     v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
1038   PetscFunctionBegin;
1039   /* compute the exponents, v^2 */
1040   for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
1041   /* evaluate the Maxwellian */
1042   u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1043   if (mctx->shift!=0.) {
1044     v2 = 0;
1045     for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
1046     v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
1047     /* evaluate the shifted Maxwellian */
1048     u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1049   }
1050   PetscFunctionReturn(0);
1051 }
1052 
1053 /*@
1054  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
1055 
1056  Collective on X
1057 
1058  Input Parameters:
1059  +   dm - The mesh (local)
1060  .   time - Current time
1061  .   temps - Temperatures of each species (global)
1062  .   ns - Number density of each species (global)
1063  .   grid, b_id - index of the current grid (offset into temps and ns) and batch index (perturbs n slightly per batch member)
1064  -   actx - Landau context
1065 
1066  Output Parameter:
1067  .   X  - The state (local to this grid)
1068 
1069  Level: beginner
1070 
1071  .keywords: mesh
1072  .seealso: DMPlexLandauCreateVelocitySpace()
1073  @*/
1074 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx)
1075 {
1076   LandauCtx      *ctx = (LandauCtx*)actx;
1077   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
1078   PetscErrorCode ierr;
1079   PetscInt       dim,ii,i0;
1080   MaxwellianCtx  *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
1081 
1082   PetscFunctionBegin;
1083   ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
1084   if (!ctx) { ierr = DMGetApplicationContext(dm, &ctx);CHKERRQ(ierr); }
1085   for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1086     mctxs[i0]      = &data[i0];
1087     data[i0].v_0   = ctx->v_0; // v_0 same for all grids
1088     data[i0].kT_m  = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
1089     data[i0].n     = ns[ii] * (1+(double)b_id/100.0); // make each batched solve a little different to mimic an application; n[0] is used for the Connor-Hastie field
1090     initu[i0]      = maxwellian;
1091     data[i0].shift = 0;
1092   }
1093   data[0].shift = ctx->electronShift;
1094   /* need to make ADD_ALL_VALUES work - TODO */
1095   ierr = DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X);CHKERRQ(ierr);
1096   PetscFunctionReturn(0);
1097 }
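/* Example (sketch): add the Maxwellian state for grid `grid`, batch member 0, at t=0, assuming
   `dm`, a zeroed Vec `X`, and a populated LandauCtx `ctx` (this is what LandauSetInitialCondition
   below does):

     ierr = DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, 0, ctx);CHKERRQ(ierr);
*/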
1098 
1099 /*
1100  LandauSetInitialCondition - Adds Maxwellians with context
1101 
1102  Collective on X
1103 
1104  Input Parameters:
1105  +   dm - The mesh
1106  .   grid - index of the current grid - just used for offset into temps and ns
1107  -   actx - Landau context with T and n
1108 
1109  Output Parameter:
1110  .   X  - The state
1111 
1112  Level: beginner
1113 
1114  .keywords: mesh
1115  .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauAddMaxwellians()
1116  */
1117 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx)
1118 {
1119   LandauCtx        *ctx = (LandauCtx*)actx;
1120   PetscErrorCode ierr;
1121   PetscFunctionBegin;
1122   if (!ctx) { ierr = DMGetApplicationContext(dm, &ctx);CHKERRQ(ierr); }
1123   ierr = VecZeroEntries(X);CHKERRQ(ierr);
1124   ierr = DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx);CHKERRQ(ierr);
1125   PetscFunctionReturn(0);
1126 }
1127 
1128 // adapt a level once. Forest in/out
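// The `type` argument selects the refinement pass driven by adapt() below:
//   0 - refine along v_perp=0 for z in [0, re_radius] (runaway electrons)
//   1 - refine along v_perp=0 within vperp0_radius1 (electrons)
//   2 - refine the cells nearest the origin (and nearest the sphere radii with -dm_landau_sphere)
//   3 - refine along v_perp=0 within vperp0_radius2 (ions)
//   4 - uniform refinement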
1129 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1130 {
1131   DM               forest, plex, adaptedDM = NULL;
1132   PetscDS          prob;
1133   PetscBool        isForest;
1134   PetscQuadrature  quad;
1135   PetscInt         Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1136   DMLabel          adaptLabel = NULL;
1137   PetscErrorCode   ierr;
1138 
1139   PetscFunctionBegin;
1140   forest = ctx->plex[grid];
1141   ierr = DMCreateDS(forest);CHKERRQ(ierr);
1142   ierr = DMGetDS(forest, &prob);CHKERRQ(ierr);
1143   ierr = DMGetDimension(forest, &dim);CHKERRQ(ierr);
1144   ierr = DMIsForest(forest, &isForest);CHKERRQ(ierr);
1145   PetscCheck(isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"DM is not a forest");
1146   ierr = DMConvert(forest, DMPLEX, &plex);CHKERRQ(ierr);
1147   ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
1148   ierr = DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel);CHKERRQ(ierr);
1149   ierr = PetscFEGetQuadrature(fem, &quad);CHKERRQ(ierr);
1150   ierr = PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);CHKERRQ(ierr);
1151   PetscCheckFalse(Nq >LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ);
1152   ierr = PetscDSGetDimensions(prob, &Nb);CHKERRQ(ierr);
1153   if (type==4) {
1154     for (c = cStart; c < cEnd; c++) {
1155       ierr = DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);CHKERRQ(ierr);
1156     }
1157     ierr = PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM");CHKERRQ(ierr);
1158   } else if (type==2) {
1159     PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1160     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1161     for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1162     for (c = cStart; c < cEnd; c++) {
1163       PetscReal    tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1164       ierr = DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);CHKERRQ(ierr);
1165       for (qj = 0; qj < Nq; ++qj) {
1166         tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1167         r  = PetscSqrtReal(tt);
1168         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1169           minRad = r;
1170           nr     = 0;
1171           rCellIdx[nr++]= c;
1172           ierr = PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", r, c, qj+1, Nq);CHKERRQ(ierr);
1173         } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1174           for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1175           if (k==nr) {
1176             rCellIdx[nr++]= c;
1177             ierr = PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", r, c, qj+1, Nq, r-minRad);CHKERRQ(ierr);
1178           }
1179         }
1180         if (ctx->sphere) {
1181           if ((tt=r-ctx->e_radius) > 0) {
1182             ierr = PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,tt);CHKERRQ(ierr);
1183             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1184               eMinRad = tt;
1185               eMaxIdx = 0;
1186               eCellIdx[eMaxIdx++] = c;
1187             } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1188               eCellIdx[eMaxIdx++] = c;
1189             }
1190           }
1191           if ((tt=r-ctx->i_radius[grid]) > 0) {
1192             if (tt < iMinRad - 1.e-5) {
1193               iMinRad = tt;
1194               iMaxIdx = 0;
1195               iCellIdx[iMaxIdx++] = c;
1196             } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1197               iCellIdx[iMaxIdx++] = c;
1198             }
1199           }
1200         }
1201       }
1202     }
1203     for (k=0;k<nr;k++) {
1204       ierr = DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);CHKERRQ(ierr);
1205     }
1206     if (ctx->sphere) {
1207       for (c = 0; c < eMaxIdx; c++) {
1208         ierr = DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);CHKERRQ(ierr);
1209         ierr = PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad);CHKERRQ(ierr);
1210       }
1211       for (c = 0; c < iMaxIdx; c++) {
1212         ierr = DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);CHKERRQ(ierr);
1213         ierr = PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad);CHKERRQ(ierr);
1214       }
1215     }
1216     ierr = PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad);CHKERRQ(ierr);
1217   } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1218     PetscScalar  *coef = NULL;
1219     Vec          coords;
1220     PetscInt     csize,Nv,d,nz;
1221     DM           cdm;
1222     PetscSection cs;
1223     ierr = DMGetCoordinatesLocal(forest, &coords);CHKERRQ(ierr);
1224     ierr = DMGetCoordinateDM(forest, &cdm);CHKERRQ(ierr);
1225     ierr = DMGetLocalSection(cdm, &cs);CHKERRQ(ierr);
1226     for (c = cStart; c < cEnd; c++) {
1227       PetscInt doit = 0, outside = 0;
1228       ierr = DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);CHKERRQ(ierr);
1229       Nv = csize/dim;
1230       for (nz = d = 0; d < Nv; d++) {
1231         PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1232         x = PetscSqrtReal(x);
1233         if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1;             /* refine origin */
1234         else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++;   /* first pass don't refine bottom */
1235         else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1236         else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1237         if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1238       }
1239       ierr = DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);CHKERRQ(ierr);
1240       if (doit || (outside<Nv && nz)) {
1241         ierr = DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);CHKERRQ(ierr);
1242       }
1243     }
1244     ierr = PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM");CHKERRQ(ierr);
1245   }
1246   ierr = DMDestroy(&plex);CHKERRQ(ierr);
1247   ierr = DMAdaptLabel(forest, adaptLabel, &adaptedDM);CHKERRQ(ierr);
1248   ierr = DMLabelDestroy(&adaptLabel);CHKERRQ(ierr);
1249   *newForest = adaptedDM;
1250   if (adaptedDM) {
1251     if (isForest) {
1252       ierr = DMForestSetAdaptivityForest(adaptedDM,NULL);CHKERRQ(ierr); // detach the adapted forest from its pre-adaptation forest
1253     } else SETERRQ(ctx->comm,PETSC_ERR_PLIB,"Adapted DM is not a forest");
1254     ierr = DMConvert(adaptedDM, DMPLEX, &plex);CHKERRQ(ierr);
1255     ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
1256     ierr = PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart));CHKERRQ(ierr);
1257     ierr = DMDestroy(&plex);CHKERRQ(ierr);
1258   } else *newForest = NULL;
1259   PetscFunctionReturn(0);
1260 }
1261 
1262 // forest goes in (ctx->plex[grid]), plex comes out
1263 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1264 {
1265   PetscErrorCode  ierr;
1266   PetscInt        adaptIter;
1267 
1268   PetscFunctionBegin;
1269   PetscInt  type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]};
1270   for (type=0;type<5;type++) {
1271     for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1272       DM  newForest = NULL;
1273       ierr = adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest);CHKERRQ(ierr);
1274       if (newForest)  {
1275         ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
1276         ierr = VecDestroy(uu);CHKERRQ(ierr);
1277         ierr = DMCreateGlobalVector(newForest,uu);CHKERRQ(ierr);
1278         ierr = PetscObjectSetName((PetscObject) *uu, "uAMR");CHKERRQ(ierr);
1279         ierr = LandauSetInitialCondition(newForest, *uu, grid, 0, ctx);CHKERRQ(ierr);
1280         ctx->plex[grid] = newForest;
1281       } else {
1282         exit(4); // can happen with no AMR and post refinement
1283       }
1284     }
1285   }
1286   PetscFunctionReturn(0);
1287 }
1288 
1289 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1290 {
1291   PetscErrorCode    ierr;
1292   PetscBool         flg, sph_flg;
1293   PetscInt          ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
1294   PetscReal         v0_grid[LANDAU_MAX_GRIDS];
1295   DM                dummy;
1296 
1297   PetscFunctionBegin;
1298   ierr = DMCreate(ctx->comm,&dummy);CHKERRQ(ierr);
1299   /* get options - initialize context */
1300   ctx->verbose = 1; // should default to 0 to comply with PETSc's silent-by-default convention
1301 #if defined(PETSC_HAVE_THREADSAFETY)
1302   ctx->batch_sz = PetscNumOMPThreads;
1303 #else
1304   ctx->batch_sz = 1;
1305 #endif
1306   ctx->batch_view_idx = 0;
1307   ctx->interpolate    = PETSC_TRUE;
1308   ctx->gpu_assembly   = PETSC_TRUE;
1309   ctx->aux_bool       = PETSC_FALSE;
1310   ctx->electronShift  = 0;
1311   ctx->M              = NULL;
1312   ctx->J              = NULL;
1313   /* geometry and grids */
1314   ctx->sphere         = PETSC_FALSE;
1315   ctx->inflate        = PETSC_FALSE;
1316   ctx->aux_bool       = PETSC_FALSE;
1317   ctx->use_p4est      = PETSC_FALSE;
1318   ctx->num_sections   = 3; /* 2, 3 or 4 */
1319   for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
1320     ctx->radius[grid]           = 5.; /* thermal radius (velocity) */
1321     ctx->numAMRRefine[grid]     = 5;
1322     ctx->postAMRRefine[grid]    = 0;
1323     ctx->species_offset[grid+1] = 1; // one species default
1324     num_species_grid[grid]      = 0;
1325     ctx->plex[grid] = NULL;     /* cache as expensive to Convert */
1326   }
1327   ctx->species_offset[0] = 0;
1328   ctx->re_radius         = 0.;
1329   ctx->vperp0_radius1    = 0;
1330   ctx->vperp0_radius2    = 0;
1331   ctx->nZRefine1         = 0;
1332   ctx->nZRefine2         = 0;
1333   ctx->numRERefine       = 0;
1334   num_species_grid[0]    = 1; // one species default
1335   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1336   ctx->charges[0]        = -1;  /* electron charge (MKS) */
1337   ctx->masses[0]         = 1/1835.469965278441013; /* placeholder electron mass in proton-mass units; set to the MKS value below */
1338   ctx->n[0]              = 1;
1339   ctx->v_0               = 1; /* thermal velocity, we could start with a scale != 1 */
1340   ctx->thermal_temps[0]  = 1;
1341   /* constants, etc. */
1342   ctx->epsilon0          = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1343   ctx->k                 = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1344   ctx->lnLam             = 10;         /* cross section ratio large - small angle collisions */
1345   ctx->n_0               = 1.e20;        /* typical plasma n, but could set it to 1 */
1346   ctx->Ez                = 0;
1347   for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0;
1348   ctx->use_matrix_mass   =  PETSC_FALSE;
1349   ctx->use_relativistic_corrections = PETSC_FALSE;
1350   ctx->use_energy_tensor_trick      = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1351   ctx->SData_d.w         = NULL;
1352   ctx->SData_d.x         = NULL;
1353   ctx->SData_d.y         = NULL;
1354   ctx->SData_d.z         = NULL;
1355   ctx->SData_d.invJ      = NULL;
1356   ctx->jacobian_field_major_order     = PETSC_FALSE;
1357   ctx->SData_d.coo_elem_offsets       = NULL;
1358   ctx->SData_d.coo_elem_point_offsets = NULL;
1359   ctx->coo_assembly                   = PETSC_FALSE;
1360   ctx->SData_d.coo_elem_fullNb        = NULL;
1361   ctx->SData_d.coo_size               = 0;
1362   ierr = PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");CHKERRQ(ierr);
1363   {
1364     char opstring[256];
1365 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1366     ctx->deviceType = LANDAU_KOKKOS;
1367     ierr = PetscStrcpy(opstring,"kokkos");CHKERRQ(ierr);
1368 #elif defined(PETSC_HAVE_CUDA)
1369     ctx->deviceType = LANDAU_CUDA;
1370     ierr = PetscStrcpy(opstring,"cuda");CHKERRQ(ierr);
1371 #else
1372     ctx->deviceType = LANDAU_CPU;
1373     ierr = PetscStrcpy(opstring,"cpu");CHKERRQ(ierr);
1374 #endif
1375     ierr = PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL);CHKERRQ(ierr);
1376     ierr = PetscStrcmp("cpu",opstring,&flg);CHKERRQ(ierr);
1377     if (flg) {
1378       ctx->deviceType = LANDAU_CPU;
1379     } else {
1380       ierr = PetscStrcmp("cuda",opstring,&flg);CHKERRQ(ierr);
1381       if (flg) {
1382         ctx->deviceType = LANDAU_CUDA;
1383       } else {
1384         ierr = PetscStrcmp("kokkos",opstring,&flg);CHKERRQ(ierr);
1385         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1386         else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"Unknown -dm_landau_device_type %s",opstring);
1387       }
1388     }
1389   }
1390   ierr = PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL);CHKERRQ(ierr);
1391   ierr = PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL);CHKERRQ(ierr);
1392   ierr = PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL);CHKERRQ(ierr);
1393   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT,LANDAU_MAX_BATCH_SZ,ctx->batch_sz);
1394   ierr = PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL);CHKERRQ(ierr);
1395   PetscCheck(ctx->batch_view_idx < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT,ctx->batch_view_idx,ctx->batch_sz);
1396   ierr = PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of the Connor-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL);CHKERRQ(ierr);
1397   ierr = PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL);CHKERRQ(ierr);
1398   ierr = PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL);CHKERRQ(ierr);
1399   ierr = PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL);CHKERRQ(ierr);
1400   ierr = PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL);CHKERRQ(ierr);
1401   ierr = PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL);CHKERRQ(ierr);
1402 
1403   /* get num species with temperature, set defaults */
1404   for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1405     ctx->thermal_temps[ii] = 1;
1406     ctx->charges[ii]       = 1;
1407     ctx->masses[ii]        = 1;
1408     ctx->n[ii]             = 1;
1409   }
1410   nt = LANDAU_MAX_SPECIES;
1411   ierr = PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);CHKERRQ(ierr);
1412   if (flg) {
1413     ierr = PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt);CHKERRQ(ierr);
1414     ctx->num_species = nt;
1415   } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1416   for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1417   nm = LANDAU_MAX_SPECIES-1;
1418   ierr = PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);CHKERRQ(ierr);
1419   if (flg && nm != ctx->num_species-1) {
1420     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species-1);
1421   }
1422   nm = LANDAU_MAX_SPECIES;
1423   ierr = PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg);CHKERRQ(ierr);
1424   PetscCheckFalse(flg && nm != ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species);
1425   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1426   ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1427   ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1428   nc = LANDAU_MAX_SPECIES-1;
1429   ierr = PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);CHKERRQ(ierr);
1430   if (flg) PetscCheck(nc == ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %" PetscInt_FMT " != num species %" PetscInt_FMT,nc,ctx->num_species-1);
1431   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
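  /* Hypothetical example (illustration only): a two-species electron + deuterium setup could use
       -dm_landau_thermal_temps 2,2 -dm_landau_n 1,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
     where the number of entries in -dm_landau_thermal_temps sets the number of species. */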
1432   /* geometry and grids */
1433   nt = LANDAU_MAX_GRIDS;
1434   ierr = PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg);CHKERRQ(ierr);
1435   if (flg) {
1436     ctx->num_grids = nt;
1437     for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii];
1438     PetscCheck(ctx->num_species == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %" PetscInt_FMT ")",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS);
1439   } else {
1440     ctx->num_grids = 1; // go back to a single grid run
1441     num_species_grid[0] = ctx->num_species;
1442   }
1443   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii];
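  // e.g. num_species_grid = {1,2} yields species_offset = {0,1,3}: grid 0 holds species 0
  // (electrons) and grid 1 holds species 1 and 2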
1444   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????",ctx->species_offset[ctx->num_grids],ctx->num_species);
1445   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1446     int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid
1447     v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1448   }
1449   ii = 0;
1450   ierr = PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL);CHKERRQ(ierr);
1451   ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1452   ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
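  /* restating the line above in symbols: t_0 = 8 pi (epsilon_0 m_0 / q_0^2)^2 v_0^3 / (ln(Lambda) n_0) */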
1453   /* domain */
1454   nt = LANDAU_MAX_GRIDS;
1455   ierr = PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg);CHKERRQ(ierr);
1456   if (flg) PetscCheck(nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1457   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1458     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1459       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1460       else ctx->radius[grid] = -ctx->radius[grid];
1461       ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1462       ierr = PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",ctx->radius[grid],grid);CHKERRQ(ierr);
1463     }
1464     ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0
1465   }
1466   /* AMR parameters */
1467   nt = LANDAU_MAX_GRIDS;
1468   ierr = PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg);CHKERRQ(ierr);
1469   PetscCheckFalse(flg && nt < ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1470   nt = LANDAU_MAX_GRIDS;
1471   ierr = PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg);CHKERRQ(ierr);
1472   for (ii=1;ii<ctx->num_grids;ii++)  ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1473   ierr = PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg);CHKERRQ(ierr);
1474   ierr = PetscOptionsInt("-dm_landau_amr_z_refine1",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg);CHKERRQ(ierr);
1475   ierr = PetscOptionsInt("-dm_landau_amr_z_refine2",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg);CHKERRQ(ierr);
1476   ierr = PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg);CHKERRQ(ierr);
1477   ierr = PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg);CHKERRQ(ierr);
1478   ierr = PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg);CHKERRQ(ierr);
1479   /* spherical domain (not used) */
1480   ierr = PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);CHKERRQ(ierr);
1481   ierr = PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);CHKERRQ(ierr);
1482   ierr = PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg);CHKERRQ(ierr);
1483   ierr = PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg);CHKERRQ(ierr);
1484   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an e_radius was given without -dm_landau_sphere; assume a spherical domain was intended */
1485   if (!flg) {
1486     ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
1487   }
1488   nt = LANDAU_MAX_GRIDS;
1489   ierr = PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg);CHKERRQ(ierr);
1490   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1491   if (!flg) {
1492     ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
1493   }
1494   if (flg) PetscCheck(ctx->num_grids == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %" PetscInt_FMT " != num_grids = %" PetscInt_FMT,nt,ctx->num_grids);
1495   if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius[0],ctx->e_radius,ctx->radius[0]);
1496   /* processing options */
1497   ierr = PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);CHKERRQ(ierr);
1498   if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly uses Kokkos (even with the CPU device type)
1499     ierr = PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL);CHKERRQ(ierr);
1500     if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"COO assembly (%d) requires -dm_landau_gpu_assembly, even with the Kokkos CPU back-end",ctx->coo_assembly);
1501   }
1502   ierr = PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL);CHKERRQ(ierr);
1503   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1504   ierr = PetscOptionsEnd();CHKERRQ(ierr);
1505 
1506   for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii]  = ctx->charges[ii] = 0;
1507   if (ctx->verbose > 0) {
1508     ierr = PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0);CHKERRQ(ierr);
1509     ierr = PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0);CHKERRQ(ierr);
1510     ierr = PetscPrintf(ctx->comm, "n:             e: %10.3e                           i: %10.3e %10.3e\n", ctx->n[0],ctx->n[1],ctx->num_species>2 ? ctx->n[2] : 0);CHKERRQ(ierr);
1511     ierr = PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", ctx->thermal_temps[0], ctx->thermal_temps[1], (ctx->num_species>2) ? ctx->thermal_temps[2] : 0, ctx->v_0, ctx->v_0/SPEED_OF_LIGHT, ctx->n_0, ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz);CHKERRQ(ierr);
1512     ierr = PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",0,ctx->radius[0],ctx->numAMRRefine[0]);CHKERRQ(ierr);
1513     for (ii=1;ii<ctx->num_grids;ii++) {ierr = PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,ctx->radius[ii],ctx->numAMRRefine[ii]);CHKERRQ(ierr);}
1514     ierr = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr);
1515     if (ctx->jacobian_field_major_order) {
1516       ierr = PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n");CHKERRQ(ierr);
1517     } else {
1518       ierr = PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n");CHKERRQ(ierr);
1519     }
1520   }
1521   ierr = DMDestroy(&dummy);CHKERRQ(ierr);
1522   {
1523     PetscMPIInt    rank;
1524     ierr = MPI_Comm_rank(ctx->comm, &rank);CHKERRMPI(ierr);
1525     ctx->stage = 0;
1526     ierr = PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]);CHKERRQ(ierr); /* 13 */
1527     ierr = PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]);CHKERRQ(ierr); /* 2 */
1528     ierr = PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]);CHKERRQ(ierr); /* 12 */
1529     ierr = PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]);CHKERRQ(ierr); /* 15 */
1530     ierr = PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14]);CHKERRQ(ierr); /* 14 */
1531     ierr = PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11]);CHKERRQ(ierr); /* 11 */
1532     ierr = PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]);CHKERRQ(ierr); /* 0 */
1533     ierr = PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]);CHKERRQ(ierr); /* 9 */
1534     ierr = PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]);CHKERRQ(ierr); /* 10 */
1535     ierr = PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]);CHKERRQ(ierr); /* 7 */
1536     ierr = PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]);CHKERRQ(ierr); /* 1 */
1537     ierr = PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]);CHKERRQ(ierr); /* 3 */
1538     ierr = PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]);CHKERRQ(ierr); /* 8 */
1539     ierr = PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]);CHKERRQ(ierr); /* 4 */
1540     ierr = PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16]);CHKERRQ(ierr); /* 16 */
1541     ierr = PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]);CHKERRQ(ierr); /* 5 */
1542     ierr = PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]);CHKERRQ(ierr); /* 6 */
1543 
1544     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1545       ierr = PetscOptionsClearValue(NULL,"-snes_converged_reason");CHKERRQ(ierr);
1546       ierr = PetscOptionsClearValue(NULL,"-ksp_converged_reason");CHKERRQ(ierr);
1547       ierr = PetscOptionsClearValue(NULL,"-snes_monitor");CHKERRQ(ierr);
1548       ierr = PetscOptionsClearValue(NULL,"-ksp_monitor");CHKERRQ(ierr);
1549       ierr = PetscOptionsClearValue(NULL,"-ts_monitor");CHKERRQ(ierr);
1550       ierr = PetscOptionsClearValue(NULL,"-ts_view");CHKERRQ(ierr);
1551       ierr = PetscOptionsClearValue(NULL,"-ts_adapt_monitor");CHKERRQ(ierr);
1552       ierr = PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view");CHKERRQ(ierr);
1553       ierr = PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view");CHKERRQ(ierr);
1554       ierr = PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view");CHKERRQ(ierr);
1555       ierr = PetscOptionsClearValue(NULL,"-dm_landau_mass_view");CHKERRQ(ierr);
1556       ierr = PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view");CHKERRQ(ierr);
1557       ierr = PetscOptionsClearValue(NULL,"-dm_landau_mat_view");CHKERRQ(ierr);
1558       ierr = PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason");CHKERRQ(ierr);
1559       ierr = PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor");CHKERRQ(ierr);
1560       ierr = PetscOptionsClearValue(NULL,"-");CHKERRQ(ierr);
1561       ierr = PetscOptionsClearValue(NULL,"-info");CHKERRQ(ierr);
1562     }
1563   }
1564   PetscFunctionReturn(0);
1565 }
1566 
1567 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1568 {
1569   PetscErrorCode    ierr;
1570   PetscSection      section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
1571   PetscQuadrature   quad;
1572   const PetscReal   *quadWeights;
1573   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0;
1574   PetscTabulation   *Tf;
1575   PetscDS           prob;
1576 
1577   PetscFunctionBegin;
1578   ierr = DMGetDS(ctx->plex[0], &prob);CHKERRQ(ierr); // same DS for all grids
1579   ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr); // Bf, &Df same for all grids
1580   /* DS, Tab and quad is same on all grids */
1581   PetscCheck(ctx->plex[0],ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1582   ierr = PetscFEGetQuadrature(ctx->fe[0], &quad);CHKERRQ(ierr);
1583   ierr = PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL,  &quadWeights);CHKERRQ(ierr);
1584   PetscCheck(Nq <= LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ);
1585   /* setup each grid */
1586   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1587     PetscInt cStart, cEnd;
1588     PetscCheckFalse(ctx->plex[grid] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1589     ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
1590     numCells[grid] = cEnd - cStart; // grids can have different topology
1591     ierr = DMGetLocalSection(ctx->plex[grid], &section[grid]);CHKERRQ(ierr);
1592     ierr = DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);CHKERRQ(ierr);
1593     ierr = PetscSectionGetNumFields(section[grid], &Nf[grid]);CHKERRQ(ierr);
1594     ncellsTot += numCells[grid];
1595   }
1596 #define MAP_BF_SIZE (64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
1597   /* create GPU assembly data */
1598   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1599     PetscContainer          container;
1600     PetscScalar             elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
1601     pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
1602     P4estVertexMaps         *maps;
1603     const PetscInt          *plex_batch=NULL,Nb=Nq; // Nb == Nq for tensor-product elements
1604     LandauIdx               *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL;
1605     /* create the GPU assembly maps */
1606     ierr = PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1);CHKERRQ(ierr);
1607     ierr = PetscLogEventBegin(ctx->events[2],0,0,0,0);CHKERRQ(ierr);
1608     ierr = PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps);CHKERRQ(ierr);
1609 
1610     if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d
1611       ierr = PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets);CHKERRQ(ierr); // element offsets, per-element full sizes, and per-point offsets
1612       coo_elem_offsets[0] = 0; // finish later
1613       ierr = PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot);CHKERRQ(ierr);
1614       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1615       ctx->SData_d.coo_elem_offsets       = (void*)coo_elem_offsets;
1616       ctx->SData_d.coo_elem_fullNb        = (void*)coo_elem_fullNb;
1617       ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets;
1618     } else {
1619       ctx->SData_d.coo_elem_offsets       = ctx->SData_d.coo_elem_fullNb = NULL;
1620       ctx->SData_d.coo_elem_point_offsets = NULL;
1621       ctx->SData_d.coo_n_cellsTot         = 0;
1622     }
1623 
1624     ctx->SData_d.coo_max_fullnb = 0;
1625     for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1626       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq;
1627       if (grid_batch_is_inv[grid]) {
1628         ierr = ISGetIndices(grid_batch_is_inv[grid], &plex_batch);CHKERRQ(ierr);
1629       }
1630       ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
1631       // make maps
1632       maps[grid].d_self       = NULL;
1633       maps[grid].num_elements = numCells[grid];
1634       maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
1635       maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2
1636       maps[grid].num_reduced  = 0;
1637       maps[grid].deviceType   = ctx->deviceType;
1638       maps[grid].numgrids     = ctx->num_grids;
1639       // count reduced and get
1640       ierr = PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx);CHKERRQ(ierr);
1641       for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1642         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1643         for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1644           int fullNb = 0;
1645           for (int q = 0; q < Nb; ++q) {
1646             PetscInt    numindices,*indices;
1647             PetscScalar *valuesOrig = elMat = elemMatrix;
1648             ierr = PetscArrayzero(elMat, totDim*totDim);CHKERRQ(ierr);
1649             elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
1650             ierr = DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);CHKERRQ(ierr);
1651             for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
1652               if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
1653                 // found it
1654                 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1655                   if (plex_batch) {
1656                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]];
1657                   } else {
1658                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1659                   }
1660                   fullNb++;
1661                 } else { //found a constraint
1662                   int       jj      = 0;
1663                   PetscReal sum     = 0;
1664                   const PetscInt ff = f;
1665                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1666 
1667                   do {  // constraints are continuous in Plex - exploit that here
1668                     int ii; // get 'scale'
1669                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1670                       if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1671                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
1672                       }
1673                     }
1674                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1675                     // get 'gid'
1676                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1677                     else {
1678                       if (plex_batch) {
1679                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1680                       } else {
1681                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1682                       }
1683                       fullNb++;
1684                     }
1685                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1686                   while (jj < maps[grid].num_face) {
1687                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1688                     pointMaps[maps[grid].num_reduced][jj].gid = -1;
1689                     jj++;
1690                   }
1691                   if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug
1692                     int       d,f;
1693                     PetscReal tmp = 0;
1694                     ierr = PetscPrintf(PETSC_COMM_SELF,"\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n",eidx,q,fieldA,sum,LANDAU_MAX_Q_FACE,maps[grid].num_face);CHKERRQ(ierr);
1695                     for (d = 0, tmp = 0; d < numindices; ++d) {
1696                       if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) {ierr = PetscPrintf(PETSC_COMM_WORLD,"%3" PetscInt_FMT ") %3" PetscInt_FMT ": ",d,indices[d]);CHKERRQ(ierr);}
1697                       for (f = 0; f < numindices; ++f) {
1698                         tmp += PetscRealPart(elMat[d*numindices + f]);
1699                       }
1700                       if (tmp!=0) {ierr = PetscPrintf(ctx->comm," | %22.16e\n",tmp);CHKERRQ(ierr);}
1701                     }
1702                   }
1703                   maps[grid].num_reduced++;
1704                   PetscCheckFalse(maps[grid].num_reduced>=MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %d",maps[grid].num_reduced,MAP_BF_SIZE);
1705                 }
1706                 break;
1707               }
1708             }
1709             // cleanup
1710             ierr = DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);CHKERRQ(ierr);
1711             if (elMat != valuesOrig) {ierr = DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat);CHKERRQ(ierr);}
1712           }
1713           if (ctx->coo_assembly) { // setup COO assembly
1714             coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid
1715             if (fieldA==0) { // cache full Nb for this element, on this grid per species
1716               coo_elem_fullNb[glb_elem_idx] = fullNb;
1717               if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1718             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %" PetscInt_FMT " %" PetscInt_FMT,coo_elem_fullNb[glb_elem_idx],fullNb);
1719           }
1720         } // field
1721       } // cell
1722       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1723       ierr = PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps);CHKERRQ(ierr);
1724       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1725         for (int q = 0; q < maps[grid].num_face; ++q) {
1726           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1727           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1728         }
1729       }
1730 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1731       if (ctx->deviceType == LANDAU_KOKKOS) {
1732         ierr = LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid);CHKERRQ(ierr); // the Kokkos back-end makes the device-side copies of the maps
1733       } // else could be CUDA
1734 #endif
1735 #if defined(PETSC_HAVE_CUDA)
1736       if (ctx->deviceType == LANDAU_CUDA) {
1737         ierr = LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid);CHKERRQ(ierr);
1738       }
1739 #endif
1740       if (plex_batch) {
1741         ierr = ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch);CHKERRQ(ierr);
1742         ierr = ISDestroy(&grid_batch_is_inv[grid]);CHKERRQ(ierr); // we are done with this
1743       }
1744     } /* grids */
1745     // finish COO
1746     if (ctx->coo_assembly) { // setup COO assembly
1747       PetscInt *oor, *ooc;
1748       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz;
1749       ierr = PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc);CHKERRQ(ierr);
1750       for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1;
1751       // get
1752       for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1753         for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1754           const int              fullNb = coo_elem_fullNb[glb_elem_idx];
1755           const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use the field-0 maps; they should be the same for all fields and this is only for COO sizing
1756           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1757           for (int f=0, cnt2=0;f<Nb;f++) {
1758             int idx = Idxs[f];
1759             coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1760             if (idx >= 0) {
1761               cnt2++;
1762               coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1763             } else {
1764               idx = -idx - 1;
1765               for (int q = 0 ; q < maps[grid].num_face; q++) {
1766                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1767                 cnt2++;
1768                 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1769               }
1770             }
1771             PetscCheck(cnt2 <= fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d",fullNb,cnt2);
1772           }
1773           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb]==fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d",coo_elem_point_offsets[glb_elem_idx][Nb],fullNb);
1774         }
1775       }
1776       // set
1777       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
1778         for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1779           const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
1780           for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1781             const int  fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
1782             // set (i,j)
1783             for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1784               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1785               int                    rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
1786               for (int f = 0; f < Nb; ++f) {
1787                 const int nr =  coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f];
1788                 if (nr==1) rows[0] = Idxs[f];
1789                 else {
1790                   const int idx = -Idxs[f] - 1;
1791                   for (int q = 0; q < nr; q++) {
1792                     rows[q] = maps[grid].c_maps[idx][q].gid;
1793                   }
1794                 }
1795                 for (int g = 0; g < Nb; ++g) {
1796                   const int nc =  coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g];
1797                   if (nc==1) cols[0] = Idxs[g];
1798                   else {
1799                     const int idx = -Idxs[g] - 1;
1800                     for (int q = 0; q < nc; q++) {
1801                       cols[q] = maps[grid].c_maps[idx][q].gid;
1802                     }
1803                   }
1804                   const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1805                   for (int q = 0, idx = idx0; q < nr; q++) {
1806                     for (int d = 0; d < nc; d++, idx++) {
1807                       oor[idx] = rows[q] + moffset;
1808                       ooc[idx] = cols[d] + moffset;
1809                     }
1810                   }
1811                 }
1812               }
1813             }
1814           } // cell
1815         } // grid
1816       } // batch
1817       ierr = MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc);CHKERRQ(ierr);
1818       ierr = PetscFree2(oor,ooc);CHKERRQ(ierr);
1819     }
1820     ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
1821     ierr = PetscContainerSetPointer(container, (void *)maps);CHKERRQ(ierr);
1822     ierr = PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy);CHKERRQ(ierr);
1823     ierr = PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container);CHKERRQ(ierr);
1824     ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
1825     ierr = PetscLogEventEnd(ctx->events[2],0,0,0,0);CHKERRQ(ierr);
1826   } // end GPU assembly
1827   { /* create static point data, Jacobian called first, only one vertex copy */
1828     PetscReal      *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a;
1829     PetscInt       outer_ipidx, outer_ej,grid, nip_glb = 0;
1830     PetscFE        fe;
1831     const PetscInt Nb = Nq;
1832     ierr = PetscLogEventBegin(ctx->events[7],0,0,0,0);CHKERRQ(ierr);
1833     ierr = PetscInfo(ctx->plex[0], "Initialize static data\n");CHKERRQ(ierr);
1834     for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid];
1835     /* collect f data, first time is for Jacobian, but make mass now */
1836     if (ctx->verbose > 0) {
1837       PetscInt ncells = 0, N;
1838       ierr = MatGetSize(ctx->J,&N,NULL);CHKERRQ(ierr);
1839       for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid];
1840       ierr = PetscPrintf(ctx->comm,"%" PetscInt_FMT ") %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",
1841                          0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N);CHKERRQ(ierr);
1842     }
1843     ierr = PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a);CHKERRQ(ierr);
1844     if (dim==3) {
1845       ierr = PetscMalloc1(nip_glb,&zz);CHKERRQ(ierr);
1846     }
1847     if (ctx->use_energy_tensor_trick) {
1848       ierr = PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe);CHKERRQ(ierr);
1849       ierr = PetscObjectSetName((PetscObject) fe, "energy");CHKERRQ(ierr);
1850     }
1851     /* init each grid's static data - no batch */
1852     for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once)
1853       Vec             v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1854       PetscSection    e_section;
1855       DM              dmEnergy;
1856       PetscInt        cStart, cEnd, ej;
1857 
1858       ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
1859       // prep energy trick, get v^2 / 2 vector
1860       if (ctx->use_energy_tensor_trick) {
1861         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1862         Vec            glob_v2;
1863         PetscReal      *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1864 
1865         ierr = DMClone(ctx->plex[grid], &dmEnergy);CHKERRQ(ierr);
1866         ierr = PetscObjectSetName((PetscObject) dmEnergy, "energy");CHKERRQ(ierr);
1867         ierr = DMSetField(dmEnergy, 0, NULL, (PetscObject)fe);CHKERRQ(ierr);
1868         ierr = DMCreateDS(dmEnergy);CHKERRQ(ierr);
1869         ierr = DMGetSection(dmEnergy, &e_section);CHKERRQ(ierr);
1870         ierr = DMGetGlobalVector(dmEnergy,&glob_v2);CHKERRQ(ierr);
1871         ierr = PetscObjectSetName((PetscObject) glob_v2, "trick");CHKERRQ(ierr);
1872         c2_0[0] = &data[0];
1873         ierr = DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2);CHKERRQ(ierr);
1874         ierr = DMGetLocalVector(dmEnergy, &v2_2);CHKERRQ(ierr);
1875         ierr = VecZeroEntries(v2_2);CHKERRQ(ierr); /* zero BCs so don't set */
1876         ierr = DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2);CHKERRQ(ierr);
1877         ierr = DMGlobalToLocalEnd  (dmEnergy, glob_v2, INSERT_VALUES, v2_2);CHKERRQ(ierr);
1878         ierr = DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view");CHKERRQ(ierr);
1879         ierr = VecViewFromOptions(glob_v2,NULL, "-energy_vec_view");CHKERRQ(ierr);
1880         ierr = DMRestoreGlobalVector(dmEnergy, &glob_v2);CHKERRQ(ierr);
1881       }
1882       /* append part of the IP data for each grid */
1883       for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) {
1884         PetscScalar *coefs = NULL;
1885         PetscReal    vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1886         invJe = invJ_a + outer_ej*Nq*dim*dim;
1887         ierr = DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj);CHKERRQ(ierr);
1888         if (ctx->use_energy_tensor_trick) {
1889           ierr = DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);CHKERRQ(ierr);
1890         }
1891         /* create static point data */
1892         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1893           const PetscInt  gidx = outer_ipidx;
1894           const PetscReal *invJ = &invJe[qj*dim*dim];
1895           ww    [gidx] = detJj[qj] * quadWeights[qj];
1896           if (dim==2) ww    [gidx] *=              vj[qj * dim + 0];  /* cylindrical coordinate, w/o 2pi */
1897           // get xx, yy, zz
1898           if (ctx->use_energy_tensor_trick) {
1899             double                  refSpaceDer[3],eGradPhi[3];
1900             const PetscReal * const DD = Tf[0]->T[1];
1901             const PetscReal         *Dq = &DD[qj*Nb*dim];
1902             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1903             for (int b = 0; b < Nb; ++b) {
1904               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]);
1905             }
1906             xx[gidx] = 1e10;
1907             if (ctx->use_relativistic_corrections) {
1908               double dg2_c2 = 0;
1909               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1910               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1911               dg2_c2 *= (double)c02;
1912               if (dg2_c2 >= .999) {
1913                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1914                 yy[gidx] = vj[qj * dim + 1];
1915                 if (dim==3) zz[gidx] = vj[qj * dim + 2];
1916                 ierr = PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, xx[gidx],yy[gidx],zz[gidx]);CHKERRQ(ierr);
1917               } else {
1918                 PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2);
1919                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1920                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1921               }
1922             }
1923             if (xx[gidx] == 1e10) {
1924               for (int d = 0; d < dim; ++d) {
1925                 for (int e = 0 ; e < dim; ++e) {
1926                   eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e];
1927                 }
1928               }
1929               xx[gidx] = eGradPhi[0];
1930               yy[gidx] = eGradPhi[1];
1931               if (dim==3) zz[gidx] = eGradPhi[2];
1932             }
1933           } else {
1934             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1935             yy[gidx] = vj[qj * dim + 1];
1936             if (dim==3) zz[gidx] = vj[qj * dim + 2];
1937           }
1938         } /* q */
1939         if (ctx->use_energy_tensor_trick) {
1940           ierr = DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);CHKERRQ(ierr);
1941         }
1942       } /* ej */
1943       if (ctx->use_energy_tensor_trick) {
1944         ierr = DMRestoreLocalVector(dmEnergy, &v2_2);CHKERRQ(ierr);
1945         ierr = DMDestroy(&dmEnergy);CHKERRQ(ierr);
1946       }
1947     } /* grid */
1948     if (ctx->use_energy_tensor_trick) {
1949       ierr = PetscFEDestroy(&fe);CHKERRQ(ierr);
1950     }
1951     /* cache static data */
1952     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1953 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1954       PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1955       for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1956         for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) {
1957           invMass[ii]  = ctx->m_0/ctx->masses[ii];
1958           nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii];
1959           nu_beta[ii]  = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
1960         }
1961       }
1962       if (ctx->deviceType == LANDAU_CUDA) {
1963 #if defined(PETSC_HAVE_CUDA)
1964         ierr = LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1965                                        nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d);CHKERRQ(ierr);
1966 #else
1967         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
1968 #endif
1969       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1970 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1971         ierr = LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1972                                          nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d);CHKERRQ(ierr);
1973 #else
1974         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
1975 #endif
1976       }
1977 #endif
1978       /* free */
1979       ierr = PetscFree4(ww,xx,yy,invJ_a);CHKERRQ(ierr);
1980       if (dim==3) {
1981         ierr = PetscFree(zz);CHKERRQ(ierr);
1982       }
1983     } else { /* CPU version, just copy in, only use part */
1984       ctx->SData_d.w = (void*)ww;
1985       ctx->SData_d.x = (void*)xx;
1986       ctx->SData_d.y = (void*)yy;
1987       ctx->SData_d.z = (void*)zz;
1988       ctx->SData_d.invJ = (void*)invJ_a;
1989     }
1990     ierr = PetscLogEventEnd(ctx->events[7],0,0,0,0);CHKERRQ(ierr);
1991   } // initialize
1992   PetscFunctionReturn(0);
1993 }
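
/*
   Note on the static integration-point data cached above (a descriptive summary, not in the original source):
   for every cell/quadrature point the code stores the quadrature weight ww = detJ * w_q (times the radial
   coordinate r in 2D, since the axisymmetric volume element carries a factor of r), and the point "coordinates"
   xx, yy, (zz).  With ctx->use_energy_tensor_trick these are not the raw coordinates but the physical-space
   gradient of a projected energy field (with an optional relativistic correction factor), which is what the
   Landau tensor kernels consume.  On CUDA/Kokkos the arrays are pushed to the device and the host copies are
   freed; on the CPU they are simply stashed in ctx->SData_d.
*/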
1994 
1995 /* < v, u > */
1996 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1997                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1998                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1999                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
2000 {
2001   g0[0] = 1.;
2002 }
2003 
2004 /* fake weight: returns a distinct nonzero value on each call; only used to build a matrix whose sparsity pattern drives the RCM reordering */
2005 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2006                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2007                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2008                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
2009 {
2010   static double ttt = 1;
2011   g0[0] = ttt++;
2012 }
2013 
2014 /* < v, 2 pi r u >, the axisymmetric (cylindrical) mass weight */
2015 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2016                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2017                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2018                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
2019 {
2020   g0[0] = 2.*PETSC_PI*x[0];
2021 }
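
/*
   The g0 point functions above are mass-like weights (a descriptive note, not in the original source):
   g0_1 is the plain 3D mass integrand and g0_r the 2D axisymmetric integrand with the 2*pi*r volume element,
   so the assembled entries are roughly

      M_ij = sum_q w_q detJ_q [2*pi*r_q] phi_i(x_q) phi_j(x_q),

   while g0_fake only generates a nonzero sparsity pattern for the RCM reordering in LandauCreateMatrix().
*/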
2022 
2023 static PetscErrorCode MatrixNfDestroy(void *ptr)
2024 {
2025   PetscInt *nf = (PetscInt *)ptr;
2026   PetscErrorCode  ierr;
2027   PetscFunctionBegin;
2028   ierr = PetscFree(nf);CHKERRQ(ierr);
2029   PetscFunctionReturn(0);
2030 }
2031 
2032 static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
2033 {
2034   PetscErrorCode ierr;
2035   PetscInt       *idxs=NULL;
2036   Mat            subM[LANDAU_MAX_GRIDS];
2037 
2038   PetscFunctionBegin;
2039   if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2040     PetscFunctionReturn(0);
2041   }
2042   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
2043   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2044     ierr = PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs);CHKERRQ(ierr);
2045   }
2046   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2047     const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid];
2048     Mat             gMat;
2049     DM              massDM;
2050     PetscDS         prob;
2051     Vec             tvec;
2052     // get "mass" matrix for reordering
2053     ierr = DMClone(ctx->plex[grid], &massDM);CHKERRQ(ierr);
2054     ierr = DMCopyFields(ctx->plex[grid], massDM);CHKERRQ(ierr);
2055     ierr = DMCreateDS(massDM);CHKERRQ(ierr);
2056     ierr = DMGetDS(massDM, &prob);CHKERRQ(ierr);
2057     for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2058       ierr = PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL);CHKERRQ(ierr);
2059     }
2060     ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only");CHKERRQ(ierr);
2061     ierr = DMSetFromOptions(massDM);CHKERRQ(ierr);
2062     ierr = DMCreateMatrix(massDM, &gMat);CHKERRQ(ierr);
2063     ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only false");CHKERRQ(ierr);
2064     ierr = MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);CHKERRQ(ierr);
2065     ierr = MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
2066     ierr = DMCreateLocalVector(ctx->plex[grid],&tvec);CHKERRQ(ierr);
2067     ierr = DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx);CHKERRQ(ierr);
2068     ierr = MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view");CHKERRQ(ierr);
2069     ierr = DMDestroy(&massDM);CHKERRQ(ierr);
2070     ierr = VecDestroy(&tvec);CHKERRQ(ierr);
2071     subM[grid] = gMat;
2072     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2073       MatOrderingType rtype = MATORDERINGRCM;
2074       IS              isrow,isicol;
2075       ierr = MatGetOrdering(gMat,rtype,&isrow,&isicol);CHKERRQ(ierr);
2076       ierr = ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid]);CHKERRQ(ierr);
2077       ierr = ISGetIndices(isrow, &values);CHKERRQ(ierr);
2078       for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // add batch size DMs for this species grid
2079 #if !defined(LANDAU_SPECIES_MAJOR)
2080         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N;
2081         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2082 #else
2083         PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n;
2084         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2085 #endif
2086       }
2087       ierr = ISRestoreIndices(isrow, &values);CHKERRQ(ierr);
2088       ierr = ISDestroy(&isrow);CHKERRQ(ierr);
2089       ierr = ISDestroy(&isicol);CHKERRQ(ierr);
2090     }
2091   }
2092   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2093     ierr = ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is);CHKERRQ(ierr);
2094   }
2095   // get a block matrix
2096   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2097     Mat               B = subM[grid];
2098     PetscInt          nloc, nzl, colbuf[1024], row;
2099     ierr = MatGetSize(B, &nloc, NULL);CHKERRQ(ierr);
2100     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2101       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2102       const PetscInt    *cols;
2103       const PetscScalar *vals;
2104       for (int i=0 ; i<nloc ; i++) {
2105         ierr = MatGetRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
2106         PetscCheck(nzl<=1024,comm, PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
2107         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2108         row = i + moffset;
2109         ierr = MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES);CHKERRQ(ierr);
2110         ierr = MatRestoreRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
2111       }
2112     }
2113   }
2114   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2115     ierr = MatDestroy(&subM[grid]);CHKERRQ(ierr);
2116   }
2117   ierr = MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2118   ierr = MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2119 
2120   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2121     Mat            mat_block_order;
2122     ierr = MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order);CHKERRQ(ierr); // use MatPermute
2123     ierr = MatViewFromOptions(mat_block_order, NULL, "-dm_landau_field_major_mat_view");CHKERRQ(ierr);
2124     ierr = MatDestroy(&ctx->J);CHKERRQ(ierr);
2125     ctx->J = mat_block_order;
2126     // override ops to make KSP work in field major space
2127     ctx->seqaij_mult                  = mat_block_order->ops->mult;
2128     mat_block_order->ops->mult        = LandauMatMult;
2129     mat_block_order->ops->multadd     = LandauMatMultAdd;
2130     ctx->seqaij_solve                 = NULL;
2131     ctx->seqaij_getdiagonal           = mat_block_order->ops->getdiagonal;
2132     mat_block_order->ops->getdiagonal = LandauMatGetDiagonal;
2133     ctx->seqaij_multtranspose         = mat_block_order->ops->multtranspose;
2134     mat_block_order->ops->multtranspose = LandauMatMultTranspose;
2135     ierr = VecDuplicate(X,&ctx->work_vec);CHKERRQ(ierr);
2136     ierr = VecScatterCreate(X, ctx->batch_is, ctx->work_vec, NULL, &ctx->plex_batch);CHKERRQ(ierr);
2137   }
2138 
2139   PetscFunctionReturn(0);
2140 }
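
/*
   Ordering produced above (a descriptive note, not in the original source): for each grid an RCM permutation
   is computed from a throw-away "mass" matrix and replicated over the batch to build ctx->batch_is.  The packed
   Jacobian ctx->J is then permuted into this field/block-major order with MatCreateSubMatrix(), and its
   mult/multtranspose/getdiagonal operations are wrapped so that Krylov solvers see the permuted matrix while
   vectors are scattered through ctx->plex_batch.
*/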
2141 
2142 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2143 /*@C
2144  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
2145 
2146  Collective on comm
2147 
2148  Input Parameters:
2149  +   comm  - The MPI communicator
2150  .   dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2151  -   prefix - prefix for options (not tested)
2152 
2153  Output Parameters:
2154  +   pack  - The DM object representing the mesh
2155  .   X - A vector (user destroys)
2156  -   J - Optional matrix (object destroys)
2157 
2158  Level: beginner
2159 
2160  .keywords: mesh
2161  .seealso: DMPlexCreate(), DMPlexLandauDestroyVelocitySpace()
2162  @*/
2163 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2164 {
2165   PetscErrorCode ierr;
2166   LandauCtx      *ctx;
2167   Vec            Xsub[LANDAU_MAX_GRIDS];
2168   IS             grid_batch_is_inv[LANDAU_MAX_GRIDS];
2169 
2170   PetscFunctionBegin;
2171   PetscCheckFalse(dim!=2 && dim!=3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
2172   PetscCheck(LANDAU_DIM == dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d",dim,LANDAU_DIM);
2173   ierr = PetscNew(&ctx);CHKERRQ(ierr);
2174   ctx->comm = comm; /* used for diagnostics and global errors */
2175   /* process options */
2176   ierr = ProcessOptions(ctx,prefix);CHKERRQ(ierr);
2177   if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
2178   /* Create Mesh */
2179   ierr = DMCompositeCreate(PETSC_COMM_SELF,pack);CHKERRQ(ierr);
2180   ierr = PetscLogEventBegin(ctx->events[13],0,0,0,0);CHKERRQ(ierr);
2181   ierr = PetscLogEventBegin(ctx->events[15],0,0,0,0);CHKERRQ(ierr);
2182   ierr = LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack);CHKERRQ(ierr); // creates grids (Forest of AMR)
2183   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2184     /* create FEM */
2185     ierr = SetupDS(ctx->plex[grid],dim,grid,ctx);CHKERRQ(ierr);
2186     /* set initial state */
2187     ierr = DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]);CHKERRQ(ierr);
2188     ierr = PetscObjectSetName((PetscObject) Xsub[grid], "u_orig");CHKERRQ(ierr);
2189     /* initial static refinement, no solve */
2190     ierr = LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx);CHKERRQ(ierr);
2191     /* forest refinement - forest goes in (if forest), plex comes out */
2192     if (ctx->use_p4est) {
2193       DM plex;
2194       ierr = adapt(grid,ctx,&Xsub[grid]);CHKERRQ(ierr); // forest goes in, plex comes out
2195       ierr = DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view");CHKERRQ(ierr); // need to differentiate - todo
2196       ierr = VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view");CHKERRQ(ierr);
2197       // convert to plex, all done with this level
2198       ierr = DMConvert(ctx->plex[grid], DMPLEX, &plex);CHKERRQ(ierr);
2199       ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
2200       ctx->plex[grid] = plex;
2201     }
2202 #if !defined(LANDAU_SPECIES_MAJOR)
2203     ierr = DMCompositeAddDM(*pack,ctx->plex[grid]);CHKERRQ(ierr);
2204 #else
2205     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2206       ierr = DMCompositeAddDM(*pack,ctx->plex[grid]);CHKERRQ(ierr);
2207     }
2208 #endif
2209     ierr = DMSetApplicationContext(ctx->plex[grid], ctx);CHKERRQ(ierr);
2210   }
2211 #if !defined(LANDAU_SPECIES_MAJOR)
2212   // stack the batched DMs, could do it all here!!! b_id=0
2213   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2214     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2215       ierr = DMCompositeAddDM(*pack,ctx->plex[grid]);CHKERRQ(ierr);
2216     }
2217   }
2218 #endif
2219   // create ctx->mat_offset
2220   ctx->mat_offset[0] = 0;
2221   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2222     PetscInt    n;
2223     ierr = VecGetLocalSize(Xsub[grid],&n);CHKERRQ(ierr);
2224     ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n;
2225   }
2226   // create DM & Jac
2227   ierr = DMSetApplicationContext(*pack, ctx);CHKERRQ(ierr);
2228   ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only");CHKERRQ(ierr);
2229   ierr = DMSetFromOptions(*pack);CHKERRQ(ierr);
2230   ierr = DMCreateMatrix(*pack, &ctx->J);CHKERRQ(ierr);
2231   ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only false");CHKERRQ(ierr);
2232   ierr = MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);CHKERRQ(ierr);
2233   ierr = MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
2234   ierr = PetscObjectSetName((PetscObject)ctx->J, "Jac");CHKERRQ(ierr);
2235   // construct initial conditions in X
2236   ierr = DMCreateGlobalVector(*pack,X);CHKERRQ(ierr);
2237   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2238     PetscInt n;
2239     ierr = VecGetLocalSize(Xsub[grid],&n);CHKERRQ(ierr);
2240     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2241       PetscScalar const *values;
2242       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2243       ierr = LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx);CHKERRQ(ierr);
2244       ierr = VecGetArrayRead(Xsub[grid],&values);CHKERRQ(ierr);
2245       for (int i=0, idx = moffset; i<n; i++, idx++) {
2246         ierr = VecSetValue(*X,idx,values[i],INSERT_VALUES);CHKERRQ(ierr);
2247       }
2248       ierr = VecRestoreArrayRead(Xsub[grid],&values);CHKERRQ(ierr);
2249     }
2250   }
2251   // cleanup
2252   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2253     ierr = VecDestroy(&Xsub[grid]);CHKERRQ(ierr);
2254   }
2255   /* check for correct matrix type */
2256   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2257     PetscBool flg;
2258     if (ctx->deviceType == LANDAU_CUDA) {
2259       ierr = PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");CHKERRQ(ierr);
2260       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
2261     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2262       ierr = PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");CHKERRQ(ierr);
2263 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2264       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2265 #else
2266       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2267 #endif
2268     }
2269   }
2270   ierr = PetscLogEventEnd(ctx->events[15],0,0,0,0);CHKERRQ(ierr);
2271   // create field major ordering
2272 
2273   ctx->work_vec   = NULL;
2274   ctx->plex_batch = NULL;
2275   ctx->batch_is   = NULL;
2276   for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL;
2277   ierr = PetscLogEventBegin(ctx->events[12],0,0,0,0);CHKERRQ(ierr);
2278   ierr = LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx);CHKERRQ(ierr);
2279   ierr = PetscLogEventEnd(ctx->events[12],0,0,0,0);CHKERRQ(ierr);
2280 
2281   // create AMR GPU assembly maps and static GPU data
2282   ierr = CreateStaticGPUData(dim,grid_batch_is_inv,ctx);CHKERRQ(ierr);
2283 
2284   ierr = PetscLogEventEnd(ctx->events[13],0,0,0,0);CHKERRQ(ierr);
2285 
2286   // create mass matrix
2287   ierr = DMPlexLandauCreateMassMatrix(*pack, NULL);CHKERRQ(ierr);
2288 
2289   if (J) *J = ctx->J;
2290 
2291   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2292     PetscContainer container;
2293     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2294     ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
2295     ierr = PetscContainerSetPointer(container, (void *)ctx);CHKERRQ(ierr);
2296     ierr = PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container);CHKERRQ(ierr);
2297     ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
2298     // cache the plex_batch scatter so batch solvers can map vectors to/from the field-major ordering
2299     ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
2300     ierr = PetscContainerSetPointer(container, (void *)ctx->plex_batch);CHKERRQ(ierr);
2301     ierr = PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container);CHKERRQ(ierr);
2302     ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
2303   }
2304   // for batch solvers
2305   {
2306     PetscContainer  container;
2307     PetscInt        *pNf;
2308     ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
2309     ierr = PetscMalloc1(1, &pNf);CHKERRQ(ierr);
2310     *pNf = ctx->batch_sz;
2311     ierr = PetscContainerSetPointer(container, (void *)pNf);CHKERRQ(ierr);
2312     ierr = PetscContainerSetUserDestroy(container, MatrixNfDestroy);CHKERRQ(ierr);
2313     ierr = PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container);CHKERRQ(ierr);
2314     ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
2315   }
2316 
2317   PetscFunctionReturn(0);
2318 }
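
/*
   Minimal driver sketch (illustrative only, not part of this file): how the routines here are typically wired
   into a TS.  Names such as 'ts' and the prefix "" are placeholders.

     PetscErrorCode ierr;
     DM             pack;
     Vec            X;
     Mat            J;
     TS             ts;
     ierr = DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack);CHKERRQ(ierr);
     ierr = TSCreate(PETSC_COMM_SELF, &ts);CHKERRQ(ierr);
     ierr = TSSetDM(ts, pack);CHKERRQ(ierr);
     ierr = TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL);CHKERRQ(ierr);
     ierr = TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL);CHKERRQ(ierr);
     ierr = TSSetFromOptions(ts);CHKERRQ(ierr);
     ierr = TSSolve(ts, X);CHKERRQ(ierr);
     ierr = DMPlexLandauPrintNorms(X, 0);CHKERRQ(ierr);
     ierr = TSDestroy(&ts);CHKERRQ(ierr);
     ierr = VecDestroy(&X);CHKERRQ(ierr);
     ierr = DMPlexLandauDestroyVelocitySpace(&pack);CHKERRQ(ierr);
*/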
2319 
2320 /*@
2321  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2322 
2323  Collective on dm
2324 
2325  Input/Output Parameter:
2326  .   dm - the dm to destroy
2327 
2328  Level: beginner
2329 
2330  .keywords: mesh
2331  .seealso: DMPlexLandauCreateVelocitySpace()
2332  @*/
2333 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2334 {
2335   PetscErrorCode ierr;
       PetscInt       ii;
2336   LandauCtx      *ctx;
2337   PetscFunctionBegin;
2338   ierr = DMGetApplicationContext(*dm, &ctx);CHKERRQ(ierr);
2339   ierr = MatDestroy(&ctx->M);CHKERRQ(ierr);
2340   ierr = MatDestroy(&ctx->J);CHKERRQ(ierr);
2341   for (ii=0;ii<ctx->num_species;ii++) {
2342     ierr = PetscFEDestroy(&ctx->fe[ii]);CHKERRQ(ierr);
2343   }
2344   ierr = ISDestroy(&ctx->batch_is);CHKERRQ(ierr);
2345   ierr = VecDestroy(&ctx->work_vec);CHKERRQ(ierr);
2346   ierr = VecScatterDestroy(&ctx->plex_batch);CHKERRQ(ierr);
2347   if (ctx->deviceType == LANDAU_CUDA) {
2348 #if defined(PETSC_HAVE_CUDA)
2349     ierr = LandauCUDAStaticDataClear(&ctx->SData_d);CHKERRQ(ierr);
2350 #else
2351     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
2352 #endif
2353   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2354 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2355     ierr = LandauKokkosStaticDataClear(&ctx->SData_d);CHKERRQ(ierr);
2356 #else
2357     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
2358 #endif
2359   } else {
2360     if (ctx->SData_d.x) { /* in a CPU run */
2361       PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
2362       LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
2363       ierr = PetscFree4(ww,xx,yy,invJ);CHKERRQ(ierr);
2364       if (zz) {
2365         ierr = PetscFree(zz);CHKERRQ(ierr);
2366       }
2367       if (coo_elem_offsets) {
2368         ierr = PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets);CHKERRQ(ierr); // could be NULL
2369       }
2370     }
2371   }
2372 
2373   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2374     ierr = PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE]);CHKERRQ(ierr);
2375     ierr = PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz);CHKERRQ(ierr);
2376     ierr = PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL]);CHKERRQ(ierr);
2377     ierr = PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN]);CHKERRQ(ierr);
2378     ierr = PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n",ctx->times[LANDAU_OPERATOR]);CHKERRQ(ierr);
2379     ierr = PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n",ctx->times[LANDAU_MASS]);CHKERRQ(ierr);
2380     ierr = PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n",ctx->times[LANDAU_F_DF]);CHKERRQ(ierr);
2381     ierr = PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n",ctx->times[LANDAU_KERNEL]);CHKERRQ(ierr);
2382     ierr = PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n",ctx->times[KSP_FACTOR]);CHKERRQ(ierr);
2383     ierr = PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n",ctx->times[KSP_SOLVE]);CHKERRQ(ierr);
2384   }
2385   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2386     ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
2387   }
2388   ierr = PetscFree(ctx);CHKERRQ(ierr);
2389   ierr = DMDestroy(dm);CHKERRQ(ierr);
2390   PetscFunctionReturn(0);
2391 }
2392 
2393 /* < v, u > (density) */
2394 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2395                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2396                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2397                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2398 {
2399   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2400   f0[0] = u[ii];
2401 }
2402 
2403 /* < v, x_j u > (momentum component) */
2404 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2405                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2406                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2407                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2408 {
2409   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2410   f0[0] = x[jj]*u[ii]; /* momentum component selected by constants[1] */
2411 }
2412 
2413 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2414                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2415                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2416                     PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2417 {
2418   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2419   double tmp1 = 0.;
2420   for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2421   f0[0] = tmp1*u[ii];
2422 }
2423 
2424 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2425 {
2426   const PetscReal *c2_0_arr = ((PetscReal*)actx);
2427   const PetscReal c02 = c2_0_arr[0];
2428 
2429   PetscFunctionBegin;
2430   for (int s = 0 ; s < Nf ; s++) {
2431     PetscReal tmp1 = 0.;
2432     for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2433 #if defined(PETSC_USE_DEBUG)
2434     u[s] = PetscSqrtReal(1. + tmp1/c02);//  u[0] = PetscSqrtReal(1. + xx);
2435 #else
2436     {
2437       PetscReal xx = tmp1/c02;
2438       u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // equals PetscSqrtReal(1. + xx) - 1., in a form better conditioned for small xx
2439     }
2440 #endif
2441   }
2442   PetscFunctionReturn(0);
2443 }
2444 
2445 /* < v, ru > */
2446 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2447                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2448                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2449                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2450 {
2451   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2452   f0[0] = 2.*PETSC_PI*x[0]*u[ii];
2453 }
2454 
2455 /* < v, ru > */
2456 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2457                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2458                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2459                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2460 {
2461   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2462   f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
2463 }
2464 
2465 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2466                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2467                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2468                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2469 {
2470   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2471   f0[0] =  2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
2472 }
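
/*
   The f0_s_* objectives above are the moment integrands used by DMPlexLandauPrintNorms() below (a descriptive
   note, not in the original source).  The r-weighted variants are the 2D axisymmetric forms carrying the 2*pi*r
   volume element, so, up to the n_0, v_0, charge and mass scalings applied by the caller, they evaluate

      density_s    ~ int 2 pi r f_s dr dz
      z-momentum_s ~ int 2 pi r z f_s dr dz
      energy_s     ~ int 2 pi r (r^2 + z^2) f_s dr dz   (times 1/2 m v_0^2 in the caller),

   while the unweighted variants are the corresponding 3D integrals.
*/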
2473 
2474 /*@
2475  DMPlexLandauPrintNorms - collects moments and prints them
2476 
2477  Collective on dm
2478 
2479  Input Parameters:
2480  +   X  - the state
2481  -   stepi - current step to print
2482 
2483  Level: beginner
2484 
2485  .keywords: mesh
2486  .seealso: DMPlexLandauCreateVelocitySpace()
2487  @*/
2488 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2489 {
2490   PetscErrorCode ierr;
2491   LandauCtx      *ctx;
2492   PetscDS        prob;
2493   DM             pack;
2494   PetscInt       cStart, cEnd, dim, ii, i0, nDMs;
2495   PetscScalar    xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
2496   PetscScalar    xmomentum[LANDAU_MAX_SPECIES],  ymomentum[LANDAU_MAX_SPECIES],  zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2497   Vec            *globXArray;
2498 
2499   PetscFunctionBegin;
2500   ierr = VecGetDM(X, &pack);CHKERRQ(ierr);
2501   PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2502   ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
2503   PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]",dim);
2504   ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
2505   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2506   /* print momentum and energy */
2507   ierr = DMCompositeGetNumberDM(pack,&nDMs);CHKERRQ(ierr);
2508   PetscCheck(nDMs == ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT,nDMs,ctx->num_grids*ctx->batch_sz);
2509   ierr = PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray);CHKERRQ(ierr);
2510   ierr = DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray);CHKERRQ(ierr);
2511   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
2512     Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2513     ierr = DMGetDS(ctx->plex[grid], &prob);CHKERRQ(ierr);
2514     for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
2515       PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2516       ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
2517       if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */
2518         ierr = PetscDSSetObjective(prob, 0, &f0_s_rden);CHKERRQ(ierr);
2519         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2520         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2521         ierr = PetscDSSetObjective(prob, 0, &f0_s_rmom);CHKERRQ(ierr);
2522         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2523         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2524         ierr = PetscDSSetObjective(prob, 0, &f0_s_rv2);CHKERRQ(ierr);
2525         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2526         energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2527         zmomentumtot += zmomentum[ii];
2528         energytot  += energy[ii];
2529         densitytot += density[ii];
2530         ierr = PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));CHKERRQ(ierr);
2531       } else { /* 2/3Xloc + 3V */
2532         ierr = PetscDSSetObjective(prob, 0, &f0_s_den);CHKERRQ(ierr);
2533         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2534         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2535         ierr = PetscDSSetObjective(prob, 0, &f0_s_mom);CHKERRQ(ierr);
2536         user[1] = 0;
2537         ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
2538         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2539         xmomentum[ii]  = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2540         user[1] = 1;
2541         ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
2542         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2543         ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2544         user[1] = 2;
2545         ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
2546         ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2547         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2548         if (ctx->use_relativistic_corrections) {
2549           /* gamma * M * f */
2550           if (ii==0 && grid==0) { // do all at once
2551             Vec            Mf, globGamma, *globMfArray, *globGammaArray;
2552             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
2553             PetscReal      *c2_0[1], data[1];
2554 
2555             ierr = VecDuplicate(X,&globGamma);CHKERRQ(ierr);
2556             ierr = VecDuplicate(X,&Mf);CHKERRQ(ierr);
2557             ierr = PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray);CHKERRQ(ierr);
2558             ierr = PetscMalloc(sizeof(*globGammaArray)*nDMs, &globGammaArray);CHKERRQ(ierr);
2559             /* M * f */
2560             ierr = MatMult(ctx->M,X,Mf);CHKERRQ(ierr);
2561             /* gamma */
2562             ierr = DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
2563             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching
2564               Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2565               data[0] = PetscSqr(C_0(ctx->v_0));
2566               c2_0[0] = &data[0];
2567               ierr = DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1);CHKERRQ(ierr);
2568             }
2569             ierr = DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
2570             /* gamma * Mf */
2571             ierr = DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
2572             ierr = DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray);CHKERRQ(ierr);
2573             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice
2574               PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
2575               Vec      Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2;
2576               // get each component
2577               ierr = VecGetSize(Mfsub,&N);CHKERRQ(ierr);
2578               ierr = VecCreate(ctx->comm,&v1);CHKERRQ(ierr);
2579               ierr = VecSetSizes(v1,PETSC_DECIDE,N/Nf);CHKERRQ(ierr);
2580               ierr = VecCreate(ctx->comm,&v2);CHKERRQ(ierr);
2581               ierr = VecSetSizes(v2,PETSC_DECIDE,N/Nf);CHKERRQ(ierr);
2582               ierr = VecSetFromOptions(v1);CHKERRQ(ierr); // ???
2583               ierr = VecSetFromOptions(v2);CHKERRQ(ierr);
2584               // get each component
2585               ierr = VecGetBlockSize(Gsub,&bs);CHKERRQ(ierr);
2586               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub",bs,Nf);
2587               ierr = VecGetBlockSize(Mfsub,&bs);CHKERRQ(ierr);
2588               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT,bs,Nf);
2589               for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
2590                 PetscScalar val;
2591                 ierr = VecStrideGather(Gsub,i,v1,INSERT_VALUES);CHKERRQ(ierr);
2592                 ierr = VecStrideGather(Mfsub,i,v2,INSERT_VALUES);CHKERRQ(ierr);
2593                 ierr = VecDot(v1,v2,&val);CHKERRQ(ierr);
2594                 energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
2595               }
2596               ierr = VecDestroy(&v1);CHKERRQ(ierr);
2597               ierr = VecDestroy(&v2);CHKERRQ(ierr);
2598             } /* grids */
2599             ierr = DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
2600             ierr = DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray);CHKERRQ(ierr);
2601             ierr = PetscFree(globGammaArray);CHKERRQ(ierr);
2602             ierr = PetscFree(globMfArray);CHKERRQ(ierr);
2603             ierr = VecDestroy(&globGamma);CHKERRQ(ierr);
2604             ierr = VecDestroy(&Mf);CHKERRQ(ierr);
2605           }
2606         } else {
2607           ierr = PetscDSSetObjective(prob, 0, &f0_s_v2);CHKERRQ(ierr);
2608           ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
2609           energy[ii]    = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2610         }
2611         ierr = PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",
2612                            stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));CHKERRQ(ierr);
2613         xmomentumtot += xmomentum[ii];
2614         ymomentumtot += ymomentum[ii];
2615         zmomentumtot += zmomentum[ii];
2616         energytot    += energy[ii];
2617         densitytot   += density[ii];
2618       }
2619       if (ctx->num_species>1) {ierr = PetscPrintf(ctx->comm, "\n");CHKERRQ(ierr);}
2620     }
2621   }
2622   ierr = DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);CHKERRQ(ierr);
2623   ierr = PetscFree(globXArray);CHKERRQ(ierr);
2624   /* totals */
2625   ierr = DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd);CHKERRQ(ierr);
2626   if (ctx->num_species>1) {
2627     if (dim==2) {
2628       ierr = PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",
2629                          stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);CHKERRQ(ierr);
2630     } else {
2631       ierr = PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",
2632                          stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);CHKERRQ(ierr);
2633     }
2634   } else {
2635     ierr = PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart);CHKERRQ(ierr);
2636   }
2637   ierr = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr);
2638   PetscFunctionReturn(0);
2639 }
2640 
2641 /*@
2642  DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian)
2643 
2644  Collective on pack
2645 
2646  Input Parameter:
2647  . pack     - the DM object
2648 
2649  Output Parameter:
2650  . Amat - The mass matrix (optional); the mass matrix is also cached in the Landau context attached to pack
2651 
2652  Level: beginner
2653 
2654  .keywords: mesh
2655  .seealso: DMPlexLandauCreateVelocitySpace()
2656  @*/
2657 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2658 {
2659   DM             mass_pack,massDM[LANDAU_MAX_GRIDS];
2660   PetscDS        prob;
2661   PetscInt       ii,dim,N1=1,N2;
2662   PetscErrorCode ierr;
2663   LandauCtx      *ctx;
2664   Mat            packM,subM[LANDAU_MAX_GRIDS];
2665 
2666   PetscFunctionBegin;
2667   PetscValidHeaderSpecific(pack,DM_CLASSID,1);
2668   if (Amat) PetscValidPointer(Amat,2);
2669   ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
2670   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2671   ierr = PetscLogEventBegin(ctx->events[14],0,0,0,0);CHKERRQ(ierr);
2672   ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
2673   ierr = DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack);CHKERRQ(ierr);
2674   /* create pack mass matrix */
2675   for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
2676     ierr = DMClone(ctx->plex[grid], &massDM[grid]);CHKERRQ(ierr);
2677     ierr = DMCopyFields(ctx->plex[grid], massDM[grid]);CHKERRQ(ierr);
2678     ierr = DMCreateDS(massDM[grid]);CHKERRQ(ierr);
2679     ierr = DMGetDS(massDM[grid], &prob);CHKERRQ(ierr);
2680     for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2681       if (dim==3) {ierr = PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL);CHKERRQ(ierr);}
2682       else        {ierr = PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL);CHKERRQ(ierr);}
2683     }
2684 #if !defined(LANDAU_SPECIES_MAJOR)
2685     ierr = DMCompositeAddDM(mass_pack,massDM[grid]);CHKERRQ(ierr);
2686 #else
2687     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2688       ierr = DMCompositeAddDM(mass_pack,massDM[grid]);CHKERRQ(ierr);
2689     }
2690 #endif
2691     ierr = DMCreateMatrix(massDM[grid], &subM[grid]);CHKERRQ(ierr);
2692   }
2693 #if !defined(LANDAU_SPECIES_MAJOR)
2694   // stack the batched DMs
2695   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2696     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2697       ierr = DMCompositeAddDM(mass_pack, massDM[grid]);CHKERRQ(ierr);
2698     }
2699   }
2700 #endif
2701   ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only");CHKERRQ(ierr);
2702   ierr = DMSetFromOptions(mass_pack);CHKERRQ(ierr);
2703   ierr = DMCreateMatrix(mass_pack, &packM);CHKERRQ(ierr);
2704   ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only false");CHKERRQ(ierr);
2705   ierr = MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);CHKERRQ(ierr);
2706   ierr = MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
2707   ierr = DMDestroy(&mass_pack);CHKERRQ(ierr);
2708   /* make mass matrix for each block */
2709   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2710     Vec locX;
2711     DM  plex = massDM[grid];
2712     ierr = DMGetLocalVector(plex, &locX);CHKERRQ(ierr);
2713     /* Mass matrix is independent of the input, so no need to fill locX */
2714     ierr = DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx);CHKERRQ(ierr);
2715     ierr = DMRestoreLocalVector(plex, &locX);CHKERRQ(ierr);
2716     ierr = DMDestroy(&massDM[grid]);CHKERRQ(ierr);
2717   }
2718   ierr = MatGetSize(ctx->J, &N1, NULL);CHKERRQ(ierr);
2719   ierr = MatGetSize(packM, &N2, NULL);CHKERRQ(ierr);
2720   PetscCheck(N1 == N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT,N1,N2);
2721   /* assemble block diagonals */
2722   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2723     Mat               B = subM[grid];
2724     PetscInt          nloc, nzl, colbuf[1024], row;
2725     ierr = MatGetSize(B, &nloc, NULL);CHKERRQ(ierr);
2726     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2727       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2728       const PetscInt    *cols;
2729       const PetscScalar *vals;
2730       for (int i=0 ; i<nloc ; i++) {
2731         ierr = MatGetRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
2732         PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
2733         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2734         row = i + moffset;
2735         ierr = MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES);CHKERRQ(ierr);
2736         ierr = MatRestoreRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
2737       }
2738     }
2739   }
2740   // cleanup
2741   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2742     ierr = MatDestroy(&subM[grid]);CHKERRQ(ierr);
2743   }
2744   ierr = MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2745   ierr = MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2746   ierr = PetscObjectSetName((PetscObject)packM, "mass");CHKERRQ(ierr);
2747   ierr = MatViewFromOptions(packM,NULL,"-dm_landau_mass_view");CHKERRQ(ierr);
2748   ctx->M = packM;
2749   if (Amat) *Amat = packM;
2750   ierr = PetscLogEventEnd(ctx->events[14],0,0,0,0);CHKERRQ(ierr);
2751   PetscFunctionReturn(0);
2752 }
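
/*
   Assembly pattern note (descriptive, not in the original source): each per-grid mass matrix subM[grid] is
   copied into the packed matrix row by row, with rows and columns shifted by the (batch, grid) block offset
   from LAND_MOFFSET(), so packM is block diagonal over all batches and grids and stays in the same Plex
   ordering as ctx->J before any field-major permutation.
*/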
2753 
2754 /*@
2755  DMPlexLandauIFunction - TS residual calculation
2756 
2757  Collective on ts
2758 
2759  Input Parameters:
2760  +   ts  - The time stepping context
2761  .   time_dummy - current time (not used)
2762  .   X - Current state
2763  .   X_t - Time derivative of current state
2764  -   actx - Landau context
2765 
2766  Output Parameter:
2767  .   F  - The residual
2768 
2769  Level: beginner
2770 
2771  .keywords: mesh
2772  .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIJacobian()
2773  @*/
2774 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2775 {
2776   PetscErrorCode ierr;
2777   LandauCtx      *ctx=(LandauCtx*)actx;
2778   PetscInt       dim;
2779   DM             pack;
2780 #if defined(PETSC_HAVE_THREADSAFETY)
2781   double         starttime, endtime;
2782 #endif
2783 
2784   PetscFunctionBegin;
2785   ierr = TSGetDM(ts,&pack);CHKERRQ(ierr);
2786   ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
2787   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2788   if (ctx->stage) {
2789     ierr = PetscLogStagePush(ctx->stage);CHKERRQ(ierr);
2790   }
2791   ierr = PetscLogEventBegin(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
2792   ierr = PetscLogEventBegin(ctx->events[0],0,0,0,0);CHKERRQ(ierr);
2793 #if defined(PETSC_HAVE_THREADSAFETY)
2794   starttime = MPI_Wtime();
2795 #endif
2796   ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
2797   if (!ctx->aux_bool) {
2798     ierr = PetscInfo(ts, "Create Landau Jacobian t=%g X=%p %s\n",time_dummy,X_t,ctx->aux_bool ? " -- seems to be in line search" : "");CHKERRQ(ierr);
2799     ierr = LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx);CHKERRQ(ierr);
2800     ierr = MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view");CHKERRQ(ierr);
2801     ctx->aux_bool = PETSC_TRUE;
2802   } else {
2803     ierr = PetscInfo(ts, "Skip forming Jacobian, has not changed (should check norm)\n");CHKERRQ(ierr);
2804   }
2805   /* mat vec for op */
2806   ierr = MatMult(ctx->J,X,F);CHKERRQ(ierr); /* C*f */
2807   /* add time term */
2808   if (X_t) {
2809     ierr = MatMultAdd(ctx->M,X_t,F,F);CHKERRQ(ierr);
2810   }
2811 #if defined(PETSC_HAVE_THREADSAFETY)
2812   if (ctx->stage) {
2813     endtime = MPI_Wtime();
2814     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2815     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2816     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2817   }
2818 #endif
2819   ierr = PetscLogEventEnd(ctx->events[0],0,0,0,0);CHKERRQ(ierr);
2820   ierr = PetscLogEventEnd(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
2821   if (ctx->stage) {
2822     ierr = PetscLogStagePop();CHKERRQ(ierr);
2823 #if defined(PETSC_HAVE_THREADSAFETY)
2824     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2825 #endif
2826   }
2827   PetscFunctionReturn(0);
2828 }
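
/*
   In summary (descriptive note, not in the original source): the residual assembled above is roughly
   F = C(f) f + M f_t, where C(f) is the state-dependent collision operator stored in ctx->J (formed with a
   zero shift) and M is the mass matrix; DMPlexLandauIJacobian() below then supplies shift*M + C(f).
*/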
2829 
2830 /*@
2831  DMPlexLandauIJacobian - TS Jacobian construction
2832 
2833  Collective on ts
2834 
2835  Input Parameters:
2836  +   ts  - The time stepping context
2837  .   time_dummy - current time (not used)
2838  .   X - Current state
2839  .   U_tdummy - Time derivative of current state (not used)
2840  .   shift - shift for du/dt term
2841  -   actx - Landau context
2842 
2843  Output Parameters:
2844  +   Amat  - Jacobian
2845  -   Pmat  - same as Amat
2846 
2847  Level: beginner
2848 
2849  .keywords: mesh
2850  .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIFunction()
2851  @*/
2852 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2853 {
2854   PetscErrorCode ierr;
2855   LandauCtx      *ctx=NULL;
2856   PetscInt       dim;
2857   DM             pack;
2858 #if defined(PETSC_HAVE_THREADSAFETY)
2859   double         starttime, endtime;
2860 #endif
2861   PetscFunctionBegin;
2862   ierr = TSGetDM(ts,&pack);CHKERRQ(ierr);
2863   ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
2864   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2865   PetscCheckFalse(Amat!=Pmat || Amat!=ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2866   ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
2867   /* get collision Jacobian into A */
2868   if (ctx->stage) {
2869     ierr = PetscLogStagePush(ctx->stage);CHKERRQ(ierr);
2870   }
2871   ierr = PetscLogEventBegin(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
2872   ierr = PetscLogEventBegin(ctx->events[9],0,0,0,0);CHKERRQ(ierr);
2873 #if defined(PETSC_HAVE_THREADSAFETY)
2874   starttime = MPI_Wtime();
2875 #endif
2876   ierr = PetscInfo(ts, "Adding just mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift);CHKERRQ(ierr);
2877   PetscCheckFalse(shift==0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift");
2878   PetscCheck(ctx->aux_bool,ctx->comm, PETSC_ERR_PLIB, "wrong state");
2879   if (!ctx->use_matrix_mass) {
2880     ierr = LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx);CHKERRQ(ierr);
2881     ierr = MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view");CHKERRQ(ierr);
2882   } else { /* add mass */
2883     ierr = MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
2884   }
2885   ctx->aux_bool = PETSC_FALSE;
2886 #if defined(PETSC_HAVE_THREADSAFETY)
2887   if (ctx->stage) {
2888     endtime = MPI_Wtime();
2889     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2890     ctx->times[LANDAU_MASS] += (endtime - starttime);
2891   }
2892 #endif
2893   ierr = PetscLogEventEnd(ctx->events[9],0,0,0,0);CHKERRQ(ierr);
2894   ierr = PetscLogEventEnd(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
2895   if (ctx->stage) {
2896     ierr = PetscLogStagePop();CHKERRQ(ierr);
2897 #if defined(PETSC_HAVE_THREADSAFETY)
2898     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2899 #endif
2900   }
2901   PetscFunctionReturn(0);
2902 }
2903