#include <../src/mat/impls/aij/seq/aij.h>
#include <petsc/private/dmpleximpl.h>   /*I "petscdmplex.h" I*/
#include <petsclandau.h>                /*I "petsclandau.h"   I*/
#include <petscts.h>
#include <petscdmforest.h>
#include <petscdmcomposite.h>

/* Landau collision operator */

/* relativistic terms */
#if defined(PETSC_USE_REAL_SINGLE)
#define SPEED_OF_LIGHT 2.99792458e8F
#define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
#else
#define SPEED_OF_LIGHT 2.99792458e8
#define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
#endif

#define PETSC_THREAD_SYNC
#include "land_tensors.h"

#if defined(PETSC_HAVE_OPENMP)
#include <omp.h>
#endif

/* vector padding not supported */
#define LANDAU_VL  1

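/* Shims used when the Jacobian is kept in field-major (batched) ordering for GPU assembly:
   scatter the input vector into the batch ordering, apply the saved SeqAIJ kernel, and
   scatter the result back to the PLEX ordering. LandauMatMultTranspose and
   LandauMatGetDiagonal below follow the same pattern. */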
static PetscErrorCode LandauMatMult(Mat A, Vec x, Vec y)
{
  LandauCtx       *ctx;
  PetscContainer  container;

  PetscFunctionBegin;
  PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container));
  if (container) {
    PetscCall(PetscContainerGetPointer(container, (void **) &ctx));
    PetscCall(VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall((*ctx->seqaij_mult)(A,ctx->work_vec,y));
    PetscCall(VecCopy(y, ctx->work_vec));
    PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
    PetscFunctionReturn(0);
  }
  PetscCall(MatMult(A,x,y));
  PetscFunctionReturn(0);
}

// Computes v3 = v2 + A * v1 -- not expected to be called, so guarded with an error
static PetscErrorCode LandauMatMultAdd(Mat A,Vec v1,Vec v2,Vec v3)
{
  PetscFunctionBegin;
  SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "MatMultAdd not expected to be called");
  PetscCall(LandauMatMult(A,v1,v3));
  PetscCall(VecAYPX(v3,1,v2));
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauMatMultTranspose(Mat A, Vec x, Vec y)
{
  LandauCtx       *ctx;
  PetscContainer  container;

  PetscFunctionBegin;
  PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container));
  if (container) {
    PetscCall(PetscContainerGetPointer(container, (void **) &ctx));
    PetscCall(VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall((*ctx->seqaij_multtranspose)(A,ctx->work_vec,y));
    PetscCall(VecCopy(y, ctx->work_vec));
    PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
    PetscFunctionReturn(0);
  }
  PetscCall(MatMultTranspose(A,x,y));
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauMatGetDiagonal(Mat A,Vec x)
{
  LandauCtx       *ctx;
  PetscContainer  container;

  PetscFunctionBegin;
  PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container));
  if (container) {
    PetscCall(PetscContainerGetPointer(container, (void **) &ctx));
    PetscCall((*ctx->seqaij_getdiagonal)(A,ctx->work_vec));
    PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE));
    PetscFunctionReturn(0);
  }
  PetscCall(MatGetDiagonal(A, x));
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
{
  P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
  PetscFunctionBegin;
  // free device data
  if (maps[0].deviceType != LANDAU_CPU) {
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
    if (maps[0].deviceType == LANDAU_KOKKOS) {
      PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // implies Kokkos does the destroy
    } // else could be CUDA
#elif defined(PETSC_HAVE_CUDA)
    if (maps[0].deviceType == LANDAU_CUDA) {
      PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids));
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "unsupported maps->deviceType %d",maps->deviceType);
#endif
  }
  // free host data
  for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
    PetscCall(PetscFree(maps[grid].c_maps));
    PetscCall(PetscFree(maps[grid].gIdx));
  }
  PetscCall(PetscFree(maps));

  PetscFunctionReturn(0);
}
static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal     v2 = 0;
  PetscFunctionBegin;
  /* compute v^2 / 2 */
  for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
  /* kinetic energy (not a Maxwellian) */
  u[0] = v2/2;
  PetscFunctionReturn(0);
}

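/* Algebraic note on the form used below: with xx = u^2/c^2 and gamma = sqrt(1 + xx),
     gamma - 1 = sqrt(1 + xx) - 1 = xx / (sqrt(1 + xx) + 1),
   so the second form avoids the cancellation in sqrt(1 + xx) - 1 for small xx.
   Constant offsets are harmless here because only derivatives of u[0] are used. */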
/* needs double */
static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal     *c2_0_arr = ((PetscReal*)actx);
  double        u2 = 0, c02 = (double)*c2_0_arr, xx;

  PetscFunctionBegin;
  /* compute u^2 */
  for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
  /* gamma - 1 = g_eps, for conditioning, and we only take derivatives */
  xx = u2/c02;
#if defined(PETSC_USE_DEBUG)
  u[0] = PetscSqrtReal(1. + xx);
#else
  u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the extra -1 is a constant shift and only derivatives are used
#endif
  PetscFunctionReturn(0);
}

/*
 LandauFormJacobian_Internal - Evaluates the Jacobian matrix.

 Input Parameters:
 +  a_X - input vector
 .  dim - dimension
 .  shift - zero for the Jacobian, otherwise the mass-matrix shift
 -  a_ctx - optional user-defined context

 Output Parameter:
 .  JacP - Jacobian matrix filled, not created
 */
static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
{
  LandauCtx         *ctx = (LandauCtx*)a_ctx;
  PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nb;
  PetscQuadrature   quad;
  PetscReal         Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
  PetscScalar       *cellClosure=NULL;
  const PetscScalar *xdata=NULL;
  PetscDS           prob;
  PetscContainer    container;
  P4estVertexMaps   *maps;
  Mat               subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ];

  PetscFunctionBegin;
  PetscValidHeaderSpecific(a_X,VEC_CLASSID,1);
  PetscValidHeaderSpecific(JacP,MAT_CLASSID,2);
  PetscValidPointer(ctx,5);
  /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
  PetscCheck(ctx->plex[0] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
  PetscCall(PetscLogEventBegin(ctx->events[10],0,0,0,0));
  PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
  PetscCall(PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container));
  if (container) {
    PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly");
    PetscCall(PetscContainerGetPointer(container, (void **) &maps));
    PetscCheck(maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
    for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL;
  } else {
    PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly");
    for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) {
      for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
        PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ]));
      }
    }
    maps = NULL;
  }
  // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
  PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
  PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); Nb = Nq;
  PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ);
  // get metadata for collecting dynamic data
  for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
    PetscInt cStart, cEnd;
    PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
    PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
    numCells[grid] = cEnd - cStart; // grids can have different topology
  }
  PetscCall(PetscLogEventEnd(ctx->events[10],0,0,0,0));
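  /* shift == 0 means assemble the Landau Jacobian, which needs the current state (f and its
     derivatives are computed below); shift != 0 means assemble the shifted mass matrix,
     which needs no state data. */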
  if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
    DM pack;
    PetscCall(VecGetDM(a_X, &pack));
    PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
    PetscCall(PetscLogEventBegin(ctx->events[1],0,0,0,0));
    for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
      Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
      if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
    }
    if (!ctx->gpu_assembly) {
      Vec          *locXArray,*globXArray;
      PetscScalar  *cellClosure_it;
      PetscInt     cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS];
      PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
      for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
        PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
        PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
        PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
      }
      /* count cellClosure size */
      PetscCall(DMCompositeGetNumberDM(pack,&nDMs));
      for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
      PetscCall(PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure));
      cellClosure_it = cellClosure;
      PetscCall(PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray));
      PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
      PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
      PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
      for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once)
        for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
          Vec         locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2;
          PetscInt    cStart, cEnd, ei;
          PetscCall(VecDuplicate(locX,&locX2));
          PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
          PetscCall(DMGlobalToLocalEnd  (ctx->plex[grid], globX, INSERT_VALUES, locX2));
          PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
          for (ei = cStart ; ei < cEnd; ++ei) {
            PetscScalar *coef = NULL;
            PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
            PetscCall(PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
            PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
            cellClosure_it += Nb*Nf[grid];
          }
          PetscCall(VecDestroy(&locX2));
        }
      }
      PetscCheck(cellClosure_it-cellClosure == cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT,(PetscCount)(cellClosure_it-cellClosure),cellClosure_sz*ctx->batch_sz);
      PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
      PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
      PetscCall(PetscFree(locXArray));
      PetscCall(PetscFree(globXArray));
      xdata = NULL;
    } else {
      PetscMemType mtype;
      if (ctx->jacobian_field_major_order) { // get data in batch ordering
        PetscCall(VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
        PetscCall(VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
        PetscCall(VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype));
      } else {
        PetscCall(VecGetArrayReadAndMemType(a_X,&xdata,&mtype));
      }
      if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
        SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
      }
      cellClosure = NULL;
    }
    PetscCall(PetscLogEventEnd(ctx->events[1],0,0,0,0));
  } else xdata = cellClosure = NULL;

  /* do it */
  if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
    if (ctx->deviceType == LANDAU_CUDA) {
#if defined(PETSC_HAVE_CUDA)
      PetscCall(LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP));
#else
      SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
#endif
    } else if (ctx->deviceType == LANDAU_KOKKOS) {
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
      PetscCall(LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP));
#else
      SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
#endif
    }
  } else {   /* CPU version */
    PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
    PetscInt        ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS];
    PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
    PetscReal       Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
    PetscSection    section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
    PetscScalar     *coo_vals=NULL;
    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
      PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
      PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
    }
    /* count IPf size, etc */
    PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
    const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
    ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
    for (PetscInt grid=0 ; grid<num_grids ; grid++) {
      PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
      elem_offset[grid+1] = elem_offset[grid] + numCells[grid];
      ip_offset[grid+1]   = ip_offset[grid]   + numCells[grid]*Nq;
      ipf_offset[grid+1]  = ipf_offset[grid]  + Nq*nfloc*numCells[grid];
    }
    IPf_sz_glb = ipf_offset[num_grids];
    IPf_sz_tot = IPf_sz_glb*ctx->batch_sz;
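    /* flattened layout of the point data (ff, dudx, ...):
       idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*(numCells[grid]*Nq) + loc_elem*Nq + q,
       i.e. [batch][grid][field][cell][quadrature point] */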
    // prep COO
    if (ctx->coo_assembly) {
      PetscCall(PetscMalloc1(ctx->SData_d.coo_size,&coo_vals)); // allocate every time?
      PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",(PetscInt)ctx->SData_d.coo_size));
    }
    if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
#if defined(PETSC_HAVE_THREADSAFETY)
      double         starttime, endtime;
      starttime = MPI_Wtime();
#endif
      PetscCall(PetscLogEventBegin(ctx->events[8],0,0,0,0));
      for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
        invMass[fieldA]  = ctx->m_0/ctx->masses[fieldA];
        Eq_m[fieldA]     = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
        if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
        nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA];
        nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
      }
      PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz));
      // f and df/dx at every quadrature point
      for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
        const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch
        // find my grid:
        PetscInt       grid = 0;
        while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck, linear search for grid
        {
          const PetscInt     loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
          const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
          PetscScalar        *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
          PetscReal          *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // invJ is static data on batch 0
          PetscInt           b,f,q;
          if (cellClosure) {
            coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const
          } else {
            coef = coef_buff;
            for (f = 0; f < loc_Nf; ++f) {
              LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
              for (b = 0; b < Nb; ++b) {
                PetscInt idx = Idxs[b];
                if (idx >= 0) {
                  coef[f*Nb+b] = xdata[idx+moffset];
                } else {
                  idx = -idx - 1;
                  coef[f*Nb+b] = 0;
                  for (q = 0; q < maps[grid].num_face; q++) {
                    PetscInt    id    = maps[grid].c_maps[idx][q].gid;
                    PetscScalar scale = maps[grid].c_maps[idx][q].scale;
                    coef[f*Nb+b] += scale*xdata[id+moffset];
                  }
                }
              }
            }
          }
          /* get f and df */
          for (PetscInt qi = 0; qi < Nq; qi++) {
            const PetscReal  *invJ = &invJe[qi*dim*dim];
            const PetscReal  *Bq   = &BB[qi*Nb];
            const PetscReal  *Dq   = &DD[qi*Nb*dim];
            PetscReal        u_x[LANDAU_DIM];
            /* get f & df */
            for (f = 0; f < loc_Nf; ++f) {
              const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi;
              PetscInt       b, e;
              PetscReal      refSpaceDer[LANDAU_DIM];
              ff[idx] = 0.0;
              for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
              for (b = 0; b < Nb; ++b) {
                const PetscInt    cidx = b;
                ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
                for (int d = 0; d < dim; ++d) {
                  refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
                }
              }
              for (int d = 0; d < LANDAU_DIM; ++d) {
                for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) {
                  u_x[d] += invJ[e*dim+d]*refSpaceDer[e];
                }
              }
              dudx[idx] = u_x[0];
              dudy[idx] = u_x[1];
#if LANDAU_DIM==3
              dudz[idx] = u_x[2];
#endif
            }
          } // q
        } // grid
      } // grid*batch
      PetscCall(PetscLogEventEnd(ctx->events[8],0,0,0,0));
#if defined(PETSC_HAVE_THREADSAFETY)
      endtime = MPI_Wtime();
      if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
#endif
    } // Jacobian setup
    // assemble Jacobian (or mass)
    for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
      const PetscInt b_Nelem      = elem_offset[num_grids];
      const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem;
      PetscInt       grid         = 0;
#if defined(PETSC_HAVE_THREADSAFETY)
      double         starttime, endtime;
      starttime                   = MPI_Wtime();
#endif
      while (glb_elem_idx >= elem_offset[grid+1]) grid++;
      {
        const PetscInt     loc_Nf  = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
        const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim;
        PetscScalar        *elemMat;
        const PetscReal    *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim];
        PetscCall(PetscMalloc1(elemMatSize, &elemMat));
        PetscCall(PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat)));
        if (shift==0.0) { // Jacobian
          PetscCall(PetscLogEventBegin(ctx->events[4],0,0,0,0));
        } else {          // mass
          PetscCall(PetscLogEventBegin(ctx->events[16],0,0,0,0));
        }
        for (PetscInt qj = 0; qj < Nq; ++qj) {
          const PetscInt   jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
          PetscReal        g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
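          /* g2 is the vector (friction/drift) coefficient and g3 the tensor (diffusion)
             coefficient of the weak form at this quadrature point; g0 is the mass term.
             They multiply the B and D basis tabulations in the element-matrix loop below. */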
          PetscInt         d,d2,dp,d3,IPf_idx;
          if (shift==0.0) { // Jacobian
            const PetscReal * const invJj = &invJe[qj*dim*dim];
            PetscReal               gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
            const PetscReal         vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
            // create g2 & g3
            for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K
              gg2_temp[d] = 0;
              for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0;
            }
            /* inner beta reduction */
            IPf_idx = 0;
            for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
              PetscInt  nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
              for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
                for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
                  const PetscReal wi       = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
                  PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM==2
                  PetscReal       Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  LandauTensor2D(vj, x, y, Ud, Uk, mask);
#else
                  PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  if (ctx->use_relativistic_corrections) {
                    LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
                  } else {
                    LandauTensor3D(vj, x, y, z, U, mask);
                  }
#endif
                  for (int f = 0; f < Nfloc_r ; ++f) {
                    const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi;  // IPf_idx + f*nip_loc_r + loc_fdf_idx;
                    temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
                    temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
#if LANDAU_DIM==3
                    temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
#endif
                    temp2    += ff[idx]*nu_beta[f+f_off];
                  }
                  temp1[0] *= wi;
                  temp1[1] *= wi;
#if LANDAU_DIM==3
                  temp1[2] *= wi;
#endif
                  temp2    *= wi;
#if LANDAU_DIM==2
                  for (d2 = 0; d2 < 2; d2++) {
                    for (d3 = 0; d3 < 2; ++d3) {
                      /* K = U * grad(f): g2=e: i,A */
                      gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
                      /* D = -U * (I \kron (fx)): g3=f: i,j,A */
                      gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
                    }
                  }
#else
                  for (d2 = 0; d2 < 3; ++d2) {
                    for (d3 = 0; d3 < 3; ++d3) {
                      /* K = U * grad(f): g2 = e: i,A */
                      gg2_temp[d2] += U[d2][d3]*temp1[d3];
                      /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
                      gg3_temp[d2][d3] += U[d2][d3]*temp2;
                    }
                  }
#endif
                } // qi
              } // ei_r
              IPf_idx += nip_loc_r*Nfloc_r;
            } /* grid_r - IPs */
            PetscCheck(IPf_idx == IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT,IPf_idx,IPf_sz_glb);
            // add alpha and put in gg2/3
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
              for (d2 = 0; d2 < LANDAU_DIM; d2++) {
                gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
                for (d3 = 0; d3 < LANDAU_DIM; d3++) {
                  gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
                }
              }
            }
            /* add electric field term once per IP */
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) {
              gg2[fieldA][LANDAU_DIM-1] += Eq_m[fieldA+f_off];
            }
            /* Jacobian transform - g2, g3 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
              for (d = 0; d < dim; ++d) {
                g2[fieldA][d] = 0.0;
                for (d2 = 0; d2 < dim; ++d2) {
                  g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
                  g3[fieldA][d][d2] = 0.0;
                  for (d3 = 0; d3 < dim; ++d3) {
                    for (dp = 0; dp < dim; ++dp) {
                      g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
                    }
                  }
                  g3[fieldA][d][d2] *= wj;
                }
                g2[fieldA][d] *= wj;
              }
            }
          } else { // mass
            PetscReal wj = ww[jpidx_glb];
            /* Jacobian transform - g0 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) {
              if (dim==2) {
                g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
              } else {
                g0[fieldA] = wj * shift; // move this to below and remove g0
              }
            }
          }
          /* FE matrix construction */
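          /* elemMat(i,j) += grad(phi_i) . g2 phi_j + grad(phi_i) . g3 . grad(phi_j) for the
             Jacobian, or phi_i g0 phi_j for the mass matrix, with all fields on the block
             diagonal */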
          {
            PetscInt  fieldA,d,f,d2,g;
            const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
            /* assemble - on the diagonal (I,I) */
            for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
              for (f = 0; f < Nb ; f++) {
                const PetscInt i = fieldA*Nb + f; /* Element matrix row */
                for (g = 0; g < Nb; ++g) {
                  const PetscInt j    = fieldA*Nb + g; /* Element matrix column */
                  const PetscInt fOff = i*totDim + j;
                  if (shift==0.0) {
                    for (d = 0; d < dim; ++d) {
                      elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
                      for (d2 = 0; d2 < dim; ++d2) {
                        elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
                      }
                    }
                  } else { // mass
                    elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
                  }
                }
              }
            }
          }
        } /* qj loop */
        if (shift==0.0) { // Jacobian
          PetscCall(PetscLogEventEnd(ctx->events[4],0,0,0,0));
        } else {
          PetscCall(PetscLogEventEnd(ctx->events[16],0,0,0,0));
        }
#if defined(PETSC_HAVE_THREADSAFETY)
        endtime = MPI_Wtime();
        if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
#endif
        /* assemble matrix */
        if (!container) {
          PetscInt cStart;
          PetscCall(PetscLogEventBegin(ctx->events[6],0,0,0,0));
          PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
          PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES));
          PetscCall(PetscLogEventEnd(ctx->events[6],0,0,0,0));
        } else {  // GPU like assembly for debugging
          PetscInt      fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
          PetscScalar   vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0};
          LandauIdx     *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
          /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
          for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
            LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
            for (f = 0; f < Nb ; f++) {
              PetscInt idx = Idxs[f];
              if (idx >= 0) {
                nr           = 1;
                rows0[0]     = idx;
                row_scale[0] = 1.;
              } else {
                idx = -idx - 1;
                for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
                  if (maps[grid].c_maps[idx][q].gid < 0) break;
                  rows0[q]     = maps[grid].c_maps[idx][q].gid;
                  row_scale[q] = maps[grid].c_maps[idx][q].scale;
                }
              }
              for (g = 0; g < Nb; ++g) {
                idx = Idxs[g];
                if (idx >= 0) {
                  nc           = 1;
                  cols0[0]     = idx;
                  col_scale[0] = 1.;
                } else {
                  idx = -idx - 1;
                  for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
                    if (maps[grid].c_maps[idx][q].gid < 0) break;
                    cols0[q]     = maps[grid].c_maps[idx][q].gid;
                    col_scale[q] = maps[grid].c_maps[idx][q].scale;
                  }
                }
                const PetscInt    i   = fieldA*Nb + f; /* Element matrix row */
                const PetscInt    j   = fieldA*Nb + g; /* Element matrix column */
                const PetscScalar Aij = elemMat[i*totDim + j];
                if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
                  const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
                  const int idx0   = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
                  for (int q = 0, idx2 = idx0; q < nr; q++) {
                    for (int d = 0; d < nc; d++, idx2++) {
                      coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij;
                    }
                  }
                } else {
                  for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
                  for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
                  for (q = 0; q < nr; q++) {
                    for (d = 0; d < nc; d++) {
                      vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
                    }
                  }
                  PetscCall(MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES));
                }
              }
            }
          }
        }
        if (loc_elem==-1) { // debugging: dump the element matrix (enable by setting to a valid element index)
          PetscCall(PetscPrintf(ctx->comm,"CPU Element matrix\n"));
          for (int d = 0; d < totDim; ++d) {
            for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm," %12.5e",  (double)PetscRealPart(elemMat[d*totDim + f])));
            PetscCall(PetscPrintf(ctx->comm,"\n"));
          }
          exit(12);
        }
        PetscCall(PetscFree(elemMat));
      } /* grid */
    } /* outer element & batch loop */
    if (shift==0.0) { // Jacobian
      PetscCall(PetscFree4(ff, dudx, dudy, dudz));
    }
    if (!container) {   // 'CPU' assembly move nest matrix to global JacP
      for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP
        for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
          const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
          PetscInt          nloc, nzl, colbuf[1024], row;
          const PetscInt    *cols;
          const PetscScalar *vals;
          Mat               B = subJ[ LAND_PACK_IDX(b_id,grid) ];
          PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
          PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
          PetscCall(MatGetSize(B, &nloc, NULL));
          for (int i=0 ; i<nloc ; i++) {
            PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
            PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
            for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j];
            row  = moffset + i;
            PetscCall(MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES));
            PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
          }
          PetscCall(MatDestroy(&B));
        }
      }
    }
    if (coo_vals) {
      PetscCall(MatSetValuesCOO(JacP,coo_vals,ADD_VALUES));
      PetscCall(PetscFree(coo_vals));
    }
  } /* CPU version */
  PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
  /* clean up */
  if (cellClosure) PetscCall(PetscFree(cellClosure));
  if (xdata) { // restore from the vector the array was taken from (work_vec when field-major ordering was used)
    if (ctx->jacobian_field_major_order) {
      PetscCall(VecRestoreArrayReadAndMemType(ctx->work_vec,&xdata));
    } else {
      PetscCall(VecRestoreArrayReadAndMemType(a_X,&xdata));
    }
  }
  PetscFunctionReturn(0);
}

#if defined(LANDAU_ADD_BCS)
static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                    const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                    const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                    PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
{
  uexact[0] = 0;
}
#endif

#define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }} /* p = A*x for 2x2 A */
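/* CircleInflate - map a point of the tensor-product box mesh into the circular
   multi-section geometry: rotate into the canonical section, rescale the radius so
   the outer grid line follows the circle of radius r0, then apply power-law radial
   grading between the species radii r1 and r2. */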
static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
                          PetscReal *outX, PetscReal *outY)
{
  PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
  if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
    *outX = x; *outY = y;
  } else {
    const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
    PetscReal       cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
    if (num_sections==2) {
      rotcos  = 0.70710678118654;
      outfact = 1.5; efact = 2.5;
      /* rotate normalized vector into [-pi/4,pi/4) */
      if (sinphi >= 0.) {         /* top cell, rotate by -pi/4 */
        cth = 0.707106781186548; sth = -0.707106781186548;
      } else {                    /* bottom cell, rotate by +pi/4 */
        cth = 0.707106781186548; sth = .707106781186548;
      }
    } else if (num_sections==3) {
      rotcos  = 0.86602540378443;
      outfact = 1.5; efact = 2.5;
      /* rotate normalized vector into [-pi/6,pi/6) */
      if (sinphi >= 0.5) {         /* top cell, -pi/3 */
        cth = 0.5; sth = -0.866025403784439;
      } else if (sinphi >= -.5) {  /* mid cell, 0 */
        cth = 1.; sth = .0;
      } else { /* bottom cell, +pi/3 */
        cth = 0.5; sth = 0.866025403784439;
      }
    } else if (num_sections==4) {
      rotcos  = 0.9238795325112;
      outfact = 1.5; efact = 3;
      /* rotate normalized vector into [-pi/8,pi/8) */
      if (sinphi >= 0.707106781186548) {         /* top cell, -3pi/8 */
        cth = 0.38268343236509;  sth = -0.923879532511287;
      } else if (sinphi >= 0.) {                 /* mid top cell, -pi/8 */
        cth = 0.923879532511287; sth = -.38268343236509;
      } else if (sinphi >= -0.707106781186548) { /* mid bottom cell, +pi/8 */
        cth = 0.923879532511287; sth = 0.38268343236509;
      } else {                                   /* bottom cell, +3pi/8 */
        cth = 0.38268343236509;  sth = .923879532511287;
      }
    } else {
      cth = 0.; sth = 0.; rotcos = 0; efact = 0;
    }
    Rth[0][0] = cth; Rth[0][1] =-sth;
    Rth[1][0] = sth; Rth[1][1] = cth;
    MATVEC2(Rth,xy,xyprime);
    if (num_sections==2) {
      newrr = xyprime[0]/rotcos;
    } else {
      PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
      PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
      newrr = rin + routfrac*nroutmax;
    }
    *outX = cosphi*newrr; *outY = sinphi*newrr;
    /* grade */
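    /* power-law grading of the radius: with s = (rr - rs)/(re - rs) in [0,1], the graded
       radius is rs + s^fact (re - rs), compressing points toward rs for fact > 1 */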
    PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
    if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
    else {         rs = r1; re = r2; fact = efact;} /* electron zone */
    tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
    *outX *= tt;
    *outY *= tt;
  }
}

static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
{
  LandauCtx   *ctx = (LandauCtx*)a_ctx;
  PetscReal   r = abc[0], z = abc[1];
  if (ctx->inflate) {
    PetscReal absR, absZ;
    absR = PetscAbs(r);
    absZ = PetscAbs(z);
    CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
    r = (r > 0) ? absR : -absR;
    z = (z > 0) ? absZ : -absZ;
  }
  xyz[0] = r;
  xyz[1] = z;
  if (dim==3) xyz[2] = abc[2];

  PetscFunctionReturn(0);
}

/* create DMComposite of meshes for each species group */
static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
{
  PetscFunctionBegin;
  { /* p4est, quads */
    /* Create plex mesh of Landau domain */
    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      PetscReal radius = ctx->radius[grid];
      if (!ctx->sphere) {
        PetscInt       cells[] = {2,2,2};
        PetscReal      lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
        DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
        if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; }
        PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here
        PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */
        if (dim==3) PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "cube"));
        else PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane"));
      } else if (dim==2) { // sphere is all wrong. should just have one inner radius
        PetscInt       numCells,cells[16][4],i,j;
        PetscInt       numVerts;
        PetscReal      inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
        PetscReal      *flatCoords   = NULL;
        PetscInt       *flatCells    = NULL, *pcell;
        if (ctx->num_sections==2) {
#if 1
          numCells = 5;
          numVerts = 10;
          int cells2[][4] = { {0,1,4,3},
                              {1,2,5,4},
                              {3,4,7,6},
                              {4,5,8,7},
                              {6,7,8,9} };
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            for (j = 0; j < numVerts-1; j++) {
              PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
              PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
            coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
          }
#else
          numCells = 4;
          numVerts = 8;
          static int     cells2[][4] = {{0,1,2,3},
                                        {4,5,1,0},
                                        {5,6,2,1},
                                        {6,7,3,2}};
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            PetscInt j;
            for (j = 0; j < 8; j++) {
              PetscReal z, r;
              PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
              PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
          }
#endif
        } else if (ctx->num_sections==3) {
          numCells = 7;
          numVerts = 12;
          int cells2[][4] = { {0,1,5,4},
                              {1,2,6,5},
                              {2,3,7,6},
                              {4,5,9,8},
                              {5,6,10,9},
                              {6,7,11,10},
                              {8,9,10,11} };
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            for (j = 0; j < numVerts; j++) {
              PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
              PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
          }
        } else if (ctx->num_sections==4) {
          numCells = 10;
          numVerts = 16;
          int cells2[][4] = { {0,1,6,5},
                              {1,2,7,6},
                              {2,3,8,7},
                              {3,4,9,8},
                              {5,6,11,10},
                              {6,7,12,11},
                              {7,8,13,12},
                              {8,9,14,13},
                              {10,11,12,15},
                              {12,13,14,15}};
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            for (j = 0; j < numVerts-1; j++) {
              PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
              PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
            coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
          }
        } else {
          numCells = 0;
          numVerts = 0;
        }
        for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
          pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
          pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
        }
        PetscCall(DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]));
        PetscCall(PetscFree2(flatCoords,flatCells));
        PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle"));
      } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space mesh does not support the cubed sphere");

      PetscCall(DMSetFromOptions(ctx->plex[grid]));
    } // grid loop
    PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack,prefix));
    PetscCall(DMSetFromOptions(pack));

    { /* convert to p4est (or whatever), wait for discretization to create pack */
      char           convType[256];
      PetscBool      flg;

      PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
      PetscCall(PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg));
      PetscOptionsEnd();
      if (flg) {
        ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
        for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
          DM dmforest;
          PetscCall(DMConvert(ctx->plex[grid],convType,&dmforest));
          if (dmforest) {
            PetscBool isForest;
            PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix));
            PetscCall(DMIsForest(dmforest,&isForest));
            if (isForest) {
              if (ctx->sphere && ctx->inflate) {
                PetscCall(DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx));
              }
              PetscCall(DMDestroy(&ctx->plex[grid]));
              ctx->plex[grid] = dmforest; // Forest for adaptivity
            } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non-Forest?");
          } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
        }
      } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
    }
  } /* non-file */
  PetscCall(DMSetDimension(pack, dim));
  PetscCall(PetscObjectSetName((PetscObject) pack, "Mesh"));
  PetscCall(DMSetApplicationContext(pack, ctx));

  PetscFunctionReturn(0);
}

static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
{
  PetscInt        ii,i0;
  char            buf[256];
  PetscSection    section;

  PetscFunctionBegin;
  for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
    if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
    else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
    /* Setup Discretization - FEM */
    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]));
    PetscCall(PetscObjectSetName((PetscObject) ctx->fe[ii], buf));
    PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]));
  }
  PetscCall(DMCreateDS(ctx->plex[grid]));
  PetscCall(DMGetSection(ctx->plex[grid], &section));
  for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
    if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
    else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
    PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
  }
  PetscFunctionReturn(0);
}

/* Define a Maxwellian function for testing out the operator. */

/* Using Cartesian velocity space coordinates, the particle */
/* density, [1/m^3], is defined according to */

/* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */

/* Using some constant, c, we normalize the velocity vector into a */
/* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */

/* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */

/* Defining $\theta=2T/mc^2$, the probability density for finding the */
/* particle in a box $dx^3$ around $x$ is */

/* $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$ */

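/* In the code below the normalizing speed is v_0, so theta = 2 kT/(m v_0^2), which is
   the 2*kT_m/v_0^2 combination computed in maxwellian(). */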
1013 typedef struct {
1014   PetscReal v_0;
1015   PetscReal kT_m;
1016   PetscReal n;
1017   PetscReal shift;
1018 } MaxwellianCtx;
1019 
1020 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
1021 {
1022   MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
1023   PetscInt      i;
1024   PetscReal     v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
1025   PetscFunctionBegin;
1026   /* compute the exponents, v^2 */
1027   for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
1028   /* evaluate the Maxwellian */
1029   u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1030   if (mctx->shift!=0.) {
1031     v2 = 0;
1032     for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
1033     v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
1034     /* evaluate the shifted Maxwellian */
1035     u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1036   }
1037   PetscFunctionReturn(0);
1038 }
1039 
1040 /*@
1041  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
1042 
1043  Collective on X
1044 
1045  Input Parameters:
1046  .   dm - The mesh (local)
1047  +   time - Current time
1048  -   temps - Temperatures of each species (global)
1049  .   ns - Number density of each species (global)
1050  -   grid - index into current grid - just used for offset into temp and ns
1051  +   actx - Landau context
1052 
1053  Output Parameter:
1054  .   X  - The state (local to this grid)
1055 
1056  Level: beginner
1057 
1058  .keywords: mesh
1059  .seealso: `DMPlexLandauCreateVelocitySpace()`
1060  @*/
1061 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx)
1062 {
1063   LandauCtx      *ctx = (LandauCtx*)actx;
1064   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
1065   PetscInt       dim;
1066   MaxwellianCtx  *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
1067 
1068   PetscFunctionBegin;
1069   PetscCall(DMGetDimension(dm, &dim));
1070   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1071   for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1072     mctxs[i0]      = &data[i0];
1073     data[i0].v_0   = ctx->v_0; // v_0 same for all grids
1074     data[i0].kT_m  = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
1075     data[i0].n     = ns[ii] * (1+(double)b_id/100.0); // make solves a little different to mimic application, n[0] use for Conner-Hastie
1076     initu[i0]      = maxwellian;
1077     data[i0].shift = 0;
1078   }
1079   data[0].shift = ctx->electronShift;
1080   /* need to make ADD_ALL_VALUES work - TODO */
1081   PetscCall(DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X));
1082   PetscFunctionReturn(0);
1083 }
1084 
1085 /*
1086  LandauSetInitialCondition - Addes Maxwellians with context
1087 
1088  Collective on X
1089 
1090  Input Parameters:
1091  .   dm - The mesh
1092  -   grid - index into current grid - just used for offset into temp and ns
1093  +   actx - Landau context with T and n
1094 
1095  Output Parameter:
1096  .   X  - The state
1097 
1098  Level: beginner
1099 
1100  .keywords: mesh
1101  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
1102  */
1103 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx)
1104 {
1105   LandauCtx        *ctx = (LandauCtx*)actx;
1106   PetscFunctionBegin;
1107   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1108   PetscCall(VecZeroEntries(X));
1109   PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx));
1110   PetscFunctionReturn(0);
1111 }
1112 
1113 // adapt a level once. Forest in/out
1114 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1115 {
1116   DM               forest, plex, adaptedDM = NULL;
1117   PetscDS          prob;
1118   PetscBool        isForest;
1119   PetscQuadrature  quad;
1120   PetscInt         Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1121   DMLabel          adaptLabel = NULL;
1122 
1123   PetscFunctionBegin;
1124   forest = ctx->plex[grid];
1125   PetscCall(DMCreateDS(forest));
1126   PetscCall(DMGetDS(forest, &prob));
1127   PetscCall(DMGetDimension(forest, &dim));
1128   PetscCall(DMIsForest(forest, &isForest));
1129   PetscCheck(isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"Not a forest");
1130   PetscCall(DMConvert(forest, DMPLEX, &plex));
1131   PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd));
1132   PetscCall(DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel));
1133   PetscCall(PetscFEGetQuadrature(fem, &quad));
1134   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
1135   PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ);
1136   PetscCall(PetscDSGetDimensions(prob, &Nb));
1137   if (type==4) {
1138     for (c = cStart; c < cEnd; c++) {
1139       PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
1140     }
1141     PetscCall(PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM"));
1142   } else if (type==2) {
1143     PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1144     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1145     for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1146     for (c = cStart; c < cEnd; c++) {
1147       PetscReal    tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1148       PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ));
1149       for (qj = 0; qj < Nq; ++qj) {
1150         tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1151         r  = PetscSqrtReal(tt);
1152         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1153           minRad = r;
1154           nr     = 0;
1155           rCellIdx[nr++]= c;
1156           PetscCall(PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", (double)r, c, qj+1, Nq));
1157         } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1158           for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1159           if (k==nr) {
1160             rCellIdx[nr++]= c;
1161             PetscCall(PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", (double)r, c, qj+1, Nq, (double)(r-minRad)));
1162           }
1163         }
1164         if (ctx->sphere) {
1165           if ((tt=r-ctx->e_radius) > 0) {
1166             PetscCall(PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,(double)tt));
1167             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1168               eMinRad = tt;
1169               eMaxIdx = 0;
1170               eCellIdx[eMaxIdx++] = c;
1171             } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1172               eCellIdx[eMaxIdx++] = c;
1173             }
1174           }
1175           if ((tt=r-ctx->i_radius[grid]) > 0) {
1176             if (tt < iMinRad - 1.e-5) {
1177               iMinRad = tt;
1178               iMaxIdx = 0;
1179               iCellIdx[iMaxIdx++] = c;
1180             } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1181               iCellIdx[iMaxIdx++] = c;
1182             }
1183           }
1184         }
1185       }
1186     }
1187     for (k=0;k<nr;k++) {
1188       PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
1189     }
1190     if (ctx->sphere) {
1191       for (c = 0; c < eMaxIdx; c++) {
1192         PetscCall(DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE));
1193         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],(double)eMinRad));
1194       }
1195       for (c = 0; c < iMaxIdx; c++) {
1196         PetscCall(DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE));
1197         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],(double)iMinRad));
1198       }
1199     }
1200     PetscCall(PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],(double)minRad));
1201   } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1202     PetscScalar  *coef = NULL;
1203     Vec          coords;
1204     PetscInt     csize,Nv,d,nz;
1205     DM           cdm;
1206     PetscSection cs;
1207     PetscCall(DMGetCoordinatesLocal(forest, &coords));
1208     PetscCall(DMGetCoordinateDM(forest, &cdm));
1209     PetscCall(DMGetLocalSection(cdm, &cs));
1210     for (c = cStart; c < cEnd; c++) {
1211       PetscInt doit = 0, outside = 0;
1212       PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
1213       Nv = csize/dim;
1214       for (nz = d = 0; d < Nv; d++) {
1215         PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1216         x = PetscSqrtReal(x);
1217         if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1;             /* refine origin */
1218         else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++;   /* first pass don't refine bottom */
1219         else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1220         else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1221         if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1222       }
1223       PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
1224       if (doit || (outside<Nv && nz)) {
1225         PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
1226       }
1227     }
1228     PetscCall(PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM"));
1229   }
1230   PetscCall(DMDestroy(&plex));
1231   PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
1232   PetscCall(DMLabelDestroy(&adaptLabel));
1233   *newForest = adaptedDM;
1234   if (adaptedDM) {
1235     if (isForest) {
1236       PetscCall(DMForestSetAdaptivityForest(adaptedDM,NULL)); // detach the adapted forest from its pre-adaptation forest
1237     } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Adapted DM is not a forest (should be unreachable; the input was checked above)");
1238     PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
1239     PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd));
1240     PetscCall(PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart)));
1241     PetscCall(DMDestroy(&plex));
1242   } else *newForest = NULL;
1243   PetscFunctionReturn(0);
1244 }
1245 
1246 // forest goes in (ctx->plex[grid]), plex comes out
1247 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1248 {
1249   PetscInt        adaptIter;
1250 
1251   PetscFunctionBegin;
1252   PetscInt  type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]};
1253   for (type=0;type<5;type++) {
1254     for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1255       DM  newForest = NULL;
1256       PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
1257       if (newForest)  {
1258         PetscCall(DMDestroy(&ctx->plex[grid]));
1259         PetscCall(VecDestroy(uu));
1260         PetscCall(DMCreateGlobalVector(newForest,uu));
1261         PetscCall(PetscObjectSetName((PetscObject) *uu, "uAMR"));
1262         PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, ctx));
1263         ctx->plex[grid] = newForest;
1264       } else {
1265         SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Adaptation returned no DM; can happen with no AMR and post refinement");
1266       }
1267     }
1268   }
1269   PetscFunctionReturn(0);
1270 }
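/* The five phases above are driven by options parsed in ProcessOptions() below; a
   hypothetical invocation (illustrative values only):

     -dm_landau_amr_re_levels 2 -dm_landau_amr_z_refine1 1 -dm_landau_amr_levels_max 4,4
     -dm_landau_amr_z_refine2 1 -dm_landau_amr_post_refine 0

   would, on grid 0, run type 0 (runaway-electron refinement along z>0) twice, type 1
   once, type 2 (origin AMR) four times, type 3 once, and skip the uniform post-refinement. */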
1271 
1272 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1273 {
1274   PetscBool         flg, sph_flg;
1275   PetscInt          ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
1276   PetscReal         v0_grid[LANDAU_MAX_GRIDS];
1277   DM                dummy;
1278 
1279   PetscFunctionBegin;
1280   PetscCall(DMCreate(ctx->comm,&dummy));
1281   /* get options - initialize context */
1282   ctx->verbose = 1; // arguably should default to 0 to comply with PETSc's silent-by-default convention
1283 #if defined(PETSC_HAVE_THREADSAFETY)
1284   ctx->batch_sz = PetscNumOMPThreads;
1285 #else
1286   ctx->batch_sz = 1;
1287 #endif
1288   ctx->batch_view_idx = 0;
1289   ctx->interpolate    = PETSC_TRUE;
1290   ctx->gpu_assembly   = PETSC_TRUE;
1291   ctx->norm_state     = 0;
1292   ctx->electronShift  = 0;
1293   ctx->M              = NULL;
1294   ctx->J              = NULL;
1295   /* geometry and grids */
1296   ctx->sphere         = PETSC_FALSE;
1297   ctx->inflate        = PETSC_FALSE;
1298   ctx->use_p4est      = PETSC_FALSE;
1299   ctx->num_sections   = 3; /* 2, 3 or 4 */
1300   for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
1301     ctx->radius[grid]           = 5.; /* thermal radius (velocity) */
1302     ctx->numAMRRefine[grid]     = 5;
1303     ctx->postAMRRefine[grid]    = 0;
1304     ctx->species_offset[grid+1] = 1; // one species default
1305     num_species_grid[grid]      = 0;
1306     ctx->plex[grid] = NULL;     /* cache as expensive to Convert */
1307   }
1308   ctx->species_offset[0] = 0;
1309   ctx->re_radius         = 0.;
1310   ctx->vperp0_radius1    = 0;
1311   ctx->vperp0_radius2    = 0;
1312   ctx->nZRefine1         = 0;
1313   ctx->nZRefine2         = 0;
1314   ctx->numRERefine       = 0;
1315   num_species_grid[0]    = 1; // one species default
1316   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1317   ctx->charges[0]        = -1;  /* electron charge (MKS) */
1318   ctx->masses[0]         = 1/1835.469965278441013; /* temporary value in units of proton mass */
1319   ctx->n[0]              = 1;
1320   ctx->v_0               = 1; /* thermal velocity, we could start with a scale != 1 */
1321   ctx->thermal_temps[0]  = 1;
1322   /* constants, etc. */
1323   ctx->epsilon0          = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1324   ctx->k                 = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1325   ctx->lnLam             = 10;         /* Coulomb logarithm: cross-section ratio of large- to small-angle collisions */
1326   ctx->n_0               = 1.e20;        /* typical plasma n, but could set it to 1 */
1327   ctx->Ez                = 0;
1328   for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0;
1329   ctx->use_matrix_mass   =  PETSC_FALSE;
1330   ctx->use_relativistic_corrections = PETSC_FALSE;
1331   ctx->use_energy_tensor_trick      = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1332   ctx->SData_d.w         = NULL;
1333   ctx->SData_d.x         = NULL;
1334   ctx->SData_d.y         = NULL;
1335   ctx->SData_d.z         = NULL;
1336   ctx->SData_d.invJ      = NULL;
1337   ctx->jacobian_field_major_order     = PETSC_FALSE;
1338   ctx->SData_d.coo_elem_offsets       = NULL;
1339   ctx->SData_d.coo_elem_point_offsets = NULL;
1340   ctx->coo_assembly                   = PETSC_FALSE;
1341   ctx->SData_d.coo_elem_fullNb        = NULL;
1342   ctx->SData_d.coo_size               = 0;
1343   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1344   {
1345     char opstring[256];
1346 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1347     ctx->deviceType = LANDAU_KOKKOS;
1348     PetscCall(PetscStrcpy(opstring,"kokkos"));
1349 #elif defined(PETSC_HAVE_CUDA)
1350     ctx->deviceType = LANDAU_CUDA;
1351     PetscCall(PetscStrcpy(opstring,"cuda"));
1352 #else
1353     ctx->deviceType = LANDAU_CPU;
1354     PetscCall(PetscStrcpy(opstring,"cpu"));
1355 #endif
1356     PetscCall(PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL));
1357     PetscCall(PetscStrcmp("cpu",opstring,&flg));
1358     if (flg) {
1359       ctx->deviceType = LANDAU_CPU;
1360     } else {
1361       PetscCall(PetscStrcmp("cuda",opstring,&flg));
1362       if (flg) {
1363         ctx->deviceType = LANDAU_CUDA;
1364       } else {
1365         PetscCall(PetscStrcmp("kokkos",opstring,&flg));
1366         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1367         else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
1368       }
1369     }
1370   }
1371   PetscCall(PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL));
1372   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1373   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1374   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT,(PetscInt)LANDAU_MAX_BATCH_SZ,ctx->batch_sz);
1375   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1376   PetscCheck(ctx->batch_view_idx < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT,ctx->batch_view_idx,ctx->batch_sz);
1377   PetscCall(PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of the Connor-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL));
1378   PetscCall(PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL));
1379   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL));
1380   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MatAXPY() to add the mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1381   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1382   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL));
1383 
1384   /* get num species with temperature, set defaults */
1385   for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1386     ctx->thermal_temps[ii] = 1;
1387     ctx->charges[ii]       = 1;
1388     ctx->masses[ii]        = 1;
1389     ctx->n[ii]             = 1;
1390   }
1391   nt = LANDAU_MAX_SPECIES;
1392   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1393   if (flg) {
1394     PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt));
1395     ctx->num_species = nt;
1396   } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1397   for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1398   nm = LANDAU_MAX_SPECIES-1;
1399   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1400   if (flg && nm != ctx->num_species-1) {
1401     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT,nm,ctx->num_species-1);
1402   }
1403   nm = LANDAU_MAX_SPECIES;
1404   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1405   PetscCheck(!flg || nm == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT,nm,ctx->num_species);
1406   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1407   ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1408   ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1409   nc = LANDAU_MAX_SPECIES-1;
1410   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1411   if (flg) PetscCheck(nc == ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %" PetscInt_FMT " != num species %" PetscInt_FMT,nc,ctx->num_species-1);
1412   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1413   /* geometry and grids */
1414   nt = LANDAU_MAX_GRIDS;
1415   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg));
1416   if (flg) {
1417     ctx->num_grids = nt;
1418     for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii];
1419     PetscCheck(ctx->num_species == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS);
1420   } else {
1421     ctx->num_grids = 1; // go back to a single grid run
1422     num_species_grid[0] = ctx->num_species;
1423   }
1424   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii];
1425   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT,ctx->species_offset[ctx->num_grids],ctx->num_species);
1426   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1427     int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid
1428     v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1429   }
1430   ii = 0;
1431   PetscCall(PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL));
1432   ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1433   ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
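  /* spelled out (restating the note above): t_0 = 8 pi (epsilon_0 m_0 / e^2)^2 v_0^3 / (ln(Lambda) n_0),
     the reference collision time that makes nu[0,0] = 1 in these units */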
1434   /* domain */
1435   nt = LANDAU_MAX_GRIDS;
1436   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg));
1437   if (flg) PetscCheck(nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1438   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1439     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1440       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1441       else ctx->radius[grid] = -ctx->radius[grid];
1442       ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1443       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",(double)ctx->radius[grid],grid));
1444     }
1445     ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0
1446   }
1447   /* AMR parameters */
1448   nt = LANDAU_MAX_GRIDS;
1449   PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1450   PetscCheck(!flg || nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1451   nt = LANDAU_MAX_GRIDS;
1452   PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1453   for (ii=1;ii<ctx->num_grids;ii++)  ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1454   PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1455   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine1",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1456   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine2",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1457   PetscCall(PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg));
1458   PetscCall(PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg));
1459   PetscCall(PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1460   /* spherical domain (not used) */
1461   PetscCall(PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL));
1462   PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg));
1463   PetscCall(PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg));
1464   PetscCall(PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg));
1465   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* you gave me an e radius but did not set sphere, user error really */
1466   if (!flg) {
1467     ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
1468   }
1469   nt = LANDAU_MAX_GRIDS;
1470   PetscCall(PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg));
1471   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1472   if (!flg) {
1473     ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
1474   }
1475   if (flg) PetscCheck(ctx->num_grids == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %" PetscInt_FMT " != num_grids = %" PetscInt_FMT,nt,ctx->num_grids);
1476   if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",(double)ctx->i_radius[0],(double)ctx->e_radius,(double)ctx->radius[0]);
1477   /* processing options */
1478   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
1479   if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly is provided by Kokkos (including its CPU back-end)
1480     PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL));
1481     if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d",ctx->coo_assembly);
1482   }
1483   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1484   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1485   PetscOptionsEnd();
1486 
1487   for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii]  = ctx->charges[ii] = 0;
1488   if (ctx->verbose > 0) {
1489     PetscCall(PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n",(double)ctx->masses[0],(double)(ctx->masses[1]/1.6720e-27),(double)(ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0)));
1490     PetscCall(PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0],(double)(-ctx->charges[1]/ctx->charges[0]),(double)(ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0)));
1491     PetscCall(PetscPrintf(ctx->comm, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0],(double)ctx->n[1],(double)(ctx->num_species>2 ? ctx->n[2] : 0)));
1492     PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", (double)ctx->thermal_temps[0], (double)ctx->thermal_temps[1], (double)((ctx->num_species>2) ? ctx->thermal_temps[2] : 0), (double)ctx->v_0, (double)(ctx->v_0/SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz));
1493     PetscCall(PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %d: %10.3e (%" PetscInt_FMT ") ",0,(double)ctx->radius[0],ctx->numAMRRefine[0]));
1494     for (ii=1;ii<ctx->num_grids;ii++) PetscCall(PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,(double)ctx->radius[ii],ctx->numAMRRefine[ii]));
1495     PetscCall(PetscPrintf(ctx->comm,"\n"));
1496     if (ctx->jacobian_field_major_order) {
1497       PetscCall(PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n"));
1498     } else {
1499       PetscCall(PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n"));
1500     }
1501   }
1502   PetscCall(DMDestroy(&dummy));
1503   {
1504     PetscMPIInt    rank;
1505     PetscCallMPI(MPI_Comm_rank(ctx->comm, &rank));
1506     ctx->stage = 0;
1507     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */
1508     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */
1509     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */
1510     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */
1511     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1512     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1513     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */
1514     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */
1515     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */
1516     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */
1517     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */
1518     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */
1519     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */
1520     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */
1521     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1522     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */
1523     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */
1524 
1525     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1526       PetscCall(PetscOptionsClearValue(NULL,"-snes_converged_reason"));
1527       PetscCall(PetscOptionsClearValue(NULL,"-ksp_converged_reason"));
1528       PetscCall(PetscOptionsClearValue(NULL,"-snes_monitor"));
1529       PetscCall(PetscOptionsClearValue(NULL,"-ksp_monitor"));
1530       PetscCall(PetscOptionsClearValue(NULL,"-ts_monitor"));
1531       PetscCall(PetscOptionsClearValue(NULL,"-ts_view"));
1532       PetscCall(PetscOptionsClearValue(NULL,"-ts_adapt_monitor"));
1533       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view"));
1534       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view"));
1535       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view"));
1536       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_view"));
1537       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view"));
1538       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mat_view"));
1539       PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason"));
1540       PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor"));
1541       PetscCall(PetscOptionsClearValue(NULL,"-"));
1542       PetscCall(PetscOptionsClearValue(NULL,"-info"));
1543     }
1544   }
1545   PetscFunctionReturn(0);
1546 }
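/* A sketch of a typical option set consumed above (values are illustrative, not defaults):

     -dm_landau_thermal_temps 2,1 -dm_landau_n 1,1 -dm_landau_ion_masses 2
     -dm_landau_ion_charges 1 -dm_landau_num_species_grid 1,1 -dm_landau_device_type cpu

   declares two species (the number of thermal temps, in keV, sets the species count), one
   electron grid and one deuterium grid (mass 2 proton masses, charge 1), and CPU kernels. */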
1547 
1548 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1549 {
1550   PetscSection      section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
1551   PetscQuadrature   quad;
1552   const PetscReal   *quadWeights;
1553   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0, MAP_BF_SIZE = 64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES;
1554   PetscTabulation   *Tf;
1555   PetscDS           prob;
1556 
1557   PetscFunctionBegin;
1558   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
1559   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
1560   /* DS, Tab and quad is same on all grids */
1561   PetscCheck(ctx->plex[0],ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1562   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1563   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL,  &quadWeights));
1564   PetscCheck(Nq <= LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ);
1565   /* setup each grid */
1566   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1567     PetscInt cStart, cEnd;
1568     PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1569     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1570     numCells[grid] = cEnd - cStart; // grids can have different topology
1571     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1572     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1573     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1574     ncellsTot += numCells[grid];
1575   }
1576   /* create GPU assembly data */
1577   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1578     PetscContainer          container;
1579     PetscScalar             elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
1580     pointInterpolationP4est (*pointMaps)[LANDAU_MAX_Q_FACE];
1581     P4estVertexMaps         *maps;
1582     const PetscInt          *plex_batch=NULL,Nb=Nq; // tensor elements;
1583     LandauIdx               *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL;
1584     /* create GPU assembly data */
1585     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1));
1586     PetscCall(PetscLogEventBegin(ctx->events[2],0,0,0,0));
1587     PetscCall(PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps));
1588     PetscCall(PetscMalloc(sizeof(*pointMaps)*MAP_BF_SIZE, &pointMaps));
1589 
1590     if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d
1591       PetscCall(PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1592       coo_elem_offsets[0] = 0; // finish later
1593       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot));
1594       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1595       ctx->SData_d.coo_elem_offsets       = (void*)coo_elem_offsets;
1596       ctx->SData_d.coo_elem_fullNb        = (void*)coo_elem_fullNb;
1597       ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets;
1598     } else {
1599       ctx->SData_d.coo_elem_offsets       = ctx->SData_d.coo_elem_fullNb = NULL;
1600       ctx->SData_d.coo_elem_point_offsets = NULL;
1601       ctx->SData_d.coo_n_cellsTot         = 0;
1602     }
1603 
1604     ctx->SData_d.coo_max_fullnb = 0;
1605     for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1606       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq;
1607       if (grid_batch_is_inv[grid]) {
1608         PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch));
1609       }
1610       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1611       // make maps
1612       maps[grid].d_self       = NULL;
1613       maps[grid].num_elements = numCells[grid];
1614       maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
1615       maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2
1616       maps[grid].num_reduced  = 0;
1617       maps[grid].deviceType   = ctx->deviceType;
1618       maps[grid].numgrids     = ctx->num_grids;
1619       // count reduced and get
1620       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
1621       for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1622         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1623         for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1624           int fullNb = 0;
1625           for (int q = 0; q < Nb; ++q) {
1626             PetscInt    numindices,*indices;
1627             PetscScalar *valuesOrig = elMat = elemMatrix;
1628             PetscCall(PetscArrayzero(elMat, totDim*totDim));
1629             elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
1630             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat));
1631             for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
1632               if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
1633                 // found it
1634                 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1635                   if (plex_batch) {
1636                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]];
1637                   } else {
1638                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1639                   }
1640                   fullNb++;
1641                 } else { //found a constraint
1642                   int       jj      = 0;
1643                   PetscReal sum     = 0;
1644                   const PetscInt ff = f;
1645                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1646 
1647                   do {  // constraints are continuous in Plex - exploit that here
1648                     int ii; // get 'scale'
1649                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1650                       if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1651                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
1652                       }
1653                     }
1654                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1655                     // get 'gid'
1656                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1657                     else {
1658                       if (plex_batch) {
1659                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1660                       } else {
1661                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1662                       }
1663                       fullNb++;
1664                     }
1665                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1666                   while (jj < maps[grid].num_face) {
1667                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1668                     pointMaps[maps[grid].num_reduced][jj].gid = -1;
1669                     jj++;
1670                   }
1671                   if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug
1672                     int       d,f;
1673                     PetscReal tmp = 0;
1674                     PetscCall(PetscPrintf(PETSC_COMM_SELF,"\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n",eidx,q,fieldA,(double)sum,LANDAU_MAX_Q_FACE,maps[grid].num_face));
1675                     for (d = 0, tmp = 0; d < numindices; ++d) {
1676                       if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD,"%3d) %3" PetscInt_FMT ": ",d,indices[d]));
1677                       for (f = 0; f < numindices; ++f) {
1678                         tmp += PetscRealPart(elMat[d*numindices + f]);
1679                       }
1680                       if (tmp!=0) PetscCall(PetscPrintf(ctx->comm," | %22.16e\n",(double)tmp));
1681                     }
1682                   }
1683                   maps[grid].num_reduced++;
1684                   PetscCheck(maps[grid].num_reduced<MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %" PetscInt_FMT,maps[grid].num_reduced,MAP_BF_SIZE);
1685                 }
1686                 break;
1687               }
1688             }
1689             // cleanup
1690             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat));
1691             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat));
1692           }
1693           if (ctx->coo_assembly) { // setup COO assembly
1694             coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid
1695             if (fieldA==0) { // cache full Nb for this element, on this grid per species
1696               coo_elem_fullNb[glb_elem_idx] = fullNb;
1697               if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1698             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %d %d",coo_elem_fullNb[glb_elem_idx],fullNb);
1699           }
1700         } // field
1701       } // cell
1702       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1703       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1704       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1705         for (int q = 0; q < maps[grid].num_face; ++q) {
1706           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1707           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1708         }
1709       }
1710 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1711       if (ctx->deviceType == LANDAU_KOKKOS) {
1712         PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // implies Kokkos is built
1713       } // else could be CUDA
1714 #endif
1715 #if defined(PETSC_HAVE_CUDA)
1716       if (ctx->deviceType == LANDAU_CUDA) {
1717         PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid));
1718       }
1719 #endif
1720       if (plex_batch) {
1721         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1722         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1723       }
1724     } /* grids */
1725     // finish COO
1726     if (ctx->coo_assembly) { // setup COO assembly
1727       PetscInt *oor, *ooc;
1728       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz;
1729       PetscCall(PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc));
1730       for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1;
1731       // get
1732       for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1733         for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1734           const int              fullNb = coo_elem_fullNb[glb_elem_idx];
1735           const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should be the same, but this is just for COO storage
1736           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1737           for (int f=0, cnt2=0;f<Nb;f++) {
1738             int idx = Idxs[f];
1739             coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1740             if (idx >= 0) {
1741               cnt2++;
1742               coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1743             } else {
1744               idx = -idx - 1;
1745               for (int q = 0 ; q < maps[grid].num_face; q++) {
1746                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1747                 cnt2++;
1748                 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1749               }
1750             }
1751             PetscCheck(cnt2 <= fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d > %d",cnt2,fullNb);
1752           }
1753           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb]==fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d",coo_elem_point_offsets[glb_elem_idx][Nb],fullNb);
1754         }
1755       }
1756       // set
1757       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
1758         for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1759           const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
1760           for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1761             const int  fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
1762             // set (i,j)
1763             for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1764               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1765               int                    rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
1766               for (int f = 0; f < Nb; ++f) {
1767                 const int nr =  coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f];
1768                 if (nr==1) rows[0] = Idxs[f];
1769                 else {
1770                   const int idx = -Idxs[f] - 1;
1771                   for (int q = 0; q < nr; q++) {
1772                     rows[q] = maps[grid].c_maps[idx][q].gid;
1773                   }
1774                 }
1775                 for (int g = 0; g < Nb; ++g) {
1776                   const int nc =  coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g];
1777                   if (nc==1) cols[0] = Idxs[g];
1778                   else {
1779                     const int idx = -Idxs[g] - 1;
1780                     for (int q = 0; q < nc; q++) {
1781                       cols[q] = maps[grid].c_maps[idx][q].gid;
1782                     }
1783                   }
1784                   const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1785                   for (int q = 0, idx = idx0; q < nr; q++) {
1786                     for (int d = 0; d < nc; d++, idx++) {
1787                       oor[idx] = rows[q] + moffset;
1788                       ooc[idx] = cols[d] + moffset;
1789                     }
1790                   }
1791                 }
1792               }
1793             }
1794           } // cell
1795         } // grid
1796       } // batch
1797       PetscCall(MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc));
1798       PetscCall(PetscFree2(oor,ooc));
1799     }
1800     PetscCall(PetscFree(pointMaps));
1801     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1802     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1803     PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy));
1804     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container));
1805     PetscCall(PetscContainerDestroy(&container));
1806     PetscCall(PetscLogEventEnd(ctx->events[2],0,0,0,0));
1807   } // end GPU assembly
1808   { /* create static point data, Jacobian called first, only one vertex copy */
1809     PetscReal      *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a;
1810     PetscInt       outer_ipidx, outer_ej,grid, nip_glb = 0;
1811     PetscFE        fe;
1812     const PetscInt Nb = Nq;
1813     PetscCall(PetscLogEventBegin(ctx->events[7],0,0,0,0));
1814     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1815     for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid];
1816     /* collect f data, first time is for Jacobian, but make mass now */
1817     if (ctx->verbose > 0) {
1818       PetscInt ncells = 0, N;
1819       PetscCall(MatGetSize(ctx->J,&N,NULL));
1820       for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid];
1821       PetscCall(PetscPrintf(ctx->comm,"%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N));
1822     }
1823     PetscCall(PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a));
1824     if (dim==3) {
1825       PetscCall(PetscMalloc1(nip_glb,&zz));
1826     }
1827     if (ctx->use_energy_tensor_trick) {
1828       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
1829       PetscCall(PetscObjectSetName((PetscObject) fe, "energy"));
1830     }
1831     /* init each grids static data - no batch */
1832     for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once)
1833       Vec             v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1834       PetscSection    e_section;
1835       DM              dmEnergy;
1836       PetscInt        cStart, cEnd, ej;
1837 
1838       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1839       // prep energy trick, get v^2 / 2 vector
1840       if (ctx->use_energy_tensor_trick) {
1841         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1842         Vec            glob_v2;
1843         PetscReal      *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1844 
1845         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1846         PetscCall(PetscObjectSetName((PetscObject) dmEnergy, "energy"));
1847         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1848         PetscCall(DMCreateDS(dmEnergy));
1849         PetscCall(DMGetSection(dmEnergy, &e_section));
1850         PetscCall(DMGetGlobalVector(dmEnergy,&glob_v2));
1851         PetscCall(PetscObjectSetName((PetscObject) glob_v2, "trick"));
1852         c2_0[0] = &data[0];
1853         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2));
1854         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1855         PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */
1856         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1857         PetscCall(DMGlobalToLocalEnd  (dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1858         PetscCall(DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view"));
1859         PetscCall(VecViewFromOptions(glob_v2,NULL, "-energy_vec_view"));
1860         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1861       }
1862       /* append part of the IP data for each grid */
1863       for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) {
1864         PetscScalar *coefs = NULL;
1865         PetscReal    vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1866         invJe = invJ_a + outer_ej*Nq*dim*dim;
1867         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj));
1868         if (ctx->use_energy_tensor_trick) {
1869           PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs));
1870         }
1871         /* create static point data */
1872         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1873           const PetscInt  gidx = outer_ipidx;
1874           const PetscReal *invJ = &invJe[qj*dim*dim];
1875           ww[gidx] = detJj[qj] * quadWeights[qj];
1876           if (dim==2) ww[gidx] *= vj[qj * dim + 0];  /* cylindrical coordinate, w/o 2pi */
1877           // get xx, yy, zz
1878           if (ctx->use_energy_tensor_trick) {
1879             double                  refSpaceDer[3],eGradPhi[3];
1880             const PetscReal * const DD = Tf[0]->T[1];
1881             const PetscReal         *Dq = &DD[qj*Nb*dim];
1882             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1883             for (int b = 0; b < Nb; ++b) {
1884               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]);
1885             }
1886             xx[gidx] = 1e10;
1887             if (ctx->use_relativistic_corrections) {
1888               double dg2_c2 = 0;
1889               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1890               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1891               dg2_c2 *= (double)c02;
1892               if (dg2_c2 >= .999) {
1893                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1894                 yy[gidx] = vj[qj * dim + 1];
1895                 if (dim==3) zz[gidx] = vj[qj * dim + 2];
1896                 PetscCall(PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",(double)PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + ((dim==3) ? zz[gidx]*zz[gidx] : 0)), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)((dim==3) ? zz[gidx] : 0))); // guard zz: it is only allocated in 3D
1897               } else {
1898                 PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2);
1899                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1900                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1901               }
1902             }
1903             if (xx[gidx] == 1e10) {
1904               for (int d = 0; d < dim; ++d) {
1905                 for (int e = 0 ; e < dim; ++e) {
1906                   eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e];
1907                 }
1908               }
1909               xx[gidx] = eGradPhi[0];
1910               yy[gidx] = eGradPhi[1];
1911               if (dim==3) zz[gidx] = eGradPhi[2];
1912             }
1913           } else {
1914             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1915             yy[gidx] = vj[qj * dim + 1];
1916             if (dim==3) zz[gidx] = vj[qj * dim + 2];
1917           }
1918         } /* q */
1919         if (ctx->use_energy_tensor_trick) {
1920           PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs));
1921         }
1922       } /* ej */
1923       if (ctx->use_energy_tensor_trick) {
1924         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1925         PetscCall(DMDestroy(&dmEnergy));
1926       }
1927     } /* grid */
1928     if (ctx->use_energy_tensor_trick) {
1929       PetscCall(PetscFEDestroy(&fe));
1930     }
1931     /* cache static data */
1932     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1933 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1934       PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1935       for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1936         for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) {
1937           invMass[ii]  = ctx->m_0/ctx->masses[ii];
1938           nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii];
1939           nu_beta[ii]  = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
1940         }
1941       }
1942       if (ctx->deviceType == LANDAU_CUDA) {
1943 #if defined(PETSC_HAVE_CUDA)
1944         PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1945                                         nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1946 #else
1947         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type cuda not built");
1948 #endif
1949       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1950 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1951         PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1952                                           nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d));
1953 #else
1954         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type kokkos not built");
1955 #endif
1956       }
1957 #endif
1958       /* free */
1959       PetscCall(PetscFree4(ww,xx,yy,invJ_a));
1960       if (dim==3) PetscCall(PetscFree(zz));
1961     } else { /* CPU version, just copy in, only use part */
1962       ctx->SData_d.w = (void*)ww;
1963       ctx->SData_d.x = (void*)xx;
1964       ctx->SData_d.y = (void*)yy;
1965       ctx->SData_d.z = (void*)zz;
1966       ctx->SData_d.invJ = (void*)invJ_a;
1967     }
1968     PetscCall(PetscLogEventEnd(ctx->events[7],0,0,0,0));
1969   } // initialize
1970   PetscFunctionReturn(0);
1971 }
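/* A consumer-side sketch of the gIdx encoding built above (assumed from the COO set-up
   loop in this function): non-negative entries are global indices, while a negative entry
   encodes a row of c_maps via id = -(idx+1), i.e., idx = -id - 1:

     LandauIdx id = maps[grid].gIdx[e][field][q];
     if (id >= 0) {
       // regular point: id is the (possibly batch-permuted) global index
     } else {
       const int idx = -(int)id - 1; // constrained point
       for (int qf = 0; qf < maps[grid].num_face; qf++) {
         if (maps[grid].c_maps[idx][qf].gid < 0) break; // unused slot
         // contribute with weight maps[grid].c_maps[idx][qf].scale to row c_maps[idx][qf].gid
       }
     }
*/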
1972 
1973 /* < v, u > */
1974 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1975                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1976                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1977                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1978 {
1979   g0[0] = 1.;
1980 }
1981 
1982 /* < v, u > (fake integrand: returns a different value each call so no entry is dropped as numerically zero) */
1983 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1984                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1985                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1986                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1987 {
1988   static double ttt = 1;
1989   g0[0] = ttt++;
1990 }
1991 
1992 /* < v, 2 pi r u > (axisymmetric mass weak form) */
1993 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1994                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1995                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1996                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1997 {
1998   g0[0] = 2.*PETSC_PI*x[0];
1999 }
2000 
2001 static PetscErrorCode MatrixNfDestroy(void *ptr)
2002 {
2003   PetscInt *nf = (PetscInt *)ptr;
2004   PetscFunctionBegin;
2005   PetscCall(PetscFree(nf));
2006   PetscFunctionReturn(0);
2007 }
2008 
2009 static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
2010 {
2011   PetscInt       *idxs=NULL;
2012   Mat            subM[LANDAU_MAX_GRIDS];
2013 
2014   PetscFunctionBegin;
2015   if (!ctx->gpu_assembly) { /* nothing to do: this path only sets up the field-major reordering used with GPU assembly */
2016     PetscFunctionReturn(0);
2017   }
2018   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
2019   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2020     PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs));
2021   }
2022   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2023     const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid];
2024     Mat             gMat;
2025     DM              massDM;
2026     PetscDS         prob;
2027     Vec             tvec;
2028     // get "mass" matrix for reordering
2029     PetscCall(DMClone(ctx->plex[grid], &massDM));
2030     PetscCall(DMCopyFields(ctx->plex[grid], massDM));
2031     PetscCall(DMCreateDS(massDM));
2032     PetscCall(DMGetDS(massDM, &prob));
2033     for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2034       PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL));
2035     }
2036     PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2037     PetscCall(DMSetFromOptions(massDM));
2038     PetscCall(DMCreateMatrix(massDM, &gMat));
2039     PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2040     PetscCall(MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2041     PetscCall(MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2042     PetscCall(DMCreateLocalVector(ctx->plex[grid],&tvec));
2043     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
2044     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
2045     PetscCall(DMDestroy(&massDM));
2046     PetscCall(VecDestroy(&tvec));
2047     subM[grid] = gMat;
2048     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2049       MatOrderingType rtype = MATORDERINGRCM;
2050       IS              isrow,isicol;
2051       PetscCall(MatGetOrdering(gMat,rtype,&isrow,&isicol));
2052       PetscCall(ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid]));
2053       PetscCall(ISGetIndices(isrow, &values));
2054       for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // build the field-major permutation entries for each batch
2055 #if !defined(LANDAU_SPECIES_MAJOR)
2056         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N;
2057         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2058 #else
2059         PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n;
2060         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2061 #endif
2062       }
2063       PetscCall(ISRestoreIndices(isrow, &values));
2064       PetscCall(ISDestroy(&isrow));
2065       PetscCall(ISDestroy(&isicol));
2066     }
2067   }
2068   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2069     PetscCall(ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is));
2070   }
2071   // get a block matrix
2072   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2073     Mat               B = subM[grid];
2074     PetscInt          nloc, nzl, colbuf[1024], row;
2075     PetscCall(MatGetSize(B, &nloc, NULL));
2076     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2077       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2078       const PetscInt    *cols;
2079       const PetscScalar *vals;
2080       for (int i=0 ; i<nloc ; i++) {
2081         PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
2082         PetscCheck(nzl<=1024,comm, PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
2083         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2084         row = i + moffset;
2085         PetscCall(MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES));
2086         PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
2087       }
2088     }
2089   }
2090   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2091     PetscCall(MatDestroy(&subM[grid]));
2092   }
2093   PetscCall(MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY));
2094   PetscCall(MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY));
2095 
2096   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2097     Mat            mat_block_order;
2098     PetscCall(MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order)); // TODO: could use MatPermute instead
2099     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_field_major_mat_view"));
2100     PetscCall(MatDestroy(&ctx->J));
2101     ctx->J = mat_block_order;
2102     // override ops to make KSP work in field major space
2103     ctx->seqaij_mult                  = mat_block_order->ops->mult;
2104     mat_block_order->ops->mult        = LandauMatMult;
2105     mat_block_order->ops->multadd     = LandauMatMultAdd;
2106     ctx->seqaij_solve                 = NULL;
2107     ctx->seqaij_getdiagonal           = mat_block_order->ops->getdiagonal;
2108     mat_block_order->ops->getdiagonal = LandauMatGetDiagonal;
2109     ctx->seqaij_multtranspose         = mat_block_order->ops->multtranspose;
2110     mat_block_order->ops->multtranspose = LandauMatMultTranspose;
2111     PetscCall(VecDuplicate(X,&ctx->work_vec));
2112     PetscCall(VecScatterCreate(X, ctx->batch_is, ctx->work_vec, NULL, &ctx->plex_batch));
2113   }
2114 
2115   PetscFunctionReturn(0);
2116 }
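
/*
  Ordering sketch (illustrative numbers, not from the code): with batch_sz = 2 and two
  grids of sizes n0 and n1 (mat_offset = {0, n0, n0+n1}), the default batch-major
  packing is
      [ b0:g0 | b0:g1 | b1:g0 | b1:g1 ],
  so block (b,g) starts at b*(n0+n1) + mat_offset[g]; with LANDAU_SPECIES_MAJOR it is
      [ g0:b0 | g0:b1 | g1:b0 | g1:b1 ],
  starting at mat_offset[g]*batch_sz + b*n_g. LAND_MOFFSET encapsulates this choice,
  and the 'idxs' permutation built above additionally applies the per-grid RCM ordering.
*/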
2117 
2118 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2119 /*@C
2120  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
2121 
2122  Collective on comm
2123 
2124  Input Parameters:
2125  +   comm  - The MPI communicator
2126  .   dim - velocity space dimension (2 for axisymmetric, 3 for the full 3V solver, e.g., in a 3X + 3V code)
2127  -   prefix - prefix for options (not tested)
2128 
2129  Output Parameters:
2130  +   pack  - The DM object representing the mesh
2131  .   X - A vector (the user destroys it)
2132  -   J - Optional matrix (the object destroys it)
2133 
2134  Level: beginner
2135 
2136  .keywords: mesh
2137  .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
2138  @*/
2139 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2140 {
2141   LandauCtx      *ctx;
2142   Vec            Xsub[LANDAU_MAX_GRIDS];
2143   IS             grid_batch_is_inv[LANDAU_MAX_GRIDS];
2144 
2145   PetscFunctionBegin;
2146   PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
2147   PetscCheck(LANDAU_DIM == dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d",dim,LANDAU_DIM);
2148   PetscCall(PetscNew(&ctx));
2149   ctx->comm = comm; /* used for diagnostics and global errors */
2150   /* process options */
2151   PetscCall(ProcessOptions(ctx,prefix));
2152   if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
2153   /* Create Mesh */
2154   PetscCall(DMCompositeCreate(PETSC_COMM_SELF,pack));
2155   PetscCall(PetscLogEventBegin(ctx->events[13],0,0,0,0));
2156   PetscCall(PetscLogEventBegin(ctx->events[15],0,0,0,0));
2157   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates the grids (forests, if using AMR)
2158   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2159     /* create FEM */
2160     PetscCall(SetupDS(ctx->plex[grid],dim,grid,ctx));
2161     /* set initial state */
2162     PetscCall(DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]));
2163     PetscCall(PetscObjectSetName((PetscObject) Xsub[grid], "u_orig"));
2164     /* initial static refinement, no solve */
2165     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx));
2166     /* forest refinement - forest goes in (if forest), plex comes out */
2167     if (ctx->use_p4est) {
2168       DM plex;
2169       PetscCall(adapt(grid,ctx,&Xsub[grid])); // forest goes in, plex comes out
2170       PetscCall(DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view")); // need to differentiate - todo
2171       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
2172       // convert to plex, all done with this level
2173       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
2174       PetscCall(DMDestroy(&ctx->plex[grid]));
2175       ctx->plex[grid] = plex;
2176     }
2177 #if !defined(LANDAU_SPECIES_MAJOR)
2178     PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2179 #else
2180     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2181       PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2182     }
2183 #endif
2184     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
2185   }
2186 #if !defined(LANDAU_SPECIES_MAJOR)
2187   // stack the remaining batched DMs (b_id = 0 was added above)
2188   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2189     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2190       PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2191     }
2192   }
2193 #endif
2194   // create ctx->mat_offset
2195   ctx->mat_offset[0] = 0;
2196   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2197     PetscInt    n;
2198     PetscCall(VecGetLocalSize(Xsub[grid],&n));
2199     ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n;
2200   }
2201   // create DM & Jacobian
2202   PetscCall(DMSetApplicationContext(*pack, ctx));
2203   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2204   PetscCall(DMSetFromOptions(*pack));
2205   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2206   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2207   PetscCall(MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2208   PetscCall(MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2209   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2210   // construct initial conditions in X
2211   PetscCall(DMCreateGlobalVector(*pack,X));
2212   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2213     PetscInt n;
2214     PetscCall(VecGetLocalSize(Xsub[grid],&n));
2215     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2216       PetscScalar const *values;
2217       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2218       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx));
2219       PetscCall(VecGetArrayRead(Xsub[grid],&values));
2220       for (int i=0, idx = moffset; i<n; i++, idx++) {
2221         PetscCall(VecSetValue(*X,idx,values[i],INSERT_VALUES));
2222       }
2223       PetscCall(VecRestoreArrayRead(Xsub[grid],&values));
2224     }
2225   }
2226   // cleanup
2227   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2228     PetscCall(VecDestroy(&Xsub[grid]));
2229   }
2230   /* check for correct matrix type */
2231   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2232     PetscBool flg;
2233     if (ctx->deviceType == LANDAU_CUDA) {
2234       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,""));
2235       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
2236     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2237       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,""));
2238 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2239       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2240 #else
2241       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2242 #endif
2243     }
2244   }
2245   PetscCall(PetscLogEventEnd(ctx->events[15],0,0,0,0));
2246   // create field major ordering
2247 
2248   ctx->work_vec   = NULL;
2249   ctx->plex_batch = NULL;
2250   ctx->batch_is   = NULL;
2251   for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL;
2252   PetscCall(PetscLogEventBegin(ctx->events[12],0,0,0,0));
2253   PetscCall(LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx));
2254   PetscCall(PetscLogEventEnd(ctx->events[12],0,0,0,0));
2255 
2256   // create AMR GPU assembly maps and static GPU data
2257   PetscCall(CreateStaticGPUData(dim,grid_batch_is_inv,ctx));
2258 
2259   PetscCall(PetscLogEventEnd(ctx->events[13],0,0,0,0));
2260 
2261   // create mass matrix
2262   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));
2263 
2264   if (J) *J = ctx->J;
2265 
2266   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2267     PetscContainer container;
2268     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2269     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2270     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2271     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container));
2272     PetscCall(PetscContainerDestroy(&container));
2273     // batch solvers need this map to recover the plex ordering
2274     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2275     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2276     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container));
2277     PetscCall(PetscContainerDestroy(&container));
2278   }
2279   // for batch solvers
2280   {
2281     PetscContainer  container;
2282     PetscInt        *pNf;
2283     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2284     PetscCall(PetscMalloc1(1, &pNf)); /* PetscMalloc1 takes an element count, not a byte size */
2285     *pNf = ctx->batch_sz;
2286     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2287     PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy));
2288     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container));
2289     PetscCall(PetscContainerDestroy(&container));
2290   }
2291 
2292   PetscFunctionReturn(0);
2293 }
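
/*
  Hedged usage sketch (illustrative, not compiled; based on the calls in this file and
  the standard TS API -- the variable names are assumptions):

    DM  pack;
    Vec X;
    Mat J;
    TS  ts;
    PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack));
    PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
    PetscCall(TSSetDM(ts, pack));
    PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
    PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
    PetscCall(TSSetFromOptions(ts));
    PetscCall(TSSolve(ts, X));
    PetscCall(TSDestroy(&ts));
    PetscCall(VecDestroy(&X));
    PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
*/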
2294 
2295 /*@
2296  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2297 
2298  Collective on dm
2299 
2300  Input/Output Parameter:
2301 .   dm - the dm to destroy
2302 
2303  Level: beginner
2304 
2305  .keywords: mesh
2306  .seealso: `DMPlexLandauCreateVelocitySpace()`
2307  @*/
2308 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2309 {
2310   LandauCtx      *ctx;
2311   PetscFunctionBegin;
2312   PetscCall(DMGetApplicationContext(*dm, &ctx));
2313   PetscCall(MatDestroy(&ctx->M));
2314   PetscCall(MatDestroy(&ctx->J));
2315   for (PetscInt ii=0;ii<ctx->num_species;ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2316   PetscCall(ISDestroy(&ctx->batch_is));
2317   PetscCall(VecDestroy(&ctx->work_vec));
2318   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2319   if (ctx->deviceType == LANDAU_CUDA) {
2320 #if defined(PETSC_HAVE_CUDA)
2321     PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d));
2322 #else
2323     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s not built","cuda");
2324 #endif
2325   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2326 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2327     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2328 #else
2329     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s not built","kokkos");
2330 #endif
2331   } else {
2332     if (ctx->SData_d.x) { /* in a CPU run */
2333       PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
2334       LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
2335       PetscCall(PetscFree4(ww,xx,yy,invJ));
2336       if (zz) PetscCall(PetscFree(zz));
2337       if (coo_elem_offsets) {
2338         PetscCall(PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets)); // could be NULL
2339       }
2340     }
2341   }
2342 
2343   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2344     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE]));
2345     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz));
2346     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL]));
2347     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN]));
2348     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n",ctx->times[LANDAU_OPERATOR]));
2349     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n",ctx->times[LANDAU_MASS]));
2350     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n",ctx->times[LANDAU_F_DF]));
2351     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n",ctx->times[LANDAU_KERNEL]));
2352     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n",ctx->times[KSP_FACTOR]));
2353     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n",ctx->times[KSP_SOLVE]));
2354   }
2355   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2356     PetscCall(DMDestroy(&ctx->plex[grid]));
2357   }
2358   PetscCall(PetscFree(ctx));
2359   PetscCall(DMDestroy(dm));
2360   PetscFunctionReturn(0);
2361 }
2362 
2363 /* < v, u > (density) */
2364 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2365                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2366                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2367                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2368 {
2369   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2370   f0[0] = u[ii];
2371 }
2372 
2373 /* < v, x_j u > (momentum component) */
2374 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2375                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2376                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2377                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2378 {
2379   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2380   f0[0] = x[jj]*u[ii]; /* x momentum */
2381 }
2382 
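/* < v, |v|^2 u > (energy) */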
2383 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2384                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2385                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2386                     PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2387 {
2388   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2389   double tmp1 = 0.;
2390   for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2391   f0[0] = tmp1*u[ii];
2392 }
2393 
2394 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2395 {
2396   const PetscReal *c2_0_arr = ((PetscReal*)actx);
2397   const PetscReal c02 = c2_0_arr[0];
2398 
2399   PetscFunctionBegin;
2400   for (int s = 0 ; s < Nf ; s++) {
2401     PetscReal tmp1 = 0.;
2402     for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2403 #if defined(PETSC_USE_DEBUG)
2404     u[s] = PetscSqrtReal(1. + tmp1/c02);
2405 #else
2406     {
2407       PetscReal xx = tmp1/c02;
2408       u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // equals PetscSqrtReal(1. + xx) - 1, better conditioned for small xx
2409     }
2410 #endif
2411   }
2412   PetscFunctionReturn(0);
2413 }
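
/*
  Why the optimized branch above is better conditioned: with xx = |v|^2/c_0^2,
      sqrt(1 + xx) - 1 = ((sqrt(1+xx) - 1)(sqrt(1+xx) + 1))/(sqrt(1+xx) + 1)
                       = xx/(sqrt(1+xx) + 1),
  which avoids the catastrophic cancellation in sqrt(1+xx) - 1 when xx << 1.
*/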
2414 
2415 /* < v, 2 pi r u > (axisymmetric density) */
2416 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2417                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2418                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2419                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2420 {
2421   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2422   f0[0] = 2.*PETSC_PI*x[0]*u[ii];
2423 }
2424 
2425 /* < v, 2 pi r z u > (axisymmetric z-momentum) */
2426 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2427                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2428                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2429                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2430 {
2431   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2432   f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
2433 }
2434 
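/* < v, 2 pi r |v|^2 u > (axisymmetric energy) */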
2435 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2436                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2437                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2438                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2439 {
2440   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2441   f0[0] =  2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
2442 }
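
/*
  Minimal sketch (assumed names, not compiled) of evaluating one moment with the
  functionals above, mirroring DMPlexLandauPrintNorms() below; 'grid', 'i0', and
  'Xloc' are placeholders:

    PetscDS     prob;
    PetscScalar user[2] = {(PetscScalar)i0, 0.}, tt[LANDAU_MAX_SPECIES];
    PetscCall(DMGetDS(ctx->plex[grid], &prob));
    PetscCall(PetscDSSetConstants(prob, 2, user));       // constants[0] selects the species field
    PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden)); // axisymmetric density integrand
    PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
    // tt[0] is the dimensionless moment; scale by ctx->n_0 etc. as done below
*/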
2443 
2444 /*@
2445  DMPlexLandauPrintNorms - computes the moments (density, momentum, energy) and prints them
2446 
2447  Collective on dm
2448 
2449  Input Parameters:
2450  +   X  - the state
2451  -   stepi - current step to print
2452 
2453  Level: beginner
2454 
2455  .keywords: mesh
2456  .seealso: `DMPlexLandauCreateVelocitySpace()`
2457  @*/
2458 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2459 {
2460   LandauCtx      *ctx;
2461   PetscDS        prob;
2462   DM             pack;
2463   PetscInt       cStart, cEnd, dim, ii, i0, nDMs;
2464   PetscScalar    xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
2465   PetscScalar    xmomentum[LANDAU_MAX_SPECIES],  ymomentum[LANDAU_MAX_SPECIES],  zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2466   Vec            *globXArray;
2467 
2468   PetscFunctionBegin;
2469   PetscCall(VecGetDM(X, &pack));
2470   PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2471   PetscCall(DMGetDimension(pack, &dim));
2472   PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]",dim);
2473   PetscCall(DMGetApplicationContext(pack, &ctx));
2474   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2475   /* print momentum and energy */
2476   PetscCall(DMCompositeGetNumberDM(pack,&nDMs));
2477   PetscCheck(nDMs == ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "wrong number of DMs: %" PetscInt_FMT " != %" PetscInt_FMT,nDMs,ctx->num_grids*ctx->batch_sz);
2478   PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
2479   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2480   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
2481     Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2482     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2483     for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
2484       PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2485       PetscCall(PetscDSSetConstants(prob, 2, user));
2486       if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */
2487         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2488         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2489         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2490         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2491         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2492         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2493         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2494         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2495         energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2496         zmomentumtot += zmomentum[ii];
2497         energytot  += energy[ii];
2498         densitytot += density[ii];
2499         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii])));
2500       } else { /* 2/3Xloc + 3V */
2501         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2502         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2503         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2504         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2505         user[1] = 0;
2506         PetscCall(PetscDSSetConstants(prob, 2, user));
2507         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2508         xmomentum[ii]  = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2509         user[1] = 1;
2510         PetscCall(PetscDSSetConstants(prob, 2, user));
2511         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2512         ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2513         user[1] = 2;
2514         PetscCall(PetscDSSetConstants(prob, 2, user));
2515         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2516         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2517         if (ctx->use_relativistic_corrections) {
2518           /* gamma * M * f */
2519           if (ii==0 && grid==0) { // do all at once
2520             Vec            Mf, globGamma, *globMfArray, *globGammaArray;
2521             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
2522             PetscReal      *c2_0[1], data[1];
2523 
2524             PetscCall(VecDuplicate(X,&globGamma));
2525             PetscCall(VecDuplicate(X,&Mf));
2526             PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray));
2527             PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globGammaArray));
2528             /* M * f */
2529             PetscCall(MatMult(ctx->M,X,Mf));
2530             /* gamma */
2531             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2532             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // a grid loop inside the grid loop, to print nicely; needs fixing for batching
2533               Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2534               data[0] = PetscSqr(C_0(ctx->v_0));
2535               c2_0[0] = &data[0];
2536               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1));
2537             }
2538             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2539             /* gamma * Mf */
2540             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2541             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2542             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // a grid loop inside the grid loop, to print nicely
2543               PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
2544               Vec      Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2;
2545               // get each component
2546               PetscCall(VecGetSize(Mfsub,&N));
2547               PetscCall(VecCreate(ctx->comm,&v1));
2548               PetscCall(VecSetSizes(v1,PETSC_DECIDE,N/Nf));
2549               PetscCall(VecCreate(ctx->comm,&v2));
2550               PetscCall(VecSetSizes(v2,PETSC_DECIDE,N/Nf));
2551               PetscCall(VecSetFromOptions(v1)); // set the Vec type from options
2552               PetscCall(VecSetFromOptions(v2));
2553               // check that the block sizes match the species count
2554               PetscCall(VecGetBlockSize(Gsub,&bs));
2555               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub",bs,Nf);
2556               PetscCall(VecGetBlockSize(Mfsub,&bs));
2557               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT,bs,Nf);
2558               for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
2559                 PetscScalar val;
2560                 PetscCall(VecStrideGather(Gsub,i,v1,INSERT_VALUES));
2561                 PetscCall(VecStrideGather(Mfsub,i,v2,INSERT_VALUES));
2562                 PetscCall(VecDot(v1,v2,&val));
2563                 energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
2564               }
2565               PetscCall(VecDestroy(&v1));
2566               PetscCall(VecDestroy(&v2));
2567             } /* grids */
2568             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2569             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2570             PetscCall(PetscFree(globGammaArray));
2571             PetscCall(PetscFree(globMfArray));
2572             PetscCall(VecDestroy(&globGamma));
2573             PetscCall(VecDestroy(&Mf));
2574           }
2575         } else {
2576           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2577           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2578           energy[ii]    = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2579         }
2580         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(xmomentum[ii]),(double)PetscRealPart(ymomentum[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii])));
2581         xmomentumtot += xmomentum[ii];
2582         ymomentumtot += ymomentum[ii];
2583         zmomentumtot += zmomentum[ii];
2584         energytot    += energy[ii];
2585         densitytot   += density[ii];
2586       }
2587       if (ctx->num_species>1) PetscCall(PetscPrintf(ctx->comm, "\n"));
2588     }
2589   }
2590   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2591   PetscCall(PetscFree(globXArray));
2592   /* totals */
2593   PetscCall(DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd));
2594   if (ctx->num_species>1) {
2595     if (dim==2) {
2596       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart));
2597     } else {
2598       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart));
2599     }
2600   } else PetscCall(PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart));
2601   PetscCall(PetscPrintf(ctx->comm,"\n"));
2602   PetscFunctionReturn(0);
2603 }
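
/*
  Hedged sketch (assumed names): calling DMPlexLandauPrintNorms() from a TS monitor:

    static PetscErrorCode ExampleMonitor(TS ts, PetscInt step, PetscReal t, Vec X, void *actx)
    {
      PetscFunctionBegin;
      PetscCall(DMPlexLandauPrintNorms(X, step));
      PetscFunctionReturn(0);
    }
    // ... PetscCall(TSMonitorSet(ts, ExampleMonitor, NULL, NULL));
*/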
2604 
2605 /*@
2606  DMPlexLandauCreateMassMatrix - Create the mass matrix for the Landau operator, in Plex ordering (not the field-major ordering of the Jacobian)
2607 
2608  Collective on pack
2609 
2610  Input Parameter:
2611 . pack     - the DM object
2612 
2613  Output Parameter:
2614 . Amat - The mass matrix (optional); the mass matrix is also stored in the DM context
2615 
2616  Level: beginner
2617 
2618  .keywords: mesh
2619  .seealso: `DMPlexLandauCreateVelocitySpace()`
2620  @*/
2621 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2622 {
2623   DM             mass_pack,massDM[LANDAU_MAX_GRIDS];
2624   PetscDS        prob;
2625   PetscInt       ii,dim,N1=1,N2;
2626   LandauCtx      *ctx;
2627   Mat            packM,subM[LANDAU_MAX_GRIDS];
2628 
2629   PetscFunctionBegin;
2630   PetscValidHeaderSpecific(pack,DM_CLASSID,1);
2631   if (Amat) PetscValidPointer(Amat,2);
2632   PetscCall(DMGetApplicationContext(pack, &ctx));
2633   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2634   PetscCall(PetscLogEventBegin(ctx->events[14],0,0,0,0));
2635   PetscCall(DMGetDimension(pack, &dim));
2636   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack));
2637   /* create pack mass matrix */
2638   for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
2639     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2640     PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid]));
2641     PetscCall(DMCreateDS(massDM[grid]));
2642     PetscCall(DMGetDS(massDM[grid], &prob));
2643     for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2644       if (dim==3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2645       else        PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2646     }
2647 #if !defined(LANDAU_SPECIES_MAJOR)
2648     PetscCall(DMCompositeAddDM(mass_pack,massDM[grid]));
2649 #else
2650     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2651       PetscCall(DMCompositeAddDM(mass_pack,massDM[grid]));
2652     }
2653 #endif
2654     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2655   }
2656 #if !defined(LANDAU_SPECIES_MAJOR)
2657   // stack the batched DMs
2658   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2659     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2660       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2661     }
2662   }
2663 #endif
2664   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2665   PetscCall(DMSetFromOptions(mass_pack));
2666   PetscCall(DMCreateMatrix(mass_pack, &packM));
2667   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2668   PetscCall(MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2669   PetscCall(MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2670   PetscCall(DMDestroy(&mass_pack));
2671   /* make mass matrix for each block */
2672   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2673     Vec locX;
2674     DM  plex = massDM[grid];
2675     PetscCall(DMGetLocalVector(plex, &locX));
2676     /* Mass matrix is independent of the input, so no need to fill locX */
2677     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2678     PetscCall(DMRestoreLocalVector(plex, &locX));
2679     PetscCall(DMDestroy(&massDM[grid]));
2680   }
2681   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2682   PetscCall(MatGetSize(packM, &N2, NULL));
2683   PetscCheck(N1 == N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT,N1,N2);
2684   /* assemble block diagonals */
2685   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2686     Mat               B = subM[grid];
2687     PetscInt          nloc, nzl, colbuf[1024], row;
2688     PetscCall(MatGetSize(B, &nloc, NULL));
2689     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2690       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2691       const PetscInt    *cols;
2692       const PetscScalar *vals;
2693       for (int i=0 ; i<nloc ; i++) {
2694         PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
2695         PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
2696         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2697         row = i + moffset;
2698         PetscCall(MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES));
2699         PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
2700       }
2701     }
2702   }
2703   // cleanup
2704   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2705     PetscCall(MatDestroy(&subM[grid]));
2706   }
2707   PetscCall(MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY));
2708   PetscCall(MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY));
2709   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2710   PetscCall(MatViewFromOptions(packM,NULL,"-dm_landau_mass_view"));
2711   ctx->M = packM;
2712   if (Amat) *Amat = packM;
2713   PetscCall(PetscLogEventEnd(ctx->events[14],0,0,0,0));
2714   PetscFunctionReturn(0);
2715 }
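
/*
  Note: DMPlexLandauCreateVelocitySpace() already calls this with Amat = NULL, caching
  the result in ctx->M. A caller that wants a handle can do (sketch):

    Mat M;
    PetscCall(DMPlexLandauCreateMassMatrix(pack, &M)); // M is also cached in the DM context
*/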
2716 
2717 /*@
2718  DMPlexLandauIFunction - TS residual calculation; confusingly, this also computes and caches the collision Jacobian (without the mass term)
2719 
2720  Collective on ts
2721 
2722  Input Parameters:
2723 +   ts  - The time stepping context
2724 .   time_dummy - current time (not used)
2725 .   X - Current state
2726 .   X_t - Time derivative of current state
2727 -   actx - Landau context
2728 
2729  Output Parameter:
2730 .   F  - The residual
2731 
2732  Level: beginner
2733 
2734  .keywords: mesh
2735  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2736  @*/
2737 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2738 {
2739   LandauCtx        *ctx=(LandauCtx*)actx;
2740   PetscInt         dim;
2741   DM               pack;
2742 #if defined(PETSC_HAVE_THREADSAFETY)
2743   double           starttime, endtime;
2744 #endif
2745   PetscObjectState state;
2746 
2747   PetscFunctionBegin;
2748   PetscCall(TSGetDM(ts,&pack));
2749   PetscCall(DMGetApplicationContext(pack, &ctx));
2750   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2751   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2752   PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0));
2753   PetscCall(PetscLogEventBegin(ctx->events[0],0,0,0,0));
2754 #if defined(PETSC_HAVE_THREADSAFETY)
2755   starttime = MPI_Wtime();
2756 #endif
2757   PetscCall(DMGetDimension(pack, &dim));
2758   PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state));
2759   if (state != ctx->norm_state) {
2760     PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n",(double)time_dummy, ctx->norm_state, state));
2761     PetscCall(MatZeroEntries(ctx->J));
2762     PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx));
2763     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2764     PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state));
2765     ctx->norm_state = state;
2766   } else {
2767     PetscCall(PetscInfo(ts, "WARNING skipping Jacobian formation, state %" PetscInt64_FMT " has not changed\n",state));
2768   }
2769   /* mat vec for op */
2770   PetscCall(MatMult(ctx->J,X,F)); /* C*f */
2771   /* add time term */
2772   if (X_t) PetscCall(MatMultAdd(ctx->M,X_t,F,F));
2773 #if defined(PETSC_HAVE_THREADSAFETY)
2774   if (ctx->stage) {
2775     endtime = MPI_Wtime();
2776     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2777     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2778     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2779   }
2780 #endif
2781   PetscCall(PetscLogEventEnd(ctx->events[0],0,0,0,0));
2782   PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0));
2783   if (ctx->stage) {
2784     PetscCall(PetscLogStagePop());
2785 #if defined(PETSC_HAVE_THREADSAFETY)
2786     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2787 #endif
2788   }
2789   PetscFunctionReturn(0);
2790 }
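
/*
  The TS residual assembled above is F(t, f, f_t) = M f_t + C(f) f, where C(f) is the
  Landau collision operator held in ctx->J (formed without the mass term, i.e. shift = 0).
*/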
2791 
2792 /*@
2793  DMPlexLandauIJacobian - TS Jacobian construction; confusingly, this only adds the (shifted) mass term to the cached collision Jacobian
2794 
2795  Collective on ts
2796 
2797  Input Parameters:
2798 +   ts  - The time stepping context
2799 .   time_dummy - current time (not used)
2800 .   X - Current state
2801 .   U_tdummy - Time derivative of current state (not used)
2802 .   shift - shift for du/dt term
2803 -   actx - Landau context
2804 
2805  Output Parameters:
2806 +   Amat  - Jacobian
2807 -   Pmat  - same as Amat
2808 
2809  Level: beginner
2810 
2811  .keywords: mesh
2812  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2813  @*/
2814 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2815 {
2816   LandauCtx        *ctx=NULL;
2817   PetscInt         dim;
2818   DM               pack;
2819 #if defined(PETSC_HAVE_THREADSAFETY)
2820   double           starttime, endtime;
2821 #endif
2822   PetscObjectState state;
2823 
2824   PetscFunctionBegin;
2825   PetscCall(TSGetDM(ts,&pack));
2826   PetscCall(DMGetApplicationContext(pack, &ctx));
2827   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2828   PetscCheck(Amat == Pmat && Amat == ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2829   PetscCall(DMGetDimension(pack, &dim));
2830   /* get collision Jacobian into A */
2831   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2832   PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0));
2833   PetscCall(PetscLogEventBegin(ctx->events[9],0,0,0,0));
2834 #if defined(PETSC_HAVE_THREADSAFETY)
2835   starttime = MPI_Wtime();
2836 #endif
2837   PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift));
2838   PetscCheck(shift!=0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift");
2839   PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state));
2840   PetscCheck(state == ctx->norm_state,ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "",ctx->norm_state,state);
2841   if (!ctx->use_matrix_mass) {
2842     PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx));
2843     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
2844   } else { /* add mass */
2845     PetscCall(MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN));
2846   }
2847 #if defined(PETSC_HAVE_THREADSAFETY)
2848   if (ctx->stage) {
2849     endtime = MPI_Wtime();
2850     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2851     ctx->times[LANDAU_MASS] += (endtime - starttime);
2852   }
2853 #endif
2854   PetscCall(PetscLogEventEnd(ctx->events[9],0,0,0,0));
2855   PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0));
2856   if (ctx->stage) {
2857     PetscCall(PetscLogStagePop());
2858 #if defined(PETSC_HAVE_THREADSAFETY)
2859     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2860 #endif
2861   }
2862   PetscFunctionReturn(0);
2863 }
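
/*
  TS IJacobian contract: the matrix returned approximates
      J = shift * dF/d(f_t) + dF/df = shift * M + d(C(f) f)/df,
  which is why a zero shift is rejected above, and why the use_matrix_mass path only
  adds shift * M to the collision Jacobian cached by DMPlexLandauIFunction.
*/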
2864