1 #include <../src/mat/impls/aij/seq/aij.h> 2 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/ 3 #include <petsclandau.h> /*I "petsclandau.h" I*/ 4 #include <petscts.h> 5 #include <petscdmforest.h> 6 #include <petscdmcomposite.h> 7 8 /* Landau collision operator */ 9 10 /* relativistic terms */ 11 #if defined(PETSC_USE_REAL_SINGLE) 12 #define SPEED_OF_LIGHT 2.99792458e8F 13 #define C_0(v0) (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */ 14 #else 15 #define SPEED_OF_LIGHT 2.99792458e8 16 #define C_0(v0) (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */ 17 #endif 18 19 #include "land_tensors.h" 20 21 #if defined(PETSC_HAVE_OPENMP) 22 #include <omp.h> 23 #endif 24 25 static PetscErrorCode LandauGPUMapsDestroy(void **ptr) 26 { 27 P4estVertexMaps *maps = (P4estVertexMaps *)*ptr; 28 29 PetscFunctionBegin; 30 // free device data 31 if (maps[0].deviceType != LANDAU_CPU) { 32 #if defined(PETSC_HAVE_KOKKOS) 33 if (maps[0].deviceType == LANDAU_KOKKOS) PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // implies Kokkos does 34 #endif 35 } 36 // free host data 37 for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) { 38 PetscCall(PetscFree(maps[grid].c_maps)); 39 PetscCall(PetscFree(maps[grid].gIdx)); 40 } 41 PetscCall(PetscFree(maps)); 42 PetscFunctionReturn(PETSC_SUCCESS); 43 } 44 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 45 { 46 PetscReal v2 = 0; 47 48 PetscFunctionBegin; 49 /* compute v^2 / 2 */ 50 for (PetscInt i = 0; i < dim; ++i) v2 += x[i] * x[i]; 51 /* evaluate the Maxwellian */ 52 u[0] = v2 / 2; 53 PetscFunctionReturn(PETSC_SUCCESS); 54 } 55 56 /* needs double */ 57 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 58 { 59 PetscReal *c2_0_arr = ((PetscReal *)actx); 60 double u2 = 0, c02 = (double)*c2_0_arr, xx; 61 62 PetscFunctionBegin; 63 /* compute u^2 / 2 */ 64 for (PetscInt i = 0; i < dim; ++i) u2 += x[i] * x[i]; 65 /* gamma - 1 = g_eps, for conditioning and we only take derivatives */ 66 xx = u2 / c02; 67 #if defined(PETSC_USE_DEBUG) 68 u[0] = PetscSqrtReal(1. + xx); 69 #else 70 u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative 71 #endif 72 PetscFunctionReturn(PETSC_SUCCESS); 73 } 74 75 /* 76 LandauFormJacobian_Internal - Evaluates Jacobian matrix. 77 78 Input Parameters: 79 . globX - input vector 80 . actx - optional user-defined context 81 . dim - dimension 82 83 Output Parameter: 84 . J0acP - Jacobian matrix filled, not created 85 */ 86 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx) 87 { 88 LandauCtx *ctx = (LandauCtx *)a_ctx; 89 PetscInt numCells[LANDAU_MAX_GRIDS], Nq, Nb; 90 PetscQuadrature quad; 91 PetscReal Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2) 92 PetscScalar *cellClosure = NULL; 93 const PetscScalar *xdata = NULL; 94 PetscDS prob; 95 PetscContainer container; 96 P4estVertexMaps *maps; 97 Mat subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ]; 98 99 PetscFunctionBegin; 100 PetscValidHeaderSpecific(a_X, VEC_CLASSID, 1); 101 PetscValidHeaderSpecific(JacP, MAT_CLASSID, 2); 102 PetscAssertPointer(ctx, 5); 103 /* check for matrix container for GPU assembly. Support CPU assembly for debugging */ 104 PetscCheck(ctx->plex[0] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created"); 105 PetscCall(PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0)); 106 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 107 PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container)); 108 if (container) { 109 PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "maps but no GPU assembly"); 110 PetscCall(PetscContainerGetPointer(container, (void **)&maps)); 111 PetscCheck(maps, ctx->comm, PETSC_ERR_ARG_WRONG, "empty GPU matrix container"); 112 for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL; 113 } else { 114 PetscCheck(!ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "No maps but GPU assembly"); 115 for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) { 116 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)])); 117 } 118 maps = NULL; 119 } 120 // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck) 121 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 122 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); 123 PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb)); 124 PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND); 125 PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND); 126 // get metadata for collecting dynamic data 127 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 128 PetscInt cStart, cEnd; 129 PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created"); 130 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 131 numCells[grid] = cEnd - cStart; // grids can have different topology 132 } 133 PetscCall(PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0)); 134 if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */ 135 DM pack; 136 PetscCall(VecGetDM(a_X, &pack)); 137 PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM"); 138 PetscCall(PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0)); 139 for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) { 140 Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */ 141 if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */ 142 } 143 if (!ctx->gpu_assembly) { 144 Vec *locXArray, *globXArray; 145 PetscScalar *cellClosure_it; 146 PetscInt cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS]; 147 PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS]; 148 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 149 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 150 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 151 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 152 } 153 /* count cellClosure size */ 154 PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); 155 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid]; 156 PetscCall(PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure)); 157 cellClosure_it = cellClosure; 158 PetscCall(PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray)); 159 PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray)); 160 PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray)); 161 PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray)); 162 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once) 163 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 164 Vec locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2; 165 PetscInt cStart, cEnd, ei; 166 PetscCall(VecDuplicate(locX, &locX2)); 167 PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2)); 168 PetscCall(DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2)); 169 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 170 for (ei = cStart; ei < cEnd; ++ei) { 171 PetscScalar *coef = NULL; 172 PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef)); 173 PetscCall(PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */ 174 PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef)); 175 cellClosure_it += Nb * Nf[grid]; 176 } 177 PetscCall(VecDestroy(&locX2)); 178 } 179 } 180 PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT, cellClosure_it - cellClosure, cellClosure_sz * ctx->batch_sz); 181 PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray)); 182 PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray)); 183 PetscCall(PetscFree(locXArray)); 184 PetscCall(PetscFree(globXArray)); 185 xdata = NULL; 186 } else { 187 PetscMemType mtype; 188 if (ctx->jacobian_field_major_order) { // get data in batch ordering 189 PetscCall(VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD)); 190 PetscCall(VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD)); 191 PetscCall(VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype)); 192 } else { 193 PetscCall(VecGetArrayReadAndMemType(a_X, &xdata, &mtype)); 194 } 195 PetscCheck(mtype == PETSC_MEMTYPE_HOST || ctx->deviceType != LANDAU_CPU, ctx->comm, PETSC_ERR_ARG_WRONG, "CPU run with device data: use -mat_type aij"); 196 cellClosure = NULL; 197 } 198 PetscCall(PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0)); 199 } else xdata = cellClosure = NULL; 200 201 /* do it */ 202 if (ctx->deviceType == LANDAU_KOKKOS) { 203 #if defined(PETSC_HAVE_KOKKOS) 204 PetscCall(LandauKokkosJacobian(ctx->plex, Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP)); 205 #else 206 SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos"); 207 #endif 208 } else { /* CPU version */ 209 PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species 210 PetscInt ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS]; 211 PetscReal *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w; 212 PetscReal *nu_alpha = (PetscReal *)ctx->SData_d.alpha, *nu_beta = (PetscReal *)ctx->SData_d.beta, *invMass = (PetscReal *)ctx->SData_d.invMass; 213 PetscReal (*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal (*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas; 214 PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS]; 215 PetscScalar *coo_vals = NULL; 216 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 217 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 218 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 219 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 220 } 221 /* count IPf size, etc */ 222 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 223 const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1]; 224 ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0; 225 for (PetscInt grid = 0; grid < num_grids; grid++) { 226 PetscInt nfloc = ctx->species_offset[grid + 1] - ctx->species_offset[grid]; 227 elem_offset[grid + 1] = elem_offset[grid] + numCells[grid]; 228 ip_offset[grid + 1] = ip_offset[grid] + numCells[grid] * Nq; 229 ipf_offset[grid + 1] = ipf_offset[grid] + Nq * nfloc * numCells[grid]; 230 } 231 IPf_sz_glb = ipf_offset[num_grids]; 232 IPf_sz_tot = IPf_sz_glb * ctx->batch_sz; 233 // prep COO 234 PetscCall(PetscMalloc1(ctx->SData_d.coo_size, &coo_vals)); // allocate every time? 235 if (shift == 0.0) { /* compute dynamic data f and df and init data for Jacobian */ 236 #if defined(PETSC_HAVE_THREADSAFETY) 237 double starttime, endtime; 238 starttime = MPI_Wtime(); 239 #endif 240 PetscCall(PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0)); 241 PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, (dim == 3 ? IPf_sz_tot : 0), &dudz)); 242 // F df/dx 243 for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element 244 const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch 245 // find my grid: 246 PetscInt grid = 0; 247 while (b_elem_idx >= elem_offset[grid + 1]) grid++; // yuck search for grid 248 { 249 const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid]; 250 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid]; 251 PetscScalar *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQND]; 252 PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // ingJ is static data on batch 0 253 PetscInt b, f, q; 254 if (cellClosure) { 255 coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const 256 } else { 257 coef = coef_buff; 258 for (f = 0; f < loc_Nf; ++f) { 259 LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0]; 260 for (b = 0; b < Nb; ++b) { 261 PetscInt idx = Idxs[b]; 262 if (idx >= 0) { 263 coef[f * Nb + b] = xdata[idx + moffset]; 264 } else { 265 idx = -idx - 1; 266 coef[f * Nb + b] = 0; 267 for (q = 0; q < maps[grid].num_face; q++) { 268 PetscInt id = maps[grid].c_maps[idx][q].gid; 269 PetscScalar scale = maps[grid].c_maps[idx][q].scale; 270 coef[f * Nb + b] += scale * xdata[id + moffset]; 271 } 272 } 273 } 274 } 275 } 276 /* get f and df */ 277 for (PetscInt qi = 0; qi < Nq; qi++) { 278 const PetscReal *invJ = &invJe[qi * dim * dim]; 279 const PetscReal *Bq = &BB[qi * Nb]; 280 const PetscReal *Dq = &DD[qi * Nb * dim]; 281 PetscReal u_x[LANDAU_DIM]; 282 /* get f & df */ 283 for (f = 0; f < loc_Nf; ++f) { 284 const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi; 285 PetscInt b, e; 286 PetscReal refSpaceDer[LANDAU_DIM]; 287 ff[idx] = 0.0; 288 for (PetscInt d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0; 289 for (b = 0; b < Nb; ++b) { 290 const PetscInt cidx = b; 291 ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]); 292 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]); 293 } 294 for (PetscInt d = 0; d < LANDAU_DIM; ++d) { 295 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e]; 296 } 297 dudx[idx] = u_x[0]; 298 dudy[idx] = u_x[1]; 299 #if LANDAU_DIM == 3 300 dudz[idx] = u_x[2]; 301 #endif 302 } 303 } // q 304 } // grid 305 } // grid*batch 306 PetscCall(PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0)); 307 #if defined(PETSC_HAVE_THREADSAFETY) 308 endtime = MPI_Wtime(); 309 if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime); 310 #endif 311 } // Jacobian setup 312 // assemble Jacobian (or mass) 313 for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element 314 const PetscInt b_Nelem = elem_offset[num_grids]; 315 const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; 316 PetscInt grid = 0; 317 #if defined(PETSC_HAVE_THREADSAFETY) 318 double starttime, endtime; 319 starttime = MPI_Wtime(); 320 #endif 321 while (glb_elem_idx >= elem_offset[grid + 1]) grid++; 322 { 323 const PetscInt loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid]; 324 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim; 325 PetscScalar *elemMat; 326 const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; 327 PetscCall(PetscMalloc1(elemMatSize, &elemMat)); 328 PetscCall(PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat))); 329 if (shift == 0.0) { // Jacobian 330 PetscCall(PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0)); 331 } else { // mass 332 PetscCall(PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0)); 333 } 334 for (PetscInt qj = 0; qj < Nq; ++qj) { 335 const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq; 336 PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1 337 PetscInt d, d2, dp, d3, IPf_idx; 338 if (shift == 0.0) { // Jacobian 339 const PetscReal *const invJj = &invJe[qj * dim * dim]; 340 PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM]; 341 const PetscReal vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb]; 342 // create g2 & g3 343 for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K 344 gg2_temp[d] = 0; 345 for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0; 346 } 347 /* inner beta reduction */ 348 IPf_idx = 0; 349 for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r 350 PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r]; 351 for (PetscInt ei_r = 0; ei_r < numCells[grid_r]; ++ei_r) { 352 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++) { 353 const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx]; 354 PetscReal temp1[3] = {0, 0, 0}, temp2 = 0; 355 #if LANDAU_DIM == 2 356 PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.; 357 LandauTensor2D(vj, x, y, Ud, Uk, mask); 358 #else 359 PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.; 360 if (ctx->use_relativistic_corrections) { 361 LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0)); 362 } else { 363 LandauTensor3D(vj, x, y, z, U, mask); 364 } 365 #endif 366 for (PetscInt f = 0; f < Nfloc_r; ++f) { 367 const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi; 368 369 temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r]; 370 temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r]; 371 #if LANDAU_DIM == 3 372 temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r]; 373 #endif 374 temp2 += ff[idx] * nu_beta[f + f_off] * (*lambdas)[grid][grid_r]; 375 } 376 temp1[0] *= wi; 377 temp1[1] *= wi; 378 #if LANDAU_DIM == 3 379 temp1[2] *= wi; 380 #endif 381 temp2 *= wi; 382 #if LANDAU_DIM == 2 383 for (d2 = 0; d2 < 2; d2++) { 384 for (d3 = 0; d3 < 2; ++d3) { 385 /* K = U * grad(f): g2=e: i,A */ 386 gg2_temp[d2] += Uk[d2][d3] * temp1[d3]; 387 /* D = -U * (I \kron (fx)): g3=f: i,j,A */ 388 gg3_temp[d2][d3] += Ud[d2][d3] * temp2; 389 } 390 } 391 #else 392 for (d2 = 0; d2 < 3; ++d2) { 393 for (d3 = 0; d3 < 3; ++d3) { 394 /* K = U * grad(f): g2 = e: i,A */ 395 gg2_temp[d2] += U[d2][d3] * temp1[d3]; 396 /* D = -U * (I \kron (fx)): g3 = f: i,j,A */ 397 gg3_temp[d2][d3] += U[d2][d3] * temp2; 398 } 399 } 400 #endif 401 } // qi 402 } // ei_r 403 IPf_idx += nip_loc_r * Nfloc_r; 404 } /* grid_r - IPs */ 405 PetscCheck(IPf_idx == IPf_sz_glb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT, IPf_idx, IPf_sz_glb); 406 // add alpha and put in gg2/3 407 for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) { 408 for (d2 = 0; d2 < LANDAU_DIM; d2++) { 409 gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off]; 410 for (d3 = 0; d3 < LANDAU_DIM; d3++) gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off]; 411 } 412 } 413 /* add electric field term once per IP */ 414 for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off]; 415 /* Jacobian transform - g2, g3 */ 416 for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) { 417 for (d = 0; d < dim; ++d) { 418 g2[fieldA][d] = 0.0; 419 for (d2 = 0; d2 < dim; ++d2) { 420 g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2]; 421 g3[fieldA][d][d2] = 0.0; 422 for (d3 = 0; d3 < dim; ++d3) { 423 for (dp = 0; dp < dim; ++dp) g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp]; 424 } 425 g3[fieldA][d][d2] *= wj; 426 } 427 g2[fieldA][d] *= wj; 428 } 429 } 430 } else { // mass 431 PetscReal wj = ww[jpidx_glb]; 432 /* Jacobian transform - g0 */ 433 for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) { 434 if (dim == 2) { 435 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0 436 } else { 437 g0[fieldA] = wj * shift; // move this to below and remove g0 438 } 439 } 440 } 441 /* FE matrix construction */ 442 { 443 PetscInt fieldA, d, f, d2, g; 444 const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim]; 445 /* assemble - on the diagonal (I,I) */ 446 for (fieldA = 0; fieldA < loc_Nf; fieldA++) { 447 for (f = 0; f < Nb; f++) { 448 const PetscInt i = fieldA * Nb + f; /* Element matrix row */ 449 for (g = 0; g < Nb; ++g) { 450 const PetscInt j = fieldA * Nb + g; /* Element matrix column */ 451 const PetscInt fOff = i * totDim + j; 452 if (shift == 0.0) { 453 for (d = 0; d < dim; ++d) { 454 elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g]; 455 for (d2 = 0; d2 < dim; ++d2) elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2]; 456 } 457 } else { // mass 458 elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g]; 459 } 460 } 461 } 462 } 463 } 464 } /* qj loop */ 465 if (shift == 0.0) { // Jacobian 466 PetscCall(PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0)); 467 } else { 468 PetscCall(PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0)); 469 } 470 #if defined(PETSC_HAVE_THREADSAFETY) 471 endtime = MPI_Wtime(); 472 if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime); 473 #endif 474 /* assemble matrix */ 475 if (!container) { 476 PetscInt cStart; 477 PetscCall(PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0)); 478 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL)); 479 PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES)); 480 PetscCall(PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0)); 481 } else { // GPU like assembly for debugging 482 PetscInt fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE]; 483 PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0}; 484 LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets; 485 /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */ 486 for (fieldA = 0; fieldA < loc_Nf; fieldA++) { 487 LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0]; 488 for (f = 0; f < Nb; f++) { 489 PetscInt idx = Idxs[f]; 490 if (idx >= 0) { 491 nr = 1; 492 rows0[0] = idx; 493 row_scale[0] = 1.; 494 } else { 495 idx = -idx - 1; 496 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) { 497 if (maps[grid].c_maps[idx][q].gid < 0) break; 498 rows0[q] = maps[grid].c_maps[idx][q].gid; 499 row_scale[q] = maps[grid].c_maps[idx][q].scale; 500 } 501 } 502 for (g = 0; g < Nb; ++g) { 503 idx = Idxs[g]; 504 if (idx >= 0) { 505 nc = 1; 506 cols0[0] = idx; 507 col_scale[0] = 1.; 508 } else { 509 idx = -idx - 1; 510 nc = maps[grid].num_face; 511 for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) { 512 if (maps[grid].c_maps[idx][q].gid < 0) break; 513 cols0[q] = maps[grid].c_maps[idx][q].gid; 514 col_scale[q] = maps[grid].c_maps[idx][q].scale; 515 } 516 } 517 const PetscInt i = fieldA * Nb + f; /* Element matrix row */ 518 const PetscInt j = fieldA * Nb + g; /* Element matrix column */ 519 const PetscScalar Aij = elemMat[i * totDim + j]; 520 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData 521 const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb; 522 const PetscInt idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 523 for (PetscInt q = 0, idx2 = idx0; q < nr; q++) { 524 for (PetscInt d = 0; d < nc; d++, idx2++) coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij; 525 } 526 } else { 527 for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset; 528 for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset; 529 for (q = 0; q < nr; q++) { 530 for (d = 0; d < nc; d++) vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij; 531 } 532 PetscCall(MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES)); 533 } 534 } 535 } 536 } 537 } 538 if (loc_elem == -1) { 539 PetscCall(PetscPrintf(ctx->comm, "CPU Element matrix\n")); 540 for (PetscInt d = 0; d < totDim; ++d) { 541 for (PetscInt f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f]))); 542 PetscCall(PetscPrintf(ctx->comm, "\n")); 543 } 544 exit(12); 545 } 546 PetscCall(PetscFree(elemMat)); 547 } /* grid */ 548 } /* outer element & batch loop */ 549 if (shift == 0.0) { // mass 550 PetscCall(PetscFree4(ff, dudx, dudy, dudz)); 551 } 552 if (!container) { // 'CPU' assembly move nest matrix to global JacP 553 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP 554 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 555 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid]; 556 PetscInt nloc, nzl, colbuf[1024], row; 557 const PetscInt *cols; 558 const PetscScalar *vals; 559 Mat B = subJ[LAND_PACK_IDX(b_id, grid)]; 560 PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY)); 561 PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY)); 562 PetscCall(MatGetSize(B, &nloc, NULL)); 563 for (PetscInt i = 0; i < nloc; i++) { 564 PetscCall(MatGetRow(B, i, &nzl, &cols, &vals)); 565 PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl); 566 for (PetscInt j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j]; 567 row = moffset + i; 568 PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES)); 569 PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals)); 570 } 571 PetscCall(MatDestroy(&B)); 572 } 573 } 574 } 575 if (coo_vals) { 576 PetscCall(MatSetValuesCOO(JacP, coo_vals, ADD_VALUES)); 577 PetscCall(PetscFree(coo_vals)); 578 } 579 } /* CPU version */ 580 PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY)); 581 PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY)); 582 /* clean up */ 583 if (cellClosure) PetscCall(PetscFree(cellClosure)); 584 if (xdata) PetscCall(VecRestoreArrayReadAndMemType(a_X, &xdata)); 585 PetscFunctionReturn(PETSC_SUCCESS); 586 } 587 588 /* create DMComposite of meshes for each species group */ 589 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack) 590 { 591 PetscFunctionBegin; 592 /* p4est, quads */ 593 /* Create plex mesh of Landau domain */ 594 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 595 PetscReal par_radius = ctx->radius_par[grid], perp_radius = ctx->radius_perp[grid]; 596 if (!ctx->sphere && !ctx->simplex) { // 2 or 3D (only 3D option) 597 PetscReal lo[] = {-perp_radius, -par_radius, -par_radius}, hi[] = {perp_radius, par_radius, par_radius}; 598 DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim == 2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE}; 599 if (dim == 2) lo[0] = 0; 600 else { 601 lo[1] = -perp_radius; 602 hi[1] = perp_radius; // 3D y is a perp 603 } 604 PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, ctx->cells0, lo, hi, periodicity, PETSC_TRUE, 0, PETSC_TRUE, &ctx->plex[grid])); // TODO: make composite and create dm[grid] here 605 PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */ 606 if (dim == 3) PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cube")); 607 else PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane")); 608 } else if (dim == 2) { 609 size_t len; 610 PetscCall(PetscStrlen(ctx->filename, &len)); 611 if (len) { 612 Vec coords; 613 PetscScalar *x; 614 PetscInt N; 615 char str[] = "-dm_landau_view_file_0"; 616 str[21] += grid; 617 PetscCall(DMPlexCreateFromFile(comm_self, ctx->filename, "plexland.c", PETSC_TRUE, &ctx->plex[grid])); 618 PetscCall(DMPlexOrient(ctx->plex[grid])); 619 PetscCall(DMGetCoordinatesLocal(ctx->plex[grid], &coords)); 620 PetscCall(VecGetSize(coords, &N)); 621 PetscCall(VecGetArray(coords, &x)); 622 /* scale by domain size */ 623 for (PetscInt i = 0; i < N; i += 2) { 624 x[i + 0] *= ctx->radius_perp[grid]; 625 x[i + 1] *= ctx->radius_par[grid]; 626 } 627 PetscCall(VecRestoreArray(coords, &x)); 628 PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], ctx->filename)); 629 PetscCall(PetscInfo(ctx->plex[grid], "%" PetscInt_FMT ") Read %s mesh file (%s)\n", grid, ctx->filename, str)); 630 PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, str)); 631 } else { // simplex forces a sphere 632 PetscInt numCells = ctx->simplex ? 12 : 6, cell_size = ctx->simplex ? 3 : 4, j; 633 const PetscInt numVerts = 11; 634 PetscInt cellsT[][4] = { 635 {0, 1, 6, 5 }, 636 {1, 2, 7, 6 }, 637 {2, 3, 8, 7 }, 638 {3, 4, 9, 8 }, 639 {5, 6, 7, 10}, 640 {10, 7, 8, 9 } 641 }; 642 PetscInt cellsS[][3] = { 643 {0, 1, 6 }, 644 {1, 2, 6 }, 645 {6, 2, 7 }, 646 {7, 2, 8 }, 647 {8, 2, 3 }, 648 {8, 3, 4 }, 649 {0, 6, 5 }, 650 {5, 6, 7 }, 651 {5, 7, 10}, 652 {10, 7, 9 }, 653 {9, 7, 8 }, 654 {9, 8, 4 } 655 }; 656 const PetscInt *pcell = (const PetscInt *)(ctx->simplex ? &cellsS[0][0] : &cellsT[0][0]); 657 PetscReal coords[11][2], *flatCoords = &coords[0][0]; 658 PetscReal rad = ctx->radius[grid]; 659 for (j = 0; j < 5; j++) { // outside edge 660 PetscReal z, r, theta = -PETSC_PI / 2 + (j % 5) * PETSC_PI / 4; 661 r = rad * PetscCosReal(theta); 662 coords[j][0] = r; 663 z = rad * PetscSinReal(theta); 664 coords[j][1] = z; 665 } 666 coords[j][0] = 0; 667 coords[j++][1] = -rad * ctx->sphere_inner_radius_90degree[grid]; 668 coords[j][0] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548; 669 coords[j++][1] = -rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548; 670 coords[j][0] = rad * ctx->sphere_inner_radius_90degree[grid]; 671 coords[j++][1] = 0; 672 coords[j][0] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548; 673 coords[j++][1] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548; 674 coords[j][0] = 0; 675 coords[j++][1] = rad * ctx->sphere_inner_radius_90degree[grid]; 676 coords[j][0] = 0; 677 coords[j++][1] = 0; 678 PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, cell_size, ctx->interpolate, pcell, 2, flatCoords, &ctx->plex[grid])); 679 PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle")); 680 PetscCall(PetscInfo(ctx->plex[grid], "\t%" PetscInt_FMT ") Make circle %s mesh\n", grid, ctx->simplex ? "simplex" : "tensor")); 681 } 682 } else { 683 PetscCheck(dim == 3 && ctx->sphere && !ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "not: dim == 3 && ctx->sphere && !ctx->simplex"); 684 PetscReal rad = ctx->radius[grid], inner_rad = rad * ctx->sphere_inner_radius_90degree[grid], outer_rad = rad; 685 const PetscInt numCells = 7, cell_size = 8, numVerts = 16; 686 const PetscInt cells[][8] = { 687 {0, 3, 2, 1, 4, 5, 6, 7 }, 688 {0, 4, 5, 1, 8, 9, 13, 12}, 689 {1, 5, 6, 2, 9, 10, 14, 13}, 690 {2, 6, 7, 3, 10, 11, 15, 14}, 691 {0, 3, 7, 4, 8, 12, 15, 11}, 692 {0, 1, 2, 3, 8, 11, 10, 9 }, 693 {4, 7, 6, 5, 12, 13, 14, 15} 694 }; 695 PetscReal coords[16 /* numVerts */][3]; 696 for (PetscInt j = 0; j < 4; j++) { // inner edge, low 697 coords[j][0] = inner_rad * (j == 0 || j == 3 ? 1 : -1); 698 coords[j][1] = inner_rad * (j / 2 < 1 ? 1 : -1); 699 coords[j][2] = inner_rad * -1; 700 } 701 for (PetscInt j = 0, jj = 4; j < 4; j++, jj++) { // inner edge, hi 702 coords[jj][0] = inner_rad * (j == 0 || j == 3 ? 1 : -1); 703 coords[jj][1] = inner_rad * (j / 2 < 1 ? 1 : -1); 704 coords[jj][2] = inner_rad * 1; 705 } 706 for (PetscInt j = 0, jj = 8; j < 4; j++, jj++) { // outer edge, low 707 coords[jj][0] = outer_rad * (j == 0 || j == 3 ? 1 : -1); 708 coords[jj][1] = outer_rad * (j / 2 < 1 ? 1 : -1); 709 coords[jj][2] = outer_rad * -1; 710 } 711 for (PetscInt j = 0, jj = 12; j < 4; j++, jj++) { // outer edge, hi 712 coords[jj][0] = outer_rad * (j == 0 || j == 3 ? 1 : -1); 713 coords[jj][1] = outer_rad * (j / 2 < 1 ? 1 : -1); 714 coords[jj][2] = outer_rad * 1; 715 } 716 PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 3, numCells, numVerts, cell_size, ctx->interpolate, (const PetscInt *)cells, 3, (const PetscReal *)coords, &ctx->plex[grid])); 717 PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cubed sphere")); 718 PetscCall(PetscInfo(ctx->plex[grid], "\t%" PetscInt_FMT ") Make cubed sphere %s mesh\n", grid, ctx->simplex ? "simplex" : "tensor")); 719 } 720 PetscCall(DMSetOptionsPrefix(ctx->plex[grid], prefix)); 721 PetscCall(DMSetFromOptions(ctx->plex[grid])); 722 } // grid loop 723 PetscCall(DMSetOptionsPrefix(pack, prefix)); 724 { /* convert to p4est (or whatever), wait for discretization to create pack */ 725 char convType[256]; 726 PetscBool flg; 727 728 PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX"); 729 PetscCall(PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg)); 730 PetscOptionsEnd(); 731 if (flg) { 732 ctx->use_p4est = PETSC_TRUE; /* flag for Forest */ 733 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 734 DM dmforest; 735 PetscBool isForest; 736 737 PetscCall(DMConvert(ctx->plex[grid], convType, &dmforest)); 738 PetscCheck(dmforest, ctx->comm, PETSC_ERR_PLIB, "Convert failed?"); 739 PetscCall(DMSetOptionsPrefix(dmforest, prefix)); 740 PetscCall(DMIsForest(dmforest, &isForest)); 741 PetscCheck(isForest, ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?"); 742 PetscCall(DMDestroy(&ctx->plex[grid])); 743 ctx->plex[grid] = dmforest; // Forest for adaptivity 744 } 745 } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */ 746 } 747 PetscCall(DMSetDimension(pack, dim)); 748 PetscCall(PetscObjectSetName((PetscObject)pack, "Mesh")); 749 PetscCall(DMSetApplicationContext(pack, ctx)); 750 PetscFunctionReturn(PETSC_SUCCESS); 751 } 752 753 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, const char prefix[], LandauCtx *ctx) 754 { 755 PetscInt ii, i0; 756 char buf[256]; 757 PetscSection section; 758 759 PetscFunctionBegin; 760 for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 761 if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e")); 762 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii)); 763 /* Setup Discretization - FEM */ 764 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, prefix, PETSC_DECIDE, &ctx->fe[ii])); 765 PetscCall(PetscObjectSetName((PetscObject)ctx->fe[ii], buf)); 766 PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii])); 767 } 768 PetscCall(DMCreateDS(ctx->plex[grid])); 769 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion)); 770 for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 771 if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se")); 772 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii)); 773 PetscCall(PetscSectionSetComponentName(section, i0, 0, buf)); 774 } 775 PetscFunctionReturn(PETSC_SUCCESS); 776 } 777 778 /* Define a Maxwellian function for testing out the operator. */ 779 780 /* Using cartesian velocity space coordinates, the particle */ 781 /* density, [1/m^3], is defined according to */ 782 783 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */ 784 785 /* Using some constant, c, we normalize the velocity vector into a */ 786 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */ 787 788 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */ 789 790 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */ 791 /* for finding the particle within the interval in a box dx^3 around x is */ 792 793 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */ 794 795 typedef struct { 796 PetscReal v_0; 797 PetscReal kT_m; 798 PetscReal n; 799 PetscReal shift; 800 } MaxwellianCtx; 801 802 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 803 { 804 MaxwellianCtx *mctx = (MaxwellianCtx *)actx; 805 PetscInt i; 806 PetscReal v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0), shift; /* theta = 2kT/mc^2 */ 807 808 PetscFunctionBegin; 809 /* compute the exponents, v^2 */ 810 for (i = 0; i < dim; ++i) v2 += x[i] * x[i]; 811 /* evaluate the Maxwellian */ 812 if (mctx->shift < 0) shift = -mctx->shift; 813 else { 814 u[0] = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta)); 815 shift = mctx->shift; 816 } 817 if (shift != 0.) { 818 v2 = 0; 819 for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i]; 820 v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift); 821 /* evaluate the shifted Maxwellian */ 822 u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta)); 823 } 824 PetscFunctionReturn(PETSC_SUCCESS); 825 } 826 827 /*@ 828 DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state 829 830 Collective 831 832 Input Parameters: 833 + dm - The mesh (local) 834 . time - Current time 835 . temps - Temperatures of each species (global) 836 . ns - Number density of each species (global) 837 . grid - index into current grid - just used for offset into `temp` and `ns` 838 . b_id - batch index 839 . n_batch - number of batches 840 - actx - Landau context 841 842 Output Parameter: 843 . X - The state (local to this grid) 844 845 Level: beginner 846 847 .seealso: `DMPlexLandauCreateVelocitySpace()` 848 @*/ 849 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx) 850 { 851 LandauCtx *ctx = (LandauCtx *)actx; 852 PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *); 853 PetscInt dim; 854 MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES]; 855 856 PetscFunctionBegin; 857 PetscCall(DMGetDimension(dm, &dim)); 858 if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx)); 859 for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 860 mctxs[i0] = &data[i0]; 861 data[i0].v_0 = ctx->v_0; // v_0 same for all grids 862 data[i0].kT_m = ctx->k * temps[ii] / ctx->masses[ii]; /* kT/m */ 863 data[i0].n = ns[ii]; 864 initu[i0] = maxwellian; 865 data[i0].shift = 0; 866 } 867 data[0].shift = ctx->electronShift; 868 /* need to make ADD_ALL_VALUES work - TODO */ 869 PetscCall(DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X)); 870 PetscFunctionReturn(PETSC_SUCCESS); 871 } 872 873 /* 874 LandauSetInitialCondition - Adds Maxwellians with context 875 876 Collective 877 878 Input Parameters: 879 . dm - The mesh 880 - grid - index into current grid - just used for offset into temp and ns 881 . b_id - batch index 882 - n_batch - number of batches 883 + actx - Landau context with T and n 884 885 Output Parameter: 886 . X - The state 887 888 Level: beginner 889 890 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()` 891 */ 892 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx) 893 { 894 LandauCtx *ctx = (LandauCtx *)actx; 895 896 PetscFunctionBegin; 897 if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx)); 898 PetscCall(VecZeroEntries(X)); 899 PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx)); 900 PetscFunctionReturn(PETSC_SUCCESS); 901 } 902 903 // adapt a level once. Forest in/out 904 #if defined(PETSC_USE_INFO) 905 static const char *s_refine_names[] = {"RE", "Z1", "Origin", "Z2", "Uniform"}; 906 #endif 907 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest) 908 { 909 DM forest, plex, adaptedDM = NULL; 910 PetscDS prob; 911 PetscBool isForest; 912 PetscQuadrature quad; 913 PetscInt Nq, Nb, *Nb2, cStart, cEnd, c, dim, qj, k; 914 DMLabel adaptLabel = NULL; 915 916 PetscFunctionBegin; 917 forest = ctx->plex[grid]; 918 PetscCall(DMCreateDS(forest)); 919 PetscCall(DMGetDS(forest, &prob)); 920 PetscCall(DMGetDimension(forest, &dim)); 921 PetscCall(DMIsForest(forest, &isForest)); 922 PetscCheck(isForest, ctx->comm, PETSC_ERR_ARG_WRONG, "! Forest"); 923 PetscCall(DMConvert(forest, DMPLEX, &plex)); 924 PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd)); 925 PetscCall(DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel)); 926 PetscCall(PetscFEGetQuadrature(fem, &quad)); 927 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); 928 PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND); 929 PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb)); 930 PetscCall(PetscDSGetDimensions(prob, &Nb2)); 931 PetscCheck(Nb2[0] == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, " Nb = %" PetscInt_FMT " != Nb (%" PetscInt_FMT ")", Nb, Nb2[0]); 932 PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND); 933 PetscCall(PetscInfo(sol, "%" PetscInt_FMT ") Refine phase: %s\n", grid, s_refine_names[type])); 934 if (type == 4) { 935 for (c = cStart; c < cEnd; c++) PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); 936 } else if (type == 2) { 937 PetscInt rCellIdx[8], nr = 0, nrmax = (dim == 3) ? 8 : 2; 938 PetscReal minRad = PETSC_INFINITY, r; 939 for (c = cStart; c < cEnd; c++) { 940 PetscReal tt, v0[LANDAU_MAX_NQND * 3], J[LANDAU_MAX_NQND * 9], invJ[LANDAU_MAX_NQND * 9], detJ[LANDAU_MAX_NQND]; 941 PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, J, invJ, detJ)); 942 (void)J; 943 (void)invJ; 944 for (qj = 0; qj < Nq; ++qj) { 945 tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr((dim == 3) ? v0[dim * qj + 2] : 0); 946 r = PetscSqrtReal(tt); 947 if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) { 948 minRad = r; 949 nr = 0; 950 rCellIdx[nr++] = c; 951 PetscCall(PetscInfo(sol, "\t\t%" PetscInt_FMT ") Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", grid, (double)r, c, qj + 1, Nq)); 952 } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) { 953 for (k = 0; k < nr; k++) 954 if (c == rCellIdx[k]) break; 955 if (k == nr) { 956 rCellIdx[nr++] = c; 957 PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", grid, (double)r, c, qj + 1, Nq, (double)(r - minRad))); 958 } 959 } 960 } 961 } 962 for (k = 0; k < nr; k++) PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE)); 963 PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", grid, nr, rCellIdx[0], rCellIdx[1], (double)minRad)); 964 } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */ 965 PetscScalar *coef = NULL; 966 Vec coords; 967 PetscInt csize, Nv, d, nz, nrefined = 0; 968 DM cdm; 969 PetscSection cs; 970 PetscCall(DMGetCoordinatesLocal(forest, &coords)); 971 PetscCall(DMGetCoordinateDM(forest, &cdm)); 972 PetscCall(DMGetLocalSection(cdm, &cs)); 973 for (c = cStart; c < cEnd; c++) { 974 PetscInt doit = 0, outside = 0; 975 PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef)); 976 Nv = csize / dim; 977 for (nz = d = 0; d < Nv; d++) { 978 PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0); 979 x = PetscSqrtReal(x); 980 if (type == 0) { 981 if (ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */ 982 } else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) { 983 outside++; /* don't refine outside electron refine radius */ 984 PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type])); 985 } else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) { 986 outside++; /* refine r=0 cells on refinement front */ 987 PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type])); 988 } 989 if (x < PETSC_MACHINE_EPSILON * 10. && (type != 0 || ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON)) nz++; 990 } 991 PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef)); 992 if (doit || (outside < Nv && nz)) { 993 PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); 994 nrefined++; 995 } 996 } 997 PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " cells\n", grid, nrefined)); 998 } 999 PetscCall(DMDestroy(&plex)); 1000 PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM)); 1001 PetscCall(DMLabelDestroy(&adaptLabel)); 1002 *newForest = adaptedDM; 1003 if (adaptedDM) { 1004 if (isForest) PetscCall(DMForestSetAdaptivityForest(adaptedDM, NULL)); // ???? 1005 PetscCall(DMConvert(adaptedDM, DMPLEX, &plex)); 1006 PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd)); 1007 PetscCall(PetscInfo(sol, "\t\t\t\t%" PetscInt_FMT ") %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", grid, cEnd - cStart, Nq * (cEnd - cStart))); 1008 PetscCall(DMDestroy(&plex)); 1009 } else *newForest = NULL; 1010 PetscFunctionReturn(PETSC_SUCCESS); 1011 } 1012 1013 // forest goes in (ctx->plex[grid]), plex comes out 1014 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu) 1015 { 1016 PetscInt adaptIter; 1017 1018 PetscFunctionBegin; 1019 PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]}; 1020 for (type = 0; type < 5; type++) { 1021 for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) { 1022 DM newForest = NULL; 1023 PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest)); 1024 if (newForest) { 1025 PetscCall(DMDestroy(&ctx->plex[grid])); 1026 PetscCall(VecDestroy(uu)); 1027 PetscCall(DMCreateGlobalVector(newForest, uu)); 1028 PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx)); 1029 ctx->plex[grid] = newForest; 1030 } else { 1031 PetscCall(PetscInfo(*uu, "No refinement\n")); 1032 } 1033 } 1034 } 1035 PetscCall(PetscObjectSetName((PetscObject)*uu, "uAMR")); 1036 PetscFunctionReturn(PETSC_SUCCESS); 1037 } 1038 1039 // make log(Lambdas) from NRL Plasma formulary 1040 static PetscErrorCode makeLambdas(LandauCtx *ctx) 1041 { 1042 PetscFunctionBegin; 1043 for (PetscInt gridi = 0; gridi < ctx->num_grids; gridi++) { 1044 PetscInt iii = ctx->species_offset[gridi]; 1045 PetscReal Ti_ev = (ctx->thermal_temps[iii] / 1.1604525e7) * 1000; // convert (back) to eV 1046 PetscReal ni = ctx->n[iii] * ctx->n_0; 1047 for (PetscInt gridj = gridi; gridj < ctx->num_grids; gridj++) { 1048 PetscInt jjj = ctx->species_offset[gridj]; 1049 PetscReal Zj = ctx->charges[jjj] / 1.6022e-19; 1050 if (gridi == 0) { 1051 if (gridj == 0) { // lam_ee 1052 ctx->lambdas[gridi][gridj] = 23.5 - PetscLogReal(PetscSqrtReal(ni) * PetscPowReal(Ti_ev, -1.25)) - PetscSqrtReal(1e-5 + PetscSqr(PetscLogReal(Ti_ev) - 2) / 16); 1053 } else { // lam_ei == lam_ie 1054 if (10 * Zj * Zj > Ti_ev) { 1055 ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(PetscSqrtReal(ni) * Zj * PetscPowReal(Ti_ev, -1.5)); 1056 } else { 1057 ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 24 - PetscLogReal(PetscSqrtReal(ni) / Ti_ev); 1058 } 1059 } 1060 } else { // lam_ii' 1061 PetscReal mui = ctx->masses[iii] / 1.6720e-27, Zi = ctx->charges[iii] / 1.6022e-19; 1062 PetscReal Tj_ev = (ctx->thermal_temps[jjj] / 1.1604525e7) * 1000; // convert (back) to eV 1063 PetscReal muj = ctx->masses[jjj] / 1.6720e-27; 1064 PetscReal nj = ctx->n[jjj] * ctx->n_0; 1065 ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(Zi * Zj * (mui + muj) / (mui * Tj_ev + muj * Ti_ev) * PetscSqrtReal(ni * Zi * Zi / Ti_ev + nj * Zj * Zj / Tj_ev)); 1066 } 1067 } 1068 } 1069 //PetscReal v0 = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */ 1070 PetscFunctionReturn(PETSC_SUCCESS); 1071 } 1072 1073 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[]) 1074 { 1075 PetscBool flg, fileflg; 1076 PetscInt ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS], non_dim_grid; 1077 PetscReal lnLam = 10; 1078 DM dummy; 1079 1080 PetscFunctionBegin; 1081 PetscCall(DMCreate(ctx->comm, &dummy)); 1082 /* get options - initialize context */ 1083 ctx->verbose = 1; // should be 0 for silent compliance 1084 ctx->batch_sz = 1; 1085 ctx->batch_view_idx = 0; 1086 ctx->interpolate = PETSC_TRUE; 1087 ctx->gpu_assembly = PETSC_TRUE; 1088 ctx->norm_state = 0; 1089 ctx->electronShift = 0; 1090 ctx->M = NULL; 1091 ctx->J = NULL; 1092 /* geometry and grids */ 1093 ctx->sphere = PETSC_FALSE; 1094 ctx->use_p4est = PETSC_FALSE; 1095 ctx->simplex = PETSC_FALSE; 1096 for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) { 1097 ctx->radius[grid] = 5.; /* thermal radius (velocity) */ 1098 ctx->radius_perp[grid] = 5.; /* thermal radius (velocity) */ 1099 ctx->radius_par[grid] = 5.; /* thermal radius (velocity) */ 1100 ctx->numAMRRefine[grid] = 0; 1101 ctx->postAMRRefine[grid] = 0; 1102 ctx->species_offset[grid + 1] = 1; // one species default 1103 num_species_grid[grid] = 0; 1104 ctx->plex[grid] = NULL; /* cache as expensive to Convert */ 1105 } 1106 ctx->species_offset[0] = 0; 1107 ctx->re_radius = 0.; 1108 ctx->vperp0_radius1 = 0; 1109 ctx->vperp0_radius2 = 0; 1110 ctx->nZRefine1 = 0; 1111 ctx->nZRefine2 = 0; 1112 ctx->numRERefine = 0; 1113 num_species_grid[0] = 1; // one species default 1114 /* species - [0] electrons, [1] one ion species eg, duetarium, [2] heavy impurity ion, ... */ 1115 ctx->charges[0] = -1; /* electron charge (MKS) */ 1116 ctx->masses[0] = 1 / 1835.469965278441013; /* temporary value in proton mass */ 1117 ctx->n[0] = 1; 1118 ctx->v_0 = 1; /* thermal velocity, we could start with a scale != 1 */ 1119 ctx->thermal_temps[0] = 1; 1120 /* constants, etc. */ 1121 ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */ 1122 ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */ 1123 ctx->n_0 = 1.e20; /* typical plasma n, but could set it to 1 */ 1124 ctx->Ez = 0; 1125 for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0; 1126 for (PetscInt ii = 0; ii < LANDAU_DIM; ii++) ctx->cells0[ii] = 2; 1127 if (LANDAU_DIM == 2) ctx->cells0[0] = 1; 1128 ctx->use_matrix_mass = PETSC_FALSE; 1129 ctx->use_relativistic_corrections = PETSC_FALSE; 1130 ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */ 1131 ctx->SData_d.w = NULL; 1132 ctx->SData_d.x = NULL; 1133 ctx->SData_d.y = NULL; 1134 ctx->SData_d.z = NULL; 1135 ctx->SData_d.invJ = NULL; 1136 ctx->jacobian_field_major_order = PETSC_FALSE; 1137 ctx->SData_d.coo_elem_offsets = NULL; 1138 ctx->SData_d.coo_elem_point_offsets = NULL; 1139 ctx->SData_d.coo_elem_fullNb = NULL; 1140 ctx->SData_d.coo_size = 0; 1141 PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none"); 1142 #if defined(PETSC_HAVE_KOKKOS) 1143 ctx->deviceType = LANDAU_KOKKOS; 1144 PetscCall(PetscStrncpy(ctx->filename, "kokkos", sizeof(ctx->filename))); 1145 #else 1146 ctx->deviceType = LANDAU_CPU; 1147 PetscCall(PetscStrncpy(ctx->filename, "cpu", sizeof(ctx->filename))); 1148 #endif 1149 PetscCall(PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu' 'kokkos'", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), NULL)); 1150 PetscCall(PetscStrcmp("cpu", ctx->filename, &flg)); 1151 if (flg) { 1152 ctx->deviceType = LANDAU_CPU; 1153 } else { 1154 PetscCall(PetscStrcmp("kokkos", ctx->filename, &flg)); 1155 PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", ctx->filename); 1156 ctx->deviceType = LANDAU_KOKKOS; 1157 } 1158 ctx->filename[0] = '\0'; 1159 PetscCall(PetscOptionsString("-dm_landau_filename", "file to read mesh from", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), &fileflg)); 1160 PetscCall(PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL)); 1161 PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL)); 1162 PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL)); 1163 PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "LANDAU_MAX_BATCH_SZ %d < ctx->batch_sz %" PetscInt_FMT, LANDAU_MAX_BATCH_SZ, ctx->batch_sz); 1164 PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL)); 1165 PetscCheck(ctx->batch_view_idx < ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT, ctx->batch_view_idx, ctx->batch_sz); 1166 PetscCall(PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in unites of Conner-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL)); 1167 PetscCall(PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL)); 1168 PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL)); 1169 PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL)); 1170 PetscCall(PetscOptionsBool("-dm_landau_simplex", "Use simplex elements", "plexland.c", ctx->simplex, &ctx->simplex, NULL)); 1171 PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, NULL)); 1172 if (LANDAU_DIM == 2 && ctx->use_relativistic_corrections) ctx->use_relativistic_corrections = PETSC_FALSE; // should warn 1173 PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, 1174 &ctx->use_energy_tensor_trick, NULL)); 1175 1176 /* get num species with temperature, set defaults */ 1177 for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) { 1178 ctx->thermal_temps[ii] = 1; 1179 ctx->charges[ii] = 1; 1180 ctx->masses[ii] = 1; 1181 ctx->n[ii] = 1; 1182 } 1183 nt = LANDAU_MAX_SPECIES; 1184 PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg)); 1185 PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_thermal_temps ,t1,t2,.. must be provided to set the number of species"); 1186 PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt)); 1187 ctx->num_species = nt; 1188 for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */ 1189 nm = LANDAU_MAX_SPECIES - 1; 1190 PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg)); 1191 PetscCheck(!flg || nm == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species - 1); 1192 nm = LANDAU_MAX_SPECIES; 1193 PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg)); 1194 PetscCheck(!flg || nm == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species); 1195 for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */ 1196 ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */ 1197 nc = LANDAU_MAX_SPECIES - 1; 1198 PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg)); 1199 if (flg) PetscCheck(nc == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num charges %" PetscInt_FMT " != num species %" PetscInt_FMT, nc, ctx->num_species - 1); 1200 for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */ 1201 /* geometry and grids */ 1202 nt = LANDAU_MAX_GRIDS; 1203 PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg)); 1204 if (flg) { 1205 ctx->num_grids = nt; 1206 for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii]; 1207 PetscCheck(ctx->num_species == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)", nt, ctx->num_species, 1208 ctx->num_grids, LANDAU_MAX_GRIDS); 1209 } else { 1210 if (ctx->num_species > LANDAU_MAX_GRIDS) { 1211 num_species_grid[0] = 1; 1212 num_species_grid[1] = ctx->num_species - 1; 1213 ctx->num_grids = 2; 1214 } else { 1215 ctx->num_grids = ctx->num_species; 1216 for (ii = 0; ii < ctx->num_grids; ii++) num_species_grid[ii] = 1; 1217 } 1218 } 1219 for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii]; 1220 PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????", ctx->species_offset[ctx->num_grids], 1221 ctx->num_species); 1222 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1223 PetscInt iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid 1224 ctx->thermal_speed[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */ 1225 } 1226 // get lambdas here because we need them for t_0 etc 1227 PetscCall(PetscOptionsReal("-dm_landau_ln_lambda", "Universal cross section parameter. Default uses NRL formulas", "plexland.c", lnLam, &lnLam, &flg)); 1228 if (flg) { 1229 for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) { 1230 for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) ctx->lambdas[gridj][grid] = lnLam; /* cross section ratio large - small angle collisions */ 1231 } 1232 } else { 1233 PetscCall(makeLambdas(ctx)); 1234 } 1235 non_dim_grid = 0; 1236 PetscCall(PetscOptionsInt("-dm_landau_normalization_grid", "Index of grid to use for setting v_0, m_0, t_0. (Not recommended)", "plexland.c", non_dim_grid, &non_dim_grid, &flg)); 1237 if (non_dim_grid != 0) PetscCall(PetscInfo(dummy, "Normalization grid set to %" PetscInt_FMT ", but non-default not well verified\n", non_dim_grid)); 1238 PetscCheck(non_dim_grid >= 0 && non_dim_grid < ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "Normalization grid wrong: %" PetscInt_FMT, non_dim_grid); 1239 ctx->v_0 = ctx->thermal_speed[non_dim_grid]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */ 1240 ctx->m_0 = ctx->masses[non_dim_grid]; /* arbitrary reference mass, electrons */ 1241 ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[non_dim_grid])) / ctx->lambdas[non_dim_grid][non_dim_grid] / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[non_dim_grid,non_dim_grid]=1 */ 1242 /* domain */ 1243 nt = LANDAU_MAX_GRIDS; 1244 PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg)); 1245 if (flg) { 1246 PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1247 while (nt--) ctx->radius_par[nt] = ctx->radius_perp[nt] = ctx->radius[nt]; 1248 } else { 1249 nt = LANDAU_MAX_GRIDS; 1250 PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_par", "Parallel velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_par, &nt, &flg)); 1251 if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_par: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1252 PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_perp", "Perpendicular velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_perp, &nt, &flg)); 1253 if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_perp: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1254 } 1255 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1256 if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c - need to set par and perp with this -- todo */ 1257 if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75; 1258 else ctx->radius[grid] = -ctx->radius[grid]; 1259 ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid) 1260 PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid)); 1261 } 1262 ctx->radius[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0 1263 ctx->radius_perp[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0 1264 ctx->radius_par[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0 1265 } 1266 /* amr parameters */ 1267 if (!fileflg) { 1268 nt = LANDAU_MAX_GRIDS; 1269 PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg)); 1270 PetscCheck(!flg || nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1271 nt = LANDAU_MAX_GRIDS; 1272 PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg)); 1273 for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now 1274 PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg)); 1275 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_pre", "Number of levels to refine along v_perp=0 before origin refine", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg)); 1276 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_post", "Number of levels to refine along v_perp=0 after origin refine", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg)); 1277 PetscCall(PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg)); 1278 PetscCall(PetscOptionsReal("-dm_landau_z_radius_pre", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg)); 1279 PetscCall(PetscOptionsReal("-dm_landau_z_radius_post", "velocity range to refine r=0 axis (for electrons) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg)); 1280 /* spherical domain */ 1281 if (ctx->sphere || ctx->simplex) { 1282 ctx->sphere_uniform_normal = PETSC_FALSE; 1283 PetscCall(PetscOptionsBool("-dm_landau_sphere_uniform_normal", "Scaling of circle radius to get uniform particles per cell with Maxwellians (not used)", "plexland.c", ctx->sphere_uniform_normal, &ctx->sphere_uniform_normal, NULL)); 1284 if (!ctx->sphere_uniform_normal) { // true 1285 nt = LANDAU_MAX_GRIDS; 1286 PetscCall(PetscOptionsRealArray("-dm_landau_sphere_inner_radius_90degree_scale", "Scaling of radius for inner circle on 90 degree grid", "plexland.c", ctx->sphere_inner_radius_90degree, &nt, &flg)); 1287 if (flg && nt < ctx->num_grids) { 1288 for (PetscInt grid = nt; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = ctx->sphere_inner_radius_90degree[0]; 1289 } else if (!flg || nt == 0) { 1290 if (ctx->sphere && !ctx->simplex && LANDAU_DIM == 3) { 1291 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.35; // optimized for R=6, Q4, AMR=0, 0 refinement 1292 } else { 1293 if (LANDAU_DIM == 2) { 1294 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.4; // optimized for R=5, Q4, AMR=0 1295 } else { 1296 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.577 * 0.40; 1297 } 1298 } 1299 } 1300 nt = LANDAU_MAX_GRIDS; 1301 PetscCall(PetscOptionsRealArray("-dm_landau_sphere_inner_radius_45degree_scale", "Scaling of radius for inner circle on 45 degree grid", "plexland.c", ctx->sphere_inner_radius_45degree, &nt, &flg)); 1302 if (flg && nt < ctx->num_grids) { 1303 for (PetscInt grid = nt; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = ctx->sphere_inner_radius_45degree[0]; 1304 } else if (!flg || nt == 0) { 1305 if (LANDAU_DIM == 2) { 1306 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = 0.45; // optimized for R=5, Q4, AMR=0 1307 } else { 1308 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = 0.4; // 3D sphere 1309 } 1310 } 1311 if (ctx->sphere) PetscCall(PetscInfo(ctx->plex[0], "sphere : , 45 degree scaling = %g; 90 degree scaling = %g\n", (double)ctx->sphere_inner_radius_45degree[0], (double)ctx->sphere_inner_radius_90degree[0])); 1312 } else { 1313 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1314 switch (ctx->numAMRRefine[grid]) { 1315 case 0: 1316 case 1: 1317 case 2: 1318 case 3: 1319 default: 1320 if (LANDAU_DIM == 2) { 1321 ctx->sphere_inner_radius_90degree[grid] = 0.40; 1322 ctx->sphere_inner_radius_45degree[grid] = 0.45; 1323 } else { 1324 ctx->sphere_inner_radius_45degree[grid] = 0.25; 1325 } 1326 } 1327 } 1328 } 1329 } else { 1330 nt = LANDAU_DIM; 1331 PetscCall(PetscOptionsIntArray("-dm_landau_num_cells", "Number of cells in each dimension of base grid", "plexland.c", ctx->cells0, &nt, &flg)); 1332 } 1333 } 1334 /* processing options */ 1335 PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL)); 1336 PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL)); 1337 if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly"); 1338 PetscCheck(!ctx->jacobian_field_major_order, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED"); 1339 PetscOptionsEnd(); 1340 1341 for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0; 1342 if (ctx->verbose != 0) { 1343 PetscReal pmassunit = PetscRealConstant(1.6720e-27); 1344 1345 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / pmassunit), (double)(ctx->num_species > 2 ? ctx->masses[2] / pmassunit : 0))); 1346 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0))); 1347 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "n: e: %10.3e i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0))); 1348 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "thermal T (K): e=%10.3e i=%10.3e %10.3e. Normalization grid %" PetscInt_FMT ": v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e %" PetscInt_FMT " batched, view batch %" PetscInt_FMT "\n", (double)ctx->thermal_temps[0], 1349 (double)ctx->thermal_temps[1], (double)((ctx->num_species > 2) ? ctx->thermal_temps[2] : 0), non_dim_grid, (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->batch_sz, ctx->batch_view_idx)); 1350 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Domain radius (AMR levels) grid %d: par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius_par[0], (double)ctx->radius_perp[0], ctx->numAMRRefine[0])); 1351 for (ii = 1; ii < ctx->num_grids; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, ", %" PetscInt_FMT ": par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius_par[ii], (double)ctx->radius_perp[ii], ctx->numAMRRefine[ii])); 1352 if (ctx->use_relativistic_corrections) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nUse relativistic corrections\n")); 1353 else PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 1354 } 1355 PetscCall(DMDestroy(&dummy)); 1356 { 1357 PetscMPIInt rank; 1358 PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank)); 1359 ctx->stage = 0; 1360 PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */ 1361 PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */ 1362 PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */ 1363 PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */ 1364 PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */ 1365 PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */ 1366 PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */ 1367 PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */ 1368 PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */ 1369 PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */ 1370 PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */ 1371 PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */ 1372 PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */ 1373 PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */ 1374 PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */ 1375 PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */ 1376 PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */ 1377 1378 if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */ 1379 PetscCall(PetscOptionsClearValue(NULL, "-snes_converged_reason")); 1380 PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason")); 1381 PetscCall(PetscOptionsClearValue(NULL, "-snes_monitor")); 1382 PetscCall(PetscOptionsClearValue(NULL, "-ksp_monitor")); 1383 PetscCall(PetscOptionsClearValue(NULL, "-ts_monitor")); 1384 PetscCall(PetscOptionsClearValue(NULL, "-ts_view")); 1385 PetscCall(PetscOptionsClearValue(NULL, "-ts_adapt_monitor")); 1386 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view")); 1387 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view")); 1388 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view")); 1389 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_view")); 1390 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view")); 1391 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mat_view")); 1392 PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason")); 1393 PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor")); 1394 PetscCall(PetscOptionsClearValue(NULL, "-")); 1395 PetscCall(PetscOptionsClearValue(NULL, "-info")); 1396 } 1397 } 1398 PetscFunctionReturn(PETSC_SUCCESS); 1399 } 1400 1401 static PetscErrorCode CreateStaticData(PetscInt dim, IS grid_batch_is_inv[], const char prefix[], LandauCtx *ctx) 1402 { 1403 PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS]; 1404 PetscQuadrature quad; 1405 const PetscReal *quadWeights; 1406 PetscReal invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 1407 PetscInt numCells[LANDAU_MAX_GRIDS], Nq, Nb, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES; 1408 PetscTabulation *Tf; 1409 PetscDS prob; 1410 1411 PetscFunctionBegin; 1412 PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb)); 1413 PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND); 1414 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1415 for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) { 1416 invMass[ii] = ctx->m_0 / ctx->masses[ii]; 1417 nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii]; 1418 nu_beta[ii] = PetscSqr(ctx->charges[ii] / ctx->epsilon0) / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3); 1419 } 1420 } 1421 if (ctx->verbose == 4) { 1422 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nu_alpha: ")); 1423 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1424 PetscInt iii = ctx->species_offset[grid]; 1425 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_alpha[ii])); 1426 } 1427 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_beta: ")); 1428 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1429 PetscInt iii = ctx->species_offset[grid]; 1430 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_beta[ii])); 1431 } 1432 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_alpha[i]*nu_beta[j]*lambda[i][j]:\n")); 1433 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1434 PetscInt iii = ctx->species_offset[grid]; 1435 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) { 1436 for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) { 1437 PetscInt jjj = ctx->species_offset[gridj]; 1438 for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)(nu_alpha[ii] * nu_beta[jj] * ctx->lambdas[grid][gridj]))); 1439 } 1440 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 1441 } 1442 } 1443 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "lambda[i][j]:\n")); 1444 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1445 PetscInt iii = ctx->species_offset[grid]; 1446 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) { 1447 for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) { 1448 PetscInt jjj = ctx->species_offset[gridj]; 1449 for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)ctx->lambdas[grid][gridj])); 1450 } 1451 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 1452 } 1453 } 1454 } 1455 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 1456 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 1457 /* DS, Tab and quad is same on all grids */ 1458 PetscCheck(ctx->plex[0], ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created"); 1459 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 1460 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights)); 1461 PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND); 1462 /* setup each grid */ 1463 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1464 PetscInt cStart, cEnd; 1465 PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created"); 1466 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1467 numCells[grid] = cEnd - cStart; // grids can have different topology 1468 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 1469 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 1470 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 1471 ncellsTot += numCells[grid]; 1472 } 1473 /* create GPU assembly data */ 1474 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1475 PetscContainer container; 1476 PetscScalar *elemMatrix, *elMat; 1477 pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE]; 1478 P4estVertexMaps *maps; 1479 const PetscInt *plex_batch = NULL, elMatSz = Nb * Nb * ctx->num_species * ctx->num_species; 1480 LandauIdx *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = NULL; 1481 /* create GPU assembly data */ 1482 PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1)); 1483 PetscCall(PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0)); 1484 PetscCall(PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps)); 1485 PetscCall(PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps)); 1486 PetscCall(PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix)); 1487 1488 { // setup COO assembly -- put COO metadata directly in ctx->SData_d 1489 PetscCall(PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets)); // array of integer pointers 1490 coo_elem_offsets[0] = 0; // finish later 1491 PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot)); 1492 ctx->SData_d.coo_n_cellsTot = ncellsTot; 1493 ctx->SData_d.coo_elem_offsets = (void *)coo_elem_offsets; 1494 ctx->SData_d.coo_elem_fullNb = (void *)coo_elem_fullNb; 1495 ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets; 1496 } 1497 1498 ctx->SData_d.coo_max_fullnb = 0; 1499 for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) { 1500 PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nb; 1501 if (grid_batch_is_inv[grid]) PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch)); 1502 PetscCheck(!plex_batch, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED"); 1503 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1504 // make maps 1505 maps[grid].d_self = NULL; 1506 maps[grid].num_elements = numCells[grid]; 1507 maps[grid].num_face = (PetscInt)(pow(Nq, 1. / ((double)dim)) + .001); // Q 1508 maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^2 1509 maps[grid].num_reduced = 0; 1510 maps[grid].deviceType = ctx->deviceType; 1511 maps[grid].numgrids = ctx->num_grids; 1512 // count reduced and get 1513 PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx)); 1514 for (PetscInt ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) { 1515 if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add 1516 for (PetscInt fieldA = 0; fieldA < Nf[grid]; fieldA++) { 1517 PetscInt fullNb = 0; 1518 for (PetscInt q = 0; q < Nb; ++q) { 1519 PetscInt numindices, *indices; 1520 PetscScalar *valuesOrig = elMat = elemMatrix; 1521 PetscCall(PetscArrayzero(elMat, totDim * totDim)); 1522 elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1; 1523 PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, &elMat)); 1524 if (ctx->simplex) { 1525 PetscCheck(numindices == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, "numindices != Nb numindices=%" PetscInt_FMT " Nb=%" PetscInt_FMT, numindices, Nb); 1526 for (PetscInt q = 0; q < numindices; ++q) maps[grid].gIdx[eidx][fieldA][q] = indices[q]; 1527 fullNb++; 1528 } else { 1529 for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal (is this too complicated for simplices?) 1530 if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) { 1531 // found it 1532 if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0 1533 if (plex_batch) { 1534 maps[grid].gIdx[eidx][fieldA][q] = plex_batch[indices[f]]; 1535 } else { 1536 maps[grid].gIdx[eidx][fieldA][q] = indices[f]; 1537 } 1538 fullNb++; 1539 } else { //found a constraint 1540 PetscInt jj = 0; 1541 PetscReal sum = 0; 1542 const PetscInt ff = f; 1543 maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1 1544 PetscCheck(!ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "No constraints with simplex"); 1545 do { // constraints are continuous in Plex - exploit that here 1546 PetscInt ii; // get 'scale' 1547 for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value 1548 if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not 1549 pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]); 1550 } 1551 } 1552 sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic 1553 // get 'gid' 1554 if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps 1555 else { 1556 if (plex_batch) { 1557 pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]]; 1558 } else { 1559 pointMaps[maps[grid].num_reduced][jj].gid = indices[f]; 1560 } 1561 fullNb++; 1562 } 1563 } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end 1564 while (jj < maps[grid].num_face) { 1565 pointMaps[maps[grid].num_reduced][jj].scale = 0; 1566 pointMaps[maps[grid].num_reduced][jj].gid = -1; 1567 jj++; 1568 } 1569 if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug 1570 PetscInt d, f; 1571 PetscReal tmp = 0; 1572 PetscCall( 1573 PetscPrintf(PETSC_COMM_SELF, "\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face)); 1574 for (d = 0, tmp = 0; d < numindices; ++d) { 1575 if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") %3" PetscInt_FMT ": ", d, indices[d])); 1576 for (f = 0; f < numindices; ++f) tmp += PetscRealPart(elMat[d * numindices + f]); 1577 if (tmp != 0) PetscCall(PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp)); 1578 } 1579 } 1580 maps[grid].num_reduced++; 1581 PetscCheck(maps[grid].num_reduced < MAP_BF_SIZE, PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %" PetscInt_FMT " > %" PetscInt_FMT, maps[grid].num_reduced, MAP_BF_SIZE); 1582 } 1583 break; 1584 } 1585 } 1586 } // !simplex 1587 // cleanup 1588 PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, &elMat)); 1589 if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat)); 1590 } 1591 { // setup COO assembly 1592 coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid 1593 if (fieldA == 0) { // cache full Nb for this element, on this grid per species 1594 coo_elem_fullNb[glb_elem_idx] = fullNb; 1595 if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb; 1596 } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %" PetscInt_FMT " %" PetscInt_FMT, coo_elem_fullNb[glb_elem_idx], fullNb); 1597 } 1598 } // field 1599 } // cell 1600 // allocate and copy point data maps[grid].gIdx[eidx][field][q] 1601 PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps)); 1602 for (PetscInt ej = 0; ej < maps[grid].num_reduced; ++ej) { 1603 for (PetscInt q = 0; q < maps[grid].num_face; ++q) { 1604 maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale; 1605 maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid; 1606 } 1607 } 1608 #if defined(PETSC_HAVE_KOKKOS) 1609 if (ctx->deviceType == LANDAU_KOKKOS) PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, grid)); // implies Kokkos does 1610 #endif 1611 if (plex_batch) { 1612 PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch)); 1613 PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this 1614 } 1615 } /* grids */ 1616 // finish COO 1617 { // setup COO assembly 1618 PetscInt *oor, *ooc; 1619 ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz; 1620 PetscCall(PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc)); 1621 for (PetscInt i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1; 1622 // get 1623 for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) { 1624 for (PetscInt ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) { 1625 const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx]; 1626 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps, They should be the same but this is just for COO storage 1627 coo_elem_point_offsets[glb_elem_idx][0] = 0; 1628 for (PetscInt f = 0, cnt2 = 0; f < Nb; f++) { 1629 PetscInt idx = Idxs[f]; 1630 coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last 1631 if (idx >= 0) { 1632 cnt2++; 1633 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc 1634 } else { 1635 idx = -idx - 1; 1636 for (PetscInt q = 0; q < maps[grid].num_face; q++) { 1637 if (maps[grid].c_maps[idx][q].gid < 0) break; 1638 cnt2++; 1639 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc 1640 } 1641 } 1642 PetscCheck(cnt2 <= fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %" PetscInt_FMT " < %" PetscInt_FMT, fullNb, cnt2); 1643 } 1644 PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %" PetscInt_FMT " != fullNb=%" PetscInt_FMT, coo_elem_point_offsets[glb_elem_idx][Nb], fullNb); 1645 } 1646 } 1647 // set 1648 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 1649 for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) { 1650 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 1651 for (PetscInt ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) { 1652 const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb; 1653 // set (i,j) 1654 for (PetscInt fieldA = 0; fieldA < Nf[grid]; fieldA++) { 1655 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0]; 1656 PetscInt rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE]; 1657 for (PetscInt f = 0; f < Nb; ++f) { 1658 const PetscInt nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f]; 1659 if (nr == 1) rows[0] = Idxs[f]; 1660 else { 1661 const PetscInt idx = -Idxs[f] - 1; 1662 for (PetscInt q = 0; q < nr; q++) rows[q] = maps[grid].c_maps[idx][q].gid; 1663 } 1664 for (PetscInt g = 0; g < Nb; ++g) { 1665 const PetscInt nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g]; 1666 if (nc == 1) cols[0] = Idxs[g]; 1667 else { 1668 const PetscInt idx = -Idxs[g] - 1; 1669 for (PetscInt q = 0; q < nc; q++) cols[q] = maps[grid].c_maps[idx][q].gid; 1670 } 1671 const PetscInt idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 1672 for (PetscInt q = 0, idx = idx0; q < nr; q++) { 1673 for (PetscInt d = 0; d < nc; d++, idx++) { 1674 oor[idx] = rows[q] + moffset; 1675 ooc[idx] = cols[d] + moffset; 1676 } 1677 } 1678 } 1679 } 1680 } 1681 } // cell 1682 } // grid 1683 } // batch 1684 PetscCall(MatSetPreallocationCOO(ctx->J, ctx->SData_d.coo_size, oor, ooc)); 1685 PetscCall(PetscFree2(oor, ooc)); 1686 } 1687 PetscCall(PetscFree(pointMaps)); 1688 PetscCall(PetscFree(elemMatrix)); 1689 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 1690 PetscCall(PetscContainerSetPointer(container, (void *)maps)); 1691 PetscCall(PetscContainerSetCtxDestroy(container, LandauGPUMapsDestroy)); 1692 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container)); 1693 PetscCall(PetscContainerDestroy(&container)); 1694 PetscCall(PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0)); 1695 } // end GPU assembly 1696 { /* create static point data, Jacobian called first, only one vertex copy */ 1697 PetscReal *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a; 1698 PetscInt outer_ipidx, outer_ej, grid, nip_glb = 0; 1699 PetscFE fe; 1700 PetscCall(PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0)); 1701 PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n")); 1702 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid]; 1703 /* collect f data, first time is for Jacobian, but make mass now */ 1704 if (ctx->verbose != 0) { 1705 PetscInt ncells = 0, N; 1706 MatInfo info; 1707 PetscCall(MatGetInfo(ctx->J, MAT_LOCAL, &info)); 1708 PetscCall(MatGetSize(ctx->J, &N, NULL)); 1709 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid]; 1710 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT " nnz= %" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb, 1711 ctx->num_species, Nb, dim, N, (PetscInt)info.nz_used)); 1712 } 1713 PetscCall(PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a)); 1714 if (dim == 3) PetscCall(PetscMalloc1(nip_glb, &zz)); 1715 if (ctx->use_energy_tensor_trick) { 1716 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, prefix, PETSC_DECIDE, &fe)); 1717 PetscCall(PetscObjectSetName((PetscObject)fe, "energy")); 1718 } 1719 /* init each grids static data - no batch */ 1720 for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once) 1721 Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic 1722 PetscSection e_section; 1723 DM dmEnergy; 1724 PetscInt cStart, cEnd, ej; 1725 1726 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1727 // prep energy trick, get v^2 / 2 vector 1728 if (ctx->use_energy_tensor_trick) { 1729 PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f}; 1730 Vec glob_v2; 1731 PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))}; 1732 1733 PetscCall(DMClone(ctx->plex[grid], &dmEnergy)); 1734 PetscCall(PetscObjectSetName((PetscObject)dmEnergy, "energy")); 1735 PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe)); 1736 PetscCall(DMCreateDS(dmEnergy)); 1737 PetscCall(DMGetLocalSection(dmEnergy, &e_section)); 1738 PetscCall(DMGetGlobalVector(dmEnergy, &glob_v2)); 1739 PetscCall(PetscObjectSetName((PetscObject)glob_v2, "trick")); 1740 c2_0[0] = &data[0]; 1741 PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2)); 1742 PetscCall(DMGetLocalVector(dmEnergy, &v2_2)); 1743 PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */ 1744 PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1745 PetscCall(DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1746 PetscCall(DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view")); 1747 PetscCall(VecViewFromOptions(glob_v2, NULL, "-energy_vec_view")); 1748 PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2)); 1749 } 1750 /* append part of the IP data for each grid */ 1751 for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) { 1752 PetscScalar *coefs = NULL; 1753 PetscReal vj[LANDAU_MAX_NQND * LANDAU_DIM], detJj[LANDAU_MAX_NQND], Jdummy[LANDAU_MAX_NQND * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0); 1754 invJe = invJ_a + outer_ej * Nq * dim * dim; 1755 PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj)); 1756 if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs)); 1757 /* create static point data */ 1758 for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) { 1759 const PetscInt gidx = outer_ipidx; 1760 const PetscReal *invJ = &invJe[qj * dim * dim]; 1761 ww[gidx] = detJj[qj] * quadWeights[qj]; 1762 if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */ 1763 // get xx, yy, zz 1764 if (ctx->use_energy_tensor_trick) { 1765 double refSpaceDer[3], eGradPhi[3]; 1766 const PetscReal *const DD = Tf[0]->T[1]; 1767 const PetscReal *Dq = &DD[qj * Nb * dim]; 1768 for (PetscInt d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0; 1769 for (PetscInt b = 0; b < Nb; ++b) { 1770 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]); 1771 } 1772 xx[gidx] = 1e10; 1773 if (ctx->use_relativistic_corrections) { 1774 double dg2_c2 = 0; 1775 //for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] *= c02; 1776 for (PetscInt d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]); 1777 dg2_c2 *= (double)c02; 1778 if (dg2_c2 >= .999) { 1779 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1780 yy[gidx] = vj[qj * dim + 1]; 1781 if (dim == 3) zz[gidx] = vj[qj * dim + 2]; 1782 PetscCall(PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + zz[gidx] * zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx])); 1783 } else { 1784 PetscReal fact = c02 / PetscSqrtReal(1. - dg2_c2); 1785 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] *= fact; 1786 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0 1787 } 1788 } 1789 if (xx[gidx] == 1e10) { 1790 for (PetscInt d = 0; d < dim; ++d) { 1791 for (PetscInt e = 0; e < dim; ++e) eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e]; 1792 } 1793 xx[gidx] = eGradPhi[0]; 1794 yy[gidx] = eGradPhi[1]; 1795 if (dim == 3) zz[gidx] = eGradPhi[2]; 1796 } 1797 } else { 1798 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1799 yy[gidx] = vj[qj * dim + 1]; 1800 if (dim == 3) zz[gidx] = vj[qj * dim + 2]; 1801 } 1802 } /* q */ 1803 if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs)); 1804 } /* ej */ 1805 if (ctx->use_energy_tensor_trick) { 1806 PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2)); 1807 PetscCall(DMDestroy(&dmEnergy)); 1808 } 1809 } /* grid */ 1810 if (ctx->use_energy_tensor_trick) PetscCall(PetscFEDestroy(&fe)); 1811 /* cache static data */ 1812 if (ctx->deviceType == LANDAU_KOKKOS) { 1813 #if defined(PETSC_HAVE_KOKKOS) 1814 PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, (PetscReal *)ctx->lambdas, invJ_a, xx, yy, zz, ww, &ctx->SData_d)); 1815 /* free */ 1816 PetscCall(PetscFree4(ww, xx, yy, invJ_a)); 1817 if (dim == 3) PetscCall(PetscFree(zz)); 1818 #else 1819 SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built"); 1820 #endif 1821 } else { /* CPU version, just copy in, only use part */ 1822 PetscReal *nu_alpha_p = (PetscReal *)ctx->SData_d.alpha, *nu_beta_p = (PetscReal *)ctx->SData_d.beta, *invMass_p = (PetscReal *)ctx->SData_d.invMass, *lambdas_p = NULL; // why set these ? 1823 ctx->SData_d.w = (void *)ww; 1824 ctx->SData_d.x = (void *)xx; 1825 ctx->SData_d.y = (void *)yy; 1826 ctx->SData_d.z = (void *)zz; 1827 ctx->SData_d.invJ = (void *)invJ_a; 1828 PetscCall(PetscMalloc4(ctx->num_species, &nu_alpha_p, ctx->num_species, &nu_beta_p, ctx->num_species, &invMass_p, LANDAU_MAX_GRIDS * LANDAU_MAX_GRIDS, &lambdas_p)); 1829 for (PetscInt ii = 0; ii < ctx->num_species; ii++) { 1830 nu_alpha_p[ii] = nu_alpha[ii]; 1831 nu_beta_p[ii] = nu_beta[ii]; 1832 invMass_p[ii] = invMass[ii]; 1833 } 1834 ctx->SData_d.alpha = (void *)nu_alpha_p; 1835 ctx->SData_d.beta = (void *)nu_beta_p; 1836 ctx->SData_d.invMass = (void *)invMass_p; 1837 ctx->SData_d.lambdas = (void *)lambdas_p; 1838 for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) { 1839 PetscReal (*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal (*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas; 1840 for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) (*lambdas)[grid][gridj] = ctx->lambdas[grid][gridj]; 1841 } 1842 } 1843 PetscCall(PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0)); 1844 } // initialize 1845 PetscFunctionReturn(PETSC_SUCCESS); 1846 } 1847 1848 /* < v, u > */ 1849 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1850 { 1851 g0[0] = 1.; 1852 } 1853 1854 /* < v, u > */ 1855 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1856 { 1857 static double ttt = 1e-12; 1858 g0[0] = ttt++; 1859 } 1860 1861 /* < v, u > */ 1862 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1863 { 1864 g0[0] = 2. * PETSC_PI * x[0]; 1865 } 1866 1867 /* 1868 LandauCreateJacobianMatrix - creates ctx->J with without real data. Hard to keep sparse. 1869 - Like DMPlexLandauCreateMassMatrix. Should remove one and combine 1870 - has old support for field major ordering 1871 */ 1872 static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx) 1873 { 1874 PetscInt *idxs = NULL; 1875 Mat subM[LANDAU_MAX_GRIDS]; 1876 1877 PetscFunctionBegin; 1878 if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1879 PetscFunctionReturn(PETSC_SUCCESS); 1880 } 1881 // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' -- not used 1882 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs)); 1883 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1884 const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid]; 1885 Mat gMat; 1886 DM massDM; 1887 PetscDS prob; 1888 Vec tvec; 1889 // get "mass" matrix for reordering 1890 PetscCall(DMClone(ctx->plex[grid], &massDM)); 1891 PetscCall(DMCopyFields(ctx->plex[grid], PETSC_DETERMINE, PETSC_DETERMINE, massDM)); 1892 PetscCall(DMCreateDS(massDM)); 1893 PetscCall(DMGetDS(massDM, &prob)); 1894 for (PetscInt ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL)); 1895 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); // this trick is need to both sparsify the matrix and avoid runtime error 1896 PetscCall(DMCreateMatrix(massDM, &gMat)); 1897 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false")); 1898 PetscCall(MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 1899 PetscCall(MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE)); 1900 PetscCall(DMCreateLocalVector(ctx->plex[grid], &tvec)); 1901 PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx)); 1902 PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view")); 1903 PetscCall(DMDestroy(&massDM)); 1904 PetscCall(VecDestroy(&tvec)); 1905 subM[grid] = gMat; 1906 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1907 MatOrderingType rtype = MATORDERINGRCM; 1908 IS isrow, isicol; 1909 PetscCall(MatGetOrdering(gMat, rtype, &isrow, &isicol)); 1910 PetscCall(ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid])); 1911 PetscCall(ISGetIndices(isrow, &values)); 1912 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid 1913 #if !defined(LANDAU_SPECIES_MAJOR) 1914 PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N; 1915 for (PetscInt ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0; 1916 #else 1917 PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n; 1918 for (PetscInt ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0; 1919 #endif 1920 } 1921 PetscCall(ISRestoreIndices(isrow, &values)); 1922 PetscCall(ISDestroy(&isrow)); 1923 PetscCall(ISDestroy(&isicol)); 1924 } 1925 } 1926 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is)); 1927 // get a block matrix 1928 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1929 Mat B = subM[grid]; 1930 PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024; 1931 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 1932 PetscCall(MatGetSize(B, &nloc, NULL)); 1933 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 1934 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 1935 const PetscInt *cols; 1936 const PetscScalar *vals; 1937 for (PetscInt i = 0; i < nloc; i++) { 1938 PetscCall(MatGetRow(B, i, &nzl, NULL, NULL)); 1939 if (nzl > COL_BF_SIZE) { 1940 PetscCall(PetscFree(colbuf)); 1941 PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, 2 * COL_BF_SIZE, nzl)); 1942 COL_BF_SIZE = nzl; 1943 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 1944 } 1945 PetscCall(MatGetRow(B, i, &nzl, &cols, &vals)); 1946 for (PetscInt j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset; 1947 row = i + moffset; 1948 PetscCall(MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES)); 1949 PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals)); 1950 } 1951 } 1952 PetscCall(PetscFree(colbuf)); 1953 } 1954 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid])); 1955 PetscCall(MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY)); 1956 PetscCall(MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY)); 1957 1958 // debug 1959 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view")); 1960 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1961 Mat mat_block_order; 1962 PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order)); // use MatPermute 1963 PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view")); 1964 PetscCall(MatDestroy(&mat_block_order)); 1965 PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch)); 1966 PetscCall(VecDuplicate(X, &ctx->work_vec)); 1967 } 1968 PetscFunctionReturn(PETSC_SUCCESS); 1969 } 1970 1971 static void LandauSphereMapping(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f[]) 1972 { 1973 PetscReal u_max = 0, u_norm = 0, scale, square_inner_radius = PetscRealPart(constants[0]), square_radius = PetscRealPart(constants[1]); 1974 PetscInt d; 1975 1976 for (d = 0; d < dim; ++d) { 1977 PetscReal val = PetscAbsReal(PetscRealPart(u[d])); 1978 if (val > u_max) u_max = val; 1979 u_norm += PetscRealPart(u[d]) * PetscRealPart(u[d]); 1980 } 1981 u_norm = PetscSqrtReal(u_norm); 1982 1983 if (u_max < square_inner_radius) { 1984 for (d = 0; d < dim; ++d) f[d] = u[d]; 1985 return; 1986 } 1987 1988 /* 1989 A outer cube has corners at |u| = square_radius. 1990 u_1 is the intersection of the ray with the outer cube face. 1991 R_max = square_radius * sqrt(3) is radius of sphere we want points on outer cube mapped to. 1992 u_0 is the intersection of the ray with the inner cube face. 1993 The cube has corners at |u| = square_inner_radius. 1994 scale to point linearly between u_0 and u_1 so that a point on the inner face does not move, and a point on the outer face moves to the sphere. 1995 */ 1996 if (u_max > square_radius + 1e-5) (void)PetscPrintf(PETSC_COMM_SELF, "Error: Point outside outer radius: u_max %g > %g\n", (double)u_max, (double)square_radius); 1997 /* if (PetscAbsReal(u_max - square_inner_radius) < 1e-5 || PetscAbsReal(u_max - square_radius) < 1e-5) { 1998 (void)PetscPrintf(PETSC_COMM_SELF, "Warning: Point near corner of inner and outer cube: u_max %g, inner %g, outer %g\n", (double)u_max, (double)square_inner_radius, (double)square_radius); 1999 } */ 2000 { 2001 PetscReal u_0_norm = u_norm * square_inner_radius / u_max; 2002 PetscReal R_max = square_radius * PetscSqrtReal((PetscReal)dim); 2003 PetscReal t = (u_max - square_inner_radius) / (square_radius - square_inner_radius); 2004 PetscReal rho_prime = (1.0 - t) * u_0_norm + t * R_max; 2005 scale = rho_prime / u_norm; 2006 } 2007 for (d = 0; d < dim; ++d) f[d] = u[d] * scale; 2008 } 2009 2010 static PetscErrorCode LandauSphereMesh(DM dm, PetscReal inner, PetscReal radius) 2011 { 2012 DM cdm; 2013 PetscDS cds; 2014 PetscScalar consts[2]; 2015 2016 PetscFunctionBegin; 2017 consts[0] = inner; 2018 consts[1] = radius; 2019 PetscCall(DMGetCoordinateDM(dm, &cdm)); 2020 PetscCall(DMGetDS(cdm, &cds)); 2021 PetscCall(PetscDSSetConstants(cds, 2, consts)); 2022 PetscCall(DMPlexRemapGeometry(dm, 0.0, LandauSphereMapping)); 2023 PetscFunctionReturn(PETSC_SUCCESS); 2024 } 2025 2026 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat); 2027 2028 /*@C 2029 DMPlexLandauCreateVelocitySpace - Create a `DMPLEX` velocity space mesh 2030 2031 Collective 2032 2033 Input Parameters: 2034 + comm - The MPI communicator 2035 . dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver) 2036 - prefix - prefix for options (not tested) 2037 2038 Output Parameters: 2039 + pack - The `DM` object representing the mesh 2040 . X - A vector (user destroys) 2041 - J - Optional matrix (object destroys) 2042 2043 Level: beginner 2044 2045 .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()` 2046 @*/ 2047 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack) 2048 { 2049 LandauCtx *ctx; 2050 Vec Xsub[LANDAU_MAX_GRIDS]; 2051 IS grid_batch_is_inv[LANDAU_MAX_GRIDS]; 2052 2053 PetscFunctionBegin; 2054 PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported"); 2055 PetscCheck(LANDAU_DIM == dim, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM); 2056 PetscCall(PetscNew(&ctx)); 2057 ctx->comm = comm; /* used for diagnostics and global errors */ 2058 /* process options */ 2059 PetscCall(ProcessOptions(ctx, prefix)); 2060 if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE; 2061 /* Create Mesh */ 2062 PetscCall(DMCompositeCreate(PETSC_COMM_SELF, pack)); 2063 PetscCall(PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0)); 2064 PetscCall(PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0)); 2065 PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR) 2066 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2067 /* create FEM */ 2068 PetscCall(SetupDS(ctx->plex[grid], dim, grid, prefix, ctx)); 2069 /* set initial state */ 2070 PetscCall(DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid])); 2071 PetscCall(PetscObjectSetName((PetscObject)Xsub[grid], "u_orig")); 2072 /* initial static refinement, no solve */ 2073 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx)); 2074 /* forest refinement - forest goes in (if forest), plex comes out */ 2075 if (ctx->use_p4est) { 2076 DM plex; 2077 PetscCall(adapt(grid, ctx, &Xsub[grid])); // forest goes in, plex comes out 2078 // convert to plex, all done with this level 2079 PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex)); 2080 PetscCall(DMDestroy(&ctx->plex[grid])); 2081 ctx->plex[grid] = plex; 2082 } else if (ctx->sphere && dim == 3) { 2083 PetscCall(LandauSphereMesh(ctx->plex[grid], ctx->radius[grid] * ctx->sphere_inner_radius_90degree[grid], ctx->radius[grid])); 2084 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx)); 2085 } 2086 if (grid == 0) { 2087 PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view")); 2088 PetscCall(VecSetOptionsPrefix(Xsub[grid], prefix)); 2089 PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view")); 2090 } 2091 #if !defined(LANDAU_SPECIES_MAJOR) 2092 PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); 2093 #else 2094 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid 2095 PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); 2096 } 2097 #endif 2098 PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx)); 2099 } 2100 #if !defined(LANDAU_SPECIES_MAJOR) 2101 // stack the batched DMs, could do it all here!!! b_id=0 2102 for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) { 2103 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); 2104 } 2105 #endif 2106 // create ctx->mat_offset 2107 ctx->mat_offset[0] = 0; 2108 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2109 PetscInt n; 2110 PetscCall(VecGetLocalSize(Xsub[grid], &n)); 2111 ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n; 2112 } 2113 // creat DM & Jac 2114 PetscCall(DMSetApplicationContext(*pack, ctx)); 2115 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); 2116 PetscCall(DMCreateMatrix(*pack, &ctx->J)); 2117 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false")); 2118 PetscCall(MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2119 PetscCall(MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE)); 2120 PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac")); 2121 // construct initial conditions in X 2122 PetscCall(DMCreateGlobalVector(*pack, X)); 2123 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2124 PetscInt n; 2125 PetscCall(VecGetLocalSize(Xsub[grid], &n)); 2126 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 2127 PetscScalar const *values; 2128 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 2129 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx)); 2130 PetscCall(VecGetArrayRead(Xsub[grid], &values)); // Drop whole grid in Plex ordering 2131 for (PetscInt i = 0, idx = moffset; i < n; i++, idx++) PetscCall(VecSetValue(*X, idx, values[i], INSERT_VALUES)); 2132 PetscCall(VecRestoreArrayRead(Xsub[grid], &values)); 2133 } 2134 } 2135 // cleanup 2136 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(VecDestroy(&Xsub[grid])); 2137 /* check for correct matrix type */ 2138 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 2139 PetscBool flg; 2140 if (ctx->deviceType == LANDAU_KOKKOS) { 2141 PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, "")); 2142 #if defined(PETSC_HAVE_KOKKOS) 2143 PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2144 #else 2145 PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2146 #endif 2147 } 2148 } 2149 PetscCall(PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0)); 2150 2151 // create field major ordering 2152 ctx->work_vec = NULL; 2153 ctx->plex_batch = NULL; 2154 ctx->batch_is = NULL; 2155 for (PetscInt i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL; 2156 PetscCall(PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0)); 2157 PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx)); 2158 PetscCall(PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0)); 2159 2160 // create AMR GPU assembly maps and static GPU data 2161 PetscCall(CreateStaticData(dim, grid_batch_is_inv, prefix, ctx)); 2162 2163 PetscCall(PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0)); 2164 2165 // create mass matrix 2166 PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL)); 2167 2168 if (J) *J = ctx->J; 2169 2170 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2171 PetscContainer container; 2172 // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order 2173 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2174 PetscCall(PetscContainerSetPointer(container, (void *)ctx)); 2175 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container)); 2176 PetscCall(PetscContainerDestroy(&container)); 2177 // batch solvers need to map -- can batch solvers work 2178 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2179 PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch)); 2180 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container)); 2181 PetscCall(PetscContainerDestroy(&container)); 2182 } 2183 // for batch solvers 2184 { 2185 PetscContainer container; 2186 PetscInt *pNf; 2187 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2188 PetscCall(PetscMalloc1(sizeof(*pNf), &pNf)); 2189 *pNf = ctx->batch_sz; 2190 PetscCall(PetscContainerSetPointer(container, (void *)pNf)); 2191 PetscCall(PetscContainerSetCtxDestroy(container, PetscCtxDestroyDefault)); 2192 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container)); 2193 PetscCall(PetscContainerDestroy(&container)); 2194 } 2195 PetscFunctionReturn(PETSC_SUCCESS); 2196 } 2197 2198 /*@C 2199 DMPlexLandauAccess - Access to the distribution function with user callback 2200 2201 Collective 2202 2203 Input Parameters: 2204 + pack - the `DMCOMPOSITE` 2205 . func - call back function 2206 - user_ctx - user context 2207 2208 Input/Output Parameter: 2209 . X - Vector to data to 2210 2211 Level: advanced 2212 2213 .seealso: `DMPlexLandauCreateVelocitySpace()` 2214 @*/ 2215 PetscErrorCode DMPlexLandauAccess(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx) 2216 { 2217 LandauCtx *ctx; 2218 2219 PetscFunctionBegin; 2220 PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset 2221 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2222 PetscInt dim, n; 2223 PetscCall(DMGetDimension(pack, &dim)); 2224 for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) { 2225 Vec vec; 2226 PetscInt vf[1] = {i0}; 2227 IS vis; 2228 DM vdm; 2229 PetscCall(DMCreateSubDM(ctx->plex[grid], 1, vf, &vis, &vdm)); 2230 PetscCall(DMSetApplicationContext(vdm, ctx)); // the user might want this 2231 PetscCall(DMCreateGlobalVector(vdm, &vec)); 2232 PetscCall(VecGetSize(vec, &n)); 2233 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 2234 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 2235 PetscCall(VecZeroEntries(vec)); 2236 /* Add your data with 'dm' for species 'sp' to 'vec' */ 2237 PetscCall(func(vdm, vec, i0, grid, b_id, user_ctx)); 2238 /* add to global */ 2239 PetscScalar const *values; 2240 const PetscInt *offsets; 2241 PetscCall(VecGetArrayRead(vec, &values)); 2242 PetscCall(ISGetIndices(vis, &offsets)); 2243 for (PetscInt i = 0; i < n; i++) PetscCall(VecSetValue(X, moffset + offsets[i], values[i], ADD_VALUES)); 2244 PetscCall(VecRestoreArrayRead(vec, &values)); 2245 PetscCall(ISRestoreIndices(vis, &offsets)); 2246 } // batch 2247 PetscCall(VecDestroy(&vec)); 2248 PetscCall(ISDestroy(&vis)); 2249 PetscCall(DMDestroy(&vdm)); 2250 } 2251 } // grid 2252 PetscFunctionReturn(PETSC_SUCCESS); 2253 } 2254 2255 /*@ 2256 DMPlexLandauDestroyVelocitySpace - Destroy a `DMPLEX` velocity space mesh 2257 2258 Collective 2259 2260 Input/Output Parameters: 2261 . dm - the `DM` to destroy 2262 2263 Level: beginner 2264 2265 .seealso: `DMPlexLandauCreateVelocitySpace()` 2266 @*/ 2267 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm) 2268 { 2269 LandauCtx *ctx; 2270 2271 PetscFunctionBegin; 2272 PetscCall(DMGetApplicationContext(*dm, &ctx)); 2273 PetscCall(MatDestroy(&ctx->M)); 2274 PetscCall(MatDestroy(&ctx->J)); 2275 for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii])); 2276 PetscCall(ISDestroy(&ctx->batch_is)); 2277 PetscCall(VecDestroy(&ctx->work_vec)); 2278 PetscCall(VecScatterDestroy(&ctx->plex_batch)); 2279 if (ctx->deviceType == LANDAU_KOKKOS) { 2280 #if defined(PETSC_HAVE_KOKKOS) 2281 PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d)); 2282 #else 2283 SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos"); 2284 #endif 2285 } else { 2286 if (ctx->SData_d.x) { /* in a CPU run */ 2287 PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w; 2288 LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets; 2289 PetscCall(PetscFree4(ww, xx, yy, invJ)); 2290 if (zz) PetscCall(PetscFree(zz)); 2291 if (coo_elem_offsets) PetscCall(PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets)); // could be NULL 2292 PetscCall(PetscFree4(ctx->SData_d.alpha, ctx->SData_d.beta, ctx->SData_d.invMass, ctx->SData_d.lambdas)); 2293 } 2294 } 2295 2296 if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings 2297 PetscCall(PetscPrintf(ctx->comm, "TSStep N 1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE])); 2298 PetscCall(PetscPrintf(ctx->comm, "2: Solve: %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz)); 2299 PetscCall(PetscPrintf(ctx->comm, "3: Landau: %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL])); 2300 PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN])); 2301 PetscCall(PetscPrintf(ctx->comm, "Landau Operator N 1.0 %10.3e\n", ctx->times[LANDAU_OPERATOR])); 2302 PetscCall(PetscPrintf(ctx->comm, "Landau Mass N 1.0 %10.3e\n", ctx->times[LANDAU_MASS])); 2303 PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU) N 1.0 %10.3e\n", ctx->times[LANDAU_F_DF])); 2304 PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU) N 1.0 %10.3e\n", ctx->times[LANDAU_KERNEL])); 2305 PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum X 1.0 %10.3e\n", ctx->times[KSP_FACTOR])); 2306 PetscCall(PetscPrintf(ctx->comm, "MatSolve X 1.0 %10.3e\n", ctx->times[KSP_SOLVE])); 2307 } 2308 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMDestroy(&ctx->plex[grid])); 2309 PetscCall(PetscFree(ctx)); 2310 PetscCall(DMDestroy(dm)); 2311 PetscFunctionReturn(PETSC_SUCCESS); 2312 } 2313 2314 /* < v, ru > */ 2315 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2316 { 2317 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2318 f0[0] = u[ii]; 2319 } 2320 2321 /* < v, ru > */ 2322 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2323 { 2324 PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]); 2325 f0[0] = x[jj] * u[ii]; /* x momentum */ 2326 } 2327 2328 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2329 { 2330 PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]); 2331 double tmp1 = 0.; 2332 for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i]; 2333 f0[0] = tmp1 * u[ii]; 2334 } 2335 2336 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx) 2337 { 2338 const PetscReal *c2_0_arr = ((PetscReal *)actx); 2339 const PetscReal c02 = c2_0_arr[0]; 2340 2341 PetscFunctionBegin; 2342 for (PetscInt s = 0; s < Nf; s++) { 2343 PetscReal tmp1 = 0.; 2344 for (PetscInt i = 0; i < dim; ++i) tmp1 += x[i] * x[i]; 2345 #if defined(PETSC_USE_DEBUG) 2346 u[s] = PetscSqrtReal(1. + tmp1 / c02); // u[0] = PetscSqrtReal(1. + xx); 2347 #else 2348 { 2349 PetscReal xx = tmp1 / c02; 2350 u[s] = xx / (PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.) 2351 } 2352 #endif 2353 } 2354 PetscFunctionReturn(PETSC_SUCCESS); 2355 } 2356 2357 /* < v, ru > */ 2358 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2359 { 2360 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2361 f0[0] = 2. * PETSC_PI * x[0] * u[ii]; 2362 } 2363 2364 /* < v, ru > */ 2365 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2366 { 2367 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2368 f0[0] = 2. * PETSC_PI * x[0] * x[1] * u[ii]; 2369 } 2370 2371 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2372 { 2373 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2374 f0[0] = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii]; 2375 } 2376 2377 /*@ 2378 DMPlexLandauPrintNorms - collects moments and prints them 2379 2380 Collective 2381 2382 Input Parameters: 2383 + X - the state 2384 - stepi - current step to print 2385 2386 Level: beginner 2387 2388 .seealso: `DMPlexLandauCreateVelocitySpace()` 2389 @*/ 2390 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi) 2391 { 2392 LandauCtx *ctx; 2393 PetscDS prob; 2394 DM pack; 2395 PetscInt cStart, cEnd, dim, ii, i0, nDMs; 2396 PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES]; 2397 PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES]; 2398 Vec *globXArray; 2399 2400 PetscFunctionBegin; 2401 PetscCall(VecGetDM(X, &pack)); 2402 PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM"); 2403 PetscCall(DMGetDimension(pack, &dim)); 2404 PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]", dim); 2405 PetscCall(DMGetApplicationContext(pack, &ctx)); 2406 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2407 /* print momentum and energy */ 2408 PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); 2409 PetscCheck(nDMs == ctx->num_grids * ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT, nDMs, ctx->num_grids * ctx->batch_sz); 2410 PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray)); 2411 PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); 2412 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2413 Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)]; 2414 PetscCall(DMGetDS(ctx->plex[grid], &prob)); 2415 for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 2416 PetscScalar user[2] = {(PetscScalar)i0, ctx->charges[ii]}; 2417 PetscCall(PetscDSSetConstants(prob, 2, user)); 2418 if (dim == 2) { /* 2/3X + 3V (cylindrical coordinates) */ 2419 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden)); 2420 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2421 density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii]; 2422 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom)); 2423 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2424 zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2425 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2)); 2426 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2427 energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii]; 2428 zmomentumtot += zmomentum[ii]; 2429 energytot += energy[ii]; 2430 densitytot += density[ii]; 2431 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii]))); 2432 } else { /* 2/3Xloc + 3V */ 2433 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den)); 2434 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2435 density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii]; 2436 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom)); 2437 user[1] = 0; 2438 PetscCall(PetscDSSetConstants(prob, 2, user)); 2439 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2440 xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2441 user[1] = 1; 2442 PetscCall(PetscDSSetConstants(prob, 2, user)); 2443 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2444 ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2445 user[1] = 2; 2446 PetscCall(PetscDSSetConstants(prob, 2, user)); 2447 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2448 zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2449 if (ctx->use_relativistic_corrections) { 2450 /* gamma * M * f */ 2451 if (ii == 0 && grid == 0) { // do all at once 2452 Vec Mf, globGamma, *globMfArray, *globGammaArray; 2453 PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f}; 2454 PetscReal *c2_0[1], data[1]; 2455 2456 PetscCall(VecDuplicate(X, &globGamma)); 2457 PetscCall(VecDuplicate(X, &Mf)); 2458 PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray)); 2459 PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray)); 2460 /* M * f */ 2461 PetscCall(MatMult(ctx->M, X, Mf)); 2462 /* gamma */ 2463 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2464 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching 2465 Vec v1 = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)]; 2466 data[0] = PetscSqr(C_0(ctx->v_0)); 2467 c2_0[0] = &data[0]; 2468 PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1)); 2469 } 2470 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2471 /* gamma * Mf */ 2472 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2473 PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2474 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes a grid loop in a grid loop to print nice 2475 PetscInt Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs; 2476 Vec Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2; 2477 // get each component 2478 PetscCall(VecGetSize(Mfsub, &N)); 2479 PetscCall(VecCreate(ctx->comm, &v1)); 2480 PetscCall(VecSetSizes(v1, PETSC_DECIDE, N / Nf)); 2481 PetscCall(VecCreate(ctx->comm, &v2)); 2482 PetscCall(VecSetSizes(v2, PETSC_DECIDE, N / Nf)); 2483 PetscCall(VecSetFromOptions(v1)); // ??? 2484 PetscCall(VecSetFromOptions(v2)); 2485 // get each component 2486 PetscCall(VecGetBlockSize(Gsub, &bs)); 2487 PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub", bs, Nf); 2488 PetscCall(VecGetBlockSize(Mfsub, &bs)); 2489 PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT, bs, Nf); 2490 for (PetscInt i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) { 2491 PetscScalar val; 2492 PetscCall(VecStrideGather(Gsub, i, v1, INSERT_VALUES)); // this is not right -- TODO 2493 PetscCall(VecStrideGather(Mfsub, i, v2, INSERT_VALUES)); 2494 PetscCall(VecDot(v1, v2, &val)); 2495 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix]; 2496 } 2497 PetscCall(VecDestroy(&v1)); 2498 PetscCall(VecDestroy(&v2)); 2499 } /* grids */ 2500 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2501 PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2502 PetscCall(PetscFree(globGammaArray)); 2503 PetscCall(PetscFree(globMfArray)); 2504 PetscCall(VecDestroy(&globGamma)); 2505 PetscCall(VecDestroy(&Mf)); 2506 } 2507 } else { 2508 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2)); 2509 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2510 energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii]; 2511 } 2512 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii]))); 2513 xmomentumtot += xmomentum[ii]; 2514 ymomentumtot += ymomentum[ii]; 2515 zmomentumtot += zmomentum[ii]; 2516 energytot += energy[ii]; 2517 densitytot += density[ii]; 2518 } 2519 if (ctx->num_species > 1) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 2520 } 2521 } 2522 PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray)); 2523 PetscCall(PetscFree(globXArray)); 2524 /* totals */ 2525 PetscCall(DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd)); 2526 if (ctx->num_species > 1) { 2527 if (dim == 2) { 2528 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot), 2529 (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart)); 2530 } else { 2531 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot), 2532 (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart)); 2533 } 2534 } else PetscCall(PetscPrintf(PETSC_COMM_WORLD, " -- %" PetscInt_FMT " cells", cEnd - cStart)); 2535 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 2536 PetscFunctionReturn(PETSC_SUCCESS); 2537 } 2538 2539 /*@ 2540 DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian) 2541 - puts mass matrix into ctx->M 2542 2543 Collective 2544 2545 Input Parameter: 2546 . pack - the `DM` object. Puts matrix in Landau context M field 2547 2548 Output Parameter: 2549 . Amat - The mass matrix (optional), mass matrix is added to the `DM` context 2550 2551 Level: beginner 2552 2553 .seealso: `DMPlexLandauCreateVelocitySpace()` 2554 @*/ 2555 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat) 2556 { 2557 DM mass_pack, massDM[LANDAU_MAX_GRIDS]; 2558 PetscDS prob; 2559 PetscInt ii, dim, N1 = 1, N2; 2560 LandauCtx *ctx; 2561 Mat packM, subM[LANDAU_MAX_GRIDS]; 2562 2563 PetscFunctionBegin; 2564 PetscValidHeaderSpecific(pack, DM_CLASSID, 1); 2565 if (Amat) PetscAssertPointer(Amat, 2); 2566 PetscCall(DMGetApplicationContext(pack, &ctx)); 2567 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2568 PetscCall(PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0)); 2569 PetscCall(DMGetDimension(pack, &dim)); 2570 PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack)); 2571 /* create pack mass matrix */ 2572 for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) { 2573 PetscCall(DMClone(ctx->plex[grid], &massDM[grid])); 2574 PetscCall(DMCopyFields(ctx->plex[grid], PETSC_DETERMINE, PETSC_DETERMINE, massDM[grid])); 2575 PetscCall(DMCreateDS(massDM[grid])); 2576 PetscCall(DMGetDS(massDM[grid], &prob)); 2577 for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) { 2578 if (dim == 3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL)); 2579 else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL)); 2580 } 2581 #if !defined(LANDAU_SPECIES_MAJOR) 2582 PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2583 #else 2584 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid 2585 PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2586 } 2587 #endif 2588 PetscCall(DMCreateMatrix(massDM[grid], &subM[grid])); 2589 } 2590 #if !defined(LANDAU_SPECIES_MAJOR) 2591 // stack the batched DMs 2592 for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) { 2593 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2594 } 2595 #endif 2596 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); 2597 PetscCall(DMCreateMatrix(mass_pack, &packM)); 2598 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false")); 2599 PetscCall(MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2600 PetscCall(MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE)); 2601 PetscCall(DMDestroy(&mass_pack)); 2602 /* make mass matrix for each block */ 2603 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2604 Vec locX; 2605 DM plex = massDM[grid]; 2606 PetscCall(DMGetLocalVector(plex, &locX)); 2607 /* Mass matrix is independent of the input, so no need to fill locX */ 2608 PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx)); 2609 PetscCall(DMRestoreLocalVector(plex, &locX)); 2610 PetscCall(DMDestroy(&massDM[grid])); 2611 } 2612 PetscCall(MatGetSize(ctx->J, &N1, NULL)); 2613 PetscCall(MatGetSize(packM, &N2, NULL)); 2614 PetscCheck(N1 == N2, PetscObjectComm((PetscObject)pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT, N1, N2); 2615 /* assemble block diagonals */ 2616 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2617 Mat B = subM[grid]; 2618 PetscInt nloc, nzl, *colbuf, COL_BF_SIZE = 1024, row; 2619 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 2620 PetscCall(MatGetSize(B, &nloc, NULL)); 2621 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 2622 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 2623 const PetscInt *cols; 2624 const PetscScalar *vals; 2625 for (PetscInt i = 0; i < nloc; i++) { 2626 PetscCall(MatGetRow(B, i, &nzl, NULL, NULL)); 2627 if (nzl > COL_BF_SIZE) { 2628 PetscCall(PetscFree(colbuf)); 2629 PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, 2 * COL_BF_SIZE, nzl)); 2630 COL_BF_SIZE = nzl; 2631 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 2632 } 2633 PetscCall(MatGetRow(B, i, &nzl, &cols, &vals)); 2634 for (PetscInt j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset; 2635 row = i + moffset; 2636 PetscCall(MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES)); 2637 PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals)); 2638 } 2639 } 2640 PetscCall(PetscFree(colbuf)); 2641 } 2642 // cleanup 2643 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid])); 2644 PetscCall(MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY)); 2645 PetscCall(MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY)); 2646 PetscCall(PetscObjectSetName((PetscObject)packM, "mass")); 2647 PetscCall(MatViewFromOptions(packM, NULL, "-dm_landau_mass_view")); 2648 ctx->M = packM; 2649 if (Amat) *Amat = packM; 2650 PetscCall(PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0)); 2651 PetscFunctionReturn(PETSC_SUCCESS); 2652 } 2653 2654 /*@ 2655 DMPlexLandauIFunction - `TS` residual calculation, confusingly this computes the Jacobian w/o mass 2656 2657 Collective 2658 2659 Input Parameters: 2660 + ts - The time stepping context 2661 . time_dummy - current time (not used) 2662 . X - Current state 2663 . X_t - Time derivative of current state 2664 - actx - Landau context 2665 2666 Output Parameter: 2667 . F - The residual 2668 2669 Level: beginner 2670 2671 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()` 2672 @*/ 2673 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx) 2674 { 2675 LandauCtx *ctx = (LandauCtx *)actx; 2676 PetscInt dim; 2677 DM pack; 2678 #if defined(PETSC_HAVE_THREADSAFETY) 2679 double starttime, endtime; 2680 #endif 2681 PetscObjectState state; 2682 2683 PetscFunctionBegin; 2684 PetscCall(TSGetDM(ts, &pack)); 2685 PetscCall(DMGetApplicationContext(pack, &ctx)); 2686 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2687 if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage)); 2688 PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0)); 2689 PetscCall(PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0)); 2690 #if defined(PETSC_HAVE_THREADSAFETY) 2691 starttime = MPI_Wtime(); 2692 #endif 2693 PetscCall(DMGetDimension(pack, &dim)); 2694 PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state)); 2695 if (state != ctx->norm_state) { 2696 PetscCall(MatZeroEntries(ctx->J)); 2697 PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx)); 2698 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view")); 2699 PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state)); 2700 ctx->norm_state = state; 2701 } else { 2702 PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n", state)); 2703 } 2704 /* mat vec for op */ 2705 PetscCall(MatMult(ctx->J, X, F)); /* C*f */ 2706 /* add time term */ 2707 if (X_t) PetscCall(MatMultAdd(ctx->M, X_t, F, F)); 2708 #if defined(PETSC_HAVE_THREADSAFETY) 2709 if (ctx->stage) { 2710 endtime = MPI_Wtime(); 2711 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2712 ctx->times[LANDAU_JACOBIAN] += (endtime - starttime); 2713 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2714 ctx->times[LANDAU_JACOBIAN_COUNT] += 1; 2715 } 2716 #endif 2717 PetscCall(PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0)); 2718 PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0)); 2719 if (ctx->stage) PetscCall(PetscLogStagePop()); 2720 PetscFunctionReturn(PETSC_SUCCESS); 2721 } 2722 2723 /*@ 2724 DMPlexLandauIJacobian - `TS` Jacobian construction, confusingly this adds mass 2725 2726 Collective 2727 2728 Input Parameters: 2729 + ts - The time stepping context 2730 . time_dummy - current time (not used) 2731 . X - Current state 2732 . U_tdummy - Time derivative of current state (not used) 2733 . shift - shift for du/dt term 2734 - actx - Landau context 2735 2736 Output Parameters: 2737 + Amat - Jacobian 2738 - Pmat - same as Amat 2739 2740 Level: beginner 2741 2742 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()` 2743 @*/ 2744 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx) 2745 { 2746 LandauCtx *ctx = NULL; 2747 PetscInt dim; 2748 DM pack; 2749 #if defined(PETSC_HAVE_THREADSAFETY) 2750 double starttime, endtime; 2751 #endif 2752 PetscObjectState state; 2753 2754 PetscFunctionBegin; 2755 PetscCall(TSGetDM(ts, &pack)); 2756 PetscCall(DMGetApplicationContext(pack, &ctx)); 2757 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2758 PetscCheck(Amat == Pmat && Amat == ctx->J, ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J"); 2759 PetscCall(DMGetDimension(pack, &dim)); 2760 /* get collision Jacobian into A */ 2761 if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage)); 2762 PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0)); 2763 PetscCall(PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0)); 2764 #if defined(PETSC_HAVE_THREADSAFETY) 2765 starttime = MPI_Wtime(); 2766 #endif 2767 PetscCheck(shift != 0.0, ctx->comm, PETSC_ERR_PLIB, "zero shift"); 2768 PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state)); 2769 PetscCheck(state == ctx->norm_state, ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT, ctx->norm_state, state); 2770 if (!ctx->use_matrix_mass) { 2771 PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx)); 2772 } else { /* add mass */ 2773 PetscCall(MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN)); 2774 } 2775 #if defined(PETSC_HAVE_THREADSAFETY) 2776 if (ctx->stage) { 2777 endtime = MPI_Wtime(); 2778 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2779 ctx->times[LANDAU_MASS] += (endtime - starttime); 2780 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2781 } 2782 #endif 2783 PetscCall(PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0)); 2784 PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0)); 2785 if (ctx->stage) PetscCall(PetscLogStagePop()); 2786 PetscFunctionReturn(PETSC_SUCCESS); 2787 } 2788