Actual source code: plexland.c

  1: #include <../src/mat/impls/aij/seq/aij.h>
  2: #include <petsc/private/dmpleximpl.h>
  3: #include <petsclandau.h>
  4: #include <petscts.h>
  5: #include <petscdmforest.h>
  6: #include <petscdmcomposite.h>

  8: /* Landau collision operator */

 10: /* relativistic terms */
 11: #if defined(PETSC_USE_REAL_SINGLE)
 12:   #define SPEED_OF_LIGHT 2.99792458e8F
 13:   #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
 14: #else
 15:   #define SPEED_OF_LIGHT 2.99792458e8
 16:   #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
 17: #endif
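/* C_0(v0) = c / v0 is the speed of light expressed in units of the normalizing velocity v_0; it is passed to LandauTensor3DRelativistic() below. */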

 19: #define PETSC_THREAD_SYNC
 20: #include "land_tensors.h"

 22: #if defined(PETSC_HAVE_OPENMP)
 23:   #include <omp.h>
 24: #endif

 26: static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
 27: {
 28:   P4estVertexMaps *maps = (P4estVertexMaps *)ptr;
 29:   PetscFunctionBegin;
 30:   // free device data
 31:   if (maps[0].deviceType != LANDAU_CPU) {
 32: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
 33:     if (maps[0].deviceType == LANDAU_KOKKOS) {
 34:       PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // a Kokkos build frees the device maps here
 35:     }                                                                // else the device type could be CUDA
 36: #elif defined(PETSC_HAVE_CUDA)
 37:     if (maps[0].deviceType == LANDAU_CUDA) {
 38:       PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids));
 39:     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %d ?????", maps->deviceType);
 40: #endif
 41:   }
 42:   // free host data
 43:   for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) {
 44:     PetscCall(PetscFree(maps[grid].c_maps));
 45:     PetscCall(PetscFree(maps[grid].gIdx));
 46:   }
 47:   PetscCall(PetscFree(maps));

 49:   PetscFunctionReturn(PETSC_SUCCESS);
 50: }
 51: static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
 52: {
 53:   PetscReal v2 = 0;
 54:   PetscFunctionBegin;
 55:   /* compute v^2 / 2 */
 56:   for (int i = 0; i < dim; ++i) v2 += x[i] * x[i];
 57:   /* evaluate the kinetic energy, v^2 / 2 */
 58:   u[0] = v2 / 2;
 59:   PetscFunctionReturn(PETSC_SUCCESS);
 60: }

 62: /* needs double */
 63: static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
 64: {
 65:   PetscReal *c2_0_arr = ((PetscReal *)actx);
 66:   double     u2 = 0, c02 = (double)*c2_0_arr, xx;

 68:   PetscFunctionBegin;
 69:   /* compute u^2 */
 70:   for (int i = 0; i < dim; ++i) u2 += x[i] * x[i];
 71:   /* gamma - 1 = g_eps for conditioning; only derivatives of this field are used */
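       /* identity used below: gamma - 1 = sqrt(1 + u^2/c^2) - 1 = (u^2/c^2) / (sqrt(1 + u^2/c^2) + 1); the second form avoids cancellation for small u^2/c^2 */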
 72:   xx = u2 / c02;
 73: #if defined(PETSC_USE_DEBUG)
 74:   u[0] = PetscSqrtReal(1. + xx);
 75: #else
 76:   u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the extra -1 is a constant offset that may help conditioning, and only derivatives are used
 77: #endif
 78:   PetscFunctionReturn(PETSC_SUCCESS);
 79: }

 81: /*
 82:  LandauFormJacobian_Internal - Evaluates the Landau collision Jacobian (shift == 0) or the shifted mass matrix (shift != 0)

 84:  Input Parameters:
 85:  .  a_X - input state vector
 86:  .  dim - dimension (2 or 3)
 87:  .  shift - mass matrix shift; zero means assemble the collision Jacobian
 88:  .  a_ctx - optional user-defined context (LandauCtx)

 89:  Output Parameter:
 90:  .  JacP - Jacobian matrix, filled here (not created)
 91:  */
 92: static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
 93: {
 94:   LandauCtx         *ctx = (LandauCtx *)a_ctx;
 95:   PetscInt           numCells[LANDAU_MAX_GRIDS], Nq, Nb;
 96:   PetscQuadrature    quad;
 97:   PetscReal          Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
 98:   PetscScalar       *cellClosure = NULL;
 99:   const PetscScalar *xdata       = NULL;
100:   PetscDS            prob;
101:   PetscContainer     container;
102:   P4estVertexMaps   *maps;
103:   Mat                subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ];

105:   PetscFunctionBegin;
109:   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
110:   PetscCheck(ctx->plex[0] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
111:   PetscCall(PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0));
112:   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
113:   PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
114:   if (container) {
115:     PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "maps but no GPU assembly");
116:     PetscCall(PetscContainerGetPointer(container, (void **)&maps));
117:     PetscCheck(maps, ctx->comm, PETSC_ERR_ARG_WRONG, "empty GPU matrix container");
118:     for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL;
119:   } else {
120:     PetscCheck(!ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "No maps but GPU assembly");
121:     for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) {
122:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)]));
123:     }
124:     maps = NULL;
125:   }
126:   // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
127:   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
128:   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
129:   Nb = Nq;
130:   PetscCheck(Nq <= LANDAU_MAX_NQ, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)", Nq, LANDAU_MAX_NQ);
131:   // get metadata for collecting dynamic data
132:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
133:     PetscInt cStart, cEnd;
134:     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
135:     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
136:     numCells[grid] = cEnd - cStart; // grids can have different topology
137:   }
138:   PetscCall(PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0));
139:   if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
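    /* Note: in the CPU assembly path below, an element's coefficients are read back from cellClosure at offset b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * Nf[grid] (Nf[grid] = species on that grid). */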
140:     DM pack;
141:     PetscCall(VecGetDM(a_X, &pack));
142:     PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
143:     PetscCall(PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0));
144:     for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
145:       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
146:       if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                  /* add the 2pi term that is not in Landau */
147:     }
148:     if (!ctx->gpu_assembly) {
149:       Vec         *locXArray, *globXArray;
150:       PetscScalar *cellClosure_it;
151:       PetscInt     cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS];
152:       PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
153:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
154:         PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
155:         PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
156:         PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
157:       }
158:       /* count cellClosure size */
159:       PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
160:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid];
161:       PetscCall(PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure));
162:       cellClosure_it = cellClosure;
163:       PetscCall(PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray));
164:       PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
165:       PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
166:       PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
167:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once)
168:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
169:           Vec      locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2;
170:           PetscInt cStart, cEnd, ei;
171:           PetscCall(VecDuplicate(locX, &locX2));
172:           PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
173:           PetscCall(DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2));
174:           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
175:           for (ei = cStart; ei < cEnd; ++ei) {
176:             PetscScalar *coef = NULL;
177:             PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
178:             PetscCall(PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
179:             PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
180:             cellClosure_it += Nb * Nf[grid];
181:           }
182:           PetscCall(VecDestroy(&locX2));
183:         }
184:       }
185:       PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT, (PetscCount)(cellClosure_it - cellClosure),
186:                  cellClosure_sz * ctx->batch_sz);
187:       PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
188:       PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
189:       PetscCall(PetscFree(locXArray));
190:       PetscCall(PetscFree(globXArray));
191:       xdata = NULL;
192:     } else {
193:       PetscMemType mtype;
194:       if (ctx->jacobian_field_major_order) { // get data in batch ordering
195:         PetscCall(VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
196:         PetscCall(VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
197:         PetscCall(VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype));
198:       } else {
199:         PetscCall(VecGetArrayReadAndMemType(a_X, &xdata, &mtype));
200:       }
201:       PetscCheck(mtype == PETSC_MEMTYPE_HOST || ctx->deviceType != LANDAU_CPU, ctx->comm, PETSC_ERR_ARG_WRONG, "CPU run with device data: use -mat_type aij");
202:       cellClosure = NULL;
203:     }
204:     PetscCall(PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0));
205:   } else xdata = cellClosure = NULL;

207:   /* do it */
208:   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
209:     if (ctx->deviceType == LANDAU_CUDA) {
210: #if defined(PETSC_HAVE_CUDA)
211:       PetscCall(LandauCUDAJacobian(ctx->plex, Nq, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
212: #else
213:       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "cuda");
214: #endif
215:     } else if (ctx->deviceType == LANDAU_KOKKOS) {
216: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
217:       PetscCall(LandauKokkosJacobian(ctx->plex, Nq, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
218: #else
219:       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
220: #endif
221:     }
222:   } else {               /* CPU version */
223:     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
224:     PetscInt         ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS];
225:     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
226:     PetscReal       *nu_alpha = (PetscReal *)ctx->SData_d.alpha, *nu_beta = (PetscReal *)ctx->SData_d.beta, *invMass = (PetscReal *)ctx->SData_d.invMass;
227:     PetscReal(*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal(*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
228:     PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
229:     PetscScalar *coo_vals = NULL;
230:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
231:       PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
232:       PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
233:       PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
234:     }
235:     /* count IPf size, etc */
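      /* offsets built just below: elem_offset[g] = first element of grid g, ip_offset[g] = first quadrature point of grid g (Nq per element), ipf_offset[g] = first (quadrature point x local species) entry of grid g */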
236:     PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
237:     const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1];
238:     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
239:     for (PetscInt grid = 0; grid < num_grids; grid++) {
240:       PetscInt nfloc        = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
241:       elem_offset[grid + 1] = elem_offset[grid] + numCells[grid];
242:       ip_offset[grid + 1]   = ip_offset[grid] + numCells[grid] * Nq;
243:       ipf_offset[grid + 1]  = ipf_offset[grid] + Nq * nfloc * numCells[grid];
244:     }
245:     IPf_sz_glb = ipf_offset[num_grids];
246:     IPf_sz_tot = IPf_sz_glb * ctx->batch_sz;
247:     // prep COO
248:     if (ctx->coo_assembly) {
249:       PetscCall(PetscMalloc1(ctx->SData_d.coo_size, &coo_vals)); // allocate every time?
250:       PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n", (PetscInt)ctx->SData_d.coo_size));
251:     }
252:     if (shift == 0.0) { /* compute dynamic data f and df and init data for Jacobian */
253: #if defined(PETSC_HAVE_THREADSAFETY)
254:       double starttime, endtime;
255:       starttime = MPI_Wtime();
256: #endif
257:       PetscCall(PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0));
258:       PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim == 3 ? IPf_sz_tot : 0, &dudz));
259:       // F df/dx
260:       for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) {                        // for each element
261:         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch
262:         // find my grid:
263:         PetscInt grid = 0;
264:         while (b_elem_idx >= elem_offset[grid + 1]) grid++; // yuck search for grid
265:         {
266:           const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
267:           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
268:           PetscScalar   *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQ];
 269:           PetscReal     *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // invJ is static data on batch 0
270:           PetscInt       b, f, q;
271:           if (cellClosure) {
272:             coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const
273:           } else {
274:             coef = coef_buff;
275:             for (f = 0; f < loc_Nf; ++f) {
276:               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
277:               for (b = 0; b < Nb; ++b) {
278:                 PetscInt idx = Idxs[b];
279:                 if (idx >= 0) {
280:                   coef[f * Nb + b] = xdata[idx + moffset];
281:                 } else {
282:                   idx              = -idx - 1;
283:                   coef[f * Nb + b] = 0;
284:                   for (q = 0; q < maps[grid].num_face; q++) {
285:                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
286:                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
287:                     coef[f * Nb + b] += scale * xdata[id + moffset];
288:                   }
289:                 }
290:               }
291:             }
292:           }
293:           /* get f and df */
294:           for (PetscInt qi = 0; qi < Nq; qi++) {
295:             const PetscReal *invJ = &invJe[qi * dim * dim];
296:             const PetscReal *Bq   = &BB[qi * Nb];
297:             const PetscReal *Dq   = &DD[qi * Nb * dim];
298:             PetscReal        u_x[LANDAU_DIM];
299:             /* get f & df */
300:             for (f = 0; f < loc_Nf; ++f) {
301:               const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi;
302:               PetscInt       b, e;
303:               PetscReal      refSpaceDer[LANDAU_DIM];
304:               ff[idx] = 0.0;
305:               for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
306:               for (b = 0; b < Nb; ++b) {
307:                 const PetscInt cidx = b;
308:                 ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
309:                 for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
310:               }
311:               for (int d = 0; d < LANDAU_DIM; ++d) {
312:                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
313:               }
314:               dudx[idx] = u_x[0];
315:               dudy[idx] = u_x[1];
316: #if LANDAU_DIM == 3
317:               dudz[idx] = u_x[2];
318: #endif
319:             }
320:           } // q
321:         }   // grid
322:       }     // grid*batch
323:       PetscCall(PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0));
324: #if defined(PETSC_HAVE_THREADSAFETY)
325:       endtime = MPI_Wtime();
326:       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
327: #endif
328:     } // Jacobian setup
329:     // assemble Jacobian (or mass)
330:     for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
331:       const PetscInt b_Nelem      = elem_offset[num_grids];
332:       const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem;
333:       PetscInt       grid = 0;
334: #if defined(PETSC_HAVE_THREADSAFETY)
335:       double starttime, endtime;
336:       starttime = MPI_Wtime();
337: #endif
338:       while (glb_elem_idx >= elem_offset[grid + 1]) grid++;
339:       {
340:         const PetscInt   loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
341:         const PetscInt   moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim;
342:         PetscScalar     *elemMat;
343:         const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim];
344:         PetscCall(PetscMalloc1(elemMatSize, &elemMat));
345:         PetscCall(PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat)));
346:         if (shift == 0.0) { // Jacobian
347:           PetscCall(PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0));
348:         } else { // mass
349:           PetscCall(PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0));
350:         }
351:         for (PetscInt qj = 0; qj < Nq; ++qj) {
352:           const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
353:           PetscReal      g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
354:           PetscInt       d, d2, dp, d3, IPf_idx;
355:           if (shift == 0.0) { // Jacobian
356:             const PetscReal *const invJj = &invJe[qj * dim * dim];
357:             PetscReal              gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
358:             const PetscReal        vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
359:             // create g2 & g3
360:             for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K
361:               gg2_temp[d] = 0;
362:               for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0;
363:             }
364:             /* inner beta reduction */
365:             IPf_idx = 0;
366:             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
367:               PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r];
368:               for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
369:                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
370:                   const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
371:                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
372: #if LANDAU_DIM == 2
373:                   PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
374:                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
375: #else
376:                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
377:                   if (ctx->use_relativistic_corrections) {
378:                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
379:                   } else {
380:                     LandauTensor3D(vj, x, y, z, U, mask);
381:                   }
382: #endif
383:                   for (int f = 0; f < Nfloc_r; ++f) {
384:                     const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
385:                     temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
386:                     temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
387: #if LANDAU_DIM == 3
388:                     temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
389: #endif
390:                     temp2 += ff[idx] * nu_beta[f + f_off] * (*lambdas)[grid][grid_r];
391:                   }
392:                   temp1[0] *= wi;
393:                   temp1[1] *= wi;
394: #if LANDAU_DIM == 3
395:                   temp1[2] *= wi;
396: #endif
397:                   temp2 *= wi;
398: #if LANDAU_DIM == 2
399:                   for (d2 = 0; d2 < 2; d2++) {
400:                     for (d3 = 0; d3 < 2; ++d3) {
401:                       /* K = U * grad(f): g2=e: i,A */
402:                       gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
403:                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
404:                       gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
405:                     }
406:                   }
407: #else
408:                   for (d2 = 0; d2 < 3; ++d2) {
409:                     for (d3 = 0; d3 < 3; ++d3) {
410:                       /* K = U * grad(f): g2 = e: i,A */
411:                       gg2_temp[d2] += U[d2][d3] * temp1[d3];
412:                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
413:                       gg3_temp[d2][d3] += U[d2][d3] * temp2;
414:                     }
415:                   }
416: #endif
417:                 } // qi
418:               }   // ei_r
419:               IPf_idx += nip_loc_r * Nfloc_r;
420:             } /* grid_r - IPs */
421:             PetscCheck(IPf_idx == IPf_sz_glb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT, IPf_idx, IPf_sz_glb);
422:             // add alpha and put in gg2/3
423:             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
424:               for (d2 = 0; d2 < LANDAU_DIM; d2++) {
425:                 gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off];
426:                 for (d3 = 0; d3 < LANDAU_DIM; d3++) gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off];
427:               }
428:             }
429:             /* add electric field term once per IP */
430:             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off];
431:             /* Jacobian transform - g2, g3 */
432:             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
433:               for (d = 0; d < dim; ++d) {
434:                 g2[fieldA][d] = 0.0;
435:                 for (d2 = 0; d2 < dim; ++d2) {
436:                   g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2];
437:                   g3[fieldA][d][d2] = 0.0;
438:                   for (d3 = 0; d3 < dim; ++d3) {
439:                     for (dp = 0; dp < dim; ++dp) g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp];
440:                   }
441:                   g3[fieldA][d][d2] *= wj;
442:                 }
443:                 g2[fieldA][d] *= wj;
444:               }
445:             }
446:           } else { // mass
447:             PetscReal wj = ww[jpidx_glb];
448:             /* Jacobian transform - g0 */
449:             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
450:               if (dim == 2) {
451:                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
452:               } else {
453:                 g0[fieldA] = wj * shift; // move this to below and remove g0
454:               }
455:             }
456:           }
457:           /* FE matrix construction */
458:           {
459:             PetscInt         fieldA, d, f, d2, g;
460:             const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
461:             /* assemble - on the diagonal (I,I) */
462:             for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
463:               for (f = 0; f < Nb; f++) {
464:                 const PetscInt i = fieldA * Nb + f; /* Element matrix row */
465:                 for (g = 0; g < Nb; ++g) {
466:                   const PetscInt j    = fieldA * Nb + g; /* Element matrix column */
467:                   const PetscInt fOff = i * totDim + j;
468:                   if (shift == 0.0) {
469:                     for (d = 0; d < dim; ++d) {
470:                       elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g];
471:                       for (d2 = 0; d2 < dim; ++d2) elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2];
472:                     }
473:                   } else { // mass
474:                     elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g];
475:                   }
476:                 }
477:               }
478:             }
479:           }
480:         }                   /* qj loop */
481:         if (shift == 0.0) { // Jacobian
482:           PetscCall(PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0));
483:         } else {
484:           PetscCall(PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0));
485:         }
486: #if defined(PETSC_HAVE_THREADSAFETY)
487:         endtime = MPI_Wtime();
488:         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
489: #endif
490:         /* assemble matrix */
491:         if (!container) {
492:           PetscInt cStart;
493:           PetscCall(PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0));
494:           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
495:           PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES));
496:           PetscCall(PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0));
497:         } else { // GPU like assembly for debugging
498:           PetscInt    fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
499:           PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0};
500:           LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = (LandauIdx(*)[LANDAU_MAX_NQ + 1]) ctx->SData_d.coo_elem_point_offsets;
501:           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
502:           for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
503:             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
504:             for (f = 0; f < Nb; f++) {
505:               PetscInt idx = Idxs[f];
506:               if (idx >= 0) {
507:                 nr           = 1;
508:                 rows0[0]     = idx;
509:                 row_scale[0] = 1.;
510:               } else {
511:                 idx = -idx - 1;
512:                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
513:                   if (maps[grid].c_maps[idx][q].gid < 0) break;
514:                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
515:                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
516:                 }
517:               }
518:               for (g = 0; g < Nb; ++g) {
519:                 idx = Idxs[g];
520:                 if (idx >= 0) {
521:                   nc           = 1;
522:                   cols0[0]     = idx;
523:                   col_scale[0] = 1.;
524:                 } else {
525:                   idx = -idx - 1;
526:                   nc  = maps[grid].num_face;
527:                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
528:                     if (maps[grid].c_maps[idx][q].gid < 0) break;
529:                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
530:                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
531:                   }
532:                 }
533:                 const PetscInt    i   = fieldA * Nb + f; /* Element matrix row */
534:                 const PetscInt    j   = fieldA * Nb + g; /* Element matrix column */
535:                 const PetscScalar Aij = elemMat[i * totDim + j];
536:                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
537:                   const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
538:                   const int idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
539:                   for (int q = 0, idx2 = idx0; q < nr; q++) {
540:                     for (int d = 0; d < nc; d++, idx2++) coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij;
541:                   }
542:                 } else {
543:                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
544:                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
545:                   for (q = 0; q < nr; q++) {
546:                     for (d = 0; d < nc; d++) vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij;
547:                   }
548:                   PetscCall(MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES));
549:                 }
550:               }
551:             }
552:           }
553:         }
554:         if (loc_elem == -1) {
555:           PetscCall(PetscPrintf(ctx->comm, "CPU Element matrix\n"));
556:           for (int d = 0; d < totDim; ++d) {
557:             for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f])));
558:             PetscCall(PetscPrintf(ctx->comm, "\n"));
559:           }
560:           exit(12);
561:         }
562:         PetscCall(PetscFree(elemMat));
563:       }                 /* grid */
564:     }                   /* outer element & batch loop */
 565:     if (shift == 0.0) { // Jacobian pass: free the dynamic point data created above
566:       PetscCall(PetscFree4(ff, dudx, dudy, dudz));
567:     }
 568:     if (!container) {                                         // 'CPU' assembly: move the per-(batch, grid) sub-matrices into the global JacP
569:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP
570:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
571:           const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
572:           PetscInt           nloc, nzl, colbuf[1024], row;
573:           const PetscInt    *cols;
574:           const PetscScalar *vals;
575:           Mat                B = subJ[LAND_PACK_IDX(b_id, grid)];
576:           PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
577:           PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
578:           PetscCall(MatGetSize(B, &nloc, NULL));
579:           for (int i = 0; i < nloc; i++) {
580:             PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
581:             PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
582:             for (int j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j];
583:             row = moffset + i;
584:             PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
585:             PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
586:           }
587:           PetscCall(MatDestroy(&B));
588:         }
589:       }
590:     }
591:     if (coo_vals) {
592:       PetscCall(MatSetValuesCOO(JacP, coo_vals, ADD_VALUES));
593:       PetscCall(PetscFree(coo_vals));
594:     }
595:   } /* CPU version */
596:   PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
597:   PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
598:   /* clean up */
599:   if (cellClosure) PetscCall(PetscFree(cellClosure));
600:   if (xdata) PetscCall(VecRestoreArrayReadAndMemType(a_X, &xdata));
601:   PetscFunctionReturn(PETSC_SUCCESS);
602: }

604: static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
605: {
606:   PetscReal r = abc[0], z = abc[1];

608:   PetscFunctionBegin;
609:   xyz[0] = r;
610:   xyz[1] = z;
611:   if (dim == 3) xyz[2] = abc[2];

613:   PetscFunctionReturn(PETSC_SUCCESS);
614: }

616: /* create DMComposite of meshes for each species group */
617: static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
618: {
619:   PetscFunctionBegin;
620:   { /* p4est, quads */
621:     /* Create plex mesh of Landau domain */
622:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
623:       PetscReal par_radius = ctx->radius_par[grid], perp_radius = ctx->radius_perp[grid];
624:       if (!ctx->sphere) {
625:         PetscReal      lo[] = {-perp_radius, -par_radius, -par_radius}, hi[] = {perp_radius, par_radius, par_radius};
 626:         DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
627:         if (dim == 2) lo[0] = 0;
628:         else {
629:           lo[1] = -perp_radius;
630:           hi[1] = perp_radius; // 3D y is a perp
631:         }
632:         PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, ctx->cells0, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here
633:         PetscCall(DMLocalizeCoordinates(ctx->plex[grid]));                                                                           /* needed for periodic */
634:         if (dim == 3) PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cube"));
635:         else PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane"));
636:       } else if (dim == 2) {
637:         PetscInt   numCells    = 6, cells[11][4], i, j;
638:         PetscInt   numVerts    = 11;
639:         PetscReal *flatCoords  = NULL;
640:         PetscInt  *flatCells   = NULL, *pcell;
641:         int        cells2[][4] = {
642:           {0,  1, 6, 5 },
643:           {1,  2, 7, 6 },
644:           {2,  3, 8, 7 },
645:           {3,  4, 9, 8 },
646:           {5,  6, 7, 10},
647:           {10, 7, 8, 9 },
648:         };
649:         for (i = 0; i < numCells; i++)
650:           for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
651:         PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
652:         {
653:           PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
654:           PetscReal rad         = ctx->radius[grid];
655:           for (j = 0; j < 5; j++) {
656:             PetscReal z, r, theta = -PETSC_PI / 2 + (j % 5) * PETSC_PI / 4;
657:             r            = rad * PetscCosReal(theta);
658:             coords[j][0] = r;
659:             z            = rad * PetscSinReal(theta);
660:             coords[j][1] = z;
661:           }
662:           coords[j][0]   = 0;
663:           coords[j++][1] = -rad * ctx->sphere_inner_radius_90degree;
664:           coords[j][0]   = rad * ctx->sphere_inner_radius_45degree;
665:           coords[j++][1] = -rad * ctx->sphere_inner_radius_45degree;
666:           coords[j][0]   = rad * ctx->sphere_inner_radius_90degree;
667:           coords[j++][1] = 0;
668:           coords[j][0]   = rad * ctx->sphere_inner_radius_45degree;
669:           coords[j++][1] = rad * ctx->sphere_inner_radius_45degree;
670:           coords[j][0]   = 0;
671:           coords[j++][1] = rad * ctx->sphere_inner_radius_90degree;
672:           coords[j][0]   = 0;
673:           coords[j++][1] = 0;
674:         }
675:         for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
676:           for (int jj = 0; jj < 4; jj++) pcell[jj] = cells[j][jj];
677:         }
678:         PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, 4, ctx->interpolate, flatCells, 2, flatCoords, &ctx->plex[grid]));
679:         PetscCall(PetscFree2(flatCoords, flatCells));
680:         PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle"));
 681:       } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support a 3V cubed sphere");
682:       PetscCall(DMSetFromOptions(ctx->plex[grid]));
683:     } // grid loop
684:     PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack, prefix));

686:     { /* convert to p4est (or whatever), wait for discretization to create pack */
687:       char      convType[256];
688:       PetscBool flg;

690:       PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
691:       PetscCall(PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg));
692:       PetscOptionsEnd();
693:       if (flg) {
694:         ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
695:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
696:           DM dmforest;
697:           PetscCall(DMConvert(ctx->plex[grid], convType, &dmforest));
698:           if (dmforest) {
699:             PetscBool isForest;
700:             PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest, prefix));
701:             PetscCall(DMIsForest(dmforest, &isForest));
702:             if (isForest) {
703:               if (ctx->sphere) PetscCall(DMForestSetBaseCoordinateMapping(dmforest, GeometryDMLandau, ctx));
704:               PetscCall(DMDestroy(&ctx->plex[grid]));
705:               ctx->plex[grid] = dmforest; // Forest for adaptivity
706:             } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
707:           } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
708:         }
709:       } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
710:     }
711:   } /* non-file */
712:   PetscCall(DMSetDimension(pack, dim));
713:   PetscCall(PetscObjectSetName((PetscObject)pack, "Mesh"));
714:   PetscCall(DMSetApplicationContext(pack, ctx));

716:   PetscFunctionReturn(PETSC_SUCCESS);
717: }

719: static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
720: {
721:   PetscInt     ii, i0;
722:   char         buf[256];
723:   PetscSection section;

725:   PetscFunctionBegin;
726:   for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
727:     if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
728:     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
729:     /* Setup Discretization - FEM */
730:     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]));
731:     PetscCall(PetscObjectSetName((PetscObject)ctx->fe[ii], buf));
732:     PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii]));
733:   }
734:   PetscCall(DMCreateDS(ctx->plex[grid]));
735:   PetscCall(DMGetSection(ctx->plex[grid], &section));
736:   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
737:     if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
738:     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
739:     PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
740:   }
741:   PetscFunctionReturn(PETSC_SUCCESS);
742: }

744: /* Define a Maxwellian function for testing out the operator. */

746: /* Using cartesian velocity space coordinates, the particle */
747: /* density, [1/m^3], is defined according to */

749: /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */

751: /* Using some constant, c, we normalize the velocity vector into a */
752: /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */

754: /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */

756: /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
 757: /* of finding the particle in a box dx^3 around x is */

759: /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
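 /* With c = v_0 this gives theta = 2*k*T/(m*v_0^2), and the field projected below is u = n * (pi*theta)^(-3/2) * exp(-x^2/theta) (see maxwellian()). */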

761: typedef struct {
762:   PetscReal v_0;
763:   PetscReal kT_m;
764:   PetscReal n;
765:   PetscReal shift;
766: } MaxwellianCtx;

768: static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
769: {
770:   MaxwellianCtx *mctx = (MaxwellianCtx *)actx;
771:   PetscInt       i;
772:   PetscReal      v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0), shift; /* theta = 2kT/mc^2 */
773:   PetscFunctionBegin;
774:   /* compute the exponents, v^2 */
775:   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
776:   /* evaluate the Maxwellian */
777:   if (mctx->shift < 0) shift = -mctx->shift;
778:   else {
779:     u[0]  = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
780:     shift = mctx->shift;
781:   }
782:   if (shift != 0.) {
783:     v2 = 0;
784:     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
785:     v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
786:     /* evaluate the shifted Maxwellian */
787:     u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
788:   }
789:   PetscFunctionReturn(PETSC_SUCCESS);
790: }

792: /*@
793:  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state

795:  Collective

797:  Input Parameters:
 798:  +   dm - The mesh (local)
 799:  .   time - Current time
 800:  .   temps - Temperatures of each species (global)
 801:  .   ns - Number density of each species (global)
 802:  .   grid - index into current grid - just used for offset into temps and ns
 803:  .   b_id - batch index
 804:  .   n_batch - number of batches
 805:  -   actx - Landau context

807:  Output Parameter:
808:  .   X  - The state (local to this grid)

810:  Level: beginner

812:  .keywords: mesh
813: .seealso: `DMPlexLandauCreateVelocitySpace()`
814:  @*/
815: PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
816: {
817:   LandauCtx *ctx = (LandauCtx *)actx;
818:   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *);
819:   PetscInt       dim;
820:   MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];

822:   PetscFunctionBegin;
823:   PetscCall(DMGetDimension(dm, &dim));
824:   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
825:   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
826:     mctxs[i0]      = &data[i0];
827:     data[i0].v_0   = ctx->v_0;                             // v_0 same for all grids
828:     data[i0].kT_m  = ctx->k * temps[ii] / ctx->masses[ii]; /* kT/m */
829:     data[i0].n     = ns[ii];
830:     initu[i0]      = maxwellian;
831:     data[i0].shift = 0;
832:   }
833:   data[0].shift = ctx->electronShift;
834:   /* need to make ADD_ALL_VALUES work - TODO */
835:   PetscCall(DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X));
836:   PetscFunctionReturn(PETSC_SUCCESS);
837: }
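/* Minimal usage sketch (mirrors LandauSetInitialCondition() below): given a grid's DM 'dm', a state vector 'X' for that grid,
   and a LandauCtx 'ctx', Maxwellians for grid 0 of a single batch are added with
     PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, 0, 0, 1, ctx));
 */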

839: /*
840:  LandauSetInitialCondition - Adds Maxwellians with context

842:  Collective

844:  Input Parameters:
 845:  +   dm - The mesh
 846:  .   grid - index into current grid - just used for offset into temps and ns
 847:  .   b_id - batch index
 848:  .   n_batch - number of batches
 849:  -   actx - Landau context with T and n

851:  Output Parameter:
852:  .   X  - The state

854:  Level: beginner

856:  .keywords: mesh
857: .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
858:  */
859: static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
860: {
861:   LandauCtx *ctx = (LandauCtx *)actx;
862:   PetscFunctionBegin;
863:   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
864:   PetscCall(VecZeroEntries(X));
865:   PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));
866:   PetscFunctionReturn(PETSC_SUCCESS);
867: }

869: // adapt a level once. Forest in/out
870: #if defined(PETSC_USE_INFO)
871: static const char *s_refine_names[] = {"RE", "Z1", "Origin", "Z2", "Uniform"};
872: #endif
873: static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
874: {
875:   DM              forest, plex, adaptedDM = NULL;
876:   PetscDS         prob;
877:   PetscBool       isForest;
878:   PetscQuadrature quad;
879:   PetscInt        Nq, *Nb, cStart, cEnd, c, dim, qj, k;
880:   DMLabel         adaptLabel = NULL;

882:   PetscFunctionBegin;
883:   forest = ctx->plex[grid];
884:   PetscCall(DMCreateDS(forest));
885:   PetscCall(DMGetDS(forest, &prob));
886:   PetscCall(DMGetDimension(forest, &dim));
887:   PetscCall(DMIsForest(forest, &isForest));
888:   PetscCheck(isForest, ctx->comm, PETSC_ERR_ARG_WRONG, "! Forest");
889:   PetscCall(DMConvert(forest, DMPLEX, &plex));
890:   PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
891:   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel));
892:   PetscCall(PetscFEGetQuadrature(fem, &quad));
893:   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
894:   PetscCheck(Nq <= LANDAU_MAX_NQ, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)", Nq, LANDAU_MAX_NQ);
895:   PetscCall(PetscDSGetDimensions(prob, &Nb));
896:   PetscCall(PetscInfo(sol, "%" PetscInt_FMT ") Refine phase: %s\n", grid, s_refine_names[type]));
897:   if (type == 4) {
898:     for (c = cStart; c < cEnd; c++) PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
899:   } else if (type == 2) {
900:     PetscInt  rCellIdx[8], nr = 0, nrmax = (dim == 3) ? 8 : 2;
901:     PetscReal minRad = PETSC_INFINITY, r;
902:     for (c = cStart; c < cEnd; c++) {
903:       PetscReal tt, v0[LANDAU_MAX_NQ * 3], detJ[LANDAU_MAX_NQ];
904:       PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ));
905:       for (qj = 0; qj < Nq; ++qj) {
906:         tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr(((dim == 3) ? v0[dim * qj + 2] : 0));
907:         r  = PetscSqrtReal(tt);
908:         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) {
909:           minRad         = r;
910:           nr             = 0;
911:           rCellIdx[nr++] = c;
912:           PetscCall(PetscInfo(sol, "\t\t%" PetscInt_FMT ") Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", grid, (double)r, c, qj + 1, Nq));
913:         } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) {
914:           for (k = 0; k < nr; k++)
915:             if (c == rCellIdx[k]) break;
916:           if (k == nr) {
917:             rCellIdx[nr++] = c;
918:             PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", grid, (double)r, c, qj + 1, Nq, (double)(r - minRad)));
919:           }
920:         }
921:       }
922:     }
923:     for (k = 0; k < nr; k++) PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
924:     PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", grid, nr, rCellIdx[0], rCellIdx[1], (double)minRad));
925:   } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */
926:     PetscScalar *coef = NULL;
927:     Vec          coords;
928:     PetscInt     csize, Nv, d, nz, nrefined = 0;
929:     DM           cdm;
930:     PetscSection cs;
931:     PetscCall(DMGetCoordinatesLocal(forest, &coords));
932:     PetscCall(DMGetCoordinateDM(forest, &cdm));
933:     PetscCall(DMGetLocalSection(cdm, &cs));
934:     for (c = cStart; c < cEnd; c++) {
935:       PetscInt doit = 0, outside = 0;
936:       PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
937:       Nv = csize / dim;
938:       for (nz = d = 0; d < Nv; d++) {
939:         PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0);
940:         x = PetscSqrtReal(x);
941:         if (type == 0) {
942:           if (ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */
943:         } else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) {
944:           outside++; /* don't refine outside electron refine radius */
945:           PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
946:         } else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) {
947:           outside++; /* refine r=0 cells on refinement front */
948:           PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
949:         }
950:         if (x < PETSC_MACHINE_EPSILON * 10. && (type != 0 || ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON)) nz++;
951:       }
952:       PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
953:       if (doit || (outside < Nv && nz)) {
954:         PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
955:         nrefined++;
956:       }
957:     }
958:     PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " cells\n", grid, nrefined));
959:   }
960:   PetscCall(DMDestroy(&plex));
961:   PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
962:   PetscCall(DMLabelDestroy(&adaptLabel));
963:   *newForest = adaptedDM;
964:   if (adaptedDM) {
965:     if (isForest) {
966:       PetscCall(DMForestSetAdaptivityForest(adaptedDM, NULL)); // ????
967:     }
968:     PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
969:     PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
970:     PetscCall(PetscInfo(sol, "\t\t\t\t%" PetscInt_FMT ") %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", grid, cEnd - cStart, Nq * (cEnd - cStart)));
971:     PetscCall(DMDestroy(&plex));
972:   } else *newForest = NULL;
973:   PetscFunctionReturn(PETSC_SUCCESS);
974: }

976: // forest goes in (ctx->plex[grid]), plex comes out
977: static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
978: {
979:   PetscInt adaptIter;

981:   PetscFunctionBegin;
982:   PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
983:   for (type = 0; type < 5; type++) {
984:     for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) {
985:       DM newForest = NULL;
986:       PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
987:       if (newForest) {
988:         PetscCall(DMDestroy(&ctx->plex[grid]));
989:         PetscCall(VecDestroy(uu));
990:         PetscCall(DMCreateGlobalVector(newForest, uu));
991:         PetscCall(PetscObjectSetName((PetscObject)*uu, "uAMR"));
992:         PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx));
993:         ctx->plex[grid] = newForest;
994:       } else {
995:         PetscCall(PetscInfo(*uu, "No refinement\n"));
996:       }
997:     }
998:   }
999:   PetscFunctionReturn(PETSC_SUCCESS);
1000: }

1002: // make log(Lambdas) from NRL Plasma formulary
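// Cases implemented below (temperatures in eV, densities n = ctx->n * n_0, mu = mass in proton units, Z = charge number):
//   e-e :  23.5 - ln( sqrt(n_e) * T_e^(-5/4) ) - sqrt( 1e-5 + (ln(T_e) - 2)^2 / 16 )
//   e-i :  23 - ln( sqrt(n_e) * Z * T_e^(-3/2) )  when T_e < 10 Z^2,  else  24 - ln( sqrt(n_e) / T_e )
//   i-i':  23 - ln( Z_i Z_j (mu_i + mu_j) / (mu_i T_j + mu_j T_i) * sqrt( n_i Z_i^2 / T_i + n_j Z_j^2 / T_j ) )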
1003: static PetscErrorCode makeLambdas(LandauCtx *ctx)
1004: {
1005:   PetscFunctionBegin;
1006:   for (PetscInt gridi = 0; gridi < ctx->num_grids; gridi++) {
1007:     int       iii   = ctx->species_offset[gridi];
1008:     PetscReal Ti_ev = (ctx->thermal_temps[iii] / 1.1604525e7) * 1000; // convert (back) to eV
1009:     PetscReal ni    = ctx->n[iii] * ctx->n_0;
1010:     for (PetscInt gridj = gridi; gridj < ctx->num_grids; gridj++) {
1011:       PetscInt  jjj = ctx->species_offset[gridj];
1012:       PetscReal Zj  = ctx->charges[jjj] / 1.6022e-19;
1013:       if (gridi == 0) {
1014:         if (gridj == 0) { // lam_ee
1015:           ctx->lambdas[gridi][gridj] = 23.5 - PetscLogReal(PetscSqrtReal(ni) * PetscPowReal(Ti_ev, -1.25)) - PetscSqrtReal(1e-5 + PetscSqr(PetscLogReal(Ti_ev) - 2) / 16);
1016:         } else { // lam_ei == lam_ie
1017:           if (10 * Zj * Zj > Ti_ev) {
1018:             ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(PetscSqrtReal(ni) * Zj * PetscPowReal(Ti_ev, -1.5));
1019:           } else {
1020:             ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 24 - PetscLogReal(PetscSqrtReal(ni) / Ti_ev);
1021:           }
1022:         }
1023:       } else { // lam_ii'
1024:         PetscReal mui = ctx->masses[iii] / 1.6720e-27, Zi = ctx->charges[iii] / 1.6022e-19;
1025:         PetscReal Tj_ev            = (ctx->thermal_temps[jjj] / 1.1604525e7) * 1000; // convert (back) to eV
1026:         PetscReal muj              = ctx->masses[jjj] / 1.6720e-27;
1027:         PetscReal nj               = ctx->n[jjj] * ctx->n_0;
1028:         ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(Zi * Zj * (mui + muj) / (mui * Tj_ev + muj * Ti_ev) * PetscSqrtReal(ni * Zi * Zi / Ti_ev + nj * Zj * Zj / Tj_ev));
1029:       }
1030:     }
1031:   }
1032:   //PetscReal v0 = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
1033:   PetscFunctionReturn(PETSC_SUCCESS);
1034: }

1036: static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1037: {
1038:   PetscBool flg;
1039:   PetscInt  ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS], non_dim_grid;
1040:   PetscReal v0_grid[LANDAU_MAX_GRIDS], lnLam = 10;
1041:   DM        dummy;

1043:   PetscFunctionBegin;
1044:   PetscCall(DMCreate(ctx->comm, &dummy));
1045:   /* get options - initialize context */
1046:   ctx->verbose = 1; // should be 0 for silent compliance
1047: #if defined(PETSC_HAVE_THREADSAFETY) && defined(PETSC_HAVE_OPENMP)
1048:   ctx->batch_sz = PetscNumOMPThreads;
1049: #else
1050:   ctx->batch_sz = 1;
1051: #endif
1052:   ctx->batch_view_idx = 0;
1053:   ctx->interpolate    = PETSC_TRUE;
1054:   ctx->gpu_assembly   = PETSC_TRUE;
1055:   ctx->norm_state     = 0;
1056:   ctx->electronShift  = 0;
1057:   ctx->M              = NULL;
1058:   ctx->J              = NULL;
1059:   /* geometry and grids */
1060:   ctx->sphere    = PETSC_FALSE;
1061:   ctx->use_p4est = PETSC_FALSE;
1062:   for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1063:     ctx->radius[grid]             = 5.; /* thermal radius (velocity) */
1064:     ctx->radius_perp[grid]        = 5.; /* thermal radius (velocity) */
1065:     ctx->radius_par[grid]         = 5.; /* thermal radius (velocity) */
1066:     ctx->numAMRRefine[grid]       = 0;
1067:     ctx->postAMRRefine[grid]      = 0;
1068:     ctx->species_offset[grid + 1] = 1; // one species default
1069:     num_species_grid[grid]        = 0;
1070:     ctx->plex[grid]               = NULL; /* cache as expensive to Convert */
1071:   }
1072:   ctx->species_offset[0] = 0;
1073:   ctx->re_radius         = 0.;
1074:   ctx->vperp0_radius1    = 0;
1075:   ctx->vperp0_radius2    = 0;
1076:   ctx->nZRefine1         = 0;
1077:   ctx->nZRefine2         = 0;
1078:   ctx->numRERefine       = 0;
1079:   num_species_grid[0]    = 1; // one species default
1080:   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1081:   ctx->charges[0]       = -1;                       /* electron charge (MKS) */
1082:   ctx->masses[0]        = 1 / 1835.469965278441013; /* temporary value in proton mass */
1083:   ctx->n[0]             = 1;
1084:   ctx->v_0              = 1; /* thermal velocity, we could start with a scale != 1 */
1085:   ctx->thermal_temps[0] = 1;
1086:   /* constants, etc. */
1087:   ctx->epsilon0 = 8.8542e-12;     /* permittivity of free space (MKS) F/m */
1088:   ctx->k        = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1089:   ctx->n_0      = 1.e20;          /* typical plasma n, but could set it to 1 */
1090:   ctx->Ez       = 0;
1091:   for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0;
1092:   for (PetscInt ii = 0; ii < LANDAU_DIM; ii++) ctx->cells0[ii] = 2;
1093:   if (LANDAU_DIM == 2) ctx->cells0[0] = 1;
1094:   ctx->use_matrix_mass                = PETSC_FALSE;
1095:   ctx->use_relativistic_corrections   = PETSC_FALSE;
1096:   ctx->use_energy_tensor_trick        = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1097:   ctx->SData_d.w                      = NULL;
1098:   ctx->SData_d.x                      = NULL;
1099:   ctx->SData_d.y                      = NULL;
1100:   ctx->SData_d.z                      = NULL;
1101:   ctx->SData_d.invJ                   = NULL;
1102:   ctx->jacobian_field_major_order     = PETSC_FALSE;
1103:   ctx->SData_d.coo_elem_offsets       = NULL;
1104:   ctx->SData_d.coo_elem_point_offsets = NULL;
1105:   ctx->coo_assembly                   = PETSC_FALSE;
1106:   ctx->SData_d.coo_elem_fullNb        = NULL;
1107:   ctx->SData_d.coo_size               = 0;
1108:   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Planck-Landau collision operator", "none");
1109:   {
1110:     char opstring[256];
1111: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1112:     ctx->deviceType = LANDAU_KOKKOS;
1113:     PetscCall(PetscStrncpy(opstring, "kokkos", sizeof(opstring)));
1114: #elif defined(PETSC_HAVE_CUDA)
1115:     ctx->deviceType = LANDAU_CUDA;
1116:     PetscCall(PetscStrncpy(opstring, "cuda", sizeof(opstring)));
1117: #else
1118:     ctx->deviceType = LANDAU_CPU;
1119:     PetscCall(PetscStrncpy(opstring, "cpu", sizeof(opstring)));
1120: #endif
1121:     PetscCall(PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu', 'cuda', or 'kokkos'", "plexland.c", opstring, opstring, sizeof(opstring), NULL));
1122:     PetscCall(PetscStrcmp("cpu", opstring, &flg));
1123:     if (flg) {
1124:       ctx->deviceType = LANDAU_CPU;
1125:     } else {
1126:       PetscCall(PetscStrcmp("cuda", opstring, &flg));
1127:       if (flg) {
1128:         ctx->deviceType = LANDAU_CUDA;
1129:       } else {
1130:         PetscCall(PetscStrcmp("kokkos", opstring, &flg));
1131:         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1132:         else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", opstring);
1133:       }
1134:     }
1135:   }
1136:   PetscCall(PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL));
1137:   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1138:   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1139:   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT, (PetscInt)LANDAU_MAX_BATCH_SZ, ctx->batch_sz);
1140:   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1141:   PetscCheck(ctx->batch_view_idx < ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT, ctx->batch_view_idx, ctx->batch_sz);
1142:   PetscCall(PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in unites of Conner-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL));
1143:   PetscCall(PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL));
1144:   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1145:   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1146:   if (LANDAU_DIM == 2 && ctx->use_relativistic_corrections) ctx->use_relativistic_corrections = PETSC_FALSE; // should warn
1147:   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick,
1148:                              &ctx->use_energy_tensor_trick, NULL));

1150:   /* get num species with temperature, set defaults */
1151:   for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) {
1152:     ctx->thermal_temps[ii] = 1;
1153:     ctx->charges[ii]       = 1;
1154:     ctx->masses[ii]        = 1;
1155:     ctx->n[ii]             = 1;
1156:   }
1157:   nt = LANDAU_MAX_SPECIES;
1158:   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1159:   if (flg) {
1160:     PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt));
1161:     ctx->num_species = nt;
1162:   } else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1163:   for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1164:   nm = LANDAU_MAX_SPECIES - 1;
1165:   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1166:   PetscCheck(!flg || nm == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species - 1);
1167:   nm = LANDAU_MAX_SPECIES;
1168:   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1169:   PetscCheck(!flg || nm == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species);
1170:   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1171:   ctx->masses[0] = 9.10938356e-31;                                           /* electron mass kg (should be about right already) */
1172:   nc             = LANDAU_MAX_SPECIES - 1;
1173:   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1174:   if (flg) PetscCheck(nc == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num charges %" PetscInt_FMT " != num species %" PetscInt_FMT, nc, ctx->num_species - 1);
1175:   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1176:   /* geometry and grids */
1177:   nt = LANDAU_MAX_GRIDS;
1178:   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg));
1179:   if (flg) {
1180:     ctx->num_grids = nt;
1181:     for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii];
1182:     PetscCheck(ctx->num_species == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)", nt, ctx->num_species,
1183:                ctx->num_grids, LANDAU_MAX_GRIDS);
1184:   } else {
1185:     ctx->num_grids      = 1; // go back to a single grid run
1186:     num_species_grid[0] = ctx->num_species;
1187:   }
1188:   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii];
1189:   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????", ctx->species_offset[ctx->num_grids],
1190:              ctx->num_species);
1191:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1192:     int iii       = ctx->species_offset[grid];                                          // normalize with first (arbitrary) species on grid
1193:     v0_grid[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
1194:   }
1195:   // get lambdas here because we need them for t_0 etc
1196:   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda", "Universal cross section parameter. Default uses NRL formulas", "plexland.c", lnLam, &lnLam, &flg));
1197:   if (flg) {
1198:     for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1199:       for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) ctx->lambdas[gridj][grid] = lnLam; /* cross section ratio large - small angle collisions */
1200:     }
1201:   } else {
1202:     PetscCall(makeLambdas(ctx));
1203:   }
1204:   non_dim_grid = 0;
1205:   PetscCall(PetscOptionsInt("-dm_landau_normalization_grid", "Index of grid to use for setting v_0, m_0, t_0. (Not recommended)", "plexland.c", non_dim_grid, &non_dim_grid, &flg));
1206:   if (non_dim_grid != 0) PetscCall(PetscInfo(dummy, "Normalization grid set to %" PetscInt_FMT ", but non-default not well verified\n", non_dim_grid));
1207:   PetscCheck(non_dim_grid >= 0 && non_dim_grid < ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "Normalization grid wrong: %" PetscInt_FMT, non_dim_grid);
1208:   ctx->v_0 = v0_grid[non_dim_grid];     /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1209:   ctx->m_0 = ctx->masses[non_dim_grid]; /* arbitrary reference mass, electrons */
1210:   ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[non_dim_grid])) / ctx->lambdas[non_dim_grid][non_dim_grid] / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[non_dim_grid,non_dim_grid]=1 */
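  /* equivalently, t_0 = 8*pi * epsilon0^2 * m_0^2 * v_0^3 / (q_0^4 * lnLambda_00 * n_0),
     i.e. the self-collision time scale of the normalization species (see note above) */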
1211:   /* domain */
1212:   nt = LANDAU_MAX_GRIDS;
1213:   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg));
1214:   if (flg) {
1215:     PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1216:     while (nt--) ctx->radius_par[nt] = ctx->radius_perp[nt] = ctx->radius[nt];
1217:   } else {
1218:     nt = LANDAU_MAX_GRIDS;
1219:     PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_par", "Parallel velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_par, &nt, &flg));
1220:     if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_par: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1221:     PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_perp", "Perpendicular velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_perp, &nt, &flg));
1222:     if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_perp: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1223:   }
1224:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1225:     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c - need to set par and perp with this -- todo */
1226:       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1227:       else ctx->radius[grid] = -ctx->radius[grid];
1228:       ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1229:       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid));
1230:     }
1231:     ctx->radius[grid] *= v0_grid[grid] / ctx->v_0;      // scale domain by thermal radius relative to v_0
1232:     ctx->radius_perp[grid] *= v0_grid[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0
1233:     ctx->radius_par[grid] *= v0_grid[grid] / ctx->v_0;  // scale domain by thermal radius relative to v_0
1234:   }
1235:   /* amr parameters */
1236:   nt = LANDAU_MAX_GRIDS;
1237:   PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1238:   PetscCheck(!flg || nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1239:   nt = LANDAU_MAX_GRIDS;
1240:   PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1241:   for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1242:   PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1243:   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_pre", "Number of levels to refine along v_perp=0 before origin refine", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1244:   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_post", "Number of levels to refine along v_perp=0 after origin refine", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1245:   PetscCall(PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg));
1246:   PetscCall(PetscOptionsReal("-dm_landau_z_radius_pre", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg));
1247:   PetscCall(PetscOptionsReal("-dm_landau_z_radius_post", "velocity range to refine r=0 axis (for electrons) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1248:   /* spherical domain (not used) */
1249:   PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, NULL));
1250:   if (ctx->sphere) {
1251:     ctx->sphere_inner_radius_90degree = 0.40;
1252:     ctx->sphere_inner_radius_45degree = 0.35;
1253:     PetscCall(PetscOptionsReal("-dm_landau_sphere_inner_radius_90degree_scale", "Scaling of radius for inner circle on 90 degree grid", "plexland.c", ctx->sphere_inner_radius_90degree, &ctx->sphere_inner_radius_90degree, NULL));
1254:     PetscCall(PetscOptionsReal("-dm_landau_sphere_inner_radius_45degree_scale", "Scaling of radius for inner circle on 45 degree grid", "plexland.c", ctx->sphere_inner_radius_45degree, &ctx->sphere_inner_radius_45degree, NULL));
1255:   } else {
1256:     nt = LANDAU_DIM;
1257:     PetscCall(PetscOptionsIntArray("-dm_landau_num_cells", "Number of cells in each dimension of base grid", "plexland.c", ctx->cells0, &nt, &flg));
1258:   }
1259:   /* processing options */
1260:   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
1261:   if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // make Kokkos
1262:     PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL));
1263:     if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d", ctx->coo_assembly);
1264:   }
1265:   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1266:   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1267:   PetscCheck(!ctx->jacobian_field_major_order, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED");
1268:   PetscOptionsEnd();

1270:   for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1271:   if (ctx->verbose != 0) {
1272:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / 1.6720e-27), (double)(ctx->num_species > 2 ? ctx->masses[2] / 1.6720e-27 : 0)));
1273:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0)));
1274:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0)));
1275:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "thermal T (K): e=%10.3e i=%10.3e %10.3e. Normalization grid %d: v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e %" PetscInt_FMT " batched, view batch %" PetscInt_FMT "\n", (double)ctx->thermal_temps[0],
1276:                           (double)ctx->thermal_temps[1], (double)((ctx->num_species > 2) ? ctx->thermal_temps[2] : 0), (int)non_dim_grid, (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->batch_sz, ctx->batch_view_idx));
1277:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Domain radius (AMR levels) grid %d: par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius_par[0], (double)ctx->radius_perp[0], ctx->numAMRRefine[0]));
1278:     for (ii = 1; ii < ctx->num_grids; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, ", %" PetscInt_FMT ": par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius_par[ii], (double)ctx->radius_perp[ii], ctx->numAMRRefine[ii]));
1279:     if (ctx->use_relativistic_corrections) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nUse relativistic corrections\n"));
1280:     else PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1281:   }
1282:   PetscCall(DMDestroy(&dummy));
1283:   {
1284:     PetscMPIInt rank;
1285:     PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
1286:     ctx->stage = 0;
1287:     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]));   /* 13 */
1288:     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]));  /* 2 */
1289:     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]));   /* 12 */
1290:     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]));  /* 15 */
1291:     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1292:     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1293:     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]));  /* 0 */
1294:     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]));      /* 9 */
1295:     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]));       /* 10 */
1296:     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]));  /* 7 */
1297:     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]));  /* 1 */
1298:     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]));     /* 3 */
1299:     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]));  /* 8 */
1300:     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]));  /* 4 */
1301:     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1302:     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]));     /* 5 */
1303:     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]));    /* 6 */

1305:     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1306:       PetscCall(PetscOptionsClearValue(NULL, "-snes_converged_reason"));
1307:       PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
1308:       PetscCall(PetscOptionsClearValue(NULL, "-snes_monitor"));
1309:       PetscCall(PetscOptionsClearValue(NULL, "-ksp_monitor"));
1310:       PetscCall(PetscOptionsClearValue(NULL, "-ts_monitor"));
1311:       PetscCall(PetscOptionsClearValue(NULL, "-ts_view"));
1312:       PetscCall(PetscOptionsClearValue(NULL, "-ts_adapt_monitor"));
1313:       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view"));
1314:       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view"));
1315:       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view"));
1316:       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_view"));
1317:       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view"));
1318:       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mat_view"));
1319:       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason"));
1320:       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor"));
1321:       PetscCall(PetscOptionsClearValue(NULL, "-"));
1322:       PetscCall(PetscOptionsClearValue(NULL, "-info"));
1323:     }
1324:   }
1325:   PetscFunctionReturn(PETSC_SUCCESS);
1326: }

1328: static PetscErrorCode CreateStaticData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1329: {
1330:   PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
1331:   PetscQuadrature  quad;
1332:   const PetscReal *quadWeights;
1333:   PetscReal        invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1334:   PetscInt         numCells[LANDAU_MAX_GRIDS], Nq, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES;
1335:   PetscTabulation *Tf;
1336:   PetscDS          prob;

1338:   PetscFunctionBegin;
1339:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1340:     for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) {
1341:       invMass[ii]  = ctx->m_0 / ctx->masses[ii];
1342:       nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii];
1343:       nu_beta[ii]  = PetscSqr(ctx->charges[ii] / ctx->epsilon0) / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
1344:     }
1345:   }
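  /* note: nu_alpha[i] * nu_beta[j] * lambdas[gi][gj] = q_i^2 q_j^2 lnLambda_ij t_0 n_0 /
     (8 pi epsilon0^2 m_0 m_i v_0^3), the dimensionless collision prefactor; by the choice of
     t_0 in ProcessOptions() it equals 1 for self-collisions of the normalization species */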
1346:   if (ctx->verbose == 4) {
1347:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nu_alpha: "));
1348:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1349:       int iii = ctx->species_offset[grid];
1350:       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_alpha[ii]));
1351:     }
1352:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_beta: "));
1353:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1354:       int iii = ctx->species_offset[grid];
1355:       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_beta[ii]));
1356:     }
1357:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_alpha[i]*nu_beta[j]*lambda[i][j]:\n"));
1358:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1359:       int iii = ctx->species_offset[grid];
1360:       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) {
1361:         for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) {
1362:           int jjj = ctx->species_offset[gridj];
1363:           for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)(nu_alpha[ii] * nu_beta[jj] * ctx->lambdas[grid][gridj])));
1364:         }
1365:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1366:       }
1367:     }
1368:     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "lambda[i][j]:\n"));
1369:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1370:       int iii = ctx->species_offset[grid];
1371:       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) {
1372:         for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) {
1373:           int jjj = ctx->species_offset[gridj];
1374:           for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)ctx->lambdas[grid][gridj]));
1375:         }
1376:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1377:       }
1378:     }
1379:   }
1380:   PetscCall(DMGetDS(ctx->plex[0], &prob));    // same DS for all grids
1381:   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
1382:   /* DS, Tab and quad is same on all grids */
1383:   PetscCheck(ctx->plex[0], ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1384:   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1385:   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights));
1386:   PetscCheck(Nq <= LANDAU_MAX_NQ, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)", Nq, LANDAU_MAX_NQ);
1387:   /* setup each grid */
1388:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1389:     PetscInt cStart, cEnd;
1390:     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1391:     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1392:     numCells[grid] = cEnd - cStart; // grids can have different topology
1393:     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1394:     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1395:     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1396:     ncellsTot += numCells[grid];
1397:   }
1398:   /* create GPU assembly data */
1399:   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1400:     PetscContainer container;
1401:     PetscScalar   *elemMatrix, *elMat;
1402:     pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE];
1403:     P4estVertexMaps *maps;
1404:     const PetscInt  *plex_batch = NULL, Nb = Nq, elMatSz = Nq * Nq * ctx->num_species * ctx->num_species; // tensor elements;
1405:     LandauIdx       *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = NULL;
1406:     /* create GPU assembly data */
1407:     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1));
1408:     PetscCall(PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0));
1409:     PetscCall(PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps));
1410:     PetscCall(PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps));
1411:     PetscCall(PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix));

1413:     if (ctx->coo_assembly) {                                                                                                      // setup COO assembly -- put COO metadata directly in ctx->SData_d
1414:       PetscCall(PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1415:       coo_elem_offsets[0] = 0;                                                                                                    // finish later
1416:       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot));
1417:       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1418:       ctx->SData_d.coo_elem_offsets       = (void *)coo_elem_offsets;
1419:       ctx->SData_d.coo_elem_fullNb        = (void *)coo_elem_fullNb;
1420:       ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets;
1421:     } else {
1422:       ctx->SData_d.coo_elem_offsets = ctx->SData_d.coo_elem_fullNb = NULL;
1423:       ctx->SData_d.coo_elem_point_offsets                          = NULL;
1424:       ctx->SData_d.coo_n_cellsTot                                  = 0;
1425:     }

1427:     ctx->SData_d.coo_max_fullnb = 0;
1428:     for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1429:       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nq;
1430:       if (grid_batch_is_inv[grid]) PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch));
1431:       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1432:       // make maps
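      // gIdx[e][field][q] stores the global (possibly batch-permuted) index of each element point;
      // a negative value marks a hanging-node constraint: idx = -gIdx - 1 indexes c_maps[idx][*],
      // whose (gid, scale) pairs give the constrained value as a weighted sum of true dofs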
1433:       maps[grid].d_self       = NULL;
1434:       maps[grid].num_elements = numCells[grid];
1435:       maps[grid].num_face     = (PetscInt)(pow(Nq, 1. / ((double)dim)) + .001);                 // Q = quadrature points per direction
1436:       maps[grid].num_face     = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^(dim-1) = points per element face
1437:       maps[grid].num_reduced  = 0;
1438:       maps[grid].deviceType   = ctx->deviceType;
1439:       maps[grid].numgrids     = ctx->num_grids;
1440:       // count reduced and get
1441:       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
1442:       for (int ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1443:         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1444:         for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1445:           int fullNb = 0;
1446:           for (int q = 0; q < Nb; ++q) {
1447:             PetscInt     numindices, *indices;
1448:             PetscScalar *valuesOrig = elMat = elemMatrix;
1449:             PetscCall(PetscArrayzero(elMat, totDim * totDim));
1450:             elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1;
1451:             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat));
1452:             for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal
1453:               if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) {
1454:                 // found it
1455:                 if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1456:                   if (plex_batch) {
1457:                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)plex_batch[indices[f]];
1458:                   } else {
1459:                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1460:                   }
1461:                   fullNb++;
1462:                 } else { //found a constraint
1463:                   int            jj                = 0;
1464:                   PetscReal      sum               = 0;
1465:                   const PetscInt ff                = f;
1466:                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1

1468:                   do {                                                                                              // constraints are continuous in Plex - exploit that here
1469:                     int ii;                                                                                         // get 'scale'
1470:                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1471:                       if (ff + ii < numindices) {                                                                   // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1472:                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]);
1473:                       }
1474:                     }
1475:                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1476:                     // get 'gid'
1477:                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1478:                     else {
1479:                       if (plex_batch) {
1480:                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1481:                       } else {
1482:                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1483:                       }
1484:                       fullNb++;
1485:                     }
1486:                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1487:                   while (jj < maps[grid].num_face) {
1488:                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1489:                     pointMaps[maps[grid].num_reduced][jj].gid   = -1;
1490:                     jj++;
1491:                   }
1492:                   if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug
1493:                     int       d, f;
1494:                     PetscReal tmp = 0;
1495:                     PetscCall(PetscPrintf(PETSC_COMM_SELF, "\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face));
1496:                     for (d = 0, tmp = 0; d < numindices; ++d) {
1497:                       if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3d) %3" PetscInt_FMT ": ", d, indices[d]));
1498:                       for (f = 0; f < numindices; ++f) tmp += PetscRealPart(elMat[d * numindices + f]);
1499:                       if (tmp != 0) PetscCall(PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp));
1500:                     }
1501:                   }
1502:                   maps[grid].num_reduced++;
1503:                   PetscCheck(maps[grid].num_reduced < MAP_BF_SIZE, PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %" PetscInt_FMT, maps[grid].num_reduced, MAP_BF_SIZE);
1504:                 }
1505:                 break;
1506:               }
1507:             }
1508:             // cleanup
1509:             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat));
1510:             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat));
1511:           }
1512:           if (ctx->coo_assembly) {                                 // setup COO assembly
1513:             coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid
1514:             if (fieldA == 0) {                                     // cache full Nb for this element, on this grid per species
1515:               coo_elem_fullNb[glb_elem_idx] = fullNb;
1516:               if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1517:             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %d %d", coo_elem_fullNb[glb_elem_idx], fullNb);
1518:           }
1519:         } // field
1520:       }   // cell
1521:       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1522:       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1523:       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1524:         for (int q = 0; q < maps[grid].num_face; ++q) {
1525:           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1526:           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1527:         }
1528:       }
1529: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1530:       if (ctx->deviceType == LANDAU_KOKKOS) {
1531:         PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // implies Kokkos does
1532:       }                                                                      // else could be CUDA
1533: #endif
1534: #if defined(PETSC_HAVE_CUDA)
1535:       if (ctx->deviceType == LANDAU_CUDA) PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid));
1536: #endif
1537:       if (plex_batch) {
1538:         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1539:         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1540:       }
1541:     } /* grids */
1542:     // finish COO
1543:     if (ctx->coo_assembly) { // setup COO assembly
1544:       PetscInt *oor, *ooc;
1545:       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz;
1546:       PetscCall(PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc));
1547:       for (int i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1;
1548:       // get
1549:       for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1550:         for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1551:           const int              fullNb           = coo_elem_fullNb[glb_elem_idx];
1552:           const LandauIdx *const Idxs             = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should be the same, but this is just for COO storage
1553:           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1554:           for (int f = 0, cnt2 = 0; f < Nb; f++) {
1555:             int idx                                     = Idxs[f];
1556:             coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1557:             if (idx >= 0) {
1558:               cnt2++;
1559:               coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1560:             } else {
1561:               idx = -idx - 1;
1562:               for (int q = 0; q < maps[grid].num_face; q++) {
1563:                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1564:                 cnt2++;
1565:                 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1566:               }
1567:             }
1568:             PetscCheck(cnt2 <= fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d", fullNb, cnt2);
1569:           }
1570:           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d", coo_elem_point_offsets[glb_elem_idx][Nb], fullNb);
1571:         }
1572:       }
1573:       // set
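      // COO entry layout: per batch (stride coo_elem_offsets[ncellsTot]), then per element
      // (coo_elem_offsets[]), then one fullNb x fullNb block per field; positions inside a block
      // come from coo_elem_point_offsets[][], matching idx0 computed below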
1574:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1575:         for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1576:           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1577:           for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1578:             const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
1579:             // set (i,j)
1580:             for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1581:               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1582:               int                    rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
1583:               for (int f = 0; f < Nb; ++f) {
1584:                 const int nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f];
1585:                 if (nr == 1) rows[0] = Idxs[f];
1586:                 else {
1587:                   const int idx = -Idxs[f] - 1;
1588:                   for (int q = 0; q < nr; q++) rows[q] = maps[grid].c_maps[idx][q].gid;
1589:                 }
1590:                 for (int g = 0; g < Nb; ++g) {
1591:                   const int nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g];
1592:                   if (nc == 1) cols[0] = Idxs[g];
1593:                   else {
1594:                     const int idx = -Idxs[g] - 1;
1595:                     for (int q = 0; q < nc; q++) cols[q] = maps[grid].c_maps[idx][q].gid;
1596:                   }
1597:                   const int idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1598:                   for (int q = 0, idx = idx0; q < nr; q++) {
1599:                     for (int d = 0; d < nc; d++, idx++) {
1600:                       oor[idx] = rows[q] + moffset;
1601:                       ooc[idx] = cols[d] + moffset;
1602:                     }
1603:                   }
1604:                 }
1605:               }
1606:             }
1607:           } // cell
1608:         }   // grid
1609:       }     // batch
1610:       PetscCall(MatSetPreallocationCOO(ctx->J, ctx->SData_d.coo_size, oor, ooc));
1611:       PetscCall(PetscFree2(oor, ooc));
1612:     }
1613:     PetscCall(PetscFree(pointMaps));
1614:     PetscCall(PetscFree(elemMatrix));
1615:     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1616:     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1617:     PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy));
1618:     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container));
1619:     PetscCall(PetscContainerDestroy(&container));
1620:     PetscCall(PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0));
1621:   } // end GPU assembly
1622:   { /* create static point data, Jacobian called first, only one vertex copy */
1623:     PetscReal     *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a;
1624:     PetscInt       outer_ipidx, outer_ej, grid, nip_glb = 0;
1625:     PetscFE        fe;
1626:     const PetscInt Nb = Nq;
1627:     PetscCall(PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0));
1628:     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1629:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid];
1630:     /* collect f data, first time is for Jacobian, but make mass now */
1631:     if (ctx->verbose != 0) {
1632:       PetscInt ncells = 0, N;
1633:       PetscCall(MatGetSize(ctx->J, &N, NULL));
1634:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid];
1635:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb,
1636:                             ctx->num_species, Nb, dim, N));
1637:     }
1638:     PetscCall(PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a));
1639:     if (dim == 3) PetscCall(PetscMalloc1(nip_glb, &zz));
1640:     if (ctx->use_energy_tensor_trick) {
1641:       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
1642:       PetscCall(PetscObjectSetName((PetscObject)fe, "energy"));
1643:     }
1644:     /* init each grids static data - no batch */
1645:     for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once)
1646:       Vec          v2_2 = NULL;                                                    // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1647:       PetscSection e_section;
1648:       DM           dmEnergy;
1649:       PetscInt     cStart, cEnd, ej;

1651:       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1652:       // prep energy trick, get v^2 / 2 vector
1653:       if (ctx->use_energy_tensor_trick) {
1654:         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1655:         Vec        glob_v2;
1656:         PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};

1658:         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1659:         PetscCall(PetscObjectSetName((PetscObject)dmEnergy, "energy"));
1660:         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1661:         PetscCall(DMCreateDS(dmEnergy));
1662:         PetscCall(DMGetSection(dmEnergy, &e_section));
1663:         PetscCall(DMGetGlobalVector(dmEnergy, &glob_v2));
1664:         PetscCall(PetscObjectSetName((PetscObject)glob_v2, "trick"));
1665:         c2_0[0] = &data[0];
1666:         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2));
1667:         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1668:         PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */
1669:         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1670:         PetscCall(DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1671:         PetscCall(DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view"));
1672:         PetscCall(VecViewFromOptions(glob_v2, NULL, "-energy_vec_view"));
1673:         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1674:       }
1675:       /* append part of the IP data for each grid */
1676:       for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) {
1677:         PetscScalar *coefs = NULL;
1678:         PetscReal    vj[LANDAU_MAX_NQ * LANDAU_DIM], detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1679:         invJe = invJ_a + outer_ej * Nq * dim * dim;
1680:         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj));
1681:         if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs));
1682:         /* create static point data */
1683:         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1684:           const PetscInt   gidx = outer_ipidx;
1685:           const PetscReal *invJ = &invJe[qj * dim * dim];
1686:           ww[gidx]              = detJj[qj] * quadWeights[qj];
1687:           if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
1688:           // get xx, yy, zz
1689:           if (ctx->use_energy_tensor_trick) {
1690:             double                 refSpaceDer[3], eGradPhi[3];
1691:             const PetscReal *const DD = Tf[0]->T[1];
1692:             const PetscReal       *Dq = &DD[qj * Nb * dim];
1693:             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1694:             for (int b = 0; b < Nb; ++b) {
1695:               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]);
1696:             }
1697:             xx[gidx] = 1e10;
1698:             if (ctx->use_relativistic_corrections) {
1699:               double dg2_c2 = 0;
1700:               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1701:               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1702:               dg2_c2 *= (double)c02;
1703:               if (dg2_c2 >= .999) {
1704:                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1705:                 yy[gidx] = vj[qj * dim + 1];
1706:                 if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1707:                 PetscCall(PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + zz[gidx] * zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx]));
1708:               } else {
1709:                 PetscReal fact = c02 / PetscSqrtReal(1. - dg2_c2);
1710:                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1711:                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1712:               }
1713:             }
1714:             if (xx[gidx] == 1e10) {
1715:               for (int d = 0; d < dim; ++d) {
1716:                 for (int e = 0; e < dim; ++e) eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e];
1717:               }
1718:               xx[gidx] = eGradPhi[0];
1719:               yy[gidx] = eGradPhi[1];
1720:               if (dim == 3) zz[gidx] = eGradPhi[2];
1721:             }
1722:           } else {
1723:             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1724:             yy[gidx] = vj[qj * dim + 1];
1725:             if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1726:           }
1727:         } /* q */
1728:         if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs));
1729:       } /* ej */
1730:       if (ctx->use_energy_tensor_trick) {
1731:         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1732:         PetscCall(DMDestroy(&dmEnergy));
1733:       }
1734:     } /* grid */
1735:     if (ctx->use_energy_tensor_trick) PetscCall(PetscFEDestroy(&fe));
1736:     /* cache static data */
1737:     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1738: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1739:       if (ctx->deviceType == LANDAU_CUDA) {
1740:   #if defined(PETSC_HAVE_CUDA)
1741:         PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, (PetscReal *)ctx->lambdas, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1742:   #else
1743:         SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type cuda not built");
1744:   #endif
1745:       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1746:   #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1747:         PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, (PetscReal *)ctx->lambdas, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1748:   #else
1749:         SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built");
1750:   #endif
1751:       }
1752: #endif
1753:       /* free */
1754:       PetscCall(PetscFree4(ww, xx, yy, invJ_a));
1755:       if (dim == 3) PetscCall(PetscFree(zz));
1756:     } else {                                                                                                                                                                   /* CPU version, just copy in, only use part */
1757:       PetscReal *nu_alpha_p = (PetscReal *)ctx->SData_d.alpha, *nu_beta_p = (PetscReal *)ctx->SData_d.beta, *invMass_p = (PetscReal *)ctx->SData_d.invMass, *lambdas_p = NULL; // why set these ?
1758:       ctx->SData_d.w    = (void *)ww;
1759:       ctx->SData_d.x    = (void *)xx;
1760:       ctx->SData_d.y    = (void *)yy;
1761:       ctx->SData_d.z    = (void *)zz;
1762:       ctx->SData_d.invJ = (void *)invJ_a;
1763:       PetscCall(PetscMalloc4(ctx->num_species, &nu_alpha_p, ctx->num_species, &nu_beta_p, ctx->num_species, &invMass_p, LANDAU_MAX_GRIDS * LANDAU_MAX_GRIDS, &lambdas_p));
1764:       for (PetscInt ii = 0; ii < ctx->num_species; ii++) {
1765:         nu_alpha_p[ii] = nu_alpha[ii];
1766:         nu_beta_p[ii]  = nu_beta[ii];
1767:         invMass_p[ii]  = invMass[ii];
1768:       }
1769:       ctx->SData_d.alpha   = (void *)nu_alpha_p;
1770:       ctx->SData_d.beta    = (void *)nu_beta_p;
1771:       ctx->SData_d.invMass = (void *)invMass_p;
1772:       ctx->SData_d.lambdas = (void *)lambdas_p;
1773:       for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1774:         PetscReal(*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal(*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
1775:         for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) { (*lambdas)[grid][gridj] = ctx->lambdas[grid][gridj]; }
1776:       }
1777:     }
1778:     PetscCall(PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0));
1779:   } // initialize
1780:   PetscFunctionReturn(PETSC_SUCCESS);
1781: }

1783: /* < v, u > */
1784: static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1785: {
1786:   g0[0] = 1.;
1787: }

1789: /* < v, u > -- uses tiny, strictly increasing values, only to generate a nonzero matrix structure */
1790: static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1791: {
1792:   static double ttt = 1e-12;
1793:   g0[0]             = ttt++;
1794: }

1796: /* < v, u > with the cylindrical 2*pi*r volume element */
1797: static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1798: {
1799:   g0[0] = 2. * PETSC_PI * x[0];
1800: }

1802: static PetscErrorCode MatrixNfDestroy(void *ptr)
1803: {
1804:   PetscInt *nf = (PetscInt *)ptr;
1805:   PetscFunctionBegin;
1806:   PetscCall(PetscFree(nf));
1807:   PetscFunctionReturn(PETSC_SUCCESS);
1808: }

1810: /*
1811:  LandauCreateJacobianMatrix - creates ctx->J without real data. Hard to keep sparse.
1812:   - Like DMPlexLandauCreateMassMatrix. Should remove one and combine
1813:   - has old support for field major ordering
1814:  */
1815: static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
1816: {
1817:   PetscInt *idxs = NULL;
1818:   Mat       subM[LANDAU_MAX_GRIDS];

1820:   PetscFunctionBegin;
1821:   if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1822:     PetscFunctionReturn(PETSC_SUCCESS);
1823:   }
1824:   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' -- not used
1825:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs));
1826:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1827:     const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid];
1828:     Mat             gMat;
1829:     DM              massDM;
1830:     PetscDS         prob;
1831:     Vec             tvec;
1832:     // get "mass" matrix for reordering
1833:     PetscCall(DMClone(ctx->plex[grid], &massDM));
1834:     PetscCall(DMCopyFields(ctx->plex[grid], massDM));
1835:     PetscCall(DMCreateDS(massDM));
1836:     PetscCall(DMGetDS(massDM, &prob));
1837:     for (int ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL));
1838:     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); // this trick is needed both to sparsify the matrix and to avoid a runtime error
1839:     PetscCall(DMCreateMatrix(massDM, &gMat));
1840:     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
1841:     PetscCall(MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
1842:     PetscCall(MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
1843:     PetscCall(DMCreateLocalVector(ctx->plex[grid], &tvec));
1844:     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
1845:     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
1846:     PetscCall(DMDestroy(&massDM));
1847:     PetscCall(VecDestroy(&tvec));
1848:     subM[grid] = gMat;
1849:     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1850:       MatOrderingType rtype = MATORDERINGRCM;
1851:       IS              isrow, isicol;
1852:       PetscCall(MatGetOrdering(gMat, rtype, &isrow, &isicol));
1853:       PetscCall(ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid]));
1854:       PetscCall(ISGetIndices(isrow, &values));
1855:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1856: #if !defined(LANDAU_SPECIES_MAJOR)
1857:         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N;
1858:         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1859: #else
1860:         PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n;
1861:         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1862: #endif
1863:       }
1864:       PetscCall(ISRestoreIndices(isrow, &values));
1865:       PetscCall(ISDestroy(&isrow));
1866:       PetscCall(ISDestroy(&isicol));
1867:     }
1868:   }
1869:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is));
1870:   // get a block matrix
1871:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1872:     Mat      B = subM[grid];
1873:     PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024;
1874:     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1875:     PetscCall(MatGetSize(B, &nloc, NULL));
1876:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1877:       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1878:       const PetscInt    *cols;
1879:       const PetscScalar *vals;
1880:       for (int i = 0; i < nloc; i++) {
1881:         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
1882:         if (nzl > COL_BF_SIZE) {
1883:           PetscCall(PetscFree(colbuf));
1884:           PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, nzl, nzl));
1885:           COL_BF_SIZE = nzl;
1886:           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1887:         }
1888:         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
1889:         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
1890:         row = i + moffset;
1891:         PetscCall(MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
1892:         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
1893:       }
1894:     }
1895:     PetscCall(PetscFree(colbuf));
1896:   }
1897:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid]));
1898:   PetscCall(MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY));
1899:   PetscCall(MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY));

1901:   // debug
1902:   PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
1903:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1904:     Mat mat_block_order;
1905:     PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order)); // use MatPermute
1906:     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view"));
1907:     PetscCall(MatDestroy(&mat_block_order));
1908:     PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch));
1909:     PetscCall(VecDuplicate(X, &ctx->work_vec));
1910:   }

1912:   PetscFunctionReturn(PETSC_SUCCESS);
1913: }
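
/* Illustration only (not part of the original source): with -dm_landau_jacobian_field_major_order,
   the ctx->batch_is and ctx->plex_batch objects created above relate the Plex ordering to the
   batch/field-major ordering. A minimal sketch of permuting the operator and a state vector
   (the function name is hypothetical; the scatter direction is assumed to be Plex -> field-major): */
static PetscErrorCode ExamplePermuteToFieldMajor_Sketch(LandauCtx *ctx, Vec X, Mat *Aperm, Vec *Xperm)
{
  PetscFunctionBegin;
  PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, Aperm));
  PetscCall(VecDuplicate(X, Xperm));
  PetscCall(VecScatterBegin(ctx->plex_batch, X, *Xperm, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(ctx->plex_batch, X, *Xperm, INSERT_VALUES, SCATTER_FORWARD));
  PetscFunctionReturn(PETSC_SUCCESS);
}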

1915: PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
1916: /*@C
1917:  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh

1919:  Collective

1921:  Input Parameters:
1922:  +   comm  - The MPI communicator
1923:  .   dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
1924:  -   prefix - prefix for options (not tested)

1926:  Output Parameters:
1927:  +   pack  - The DM object representing the mesh
1928:  .   X - A vector (user destroys)
1929:  -   J - Optional matrix (object destroys)

1931:  Level: beginner

1933:  .keywords: mesh
1934: .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
1935:  @*/
1936: PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
1937: {
1938:   LandauCtx *ctx;
1939:   Vec        Xsub[LANDAU_MAX_GRIDS];
1940:   IS         grid_batch_is_inv[LANDAU_MAX_GRIDS];

1942:   PetscFunctionBegin;
1943:   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
1944:   PetscCheck(LANDAU_DIM == dim, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM);
1945:   PetscCall(PetscNew(&ctx));
1946:   ctx->comm = comm; /* used for diagnostics and global errors */
1947:   /* process options */
1948:   PetscCall(ProcessOptions(ctx, prefix));
1949:   if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE;
1950:   /* Create Mesh */
1951:   PetscCall(DMCompositeCreate(PETSC_COMM_SELF, pack));
1952:   PetscCall(PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0));
1953:   PetscCall(PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0));
1954:   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR)
1955:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1956:     /* create FEM */
1957:     PetscCall(SetupDS(ctx->plex[grid], dim, grid, ctx));
1958:     /* set initial state */
1959:     PetscCall(DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid]));
1960:     PetscCall(PetscObjectSetName((PetscObject)Xsub[grid], "u_orig"));
1961:     /* initial static refinement, no solve */
1962:     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx));
1963:     /* forest refinement - forest goes in (if forest), plex comes out */
1964:     if (ctx->use_p4est) {
1965:       DM plex;
1966:       PetscCall(adapt(grid, ctx, &Xsub[grid]));                                      // forest goes in, plex comes out
1967:       PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view")); // need to differentiate - todo
1968:       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
1969:       // convert to plex, all done with this level
1970:       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
1971:       PetscCall(DMDestroy(&ctx->plex[grid]));
1972:       ctx->plex[grid] = plex;
1973:     }
1974: #if !defined(LANDAU_SPECIES_MAJOR)
1975:     PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
1976: #else
1977:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1978:       PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
1979:     }
1980: #endif
1981:     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
1982:   }
1983: #if !defined(LANDAU_SPECIES_MAJOR)
1984:   // stack the batched DMs (b_id = 0 was added in the grid loop above; could do it all here)
1985:   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
1986:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
1987:   }
1988: #endif
1989:   // create ctx->mat_offset
1990:   ctx->mat_offset[0] = 0;
1991:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1992:     PetscInt n;
1993:     PetscCall(VecGetLocalSize(Xsub[grid], &n));
1994:     ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n;
1995:   }
1996:   // create DM & Jacobian
1997:   PetscCall(DMSetApplicationContext(*pack, ctx));
1998:   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
1999:   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2000:   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2001:   PetscCall(MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2002:   PetscCall(MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2003:   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2004:   // construct initial conditions in X
2005:   PetscCall(DMCreateGlobalVector(*pack, X));
2006:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2007:     PetscInt n;
2008:     PetscCall(VecGetLocalSize(Xsub[grid], &n));
2009:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2010:       PetscScalar const *values;
2011:       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2012:       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx));
2013:       PetscCall(VecGetArrayRead(Xsub[grid], &values)); // Drop whole grid in Plex ordering
2014:       for (int i = 0, idx = moffset; i < n; i++, idx++) PetscCall(VecSetValue(*X, idx, values[i], INSERT_VALUES));
2015:       PetscCall(VecRestoreArrayRead(Xsub[grid], &values));
2016:     }
2017:   }
2018:   // cleanup
2019:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(VecDestroy(&Xsub[grid]));
2020:   /* check for correct matrix type */
2021:   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2022:     PetscBool flg;
2023:     if (ctx->deviceType == LANDAU_CUDA) {
2024:       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, MATAIJCUSPARSE, ""));
2025:       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
2026:     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2027:       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, ""));
2028: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2029:       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2030: #else
2031:       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2032: #endif
2033:     }
2034:   }
2035:   PetscCall(PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0));

2037:   // create field major ordering
2038:   ctx->work_vec   = NULL;
2039:   ctx->plex_batch = NULL;
2040:   ctx->batch_is   = NULL;
2041:   for (int i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL;
2042:   PetscCall(PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0));
2043:   PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx));
2044:   PetscCall(PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0));

2046:   // create AMR GPU assembly maps and static GPU data
2047:   PetscCall(CreateStaticData(dim, grid_batch_is_inv, ctx));

2049:   PetscCall(PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0));

2051:   // create mass matrix
2052:   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));

2054:   if (J) *J = ctx->J;

2056:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2057:     PetscContainer container;
2058:     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2059:     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2060:     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2061:     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container));
2062:     PetscCall(PetscContainerDestroy(&container));
2063:     // batch solvers need the map -- cache it on the matrix so batch solvers can use it
2064:     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2065:     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2066:     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container));
2067:     PetscCall(PetscContainerDestroy(&container));
2068:   }
2069:   // for batch solvers
2070:   {
2071:     PetscContainer container;
2072:     PetscInt      *pNf;
2073:     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2074:     PetscCall(PetscMalloc1(sizeof(*pNf), &pNf));
2075:     *pNf = ctx->batch_sz;
2076:     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2077:     PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy));
2078:     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container));
2079:     PetscCall(PetscContainerDestroy(&container));
2080:   }
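  /* Illustration only (not part of the original source): a batch solver can later retrieve the
     composed batch size from the Jacobian, e.g. (error checking omitted):
       PetscContainer c;
       PetscInt      *pNf;
       PetscCall(PetscObjectQuery((PetscObject)A, "batch size", (PetscObject *)&c));
       if (c) PetscCall(PetscContainerGetPointer(c, (void **)&pNf));
     where A is the Jacobian created here (ctx->J). */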

2082:   PetscFunctionReturn(PETSC_SUCCESS);
2083: }
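
/* Illustration only (not part of the original source): a minimal lifecycle sketch for the interface
   above, assuming a NULL options prefix is acceptable; the TS hookup is shown after
   DMPlexLandauIJacobian() at the end of this file. */
static PetscErrorCode ExampleLandauLifecycle_Sketch(MPI_Comm comm, PetscInt dim)
{
  DM  pack;
  Vec X;
  Mat J;

  PetscFunctionBegin;
  PetscCall(DMPlexLandauCreateVelocitySpace(comm, dim, NULL, &X, &J, &pack));
  PetscCall(DMPlexLandauPrintNorms(X, 0)); /* moments of the initial state (defined below) */
  PetscCall(VecDestroy(&X));               /* the user destroys X; J is owned by the context */
  PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
  PetscFunctionReturn(PETSC_SUCCESS);
}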

2085: /*@
2086:  DMPlexLandauAccess - Access to the distribution function with user callback

2088:  Collective

2090:  Input Parameters:
2091:  +   pack - the DMComposite
2092:  .   func - callback function
2093:  -   user_ctx - user context

2095:  Input/Output Parameter:
2096:  .   X - Vector to add data to

2098:  Level: advanced

2100:  .keywords: mesh
2101: .seealso: `DMPlexLandauCreateVelocitySpace()`
2102:  @*/
2103: PetscErrorCode DMPlexLandauAccess(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx)
2104: {
2105:   LandauCtx *ctx;
2106:   PetscFunctionBegin;
2107:   PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset
2108:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2109:     PetscInt dim, n;
2110:     PetscCall(DMGetDimension(pack, &dim));
2111:     for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
2112:       Vec      vec;
2113:       PetscInt vf[1] = {i0};
2114:       IS       vis;
2115:       DM       vdm;
2116:       PetscCall(DMCreateSubDM(ctx->plex[grid], 1, vf, &vis, &vdm));
2117:       PetscCall(DMSetApplicationContext(vdm, ctx)); // the user might want this
2118:       PetscCall(DMCreateGlobalVector(vdm, &vec));
2119:       PetscCall(VecGetSize(vec, &n));
2120:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2121:         const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2122:         PetscCall(VecZeroEntries(vec));
2123:         /* Add your data to 'vec' using 'vdm' for species 'sp' in batch 'b_id' */
2124:         PetscCall(func(vdm, vec, i0, grid, b_id, user_ctx));
2125:         /* add to global */
2126:         PetscScalar const *values;
2127:         const PetscInt    *offsets;
2128:         PetscCall(VecGetArrayRead(vec, &values));
2129:         PetscCall(ISGetIndices(vis, &offsets));
2130:         for (int i = 0; i < n; i++) PetscCall(VecSetValue(X, moffset + offsets[i], values[i], ADD_VALUES));
2131:         PetscCall(VecRestoreArrayRead(vec, &values));
2132:         PetscCall(ISRestoreIndices(vis, &offsets));
2133:       } // batch
2134:       PetscCall(VecDestroy(&vec));
2135:       PetscCall(ISDestroy(&vis));
2136:       PetscCall(DMDestroy(&vdm));
2137:     }
2138:   } // grid
2139:   PetscFunctionReturn(PETSC_SUCCESS);
2140: }
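
/* Illustration only (not part of the original source): a user callback with the signature expected by
   DMPlexLandauAccess() above. It projects a pointwise function onto the single-species sub-DM `vdm`;
   `MySpeciesProfile` is a hypothetical user function with the standard PETSc pointwise signature
   (PetscInt dim, PetscReal t, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx). */
static PetscErrorCode ExampleAccessCallback_Sketch(DM vdm, Vec vec, PetscInt i0, PetscInt grid, PetscInt b_id, void *user_ctx)
{
  /* MySpeciesProfile is hypothetical (not defined in this file) */
  PetscErrorCode (*funcs[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *) = {MySpeciesProfile};
  void *ctxs[1] = {user_ctx};

  PetscFunctionBegin;
  PetscCall(DMProjectFunction(vdm, 0.0, funcs, ctxs, INSERT_ALL_VALUES, vec));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* usage sketch: PetscCall(DMPlexLandauAccess(pack, X, ExampleAccessCallback_Sketch, &my_params)); */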

2142: /*@
2143:  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh

2145:  Collective

2147:  Input/Output Parameters:
2148:  .   dm - the dm to destroy

2150:  Level: beginner

2152:  .keywords: mesh
2153: .seealso: `DMPlexLandauCreateVelocitySpace()`
2154:  @*/
2155: PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2156: {
2157:   LandauCtx *ctx;
2158:   PetscFunctionBegin;
2159:   PetscCall(DMGetApplicationContext(*dm, &ctx));
2160:   PetscCall(MatDestroy(&ctx->M));
2161:   PetscCall(MatDestroy(&ctx->J));
2162:   for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2163:   PetscCall(ISDestroy(&ctx->batch_is));
2164:   PetscCall(VecDestroy(&ctx->work_vec));
2165:   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2166:   if (ctx->deviceType == LANDAU_CUDA) {
2167: #if defined(PETSC_HAVE_CUDA)
2168:     PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d));
2169: #else
2170:     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "cuda");
2171: #endif
2172:   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2173: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2174:     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2175: #else
2176:     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
2177: #endif
2178:   } else {
2179:     if (ctx->SData_d.x) { /* in a CPU run */
2180:       PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
2181:       LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = (LandauIdx(*)[LANDAU_MAX_NQ + 1]) ctx->SData_d.coo_elem_point_offsets;
2182:       PetscCall(PetscFree4(ww, xx, yy, invJ));
2183:       if (zz) PetscCall(PetscFree(zz));
2184:       if (coo_elem_offsets) {
2185:         PetscCall(PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets)); // could be NULL
2186:       }
2187:       PetscCall(PetscFree4(ctx->SData_d.alpha, ctx->SData_d.beta, ctx->SData_d.invMass, ctx->SData_d.lambdas));
2188:     }
2189:   }

2191:   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2192:     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE]));
2193:     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz));
2194:     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL]));
2195:     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN]));
2196:     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n", ctx->times[LANDAU_OPERATOR]));
2197:     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n", ctx->times[LANDAU_MASS]));
2198:     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n", ctx->times[LANDAU_F_DF]));
2199:     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n", ctx->times[LANDAU_KERNEL]));
2200:     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n", ctx->times[KSP_FACTOR]));
2201:     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n", ctx->times[KSP_SOLVE]));
2202:   }
2203:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMDestroy(&ctx->plex[grid]));
2204:   PetscCall(PetscFree(ctx));
2205:   PetscCall(DMDestroy(dm));
2206:   PetscFunctionReturn(PETSC_SUCCESS);
2207: }

2209: /* < v, ru > */
2210: static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2211: {
2212:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2213:   f0[0]       = u[ii];
2214: }

2216: /* < v, ru > */
2217: static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2218: {
2219:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2220:   f0[0] = x[jj] * u[ii]; /* x momentum */
2221: }

2223: static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2224: {
2225:   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2226:   double   tmp1 = 0.;
2227:   for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2228:   f0[0] = tmp1 * u[ii];
2229: }

2231: static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2232: {
2233:   const PetscReal *c2_0_arr = ((PetscReal *)actx);
2234:   const PetscReal  c02      = c2_0_arr[0];

2236:   PetscFunctionBegin;
2237:   for (int s = 0; s < Nf; s++) {
2238:     PetscReal tmp1 = 0.;
2239:     for (int i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2240: #if defined(PETSC_USE_DEBUG)
2241:     u[s] = PetscSqrtReal(1. + tmp1 / c02); //  u[0] = PetscSqrtReal(1. + xx);
2242: #else
2243:     {
2244:       PetscReal xx = tmp1 / c02;
2245:       u[s]         = xx / (PetscSqrtReal(1. + xx) + 1.); // better conditioned for small xx; equals PetscSqrtReal(1. + xx) - 1.
2246:     }
2247: #endif
2248:   }
2249:   PetscFunctionReturn(PETSC_SUCCESS);
2250: }

2252: /* < v, ru > */
2253: static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2254: {
2255:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2256:   f0[0]       = 2. * PETSC_PI * x[0] * u[ii];
2257: }

2259: /* < v, ru > */
2260: static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2261: {
2262:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2263:   f0[0]       = 2. * PETSC_PI * x[0] * x[1] * u[ii];
2264: }

2266: static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2267: {
2268:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2269:   f0[0]       = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii];
2270: }
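
/* The f0_s_r* functionals above integrate against the axisymmetric measure 2*pi*r dr dz
   (with x[0] = r, x[1] = z). Combined with the scalings applied in DMPlexLandauPrintNorms()
   below, the printed 2D (cylindrical) moments for species i are:
     charge density:  n_0 * q_i             * Int 2*pi*r f_i dr dz
     z-momentum:      n_0 * v_0 * m_i       * Int 2*pi*r z f_i dr dz
     energy:          n_0 * v_0^2 * m_i / 2 * Int 2*pi*r (r^2 + z^2) f_i dr dz */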

2272: /*@
2273:  DMPlexLandauPrintNorms - collects moments and prints them

2275:  Collective

2277:  Input Parameters:
2278:  +   X  - the state
2279:  -   stepi - current step to print

2281:  Level: beginner

2283:  .keywords: mesh
2284: .seealso: `DMPlexLandauCreateVelocitySpace()`
2285:  @*/
2286: PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2287: {
2288:   LandauCtx  *ctx;
2289:   PetscDS     prob;
2290:   DM          pack;
2291:   PetscInt    cStart, cEnd, dim, ii, i0, nDMs;
2292:   PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES];
2293:   PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2294:   Vec        *globXArray;

2296:   PetscFunctionBegin;
2297:   PetscCall(VecGetDM(X, &pack));
2298:   PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2299:   PetscCall(DMGetDimension(pack, &dim));
2300:   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]", dim);
2301:   PetscCall(DMGetApplicationContext(pack, &ctx));
2302:   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2303:   /* print momentum and energy */
2304:   PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
2305:   PetscCheck(nDMs == ctx->num_grids * ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT, nDMs, ctx->num_grids * ctx->batch_sz);
2306:   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
2307:   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2308:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2309:     Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2310:     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2311:     for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
2312:       PetscScalar user[2] = {(PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2313:       PetscCall(PetscDSSetConstants(prob, 2, user));
2314:       if (dim == 2) { /* 2/3X + 3V (cylindrical coordinates) */
2315:         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2316:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2317:         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2318:         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2319:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2320:         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2321:         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2322:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2323:         energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2324:         zmomentumtot += zmomentum[ii];
2325:         energytot += energy[ii];
2326:         densitytot += density[ii];
2327:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2328:       } else { /* 2/3X + 3V (Cartesian) */
2329:         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2330:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2331:         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2332:         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2333:         user[1] = 0;
2334:         PetscCall(PetscDSSetConstants(prob, 2, user));
2335:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2336:         xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2337:         user[1]       = 1;
2338:         PetscCall(PetscDSSetConstants(prob, 2, user));
2339:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2340:         ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2341:         user[1]       = 2;
2342:         PetscCall(PetscDSSetConstants(prob, 2, user));
2343:         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2344:         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2345:         if (ctx->use_relativistic_corrections) {
2346:           /* gamma * M * f */
2347:           if (ii == 0 && grid == 0) { // do all at once
2348:             Vec Mf, globGamma, *globMfArray, *globGammaArray;
2349:             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f};
2350:             PetscReal *c2_0[1], data[1];

2352:             PetscCall(VecDuplicate(X, &globGamma));
2353:             PetscCall(VecDuplicate(X, &Mf));
2354:             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray));
2355:             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray));
2356:             /* M * f */
2357:             PetscCall(MatMult(ctx->M, X, Mf));
2358:             /* gamma */
2359:             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2360:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching
2361:               Vec v1  = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2362:               data[0] = PetscSqr(C_0(ctx->v_0));
2363:               c2_0[0] = &data[0];
2364:               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1));
2365:             }
2366:             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2367:             /* gamma * Mf */
2368:             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2369:             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2370:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes a grid loop in a grid loop to print nice
2371:               PetscInt Nf    = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs;
2372:               Vec      Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2;
2373:               // get each component
2374:               PetscCall(VecGetSize(Mfsub, &N));
2375:               PetscCall(VecCreate(ctx->comm, &v1));
2376:               PetscCall(VecSetSizes(v1, PETSC_DECIDE, N / Nf));
2377:               PetscCall(VecCreate(ctx->comm, &v2));
2378:               PetscCall(VecSetSizes(v2, PETSC_DECIDE, N / Nf));
2379:               PetscCall(VecSetFromOptions(v1)); // ???
2380:               PetscCall(VecSetFromOptions(v2));
2381:               // get each component
2382:               PetscCall(VecGetBlockSize(Gsub, &bs));
2383:               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub", bs, Nf);
2384:               PetscCall(VecGetBlockSize(Mfsub, &bs));
2385:               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT, bs, Nf);
2386:               for (int i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) {
2387:                 PetscScalar val;
2388:                 PetscCall(VecStrideGather(Gsub, i, v1, INSERT_VALUES)); // this is not right -- TODO
2389:                 PetscCall(VecStrideGather(Mfsub, i, v2, INSERT_VALUES));
2390:                 PetscCall(VecDot(v1, v2, &val));
2391:                 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix];
2392:               }
2393:               PetscCall(VecDestroy(&v1));
2394:               PetscCall(VecDestroy(&v2));
2395:             } /* grids */
2396:             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2397:             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2398:             PetscCall(PetscFree(globGammaArray));
2399:             PetscCall(PetscFree(globMfArray));
2400:             PetscCall(VecDestroy(&globGamma));
2401:             PetscCall(VecDestroy(&Mf));
2402:           }
2403:         } else {
2404:           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2405:           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2406:           energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2407:         }
2408:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2409:         xmomentumtot += xmomentum[ii];
2410:         ymomentumtot += ymomentum[ii];
2411:         zmomentumtot += zmomentum[ii];
2412:         energytot += energy[ii];
2413:         densitytot += density[ii];
2414:       }
2415:       if (ctx->num_species > 1) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
2416:     }
2417:   }
2418:   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2419:   PetscCall(PetscFree(globXArray));
2420:   /* totals */
2421:   PetscCall(DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd));
2422:   if (ctx->num_species > 1) {
2423:     if (dim == 2) {
2424:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2425:                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2426:     } else {
2427:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2428:                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2429:     }
2430:   } else PetscCall(PetscPrintf(PETSC_COMM_WORLD, " -- %" PetscInt_FMT " cells", cEnd - cStart));
2431:   PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
2432:   PetscFunctionReturn(PETSC_SUCCESS);
2433: }
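
/* Illustration only (not part of the original source): DMPlexLandauPrintNorms() is convenient inside
   a TS monitor; a minimal sketch, registered with TSMonitorSet(ts, ExampleMonitor_Sketch, NULL, NULL): */
static PetscErrorCode ExampleMonitor_Sketch(TS ts, PetscInt step, PetscReal time, Vec X, void *mctx)
{
  PetscFunctionBegin;
  PetscCall(DMPlexLandauPrintNorms(X, step));
  PetscFunctionReturn(PETSC_SUCCESS);
}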

2435: /*@
2436:  DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian)
2437:   - puts mass matrix into ctx->M

2439:  Collective

2441:  Input Parameter:
2442: . pack     - the DM object. Puts matrix in Landau context M field

2444:  Output Parameter:
2445: . Amat - The mass matrix (optional); the mass matrix is also stored in the Landau context (ctx->M)

2447:  Level: beginner

2449:  .keywords: mesh
2450: .seealso: `DMPlexLandauCreateVelocitySpace()`
2451:  @*/
2452: PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2453: {
2454:   DM         mass_pack, massDM[LANDAU_MAX_GRIDS];
2455:   PetscDS    prob;
2456:   PetscInt   ii, dim, N1 = 1, N2;
2457:   LandauCtx *ctx;
2458:   Mat        packM, subM[LANDAU_MAX_GRIDS];

2460:   PetscFunctionBegin;
2463:   PetscCall(DMGetApplicationContext(pack, &ctx));
2464:   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2465:   PetscCall(PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0));
2466:   PetscCall(DMGetDimension(pack, &dim));
2467:   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack));
2468:   /* create pack mass matrix */
2469:   for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) {
2470:     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2471:     PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid]));
2472:     PetscCall(DMCreateDS(massDM[grid]));
2473:     PetscCall(DMGetDS(massDM[grid], &prob));
2474:     for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) {
2475:       if (dim == 3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2476:       else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2477:     }
2478: #if !defined(LANDAU_SPECIES_MAJOR)
2479:     PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2480: #else
2481:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2482:       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2483:     }
2484: #endif
2485:     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2486:   }
2487: #if !defined(LANDAU_SPECIES_MAJOR)
2488:   // stack the batched DMs
2489:   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2490:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2491:   }
2492: #endif
2493:   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2494:   PetscCall(DMCreateMatrix(mass_pack, &packM));
2495:   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2496:   PetscCall(MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2497:   PetscCall(MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2498:   PetscCall(DMDestroy(&mass_pack));
2499:   /* make mass matrix for each block */
2500:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2501:     Vec locX;
2502:     DM  plex = massDM[grid];
2503:     PetscCall(DMGetLocalVector(plex, &locX));
2504:     /* Mass matrix is independent of the input, so no need to fill locX */
2505:     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2506:     PetscCall(DMRestoreLocalVector(plex, &locX));
2507:     PetscCall(DMDestroy(&massDM[grid]));
2508:   }
2509:   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2510:   PetscCall(MatGetSize(packM, &N2, NULL));
2511:   PetscCheck(N1 == N2, PetscObjectComm((PetscObject)pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT, N1, N2);
2512:   /* assemble block diagonals */
2513:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2514:     Mat      B = subM[grid];
2515:     PetscInt nloc, nzl, *colbuf, COL_BF_SIZE = 1024, row;
2516:     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2517:     PetscCall(MatGetSize(B, &nloc, NULL));
2518:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2519:       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2520:       const PetscInt    *cols;
2521:       const PetscScalar *vals;
2522:       for (int i = 0; i < nloc; i++) {
2523:         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
2524:         if (nzl > COL_BF_SIZE) {
2525:           PetscCall(PetscFree(colbuf));
2526:           PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, nzl, nzl));
2527:           COL_BF_SIZE = nzl;
2528:           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2529:         }
2530:         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
2531:         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
2532:         row = i + moffset;
2533:         PetscCall(MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
2534:         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
2535:       }
2536:     }
2537:     PetscCall(PetscFree(colbuf));
2538:   }
2539:   // cleanup
2540:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid]));
2541:   PetscCall(MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY));
2542:   PetscCall(MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY));
2543:   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2544:   PetscCall(MatViewFromOptions(packM, NULL, "-dm_landau_mass_view"));
2545:   ctx->M = packM;
2546:   if (Amat) *Amat = packM;
2547:   PetscCall(PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0));
2548:   PetscFunctionReturn(PETSC_SUCCESS);
2549: }

2551: /*@
2552:  DMPlexLandauIFunction - TS residual calculation; note that this also (re)builds the Landau collision Jacobian, without the mass term

2554:  Collective

2556:  Input Parameters:
2557: +   ts  - The time stepping context
2558: .   time_dummy - current time (not used)
2559: .   X - Current state
2560: .   X_t - Time derivative of current state
2561: -   actx - Landau context

2563:  Output Parameter:
2564: .   F  - The residual

2566:  Level: beginner

2568:  .keywords: mesh
2569: .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2570:  @*/
2571: PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2572: {
2573:   LandauCtx *ctx = (LandauCtx *)actx;
2574:   PetscInt   dim;
2575:   DM         pack;
2576: #if defined(PETSC_HAVE_THREADSAFETY)
2577:   double starttime, endtime;
2578: #endif
2579:   PetscObjectState state;

2581:   PetscFunctionBegin;
2582:   PetscCall(TSGetDM(ts, &pack));
2583:   PetscCall(DMGetApplicationContext(pack, &ctx));
2584:   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2585:   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2586:   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2587:   PetscCall(PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0));
2588: #if defined(PETSC_HAVE_THREADSAFETY)
2589:   starttime = MPI_Wtime();
2590: #endif
2591:   PetscCall(DMGetDimension(pack, &dim));
2592:   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2593:   if (state != ctx->norm_state) {
2594:     PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n", (double)time_dummy, ctx->norm_state, state));
2595:     PetscCall(MatZeroEntries(ctx->J));
2596:     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx));
2597:     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2598:     PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2599:     ctx->norm_state = state;
2600:   } else {
2601:     PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n", state));
2602:   }
2603:   /* mat vec for op */
2604:   PetscCall(MatMult(ctx->J, X, F)); /* C*f */
2605:   /* add time term */
2606:   if (X_t) PetscCall(MatMultAdd(ctx->M, X_t, F, F));
2607: #if defined(PETSC_HAVE_THREADSAFETY)
2608:   if (ctx->stage) {
2609:     endtime = MPI_Wtime();
2610:     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2611:     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2612:     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2613:     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2614:   }
2615: #endif
2616:   PetscCall(PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0));
2617:   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2618:   if (ctx->stage) PetscCall(PetscLogStagePop());
2619:   PetscFunctionReturn(PETSC_SUCCESS);
2620: }

2622: /*@
2623:  DMPlexLandauIJacobian - TS Jacobian construction; note that this mainly adds the shifted mass term to the Jacobian built in DMPlexLandauIFunction()

2625:  Collective

2627:  Input Parameters:
2628: +   ts  - The time stepping context
2629: .   time_dummy - current time (not used)
2630: .   X - Current state
2631: .   U_tdummy - Time derivative of current state (not used)
2632: .   shift - shift for du/dt term
2633: -   actx - Landau context

2635:  Output Parameters:
2636: +   Amat  - Jacobian
2637: -   Pmat  - same as Amat

2639:  Level: beginner

2641:  .keywords: mesh
2642: .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2643:  @*/
2644: PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2645: {
2646:   LandauCtx *ctx = NULL;
2647:   PetscInt   dim;
2648:   DM         pack;
2649: #if defined(PETSC_HAVE_THREADSAFETY)
2650:   double starttime, endtime;
2651: #endif
2652:   PetscObjectState state;

2654:   PetscFunctionBegin;
2655:   PetscCall(TSGetDM(ts, &pack));
2656:   PetscCall(DMGetApplicationContext(pack, &ctx));
2657:   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2658:   PetscCheck(Amat == Pmat && Amat == ctx->J, ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2659:   PetscCall(DMGetDimension(pack, &dim));
2660:   /* get collision Jacobian into A */
2661:   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2662:   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2663:   PetscCall(PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0));
2664: #if defined(PETSC_HAVE_THREADSAFETY)
2665:   starttime = MPI_Wtime();
2666: #endif
2667:   PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n", (double)time_dummy, (double)shift));
2668:   PetscCheck(shift != 0.0, ctx->comm, PETSC_ERR_PLIB, "zero shift");
2669:   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2670:   PetscCheck(state == ctx->norm_state, ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "", ctx->norm_state, state);
2671:   if (!ctx->use_matrix_mass) {
2672:     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx));
2673:   } else { /* add mass */
2674:     PetscCall(MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN));
2675:   }
2676: #if defined(PETSC_HAVE_THREADSAFETY)
2677:   if (ctx->stage) {
2678:     endtime = MPI_Wtime();
2679:     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2680:     ctx->times[LANDAU_MASS] += (endtime - starttime);
2681:     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2682:   }
2683: #endif
2684:   PetscCall(PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0));
2685:   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2686:   if (ctx->stage) PetscCall(PetscLogStagePop());
2687:   PetscFunctionReturn(PETSC_SUCCESS);
2688: }
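
/* Illustration only (not part of the original source): wiring the two callbacks above into a TS.
   A minimal sketch assuming `pack`, `X`, and `J` come from DMPlexLandauCreateVelocitySpace();
   the Landau context is found through the DM, so the user-context arguments may be NULL. */
static PetscErrorCode ExampleSetupAndSolveTS_Sketch(MPI_Comm comm, DM pack, Vec X, Mat J)
{
  TS ts;

  PetscFunctionBegin;
  PetscCall(TSCreate(comm, &ts));
  PetscCall(TSSetDM(ts, pack));
  PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
  PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL)); /* Amat == Pmat == ctx->J, as required above */
  PetscCall(TSSetFromOptions(ts));
  PetscCall(TSSolve(ts, X));
  PetscCall(TSDestroy(&ts));
  PetscFunctionReturn(PETSC_SUCCESS);
}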