Actual source code: ex30.c

  1: static char help[] = "Grid-based Landau collision operator with a PIC interface and OpenMP setup (one species per grid).\n";

  3: /*
  4:    Supports 2.5V with axisymmetric coordinates
  5:      - r,z coordinates
  6:      - Domain and species data are input by the Landau operator
  7:      - a "radius" for each grid, normalized by the electron thermal velocity
  8:      - Domain: (0,radius) x (-radius,radius), so the first coordinate x[0] is the perpendicular velocity and a 2*pi*x[0] factor is added for axisymmetry
  9:    Supports full 3V

 11:  */
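     /*
        Overall flow (as implemented below): for each batch of spatial vertices, fake user
        particles are generated per grid, deposited onto the velocity-space FE grid with
        particlesToGrid() (rho = M_p^T w, followed by a mass-matrix solve for the FE
        coefficients), advanced in time with the batched Landau operator via TSSolve(), and
        finally mapped back to particle weights with gridToParticles() (a pseudo-inverse of
        M_p). Moments are accumulated before and after as a conservation check.
     */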

 13: #include "petscdmplex.h"
 14: #include "petscds.h"
 15: #include "petscdmswarm.h"
 16: #include "petscksp.h"
 17: #include <petsc/private/petscimpl.h>
 18: #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
 19:   #include <omp.h>
 20: #endif
 21: #include <petsclandau.h>
 22: #include <petscdmcomposite.h>

 24: typedef struct {
 25:   Mat MpTrans;
 26:   Mat Mp;
 27:   Vec ff;
 28:   Vec uu;
 29: } MatShellCtx;
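     /*
        MatShellCtx backs the MATSHELL normal-equations operator used in gridToParticles():
        Mp is the particle mass matrix, MpTrans its explicit transpose, and ff/uu are work
        vectors sized for the particle and grid spaces, respectively. The shell routines
        below apply M_p^T M_p without ever forming the product.
     */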

 31: PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
 32: {
 33:   MatShellCtx *matshellctx;

 35:   PetscFunctionBeginUser;
 36:   PetscCall(MatShellGetContext(MtM, &matshellctx));
 37:   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
 38:   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
 39:   PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
 40:   PetscFunctionReturn(PETSC_SUCCESS);
 41: }

 43: PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
 44: {
 45:   MatShellCtx *matshellctx;

 47:   PetscFunctionBeginUser;
 48:   PetscCall(MatShellGetContext(MtM, &matshellctx));
 49:   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
 50:   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
 51:   PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
 52:   PetscFunctionReturn(PETSC_SUCCESS);
 53: }
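     /*
        Create a DMSWARM_PIC swarm attached to the cell DM with a single scalar field "w_q"
        holding the particle weights. The local sizes are set later, in particlesToGrid().
     */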

 55: PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
 56: {
 57:   PetscInt Nc = 1;

 59:   PetscFunctionBeginUser;
 60:   PetscCall(DMCreate(PETSC_COMM_SELF, sw));
 61:   PetscCall(DMSetType(*sw, DMSWARM));
 62:   PetscCall(DMSetDimension(*sw, dim));
 63:   PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
 64:   PetscCall(DMSwarmSetCellDM(*sw, dm));
 65:   PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR));
 66:   PetscCall(DMSwarmFinalizeFieldRegister(*sw));
 67:   PetscCall(DMSetFromOptions(*sw));
 68:   PetscFunctionReturn(PETSC_SUCCESS);
 69: }
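     /*
        Map a grid solution back to particle weights. The FE coefficients in 'rhs' are first
        multiplied by the FE mass matrix to form a weak right-hand side, then a pseudo-inverse
        of the particle mass matrix M_p is applied: either LSQR on M_p directly (ftop_ prefix),
        or a KSP on the shell normal-equations operator M_p^T M_p with the diagonal
        preconditioning matrix D built below.
     */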

 71: PetscErrorCode gridToParticles(const DM dm, DM sw, Vec rhs, Vec work, Mat M_p, Mat Mass)
 72: {
 73:   PetscBool    is_lsqr;
 74:   KSP          ksp;
 75:   Mat          PM_p = NULL, MtM, D;
 76:   Vec          ff;
 77:   PetscInt     N, M, nzl;
 78:   MatShellCtx *matshellctx;

 80:   PetscFunctionBeginUser;
 81:   PetscCall(MatMult(Mass, rhs, work));
 82:   PetscCall(VecCopy(work, rhs));
 83:   // pseudo-inverse
 84:   PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
 85:   PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
 86:   PetscCall(KSPSetFromOptions(ksp));
 87:   PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
 88:   if (!is_lsqr) {
 89:     PetscCall(MatGetLocalSize(M_p, &M, &N));
 90:     if (N > M) {
 91:       PC pc;
 92:       PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < N (%" PetscInt_FMT ") -- revert to lsqr\n", M, N));
 93:       is_lsqr = PETSC_TRUE;
 94:       PetscCall(KSPSetType(ksp, KSPLSQR));
 95:       PetscCall(KSPGetPC(ksp, &pc));
 96:       PetscCall(PCSetType(pc, PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
 97:     } else {
 98:       PetscCall(PetscNew(&matshellctx));
 99:       PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
100:       PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
101:       matshellctx->Mp = M_p;
102:       PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
103:       PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
104:       PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
105:       PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
106:       PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_Mp_mat_view"));
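           /* Build D = diag(M_p^T M_p): each diagonal entry is the squared 2-norm of a row of
              M_p^T (i.e., of a column of M_p). D is used as the preconditioning matrix for the
              shell operator in KSPSetOperators() below. */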
107:       for (int i = 0; i < N; i++) {
108:         const PetscScalar *vals;
109:         const PetscInt    *cols;
110:         PetscScalar        dot = 0;
111:         PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
112:         for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
113:         PetscCheck(dot != 0.0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
114:         PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
115:       }
116:       PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
117:       PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
118:       PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
119:       PetscCall(KSPSetOperators(ksp, MtM, D));
120:       PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
121:       PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
122:       PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
123:     }
124:   }
125:   if (is_lsqr) {
126:     PC        pc;
127:     PetscBool is_bjac;
128:     PetscCall(KSPGetPC(ksp, &pc));
129:     PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac));
130:     if (is_bjac) {
131:       PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
132:       PetscCall(KSPSetOperators(ksp, M_p, PM_p));
133:     } else {
134:       PetscCall(KSPSetOperators(ksp, M_p, M_p));
135:     }
136:   }
137:   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
138:   if (!is_lsqr) {
139:     PetscCall(KSPSolve(ksp, rhs, matshellctx->uu));
140:     PetscCall(MatMult(M_p, matshellctx->uu, ff));
141:     PetscCall(MatDestroy(&matshellctx->MpTrans));
142:     PetscCall(VecDestroy(&matshellctx->ff));
143:     PetscCall(VecDestroy(&matshellctx->uu));
144:     PetscCall(MatDestroy(&D));
145:     PetscCall(MatDestroy(&MtM));
146:     PetscCall(PetscFree(matshellctx));
147:   } else {
148:     PetscCall(KSPSolveTranspose(ksp, rhs, ff));
149:   }
150:   PetscCall(KSPDestroy(&ksp));
151:   /* Visualize particle field */
152:   PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
153:   PetscCall(MatDestroy(&PM_p));
154:   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

156:   PetscFunctionReturn(PETSC_SUCCESS);
157: }
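     /*
        Deposit raw particle data onto the grid: set coordinates and weights on the swarm,
        migrate particles to their owning cells, build the particle mass matrix M_p, and
        compute rho = M_p^T f, i.e. the weak-form deposition \int_\Omega phi f. The caller
        still applies M^{-1} to obtain FE coefficients (done with the ptof_ KSP in go()).
     */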

159: PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[], const PetscReal a_wp[], Vec rho, Mat *Mp_out)
160: {
161:   PetscBool     removePoints = PETSC_TRUE;
162:   PetscReal    *wq, *coords;
163:   PetscDataType dtype;
164:   Mat           M_p;
165:   Vec           ff;
166:   PetscInt      bs, p, zero = 0;

168:   PetscFunctionBeginUser;
169:   PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
170:   PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
171:   PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
172:   for (p = 0; p < Np; p++) {
173:     coords[p * dim + 0] = xx[p];
174:     coords[p * dim + 1] = yy[p];
175:     wq[p]               = a_wp[p];
176:     if (dim == 3) coords[p * dim + 2] = zz[p];
177:   }
178:   PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
179:   PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
180:   PetscCall(DMSwarmMigrate(sw, removePoints));
181:   PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));

183:   /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
184:   PetscCall(DMCreateMassMatrix(sw, dm, &M_p));

186:   PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
187:   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
188:   PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
189:   PetscCall(MatMultTranspose(M_p, ff, rho));
190:   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

192:   // output
193:   *Mp_out = M_p;

195:   PetscFunctionReturn(PETSC_SUCCESS);
196: }
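     /*
        Normalized Maxwellian used to set the fake particle weights:
          u(v) = n * (pi*theta)^{-3/2} * exp(-|v|^2 / theta),  theta = 2 kT/m (code units).
        Note: in the 2.5V (r,z) case the axisymmetric 2*pi*x[0] factor is applied later,
        when the moments are accumulated, not here.
     */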
197: static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u)
198: {
199:   PetscInt  i;
200:   PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */

202:   /* compute the exponents, v^2 */
203:   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
204:   /* evaluate the Maxwellian */
205:   u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
206: }

208: #define MAX_NUM_THRDS 12
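     /*
        Driver for the particle-in-cell loop. The NUserV vertices are processed in batches of
        ctx->batch_sz; within a batch, "fake threads" of size numthreads create particles,
        create swarms, deposit to the grids, and, after TSSolve() advances the batched Landau
        system, map the solution back to the particles. b_target and g_target select one
        vertex and one grid used for viewing and moment diagnostics.
     */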
209: PetscErrorCode go(TS ts, Vec X, const PetscInt NUserV, const PetscInt a_Np, const PetscInt dim, const PetscInt b_target, const PetscInt g_target)
210: {
211:   DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
212:   Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
213:   KSP            t_ksp[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
214:   Vec            t_fhat[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
215:   PetscInt       nDMs, glb_b_id, nTargetP = 0;
216:   PetscErrorCode ierr = 0;
217: #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
218:   PetscInt numthreads = PetscNumOMPThreads;
219: #else
220:   PetscInt numthreads = 1;
221: #endif
222:   LandauCtx *ctx;
223:   Vec       *globXArray;
224:   PetscReal  moments_0[3], moments_1[3], dt_init;

226:   PetscFunctionBeginUser;
227:   PetscCheck(numthreads <= MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
 228:   PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of threads %" PetscInt_FMT " must be positive", numthreads);
229:   PetscCall(TSGetDM(ts, &pack));
230:   PetscCall(DMGetApplicationContext(pack, &ctx));
 231:   PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT " must be divisible by the number of threads %" PetscInt_FMT, ctx->batch_sz, numthreads);
232:   PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
 233:   PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " total grids, a Landau batch size of %" PetscInt_FMT ", and %" PetscInt_FMT " global items (vertices)\n", ctx->num_grids, ctx->batch_sz, NUserV));
234:   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
235:   PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
236:   PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
237:   PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
238:   // create mass matrices
239:   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
 240:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // create per-grid mass matrices and per-thread solvers
241:     Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
242:     DM           dm   = ctx->plex[grid];
243:     PetscSection s;
244:     grid_dm[grid] = dm;
245:     PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
246:     //
247:     PetscCall(DMGetLocalSection(dm, &s));
248:     PetscCall(DMPlexCreateClosureIndex(dm, s));
249:     for (int tid = 0; tid < numthreads; tid++) {
250:       PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
251:       PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
252:       PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
253:       PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
254:       PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
255:     }
256:   }
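       /* At this point each grid has an FE mass matrix and, per (grid, thread) pair, a work
          vector and a KSP with prefix ptof_, so the mass solves inside the OpenMP loops
          below do not share solver state. */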
257:   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
 258:   // create particle raw data; could use OpenMP with a thread-safe malloc, but this is just the fake user
259:   for (int i = 0; i < 3; i++) moments_0[i] = moments_1[i] = 0;
260:   PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
261:   for (PetscInt global_batch_id = 0; global_batch_id < NUserV; global_batch_id += ctx->batch_sz) {
262:     ierr = TSSetTime(ts, 0);
263:     CHKERRQ(ierr);
264:     ierr = TSSetStepNumber(ts, 0);
265:     CHKERRQ(ierr);
266:     ierr = TSSetTimeStep(ts, dt_init);
267:     CHKERRQ(ierr);
268:     PetscCall(VecZeroEntries(X));
269:     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
270:     if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], "rho"));
271:     // create fake particles
272:     for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
273:       PetscReal *xx_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
274:       PetscInt   Np_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
275:       // make particles
276:       for (int tid = 0; tid < numthreads; tid++) {
277:         const PetscInt b_id = b_id_0 + tid;
278:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {        // the ragged edge of the last batch
 279:           PetscInt Npp0 = a_Np + (glb_b_id % a_Np), NN;            // fake user: number of particles per dimension, with some load imbalance added across vertices (< 2x difference)
 280:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
281:             const PetscReal kT_m = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0); /* theta = 2kT/mc^2 per species -- TODO */
283:             PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
284:             PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
285:             if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
286:             else Npi = Npj = Npk = Npp0;
287:             // User: use glb_b_id to index into your data
 288:             NN = Npi * Npj * Npk; // make a regular grid of particles, Npi x Npj (x Npk in 3D)
289:             if (glb_b_id == b_target) {
290:               nTargetP = NN;
 291:               PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particles\n", glb_b_id, NN));
292:             }
293:             Np_t[grid][tid] = NN;
294:             PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
295:             hp[0] = (hi[0] - lo[0]) / Npi;
296:             hp[1] = (hi[1] - lo[1]) / Npj;
297:             hp[2] = (hi[2] - lo[2]) / Npk;
298:             if (dim == 2) hp[2] = 1;
299:             PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
300:             vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
301:             PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_b_id, grid, NN, b_target));
302:             for (int pj = 0, pp = 0; pj < Npj; pj++) {
303:               for (int pk = 0; pk < Npk; pk++) {
304:                 for (int pi = 0; pi < Npi; pi++, pp++) {
305:                   xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
306:                   yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
307:                   if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
308:                   {
309:                     PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
310:                     maxwellian(dim, x, kT_m, vole, &wp_t[grid][tid][pp]);
311:                     //PetscCall(PetscInfo(pack,"%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim==2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp])); // temp
312:                     if (glb_b_id == b_target) {
313:                       PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1;
314:                       for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
315:                       moments_0[0] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
316:                       moments_0[1] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * x[1]; // z-momentum
317:                       moments_0[2] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
318:                     }
319:                   }
320:                 }
321:               }
322:             }
323:           } // grid
324:         }   // active
325:       }     // fake threads
326:       /* Create particle swarm */
327:       PetscPragmaOMP(parallel for)
328:       for (int tid = 0; tid < numthreads; tid++) {
329:         const PetscInt b_id = b_id_0 + tid;
330:         if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
331:           //PetscCall(PetscInfo(pack,"Create swarms for 'glob' index %" PetscInt_FMT " create swarm\n",glb_b_id));
 332:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // create a swarm for each grid
333:             PetscErrorCode ierr_t;
334:             PetscSection   section;
335:             PetscInt       Nf;
336:             DM             dm = grid_dm[grid];
337:             ierr_t            = DMGetLocalSection(dm, &section);
338:             ierr_t            = PetscSectionGetNumFields(section, &Nf);
339:             if (Nf != 1) ierr_t = 9999;
340:             else {
341:               ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
342:               ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n", b_id, grid, LAND_PACK_IDX(b_id, grid));
343:               ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(b_id, grid)]);
344:             }
345:             if (ierr_t) ierr = ierr_t;
346:           }
347:         } // active
348:       }
 349:       PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only one species per grid is supported");
350:       PetscCall(ierr);
351:       // p --> g: make globMpArray & set X
352:       PetscPragmaOMP(parallel for)
353:       for (int tid = 0; tid < numthreads; tid++) {
354:         const PetscInt b_id = b_id_0 + tid;
355:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
 356:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // deposit particles on each grid
357:             PetscErrorCode ierr_t;
358:             DM             dm   = grid_dm[grid];
359:             DM             sw   = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
360:             Vec            subX = globXArray[LAND_PACK_IDX(b_id, grid)], work = t_fhat[grid][tid];
 361:             ierr_t = PetscInfo(pack, "particlesToGrid: global batch %" PetscInt_FMT ", grid %" PetscInt_FMT ", local batch index %" PetscInt_FMT "\n", global_batch_id, grid, LAND_PACK_IDX(b_id, grid));
362:             ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid], wp_t[grid][tid], subX, &globMpArray[LAND_PACK_IDX(b_id, grid)]);
363:             if (ierr_t) ierr = ierr_t;
 364:             // u = M^{-1} f_w
365:             ierr_t = VecCopy(subX, work);
366:             ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
367:             if (ierr_t) ierr = ierr_t;
368:           }
369:         }
370:       }
371:       PetscCall(ierr);
372:       /* Cleanup */
373:       for (int tid = 0; tid < numthreads; tid++) {
374:         const PetscInt b_id = b_id_0 + tid;
375:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
376:           PetscCall(PetscInfo(pack, "Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n", glb_b_id + 1, NUserV));
 377:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // free the fake-user particle data for each grid
378:             PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
379:           }
380:         } // active
381:       }
382:     } // Landau
383:     if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
384:     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
385:     PetscCall(DMPlexLandauPrintNorms(X, 0));
386:     // advance
387:     PetscCall(TSSetSolution(ts, X));
388:     PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n", global_batch_id, global_batch_id + ctx->batch_sz));
389:     PetscCall(TSSolve(ts, X));
390:     PetscCall(DMPlexLandauPrintNorms(X, 1));
391:     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
392:     // map back to particles
393:     for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
394:       PetscCall(PetscInfo(pack, "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_batch_id + 1, NUserV, b_id_0 + 1, ctx->batch_sz));
395:       PetscPragmaOMP(parallel for)
396:       for (int tid = 0; tid < numthreads; tid++) {
397:         const PetscInt b_id = b_id_0 + tid;
398:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
 399:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // map each grid's solution back to its particles
400:             PetscErrorCode ierr_t;
401:             ierr_t = PetscInfo(pack, "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_batch_id, b_id, grid, LAND_PACK_IDX(b_id, grid));
402:             ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(b_id, grid)], globXArray[LAND_PACK_IDX(b_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(b_id, grid)], g_Mass[grid]);
403:             if (ierr_t) ierr = ierr_t;
404:           }
405:         }
406:       }
407:       PetscCall(ierr);
408:       /* Cleanup, and get data */
409:       PetscCall(PetscInfo(pack, "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", b_id_0, b_id_0 + numthreads));
410:       for (int tid = 0; tid < numthreads; tid++) {
411:         const PetscInt b_id = b_id_0 + tid;
412:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
413:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
414:             PetscDataType dtype;
415:             PetscReal    *wp, *coords;
416:             DM            sw = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
417:             PetscInt      npoints, bs = 1;
418:             PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
419:             if (glb_b_id == b_target) {
420:               PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
421:               PetscCall(DMSwarmGetLocalSize(sw, &npoints));
422:               for (int p = 0; p < npoints; p++) {
423:                 PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1;
424:                 for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
425:                 moments_1[0] += fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
426:                 moments_1[1] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * coords[p * dim + 1]; // z-momentum
427:                 moments_1[2] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
428:               }
429:               PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
430:             }
431:             PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
432:             PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(b_id, grid)]));
433:             PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(b_id, grid)]));
434:           }
435:         }
436:       }
437:     } // thread batch
438:     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
439:   } // user batch
440:   /* Cleanup */
441:   PetscCall(PetscFree(globXArray));
442:   PetscCall(PetscFree(globSwarmArray));
443:   PetscCall(PetscFree(globMpArray));
444:   // clean up mass matrices
 445:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // per-grid cleanup
446:     PetscCall(MatDestroy(&g_Mass[grid]));
447:     for (int tid = 0; tid < numthreads; tid++) {
448:       PetscCall(VecDestroy(&t_fhat[grid][tid]));
449:       PetscCall(KSPDestroy(&t_ksp[grid][tid]));
450:     }
451:   }
 452:   PetscCall(PetscInfo(X, "Total number density: %20.12e (%20.12e); z-momentum = %20.12e (%20.12e); energy = %20.12e (%20.12e); error = %e (log10 of error = %" PetscInt_FMT "), %" PetscInt_FMT " particles. Used %" PetscInt_FMT " threads\n", (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2], (double)moments_0[2], (double)((moments_1[2] - moments_0[2]) / moments_0[2]), (PetscInt)PetscLog10Real(PetscAbsReal((moments_1[2] - moments_0[2]) / moments_0[2])), nTargetP, numthreads));
453:   PetscFunctionReturn(PETSC_SUCCESS);
454: }
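     /*
        main() builds the batched velocity-space mesh and Landau operator with
        DMPlexLandauCreateVelocitySpace(), sets up an implicit TS using DMPlexLandauIFunction
        and DMPlexLandauIJacobian, runs go() once as a warm-up, and then runs it again inside
        the logged "ex30 hot solve" stage.
     */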

456: int main(int argc, char **argv)
457: {
458:   DM         pack;
459:   Vec        X;
460:   PetscInt   dim = 2, nvert = 1, Np = 10, btarget = 0, gtarget = 0;
461:   TS         ts;
462:   Mat        J;
463:   LandauCtx *ctx;
464: #if defined(PETSC_USE_LOG)
465:   PetscLogStage stage;
466: #endif

468:   PetscFunctionBeginUser;
469:   PetscCall(PetscInitialize(&argc, &argv, NULL, help));
470:   // process args
471:   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
472:   PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", nvert, &nvert, NULL));
473:   PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
474:   PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL));
475:   PetscCall(PetscOptionsInt("-view_vertex_target", "Batch to view with diagnostics", "ex30.c", btarget, &btarget, NULL));
476:   PetscCheck(btarget < nvert, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Batch to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, btarget, nvert);
477:   PetscCall(PetscOptionsInt("-view_grid_target", "Grid to view with diagnostics", "ex30.c", gtarget, &gtarget, NULL));
478:   PetscOptionsEnd();
479:   /* Create a mesh */
480:   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
481:   PetscCall(DMSetUp(pack));
482:   PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
483:   PetscCall(DMGetApplicationContext(pack, &ctx));
484:   PetscCheck(gtarget < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, gtarget, ctx->num_grids);
 485:   PetscCheck(nvert >= ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of vertices %" PetscInt_FMT " should be >= batch size %" PetscInt_FMT, nvert, ctx->batch_sz);
486:   /* Create timestepping solver context */
487:   PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
488:   PetscCall(TSSetDM(ts, pack));
489:   PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
490:   PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
491:   PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
492:   PetscCall(TSSetFromOptions(ts));
493:   PetscCall(PetscObjectSetName((PetscObject)X, "X"));
494:   // do particle advance, warmup
495:   PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
496:   PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
497:   // hot
498:   PetscCall(PetscLogStageRegister("ex30 hot solve", &stage));
499:   PetscCall(PetscLogStagePush(stage));
500:   PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
501:   PetscCall(PetscLogStagePop());
502:   /* clean up */
503:   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
504:   PetscCall(TSDestroy(&ts));
505:   PetscCall(VecDestroy(&X));
506:   PetscCall(PetscFinalize());
507:   return 0;
508: }

510: /*TEST

512:   build:
513:     requires: !complex p4est

515:   testset:
516:     requires: double defined(PETSC_USE_DMLANDAU_2D)
517:     output_file: output/ex30_0.out
518:     args: -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
519:           -dm_landau_amr_post_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
520:           -dm_landau_batch_size 2 -number_spatial_vertices 3 -dm_landau_batch_view_idx 1 -view_vertex_target 2 -view_grid_target 1 \
521:           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
522:           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
523:           -ksp_type preonly -pc_type lu \
524:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
525:           -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14\
526:           -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

528:     test:
529:       suffix: cpu
530:       args: -dm_landau_device_type cpu
531:     test:
532:       suffix: kokkos
533:       requires: kokkos_kernels
534:       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
535:     test:
536:       suffix: cuda
537:       requires: cuda
538:       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda

540:   testset:
541:     requires: double !defined(PETSC_USE_DMLANDAU_2D)
542:     output_file: output/ex30_3d.out
543:     args: -dim 3 -petscspace_degree 2 -dm_landau_type p8est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
544:           -dm_landau_amr_post_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
545:           -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -view_vertex_target 0 -view_grid_target 0 \
546:           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
547:           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
548:           -ksp_type preonly -pc_type lu \
549:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
550:           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12\
551:           -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

553:     test:
554:       suffix: cpu_3d
555:       args: -dm_landau_device_type cpu
556:     test:
557:       suffix: kokkos_3d
558:       requires: kokkos_kernels
559:       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
560:     test:
561:       suffix: cuda_3d
562:       requires: cuda
563:       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda

565: TEST*/