Actual source code: ex7.c
static char help[] = "Example program demonstrating projection between particle and finite element spaces using OpenMP in 2D cylindrical coordinates\n";

#include "petscdmplex.h"
#include "petscds.h"
#include "petscdmswarm.h"
#include "petscksp.h"
#include <petsc/private/petscimpl.h>
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  #include <omp.h>
#endif

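/*
  Shell-matrix context used by gridToParticles(): Mp is the particle mass matrix M_p and
  MpTrans its explicit transpose, with ff and uu as particle- and grid-sized work vectors,
  so the shell operator below can apply M_p^T M_p without ever assembling that product.
*/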
typedef struct {
  Mat MpTrans;
  Mat Mp;
  Vec ff;
  Vec uu;
} MatShellCtx;

PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
{
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(MtM, &matshellctx));
  PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
  PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
  PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
{
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(MtM, &matshellctx));
  PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
  PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
  PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
  PetscFunctionReturn(PETSC_SUCCESS);
}

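/*
  createSwarm - create a DMSWARM_PIC particle swarm attached to the cell DM 'dm',
  registering a single scalar field "w_q" that holds the particle weights.
*/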
PetscErrorCode createSwarm(const DM dm, DM *sw)
{
  PetscInt Nc = 1, dim = 2;

  PetscFunctionBeginUser;
  PetscCall(DMCreate(PETSC_COMM_SELF, sw));
  PetscCall(DMSetType(*sw, DMSWARM));
  PetscCall(DMSetDimension(*sw, dim));
  PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
  PetscCall(DMSwarmSetCellDM(*sw, dm));
  PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR));
  PetscCall(DMSwarmFinalizeFieldRegister(*sw));
  PetscCall(DMSetFromOptions(*sw));
  PetscFunctionReturn(PETSC_SUCCESS);
}

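/*
  gridToParticles - project a finite element right-hand side (rhs) back onto the particle weights.
  With a non-LSQR KSP and at least as many particles as FE dofs, it solves the normal equations
  (M_p^T M_p) u = rhs using the shell operator above, preconditioned by the explicit diagonal
  D_ii = sum_j (M_p)_ji^2, and then sets the weights f = M_p u. Otherwise it solves M_p^T f = rhs
  in the least-squares sense with LSQR via KSPSolveTranspose(). If 'moments' is non-NULL, the
  zeroth, first (x), and second moments of the resulting particle distribution are accumulated
  for the conservation check in go().
*/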
PetscErrorCode gridToParticles(const DM dm, DM sw, PetscReal *moments, Vec rhs, Mat M_p)
{
  PetscBool     is_lsqr;
  KSP           ksp;
  Mat           PM_p = NULL, MtM, D;
  Vec           ff;
  PetscInt      Np, timestep = 0, bs, N, M, nzl;
  PetscReal     time = 0.0;
  PetscDataType dtype;
  MatShellCtx  *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
  PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
  PetscCall(KSPSetFromOptions(ksp));
  PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
  if (!is_lsqr) {
    PetscCall(MatGetLocalSize(M_p, &M, &N));
    if (N > M) {
      PC pc;
      PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < N (%" PetscInt_FMT ") -- revert to lsqr\n", M, N));
      is_lsqr = PETSC_TRUE;
      PetscCall(KSPSetType(ksp, KSPLSQR));
      PetscCall(KSPGetPC(ksp, &pc));
      PetscCall(PCSetType(pc, PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
    } else {
      PetscCall(PetscNew(&matshellctx));
      PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
      PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
      matshellctx->Mp = M_p;
      PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
      PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
      PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
      PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
      for (int i = 0; i < N; i++) {
        const PetscScalar *vals;
        const PetscInt    *cols;
        PetscScalar        dot = 0;
        PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
        for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
        PetscCheck(dot != 0.0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
        PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
        PetscCall(MatRestoreRow(matshellctx->MpTrans, i, &nzl, &cols, &vals)); // rows accessed with MatGetRow() must be restored
      }
      PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
      PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
      PetscCall(KSPSetOperators(ksp, MtM, D));
      PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
      PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
      PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
    }
  }
  if (is_lsqr) {
    PC        pc;
    PetscBool is_bjac;
    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac));
    if (is_bjac) {
      PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
      PetscCall(KSPSetOperators(ksp, M_p, PM_p));
    } else {
      PetscCall(KSPSetOperators(ksp, M_p, M_p));
    }
  }
  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access !!!!!
  if (!is_lsqr) {
    PetscCall(KSPSolve(ksp, rhs, matshellctx->uu));
    PetscCall(MatMult(M_p, matshellctx->uu, ff));
    PetscCall(MatDestroy(&matshellctx->MpTrans));
    PetscCall(VecDestroy(&matshellctx->ff));
    PetscCall(VecDestroy(&matshellctx->uu));
    PetscCall(MatDestroy(&D));
    PetscCall(MatDestroy(&MtM));
    PetscCall(PetscFree(matshellctx));
  } else {
    PetscCall(KSPSolveTranspose(ksp, rhs, ff));
  }
  PetscCall(KSPDestroy(&ksp));
  /* Visualize particle field */
  PetscCall(DMSetOutputSequenceNumber(sw, timestep, time));
  PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  /* compute moments: number density, x-momentum, energy */
  if (moments) {
    PetscReal *wq, *coords;
    PetscCall(DMSwarmGetLocalSize(sw, &Np));
    PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
    PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
    moments[0] = moments[1] = moments[2] = 0;
    for (int p = 0; p < Np; p++) {
      moments[0] += wq[p];
      moments[1] += wq[p] * coords[p * 2 + 0]; // x-momentum
      moments[2] += wq[p] * (PetscSqr(coords[p * 2 + 0]) + PetscSqr(coords[p * 2 + 1]));
    }
    PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
    PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
  }
  PetscCall(MatDestroy(&PM_p));
  PetscFunctionReturn(PETSC_SUCCESS);
}

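/*
  particlesToGrid - deposit the given particle coordinates (xx, yy) and weights (a_wp) into the
  swarm, migrate particles to their owning cells, and project them onto the finite element space:
  with the particle mass matrix M_p, the grid field is rho = M_p^T w, i.e. the right-hand side
  \int_\Omega \phi f noted below. The mass matrix is returned in Mp_out so the caller can do the
  reverse (grid-to-particle) projection.
*/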
PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscInt target, const PetscReal xx[], const PetscReal yy[], const PetscReal a_wp[], Vec rho, Mat *Mp_out)
{
  PetscBool     removePoints = PETSC_TRUE;
  PetscReal    *wq, *coords;
  PetscDataType dtype;
  Mat           M_p;
  Vec           ff;
  PetscInt      bs, p, zero = 0;

  PetscFunctionBeginUser;
  PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
  PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
  PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
  for (p = 0; p < Np; p++) {
    coords[p * 2 + 0] = xx[p];
    coords[p * 2 + 1] = yy[p];
    wq[p] = a_wp[p];
  }
  PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
  PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
  PetscCall(DMSwarmMigrate(sw, removePoints));
  PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));
  if (a_tid == target) PetscCall(DMViewFromOptions(sw, NULL, "-swarm_view"));

  /* Project particles to field */
  /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
  PetscCall(DMCreateMassMatrix(sw, dm, &M_p));
  PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));

  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access !!!!!
  PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
  PetscCall(MatMultTranspose(M_p, ff, rho));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  /* Visualize mesh field */
  if (a_tid == target) PetscCall(VecViewFromOptions(rho, NULL, "-rho_view"));
  // output
  *Mp_out = M_p;

  PetscFunctionReturn(PETSC_SUCCESS);
}

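/*
  maxwellian - evaluate a Maxwellian density n * (pi*theta)^{-3/2} * exp(-|v|^2/theta),
  with theta = 2kT/m, multiplied by 2*pi*r (r = x[1]) to account for the volume element
  in 2D cylindrical (axisymmetric) coordinates. Used to set the initial particle weights.
*/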
static PetscErrorCode maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u)
{
  PetscInt  i;
  PetscReal v2 = 0, theta = 2 * kt_m; /* theta = 2kT/mc^2 */

  PetscFunctionBegin;
  /* compute the exponents, v^2 */
  for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* evaluate the Maxwellian */
  u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta)) * 2. * PETSC_PI * x[1]; // radial term for 2D axi-sym.
  PetscFunctionReturn(PETSC_SUCCESS);
}

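/*
  go - driver that builds one independent DMPLEX mesh, FE space, and particle swarm per OpenMP
  thread, deposits Maxwellian-weighted particles (particlesToGrid), projects the resulting grid
  field back to the particles (gridToParticles), and compares the zeroth, first, and second
  moments before and after to check conservation. The threaded loops use PetscCallAbort()
  because error returns cannot propagate out of an OpenMP parallel region.
*/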
#define MAX_NUM_THRDS 12
PetscErrorCode go()
{
  DM        dm_t[MAX_NUM_THRDS], sw_t[MAX_NUM_THRDS];
  PetscFE   fe;
  PetscInt  dim = 2, Nc = 1, i, faces[3];
  PetscInt  Np[2] = {10, 10}, Np2[2], field = 0, target = 0, Np_t[MAX_NUM_THRDS];
  PetscReal moments_0[3], moments_1[3], vol = 1;
  PetscReal lo[3] = {-5, 0, -5}, hi[3] = {5, 5, 5}, h[3], hp[3], *xx_t[MAX_NUM_THRDS], *yy_t[MAX_NUM_THRDS], *wp_t[MAX_NUM_THRDS];
  Vec       rho_t[MAX_NUM_THRDS], rhs_t[MAX_NUM_THRDS];
  Mat       M_p_t[MAX_NUM_THRDS];
#if defined PETSC_USE_LOG
  PetscLogStage stage;
  PetscLogEvent swarm_create_ev, solve_ev, solve_loop_ev;
#endif
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  PetscInt numthreads = PetscNumOMPThreads;
#else
  PetscInt numthreads = 1;
#endif

  PetscFunctionBeginUser;
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  PetscCheck(numthreads <= MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
  PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Need at least one thread, have %" PetscInt_FMT, numthreads);
#endif
  if (target >= numthreads) target = numthreads - 1;
  PetscCall(PetscLogEventRegister("Create Swarm", DM_CLASSID, &swarm_create_ev));
  PetscCall(PetscLogEventRegister("Single solve", DM_CLASSID, &solve_ev));
  PetscCall(PetscLogEventRegister("Solve loop", DM_CLASSID, &solve_loop_ev));
  PetscCall(PetscLogStageRegister("Solve", &stage));
  i = dim;
  PetscCall(PetscOptionsGetIntArray(NULL, NULL, "-dm_plex_box_faces", faces, &i, NULL));
  i = dim;
  PetscCall(PetscOptionsGetIntArray(NULL, NULL, "-np", Np, &i, NULL));
  /* Create thread meshes */
  for (int tid = 0; tid < numthreads; tid++) {
    // setup mesh dm_t, could use PETSc's Landau create velocity space mesh here to get dm_t[tid]
    PetscCall(DMCreate(PETSC_COMM_SELF, &dm_t[tid]));
    PetscCall(DMSetType(dm_t[tid], DMPLEX));
    PetscCall(DMSetFromOptions(dm_t[tid]));
    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc, PETSC_FALSE, "", PETSC_DECIDE, &fe));
    PetscCall(PetscFESetFromOptions(fe));
    PetscCall(PetscObjectSetName((PetscObject)fe, "fe"));
    PetscCall(DMSetField(dm_t[tid], field, NULL, (PetscObject)fe));
    PetscCall(DMCreateDS(dm_t[tid]));
    PetscCall(PetscFEDestroy(&fe));
    // helper vectors
    PetscCall(DMCreateGlobalVector(dm_t[tid], &rho_t[tid]));
    PetscCall(DMCreateGlobalVector(dm_t[tid], &rhs_t[tid]));
    // this mimics application code
    PetscCall(DMGetBoundingBox(dm_t[tid], lo, hi));
    if (tid == target) {
      PetscCall(DMViewFromOptions(dm_t[tid], NULL, "-dm_view"));
      for (i = 0, vol = 1; i < dim; i++) {
        h[i]  = (hi[i] - lo[i]) / faces[i];
        hp[i] = (hi[i] - lo[i]) / Np[i];
        vol *= (hi[i] - lo[i]);
        PetscCall(PetscInfo(dm_t[tid], " lo = %g hi = %g n = %" PetscInt_FMT " h = %g hp = %g\n", (double)lo[i], (double)hi[i], faces[i], (double)h[i], (double)hp[i]));
        (void)h[i];
      }
    }
  }
  // prepare particle data for problems. This mimics application code
  PetscCall(PetscLogEventBegin(swarm_create_ev, 0, 0, 0, 0));
  Np2[0] = Np[0];
  Np2[1] = Np[1];
  for (int tid = 0; tid < numthreads; tid++) { // change size of particle list a little
    Np_t[tid] = Np2[0] * Np2[1];
    PetscCall(PetscMalloc3(Np_t[tid], &xx_t[tid], Np_t[tid], &yy_t[tid], Np_t[tid], &wp_t[tid]));
    if (tid == target) moments_0[0] = moments_0[1] = moments_0[2] = 0;
    for (int pi = 0, pp = 0; pi < Np2[0]; pi++) {
      for (int pj = 0; pj < Np2[1]; pj++, pp++) {
        xx_t[tid][pp] = lo[0] + hp[0] / 2. + pi * hp[0];
        yy_t[tid][pp] = lo[1] + hp[1] / 2. + pj * hp[1];
        {
          PetscReal x[] = {xx_t[tid][pp], yy_t[tid][pp]};
          PetscCall(maxwellian(2, x, 1.0, vol / (PetscReal)Np_t[tid], &wp_t[tid][pp]));
        }
        if (tid == target) { //energy_0 += wp_t[tid][pp]*(PetscSqr(xx_t[tid][pp])+PetscSqr(yy_t[tid][pp]));
          moments_0[0] += wp_t[tid][pp];
          moments_0[1] += wp_t[tid][pp] * xx_t[tid][pp]; // x-momentum
          moments_0[2] += wp_t[tid][pp] * (PetscSqr(xx_t[tid][pp]) + PetscSqr(yy_t[tid][pp]));
        }
      }
    }
    Np2[0]++;
    Np2[1]++;
  }
  PetscCall(PetscLogEventEnd(swarm_create_ev, 0, 0, 0, 0));
  PetscCall(PetscLogEventBegin(solve_ev, 0, 0, 0, 0));
  /* Create particle swarm */
  PetscPragmaOMP(parallel for)
  for (int tid = 0; tid < numthreads; tid++) PetscCallAbort(PETSC_COMM_SELF, createSwarm(dm_t[tid], &sw_t[tid]));
  PetscPragmaOMP(parallel for)
  for (int tid = 0; tid < numthreads; tid++) PetscCallAbort(PETSC_COMM_SELF, particlesToGrid(dm_t[tid], sw_t[tid], Np_t[tid], tid, dim, target, xx_t[tid], yy_t[tid], wp_t[tid], rho_t[tid], &M_p_t[tid]));
  /* Project field to particles */
  /* This gives f_p = M_p^+ M f */
  PetscPragmaOMP(parallel for)
  for (int tid = 0; tid < numthreads; tid++) PetscCallAbort(PETSC_COMM_SELF, VecCopy(rho_t[tid], rhs_t[tid])); /* Identity: M^{-1} M rho */
  PetscPragmaOMP(parallel for)
  for (int tid = 0; tid < numthreads; tid++) PetscCallAbort(PETSC_COMM_SELF, gridToParticles(dm_t[tid], sw_t[tid], (tid == target) ? moments_1 : NULL, rhs_t[tid], M_p_t[tid]));
  /* Cleanup */
  for (int tid = 0; tid < numthreads; tid++) {
    PetscCall(MatDestroy(&M_p_t[tid]));
    PetscCall(DMDestroy(&sw_t[tid]));
  }
  PetscCall(PetscLogEventEnd(solve_ev, 0, 0, 0, 0));
  //
  PetscCall(PetscPrintf(PETSC_COMM_SELF, "Total number density: %20.12e (%20.12e); x-momentum = %g (%g); energy = %g error = %e, %" PetscInt_FMT " particles. Use %" PetscInt_FMT " threads\n", (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2], (double)((moments_1[2] - moments_0[2]) / moments_0[2]), Np[0] * Np[1], numthreads));
  /* Cleanup */
  for (int tid = 0; tid < numthreads; tid++) {
    PetscCall(VecDestroy(&rho_t[tid]));
    PetscCall(VecDestroy(&rhs_t[tid]));
    PetscCall(DMDestroy(&dm_t[tid]));
    PetscCall(PetscFree3(xx_t[tid], yy_t[tid], wp_t[tid]));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

int main(int argc, char **argv)
{
  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(go());
  PetscCall(PetscFinalize());
  return 0;
}

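/*
  Example invocation (assuming the executable is built as ex7; the options mirror test 0 below):
    ./ex7 -dm_plex_simplex 0 -dm_plex_box_faces 8,4 -np 10,10 -dm_plex_box_lower -2.0,0.0 -dm_plex_box_upper 2.0,2.0 \
          -petscspace_degree 2 -ftop_ksp_type lsqr -ftop_pc_type none -dm_view -ftop_ksp_converged_reason -ftop_ksp_rtol 1.e-14
*/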
/*TEST

  build:
    requires: !complex

  test:
    suffix: 0
    requires: double triangle
    args: -dm_plex_simplex 0 -dm_plex_box_faces 8,4 -np 10,10 -dm_plex_box_lower -2.0,0.0 -dm_plex_box_upper 2.0,2.0 -petscspace_degree 2 -ftop_ksp_type lsqr -ftop_pc_type none -dm_view -ftop_ksp_converged_reason -ftop_ksp_rtol 1.e-14
    filter: grep -v DM_ | grep -v atomic

  test:
    suffix: 1
    requires: double triangle
    args: -dm_plex_simplex 0 -dm_plex_box_faces 8,4 -np 10,10 -dm_plex_box_lower -2.0,0.0 -dm_plex_box_upper 2.0,2.0 -petscspace_degree 2 -dm_plex_hash_location -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero -dm_view -ftop_ksp_converged_reason -ftop_ksp_rtol 1.e-14
    filter: grep -v DM_ | grep -v atomic

  test:
    suffix: 2
    requires: double triangle
    args: -dm_plex_simplex 0 -dm_plex_box_faces 8,4 -np 15,15 -dm_plex_box_lower -2.0,0.0 -dm_plex_box_upper 2.0,2.0 -petscspace_degree 2 -dm_plex_hash_location -ftop_ksp_type cg -ftop_pc_type jacobi -dm_view -ftop_ksp_converged_reason -ftop_ksp_rtol 1.e-14
    filter: grep -v DM_ | grep -v atomic

TEST*/