Actual source code: petscpctypes.h
1: #ifndef PETSCPCTYPES_H
2: #define PETSCPCTYPES_H
4: /* SUBMANSEC = PC */
6: /*S
7: PC - Abstract PETSc object that manages all preconditioners including direct solvers such as `PCLU`
9: Level: beginner
11: .seealso: [](doc_linsolve), [](sec_pc), `PCCreate()`, `PCSetType()`, `PCType`
12: S*/
13: typedef struct _p_PC *PC;
15: /*J
16: PCType - String with the name of a PETSc preconditioner
18: Level: beginner
20: Note:
21: `PCRegister()` is used to register preconditioners that are then accessible via `PCSetType()`
23: .seealso: [](doc_linsolve), [](sec_pc), `PCSetType()`, `PC`, `PCCreate()`, `PCRegister()`, `PCSetFromOptions()`, `PCLU`, `PCJACOBI`, `PCBJACOBI`
24: J*/
25: typedef const char *PCType;
26: #define PCNONE "none"
27: #define PCJACOBI "jacobi"
28: #define PCSOR "sor"
29: #define PCLU "lu"
30: #define PCQR "qr"
31: #define PCSHELL "shell"
32: #define PCAMGX "amgx"
33: #define PCBJACOBI "bjacobi"
34: #define PCMG "mg"
35: #define PCEISENSTAT "eisenstat"
36: #define PCILU "ilu"
37: #define PCICC "icc"
38: #define PCASM "asm"
39: #define PCGASM "gasm"
40: #define PCKSP "ksp"
41: #define PCBJKOKKOS "bjkokkos"
42: #define PCCOMPOSITE "composite"
43: #define PCREDUNDANT "redundant"
44: #define PCSPAI "spai"
45: #define PCNN "nn"
46: #define PCCHOLESKY "cholesky"
47: #define PCPBJACOBI "pbjacobi"
48: #define PCVPBJACOBI "vpbjacobi"
49: #define PCMAT "mat"
50: #define PCHYPRE "hypre"
51: #define PCPARMS "parms"
52: #define PCFIELDSPLIT "fieldsplit"
53: #define PCTFS "tfs"
54: #define PCML "ml"
55: #define PCGALERKIN "galerkin"
56: #define PCEXOTIC "exotic"
57: #define PCCP "cp"
58: #define PCBFBT "bfbt"
59: #define PCLSC "lsc"
60: #define PCPYTHON "python"
61: #define PCPFMG "pfmg"
62: #define PCSMG "smg"
63: #define PCSYSPFMG "syspfmg"
64: #define PCREDISTRIBUTE "redistribute"
65: #define PCSVD "svd"
66: #define PCGAMG "gamg"
67: #define PCCHOWILUVIENNACL "chowiluviennacl"
68: #define PCROWSCALINGVIENNACL "rowscalingviennacl"
69: #define PCSAVIENNACL "saviennacl"
70: #define PCBDDC "bddc"
71: #define PCKACZMARZ "kaczmarz"
72: #define PCTELESCOPE "telescope"
73: #define PCPATCH "patch"
74: #define PCLMVM "lmvm"
75: #define PCHMG "hmg"
76: #define PCDEFLATION "deflation"
77: #define PCHPDDM "hpddm"
78: #define PCH2OPUS "h2opus"
79: #define PCMPI "mpi"
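/*
   Usage sketch (editorial example, not part of petscpctypes.h): a `PCType` string is passed to
   `PCSetType()` to select the preconditioner implementation. Assumes <petscpc.h> is included,
   PETSc is initialized, and `A` is an assembled Mat; errors are checked with `PetscCall()`.

     PC pc;
     PetscCall(PCCreate(PETSC_COMM_WORLD, &pc));
     PetscCall(PCSetOperators(pc, A, A));
     PetscCall(PCSetType(pc, PCILU));        // equivalent to -pc_type ilu with PCSetFromOptions()
     PetscCall(PCSetFromOptions(pc));
     PetscCall(PCSetUp(pc));
     PetscCall(PCDestroy(&pc));
*/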
81: /*E
82: PCSide - Determines if the preconditioner is applied to the left, the right,
83: or symmetrically around the operator.
85: Values:
86: + `PC_LEFT` - applied after the operator is applied
87: . `PC_RIGHT` - applied before the operator is applied
88: - `PC_SYMMETRIC` - a portion of the preconditioner is applied before the operator and the transpose of this portion is applied after the operator is applied.
90: Level: beginner
92: Note:
93: Certain `KSPType` values support only a subset of the `PCSide` options
95: .seealso: [](sec_pc), `PC`, `KSPSetPCSide()`
96: E*/
97: typedef enum {
98: PC_SIDE_DEFAULT = -1,
99: PC_LEFT,
100: PC_RIGHT,
101: PC_SYMMETRIC
102: } PCSide;
103: #define PC_SIDE_MAX (PC_SYMMETRIC + 1)
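/*
   Usage sketch (editorial example): `PCSide` is normally set on the `KSP` that owns the `PC`.
   Assumes a configured KSP `ksp` whose `KSPType` supports right preconditioning.

     PetscCall(KSPSetPCSide(ksp, PC_RIGHT));   // or -ksp_pc_side right on the command line
*/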
105: /*E
106: PCRichardsonConvergedReason - reason a `PCRICHARDSON` `PCApplyRichardson()` method terminated
108: Level: advanced
110: Developer Note:
111: This must match `include/petsc/finclude/petscpc.h` and the `KSPConvergedReason` values in `include/petscksp.h`
113: .seealso: [](sec_pc), `PCRICHARDSON`, `PC`, `PCApplyRichardson()`
114: E*/
115: typedef enum {
116: PCRICHARDSON_CONVERGED_RTOL = 2,
117: PCRICHARDSON_CONVERGED_ATOL = 3,
118: PCRICHARDSON_CONVERGED_ITS = 4,
119: PCRICHARDSON_DIVERGED_DTOL = -4
120: } PCRichardsonConvergedReason;
122: /*E
123: PCJacobiType - What elements of the matrix are used to form the Jacobi preconditioner
125: Values:
126: + `PC_JACOBI_DIAGONAL` - use the diagonal entry; if it is zero, use one instead
127: . `PC_JACOBI_ROWMAX` - use the maximum absolute value in the row
128: - `PC_JACOBI_ROWSUM` - use the sum of the values in the row (not the absolute values)
130: Level: intermediate
132: .seealso: [](sec_pc), `PCJACOBI`, `PC`
133: E*/
134: typedef enum {
135: PC_JACOBI_DIAGONAL,
136: PC_JACOBI_ROWMAX,
137: PC_JACOBI_ROWSUM
138: } PCJacobiType;
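/*
   Usage sketch (editorial example): selecting which matrix entries the Jacobi preconditioner
   uses, on an existing `PC` object `pc`.

     PetscCall(PCSetType(pc, PCJACOBI));
     PetscCall(PCJacobiSetType(pc, PC_JACOBI_ROWMAX));   // or -pc_jacobi_type rowmax
*/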
140: /*E
141: PCASMType - Type of additive Schwarz method to use
143: Values:
144: + `PC_ASM_BASIC` - Symmetric version where residuals from the ghost points are used
145: and computed values in ghost regions are added together.
146: Classical standard additive Schwarz.
147: . `PC_ASM_RESTRICT` - Residuals from ghost points are used but computed values in ghost
148: region are discarded.
149: Default.
150: . `PC_ASM_INTERPOLATE` - Residuals from ghost points are not used, computed values in ghost
151: region are added back in.
152: - `PC_ASM_NONE` - Residuals from ghost points are not used, computed ghost values are
153: discarded.
154: Not very good.
156: Level: beginner
158: .seealso: [](sec_pc), `PC`, `PCASM`, `PCASMSetType()`, `PCGASMType`
159: E*/
160: typedef enum {
161: PC_ASM_BASIC = 3,
162: PC_ASM_RESTRICT = 1,
163: PC_ASM_INTERPOLATE = 2,
164: PC_ASM_NONE = 0
165: } PCASMType;
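/*
   Usage sketch (editorial example): choosing the additive Schwarz variant on an existing `PC` `pc`.

     PetscCall(PCSetType(pc, PCASM));
     PetscCall(PCASMSetType(pc, PC_ASM_BASIC));   // default is PC_ASM_RESTRICT; or -pc_asm_type basic
*/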
167: /*E
168: PCGASMType - Type of generalized additive Schwarz method to use (differs from `PCASM` in allowing multiple processors per subdomain).
170: Values:
171: + `PC_GASM_BASIC` - Symmetric version where the full residual from the outer subdomain is used, and the resulting correction is applied
172: over the outer subdomains. As a result, points in the overlap will receive the sum of the corrections
173: from neighboring subdomains.
174: Classical standard additive Schwarz.
175: . `PC_GASM_RESTRICT` - Residual from the outer subdomain is used but the correction is restricted to the inner subdomain only
176: (i.e., zeroed out over the overlap portion of the outer subdomain before being applied). As a result,
177: each point will receive a correction only from the unique inner subdomain containing it (nonoverlapping covering
178: assumption).
179: Default.
180: . `PC_GASM_INTERPOLATE` - Residual is zeroed out over the overlap portion of the outer subdomain, but the resulting correction is
181: applied over the outer subdomain. As a result, points in the overlap will receive the sum of the corrections
182: from neighboring subdomains.
183: - `PC_GASM_NONE` - Residuals and corrections are zeroed out outside the local subdomains.
184: Not very good.
186: Level: beginner
188: Note:
189: Each subdomain has nested inner and outer parts. The inner subdomains are assumed to form a non-overlapping covering of the computational
190: domain, while the outer subdomains contain the inner subdomains and overlap with each other. This preconditioner will compute
191: a subdomain correction over each *outer* subdomain from a residual computed there, but its different variants will differ in
192: (a) how the outer subdomain residual is computed, and (b) how the outer subdomain correction is computed.
194: .seealso: [](sec_pc), `PCGASM`, `PCASM`, `PC`, `PCGASMSetType()`, `PCASMType`
195: E*/
196: typedef enum {
197: PC_GASM_BASIC = 3,
198: PC_GASM_RESTRICT = 1,
199: PC_GASM_INTERPOLATE = 2,
200: PC_GASM_NONE = 0
201: } PCGASMType;
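/*
   Usage sketch (editorial example): the generalized additive Schwarz analogue of the above,
   on an existing `PC` `pc`.

     PetscCall(PCSetType(pc, PCGASM));
     PetscCall(PCGASMSetType(pc, PC_GASM_BASIC)); // default is PC_GASM_RESTRICT; or -pc_gasm_type basic
*/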
203: /*E
204: PCCompositeType - Determines how two or more preconditioners are composed with the `PCType` of `PCCOMPOSITE`
206: Values:
207: + `PC_COMPOSITE_ADDITIVE` - results from application of all preconditioners are added together
208: . `PC_COMPOSITE_MULTIPLICATIVE` - preconditioners are applied sequentially to the residual freshly
209: computed after the previous preconditioner application
210: . `PC_COMPOSITE_SYMMETRIC_MULTIPLICATIVE` - preconditioners are applied sequentially to the residual freshly
211: computed from first preconditioner to last and then back (Use only for symmetric matrices and preconditioners)
212: . `PC_COMPOSITE_SPECIAL` - reserved for the special case of a matrix of the form alpha*I + R + S,
213: where the first preconditioner is built from alpha*I + S and the second from
214: alpha*I + R
215: . `PC_COMPOSITE_SCHUR` - composes the Schur complement of the matrix from two blocks, see `PCFIELDSPLIT`
216: - `PC_COMPOSITE_GKB` - the generalized Golub-Kahan bidiagonalization preconditioner, see `PCFIELDSPLIT`
218: Level: beginner
220: .seealso: [](sec_pc), `PCCOMPOSITE`, `PCFIELDSPLIT`, `PC`, `PCCompositeSetType()`
221: E*/
222: typedef enum {
223: PC_COMPOSITE_ADDITIVE,
224: PC_COMPOSITE_MULTIPLICATIVE,
225: PC_COMPOSITE_SYMMETRIC_MULTIPLICATIVE,
226: PC_COMPOSITE_SPECIAL,
227: PC_COMPOSITE_SCHUR,
228: PC_COMPOSITE_GKB
229: } PCCompositeType;
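/*
   Usage sketch (editorial example): composing two inner preconditioners multiplicatively.
   Assumes an existing `PC` `pc`; the inner solvers are then chosen from the options database.

     PetscCall(PCSetType(pc, PCCOMPOSITE));
     PetscCall(PCCompositeSetType(pc, PC_COMPOSITE_MULTIPLICATIVE));
     // the inner PCs can be selected with e.g. -pc_composite_pcs jacobi,ilu
*/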
231: /*E
232: PCFieldSplitSchurPreType - Determines how to precondition a Schur complement
234: Values:
235: + `PC_FIELDSPLIT_SCHUR_PRE_SELF` - the preconditioner for the Schur complement is generated from the symbolic representation of the Schur complement matrix:
236: The only preconditioner that currently works with this symbolic representation matrix object is `PCLSC`
237: . `PC_FIELDSPLIT_SCHUR_PRE_SELFP` - the preconditioner for the Schur complement is generated from an explicitly assembled approximation Sp = A11 - A10 inv(diag(A00)) A01
238: This is only a good preconditioner when diag(A00) is a good preconditioner for A00. Optionally, A00 can be
239: lumped before extracting the diagonal using the additional option `-fieldsplit_1_mat_schur_complement_ainv_type lump`
240: . `PC_FIELDSPLIT_SCHUR_PRE_A11` - the preconditioner for the Schur complement is generated from the block diagonal part of the matrix used to define the preconditioner,
241: associated with the Schur complement (i.e. A11), not the Schur complement matrix
242: . `PC_FIELDSPLIT_SCHUR_PRE_USER` - the preconditioner for the Schur complement is generated from the user-provided matrix (the `pre` argument
243: to `PCFieldSplitSetSchurPre()`).
244: - `PC_FIELDSPLIT_SCHUR_PRE_FULL` - the preconditioner for the Schur complement is generated from the exact Schur complement matrix representation
245: computed internally by `PCFIELDSPLIT` (this is expensive); useful mostly as a test that the Schur complement approach can work for your problem
247: Level: intermediate
249: .seealso: [](sec_pc), `PCFIELDSPLIT`, `PCFieldSplitSetSchurPre()`, `PC`
250: E*/
251: typedef enum {
252: PC_FIELDSPLIT_SCHUR_PRE_SELF,
253: PC_FIELDSPLIT_SCHUR_PRE_SELFP,
254: PC_FIELDSPLIT_SCHUR_PRE_A11,
255: PC_FIELDSPLIT_SCHUR_PRE_USER,
256: PC_FIELDSPLIT_SCHUR_PRE_FULL
257: } PCFieldSplitSchurPreType;
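/*
   Usage sketch (editorial example): requesting the "selfp" approximation for the Schur
   complement preconditioner. Assumes `pc` is a `PCFIELDSPLIT` already configured for a
   Schur-complement factorization; the final Mat argument is only needed for
   `PC_FIELDSPLIT_SCHUR_PRE_USER`.

     PetscCall(PCFieldSplitSetSchurPre(pc, PC_FIELDSPLIT_SCHUR_PRE_SELFP, NULL));
     // or -pc_fieldsplit_schur_precondition selfp
*/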
259: /*E
260: PCFieldSplitSchurFactType - Determines which off-diagonal parts of the approximate block factorization to use
262: Values:
263: + `PC_FIELDSPLIT_SCHUR_FACT_DIAG` - the preconditioner is solving `D`
264: . `PC_FIELDSPLIT_SCHUR_FACT_LOWER` - the preconditioner is solving `L D`
265: . `PC_FIELDSPLIT_SCHUR_FACT_UPPER` - the preconditioner is solving `D U`
266: - `PC_FIELDSPLIT_SCHUR_FACT_FULL` - the preconditioner is solving `L(D U)`
268: where the matrix is factorized as
269: .vb
270:    (A   B)  =  (1       0) (A   0) (1  Ainv*B)  =  L D U
271:    (C   E)     (C*Ainv  1) (0   S) (0       1)
272: .ve
274: Level: intermediate
276: .seealso: [](sec_pc), `PCFIELDSPLIT`, `PCFieldSplitSetSchurFactType()`, `PC`
277: E*/
278: typedef enum {
279: PC_FIELDSPLIT_SCHUR_FACT_DIAG,
280: PC_FIELDSPLIT_SCHUR_FACT_LOWER,
281: PC_FIELDSPLIT_SCHUR_FACT_UPPER,
282: PC_FIELDSPLIT_SCHUR_FACT_FULL
283: } PCFieldSplitSchurFactType;
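/*
   Usage sketch (editorial example): selecting which factors of the block factorization above
   the `PCFIELDSPLIT` Schur preconditioner applies, on a suitably configured `PC` `pc`.

     PetscCall(PCFieldSplitSetSchurFactType(pc, PC_FIELDSPLIT_SCHUR_FACT_FULL));
     // or -pc_fieldsplit_schur_fact_type full
*/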
285: /*E
286: PCPARMSGlobalType - Determines the global preconditioner method in `PCPARMS`
288: Level: intermediate
290: .seealso: [](sec_pc), `PCPARMS`, `PCPARMSSetGlobal()`, `PC`
291: E*/
292: typedef enum {
293: PC_PARMS_GLOBAL_RAS,
294: PC_PARMS_GLOBAL_SCHUR,
295: PC_PARMS_GLOBAL_BJ
296: } PCPARMSGlobalType;
298: /*E
299: PCPARMSLocalType - Determines the local preconditioner method in `PCPARMS`
301: Level: intermediate
303: .seealso: [](sec_pc), `PCPARMS`, `PCPARMSSetLocal()`, `PC`
304: E*/
305: typedef enum {
306: PC_PARMS_LOCAL_ILU0,
307: PC_PARMS_LOCAL_ILUK,
308: PC_PARMS_LOCAL_ILUT,
309: PC_PARMS_LOCAL_ARMS
310: } PCPARMSLocalType;
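/*
   Usage sketch (editorial example): selecting the global and local pARMS methods on an existing
   `PC` `pc`. Assumes PETSc was configured with pARMS support (e.g. --download-parms).

     PetscCall(PCSetType(pc, PCPARMS));
     PetscCall(PCPARMSSetGlobal(pc, PC_PARMS_GLOBAL_RAS));
     PetscCall(PCPARMSSetLocal(pc, PC_PARMS_LOCAL_ILU0));
*/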
312: /*J
313: PCGAMGType - type of generalized algebraic multigrid `PCGAMG` method
315: Values:
316: + `PCGAMGAGG` - (the default) smoothed aggregation algorithm, robust, very well tested
317: . `PCGAMGGEO` - geometric coarsening, uses mesh generator to produce coarser meshes, limited to triangles, not well tested
318: - `PCGAMGCLASSICAL` - classical algebraic multigrid preconditioner, incomplete, poorly tested
320: Level: intermediate
322: .seealso: [](sec_pc), `PCGAMG`, `PCMG`, `PC`, `PCSetType()`, `PCGAMGSetThreshold()`, `PCGAMGSetThreshold()`, `PCGAMGSetReuseInterpolation()`
323: J*/
324: typedef const char *PCGAMGType;
325: #define PCGAMGAGG "agg"
326: #define PCGAMGGEO "geo"
327: #define PCGAMGCLASSICAL "classical"
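/*
   Usage sketch (editorial example): selecting the GAMG coarsening algorithm on an existing `PC` `pc`.

     PetscCall(PCSetType(pc, PCGAMG));
     PetscCall(PCGAMGSetType(pc, PCGAMGAGG));     // or -pc_gamg_type agg
*/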
329: typedef const char *PCGAMGClassicalType;
330: #define PCGAMGCLASSICALDIRECT "direct"
331: #define PCGAMGCLASSICALSTANDARD "standard"
333: /*E
334: PCMGType - Determines the type of multigrid method that is run.
336: Values:
337: + `PC_MG_MULTIPLICATIVE` (default) - traditional V or W cycle as determined by `PCMGSetCycleType()`
338: . `PC_MG_ADDITIVE` - the additive multigrid preconditioner where all levels are
339: smoothed before updating the residual. This only uses the
340: down smoother; the up smoother is ignored
341: . `PC_MG_FULL` - same as multiplicative except one also performs grid sequencing,
342: that is starts on the coarsest grid, performs a cycle, interpolates
343: to the next, performs a cycle etc. This is much like the F-cycle presented in "Multigrid" by Trottenberg, Oosterlee, Schuller page 49, but that
344: algorithm performs smoothing before the restriction on each level during the initial descent to the coarsest grid. In addition, that algorithm
345: calls the V-cycle only on the coarser level and has a post-smoother instead.
346: - `PC_MG_KASKADE` - like full multigrid except one never goes back to a coarser level from a finer
348: Level: beginner
350: .seealso: [](sec_pc), `PCMG`, `PC`, `PCMGSetType()`, `PCMGSetCycleType()`, `PCMGSetCycleTypeOnLevel()`
351: E*/
352: typedef enum {
353: PC_MG_MULTIPLICATIVE,
354: PC_MG_ADDITIVE,
355: PC_MG_FULL,
356: PC_MG_KASKADE
357: } PCMGType;
358: #define PC_MG_CASCADE PC_MG_KASKADE
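/*
   Usage sketch (editorial example): selecting the multigrid strategy on an existing `PCMG`
   preconditioner `pc`.

     PetscCall(PCMGSetType(pc, PC_MG_FULL));      // or -pc_mg_type full
*/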
360: /*E
361: PCMGCycleType - Use V-cycle or W-cycle
363: Values:
364: + `PC_MG_CYCLE_V` - use the V-cycle
365: - `PC_MG_CYCLE_W` - use the W-cycle
367: Level: beginner
369: .seealso: [](sec_pc), `PCMG`, `PC`, `PCMGSetCycleType()`
370: E*/
371: typedef enum {
372: PC_MG_CYCLE_V = 1,
373: PC_MG_CYCLE_W = 2
374: } PCMGCycleType;
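/*
   Usage sketch (editorial example): switching a `PCMG` preconditioner `pc` from the default
   V-cycle to a W-cycle.

     PetscCall(PCMGSetCycleType(pc, PC_MG_CYCLE_W));   // or -pc_mg_cycle_type w
*/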
376: /*E
377: PCMGGalerkinType - Determines if the coarse grid operators are computed via the Galerkin process
379: Values:
380: + `PC_MG_GALERKIN_PMAT` - computes the pmat (matrix from which the preconditioner is built) via the Galerkin process from the finest grid
381: . `PC_MG_GALERKIN_MAT` - computes the mat (matrix used to apply the operator) via the Galerkin process from the finest grid
382: . `PC_MG_GALERKIN_BOTH` - computes both the mat and pmat via the Galerkin process (if pmat == mat the construction is only done once)
383: - `PC_MG_GALERKIN_NONE` - neither operator is computed via the Galerkin process, the user must provide the operator
385: Level: beginner
387: Note:
388: Users should never set `PC_MG_GALERKIN_EXTERNAL`, it is used by `PCHYPRE` and `PCML`
390: .seealso: [](sec_pc), `PCMG`, `PC`, `PCMGSetGalerkin()`, `PCMGSetCycleType()`
391: E*/
392: typedef enum {
393: PC_MG_GALERKIN_BOTH,
394: PC_MG_GALERKIN_PMAT,
395: PC_MG_GALERKIN_MAT,
396: PC_MG_GALERKIN_NONE,
397: PC_MG_GALERKIN_EXTERNAL
398: } PCMGGalerkinType;
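/*
   Usage sketch (editorial example): asking a `PCMG` preconditioner `pc` to build the coarse
   matrices used for preconditioning by the Galerkin process.

     PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_PMAT));   // or -pc_mg_galerkin pmat
*/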
400: /*E
401: PCExoticType - Face-based or wirebasket-based coarse grid space
403: Level: beginner
405: .seealso: [](sec_pc), `PCExoticSetType()`, `PCEXOTIC`
406: E*/
407: typedef enum {
408: PC_EXOTIC_FACE,
409: PC_EXOTIC_WIREBASKET
410: } PCExoticType;
412: /*E
413: PCBDDCInterfaceExtType - Defines how interface balancing is extended into the interior of subdomains.
415: Values:
416: + `PC_BDDC_INTERFACE_EXT_DIRICHLET` - solves Dirichlet interior problem; this is the standard BDDC algorithm
417: - `PC_BDDC_INTERFACE_EXT_LUMP` - skips interior solve; sometimes called M_1 and associated with "lumped FETI-DP"
419: Level: intermediate
421: .seealso: [](sec_pc), `PCBDDC`, `PC`
422: E*/
423: typedef enum {
424: PC_BDDC_INTERFACE_EXT_DIRICHLET,
425: PC_BDDC_INTERFACE_EXT_LUMP
426: } PCBDDCInterfaceExtType;
428: /*E
429: PCMGCoarseSpaceType - The type of function space used to construct the coarse space for adaptive interpolation
431: Level: beginner
433: .seealso: [](sec_pc), `PCMGSetAdaptCoarseSpaceType()`, `PCMG`, `PC`
434: E*/
435: typedef enum {
436: PCMG_ADAPT_NONE,
437: PCMG_ADAPT_POLYNOMIAL,
438: PCMG_ADAPT_HARMONIC,
439: PCMG_ADAPT_EIGENVECTOR,
440: PCMG_ADAPT_GENERALIZED_EIGENVECTOR,
441: PCMG_ADAPT_GDSW
442: } PCMGCoarseSpaceType;
444: /*E
445: PCPatchConstructType - The algorithm used to construct patches for the `PCPATCH` preconditioner
447: Level: beginner
449: .seealso: [](sec_pc), `PCPatchSetConstructType()`, `PCPATCH`, `PC`
450: E*/
451: typedef enum {
452: PC_PATCH_STAR,
453: PC_PATCH_VANKA,
454: PC_PATCH_PARDECOMP,
455: PC_PATCH_USER,
456: PC_PATCH_PYTHON
457: } PCPatchConstructType;
459: /*E
460: PCDeflationSpaceType - Type of deflation
462: Values:
463: + `PC_DEFLATION_SPACE_HAAR` - directly assembled based on the Haar (db2) wavelet with the overflowing filter cut off
464: . `PC_DEFLATION_SPACE_DB2` - `MATCOMPOSITE` of one-level matrices based on db2 (2-coefficient Daubechies / Haar wavelet)
465: . `PC_DEFLATION_SPACE_DB4` - same as above, but with db4 (4 coefficient Daubechies)
466: . `PC_DEFLATION_SPACE_DB8` - same as above, but with db8 (8 coefficient Daubechies)
467: . `PC_DEFLATION_SPACE_DB16` - same as above, but with db16 (16 coefficient Daubechies)
468: . `PC_DEFLATION_SPACE_BIORTH22` - same as above, but with biorthogonal 2.2 (6 coefficients)
469: . `PC_DEFLATION_SPACE_MEYER` - same as above, but with Meyer/FIR (62 coefficients)
470: . `PC_DEFLATION_SPACE_AGGREGATION` - aggregates local indices (given by operator matrix distribution) into a subdomain
471: - `PC_DEFLATION_SPACE_USER` - indicates space set by user
473: Level: intermediate
475: Note:
476: Wavelet-based spaces (except Haar) can be used in multilevel deflation.
478: .seealso: [](sec_pc), `PCDeflationSetSpaceToCompute()`, `PCDEFLATION`, `PC`
479: E*/
480: typedef enum {
481: PC_DEFLATION_SPACE_HAAR,
482: PC_DEFLATION_SPACE_DB2,
483: PC_DEFLATION_SPACE_DB4,
484: PC_DEFLATION_SPACE_DB8,
485: PC_DEFLATION_SPACE_DB16,
486: PC_DEFLATION_SPACE_BIORTH22,
487: PC_DEFLATION_SPACE_MEYER,
488: PC_DEFLATION_SPACE_AGGREGATION,
489: PC_DEFLATION_SPACE_USER
490: } PCDeflationSpaceType;
492: /*E
493: PCHPDDMCoarseCorrectionType - Type of coarse correction used by `PCHPDDM`
495: Values:
496: + `PC_HPDDM_COARSE_CORRECTION_DEFLATED` (default) - eq. (1) in `PCHPDDMShellApply()`
497: . `PC_HPDDM_COARSE_CORRECTION_ADDITIVE` - eq. (2)
498: - `PC_HPDDM_COARSE_CORRECTION_BALANCED` - eq. (3)
500: Level: intermediate
502: .seealso: [](sec_pc), `PCHPDDM`, `PC`, `PCSetType()`, `PCHPDDMShellApply()`
503: E*/
504: typedef enum {
505: PC_HPDDM_COARSE_CORRECTION_DEFLATED,
506: PC_HPDDM_COARSE_CORRECTION_ADDITIVE,
507: PC_HPDDM_COARSE_CORRECTION_BALANCED
508: } PCHPDDMCoarseCorrectionType;
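/*
   Usage sketch (editorial example): choosing the coarse correction variant on a `PCHPDDM`
   preconditioner `pc`. Assumes PETSc was configured with HPDDM support (e.g. --download-hpddm).

     PetscCall(PCHPDDMSetCoarseCorrectionType(pc, PC_HPDDM_COARSE_CORRECTION_BALANCED));
     // or -pc_hpddm_coarse_correction balanced
*/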
510: /*E
511: PCFailedReason - indicates type of `PC` failure
513: Level: beginner
515: Developer Note:
516: Any additions/changes here MUST also be made in `include/petsc/finclude/petscpc.h`
518: .seealso: [](sec_pc), `PC`
519: E*/
520: typedef enum {
521: PC_SETUP_ERROR = -1,
522: PC_NOERROR,
523: PC_FACTOR_STRUCT_ZEROPIVOT,
524: PC_FACTOR_NUMERIC_ZEROPIVOT,
525: PC_FACTOR_OUTMEMORY,
526: PC_FACTOR_OTHER,
527: PC_INCONSISTENT_RHS,
528: PC_SUBPC_ERROR
529: } PCFailedReason;
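/*
   Usage sketch (editorial example): querying why a `PC` failed, for instance to detect a zero
   pivot encountered during factorization. Assumes an existing `PC` `pc` that has been set up.

     PCFailedReason reason;
     PetscCall(PCGetFailedReason(pc, &reason));
     if (reason == PC_FACTOR_NUMERIC_ZEROPIVOT) {
       // e.g. add a shift with PCFactorSetShiftType() and set up again
     }
*/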
531: /*E
532: PCGAMGLayoutType - Layout for reduced grids
534: Level: intermediate
536: Developer Note:
537: Any additions/changes here MUST also be made in `include/petsc/finclude/petscpc.h`
539: .seealso: [](sec_pc), `PCGAMG`, `PC`, `PCGAMGSetCoarseGridLayoutType()`
540: E*/
541: typedef enum {
542: PCGAMG_LAYOUT_COMPACT,
543: PCGAMG_LAYOUT_SPREAD
544: } PCGAMGLayoutType;
546: #endif