Actual source code: gcreate.c
#include <petsc/private/matimpl.h>
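
/* Default implementation used by MatSetBlockSizes(): once the matrix is preallocated the
   block sizes can no longer change, so only validate the requested values */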
PETSC_INTERN PetscErrorCode MatSetBlockSizes_Default(Mat mat, PetscInt rbs, PetscInt cbs)
{
  PetscFunctionBegin;
  if (!mat->preallocated) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheck(mat->rmap->bs <= 0 || mat->rmap->bs == rbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change row block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->rmap->bs, rbs);
  PetscCheck(mat->cmap->bs <= 0 || mat->cmap->bs == cbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change column block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->cmap->bs, cbs);
  PetscFunctionReturn(PETSC_SUCCESS);
}
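
/* Default implementation of MatShift(): adds a*I to Y by accumulating one diagonal entry
   per locally owned row, then reassembles the matrix */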
PETSC_INTERN PetscErrorCode MatShift_Basic(Mat Y, PetscScalar a)
{
  PetscInt    i, start, end;
  PetscScalar alpha = a;
  PetscBool   prevoption;

  PetscFunctionBegin;
  PetscCall(MatGetOption(Y, MAT_NO_OFF_PROC_ENTRIES, &prevoption));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatGetOwnershipRange(Y, &start, &end));
  for (i = start; i < end; i++) {
    if (i < Y->cmap->N) PetscCall(MatSetValues(Y, 1, &i, 1, &i, &alpha, ADD_VALUES));
  }
  PetscCall(MatAssemblyBegin(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, prevoption));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatCreate - Creates a matrix where the type is determined from either a call to `MatSetType()`
  or from the options database with a call to `MatSetFromOptions()`. The default matrix type is
  `MATAIJ`, using the routines `MatCreateSeqAIJ()` or `MatCreateAIJ()` if you do not set a type
  in the options database. If you never call `MatSetType()` or `MatSetFromOptions()` it will
  generate an error when you try to use the matrix.

  Collective

  Input Parameter:
. comm - MPI communicator

  Output Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_type seqaij   - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij   - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
. -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
. -mat_type seqbaij  - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij  - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`

  See the manpages for particular formats (e.g., `MATSEQAIJ`) for additional format-specific options.
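
  Example Usage:
  A minimal sketch of the usual creation sequence, with `M` and `N` the desired global sizes and
  the preallocation step depending on the type that was selected:
.vb
  Mat A;

  PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
  PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, M, N));
  PetscCall(MatSetFromOptions(A)); // type taken from -mat_type, MATAIJ by default
  PetscCall(MatSetUp(A));          // or call a MatXXXSetPreallocation() routine
.ve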

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatCreate(MPI_Comm comm, Mat *A)
{
  Mat B;

  PetscFunctionBegin;
  *A = NULL;
  PetscCall(MatInitializePackage());

  PetscCall(PetscHeaderCreate(B, MAT_CLASSID, "Mat", "Matrix", "Mat", comm, MatDestroy, MatView));
  PetscCall(PetscLayoutCreate(comm, &B->rmap));
  PetscCall(PetscLayoutCreate(comm, &B->cmap));
  PetscCall(PetscStrallocpy(VECSTANDARD, &B->defaultvectype));
  PetscCall(PetscStrallocpy(PETSCRANDER48, &B->defaultrandtype));

  B->symmetric                   = PETSC_BOOL3_UNKNOWN;
  B->hermitian                   = PETSC_BOOL3_UNKNOWN;
  B->structurally_symmetric      = PETSC_BOOL3_UNKNOWN;
  B->spd                         = PETSC_BOOL3_UNKNOWN;
  B->symmetry_eternal            = PETSC_FALSE;
  B->structural_symmetry_eternal = PETSC_FALSE;

  B->congruentlayouts = PETSC_DECIDE;
  B->preallocated     = PETSC_FALSE;
#if defined(PETSC_HAVE_DEVICE)
  B->boundtocpu = PETSC_TRUE;
#endif
  *A = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetErrorIfFailure - Causes `Mat` to generate an immediate error when an error, for example a zero pivot, is detected.

  Logically Collective

  Input Parameters:
+ mat - matrix obtained from `MatCreate()`
- flg - `PETSC_TRUE` indicates you want the error generated

  Level: advanced

  Note:
  If this flag is not set then the matrix operation will note the error and continue. The error may cause a later `PC` or `KSP` error
  or result in a `KSPConvergedReason` indicating the method did not converge.

.seealso: [](ch_matrices), `Mat`, `PCSetErrorIfFailure()`, `KSPConvergedReason`, `SNESConvergedReason`
@*/
PetscErrorCode MatSetErrorIfFailure(Mat mat, PetscBool flg)
{
  PetscFunctionBegin;
  mat->erroriffailure = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetSizes - Sets the local and global sizes, and checks to determine compatibility

  Collective

  Input Parameters:
+ A - the matrix
. m - number of local rows (or `PETSC_DECIDE`)
. n - number of local columns (or `PETSC_DECIDE`)
. M - number of global rows (or `PETSC_DETERMINE`)
- N - number of global columns (or `PETSC_DETERMINE`)

  Level: beginner

  Notes:
  `m` (`n`) and `M` (`N`) cannot both be `PETSC_DECIDE`.
  If one processor calls this with `M` (`N`) of `PETSC_DECIDE` then all processors must, otherwise the program will hang.

  If `PETSC_DECIDE` is not used for the arguments `m` and `n`, then the user must ensure that they
  are chosen to be compatible with the vectors. To do this, one first considers the matrix-vector
  product `y = A x`. The `m` that is used in this routine must match the local size used in the
  vector creation routine `VecCreateMPI()` for `y`. Likewise, the `n` used must match that used as
  the local size in `VecCreateMPI()` for `x`.

  You cannot change the sizes once they have been set.

  The sizes must be set before `MatSetUp()` or MatXXXSetPreallocation() is called.
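
  Example Usage:
  For instance, to make `A` compatible in the product `y = A x` (a sketch, with `nlocal_y` and
  `nlocal_x` the local sizes that were passed to `VecCreateMPI()` for `y` and `x`):
.vb
  PetscCall(MatSetSizes(A, nlocal_y, nlocal_x, PETSC_DETERMINE, PETSC_DETERMINE));
.ve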

.seealso: [](ch_matrices), `Mat`, `MatGetSize()`, `PetscSplitOwnership()`
@*/
PetscErrorCode MatSetSizes(Mat A, PetscInt m, PetscInt n, PetscInt M, PetscInt N)
{
  PetscFunctionBegin;
  PetscCheck(M <= 0 || m <= M, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local row size %" PetscInt_FMT " cannot be larger than global row size %" PetscInt_FMT, m, M);
  PetscCheck(N <= 0 || n <= N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local column size %" PetscInt_FMT " cannot be larger than global column size %" PetscInt_FMT, n, N);
  PetscCheck((A->rmap->n < 0 || A->rmap->N < 0) || (A->rmap->n == m && (M <= 0 || A->rmap->N == M)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset row sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", m, M, A->rmap->n, A->rmap->N);
  PetscCheck((A->cmap->n < 0 || A->cmap->N < 0) || (A->cmap->n == n && (N <= 0 || A->cmap->N == N)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset column sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", n, N, A->cmap->n, A->cmap->N);
  A->rmap->n = m;
  A->cmap->n = n;
  A->rmap->N = M > -1 ? M : A->rmap->N;
  A->cmap->N = N > -1 ? N : A->cmap->N;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetFromOptions - Creates a matrix where the type is determined from the options database.
  Generates a parallel MPI matrix if the communicator has more than one processor. The default
  matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` and `MatCreateAIJ()` if you do
  not select a type in the options database.

  Collective

  Input Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_type seqaij   - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij   - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
. -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
. -mat_type seqbaij  - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij  - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`

  See the manpages for particular formats (e.g., `MATSEQAIJ`) for additional format-specific options.
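
  Example Usage:
  With the call below placed after `MatCreate()` and `MatSetSizes()`, the type can be chosen at
  run time (a sketch; the program name is a placeholder):
.vb
  PetscCall(MatSetFromOptions(A));
.ve
  and then, for example,
.vb
  ./myapp -mat_type mpiaij -mat_block_size 2
.ve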

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatSetFromOptions(Mat B)
{
  const char *deft = MATAIJ;
  char        type[256];
  PetscBool   flg, set;
  PetscInt    bind_below = 0;

  PetscFunctionBegin;
  PetscObjectOptionsBegin((PetscObject)B);

  if (B->rmap->bs < 0) {
    PetscInt newbs = -1;
    PetscCall(PetscOptionsInt("-mat_block_size", "Set the blocksize used to store the matrix", "MatSetBlockSize", newbs, &newbs, &flg));
    if (flg) {
      PetscCall(PetscLayoutSetBlockSize(B->rmap, newbs));
      PetscCall(PetscLayoutSetBlockSize(B->cmap, newbs));
    }
  }

  PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, deft, type, 256, &flg));
  if (flg) {
    PetscCall(MatSetType(B, type));
  } else if (!((PetscObject)B)->type_name) {
    PetscCall(MatSetType(B, deft));
  }

  PetscCall(PetscOptionsName("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", &B->checksymmetryonassembly));
  PetscCall(PetscOptionsReal("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", B->checksymmetrytol, &B->checksymmetrytol, NULL));
  PetscCall(PetscOptionsBool("-mat_null_space_test", "Checks if provided null space is correct in MatAssemblyEnd()", "MatSetNullSpaceTest", B->checknullspaceonassembly, &B->checknullspaceonassembly, NULL));
  PetscCall(PetscOptionsBool("-mat_error_if_failure", "Generate an error if an error occurs when factoring the matrix", "MatSetErrorIfFailure", B->erroriffailure, &B->erroriffailure, NULL));

  PetscTryTypeMethod(B, setfromoptions, PetscOptionsObject);

  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_new_nonzero_location_err", "Generate an error if new nonzeros are created in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, flg));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_new_nonzero_allocation_err", "Generate an error if new nonzeros are allocated in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, flg));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_ignore_zero_entries", "For AIJ/IS matrices this will stop zero values from creating a zero location in the matrix", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_IGNORE_ZERO_ENTRIES, flg));

  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_form_explicit_transpose", "Hint to form an explicit transpose for operations like MatMultTranspose", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_FORM_EXPLICIT_TRANSPOSE, flg));

  /* Bind to CPU if below a user-specified size threshold.
   * This perhaps belongs in the options for the GPU Mat types, but MatBindToCPU() does nothing when called on non-GPU types,
   * and putting it here makes it more maintainable than duplicating it for all of them. */
  PetscCall(PetscOptionsInt("-mat_bind_below", "Set the size threshold (in local rows) below which the Mat is bound to the CPU", "MatBindToCPU", bind_below, &bind_below, &flg));
  if (flg && B->rmap->n < bind_below) PetscCall(MatBindToCPU(B, PETSC_TRUE));

  /* process any options handlers added with PetscObjectAddOptionsHandler() */
  PetscCall(PetscObjectProcessOptionsHandlers((PetscObject)B, PetscOptionsObject));
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatXAIJSetPreallocation - set preallocation for serial and parallel `MATAIJ`, `MATBAIJ`, and `MATSBAIJ` matrices and their unassembled versions.

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. bs    - block size
. dnnz  - number of nonzero column blocks per block row of diagonal part of parallel matrix
. onnz  - number of nonzero column blocks per block row of off-diagonal part of parallel matrix
. dnnzu - number of nonzero column blocks per block row of upper-triangular part of diagonal part of parallel matrix
- onnzu - number of nonzero column blocks per block row of upper-triangular part of off-diagonal part of parallel matrix
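
  Example Usage:
  A sketch for a matrix with block size `bs`, where the per-block-row counts `dnnz[]` and `onnz[]`
  have already been computed, e.g. from mesh connectivity; passing `NULL` for the upper-triangular
  counts assumes no `MATSBAIJ` format will be selected:
.vb
  PetscCall(MatXAIJSetPreallocation(A, bs, dnnz, onnz, NULL, NULL));
.ve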

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`,
          `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`,
          `PetscSplitOwnership()`
@*/
PetscErrorCode MatXAIJSetPreallocation(Mat A, PetscInt bs, const PetscInt dnnz[], const PetscInt onnz[], const PetscInt dnnzu[], const PetscInt onnzu[])
{
  PetscInt cbs;
  void (*aij)(void);
  void (*is)(void);
  void (*hyp)(void) = NULL;

  PetscFunctionBegin;
  if (bs != PETSC_DECIDE) { /* don't mess with an already set block size */
    PetscCall(MatSetBlockSize(A, bs));
  }
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(MatGetBlockSizes(A, &bs, &cbs));
  /* these routines assume bs == cbs, this should be checked somehow */
  PetscCall(MatSeqBAIJSetPreallocation(A, bs, 0, dnnz));
  PetscCall(MatMPIBAIJSetPreallocation(A, bs, 0, dnnz, 0, onnz));
  PetscCall(MatSeqSBAIJSetPreallocation(A, bs, 0, dnnzu));
  PetscCall(MatMPISBAIJSetPreallocation(A, bs, 0, dnnzu, 0, onnzu));
  /*
    In general, we have to do extra work to preallocate for scalar (AIJ) or unassembled (IS) matrices so we check whether it will do any
    good before going on with it.
  */
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJSetPreallocation_C", &aij));
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatISSetPreallocation_C", &is));
#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatHYPRESetPreallocation_C", &hyp));
#endif
  if (!aij && !is && !hyp) PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJSetPreallocation_C", &aij));
  if (aij || is || hyp) {
    if (bs == cbs && bs == 1) {
      PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz));
      PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz, 0, onnz));
      PetscCall(MatISSetPreallocation(A, 0, dnnz, 0, onnz));
#if defined(PETSC_HAVE_HYPRE)
      PetscCall(MatHYPRESetPreallocation(A, 0, dnnz, 0, onnz));
#endif
    } else { /* convert block-row preallocation to scalar-row preallocation */
      PetscInt i, m, *sdnnz, *sonnz;
      PetscCall(MatGetLocalSize(A, &m, NULL));
      PetscCall(PetscMalloc2((!!dnnz) * m, &sdnnz, (!!onnz) * m, &sonnz));
      for (i = 0; i < m; i++) {
        if (dnnz) sdnnz[i] = dnnz[i / bs] * cbs;
        if (onnz) sonnz[i] = onnz[i / bs] * cbs;
      }
      PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL));
      PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
      PetscCall(MatISSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
#if defined(PETSC_HAVE_HYPRE)
      PetscCall(MatHYPRESetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
#endif
      PetscCall(PetscFree2(sdnnz, sonnz));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Merges some information from C's header to A; the C object is then destroyed

  This is somewhat different from MatHeaderReplace(); it would be nice to merge the code
*/
PetscErrorCode MatHeaderMerge(Mat A, Mat *C)
{
  PetscInt         refct;
  PetscOps         Abops;
  struct _MatOps   Aops;
  char            *mtype, *mname, *mprefix;
  Mat_Product     *product;
  Mat_Redundant   *redundant;
  PetscObjectState state;

  PetscFunctionBegin;
  if (A == *C) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheckSameComm(A, 1, *C, 2);
  /* save the parts of A we need */
  Abops     = ((PetscObject)A)->bops[0];
  Aops      = A->ops[0];
  refct     = ((PetscObject)A)->refct;
  mtype     = ((PetscObject)A)->type_name;
  mname     = ((PetscObject)A)->name;
  state     = ((PetscObject)A)->state;
  mprefix   = ((PetscObject)A)->prefix;
  product   = A->product;
  redundant = A->redundant;

  /* zero these so the destroy below does not free them */
  ((PetscObject)A)->type_name = NULL;
  ((PetscObject)A)->name      = NULL;

  /*
    free all the interior data structures from mat
    cannot use PetscUseTypeMethod(A,destroy); because compiler
    thinks it may print NULL type_name and name
  */
  PetscTryTypeMethod(A, destroy);

  PetscCall(PetscFree(A->defaultvectype));
  PetscCall(PetscFree(A->defaultrandtype));
  PetscCall(PetscLayoutDestroy(&A->rmap));
  PetscCall(PetscLayoutDestroy(&A->cmap));
  PetscCall(PetscFunctionListDestroy(&((PetscObject)A)->qlist));
  PetscCall(PetscObjectListDestroy(&((PetscObject)A)->olist));
  PetscCall(PetscComposedQuantitiesDestroy((PetscObject)A));

  /* copy C over to A */
  PetscCall(PetscFree(A->factorprefix));
  PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat)));

  /* return the parts of A we saved */
  ((PetscObject)A)->bops[0]   = Abops;
  A->ops[0]                   = Aops;
  ((PetscObject)A)->refct     = refct;
  ((PetscObject)A)->type_name = mtype;
  ((PetscObject)A)->name      = mname;
  ((PetscObject)A)->prefix    = mprefix;
  ((PetscObject)A)->state     = state + 1;
  A->product                  = product;
  A->redundant                = redundant;

  /* since these two are copied into A we do not want them destroyed in C */
  ((PetscObject)*C)->qlist = NULL;
  ((PetscObject)*C)->olist = NULL;

  PetscCall(PetscHeaderDestroy(C));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*
  Replace A's header with that of C; the C object is then destroyed

  This is essentially code moved from MatDestroy()

  This is somewhat different from MatHeaderMerge(); it would be nice to merge the code

  Used in DM, hence it is declared PETSC_EXTERN
*/
PETSC_EXTERN PetscErrorCode MatHeaderReplace(Mat A, Mat *C)
{
  PetscInt         refct;
  PetscObjectState state;
  struct _p_Mat    buffer;
  MatStencilInfo   stencil;

  PetscFunctionBegin;
  if (A == *C) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheckSameComm(A, 1, *C, 2);
  PetscCheck(((PetscObject)*C)->refct == 1, PetscObjectComm((PetscObject)*C), PETSC_ERR_ARG_WRONGSTATE, "Object C has refct %" PetscInt_FMT " > 1, would leave hanging reference", ((PetscObject)*C)->refct);

  /* swap C and A */
  refct   = ((PetscObject)A)->refct;
  state   = ((PetscObject)A)->state;
  stencil = A->stencil;
  PetscCall(PetscMemcpy(&buffer, A, sizeof(struct _p_Mat)));
  PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat)));
  PetscCall(PetscMemcpy(*C, &buffer, sizeof(struct _p_Mat)));
  ((PetscObject)A)->refct = refct;
  ((PetscObject)A)->state = state + 1;
  A->stencil              = stencil;

  ((PetscObject)*C)->refct = 1;
  PetscCall(MatShellSetOperation(*C, MATOP_DESTROY, (void (*)(void))NULL));
  PetscCall(MatDestroy(C));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatBindToCPU - marks a matrix to temporarily stay on the CPU and perform computations on the CPU

  Logically Collective

  Input Parameters:
+ A   - the matrix
- flg - bind to the CPU if value of `PETSC_TRUE`
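
  Example Usage:
  A sketch; for a host-only matrix type the call is a no-op:
.vb
  PetscCall(MatBindToCPU(A, PETSC_TRUE));  // subsequent operations on A run on the CPU
  // ... e.g. a setup phase that is cheaper on the host ...
  PetscCall(MatBindToCPU(A, PETSC_FALSE)); // allow device execution again
.ve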

  Level: intermediate

.seealso: [](ch_matrices), `Mat`, `MatBoundToCPU()`
@*/
PetscErrorCode MatBindToCPU(Mat A, PetscBool flg)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_DEVICE)
  if (A->boundtocpu == flg) PetscFunctionReturn(PETSC_SUCCESS);
  A->boundtocpu = flg;
  PetscTryTypeMethod(A, bindtocpu, flg);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatBoundToCPU - query if a matrix is bound to the CPU

  Input Parameter:
. A - the matrix

  Output Parameter:
. flg - the logical flag

  Level: intermediate

.seealso: [](ch_matrices), `Mat`, `MatBindToCPU()`
@*/
PetscErrorCode MatBoundToCPU(Mat A, PetscBool *flg)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_DEVICE)
  *flg = A->boundtocpu;
#else
  *flg = PETSC_TRUE;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
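
/* Fallback used by MatSetValuesCOO() when the matrix type has no native COO support: replays
   the indices stashed by MatSetPreallocationCOO_Basic() through MatSetValue() with ADD_VALUES */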
PetscErrorCode MatSetValuesCOO_Basic(Mat A, const PetscScalar coo_v[], InsertMode imode)
{
  IS              is_coo_i, is_coo_j;
  const PetscInt *coo_i, *coo_j;
  PetscInt        n, n_i, n_j;
  PetscScalar     zero = 0.;

  PetscFunctionBegin;
  PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_i", (PetscObject *)&is_coo_i));
  PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_j", (PetscObject *)&is_coo_j));
  PetscCheck(is_coo_i, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_i IS");
  PetscCheck(is_coo_j, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_j IS");
  PetscCall(ISGetLocalSize(is_coo_i, &n_i));
  PetscCall(ISGetLocalSize(is_coo_j, &n_j));
  PetscCheck(n_i == n_j, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong local size %" PetscInt_FMT " != %" PetscInt_FMT, n_i, n_j);
  PetscCall(ISGetIndices(is_coo_i, &coo_i));
  PetscCall(ISGetIndices(is_coo_j, &coo_j));
  if (imode != ADD_VALUES) PetscCall(MatZeroEntries(A));
  for (n = 0; n < n_i; n++) PetscCall(MatSetValue(A, coo_i[n], coo_j[n], coo_v ? coo_v[n] : zero, ADD_VALUES));
  PetscCall(ISRestoreIndices(is_coo_i, &coo_i));
  PetscCall(ISRestoreIndices(is_coo_j, &coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}
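
/* Fallback used by MatSetPreallocationCOO() when the matrix type has no native COO support:
   preallocates A through a MATPREALLOCATOR matrix and stashes the COO indices on A for later
   use by MatSetValuesCOO_Basic() */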
PetscErrorCode MatSetPreallocationCOO_Basic(Mat A, PetscCount ncoo, const PetscInt coo_i[], const PetscInt coo_j[])
{
  Mat         preallocator;
  IS          is_coo_i, is_coo_j;
  PetscScalar zero = 0.0;

  PetscFunctionBegin;
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &preallocator));
  PetscCall(MatSetType(preallocator, MATPREALLOCATOR));
  PetscCall(MatSetSizes(preallocator, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
  PetscCall(MatSetLayouts(preallocator, A->rmap, A->cmap));
  PetscCall(MatSetUp(preallocator));
  for (PetscCount n = 0; n < ncoo; n++) PetscCall(MatSetValue(preallocator, coo_i[n], coo_j[n], zero, INSERT_VALUES));
  PetscCall(MatAssemblyBegin(preallocator, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(preallocator, MAT_FINAL_ASSEMBLY));
  PetscCall(MatPreallocatorPreallocate(preallocator, PETSC_TRUE, A));
  PetscCall(MatDestroy(&preallocator));
  PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_i, PETSC_COPY_VALUES, &is_coo_i));
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_j, PETSC_COPY_VALUES, &is_coo_j));
  PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_i", (PetscObject)is_coo_i));
  PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_j", (PetscObject)is_coo_j));
  PetscCall(ISDestroy(&is_coo_i));
  PetscCall(ISDestroy(&is_coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSetPreallocationCOO - set preallocation for matrices using a coordinate format of the entries with global indices

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. ncoo  - number of entries
. coo_i - row indices
- coo_j - column indices

  Level: beginner

  Notes:
  The indices `coo_i` and `coo_j` may be modified within this function. The caller should not rely on them
  having any specific value after this function returns. The arrays can be freed or reused immediately
  after this function returns.

  Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
  but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
  are allowed and will be properly added or inserted to the matrix, unless the matrix option `MAT_IGNORE_OFF_PROC_ENTRIES`
  is set, in which case remote entries are ignored, or `MAT_NO_OFF_PROC_ENTRIES` is set, in which case an error will be generated.

  If you just want to create a sequential AIJ matrix (`MATSEQAIJ`), and your matrix entries in COO format are unique, you can also use
  `MatCreateSeqAIJFromTriple()`. But that is not recommended for iterative applications.
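
  Example Usage:
  A sketch of the two-phase COO assembly with illustrative index and value arrays:
.vb
  PetscInt    coo_i[] = {0, 1, 1};
  PetscInt    coo_j[] = {0, 0, 1};
  PetscScalar coo_v[] = {1.0, 2.0, 3.0};

  PetscCall(MatSetPreallocationCOO(A, 3, coo_i, coo_j));
  PetscCall(MatSetValuesCOO(A, coo_v, INSERT_VALUES)); // values follow the order of the indices
.ve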

.seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
          `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOOLocal()`,
          `DMSetMatrixPreallocateSkip()`, `MatCreateSeqAIJFromTriple()`
@*/
PetscErrorCode MatSetPreallocationCOO(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscErrorCode (*f)(Mat, PetscCount, const PetscInt[], const PetscInt[]) = NULL;

  PetscFunctionBegin;
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOO_C", &f));

  PetscCall(PetscLogEventBegin(MAT_PreallCOO, A, 0, 0, 0));
  if (f) {
    PetscCall((*f)(A, ncoo, coo_i, coo_j));
  } else { /* allow fallback, very slow */
    PetscCall(MatSetPreallocationCOO_Basic(A, ncoo, coo_i, coo_j));
  }
  PetscCall(PetscLogEventEnd(MAT_PreallCOO, A, 0, 0, 0));
  A->preallocated = PETSC_TRUE;
  A->nonzerostate++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSetPreallocationCOOLocal - set preallocation for matrices using a coordinate format of the entries with local indices

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. ncoo  - number of entries
. coo_i - row indices (local numbering; may be modified)
- coo_j - column indices (local numbering; may be modified)

  Level: beginner

  Notes:
  The local indices are translated using the local to global mapping, thus `MatSetLocalToGlobalMapping()` must have been
  called prior to this function. For matrices created with `DMCreateMatrix()` the local to global mapping is often already provided.

  The indices `coo_i` and `coo_j` may be modified within this function. They might be translated to corresponding global
  indices, but the caller should not rely on them having any specific value after this function returns. The arrays
  can be freed or reused immediately after this function returns.

  Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
  but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
  are allowed and will be properly added or inserted to the matrix.
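
  Example Usage:
  A sketch with local indices, assuming a local-to-global mapping `ltog` has already been created
  for both rows and columns:
.vb
  PetscCall(MatSetLocalToGlobalMapping(A, ltog, ltog));
  PetscCall(MatSetPreallocationCOOLocal(A, ncoo, coo_i, coo_j)); // coo_i/coo_j may be overwritten
  PetscCall(MatSetValuesCOO(A, coo_v, ADD_VALUES));
.ve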

.seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
          `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOO()`,
          `DMSetMatrixPreallocateSkip()`
@*/
PetscErrorCode MatSetPreallocationCOOLocal(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL;

  PetscFunctionBegin;
  PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));

  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOOLocal_C", &f));
  if (f) {
    PetscCall((*f)(A, ncoo, coo_i, coo_j));
    A->nonzerostate++;
  } else {
    ISLocalToGlobalMapping ltog_row, ltog_col;
    PetscCall(MatGetLocalToGlobalMapping(A, &ltog_row, &ltog_col));
    if (ltog_row) PetscCall(ISLocalToGlobalMappingApply(ltog_row, ncoo, coo_i, coo_i));
    if (ltog_col) PetscCall(ISLocalToGlobalMappingApply(ltog_col, ncoo, coo_j, coo_j));
    PetscCall(MatSetPreallocationCOO(A, ncoo, coo_i, coo_j));
  }
  A->preallocated = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetValuesCOO - set values at once in a matrix preallocated using `MatSetPreallocationCOO()`

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. coo_v - the matrix values (can be `NULL`)
- imode - the insert mode

  Level: beginner

  Notes:
  The values must follow the order of the indices prescribed with `MatSetPreallocationCOO()` or `MatSetPreallocationCOOLocal()`.

  When repeated entries are specified in the COO indices the `coo_v` values are first properly summed, regardless of the value of `imode`.
  The `imode` flag indicates if `coo_v` must be added to the current values of the matrix (`ADD_VALUES`) or overwritten (`INSERT_VALUES`).

  `MatAssemblyBegin()` and `MatAssemblyEnd()` do not need to be called after this routine. It automatically handles the assembly process.
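
  Example Usage:
  For instance, if `MatSetPreallocationCOO()` was given the repeated entry coo_i = {0, 0} and
  coo_j = {0, 0}, the two values are summed before insertion:
.vb
  const PetscScalar v[] = {1.0, 2.0};

  PetscCall(MatSetValuesCOO(A, v, INSERT_VALUES)); // sets A(0,0) to 3.0
.ve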

.seealso: [](ch_matrices), `Mat`, `MatSetPreallocationCOO()`, `MatSetPreallocationCOOLocal()`, `InsertMode`, `INSERT_VALUES`, `ADD_VALUES`
@*/
PetscErrorCode MatSetValuesCOO(Mat A, const PetscScalar coo_v[], InsertMode imode)
{
  PetscErrorCode (*f)(Mat, const PetscScalar[], InsertMode) = NULL;

  PetscFunctionBegin;
  MatCheckPreallocated(A, 1);

  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetValuesCOO_C", &f));
  PetscCall(PetscLogEventBegin(MAT_SetVCOO, A, 0, 0, 0));
  if (f) {
    PetscCall((*f)(A, coo_v, imode));
  } else { /* allow fallback */
    PetscCall(MatSetValuesCOO_Basic(A, coo_v, imode));
  }
  PetscCall(PetscLogEventEnd(MAT_SetVCOO, A, 0, 0, 0));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetBindingPropagates - Sets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameters:
+ A   - the matrix
- flg - flag indicating whether the boundtocpu flag should be propagated

  Level: developer

  Notes:
  If the value of `flg` is set to true, the following will occur
+ `MatCreateSubMatrices()` and `MatCreateRedundantMatrix()` - bind created matrices to CPU if the input matrix is bound to the CPU.
- `MatCreateVecs()` - bind created vectors to CPU if the input matrix is bound to the CPU.

  The bindingpropagates flag itself is also propagated by the above routines.

  Developer Note:
  If the fine-scale `DMDA` has the `-dm_bind_below` option set to true, then `DMCreateInterpolationScale()` calls `MatSetBindingPropagates()`
  on the restriction/interpolation operator to set the bindingpropagates flag to true.

.seealso: [](ch_matrices), `Mat`, `VecSetBindingPropagates()`, `MatGetBindingPropagates()`
@*/
PetscErrorCode MatSetBindingPropagates(Mat A, PetscBool flg)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
  A->bindingpropagates = flg;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatGetBindingPropagates - Gets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameter:
. A - the matrix

  Output Parameter:
. flg - flag indicating whether the boundtocpu flag will be propagated

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSetBindingPropagates()`
@*/
PetscErrorCode MatGetBindingPropagates(Mat A, PetscBool *flg)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
  *flg = A->bindingpropagates;
#else
  *flg = PETSC_FALSE;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}