Actual source code: mpisell.c

  1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2: #include <../src/mat/impls/sell/mpi/mpisell.h>
  3: #include <petsc/private/vecimpl.h>
  4: #include <petsc/private/isimpl.h>
  5: #include <petscblaslapack.h>
  6: #include <petscsf.h>

  8: /*MC
  9:    MATSELL - MATSELL = "sell" - A matrix type to be used for sparse matrices.

 11:    This matrix type is identical to `MATSEQSELL` when constructed with a single process communicator,
 12:    and `MATMPISELL` otherwise.  As a result, for single process communicators,
 13:   `MatSeqSELLSetPreallocation()` is supported, and similarly `MatMPISELLSetPreallocation()` is supported
 14:   for communicators controlling multiple processes.  It is recommended that you call both of
 15:   the above preallocation routines for simplicity.

 17:    Options Database Keys:
 18: . -mat_type sell - sets the matrix type to `MATSELL` during a call to `MatSetFromOptions()`

 20:   Level: beginner

 22: .seealso: `Mat`, `MATAIJ`, `MATBAIJ`, `MATSBAIJ`, `MatCreateSELL()`, `MatCreateSeqSELL()`, `MATSEQSELL`, `MATMPISELL`
 23: M*/
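/*
  Illustrative sketch (editorial addition, not part of the original source): one way a user
  could create a MATSELL matrix and call both preallocation routines, as recommended in the
  manual page above.  The helper name ExampleCreateSELL and the size/row-length values are
  hypothetical; the PETSc routines themselves are standard API.
*/
static PetscErrorCode ExampleCreateSELL(MPI_Comm comm, Mat *newmat)
{
  Mat      A;
  PetscInt n = 8; /* local rows and columns; arbitrary example value */

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, &A));
  PetscCall(MatSetSizes(A, n, n, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetType(A, MATSELL)); /* becomes MATSEQSELL on one process, MATMPISELL otherwise */
  PetscCall(MatSetFromOptions(A));   /* allows -mat_type sell (or another type) from the options database */
  /* call both preallocation routines; the one that does not match the actual type is ignored */
  PetscCall(MatSeqSELLSetPreallocation(A, 5, NULL));
  PetscCall(MatMPISELLSetPreallocation(A, 5, NULL, 2, NULL));
  *newmat = A;
  PetscFunctionReturn(PETSC_SUCCESS);
}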

 25: PetscErrorCode MatDiagonalSet_MPISELL(Mat Y, Vec D, InsertMode is)
 26: {
 27:   Mat_MPISELL *sell = (Mat_MPISELL *)Y->data;

 29:   PetscFunctionBegin;
 30:   if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
 31:     PetscCall(MatDiagonalSet(sell->A, D, is));
 32:   } else {
 33:     PetscCall(MatDiagonalSet_Default(Y, D, is));
 34:   }
 35:   PetscFunctionReturn(PETSC_SUCCESS);
 36: }

 38: /*
 39:   Local utility routine that creates a mapping from the global column
 40: number to the local number in the off-diagonal part of the local
 41: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable, at
 42: a slightly higher hash table lookup cost; without it, it is not scalable (each
 43: process holds an order-N integer array) but access is fast.
 44: */
 45: PetscErrorCode MatCreateColmap_MPISELL_Private(Mat mat)
 46: {
 47:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
 48:   PetscInt     n    = sell->B->cmap->n, i;

 50:   PetscFunctionBegin;
 51:   PetscCheck(sell->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPISELL Matrix was assembled but is missing garray");
 52: #if defined(PETSC_USE_CTABLE)
 53:   PetscCall(PetscHMapICreateWithSize(n, &sell->colmap));
 54:   for (i = 0; i < n; i++) PetscCall(PetscHMapISet(sell->colmap, sell->garray[i] + 1, i + 1));
 55: #else
 56:   PetscCall(PetscCalloc1(mat->cmap->N + 1, &sell->colmap));
 57:   for (i = 0; i < n; i++) sell->colmap[sell->garray[i]] = i + 1;
 58: #endif
 59:   PetscFunctionReturn(PETSC_SUCCESS);
 60: }
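/*
  Editorial note (not part of the original source): the map built above translates a global
  column index gcol that lies outside the local diagonal block into a local column of B.
  The lookup pattern used later in this file is, schematically,

    #if defined(PETSC_USE_CTABLE)
      PetscCall(PetscHMapIGetWithDefault(sell->colmap, gcol + 1, 0, &col));
      col--;                        (indices are stored shifted by one so that 0 means "absent")
    #else
      col = sell->colmap[gcol] - 1; (an order-N array indexed directly by the global column)
    #endif

  so a negative col signals that gcol is not (yet) present in the off-diagonal part.
*/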

 62: #define MatSetValues_SeqSELL_A_Private(row, col, value, addv, orow, ocol) \
 63:   { \
 64:     if (col <= lastcol1) low1 = 0; \
 65:     else high1 = nrow1; \
 66:     lastcol1 = col; \
 67:     while (high1 - low1 > 5) { \
 68:       t = (low1 + high1) / 2; \
 69:       if (*(cp1 + 8 * t) > col) high1 = t; \
 70:       else low1 = t; \
 71:     } \
 72:     for (_i = low1; _i < high1; _i++) { \
 73:       if (*(cp1 + 8 * _i) > col) break; \
 74:       if (*(cp1 + 8 * _i) == col) { \
 75:         if (addv == ADD_VALUES) *(vp1 + 8 * _i) += value; \
 76:         else *(vp1 + 8 * _i) = value; \
 77:         goto a_noinsert; \
 78:       } \
 79:     } \
 80:     if (value == 0.0 && ignorezeroentries) { \
 81:       low1  = 0; \
 82:       high1 = nrow1; \
 83:       goto a_noinsert; \
 84:     } \
 85:     if (nonew == 1) { \
 86:       low1  = 0; \
 87:       high1 = nrow1; \
 88:       goto a_noinsert; \
 89:     } \
 90:     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
 91:     MatSeqXSELLReallocateSELL(A, am, 1, nrow1, a->sliidx, row / 8, row, col, a->colidx, a->val, cp1, vp1, nonew, MatScalar); \
 92:     /* shift up all the later entries in this row */ \
 93:     for (ii = nrow1 - 1; ii >= _i; ii--) { \
 94:       *(cp1 + 8 * (ii + 1)) = *(cp1 + 8 * ii); \
 95:       *(vp1 + 8 * (ii + 1)) = *(vp1 + 8 * ii); \
 96:     } \
 97:     *(cp1 + 8 * _i) = col; \
 98:     *(vp1 + 8 * _i) = value; \
 99:     a->nz++; \
100:     nrow1++; \
101:     A->nonzerostate++; \
102:   a_noinsert:; \
103:     a->rlen[row] = nrow1; \
104:   }

106: #define MatSetValues_SeqSELL_B_Private(row, col, value, addv, orow, ocol) \
107:   { \
108:     if (col <= lastcol2) low2 = 0; \
109:     else high2 = nrow2; \
110:     lastcol2 = col; \
111:     while (high2 - low2 > 5) { \
112:       t = (low2 + high2) / 2; \
113:       if (*(cp2 + 8 * t) > col) high2 = t; \
114:       else low2 = t; \
115:     } \
116:     for (_i = low2; _i < high2; _i++) { \
117:       if (*(cp2 + 8 * _i) > col) break; \
118:       if (*(cp2 + 8 * _i) == col) { \
119:         if (addv == ADD_VALUES) *(vp2 + 8 * _i) += value; \
120:         else *(vp2 + 8 * _i) = value; \
121:         goto b_noinsert; \
122:       } \
123:     } \
124:     if (value == 0.0 && ignorezeroentries) { \
125:       low2  = 0; \
126:       high2 = nrow2; \
127:       goto b_noinsert; \
128:     } \
129:     if (nonew == 1) { \
130:       low2  = 0; \
131:       high2 = nrow2; \
132:       goto b_noinsert; \
133:     } \
134:     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
135:     MatSeqXSELLReallocateSELL(B, bm, 1, nrow2, b->sliidx, row / 8, row, col, b->colidx, b->val, cp2, vp2, nonew, MatScalar); \
136:     /* shift up all the later entries in this row */ \
137:     for (ii = nrow2 - 1; ii >= _i; ii--) { \
138:       *(cp2 + 8 * (ii + 1)) = *(cp2 + 8 * ii); \
139:       *(vp2 + 8 * (ii + 1)) = *(vp2 + 8 * ii); \
140:     } \
141:     *(cp2 + 8 * _i) = col; \
142:     *(vp2 + 8 * _i) = value; \
143:     b->nz++; \
144:     nrow2++; \
145:     B->nonzerostate++; \
146:   b_noinsert:; \
147:     b->rlen[row] = nrow2; \
148:   }
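/*
  Editorial note (not part of the original source): the two macros above, and the routine
  below, assume the sliced ELLPACK storage of MATSEQSELL with a slice height of 8.  Rows are
  grouped into slices of 8; sliidx[s] is the offset of slice s in colidx/val, and within a
  slice the entries of the 8 rows are interleaved, so consecutive entries of one row are 8
  apart:

    shift = sliidx[row >> 3] + (row & 0x07);   first stored entry of this row
    cp    = colidx + shift;                    j-th column index of the row is cp[8*j]
    vp    = val    + shift;                    j-th value of the row is       vp[8*j]

  For example, local row 11 lives in slice 1 (11 >> 3) at lane 3 (11 & 0x07).
*/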

150: PetscErrorCode MatSetValues_MPISELL(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
151: {
152:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
153:   PetscScalar  value;
154:   PetscInt     i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend, shift1, shift2;
155:   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
156:   PetscBool    roworiented = sell->roworiented;

158:   /* Some Variables required in the macro */
159:   Mat          A                 = sell->A;
160:   Mat_SeqSELL *a                 = (Mat_SeqSELL *)A->data;
161:   PetscBool    ignorezeroentries = a->ignorezeroentries, found;
162:   Mat          B                 = sell->B;
163:   Mat_SeqSELL *b                 = (Mat_SeqSELL *)B->data;
164:   PetscInt    *cp1, *cp2, ii, _i, nrow1, nrow2, low1, high1, low2, high2, t, lastcol1, lastcol2;
165:   MatScalar   *vp1, *vp2;

167:   PetscFunctionBegin;
168:   for (i = 0; i < m; i++) {
169:     if (im[i] < 0) continue;
170:     PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
171:     if (im[i] >= rstart && im[i] < rend) {
172:       row      = im[i] - rstart;
173:       lastcol1 = -1;
174:       shift1   = a->sliidx[row >> 3] + (row & 0x07); /* starting index of the row */
175:       cp1      = a->colidx + shift1;
176:       vp1      = a->val + shift1;
177:       nrow1    = a->rlen[row];
178:       low1     = 0;
179:       high1    = nrow1;
180:       lastcol2 = -1;
181:       shift2   = b->sliidx[row >> 3] + (row & 0x07); /* starting index of the row */
182:       cp2      = b->colidx + shift2;
183:       vp2      = b->val + shift2;
184:       nrow2    = b->rlen[row];
185:       low2     = 0;
186:       high2    = nrow2;

188:       for (j = 0; j < n; j++) {
189:         if (roworiented) value = v[i * n + j];
190:         else value = v[i + j * m];
191:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
192:         if (in[j] >= cstart && in[j] < cend) {
193:           col = in[j] - cstart;
194:           MatSetValue_SeqSELL_Private(A, row, col, value, addv, im[i], in[j], cp1, vp1, lastcol1, low1, high1); /* set one value */
195:         } else if (in[j] < 0) {
196:           continue;
197:         } else {
198:           PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
199:           if (mat->was_assembled) {
200:             if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
201: #if defined(PETSC_USE_CTABLE)
202:             PetscCall(PetscHMapIGetWithDefault(sell->colmap, in[j] + 1, 0, &col));
203:             col--;
204: #else
205:             col = sell->colmap[in[j]] - 1;
206: #endif
207:             if (col < 0 && !((Mat_SeqSELL *)(sell->B->data))->nonew) {
208:               PetscCall(MatDisAssemble_MPISELL(mat));
209:               col = in[j];
 210:               /* Reinitialize the variables required by the MatSetValue_SeqSELL_Private() call on B below */
211:               B      = sell->B;
212:               b      = (Mat_SeqSELL *)B->data;
213:               shift2 = b->sliidx[row >> 3] + (row & 0x07); /* starting index of the row */
214:               cp2    = b->colidx + shift2;
215:               vp2    = b->val + shift2;
216:               nrow2  = b->rlen[row];
217:               low2   = 0;
218:               high2  = nrow2;
219:             } else {
220:               PetscCheck(col >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
221:             }
222:           } else col = in[j];
223:           MatSetValue_SeqSELL_Private(B, row, col, value, addv, im[i], in[j], cp2, vp2, lastcol2, low2, high2); /* set one value */
224:         }
225:       }
226:     } else {
227:       PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
228:       if (!sell->donotstash) {
229:         mat->assembled = PETSC_FALSE;
230:         if (roworiented) {
231:           PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
232:         } else {
233:           PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
234:         }
235:       }
236:     }
237:   }
238:   PetscFunctionReturn(PETSC_SUCCESS);
239: }
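/*
  Illustrative sketch (editorial addition, not part of the original source): caller-side use
  of the routine above.  Entries in locally owned rows are inserted immediately into sell->A
  or sell->B; entries in rows owned by another process are stashed and only communicated by
  the assembly routines below, so the MatAssemblyBegin/MatAssemblyEnd pair is mandatory.
  The helper name ExampleInsertEntries is hypothetical and assumes a preallocated, square
  MATMPISELL matrix with at least one local row.
*/
static PetscErrorCode ExampleInsertEntries(Mat A)
{
  PetscInt    rstart, rend, row, col;
  PetscScalar one = 1.0;

  PetscFunctionBegin;
  PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
  row = rend - 1; /* last locally owned row: inserted directly */
  col = rstart;
  PetscCall(MatSetValues(A, 1, &row, 1, &col, &one, INSERT_VALUES));
  if (rstart > 0) { /* a row owned by the previous rank: goes through the stash until assembly */
    row = rstart - 1;
    PetscCall(MatSetValues(A, 1, &row, 1, &col, &one, INSERT_VALUES));
  }
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(PETSC_SUCCESS);
}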

241: PetscErrorCode MatGetValues_MPISELL(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
242: {
243:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
244:   PetscInt     i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
245:   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;

247:   PetscFunctionBegin;
248:   for (i = 0; i < m; i++) {
249:     if (idxm[i] < 0) continue; /* negative row */
250:     PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
251:     if (idxm[i] >= rstart && idxm[i] < rend) {
252:       row = idxm[i] - rstart;
253:       for (j = 0; j < n; j++) {
254:         if (idxn[j] < 0) continue; /* negative column */
255:         PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
256:         if (idxn[j] >= cstart && idxn[j] < cend) {
257:           col = idxn[j] - cstart;
258:           PetscCall(MatGetValues(sell->A, 1, &row, 1, &col, v + i * n + j));
259:         } else {
260:           if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
261: #if defined(PETSC_USE_CTABLE)
262:           PetscCall(PetscHMapIGetWithDefault(sell->colmap, idxn[j] + 1, 0, &col));
263:           col--;
264: #else
265:           col = sell->colmap[idxn[j]] - 1;
266: #endif
267:           if ((col < 0) || (sell->garray[col] != idxn[j])) *(v + i * n + j) = 0.0;
268:           else PetscCall(MatGetValues(sell->B, 1, &row, 1, &col, v + i * n + j));
269:         }
270:       }
271:     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported");
272:   }
273:   PetscFunctionReturn(PETSC_SUCCESS);
274: }

276: extern PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat, Vec, Vec);

278: PetscErrorCode MatAssemblyBegin_MPISELL(Mat mat, MatAssemblyType mode)
279: {
280:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
281:   PetscInt     nstash, reallocs;

283:   PetscFunctionBegin;
284:   if (sell->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);

286:   PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
287:   PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
288:   PetscCall(PetscInfo(sell->A, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
289:   PetscFunctionReturn(PETSC_SUCCESS);
290: }

292: PetscErrorCode MatAssemblyEnd_MPISELL(Mat mat, MatAssemblyType mode)
293: {
294:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
295:   PetscMPIInt  n;
296:   PetscInt     i, flg;
297:   PetscInt    *row, *col;
298:   PetscScalar *val;
299:   PetscBool    other_disassembled;
300:   /* do not use 'b = (Mat_SeqSELL*)sell->B->data' as B can be reset in disassembly */
301:   PetscFunctionBegin;
302:   if (!sell->donotstash && !mat->nooffprocentries) {
303:     while (1) {
304:       PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
305:       if (!flg) break;

307:       for (i = 0; i < n; i++) { /* assemble one by one */
308:         PetscCall(MatSetValues_MPISELL(mat, 1, row + i, 1, col + i, val + i, mat->insertmode));
309:       }
310:     }
311:     PetscCall(MatStashScatterEnd_Private(&mat->stash));
312:   }
313:   PetscCall(MatAssemblyBegin(sell->A, mode));
314:   PetscCall(MatAssemblyEnd(sell->A, mode));

316:   /*
317:      determine if any processor has disassembled, if so we must
318:      also disassemble ourselves, in order that we may reassemble.
319:   */
320:   /*
321:      if nonzero structure of submatrix B cannot change then we know that
322:      no processor disassembled thus we can skip this stuff
323:   */
324:   if (!((Mat_SeqSELL *)sell->B->data)->nonew) {
325:     PetscCall(MPIU_Allreduce(&mat->was_assembled, &other_disassembled, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
326:     PetscCheck(!mat->was_assembled || other_disassembled, PETSC_COMM_SELF, PETSC_ERR_SUP, "MatDisAssemble not implemented yet");
327:   }
328:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPISELL(mat));
329:   /*
330:   PetscCall(MatSetOption(sell->B,MAT_USE_INODES,PETSC_FALSE));
331:   */
332:   PetscCall(MatAssemblyBegin(sell->B, mode));
333:   PetscCall(MatAssemblyEnd(sell->B, mode));
334:   PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
335:   sell->rowvalues = NULL;
336:   PetscCall(VecDestroy(&sell->diag));

338:   /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
339:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqSELL *)(sell->A->data))->nonew) {
340:     PetscObjectState state = sell->A->nonzerostate + sell->B->nonzerostate;
341:     PetscCall(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
342:   }
343:   PetscFunctionReturn(PETSC_SUCCESS);
344: }

346: PetscErrorCode MatZeroEntries_MPISELL(Mat A)
347: {
348:   Mat_MPISELL *l = (Mat_MPISELL *)A->data;

350:   PetscFunctionBegin;
351:   PetscCall(MatZeroEntries(l->A));
352:   PetscCall(MatZeroEntries(l->B));
353:   PetscFunctionReturn(PETSC_SUCCESS);
354: }

356: PetscErrorCode MatMult_MPISELL(Mat A, Vec xx, Vec yy)
357: {
358:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
359:   PetscInt     nt;

361:   PetscFunctionBegin;
362:   PetscCall(VecGetLocalSize(xx, &nt));
363:   PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
364:   PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD)); /* start communicating the needed ghost values of xx */
365:   PetscCall((*a->A->ops->mult)(a->A, xx, yy)); /* diagonal block times the local part of xx, overlapped with the scatter */
366:   PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD)); /* finish receiving the ghost values into lvec */
367:   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, yy, yy)); /* add the off-diagonal block times the ghost values */
368:   PetscFunctionReturn(PETSC_SUCCESS);
369: }

371: PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat A, Vec bb, Vec xx)
372: {
373:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

375:   PetscFunctionBegin;
376:   PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
377:   PetscFunctionReturn(PETSC_SUCCESS);
378: }

380: PetscErrorCode MatMultAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
381: {
382:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

384:   PetscFunctionBegin;
385:   PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
386:   PetscCall((*a->A->ops->multadd)(a->A, xx, yy, zz));
387:   PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
388:   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, zz, zz));
389:   PetscFunctionReturn(PETSC_SUCCESS);
390: }

392: PetscErrorCode MatMultTranspose_MPISELL(Mat A, Vec xx, Vec yy)
393: {
394:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

396:   PetscFunctionBegin;
397:   /* do nondiagonal part */
398:   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
399:   /* do local part */
400:   PetscCall((*a->A->ops->multtranspose)(a->A, xx, yy));
401:   /* add partial results together */
402:   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
403:   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
404:   PetscFunctionReturn(PETSC_SUCCESS);
405: }

407: PetscErrorCode MatIsTranspose_MPISELL(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
408: {
409:   MPI_Comm     comm;
410:   Mat_MPISELL *Asell = (Mat_MPISELL *)Amat->data, *Bsell;
411:   Mat          Adia  = Asell->A, Bdia, Aoff, Boff, *Aoffs, *Boffs;
412:   IS           Me, Notme;
413:   PetscInt     M, N, first, last, *notme, i;
414:   PetscMPIInt  size;

416:   PetscFunctionBegin;
417:   /* Easy test: check that the diagonal blocks are transposes of each other */
418:   Bsell = (Mat_MPISELL *)Bmat->data;
419:   Bdia  = Bsell->A;
420:   PetscCall(MatIsTranspose(Adia, Bdia, tol, f));
421:   if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
422:   PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
423:   PetscCallMPI(MPI_Comm_size(comm, &size));
424:   if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);

426:   /* Hard test: off-diagonal block. This requires a MatCreateSubMatrices() call. */
427:   PetscCall(MatGetSize(Amat, &M, &N));
428:   PetscCall(MatGetOwnershipRange(Amat, &first, &last));
429:   PetscCall(PetscMalloc1(N - last + first, &notme));
430:   for (i = 0; i < first; i++) notme[i] = i;
431:   for (i = last; i < M; i++) notme[i - last + first] = i;
432:   PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
433:   PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
434:   PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
435:   Aoff = Aoffs[0];
436:   PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
437:   Boff = Boffs[0];
438:   PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
439:   PetscCall(MatDestroyMatrices(1, &Aoffs));
440:   PetscCall(MatDestroyMatrices(1, &Boffs));
441:   PetscCall(ISDestroy(&Me));
442:   PetscCall(ISDestroy(&Notme));
443:   PetscCall(PetscFree(notme));
444:   PetscFunctionReturn(PETSC_SUCCESS);
445: }

447: PetscErrorCode MatMultTransposeAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
448: {
449:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

451:   PetscFunctionBegin;
452:   /* do nondiagonal part */
453:   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
454:   /* do local part */
455:   PetscCall((*a->A->ops->multtransposeadd)(a->A, xx, yy, zz));
456:   /* add partial results together */
457:   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
458:   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
459:   PetscFunctionReturn(PETSC_SUCCESS);
460: }

462: /*
463:   This only works correctly for square matrices where the subblock A->A is the
464:    diagonal block
465: */
466: PetscErrorCode MatGetDiagonal_MPISELL(Mat A, Vec v)
467: {
468:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

470:   PetscFunctionBegin;
471:   PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
472:   PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
473:   PetscCall(MatGetDiagonal(a->A, v));
474:   PetscFunctionReturn(PETSC_SUCCESS);
475: }

477: PetscErrorCode MatScale_MPISELL(Mat A, PetscScalar aa)
478: {
479:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

481:   PetscFunctionBegin;
482:   PetscCall(MatScale(a->A, aa));
483:   PetscCall(MatScale(a->B, aa));
484:   PetscFunctionReturn(PETSC_SUCCESS);
485: }

487: PetscErrorCode MatDestroy_MPISELL(Mat mat)
488: {
489:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

491:   PetscFunctionBegin;
492: #if defined(PETSC_USE_LOG)
493:   PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
494: #endif
495:   PetscCall(MatStashDestroy_Private(&mat->stash));
496:   PetscCall(VecDestroy(&sell->diag));
497:   PetscCall(MatDestroy(&sell->A));
498:   PetscCall(MatDestroy(&sell->B));
499: #if defined(PETSC_USE_CTABLE)
500:   PetscCall(PetscHMapIDestroy(&sell->colmap));
501: #else
502:   PetscCall(PetscFree(sell->colmap));
503: #endif
504:   PetscCall(PetscFree(sell->garray));
505:   PetscCall(VecDestroy(&sell->lvec));
506:   PetscCall(VecScatterDestroy(&sell->Mvctx));
507:   PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
508:   PetscCall(PetscFree(sell->ld));
509:   PetscCall(PetscFree(mat->data));

511:   PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
512:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
513:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
514:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
515:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPISELLSetPreallocation_C", NULL));
516:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpiaij_C", NULL));
517:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
518:   PetscFunctionReturn(PETSC_SUCCESS);
519: }

521: #include <petscdraw.h>
522: PetscErrorCode MatView_MPISELL_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
523: {
524:   Mat_MPISELL      *sell = (Mat_MPISELL *)mat->data;
525:   PetscMPIInt       rank = sell->rank, size = sell->size;
526:   PetscBool         isdraw, iascii, isbinary;
527:   PetscViewer       sviewer;
528:   PetscViewerFormat format;

530:   PetscFunctionBegin;
531:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
532:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
533:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
534:   if (iascii) {
535:     PetscCall(PetscViewerGetFormat(viewer, &format));
536:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
537:       MatInfo   info;
538:       PetscInt *inodes;

540:       PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
541:       PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
542:       PetscCall(MatInodeGetInodeSizes(sell->A, NULL, &inodes, NULL));
543:       PetscCall(PetscViewerASCIIPushSynchronized(viewer));
544:       if (!inodes) {
545:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
546:                                                      (PetscInt)info.nz_allocated, (PetscInt)info.memory));
547:       } else {
548:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
549:                                                      (PetscInt)info.nz_allocated, (PetscInt)info.memory));
550:       }
551:       PetscCall(MatGetInfo(sell->A, MAT_LOCAL, &info));
552:       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
553:       PetscCall(MatGetInfo(sell->B, MAT_LOCAL, &info));
554:       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
555:       PetscCall(PetscViewerFlush(viewer));
556:       PetscCall(PetscViewerASCIIPopSynchronized(viewer));
557:       PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
558:       PetscCall(VecScatterView(sell->Mvctx, viewer));
559:       PetscFunctionReturn(PETSC_SUCCESS);
560:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
561:       PetscInt inodecount, inodelimit, *inodes;
562:       PetscCall(MatInodeGetInodeSizes(sell->A, &inodecount, &inodes, &inodelimit));
563:       if (inodes) {
564:         PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
565:       } else {
566:         PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
567:       }
568:       PetscFunctionReturn(PETSC_SUCCESS);
569:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
570:       PetscFunctionReturn(PETSC_SUCCESS);
571:     }
572:   } else if (isbinary) {
573:     if (size == 1) {
574:       PetscCall(PetscObjectSetName((PetscObject)sell->A, ((PetscObject)mat)->name));
575:       PetscCall(MatView(sell->A, viewer));
576:     } else {
577:       /* PetscCall(MatView_MPISELL_Binary(mat,viewer)); */
578:     }
579:     PetscFunctionReturn(PETSC_SUCCESS);
580:   } else if (isdraw) {
581:     PetscDraw draw;
582:     PetscBool isnull;
583:     PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
584:     PetscCall(PetscDrawIsNull(draw, &isnull));
585:     if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
586:   }

588:   {
589:     /* assemble the entire matrix onto first processor. */
590:     Mat          A;
591:     Mat_SeqSELL *Aloc;
592:     PetscInt     M = mat->rmap->N, N = mat->cmap->N, *acolidx, row, col, i, j;
593:     MatScalar   *aval;
594:     PetscBool    isnonzero;

596:     PetscCall(MatCreate(PetscObjectComm((PetscObject)mat), &A));
597:     if (rank == 0) {
598:       PetscCall(MatSetSizes(A, M, N, M, N));
599:     } else {
600:       PetscCall(MatSetSizes(A, 0, 0, M, N));
601:     }
602:     /* This is just a temporary matrix, so explicitly using MATMPISELL is probably best */
603:     PetscCall(MatSetType(A, MATMPISELL));
604:     PetscCall(MatMPISELLSetPreallocation(A, 0, NULL, 0, NULL));
605:     PetscCall(MatSetOption(A, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_FALSE));

607:     /* copy over the A part */
608:     Aloc    = (Mat_SeqSELL *)sell->A->data;
609:     acolidx = Aloc->colidx;
610:     aval    = Aloc->val;
611:     for (i = 0; i < Aloc->totalslices; i++) { /* loop over slices */
612:       for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
613:         isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / 8 < Aloc->rlen[(i << 3) + (j & 0x07)]);
614:         if (isnonzero) {                                   /* skip padded (unused) slots in the slice */
615:           row = (i << 3) + (j & 0x07) + mat->rmap->rstart; /* i<<3 is the starting row of this slice */
616:           col = *acolidx + mat->rmap->rstart;
617:           PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
618:         }
619:         aval++;
620:         acolidx++;
621:       }
622:     }

624:     /* copy over the B part */
625:     Aloc    = (Mat_SeqSELL *)sell->B->data;
626:     acolidx = Aloc->colidx;
627:     aval    = Aloc->val;
628:     for (i = 0; i < Aloc->totalslices; i++) {
629:       for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
630:         isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / 8 < Aloc->rlen[(i << 3) + (j & 0x07)]);
631:         if (isnonzero) {
632:           row = (i << 3) + (j & 0x07) + mat->rmap->rstart;
633:           col = sell->garray[*acolidx];
634:           PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
635:         }
636:         aval++;
637:         acolidx++;
638:       }
639:     }

641:     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
642:     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
643:     /*
644:        Everyone has to call to draw the matrix since the graphics waits are
645:        synchronized across all processors that share the PetscDraw object
646:     */
647:     PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
648:     if (rank == 0) {
649:       PetscCall(PetscObjectSetName((PetscObject)((Mat_MPISELL *)(A->data))->A, ((PetscObject)mat)->name));
650:       PetscCall(MatView_SeqSELL(((Mat_MPISELL *)(A->data))->A, sviewer));
651:     }
652:     PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
653:     PetscCall(PetscViewerFlush(viewer));
654:     PetscCall(MatDestroy(&A));
655:   }
656:   PetscFunctionReturn(PETSC_SUCCESS);
657: }

659: PetscErrorCode MatView_MPISELL(Mat mat, PetscViewer viewer)
660: {
661:   PetscBool iascii, isdraw, issocket, isbinary;

663:   PetscFunctionBegin;
664:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
665:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
666:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
667:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket));
668:   if (iascii || isdraw || isbinary || issocket) PetscCall(MatView_MPISELL_ASCIIorDraworSocket(mat, viewer));
669:   PetscFunctionReturn(PETSC_SUCCESS);
670: }

672: PetscErrorCode MatGetGhosts_MPISELL(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[])
673: {
674:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

676:   PetscFunctionBegin;
677:   PetscCall(MatGetSize(sell->B, NULL, nghosts));
678:   if (ghosts) *ghosts = sell->garray;
679:   PetscFunctionReturn(PETSC_SUCCESS);
680: }
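/*
  Illustrative sketch (editorial addition, not part of the original source): retrieving the
  global indices of the ghost (off-process) columns, which are exactly the entries of
  sell->garray.  The helper name ExampleListGhosts is hypothetical.
*/
static PetscErrorCode ExampleListGhosts(Mat A)
{
  PetscInt        nghosts, i;
  const PetscInt *ghosts;

  PetscFunctionBegin;
  PetscCall(MatGetGhosts(A, &nghosts, &ghosts));
  for (i = 0; i < nghosts; i++) PetscCall(PetscPrintf(PETSC_COMM_SELF, "ghost column %" PetscInt_FMT "\n", ghosts[i]));
  PetscFunctionReturn(PETSC_SUCCESS);
}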

682: PetscErrorCode MatGetInfo_MPISELL(Mat matin, MatInfoType flag, MatInfo *info)
683: {
684:   Mat_MPISELL   *mat = (Mat_MPISELL *)matin->data;
685:   Mat            A = mat->A, B = mat->B;
686:   PetscLogDouble isend[5], irecv[5];

688:   PetscFunctionBegin;
689:   info->block_size = 1.0;
690:   PetscCall(MatGetInfo(A, MAT_LOCAL, info));

692:   isend[0] = info->nz_used;
693:   isend[1] = info->nz_allocated;
694:   isend[2] = info->nz_unneeded;
695:   isend[3] = info->memory;
696:   isend[4] = info->mallocs;

698:   PetscCall(MatGetInfo(B, MAT_LOCAL, info));

700:   isend[0] += info->nz_used;
701:   isend[1] += info->nz_allocated;
702:   isend[2] += info->nz_unneeded;
703:   isend[3] += info->memory;
704:   isend[4] += info->mallocs;
705:   if (flag == MAT_LOCAL) {
706:     info->nz_used      = isend[0];
707:     info->nz_allocated = isend[1];
708:     info->nz_unneeded  = isend[2];
709:     info->memory       = isend[3];
710:     info->mallocs      = isend[4];
711:   } else if (flag == MAT_GLOBAL_MAX) {
712:     PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin)));

714:     info->nz_used      = irecv[0];
715:     info->nz_allocated = irecv[1];
716:     info->nz_unneeded  = irecv[2];
717:     info->memory       = irecv[3];
718:     info->mallocs      = irecv[4];
719:   } else if (flag == MAT_GLOBAL_SUM) {
720:     PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin)));

722:     info->nz_used      = irecv[0];
723:     info->nz_allocated = irecv[1];
724:     info->nz_unneeded  = irecv[2];
725:     info->memory       = irecv[3];
726:     info->mallocs      = irecv[4];
727:   }
728:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
729:   info->fill_ratio_needed = 0;
730:   info->factor_mallocs    = 0;
731:   PetscFunctionReturn(PETSC_SUCCESS);
732: }
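/*
  Illustrative sketch (editorial addition, not part of the original source): querying the
  information aggregated above, here summed over all ranks.  The helper name
  ExampleReportNonzeros is hypothetical.
*/
static PetscErrorCode ExampleReportNonzeros(Mat A)
{
  MatInfo info;

  PetscFunctionBegin;
  PetscCall(MatGetInfo(A, MAT_GLOBAL_SUM, &info));
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "nz used %g, nz allocated %g\n", info.nz_used, info.nz_allocated));
  PetscFunctionReturn(PETSC_SUCCESS);
}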

734: PetscErrorCode MatSetOption_MPISELL(Mat A, MatOption op, PetscBool flg)
735: {
736:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

738:   PetscFunctionBegin;
739:   switch (op) {
740:   case MAT_NEW_NONZERO_LOCATIONS:
741:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
742:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
743:   case MAT_KEEP_NONZERO_PATTERN:
744:   case MAT_NEW_NONZERO_LOCATION_ERR:
745:   case MAT_USE_INODES:
746:   case MAT_IGNORE_ZERO_ENTRIES:
747:     MatCheckPreallocated(A, 1);
748:     PetscCall(MatSetOption(a->A, op, flg));
749:     PetscCall(MatSetOption(a->B, op, flg));
750:     break;
751:   case MAT_ROW_ORIENTED:
752:     MatCheckPreallocated(A, 1);
753:     a->roworiented = flg;

755:     PetscCall(MatSetOption(a->A, op, flg));
756:     PetscCall(MatSetOption(a->B, op, flg));
757:     break;
758:   case MAT_FORCE_DIAGONAL_ENTRIES:
759:   case MAT_SORTED_FULL:
760:     PetscCall(PetscInfo(A, "Option %s ignored\n", MatOptions[op]));
761:     break;
762:   case MAT_IGNORE_OFF_PROC_ENTRIES:
763:     a->donotstash = flg;
764:     break;
765:   case MAT_SPD:
766:   case MAT_SPD_ETERNAL:
767:     break;
768:   case MAT_SYMMETRIC:
769:     MatCheckPreallocated(A, 1);
770:     PetscCall(MatSetOption(a->A, op, flg));
771:     break;
772:   case MAT_STRUCTURALLY_SYMMETRIC:
773:     MatCheckPreallocated(A, 1);
774:     PetscCall(MatSetOption(a->A, op, flg));
775:     break;
776:   case MAT_HERMITIAN:
777:     MatCheckPreallocated(A, 1);
778:     PetscCall(MatSetOption(a->A, op, flg));
779:     break;
780:   case MAT_SYMMETRY_ETERNAL:
781:     MatCheckPreallocated(A, 1);
782:     PetscCall(MatSetOption(a->A, op, flg));
783:     break;
784:   case MAT_STRUCTURAL_SYMMETRY_ETERNAL:
785:     MatCheckPreallocated(A, 1);
786:     PetscCall(MatSetOption(a->A, op, flg));
787:     break;
788:   default:
789:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unknown option %d", op);
790:   }
791:   PetscFunctionReturn(PETSC_SUCCESS);
792: }

794: PetscErrorCode MatDiagonalScale_MPISELL(Mat mat, Vec ll, Vec rr)
795: {
796:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
797:   Mat          a = sell->A, b = sell->B;
798:   PetscInt     s1, s2, s3;

800:   PetscFunctionBegin;
801:   PetscCall(MatGetLocalSize(mat, &s2, &s3));
802:   if (rr) {
803:     PetscCall(VecGetLocalSize(rr, &s1));
804:     PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size");
805:     /* Overlap communication with computation. */
806:     PetscCall(VecScatterBegin(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
807:   }
808:   if (ll) {
809:     PetscCall(VecGetLocalSize(ll, &s1));
810:     PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size");
811:     PetscUseTypeMethod(b, diagonalscale, ll, NULL);
812:   }
813:   /* scale  the diagonal block */
814:   PetscUseTypeMethod(a, diagonalscale, ll, rr);

816:   if (rr) {
817:     /* Do a scatter end and then right scale the off-diagonal block */
818:     PetscCall(VecScatterEnd(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
819:     PetscUseTypeMethod(b, diagonalscale, NULL, sell->lvec);
820:   }
821:   PetscFunctionReturn(PETSC_SUCCESS);
822: }

824: PetscErrorCode MatSetUnfactored_MPISELL(Mat A)
825: {
826:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

828:   PetscFunctionBegin;
829:   PetscCall(MatSetUnfactored(a->A));
830:   PetscFunctionReturn(PETSC_SUCCESS);
831: }

833: PetscErrorCode MatEqual_MPISELL(Mat A, Mat B, PetscBool *flag)
834: {
835:   Mat_MPISELL *matB = (Mat_MPISELL *)B->data, *matA = (Mat_MPISELL *)A->data;
836:   Mat          a, b, c, d;
837:   PetscBool    flg;

839:   PetscFunctionBegin;
840:   a = matA->A;
841:   b = matA->B;
842:   c = matB->A;
843:   d = matB->B;

845:   PetscCall(MatEqual(a, c, &flg));
846:   if (flg) PetscCall(MatEqual(b, d, &flg));
847:   PetscCall(MPIU_Allreduce(&flg, flag, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A)));
848:   PetscFunctionReturn(PETSC_SUCCESS);
849: }

851: PetscErrorCode MatCopy_MPISELL(Mat A, Mat B, MatStructure str)
852: {
853:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
854:   Mat_MPISELL *b = (Mat_MPISELL *)B->data;

856:   PetscFunctionBegin;
857:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
858:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
859:     /* because of the column compression in the off-processor part of the matrix a->B,
860:        the number of columns in a->B and b->B may be different, hence we cannot call
861:        the MatCopy() directly on the two parts. If need be, we can provide a more
862:        efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices
863:        then copying the submatrices */
864:     PetscCall(MatCopy_Basic(A, B, str));
865:   } else {
866:     PetscCall(MatCopy(a->A, b->A, str));
867:     PetscCall(MatCopy(a->B, b->B, str));
868:   }
869:   PetscFunctionReturn(PETSC_SUCCESS);
870: }

872: PetscErrorCode MatSetUp_MPISELL(Mat A)
873: {
874:   PetscFunctionBegin;
875:   PetscCall(MatMPISELLSetPreallocation(A, PETSC_DEFAULT, NULL, PETSC_DEFAULT, NULL));
876:   PetscFunctionReturn(PETSC_SUCCESS);
877: }

879: extern PetscErrorCode MatConjugate_SeqSELL(Mat);

881: PetscErrorCode MatConjugate_MPISELL(Mat mat)
882: {
883:   PetscFunctionBegin;
884:   if (PetscDefined(USE_COMPLEX)) {
885:     Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

887:     PetscCall(MatConjugate_SeqSELL(sell->A));
888:     PetscCall(MatConjugate_SeqSELL(sell->B));
889:   }
890:   PetscFunctionReturn(PETSC_SUCCESS);
891: }

893: PetscErrorCode MatRealPart_MPISELL(Mat A)
894: {
895:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

897:   PetscFunctionBegin;
898:   PetscCall(MatRealPart(a->A));
899:   PetscCall(MatRealPart(a->B));
900:   PetscFunctionReturn(PETSC_SUCCESS);
901: }

903: PetscErrorCode MatImaginaryPart_MPISELL(Mat A)
904: {
905:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

907:   PetscFunctionBegin;
908:   PetscCall(MatImaginaryPart(a->A));
909:   PetscCall(MatImaginaryPart(a->B));
910:   PetscFunctionReturn(PETSC_SUCCESS);
911: }

913: PetscErrorCode MatInvertBlockDiagonal_MPISELL(Mat A, const PetscScalar **values)
914: {
915:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

917:   PetscFunctionBegin;
918:   PetscCall(MatInvertBlockDiagonal(a->A, values));
919:   A->factorerrortype = a->A->factorerrortype;
920:   PetscFunctionReturn(PETSC_SUCCESS);
921: }

923: static PetscErrorCode MatSetRandom_MPISELL(Mat x, PetscRandom rctx)
924: {
925:   Mat_MPISELL *sell = (Mat_MPISELL *)x->data;

927:   PetscFunctionBegin;
928:   PetscCall(MatSetRandom(sell->A, rctx));
929:   PetscCall(MatSetRandom(sell->B, rctx));
930:   PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY));
931:   PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY));
932:   PetscFunctionReturn(PETSC_SUCCESS);
933: }

935: PetscErrorCode MatSetFromOptions_MPISELL(Mat A, PetscOptionItems *PetscOptionsObject)
936: {
937:   PetscFunctionBegin;
938:   PetscOptionsHeadBegin(PetscOptionsObject, "MPISELL options");
939:   PetscOptionsHeadEnd();
940:   PetscFunctionReturn(PETSC_SUCCESS);
941: }

943: PetscErrorCode MatShift_MPISELL(Mat Y, PetscScalar a)
944: {
945:   Mat_MPISELL *msell = (Mat_MPISELL *)Y->data;
946:   Mat_SeqSELL *sell  = (Mat_SeqSELL *)msell->A->data;

948:   PetscFunctionBegin;
949:   if (!Y->preallocated) {
950:     PetscCall(MatMPISELLSetPreallocation(Y, 1, NULL, 0, NULL));
951:   } else if (!sell->nz) {
952:     PetscInt nonew = sell->nonew;
953:     PetscCall(MatSeqSELLSetPreallocation(msell->A, 1, NULL));
954:     sell->nonew = nonew;
955:   }
956:   PetscCall(MatShift_Basic(Y, a));
957:   PetscFunctionReturn(PETSC_SUCCESS);
958: }

960: PetscErrorCode MatMissingDiagonal_MPISELL(Mat A, PetscBool *missing, PetscInt *d)
961: {
962:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

964:   PetscFunctionBegin;
965:   PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only works for square matrices");
966:   PetscCall(MatMissingDiagonal(a->A, missing, d));
967:   if (d) {
968:     PetscInt rstart;
969:     PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
970:     *d += rstart;
971:   }
972:   PetscFunctionReturn(PETSC_SUCCESS);
973: }

975: PetscErrorCode MatGetDiagonalBlock_MPISELL(Mat A, Mat *a)
976: {
977:   PetscFunctionBegin;
978:   *a = ((Mat_MPISELL *)A->data)->A;
979:   PetscFunctionReturn(PETSC_SUCCESS);
980: }

982: static struct _MatOps MatOps_Values = {MatSetValues_MPISELL,
983:                                        NULL,
984:                                        NULL,
985:                                        MatMult_MPISELL,
986:                                        /* 4*/ MatMultAdd_MPISELL,
987:                                        MatMultTranspose_MPISELL,
988:                                        MatMultTransposeAdd_MPISELL,
989:                                        NULL,
990:                                        NULL,
991:                                        NULL,
992:                                        /*10*/ NULL,
993:                                        NULL,
994:                                        NULL,
995:                                        MatSOR_MPISELL,
996:                                        NULL,
997:                                        /*15*/ MatGetInfo_MPISELL,
998:                                        MatEqual_MPISELL,
999:                                        MatGetDiagonal_MPISELL,
1000:                                        MatDiagonalScale_MPISELL,
1001:                                        NULL,
1002:                                        /*20*/ MatAssemblyBegin_MPISELL,
1003:                                        MatAssemblyEnd_MPISELL,
1004:                                        MatSetOption_MPISELL,
1005:                                        MatZeroEntries_MPISELL,
1006:                                        /*24*/ NULL,
1007:                                        NULL,
1008:                                        NULL,
1009:                                        NULL,
1010:                                        NULL,
1011:                                        /*29*/ MatSetUp_MPISELL,
1012:                                        NULL,
1013:                                        NULL,
1014:                                        MatGetDiagonalBlock_MPISELL,
1015:                                        NULL,
1016:                                        /*34*/ MatDuplicate_MPISELL,
1017:                                        NULL,
1018:                                        NULL,
1019:                                        NULL,
1020:                                        NULL,
1021:                                        /*39*/ NULL,
1022:                                        NULL,
1023:                                        NULL,
1024:                                        MatGetValues_MPISELL,
1025:                                        MatCopy_MPISELL,
1026:                                        /*44*/ NULL,
1027:                                        MatScale_MPISELL,
1028:                                        MatShift_MPISELL,
1029:                                        MatDiagonalSet_MPISELL,
1030:                                        NULL,
1031:                                        /*49*/ MatSetRandom_MPISELL,
1032:                                        NULL,
1033:                                        NULL,
1034:                                        NULL,
1035:                                        NULL,
1036:                                        /*54*/ MatFDColoringCreate_MPIXAIJ,
1037:                                        NULL,
1038:                                        MatSetUnfactored_MPISELL,
1039:                                        NULL,
1040:                                        NULL,
1041:                                        /*59*/ NULL,
1042:                                        MatDestroy_MPISELL,
1043:                                        MatView_MPISELL,
1044:                                        NULL,
1045:                                        NULL,
1046:                                        /*64*/ NULL,
1047:                                        NULL,
1048:                                        NULL,
1049:                                        NULL,
1050:                                        NULL,
1051:                                        /*69*/ NULL,
1052:                                        NULL,
1053:                                        NULL,
1054:                                        NULL,
1055:                                        NULL,
1056:                                        NULL,
1057:                                        /*75*/ MatFDColoringApply_AIJ, /* reuse AIJ function */
1058:                                        MatSetFromOptions_MPISELL,
1059:                                        NULL,
1060:                                        NULL,
1061:                                        NULL,
1062:                                        /*80*/ NULL,
1063:                                        NULL,
1064:                                        NULL,
1065:                                        /*83*/ NULL,
1066:                                        NULL,
1067:                                        NULL,
1068:                                        NULL,
1069:                                        NULL,
1070:                                        NULL,
1071:                                        /*89*/ NULL,
1072:                                        NULL,
1073:                                        NULL,
1074:                                        NULL,
1075:                                        NULL,
1076:                                        /*94*/ NULL,
1077:                                        NULL,
1078:                                        NULL,
1079:                                        NULL,
1080:                                        NULL,
1081:                                        /*99*/ NULL,
1082:                                        NULL,
1083:                                        NULL,
1084:                                        MatConjugate_MPISELL,
1085:                                        NULL,
1086:                                        /*104*/ NULL,
1087:                                        MatRealPart_MPISELL,
1088:                                        MatImaginaryPart_MPISELL,
1089:                                        NULL,
1090:                                        NULL,
1091:                                        /*109*/ NULL,
1092:                                        NULL,
1093:                                        NULL,
1094:                                        NULL,
1095:                                        MatMissingDiagonal_MPISELL,
1096:                                        /*114*/ NULL,
1097:                                        NULL,
1098:                                        MatGetGhosts_MPISELL,
1099:                                        NULL,
1100:                                        NULL,
1101:                                        /*119*/ NULL,
1102:                                        NULL,
1103:                                        NULL,
1104:                                        NULL,
1105:                                        NULL,
1106:                                        /*124*/ NULL,
1107:                                        NULL,
1108:                                        MatInvertBlockDiagonal_MPISELL,
1109:                                        NULL,
1110:                                        NULL,
1111:                                        /*129*/ NULL,
1112:                                        NULL,
1113:                                        NULL,
1114:                                        NULL,
1115:                                        NULL,
1116:                                        /*134*/ NULL,
1117:                                        NULL,
1118:                                        NULL,
1119:                                        NULL,
1120:                                        NULL,
1121:                                        /*139*/ NULL,
1122:                                        NULL,
1123:                                        NULL,
1124:                                        MatFDColoringSetUp_MPIXAIJ,
1125:                                        NULL,
1126:                                        /*144*/ NULL,
1127:                                        NULL,
1128:                                        NULL,
1129:                                        NULL,
1130:                                        NULL,
1131:                                        NULL,
1132:                                        /*150*/ NULL,
1133:                                        NULL};

1135: PetscErrorCode MatStoreValues_MPISELL(Mat mat)
1136: {
1137:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

1139:   PetscFunctionBegin;
1140:   PetscCall(MatStoreValues(sell->A));
1141:   PetscCall(MatStoreValues(sell->B));
1142:   PetscFunctionReturn(PETSC_SUCCESS);
1143: }

1145: PetscErrorCode MatRetrieveValues_MPISELL(Mat mat)
1146: {
1147:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

1149:   PetscFunctionBegin;
1150:   PetscCall(MatRetrieveValues(sell->A));
1151:   PetscCall(MatRetrieveValues(sell->B));
1152:   PetscFunctionReturn(PETSC_SUCCESS);
1153: }

1155: PetscErrorCode MatMPISELLSetPreallocation_MPISELL(Mat B, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[])
1156: {
1157:   Mat_MPISELL *b;

1159:   PetscFunctionBegin;
1160:   PetscCall(PetscLayoutSetUp(B->rmap));
1161:   PetscCall(PetscLayoutSetUp(B->cmap));
1162:   b = (Mat_MPISELL *)B->data;

1164:   if (!B->preallocated) {
1165:     /* Explicitly create 2 MATSEQSELL matrices. */
1166:     PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
1167:     PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
1168:     PetscCall(MatSetBlockSizesFromMats(b->A, B, B));
1169:     PetscCall(MatSetType(b->A, MATSEQSELL));
1170:     PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
1171:     PetscCall(MatSetSizes(b->B, B->rmap->n, B->cmap->N, B->rmap->n, B->cmap->N));
1172:     PetscCall(MatSetBlockSizesFromMats(b->B, B, B));
1173:     PetscCall(MatSetType(b->B, MATSEQSELL));
1174:   }

1176:   PetscCall(MatSeqSELLSetPreallocation(b->A, d_rlenmax, d_rlen));
1177:   PetscCall(MatSeqSELLSetPreallocation(b->B, o_rlenmax, o_rlen));
1178:   B->preallocated  = PETSC_TRUE;
1179:   B->was_assembled = PETSC_FALSE;
1180:   /*
1181:     critical for MatAssemblyEnd to work.
1182:     MatAssemblyBegin checks it to set up was_assembled
1183:     and MatAssemblyEnd checks was_assembled to determine whether to build garray
1184:   */
1185:   B->assembled = PETSC_FALSE;
1186:   PetscFunctionReturn(PETSC_SUCCESS);
1187: }

1189: PetscErrorCode MatDuplicate_MPISELL(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
1190: {
1191:   Mat          mat;
1192:   Mat_MPISELL *a, *oldmat = (Mat_MPISELL *)matin->data;

1194:   PetscFunctionBegin;
1195:   *newmat = NULL;
1196:   PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
1197:   PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
1198:   PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
1199:   PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
1200:   a = (Mat_MPISELL *)mat->data;

1202:   mat->factortype   = matin->factortype;
1203:   mat->assembled    = PETSC_TRUE;
1204:   mat->insertmode   = NOT_SET_VALUES;
1205:   mat->preallocated = PETSC_TRUE;

1207:   a->size         = oldmat->size;
1208:   a->rank         = oldmat->rank;
1209:   a->donotstash   = oldmat->donotstash;
1210:   a->roworiented  = oldmat->roworiented;
1211:   a->rowindices   = NULL;
1212:   a->rowvalues    = NULL;
1213:   a->getrowactive = PETSC_FALSE;

1215:   PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
1216:   PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));

1218:   if (oldmat->colmap) {
1219: #if defined(PETSC_USE_CTABLE)
1220:     PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
1221: #else
1222:     PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
1223:     PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
1224: #endif
1225:   } else a->colmap = NULL;
1226:   if (oldmat->garray) {
1227:     PetscInt len;
1228:     len = oldmat->B->cmap->n;
1229:     PetscCall(PetscMalloc1(len + 1, &a->garray));
1230:     if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
1231:   } else a->garray = NULL;

1233:   PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
1234:   PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx));
1235:   PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
1236:   PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
1237:   PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
1238:   *newmat = mat;
1239:   PetscFunctionReturn(PETSC_SUCCESS);
1240: }

1242: /*@C
1243:    MatMPISELLSetPreallocation - Preallocates memory for a `MATMPISELL` sparse parallel matrix in SELL format.
1244:    For good matrix assembly performance the user should preallocate the matrix storage by
1245:    setting the parameters `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).

1247:    Collective

1249:    Input Parameters:
1250: +  B - the matrix
1251: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
1252:            (same value is used for all local rows)
1253: .  d_nnz - array containing the number of nonzeros in the various rows of the
1254:            DIAGONAL portion of the local submatrix (possibly different for each row)
1255:            or NULL (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
1256:            The size of this array is equal to the number of local rows, i.e., 'm'.
1257:            For matrices that will be factored, you must leave room for (and set)
1258:            the diagonal entry even if it is zero.
1259: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
1260:            submatrix (same value is used for all local rows).
1261: -  o_nnz - array containing the number of nonzeros in the various rows of the
1262:            OFF-DIAGONAL portion of the local submatrix (possibly different for
1263:            each row) or NULL (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
1264:            structure. The size of this array is equal to the number
1265:            of local rows, i.e., 'm'.

1267:    Example usage:
1268:    Consider the following 8x8 matrix with 34 nonzero values that is
1269:    assembled across 3 processors. Let us assume that proc0 owns 3 rows,
1270:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
1271:    as follows

1273: .vb
1274:             1  2  0  |  0  3  0  |  0  4
1275:     Proc0   0  5  6  |  7  0  0  |  8  0
1276:             9  0 10  | 11  0  0  | 12  0
1277:     -------------------------------------
1278:            13  0 14  | 15 16 17  |  0  0
1279:     Proc1   0 18  0  | 19 20 21  |  0  0
1280:             0  0  0  | 22 23  0  | 24  0
1281:     -------------------------------------
1282:     Proc2  25 26 27  |  0  0 28  | 29  0
1283:            30  0  0  | 31 32 33  |  0 34
1284: .ve

1286:    This can be represented as a collection of submatrices as

1288: .vb
1289:       A B C
1290:       D E F
1291:       G H I
1292: .ve

1294:    where the submatrices A,B,C are owned by proc0, D,E,F are
1295:    owned by proc1, and G,H,I are owned by proc2.

1297:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1298:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1299:    The 'M','N' parameters are 8,8, and have the same values on all procs.

1301:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1302:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1303:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1304:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1305:    part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1306:    matrix, and [DF] as another `MATSEQSELL` matrix.

1308:    When the `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
1309:    allocated for every row of the local DIAGONAL submatrix, and `o_nz`
1310:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1311:    One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros
1312:    over the local rows of the DIAGONAL and OFF-DIAGONAL submatrices, respectively.
1313:    In this case, the values of `d_nz`, `o_nz` are
1314: .vb
1315:      proc0  d_nz = 2, o_nz = 2
1316:      proc1  d_nz = 3, o_nz = 2
1317:      proc2  d_nz = 1, o_nz = 4
1318: .ve
1319:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
1320:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
1321:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
1322:    34 values.

1324:    When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
1325:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1326:    In the above case the values for `d_nnz`, `o_nnz` are
1327: .vb
1328:      proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
1329:      proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
1330:      proc2 d_nnz = [1,1]   and o_nnz = [4,4]
1331: .ve
1332:    Here the space allocated is still governed by the maximum number of nonzeros per row in each of
1333:    the DIAGONAL and OFF-DIAGONAL submatrices, i.e. (2+2)*3+(3+2)*3+(1+4)*2=37
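   As a minimal sketch (editorial; assumes the matrix B has already been given its sizes and its
   type, as in the construction sequence shown under `MatCreateSELL()`), the corresponding call on
   proc1 of the example above would be
.vb
     PetscInt d_nnz[3] = {3, 3, 2};
     PetscInt o_nnz[3] = {2, 1, 1};
     PetscCall(MatMPISELLSetPreallocation(B, 0, d_nnz, 0, o_nnz));
.ve
   The scalar d_nz and o_nz arguments are ignored here because the arrays are provided.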

1335:    Level: intermediate

1337:    Notes:
1338:    If the *_nnz parameter is given then the *_nz parameter is ignored

1340:    The stored row and column indices begin with zero.

1342:    The parallel matrix is partitioned such that the first m0 rows belong to
1343:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
1344:    to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.

1346:    The DIAGONAL portion of the local submatrix of a processor can be defined
1347:    as the submatrix which is obtained by extracting the part corresponding to
1348:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
1349:    first row that belongs to the processor, r2 is the last row belonging to
1350:    this processor, and c1-c2 is the range of indices of the local part of a
1351:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
1352:    common case of a square matrix, the row and column ranges are the same and
1353:    the DIAGONAL part is also square. The remaining portion of the local
1354:    submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

1356:    If `o_nnz`, `d_nnz` are specified, then `o_nz`, and `d_nz` are ignored.

1358:    You can call `MatGetInfo()` to get information on how effective the preallocation was;
1359:    for example, the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
1360:    You can also run with the option -info and look for messages with the string
1361:    malloc in them to see if additional memory allocation was needed.

1363: .seealso: `Mat`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatCreateSELL()`,
1364:           `MATMPISELL`, `MatGetInfo()`, `PetscSplitOwnership()`, `MATSELL`
1365: @*/
1366: PetscErrorCode MatMPISELLSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
1367: {
1368:   PetscFunctionBegin;
1371:   PetscTryMethod(B, "MatMPISELLSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
1372:   PetscFunctionReturn(PETSC_SUCCESS);
1373: }

1375: /*MC
1376:    MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for MPI sparse matrices,
1377:    based on the sliced ELLPACK format

1379:    Options Database Key:
1380: . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`

1382:    Level: beginner

1384: .seealso: `Mat`, `MatCreateSELL()`, `MATSEQSELL`, `MATSELL`, `MATSEQAIJ`, `MATAIJ`, `MATMPIAIJ`
1385: M*/

1387: /*@C
1388:    MatCreateSELL - Creates a sparse parallel matrix in `MATSELL` format.

1390:    Collective

1392:    Input Parameters:
1393: +  comm - MPI communicator
1394: .  m - number of local rows (or `PETSC_DECIDE` to have it calculated if `M` is given)
1395:            This value should be the same as the local size used in creating the
1396:            y vector for the matrix-vector product y = Ax.
1397: .  n - number of local columns (or `PETSC_DECIDE` to have it calculated if `N` is given)
1398:        This value should be the same as the local size used in creating the
1399:        x vector for the matrix-vector product y = Ax. For square matrices `n` is almost always `m`.
1400: .  M - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
1401: .  N - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
1402: .  d_rlenmax - max number of nonzeros per row in DIAGONAL portion of local submatrix
1403:                (same value is used for all local rows)
1404: .  d_rlen - array containing the number of nonzeros in the various rows of the
1405:             DIAGONAL portion of the local submatrix (possibly different for each row)
1406:             or `NULL`, if `d_rlenmax` is used to specify the nonzero structure.
1407:             The size of this array is equal to the number of local rows, i.e., `m`.
1408: .  o_rlenmax - max number of nonzeros per row in the OFF-DIAGONAL portion of local
1409:                submatrix (same value is used for all local rows).
1410: -  o_rlen - array containing the number of nonzeros in the various rows of the
1411:             OFF-DIAGONAL portion of the local submatrix (possibly different for
1412:             each row) or `NULL`, if `o_rlenmax` is used to specify the nonzero
1413:             structure. The size of this array is equal to the number
1414:             of local rows, i.e., `m`.

1416:    Output Parameter:
1417: .  A - the matrix

1419:    Options Database Key:
1420: . -mat_sell_oneindex - Internally use indexing starting at 1
1421:         rather than 0.  When calling `MatSetValues()`,
1422:         the user still MUST index entries starting at 0!

1424:    Example:
1425:    Consider the following 8x8 matrix with 34 non-zero values, which is
1426:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
1427:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
1428:    as follows

1430: .vb
1431:             1  2  0  |  0  3  0  |  0  4
1432:     Proc0   0  5  6  |  7  0  0  |  8  0
1433:             9  0 10  | 11  0  0  | 12  0
1434:     -------------------------------------
1435:            13  0 14  | 15 16 17  |  0  0
1436:     Proc1   0 18  0  | 19 20 21  |  0  0
1437:             0  0  0  | 22 23  0  | 24  0
1438:     -------------------------------------
1439:     Proc2  25 26 27  |  0  0 28  | 29  0
1440:            30  0  0  | 31 32 33  |  0 34
1441: .ve

1443:    This can be represented as a collection of submatrices as
1444: .vb
1445:       A B C
1446:       D E F
1447:       G H I
1448: .ve

1450:    where the submatrices A,B,C are owned by proc0, D,E,F are
1451:    owned by proc1, and G,H,I are owned by proc2.

1453:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1454:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1455:    The 'M','N' parameters are 8,8, and have the same values on all procs.

1457:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1458:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1459:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1460:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1461:    part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1462:    matrix, and [DF] as another `MATSEQSELL` matrix.

1464:    When the `d_rlenmax`, `o_rlenmax` parameters are specified, `d_rlenmax` storage elements are
1465:    allocated for every row of the local DIAGONAL submatrix, and `o_rlenmax`
1466:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1467:    One way to choose `d_rlenmax` and `o_rlenmax` is to use the maximum number of nonzeros
1468:    over the local rows of the DIAGONAL and OFF-DIAGONAL submatrices, respectively.
1469:    In this case, the values of `d_rlenmax`, `o_rlenmax` are
1470: .vb
1471:      proc0 - d_rlenmax = 2, o_rlenmax = 2
1472:      proc1 - d_rlenmax = 3, o_rlenmax = 2
1473:      proc2 - d_rlenmax = 1, o_rlenmax = 4
1474: .ve
1475:    We are allocating m*(d_rlenmax+o_rlenmax) storage locations for every proc. This
1476:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
1477:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
1478:    34 values.

1480:    When `d_rlen`, `o_rlen` parameters are specified, the storage is specified
1481:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1482:    In the above case the values for `d_rlen`, `o_rlen` are
1483: .vb
1484:      proc0 - d_rlen = [2,2,2] and o_rlen = [2,2,2]
1485:      proc1 - d_rlen = [3,3,2] and o_rlen = [2,1,1]
1486:      proc2 - d_rlen = [1,1]   and o_rlen = [4,4]
1487: .ve
1488:    Here the space allocated is still 37 though there are 34 nonzeros because
1489:    the allocation is always done according to rlenmax.
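   As a minimal sketch (editorial; the variable A is the matrix being created), the corresponding
   call on proc1 of the example above, leaving the global sizes to be determined, would be
.vb
     PetscInt d_rlen[3] = {3, 3, 2};
     PetscInt o_rlen[3] = {2, 1, 1};
     PetscCall(MatCreateSELL(PETSC_COMM_WORLD, 3, 3, PETSC_DETERMINE, PETSC_DETERMINE, 0, d_rlen, 0, o_rlen, &A));
.ve
   The d_rlenmax and o_rlenmax arguments are ignored here because the arrays are provided.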

1491:    Level: intermediate

1493:    Notes:
1494:    It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
1495:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
1496:    [MatXXXXSetPreallocation() is, for example, `MatSeqSELLSetPreallocation()`]

1498:    If the *_rlen parameter is given then the *_rlenmax parameter is ignored

1500:    `m`, `n`, `M`, `N` parameters specify the size of the matrix, and its partitioning across
1501:    processors, while `d_rlenmax`, `d_rlen`, `o_rlenmax`, `o_rlen` parameters specify the approximate
1502:    storage requirements for this matrix.

1504:    If `PETSC_DECIDE` or  `PETSC_DETERMINE` is used for a particular argument on one
1505:    processor then it must be used on all processors that share the object for
1506:    that argument.

1508:    The user MUST specify either the local or global matrix dimensions
1509:    (possibly both).

1511:    The parallel matrix is partitioned across processors such that the
1512:    first m0 rows belong to process 0, the next m1 rows belong to
1513:    process 1, the next m2 rows belong to process 2, etc., where
1514:    m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
1515:    values corresponding to an [`m` x `N`] submatrix.

1517:    The columns are logically partitioned with the n0 columns belonging
1518:    to the 0th partition, the next n1 columns belonging to the next
1519:    partition, etc., where n0,n1,n2... are the input parameter `n`.

1521:    The DIAGONAL portion of the local submatrix on any given processor
1522:    is the submatrix corresponding to the rows and columns `m`, `n`
1523:    corresponding to the given processor, i.e., the diagonal matrix on
1524:    process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
1525:    etc. The remaining portion of the local submatrix [m x (N-n)]
1526:    constitutes the OFF-DIAGONAL portion. The example above
1527:    illustrates this concept.

1529:    For a square global matrix we define each processor's diagonal portion
1530:    to be its local rows and the corresponding columns (a square submatrix);
1531:    each processor's off-diagonal portion encompasses the remainder of the
1532:    local matrix (a rectangular submatrix).

1534:    If `o_rlen`, `d_rlen` are specified, then `o_rlenmax`, and `d_rlenmax` are ignored.

1536:    When calling this routine with a single process communicator, a matrix of
1537:    type `MATSEQSELL` is returned.  If a matrix of type `MATMPISELL` is desired for this
1538:    type of communicator, use the construction mechanism
1539: .vb
1540:    MatCreate(...,&A);
1541:    MatSetType(A,MATMPISELL);
1542:    MatSetSizes(A, m,n,M,N);
1543:    MatMPISELLSetPreallocation(A,...);
1544: .ve

1546: .seealso: `Mat`, `MATSELL`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatMPISELLSetPreallocation()`, `MatMPISELLSetPreallocationSELL()`,
1547:           `MATMPISELL`, `MatCreateMPISELLWithArrays()`
1548: @*/
1549: PetscErrorCode MatCreateSELL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[], Mat *A)
1550: {
1551:   PetscMPIInt size;

1553:   PetscFunctionBegin;
1554:   PetscCall(MatCreate(comm, A));
1555:   PetscCall(MatSetSizes(*A, m, n, M, N));
1556:   PetscCallMPI(MPI_Comm_size(comm, &size));
1557:   if (size > 1) {
1558:     PetscCall(MatSetType(*A, MATMPISELL));
1559:     PetscCall(MatMPISELLSetPreallocation(*A, d_rlenmax, d_rlen, o_rlenmax, o_rlen));
1560:   } else {
1561:     PetscCall(MatSetType(*A, MATSEQSELL));
1562:     PetscCall(MatSeqSELLSetPreallocation(*A, d_rlenmax, d_rlen));
1563:   }
1564:   PetscFunctionReturn(PETSC_SUCCESS);
1565: }

1567: PetscErrorCode MatMPISELLGetSeqSELL(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[])
1568: {
1569:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1570:   PetscBool    flg;

1572:   PetscFunctionBegin;
1573:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &flg));
1574:   PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPISELL matrix as input");
1575:   if (Ad) *Ad = a->A;
1576:   if (Ao) *Ao = a->B;
1577:   if (colmap) *colmap = a->garray;
1578:   PetscFunctionReturn(PETSC_SUCCESS);
1579: }
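/*
   Editorial note: a minimal usage sketch for MatMPISELLGetSeqSELL(), assuming A is an assembled
   MATMPISELL matrix (variable names are illustrative):

     Mat             Ad, Ao;
     const PetscInt *garray;

     PetscCall(MatMPISELLGetSeqSELL(A, &Ad, &Ao, &garray));

   Ad and Ao are the DIAGONAL and OFF-DIAGONAL MATSEQSELL blocks owned by this process, and
   garray[j] is the global column index of local column j of Ao. They reference the internal
   matrices of A and must not be destroyed by the caller.
*/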

1581: /*@C
1582:      MatMPISELLGetLocalMatCondensed - Creates a `MATSEQSELL` matrix from a `MATMPISELL` matrix by taking all its local rows and NON-ZERO columns

1584:     Not Collective

1586:    Input Parameters:
1587: +    A - the matrix
1588: .    scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
1589: .    row - index set of rows to extract (or `NULL`)
1590: -    col - index set of columns to extract (or `NULL`)

1592:    Output Parameter:
1593: .    A_loc - the local sequential matrix generated

1595:     Level: developer

1597: .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`, `MatGetOwnershipRange()`, `MatMPISELLGetLocalMat()`
1598: @*/
1599: PetscErrorCode MatMPISELLGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc)
1600: {
1601:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1602:   PetscInt     i, start, end, ncols, nzA, nzB, *cmap, imark, *idx;
1603:   IS           isrowa, iscola;
1604:   Mat         *aloc;
1605:   PetscBool    match;

1607:   PetscFunctionBegin;
1608:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &match));
1609:   PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPISELL matrix as input");
1610:   PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1611:   if (!row) {
1612:     start = A->rmap->rstart;
1613:     end   = A->rmap->rend;
1614:     PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa));
1615:   } else {
1616:     isrowa = *row;
1617:   }
1618:   if (!col) {
1619:     start = A->cmap->rstart;
1620:     cmap  = a->garray;
1621:     nzA   = a->A->cmap->n;
1622:     nzB   = a->B->cmap->n;
1623:     PetscCall(PetscMalloc1(nzA + nzB, &idx));
1624:     ncols = 0;
1625:     for (i = 0; i < nzB; i++) {
1626:       if (cmap[i] < start) idx[ncols++] = cmap[i];
1627:       else break;
1628:     }
1629:     imark = i;
1630:     for (i = 0; i < nzA; i++) idx[ncols++] = start + i;
1631:     for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i];
1632:     PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola));
1633:   } else {
1634:     iscola = *col;
1635:   }
1636:   if (scall != MAT_INITIAL_MATRIX) {
1637:     PetscCall(PetscMalloc1(1, &aloc));
1638:     aloc[0] = *A_loc;
1639:   }
1640:   PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc));
1641:   *A_loc = aloc[0];
1642:   PetscCall(PetscFree(aloc));
1643:   if (!row) PetscCall(ISDestroy(&isrowa));
1644:   if (!col) PetscCall(ISDestroy(&iscola));
1645:   PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1646:   PetscFunctionReturn(PETSC_SUCCESS);
1647: }
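/*
   Editorial note: a minimal usage sketch for MatMPISELLGetLocalMatCondensed(), assuming A is an
   assembled MATMPISELL matrix. Passing NULL for row and col selects all local rows and the
   condensed set of columns described above:

     Mat A_loc;

     PetscCall(MatMPISELLGetLocalMatCondensed(A, MAT_INITIAL_MATRIX, NULL, NULL, &A_loc));
     PetscCall(MatDestroy(&A_loc));

   With MAT_REUSE_MATRIX the previously created A_loc is filled again instead of being allocated.
*/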

1649: #include <../src/mat/impls/aij/mpi/mpiaij.h>

1651: PetscErrorCode MatConvert_MPISELL_MPIAIJ(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1652: {
1653:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1654:   Mat          B;
1655:   Mat_MPIAIJ  *b;

1657:   PetscFunctionBegin;
1658:   PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");

1660:   if (reuse == MAT_REUSE_MATRIX) {
1661:     B = *newmat;
1662:   } else {
1663:     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1664:     PetscCall(MatSetType(B, MATMPIAIJ));
1665:     PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
1666:     PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1667:     PetscCall(MatSeqAIJSetPreallocation(B, 0, NULL));
1668:     PetscCall(MatMPIAIJSetPreallocation(B, 0, NULL, 0, NULL));
1669:   }
1670:   b = (Mat_MPIAIJ *)B->data;

1672:   if (reuse == MAT_REUSE_MATRIX) {
1673:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_REUSE_MATRIX, &b->A));
1674:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_REUSE_MATRIX, &b->B));
1675:   } else {
1676:     PetscCall(MatDestroy(&b->A));
1677:     PetscCall(MatDestroy(&b->B));
1678:     PetscCall(MatDisAssemble_MPISELL(A));
1679:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A));
1680:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B));
1681:     PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1682:     PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1683:     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1684:     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1685:   }

1687:   if (reuse == MAT_INPLACE_MATRIX) {
1688:     PetscCall(MatHeaderReplace(A, &B));
1689:   } else {
1690:     *newmat = B;
1691:   }
1692:   PetscFunctionReturn(PETSC_SUCCESS);
1693: }
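/*
   Editorial note: the converter above is registered as "MatConvert_mpisell_mpiaij_C" and is
   normally reached through the generic MatConvert() interface. A minimal sketch, assuming A is
   an assembled MATMPISELL matrix:

     Mat B;

     PetscCall(MatConvert(A, MATMPIAIJ, MAT_INITIAL_MATRIX, &B));
     PetscCall(MatDestroy(&B));

   Passing MAT_INPLACE_MATRIX (with &A as the last argument) converts A itself, replacing its
   SELL storage with AIJ via the MatHeaderReplace() path above.
*/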

1695: PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1696: {
1697:   Mat_MPIAIJ  *a = (Mat_MPIAIJ *)A->data;
1698:   Mat          B;
1699:   Mat_MPISELL *b;

1701:   PetscFunctionBegin;
1702:   PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");

1704:   if (reuse == MAT_REUSE_MATRIX) {
1705:     B = *newmat;
1706:   } else {
1707:     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1708:     PetscCall(MatSetType(B, MATMPISELL));
1709:     PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
1710:     PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1711:     PetscCall(MatSeqAIJSetPreallocation(B, 0, NULL));
1712:     PetscCall(MatMPIAIJSetPreallocation(B, 0, NULL, 0, NULL));
1713:   }
1714:   b = (Mat_MPISELL *)B->data;

1716:   if (reuse == MAT_REUSE_MATRIX) {
1717:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_REUSE_MATRIX, &b->A));
1718:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_REUSE_MATRIX, &b->B));
1719:   } else {
1720:     PetscCall(MatDestroy(&b->A));
1721:     PetscCall(MatDestroy(&b->B));
1722:     PetscCall(MatDisAssemble_MPIAIJ(A));
1723:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_INITIAL_MATRIX, &b->A));
1724:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_INITIAL_MATRIX, &b->B));
1725:     PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1726:     PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1727:     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1728:     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1729:   }

1731:   if (reuse == MAT_INPLACE_MATRIX) {
1732:     PetscCall(MatHeaderReplace(A, &B));
1733:   } else {
1734:     *newmat = B;
1735:   }
1736:   PetscFunctionReturn(PETSC_SUCCESS);
1737: }

1739: PetscErrorCode MatSOR_MPISELL(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx)
1740: {
1741:   Mat_MPISELL *mat = (Mat_MPISELL *)matin->data;
1742:   Vec          bb1 = NULL;

1744:   PetscFunctionBegin;
1745:   if (flag == SOR_APPLY_UPPER) {
1746:     PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1747:     PetscFunctionReturn(PETSC_SUCCESS);
1748:   }

1750:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1));

1752:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1753:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1754:       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1755:       its--;
1756:     }

1758:     while (its--) {
1759:       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1760:       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));

1762:       /* update rhs: bb1 = bb - B*x */
1763:       PetscCall(VecScale(mat->lvec, -1.0));
1764:       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));

1766:       /* local sweep */
1767:       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_SYMMETRIC_SWEEP, fshift, lits, 1, xx));
1768:     }
1769:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1770:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1771:       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1772:       its--;
1773:     }
1774:     while (its--) {
1775:       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1776:       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));

1778:       /* update rhs: bb1 = bb - B*x */
1779:       PetscCall(VecScale(mat->lvec, -1.0));
1780:       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));

1782:       /* local sweep */
1783:       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx));
1784:     }
1785:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1786:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1787:       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1788:       its--;
1789:     }
1790:     while (its--) {
1791:       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1792:       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));

1794:       /* update rhs: bb1 = bb - B*x */
1795:       PetscCall(VecScale(mat->lvec, -1.0));
1796:       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));

1798:       /* local sweep */
1799:       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx));
1800:     }
1801:   } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported");

1803:   PetscCall(VecDestroy(&bb1));

1805:   matin->factorerrortype = mat->A->factorerrortype;
1806:   PetscFunctionReturn(PETSC_SUCCESS);
1807: }
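/*
   Editorial note: MatSOR_MPISELL() supports SOR_APPLY_UPPER and the processor-local sweeps only;
   true parallel SOR is rejected above. It is normally reached through the PCSOR preconditioner
   rather than called directly. A minimal sketch, assuming A, b, x exist and petscksp.h is
   included:

     KSP ksp;
     PC  pc;

     PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
     PetscCall(KSPSetOperators(ksp, A, A));
     PetscCall(KSPGetPC(ksp, &pc));
     PetscCall(PCSetType(pc, PCSOR));
     PetscCall(KSPSolve(ksp, b, x));
     PetscCall(KSPDestroy(&ksp));
*/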

1809: /*MC
1810:    MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for parallel sparse matrices.

1812:    Options Database Keys:
1813: . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`

1815:   Level: beginner

1817: .seealso: `Mat`, `MATSELL`, `MATSEQSELL`, `MatCreateSELL()`
1818: M*/
1819: PETSC_EXTERN PetscErrorCode MatCreate_MPISELL(Mat B)
1820: {
1821:   Mat_MPISELL *b;
1822:   PetscMPIInt  size;

1824:   PetscFunctionBegin;
1825:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
1826:   PetscCall(PetscNew(&b));
1827:   B->data = (void *)b;
1828:   PetscCall(PetscMemcpy(B->ops, &MatOps_Values, sizeof(struct _MatOps)));
1829:   B->assembled  = PETSC_FALSE;
1830:   B->insertmode = NOT_SET_VALUES;
1831:   b->size       = size;
1832:   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));
1833:   /* build cache for off array entries formed */
1834:   PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));

1836:   b->donotstash  = PETSC_FALSE;
1837:   b->colmap      = NULL;
1838:   b->garray      = NULL;
1839:   b->roworiented = PETSC_TRUE;

1841:   /* stuff used for matrix vector multiply */
1842:   b->lvec  = NULL;
1843:   b->Mvctx = NULL;

1845:   /* stuff for MatGetRow() */
1846:   b->rowindices   = NULL;
1847:   b->rowvalues    = NULL;
1848:   b->getrowactive = PETSC_FALSE;

1850:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPISELL));
1851:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPISELL));
1852:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPISELL));
1853:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPISELLSetPreallocation_C", MatMPISELLSetPreallocation_MPISELL));
1854:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpiaij_C", MatConvert_MPISELL_MPIAIJ));
1855:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPISELL));
1856:   PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPISELL));
1857:   PetscFunctionReturn(PETSC_SUCCESS);
1858: }
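/*
   Editorial note: the constructor above runs when the matrix type is set to MATMPISELL, either
   explicitly with MatSetType() or at runtime via MatSetFromOptions() and -mat_type mpisell.
   A minimal creation sketch:

     Mat A;

     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 8, 8));
     PetscCall(MatSetType(A, MATMPISELL));
     PetscCall(MatMPISELLSetPreallocation(A, 2, NULL, 2, NULL));

   followed by the usual MatSetValues() / MatAssemblyBegin() / MatAssemblyEnd() sequence, and
   MatDestroy() when done.
*/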