Actual source code: baijov.c
2: /*
3: Routines to compute overlapping regions of a parallel MPI matrix
4: and to find submatrices that are shared across processors.
5: */
6: #include <../src/mat/impls/baij/mpi/mpibaij.h>
7: #include <petscbt.h>
9: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat, PetscInt, char **, PetscInt *, PetscInt **);
10: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat, PetscInt, PetscInt **, PetscInt **, PetscInt *);
11: extern PetscErrorCode MatGetRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);
12: extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);
14: PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C, PetscInt imax, IS is[], PetscInt ov)
15: {
16: PetscInt i, N = C->cmap->N, bs = C->rmap->bs, n;
17: const PetscInt *idx;
18: IS *is_new;
20: PetscFunctionBegin;
21: PetscCall(PetscMalloc1(imax, &is_new));
22: /* Convert the indices into block format */
23: PetscCall(ISCompressIndicesGeneral(N, C->rmap->n, bs, imax, is, is_new));
24: PetscCheck(ov >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Negative overlap specified");
25: for (i = 0; i < ov; ++i) PetscCall(MatIncreaseOverlap_MPIBAIJ_Once(C, imax, is_new));
26: for (i = 0; i < imax; i++) {
27: PetscCall(ISDestroy(&is[i]));
28: PetscCall(ISGetLocalSize(is_new[i], &n));
29: PetscCall(ISGetIndices(is_new[i], &idx));
30: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)is_new[i]), bs, n, idx, PETSC_COPY_VALUES, &is[i]));
31: PetscCall(ISDestroy(&is_new[i]));
32: }
33: PetscCall(PetscFree(is_new));
34: PetscFunctionReturn(PETSC_SUCCESS);
35: }
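
/*
   Illustrative usage only (not part of this file): the routine above is the
   MPIBAIJ implementation behind the public MatIncreaseOverlap() interface.
   A minimal sketch of a typical call, where the matrix C, the stride length,
   and ov = 1 are assumptions of the example:

     IS       is[1];
     PetscInt ov = 1;                              // grow the overlap by one level
     PetscCall(ISCreateStride(PETSC_COMM_SELF, 10, 0, 1, &is[0]));
     PetscCall(MatIncreaseOverlap(C, 1, is, ov));  // is[0] is replaced by the enlarged set
     PetscCall(ISDestroy(&is[0]));
*/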
37: /*
38: Sample message format:
39: If processor A wants processor B to process some elements corresponding
40: to index sets is[1] and is[5], the message is laid out as:
41: mesg [0] = 2 (no of index sets in the mesg)
42: -----------
43: mesg [1] = 1 => is[1]
44: mesg [2] = sizeof(is[1]);
45: -----------
46: mesg [3] = 5 => is[5]
47: mesg [4] = sizeof(is[5]);
48: -----------
49: mesg [5]
50: mesg [n] data(is[1])
51: -----------
52: mesg[n+1]
53: mesg[m] data(is[5])
54: -----------
56: Notes:
57: nrqs - no of requests sent (or to be sent out)
58: nrqr - no of requests received (which have to be or which have been processed)
59: */
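
/*
   A minimal sketch (illustrative only, not used below) of how a receiver could
   walk one message laid out as described above; msg stands for a single received
   buffer of PetscInt and is an assumption of the example:

     PetscInt nis = msg[0];              // number of index sets in this message
     PetscInt off = 2 * nis + 1;         // the data follows the header
     for (PetscInt p = 1; p <= nis; p++) {
       PetscInt is_id = msg[2 * p - 1];  // which index set this block of rows belongs to
       PetscInt len   = msg[2 * p];      // number of row indices sent for that set
       for (PetscInt q = 0; q < len; q++) {
         PetscInt row = msg[off + q];    // one requested block row
         (void)is_id; (void)row;         // process (is_id, row) here
       }
       off += len;
     }
*/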
60: PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C, PetscInt imax, IS is[])
61: {
62: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
63: const PetscInt **idx, *idx_i;
64: PetscInt *n, *w3, *w4, **data, len;
65: PetscMPIInt size, rank, tag1, tag2, *w2, *w1, nrqr;
66: PetscInt Mbs, i, j, k, **rbuf, row, nrqs, msz, **outdat, **ptr;
67: PetscInt *ctr, *pa, *tmp, *isz, *isz1, **xdata, **rbuf2, *d_p;
68: PetscMPIInt *onodes1, *olengths1, *onodes2, *olengths2, proc = -1;
69: PetscBT *table;
70: MPI_Comm comm, *iscomms;
71: MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2;
72: char *t_p;
74: PetscFunctionBegin;
75: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
76: size = c->size;
77: rank = c->rank;
78: Mbs = c->Mbs;
80: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));
81: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));
83: PetscCall(PetscMalloc2(imax, (PetscInt ***)&idx, imax, &n));
85: for (i = 0; i < imax; i++) {
86: PetscCall(ISGetIndices(is[i], &idx[i]));
87: PetscCall(ISGetLocalSize(is[i], &n[i]));
88: }
90: /* evaluate communication - message to whom, length of message, and buffer space
91: required. Based on this, buffers are allocated and data is copied into them */
92: PetscCall(PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4));
93: for (i = 0; i < imax; i++) {
94: PetscCall(PetscArrayzero(w4, size)); /* initialise work vector*/
95: idx_i = idx[i];
96: len = n[i];
97: for (j = 0; j < len; j++) {
98: row = idx_i[j];
99: PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Index set cannot have negative entries");
100: PetscCall(PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc));
101: w4[proc]++;
102: }
103: for (j = 0; j < size; j++) {
104: if (w4[j]) {
105: w1[j] += w4[j];
106: w3[j]++;
107: }
108: }
109: }
111: nrqs = 0; /* no of outgoing messages */
112: msz = 0; /* total mesg length (for all procs) */
113: w1[rank] = 0; /* no mesg sent to itself */
114: w3[rank] = 0;
115: for (i = 0; i < size; i++) {
116: if (w1[i]) {
117: w2[i] = 1;
118: nrqs++;
119: } /* there exists a message to proc i */
120: }
121: /* pa - list of processors to communicate with */
122: PetscCall(PetscMalloc1(nrqs, &pa));
123: for (i = 0, j = 0; i < size; i++) {
124: if (w1[i]) {
125: pa[j] = i;
126: j++;
127: }
128: }
130: /* Each message has a header of 1 + 2*(no of IS) entries, followed by the data */
131: for (i = 0; i < nrqs; i++) {
132: j = pa[i];
133: w1[j] += w2[j] + 2 * w3[j];
134: msz += w1[j];
135: }
137: /* Determine the number of messages to expect, their lengths, and from-ids */
138: PetscCall(PetscGatherNumberOfMessages(comm, w2, w1, &nrqr));
139: PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1));
141: /* Now post the Irecvs corresponding to these messages */
142: PetscCall(PetscPostIrecvInt(comm, tag1, nrqr, onodes1, olengths1, &rbuf, &r_waits1));
144: /* Allocate Memory for outgoing messages */
145: PetscCall(PetscMalloc4(size, &outdat, size, &ptr, msz, &tmp, size, &ctr));
146: PetscCall(PetscArrayzero(outdat, size));
147: PetscCall(PetscArrayzero(ptr, size));
148: {
149: PetscInt *iptr = tmp, ict = 0;
150: for (i = 0; i < nrqs; i++) {
151: j = pa[i];
152: iptr += ict;
153: outdat[j] = iptr;
154: ict = w1[j];
155: }
156: }
158: /* Form the outgoing messages */
159: /*plug in the headers*/
160: for (i = 0; i < nrqs; i++) {
161: j = pa[i];
162: outdat[j][0] = 0;
163: PetscCall(PetscArrayzero(outdat[j] + 1, 2 * w3[j]));
164: ptr[j] = outdat[j] + 2 * w3[j] + 1;
165: }
167: /* Memory for doing local proc's work*/
168: {
169: PetscCall(PetscCalloc5(imax, &table, imax, &data, imax, &isz, Mbs * imax, &d_p, (Mbs / PETSC_BITS_PER_BYTE + 1) * imax, &t_p));
171: for (i = 0; i < imax; i++) {
172: table[i] = t_p + (Mbs / PETSC_BITS_PER_BYTE + 1) * i;
173: data[i] = d_p + (Mbs)*i;
174: }
175: }
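  /* Layout assumed above: table[i] is a bit table with one bit per block row
     (Mbs bits, rounded up to whole bytes) used for O(1) membership tests, while
     data[i] can hold up to Mbs block-row indices, the worst case for one index set. */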
177: /* Parse the IS and update local tables and the outgoing buf with the data*/
178: {
179: PetscInt n_i, *data_i, isz_i, *outdat_j, ctr_j;
180: PetscBT table_i;
182: for (i = 0; i < imax; i++) {
183: PetscCall(PetscArrayzero(ctr, size));
184: n_i = n[i];
185: table_i = table[i];
186: idx_i = idx[i];
187: data_i = data[i];
188: isz_i = isz[i];
189: for (j = 0; j < n_i; j++) { /* parse the indices of each IS */
190: row = idx_i[j];
191: PetscCall(PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc));
192: if (proc != rank) { /* copy to the outgoing buffer */
193: ctr[proc]++;
194: *ptr[proc] = row;
195: ptr[proc]++;
196: } else { /* Update the local table */
197: if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
198: }
199: }
200: /* Update the headers for the current IS */
201: for (j = 0; j < size; j++) { /* Can Optimise this loop by using pa[] */
202: if ((ctr_j = ctr[j])) {
203: outdat_j = outdat[j];
204: k = ++outdat_j[0];
205: outdat_j[2 * k] = ctr_j;
206: outdat_j[2 * k - 1] = i;
207: }
208: }
209: isz[i] = isz_i;
210: }
211: }
213: /* Now post the sends */
214: PetscCall(PetscMalloc1(nrqs, &s_waits1));
215: for (i = 0; i < nrqs; ++i) {
216: j = pa[i];
217: PetscCallMPI(MPI_Isend(outdat[j], w1[j], MPIU_INT, j, tag1, comm, s_waits1 + i));
218: }
220: /* No longer need the original indices*/
221: for (i = 0; i < imax; ++i) PetscCall(ISRestoreIndices(is[i], idx + i));
222: PetscCall(PetscFree2(*(PetscInt ***)&idx, n));
224: PetscCall(PetscMalloc1(imax, &iscomms));
225: for (i = 0; i < imax; ++i) {
226: PetscCall(PetscCommDuplicate(PetscObjectComm((PetscObject)is[i]), &iscomms[i], NULL));
227: PetscCall(ISDestroy(&is[i]));
228: }
230: /* Do Local work*/
231: PetscCall(MatIncreaseOverlap_MPIBAIJ_Local(C, imax, table, isz, data));
233: /* Receive messages*/
234: PetscCallMPI(MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE));
235: PetscCallMPI(MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE));
237: /* Phase 1 sends are complete - deallocate buffers */
238: PetscCall(PetscFree4(outdat, ptr, tmp, ctr));
239: PetscCall(PetscFree4(w1, w2, w3, w4));
241: PetscCall(PetscMalloc1(nrqr, &xdata));
242: PetscCall(PetscMalloc1(nrqr, &isz1));
243: PetscCall(MatIncreaseOverlap_MPIBAIJ_Receive(C, nrqr, rbuf, xdata, isz1));
244: if (rbuf) {
245: PetscCall(PetscFree(rbuf[0]));
246: PetscCall(PetscFree(rbuf));
247: }
249: /* Send the data back*/
250: /* Determine the buffer space required for the incoming (reply) messages */
251: {
252: PetscMPIInt *rw1;
254: PetscCall(PetscCalloc1(size, &rw1));
256: for (i = 0; i < nrqr; ++i) {
257: proc = onodes1[i];
258: rw1[proc] = isz1[i];
259: }
261: /* Determine the number of messages to expect, their lengths, and from-ids */
262: PetscCall(PetscGatherMessageLengths(comm, nrqr, nrqs, rw1, &onodes2, &olengths2));
263: PetscCall(PetscFree(rw1));
264: }
265: /* Now post the Irecvs corresponding to these messages */
266: PetscCall(PetscPostIrecvInt(comm, tag2, nrqs, onodes2, olengths2, &rbuf2, &r_waits2));
268: /* Now post the sends */
269: PetscCall(PetscMalloc1(nrqr, &s_waits2));
270: for (i = 0; i < nrqr; ++i) {
271: j = onodes1[i];
272: PetscCallMPI(MPI_Isend(xdata[i], isz1[i], MPIU_INT, j, tag2, comm, s_waits2 + i));
273: }
275: PetscCall(PetscFree(onodes1));
276: PetscCall(PetscFree(olengths1));
278: /* receive work done on other processors*/
279: {
280: PetscMPIInt idex;
281: PetscInt is_no, ct1, max, *rbuf2_i, isz_i, *data_i, jmax;
282: PetscBT table_i;
284: for (i = 0; i < nrqs; ++i) {
285: PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &idex, MPI_STATUS_IGNORE));
286: /* Process the message*/
287: rbuf2_i = rbuf2[idex];
288: ct1 = 2 * rbuf2_i[0] + 1;
289: jmax = rbuf2[idex][0];
290: for (j = 1; j <= jmax; j++) {
291: max = rbuf2_i[2 * j];
292: is_no = rbuf2_i[2 * j - 1];
293: isz_i = isz[is_no];
294: data_i = data[is_no];
295: table_i = table[is_no];
296: for (k = 0; k < max; k++, ct1++) {
297: row = rbuf2_i[ct1];
298: if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
299: }
300: isz[is_no] = isz_i;
301: }
302: }
303: PetscCallMPI(MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE));
304: }
306: for (i = 0; i < imax; ++i) {
307: PetscCall(ISCreateGeneral(iscomms[i], isz[i], data[i], PETSC_COPY_VALUES, is + i));
308: PetscCall(PetscCommDestroy(&iscomms[i]));
309: }
311: PetscCall(PetscFree(iscomms));
312: PetscCall(PetscFree(onodes2));
313: PetscCall(PetscFree(olengths2));
315: PetscCall(PetscFree(pa));
316: if (rbuf2) {
317: PetscCall(PetscFree(rbuf2[0]));
318: PetscCall(PetscFree(rbuf2));
319: }
320: PetscCall(PetscFree(s_waits1));
321: PetscCall(PetscFree(r_waits1));
322: PetscCall(PetscFree(s_waits2));
323: PetscCall(PetscFree(r_waits2));
324: PetscCall(PetscFree5(table, data, isz, d_p, t_p));
325: if (xdata) {
326: PetscCall(PetscFree(xdata[0]));
327: PetscCall(PetscFree(xdata));
328: }
329: PetscCall(PetscFree(isz1));
330: PetscFunctionReturn(PETSC_SUCCESS);
331: }
333: /*
334: MatIncreaseOverlap_MPIBAIJ_Local - Called by MatincreaseOverlap, to do
335: the work on the local processor.
337: Inputs:
338: C - MAT_MPIBAIJ;
339: imax - total no of index sets processed at a time;
340: table - an array of bit tables (PetscBT), each of size Mbs bits.
342: Output:
343: isz - array containing the count of the solution elements corresponding
344: to each index set;
345: data - pointer to the solutions
346: */
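
/*
   Illustrative only: once this routine (plus the receive phase) has filled
   data[i] and isz[i], the caller converts each pair back into an index set,
   essentially as done in MatIncreaseOverlap_MPIBAIJ_Once above:

     for (i = 0; i < imax; i++)
       PetscCall(ISCreateGeneral(comm, isz[i], data[i], PETSC_COPY_VALUES, &is[i]));
*/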
347: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C, PetscInt imax, PetscBT *table, PetscInt *isz, PetscInt **data)
348: {
349: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
350: Mat A = c->A, B = c->B;
351: Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
352: PetscInt start, end, val, max, rstart, cstart, *ai, *aj;
353: PetscInt *bi, *bj, *garray, i, j, k, row, *data_i, isz_i;
354: PetscBT table_i;
356: PetscFunctionBegin;
357: rstart = c->rstartbs;
358: cstart = c->cstartbs;
359: ai = a->i;
360: aj = a->j;
361: bi = b->i;
362: bj = b->j;
363: garray = c->garray;
365: for (i = 0; i < imax; i++) {
366: data_i = data[i];
367: table_i = table[i];
368: isz_i = isz[i];
369: for (j = 0, max = isz[i]; j < max; j++) {
370: row = data_i[j] - rstart;
371: start = ai[row];
372: end = ai[row + 1];
373: for (k = start; k < end; k++) { /* Amat */
374: val = aj[k] + cstart;
375: if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
376: }
377: start = bi[row];
378: end = bi[row + 1];
379: for (k = start; k < end; k++) { /* Bmat */
380: val = garray[bj[k]];
381: if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
382: }
383: }
384: isz[i] = isz_i;
385: }
386: PetscFunctionReturn(PETSC_SUCCESS);
387: }
388: /*
389: MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
390: and return the output
392: Input:
393: C - the matrix
394: nrqr - no of messages being processed.
395: rbuf - an array of pointers to the received requests
397: Output:
398: xdata - array of messages to be sent back
399: isz1 - size of each message
401: For better efficiency perhaps we should malloc each xdata[i] separately;
402: then, if a realloc is required, we would only need to copy the data for that
403: one message rather than for all previous messages, as happens now where a
404: single large chunk of memory is used.
406: */
407: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C, PetscInt nrqr, PetscInt **rbuf, PetscInt **xdata, PetscInt *isz1)
408: {
409: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
410: Mat A = c->A, B = c->B;
411: Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
412: PetscInt rstart, cstart, *ai, *aj, *bi, *bj, *garray, i, j, k;
413: PetscInt row, total_sz, ct, ct1, ct2, ct3, mem_estimate, oct2, l, start, end;
414: PetscInt val, max1, max2, Mbs, no_malloc = 0, *tmp, new_estimate, ctr;
415: PetscInt *rbuf_i, kmax, rbuf_0;
416: PetscBT xtable;
418: PetscFunctionBegin;
419: Mbs = c->Mbs;
420: rstart = c->rstartbs;
421: cstart = c->cstartbs;
422: ai = a->i;
423: aj = a->j;
424: bi = b->i;
425: bj = b->j;
426: garray = c->garray;
428: for (i = 0, ct = 0, total_sz = 0; i < nrqr; ++i) {
429: rbuf_i = rbuf[i];
430: rbuf_0 = rbuf_i[0];
431: ct += rbuf_0;
432: for (j = 1; j <= rbuf_0; j++) total_sz += rbuf_i[2 * j];
433: }
435: if (c->Mbs) max1 = ct * (a->nz + b->nz) / c->Mbs;
436: else max1 = 1;
437: mem_estimate = 3 * ((total_sz > max1 ? total_sz : max1) + 1);
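  /* The estimate takes the average number of nonzero block columns per local
     block row, (a->nz + b->nz)/Mbs, applies it to the ct requested rows, and adds
     a factor of 3 plus one entry of slack; if it still proves too small, the
     buffer is regrown by 1.5x in the loops below. */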
438: if (nrqr) {
439: PetscCall(PetscMalloc1(mem_estimate, &xdata[0]));
440: ++no_malloc;
441: }
442: PetscCall(PetscBTCreate(Mbs, &xtable));
443: PetscCall(PetscArrayzero(isz1, nrqr));
445: ct3 = 0;
446: for (i = 0; i < nrqr; i++) { /* for each message from proc i */
447: rbuf_i = rbuf[i];
448: rbuf_0 = rbuf_i[0];
449: ct1 = 2 * rbuf_0 + 1;
450: ct2 = ct1;
451: ct3 += ct1;
452: for (j = 1; j <= rbuf_0; j++) { /* for each IS from proc i*/
453: PetscCall(PetscBTMemzero(Mbs, xtable));
454: oct2 = ct2;
455: kmax = rbuf_i[2 * j];
456: for (k = 0; k < kmax; k++, ct1++) {
457: row = rbuf_i[ct1];
458: if (!PetscBTLookupSet(xtable, row)) {
459: if (!(ct3 < mem_estimate)) {
460: new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
461: PetscCall(PetscMalloc1(new_estimate, &tmp));
462: PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
463: PetscCall(PetscFree(xdata[0]));
464: xdata[0] = tmp;
465: mem_estimate = new_estimate;
466: ++no_malloc;
467: for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
468: }
469: xdata[i][ct2++] = row;
470: ct3++;
471: }
472: }
473: for (k = oct2, max2 = ct2; k < max2; k++) {
474: row = xdata[i][k] - rstart;
475: start = ai[row];
476: end = ai[row + 1];
477: for (l = start; l < end; l++) {
478: val = aj[l] + cstart;
479: if (!PetscBTLookupSet(xtable, val)) {
480: if (!(ct3 < mem_estimate)) {
481: new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
482: PetscCall(PetscMalloc1(new_estimate, &tmp));
483: PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
484: PetscCall(PetscFree(xdata[0]));
485: xdata[0] = tmp;
486: mem_estimate = new_estimate;
487: ++no_malloc;
488: for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
489: }
490: xdata[i][ct2++] = val;
491: ct3++;
492: }
493: }
494: start = bi[row];
495: end = bi[row + 1];
496: for (l = start; l < end; l++) {
497: val = garray[bj[l]];
498: if (!PetscBTLookupSet(xtable, val)) {
499: if (!(ct3 < mem_estimate)) {
500: new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
501: PetscCall(PetscMalloc1(new_estimate, &tmp));
502: PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
503: PetscCall(PetscFree(xdata[0]));
504: xdata[0] = tmp;
505: mem_estimate = new_estimate;
506: ++no_malloc;
507: for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
508: }
509: xdata[i][ct2++] = val;
510: ct3++;
511: }
512: }
513: }
514: /* Update the header*/
515: xdata[i][2 * j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
516: xdata[i][2 * j - 1] = rbuf_i[2 * j - 1];
517: }
518: xdata[i][0] = rbuf_0;
519: if (i + 1 < nrqr) xdata[i + 1] = xdata[i] + ct2;
520: isz1[i] = ct2; /* size of each message */
521: }
522: PetscCall(PetscBTDestroy(&xtable));
523: PetscCall(PetscInfo(C, "Allocated %" PetscInt_FMT " bytes, required %" PetscInt_FMT ", no of mallocs = %" PetscInt_FMT "\n", mem_estimate, ct3, no_malloc));
524: PetscFunctionReturn(PETSC_SUCCESS);
525: }
527: PetscErrorCode MatCreateSubMatrices_MPIBAIJ(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
528: {
529: IS *isrow_block, *iscol_block;
530: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
531: PetscInt nmax, nstages_local, nstages, i, pos, max_no, N = C->cmap->N, bs = C->rmap->bs;
532: Mat_SeqBAIJ *subc;
533: Mat_SubSppt *smat;
535: PetscFunctionBegin;
536: /* The compression and expansion should be avoided. It does not flag
537: errors and might change the indices, hence it is buggy */
538: PetscCall(PetscMalloc2(ismax, &isrow_block, ismax, &iscol_block));
539: PetscCall(ISCompressIndicesGeneral(C->rmap->N, C->rmap->n, bs, ismax, isrow, isrow_block));
540: if (isrow == iscol) {
541: for (i = 0; i < ismax; i++) {
542: iscol_block[i] = isrow_block[i];
543: PetscCall(PetscObjectReference((PetscObject)iscol_block[i]));
544: }
545: } else PetscCall(ISCompressIndicesGeneral(N, C->cmap->n, bs, ismax, iscol, iscol_block));
547: /* Determine the number of stages through which submatrices are done */
548: if (!C->cmap->N) nmax = 20 * 1000000 / sizeof(PetscInt);
549: else nmax = 20 * 1000000 / (c->Nbs * sizeof(PetscInt));
550: if (!nmax) nmax = 1;
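  /* Worked example with illustrative numbers: for c->Nbs = 100000 block columns
     and 4-byte PetscInt, nmax = 20000000 / 400000 = 50, i.e. at most 50
     submatrices per stage, keeping the per-stage index workspace near 20 MB. */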
552: if (scall == MAT_INITIAL_MATRIX) {
553: nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */
555: /* Make sure every processor loops through the nstages */
556: PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));
558: /* Allocate memory to hold all the submatrices and dummy submatrices */
559: PetscCall(PetscCalloc1(ismax + nstages, submat));
560: } else { /* MAT_REUSE_MATRIX */
561: if (ismax) {
562: subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
563: smat = subc->submatis1;
564: } else { /* (*submat)[0] is a dummy matrix */
565: smat = (Mat_SubSppt *)(*submat)[0]->data;
566: }
567: PetscCheck(smat, PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "MatCreateSubMatrices(...,MAT_REUSE_MATRIX,...) requires submat");
568: nstages = smat->nstages;
569: }
571: for (i = 0, pos = 0; i < nstages; i++) {
572: if (pos + nmax <= ismax) max_no = nmax;
573: else if (pos >= ismax) max_no = 0;
574: else max_no = ismax - pos;
576: PetscCall(MatCreateSubMatrices_MPIBAIJ_local(C, max_no, isrow_block + pos, iscol_block + pos, scall, *submat + pos));
577: if (!max_no) {
578: if (scall == MAT_INITIAL_MATRIX) { /* submat[pos] is a dummy matrix */
579: smat = (Mat_SubSppt *)(*submat)[pos]->data;
580: smat->nstages = nstages;
581: }
582: pos++; /* advance to next dummy matrix if any */
583: } else pos += max_no;
584: }
586: if (scall == MAT_INITIAL_MATRIX && ismax) {
587: /* save nstages for reuse */
588: subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
589: smat = subc->submatis1;
590: smat->nstages = nstages;
591: }
593: for (i = 0; i < ismax; i++) {
594: PetscCall(ISDestroy(&isrow_block[i]));
595: PetscCall(ISDestroy(&iscol_block[i]));
596: }
597: PetscCall(PetscFree2(isrow_block, iscol_block));
598: PetscFunctionReturn(PETSC_SUCCESS);
599: }
601: #if defined(PETSC_USE_CTABLE)
602: PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
603: {
604: PetscInt nGlobalNd = proc_gnode[size];
605: PetscMPIInt fproc;
607: PetscFunctionBegin;
608: PetscCall(PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)), &fproc));
609: if (fproc > size) fproc = size;
610: while (row < proc_gnode[fproc] || row >= proc_gnode[fproc + 1]) {
611: if (row < proc_gnode[fproc]) fproc--;
612: else fproc++;
613: }
614: *rank = fproc;
615: PetscFunctionReturn(PETSC_SUCCESS);
616: }
617: #endif
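
/*
   Illustrative only: PetscGetProc() above starts from an interpolated guess and
   then walks the ownership ranges; a minimal usage sketch, assuming proc_gnode
   holds the usual size+1 ownership offsets with proc_gnode[size] equal to the
   global size:

     PetscMPIInt owner;
     PetscCall(PetscGetProc(row, size, proc_gnode, &owner));
     // now proc_gnode[owner] <= row < proc_gnode[owner+1]
*/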
619: /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
620: PetscErrorCode MatCreateSubMatrices_MPIBAIJ_local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats)
621: {
622: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
623: Mat A = c->A;
624: Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)c->B->data, *subc;
625: const PetscInt **icol, **irow;
626: PetscInt *nrow, *ncol, start;
627: PetscMPIInt rank, size, tag0, tag2, tag3, tag4, *w1, *w2, *w3, *w4, nrqr;
628: PetscInt **sbuf1, **sbuf2, *sbuf2_i, i, j, k, l, ct1, ct2, **rbuf1, row, proc = -1;
629: PetscInt nrqs = 0, msz, **ptr = NULL, *req_size = NULL, *ctr = NULL, *pa, *tmp = NULL, tcol;
630: PetscInt **rbuf3 = NULL, *req_source1 = NULL, *req_source2, **sbuf_aj, **rbuf2 = NULL, max1, max2;
631: PetscInt **lens, is_no, ncols, *cols, mat_i, *mat_j, tmp2, jmax;
632: #if defined(PETSC_USE_CTABLE)
633: PetscHMapI *cmap, cmap_i = NULL, *rmap, rmap_i;
634: #else
635: PetscInt **cmap, *cmap_i = NULL, **rmap, *rmap_i;
636: #endif
637: const PetscInt *irow_i, *icol_i;
638: PetscInt ctr_j, *sbuf1_j, *sbuf_aj_i, *rbuf1_i, kmax, *lens_i;
639: MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2, *r_waits3;
640: MPI_Request *r_waits4, *s_waits3, *s_waits4;
641: MPI_Comm comm;
642: PetscScalar **rbuf4, *rbuf4_i = NULL, **sbuf_aa, *vals, *mat_a = NULL, *imat_a = NULL, *sbuf_aa_i;
643: PetscMPIInt *onodes1, *olengths1, end;
644: PetscInt **row2proc, *row2proc_i, *imat_ilen, *imat_j, *imat_i;
645: Mat_SubSppt *smat_i;
646: PetscBool *issorted, colflag, iscsorted = PETSC_TRUE;
647: PetscInt *sbuf1_i, *rbuf2_i, *rbuf3_i, ilen;
648: PetscInt bs = C->rmap->bs, bs2 = c->bs2, rstart = c->rstartbs;
649: PetscBool ijonly = c->ijonly; /* private flag indicating that only the matrix index structure (no values) is requested */
650: PetscInt nzA, nzB, *a_i = a->i, *b_i = b->i, *a_j = a->j, *b_j = b->j, ctmp, imark, *cworkA, *cworkB;
651: PetscScalar *vworkA = NULL, *vworkB = NULL, *a_a = a->a, *b_a = b->a;
652: PetscInt cstart = c->cstartbs, *bmap = c->garray;
653: PetscBool *allrows, *allcolumns;
655: PetscFunctionBegin;
656: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
657: size = c->size;
658: rank = c->rank;
660: PetscCall(PetscMalloc5(ismax, &row2proc, ismax, &cmap, ismax, &rmap, ismax + 1, &allcolumns, ismax, &allrows));
661: PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, ismax, &issorted));
663: for (i = 0; i < ismax; i++) {
664: PetscCall(ISSorted(iscol[i], &issorted[i]));
665: if (!issorted[i]) iscsorted = issorted[i]; /* columns are not sorted! */
666: PetscCall(ISSorted(isrow[i], &issorted[i]));
668: /* Check for special case: allcolumns */
669: PetscCall(ISIdentity(iscol[i], &colflag));
670: PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));
672: if (colflag && ncol[i] == c->Nbs) {
673: allcolumns[i] = PETSC_TRUE;
674: icol[i] = NULL;
675: } else {
676: allcolumns[i] = PETSC_FALSE;
677: PetscCall(ISGetIndices(iscol[i], &icol[i]));
678: }
680: /* Check for special case: allrows */
681: PetscCall(ISIdentity(isrow[i], &colflag));
682: PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
683: if (colflag && nrow[i] == c->Mbs) {
684: allrows[i] = PETSC_TRUE;
685: irow[i] = NULL;
686: } else {
687: allrows[i] = PETSC_FALSE;
688: PetscCall(ISGetIndices(isrow[i], &irow[i]));
689: }
690: }
692: if (scall == MAT_REUSE_MATRIX) {
693: /* Assumes the new rows are the same length as the old rows */
694: for (i = 0; i < ismax; i++) {
695: subc = (Mat_SeqBAIJ *)(submats[i]->data);
696: PetscCheck(subc->mbs == nrow[i] && subc->nbs == ncol[i], PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix. wrong size");
698: /* Initialize the matrix as if empty */
699: PetscCall(PetscArrayzero(subc->ilen, subc->mbs));
701: /* Carry over the factor type from C */
702: submats[i]->factortype = C->factortype;
704: smat_i = subc->submatis1;
706: nrqs = smat_i->nrqs;
707: nrqr = smat_i->nrqr;
708: rbuf1 = smat_i->rbuf1;
709: rbuf2 = smat_i->rbuf2;
710: rbuf3 = smat_i->rbuf3;
711: req_source2 = smat_i->req_source2;
713: sbuf1 = smat_i->sbuf1;
714: sbuf2 = smat_i->sbuf2;
715: ptr = smat_i->ptr;
716: tmp = smat_i->tmp;
717: ctr = smat_i->ctr;
719: pa = smat_i->pa;
720: req_size = smat_i->req_size;
721: req_source1 = smat_i->req_source1;
723: allcolumns[i] = smat_i->allcolumns;
724: allrows[i] = smat_i->allrows;
725: row2proc[i] = smat_i->row2proc;
726: rmap[i] = smat_i->rmap;
727: cmap[i] = smat_i->cmap;
728: }
730: if (!ismax) { /* Get dummy submatrices and retrieve struct submatis1 */
731: PetscCheck(submats[0], PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "submats are null, cannot reuse");
732: smat_i = (Mat_SubSppt *)submats[0]->data;
734: nrqs = smat_i->nrqs;
735: nrqr = smat_i->nrqr;
736: rbuf1 = smat_i->rbuf1;
737: rbuf2 = smat_i->rbuf2;
738: rbuf3 = smat_i->rbuf3;
739: req_source2 = smat_i->req_source2;
741: sbuf1 = smat_i->sbuf1;
742: sbuf2 = smat_i->sbuf2;
743: ptr = smat_i->ptr;
744: tmp = smat_i->tmp;
745: ctr = smat_i->ctr;
747: pa = smat_i->pa;
748: req_size = smat_i->req_size;
749: req_source1 = smat_i->req_source1;
751: allcolumns[0] = PETSC_FALSE;
752: }
753: } else { /* scall == MAT_INITIAL_MATRIX */
754: /* Get some new tags to keep the communication clean */
755: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));
756: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag3));
758: /* evaluate communication - message to whom, length of message, and buffer space
759: required. Based on this, buffers are allocated and data is copied into them */
760: PetscCall(PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4)); /* mesg size, initialize work vectors */
762: for (i = 0; i < ismax; i++) {
763: jmax = nrow[i];
764: irow_i = irow[i];
766: PetscCall(PetscMalloc1(jmax, &row2proc_i));
767: row2proc[i] = row2proc_i;
769: if (issorted[i]) proc = 0;
770: for (j = 0; j < jmax; j++) {
771: if (!issorted[i]) proc = 0;
772: if (allrows[i]) row = j;
773: else row = irow_i[j];
775: while (row >= c->rangebs[proc + 1]) proc++;
776: w4[proc]++;
777: row2proc_i[j] = proc; /* map row index to proc */
778: }
779: for (j = 0; j < size; j++) {
780: if (w4[j]) {
781: w1[j] += w4[j];
782: w3[j]++;
783: w4[j] = 0;
784: }
785: }
786: }
788: nrqs = 0; /* no of outgoing messages */
789: msz = 0; /* total mesg length (for all procs) */
790: w1[rank] = 0; /* no mesg sent to self */
791: w3[rank] = 0;
792: for (i = 0; i < size; i++) {
793: if (w1[i]) {
794: w2[i] = 1;
795: nrqs++;
796: } /* there exists a message to proc i */
797: }
798: PetscCall(PetscMalloc1(nrqs, &pa)); /* pa - list of processors to communicate with */
799: for (i = 0, j = 0; i < size; i++) {
800: if (w1[i]) {
801: pa[j] = i;
802: j++;
803: }
804: }
806: /* Each message has a header of 1 + 2*(no of IS) entries, followed by the data */
807: for (i = 0; i < nrqs; i++) {
808: j = pa[i];
809: w1[j] += w2[j] + 2 * w3[j];
810: msz += w1[j];
811: }
812: PetscCall(PetscInfo(0, "Number of outgoing messages %" PetscInt_FMT " Total message length %" PetscInt_FMT "\n", nrqs, msz));
814: /* Determine the number of messages to expect, their lengths, and from-ids */
815: PetscCall(PetscGatherNumberOfMessages(comm, w2, w1, &nrqr));
816: PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1));
818: /* Now post the Irecvs corresponding to these messages */
819: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag0));
820: PetscCall(PetscPostIrecvInt(comm, tag0, nrqr, onodes1, olengths1, &rbuf1, &r_waits1));
822: /* Allocate Memory for outgoing messages */
823: PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
824: PetscCall(PetscArrayzero(sbuf1, size));
825: PetscCall(PetscArrayzero(ptr, size));
827: {
828: PetscInt *iptr = tmp;
829: k = 0;
830: for (i = 0; i < nrqs; i++) {
831: j = pa[i];
832: iptr += k;
833: sbuf1[j] = iptr;
834: k = w1[j];
835: }
836: }
838: /* Form the outgoing messages. Initialize the header space */
839: for (i = 0; i < nrqs; i++) {
840: j = pa[i];
841: sbuf1[j][0] = 0;
842: PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
843: ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
844: }
846: /* Parse the isrow and copy data into outbuf */
847: for (i = 0; i < ismax; i++) {
848: row2proc_i = row2proc[i];
849: PetscCall(PetscArrayzero(ctr, size));
850: irow_i = irow[i];
851: jmax = nrow[i];
852: for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
853: proc = row2proc_i[j];
854: if (allrows[i]) row = j;
855: else row = irow_i[j];
857: if (proc != rank) { /* copy to the outgoing buf*/
858: ctr[proc]++;
859: *ptr[proc] = row;
860: ptr[proc]++;
861: }
862: }
863: /* Update the headers for the current IS */
864: for (j = 0; j < size; j++) { /* Can Optimise this loop too */
865: if ((ctr_j = ctr[j])) {
866: sbuf1_j = sbuf1[j];
867: k = ++sbuf1_j[0];
868: sbuf1_j[2 * k] = ctr_j;
869: sbuf1_j[2 * k - 1] = i;
870: }
871: }
872: }
874: /* Now post the sends */
875: PetscCall(PetscMalloc1(nrqs, &s_waits1));
876: for (i = 0; i < nrqs; ++i) {
877: j = pa[i];
878: PetscCallMPI(MPI_Isend(sbuf1[j], w1[j], MPIU_INT, j, tag0, comm, s_waits1 + i));
879: }
881: /* Post Receives to capture the buffer size */
882: PetscCall(PetscMalloc1(nrqs, &r_waits2));
883: PetscCall(PetscMalloc3(nrqs, &req_source2, nrqs, &rbuf2, nrqs, &rbuf3));
884: if (nrqs) rbuf2[0] = tmp + msz;
885: for (i = 1; i < nrqs; ++i) rbuf2[i] = rbuf2[i - 1] + w1[pa[i - 1]];
886: for (i = 0; i < nrqs; ++i) {
887: j = pa[i];
888: PetscCallMPI(MPI_Irecv(rbuf2[i], w1[j], MPIU_INT, j, tag2, comm, r_waits2 + i));
889: }
891: /* Send to other procs the buf size they should allocate */
892: /* Receive messages*/
893: PetscCall(PetscMalloc1(nrqr, &s_waits2));
894: PetscCall(PetscMalloc3(nrqr, &sbuf2, nrqr, &req_size, nrqr, &req_source1));
896: PetscCallMPI(MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE));
897: for (i = 0; i < nrqr; ++i) {
898: req_size[i] = 0;
899: rbuf1_i = rbuf1[i];
900: start = 2 * rbuf1_i[0] + 1;
901: end = olengths1[i];
902: PetscCall(PetscMalloc1(end, &sbuf2[i]));
903: sbuf2_i = sbuf2[i];
904: for (j = start; j < end; j++) {
905: row = rbuf1_i[j] - rstart;
906: ncols = a_i[row + 1] - a_i[row] + b_i[row + 1] - b_i[row];
907: sbuf2_i[j] = ncols;
908: req_size[i] += ncols;
909: }
910: req_source1[i] = onodes1[i];
911: /* form the header */
912: sbuf2_i[0] = req_size[i];
913: for (j = 1; j < start; j++) sbuf2_i[j] = rbuf1_i[j];
915: PetscCallMPI(MPI_Isend(sbuf2_i, end, MPIU_INT, req_source1[i], tag2, comm, s_waits2 + i));
916: }
918: PetscCall(PetscFree(onodes1));
919: PetscCall(PetscFree(olengths1));
921: PetscCall(PetscFree(r_waits1));
922: PetscCall(PetscFree4(w1, w2, w3, w4));
924: /* Receive messages*/
925: PetscCall(PetscMalloc1(nrqs, &r_waits3));
927: PetscCallMPI(MPI_Waitall(nrqs, r_waits2, MPI_STATUSES_IGNORE));
928: for (i = 0; i < nrqs; ++i) {
929: PetscCall(PetscMalloc1(rbuf2[i][0], &rbuf3[i]));
930: req_source2[i] = pa[i];
931: PetscCallMPI(MPI_Irecv(rbuf3[i], rbuf2[i][0], MPIU_INT, req_source2[i], tag3, comm, r_waits3 + i));
932: }
933: PetscCall(PetscFree(r_waits2));
935: /* Wait on sends1 and sends2 */
936: PetscCallMPI(MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE));
937: PetscCallMPI(MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE));
938: PetscCall(PetscFree(s_waits1));
939: PetscCall(PetscFree(s_waits2));
941: /* Now allocate sending buffers for a->j, and send them off */
942: PetscCall(PetscMalloc1(nrqr, &sbuf_aj));
943: for (i = 0, j = 0; i < nrqr; i++) j += req_size[i];
944: if (nrqr) PetscCall(PetscMalloc1(j, &sbuf_aj[0]));
945: for (i = 1; i < nrqr; i++) sbuf_aj[i] = sbuf_aj[i - 1] + req_size[i - 1];
947: PetscCall(PetscMalloc1(nrqr, &s_waits3));
948: {
949: for (i = 0; i < nrqr; i++) {
950: rbuf1_i = rbuf1[i];
951: sbuf_aj_i = sbuf_aj[i];
952: ct1 = 2 * rbuf1_i[0] + 1;
953: ct2 = 0;
954: for (j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
955: kmax = rbuf1[i][2 * j];
956: for (k = 0; k < kmax; k++, ct1++) {
957: row = rbuf1_i[ct1] - rstart;
958: nzA = a_i[row + 1] - a_i[row];
959: nzB = b_i[row + 1] - b_i[row];
960: ncols = nzA + nzB;
961: cworkA = a_j + a_i[row];
962: cworkB = b_j + b_i[row];
964: /* load the column indices for this row into cols */
965: cols = sbuf_aj_i + ct2;
966: for (l = 0; l < nzB; l++) {
967: if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
968: else break;
969: }
970: imark = l;
971: for (l = 0; l < nzA; l++) cols[imark + l] = cstart + cworkA[l];
972: for (l = imark; l < nzB; l++) cols[nzA + l] = bmap[cworkB[l]];
973: ct2 += ncols;
974: }
975: }
976: PetscCallMPI(MPI_Isend(sbuf_aj_i, req_size[i], MPIU_INT, req_source1[i], tag3, comm, s_waits3 + i));
977: }
978: }
980: /* create col map: global col of C -> local col of submatrices */
981: #if defined(PETSC_USE_CTABLE)
982: for (i = 0; i < ismax; i++) {
983: if (!allcolumns[i]) {
984: PetscCall(PetscHMapICreateWithSize(ncol[i], cmap + i));
986: jmax = ncol[i];
987: icol_i = icol[i];
988: cmap_i = cmap[i];
989: for (j = 0; j < jmax; j++) PetscCall(PetscHMapISet(cmap[i], icol_i[j] + 1, j + 1));
990: } else cmap[i] = NULL;
991: }
992: #else
993: for (i = 0; i < ismax; i++) {
994: if (!allcolumns[i]) {
995: PetscCall(PetscCalloc1(c->Nbs, &cmap[i]));
996: jmax = ncol[i];
997: icol_i = icol[i];
998: cmap_i = cmap[i];
999: for (j = 0; j < jmax; j++) cmap_i[icol_i[j]] = j + 1;
1000: } else cmap[i] = NULL;
1001: }
1002: #endif
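  /* Convention used for cmap: entries are stored shifted by one, so a lookup
     that returns 0 means "this global block column is not in submatrix i" and
     tcol - 1 recovers the local column; e.g. if icol[i] = {7, 9}, global column 9
     maps to local column 1 and is stored as 2. */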
1004: /* Create lens which is required for MatCreate... */
1005: for (i = 0, j = 0; i < ismax; i++) j += nrow[i];
1006: PetscCall(PetscMalloc1(ismax, &lens));
1008: if (ismax) PetscCall(PetscCalloc1(j, &lens[0]));
1009: for (i = 1; i < ismax; i++) lens[i] = lens[i - 1] + nrow[i - 1];
1011: /* Update lens from local data */
1012: for (i = 0; i < ismax; i++) {
1013: row2proc_i = row2proc[i];
1014: jmax = nrow[i];
1015: if (!allcolumns[i]) cmap_i = cmap[i];
1016: irow_i = irow[i];
1017: lens_i = lens[i];
1018: for (j = 0; j < jmax; j++) {
1019: if (allrows[i]) row = j;
1020: else row = irow_i[j]; /* global blocked row of C */
1022: proc = row2proc_i[j];
1023: if (proc == rank) {
1024: /* Get indices from matA and then from matB */
1025: #if defined(PETSC_USE_CTABLE)
1026: PetscInt tt;
1027: #endif
1028: row = row - rstart;
1029: nzA = a_i[row + 1] - a_i[row];
1030: nzB = b_i[row + 1] - b_i[row];
1031: cworkA = a_j + a_i[row];
1032: cworkB = b_j + b_i[row];
1034: if (!allcolumns[i]) {
1035: #if defined(PETSC_USE_CTABLE)
1036: for (k = 0; k < nzA; k++) {
1037: PetscCall(PetscHMapIGetWithDefault(cmap_i, cstart + cworkA[k] + 1, 0, &tt));
1038: if (tt) lens_i[j]++;
1039: }
1040: for (k = 0; k < nzB; k++) {
1041: PetscCall(PetscHMapIGetWithDefault(cmap_i, bmap[cworkB[k]] + 1, 0, &tt));
1042: if (tt) lens_i[j]++;
1043: }
1045: #else
1046: for (k = 0; k < nzA; k++) {
1047: if (cmap_i[cstart + cworkA[k]]) lens_i[j]++;
1048: }
1049: for (k = 0; k < nzB; k++) {
1050: if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++;
1051: }
1052: #endif
1053: } else { /* allcolumns */
1054: lens_i[j] = nzA + nzB;
1055: }
1056: }
1057: }
1058: }
1060: /* Create row map: global row of C -> local row of submatrices */
1061: for (i = 0; i < ismax; i++) {
1062: if (!allrows[i]) {
1063: #if defined(PETSC_USE_CTABLE)
1064: PetscCall(PetscHMapICreateWithSize(nrow[i], rmap + i));
1065: irow_i = irow[i];
1066: jmax = nrow[i];
1067: for (j = 0; j < jmax; j++) {
1068: if (allrows[i]) {
1069: PetscCall(PetscHMapISet(rmap[i], j + 1, j + 1));
1070: } else {
1071: PetscCall(PetscHMapISet(rmap[i], irow_i[j] + 1, j + 1));
1072: }
1073: }
1074: #else
1075: PetscCall(PetscCalloc1(c->Mbs, &rmap[i]));
1076: rmap_i = rmap[i];
1077: irow_i = irow[i];
1078: jmax = nrow[i];
1079: for (j = 0; j < jmax; j++) {
1080: if (allrows[i]) rmap_i[j] = j;
1081: else rmap_i[irow_i[j]] = j;
1082: }
1083: #endif
1084: } else rmap[i] = NULL;
1085: }
1087: /* Update lens from offproc data */
1088: {
1089: PetscInt *rbuf2_i, *rbuf3_i, *sbuf1_i;
1091: PetscCallMPI(MPI_Waitall(nrqs, r_waits3, MPI_STATUSES_IGNORE));
1092: for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
1093: sbuf1_i = sbuf1[pa[tmp2]];
1094: jmax = sbuf1_i[0];
1095: ct1 = 2 * jmax + 1;
1096: ct2 = 0;
1097: rbuf2_i = rbuf2[tmp2];
1098: rbuf3_i = rbuf3[tmp2];
1099: for (j = 1; j <= jmax; j++) {
1100: is_no = sbuf1_i[2 * j - 1];
1101: max1 = sbuf1_i[2 * j];
1102: lens_i = lens[is_no];
1103: if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1104: rmap_i = rmap[is_no];
1105: for (k = 0; k < max1; k++, ct1++) {
1106: if (allrows[is_no]) {
1107: row = sbuf1_i[ct1];
1108: } else {
1109: #if defined(PETSC_USE_CTABLE)
1110: PetscCall(PetscHMapIGetWithDefault(rmap_i, sbuf1_i[ct1] + 1, 0, &row));
1111: row--;
1112: PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
1113: #else
1114: row = rmap_i[sbuf1_i[ct1]]; /* local row index in the submatrix */
1115: #endif
1116: }
1117: max2 = rbuf2_i[ct1];
1118: for (l = 0; l < max2; l++, ct2++) {
1119: if (!allcolumns[is_no]) {
1120: #if defined(PETSC_USE_CTABLE)
1121: PetscCall(PetscHMapIGetWithDefault(cmap_i, rbuf3_i[ct2] + 1, 0, &tcol));
1122: #else
1123: tcol = cmap_i[rbuf3_i[ct2]];
1124: #endif
1125: if (tcol) lens_i[row]++;
1126: } else { /* allcolumns */
1127: lens_i[row]++; /* lens_i[row] += max2 ? */
1128: }
1129: }
1130: }
1131: }
1132: }
1133: }
1134: PetscCall(PetscFree(r_waits3));
1135: PetscCallMPI(MPI_Waitall(nrqr, s_waits3, MPI_STATUSES_IGNORE));
1136: PetscCall(PetscFree(s_waits3));
1138: /* Create the submatrices */
1139: for (i = 0; i < ismax; i++) {
1140: PetscInt bs_tmp;
1141: if (ijonly) bs_tmp = 1;
1142: else bs_tmp = bs;
1144: PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
1145: PetscCall(MatSetSizes(submats[i], nrow[i] * bs_tmp, ncol[i] * bs_tmp, PETSC_DETERMINE, PETSC_DETERMINE));
1147: PetscCall(MatSetType(submats[i], ((PetscObject)A)->type_name));
1148: PetscCall(MatSeqBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i]));
1149: PetscCall(MatSeqSBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i])); /* this subroutine is used by SBAIJ routines */
1151: /* create struct Mat_SubSppt and attach it to submats[i] */
1152: PetscCall(PetscNew(&smat_i));
1153: subc = (Mat_SeqBAIJ *)submats[i]->data;
1154: subc->submatis1 = smat_i;
1156: smat_i->destroy = submats[i]->ops->destroy;
1157: submats[i]->ops->destroy = MatDestroySubMatrix_SeqBAIJ;
1158: submats[i]->factortype = C->factortype;
1160: smat_i->id = i;
1161: smat_i->nrqs = nrqs;
1162: smat_i->nrqr = nrqr;
1163: smat_i->rbuf1 = rbuf1;
1164: smat_i->rbuf2 = rbuf2;
1165: smat_i->rbuf3 = rbuf3;
1166: smat_i->sbuf2 = sbuf2;
1167: smat_i->req_source2 = req_source2;
1169: smat_i->sbuf1 = sbuf1;
1170: smat_i->ptr = ptr;
1171: smat_i->tmp = tmp;
1172: smat_i->ctr = ctr;
1174: smat_i->pa = pa;
1175: smat_i->req_size = req_size;
1176: smat_i->req_source1 = req_source1;
1178: smat_i->allcolumns = allcolumns[i];
1179: smat_i->allrows = allrows[i];
1180: smat_i->singleis = PETSC_FALSE;
1181: smat_i->row2proc = row2proc[i];
1182: smat_i->rmap = rmap[i];
1183: smat_i->cmap = cmap[i];
1184: }
1186: if (!ismax) { /* Create a dummy submats[0] so the Mat_SubSppt struct can be reused */
1187: PetscCall(MatCreate(PETSC_COMM_SELF, &submats[0]));
1188: PetscCall(MatSetSizes(submats[0], 0, 0, PETSC_DETERMINE, PETSC_DETERMINE));
1189: PetscCall(MatSetType(submats[0], MATDUMMY));
1191: /* create struct Mat_SubSppt and attach it to submats[0] */
1192: PetscCall(PetscNew(&smat_i));
1193: submats[0]->data = (void *)smat_i;
1195: smat_i->destroy = submats[0]->ops->destroy;
1196: submats[0]->ops->destroy = MatDestroySubMatrix_Dummy;
1197: submats[0]->factortype = C->factortype;
1199: smat_i->id = 0;
1200: smat_i->nrqs = nrqs;
1201: smat_i->nrqr = nrqr;
1202: smat_i->rbuf1 = rbuf1;
1203: smat_i->rbuf2 = rbuf2;
1204: smat_i->rbuf3 = rbuf3;
1205: smat_i->sbuf2 = sbuf2;
1206: smat_i->req_source2 = req_source2;
1208: smat_i->sbuf1 = sbuf1;
1209: smat_i->ptr = ptr;
1210: smat_i->tmp = tmp;
1211: smat_i->ctr = ctr;
1213: smat_i->pa = pa;
1214: smat_i->req_size = req_size;
1215: smat_i->req_source1 = req_source1;
1217: smat_i->allcolumns = PETSC_FALSE;
1218: smat_i->singleis = PETSC_FALSE;
1219: smat_i->row2proc = NULL;
1220: smat_i->rmap = NULL;
1221: smat_i->cmap = NULL;
1222: }
1224: if (ismax) PetscCall(PetscFree(lens[0]));
1225: PetscCall(PetscFree(lens));
1226: if (sbuf_aj) {
1227: PetscCall(PetscFree(sbuf_aj[0]));
1228: PetscCall(PetscFree(sbuf_aj));
1229: }
1231: } /* end of scall == MAT_INITIAL_MATRIX */
1233: /* Post recv matrix values */
1234: if (!ijonly) {
1235: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag4));
1236: PetscCall(PetscMalloc1(nrqs, &rbuf4));
1237: PetscCall(PetscMalloc1(nrqs, &r_waits4));
1238: for (i = 0; i < nrqs; ++i) {
1239: PetscCall(PetscMalloc1(rbuf2[i][0] * bs2, &rbuf4[i]));
1240: PetscCallMPI(MPI_Irecv(rbuf4[i], rbuf2[i][0] * bs2, MPIU_SCALAR, req_source2[i], tag4, comm, r_waits4 + i));
1241: }
1243: /* Allocate sending buffers for a->a, and send them off */
1244: PetscCall(PetscMalloc1(nrqr, &sbuf_aa));
1245: for (i = 0, j = 0; i < nrqr; i++) j += req_size[i];
1247: if (nrqr) PetscCall(PetscMalloc1(j * bs2, &sbuf_aa[0]));
1248: for (i = 1; i < nrqr; i++) sbuf_aa[i] = sbuf_aa[i - 1] + req_size[i - 1] * bs2;
1250: PetscCall(PetscMalloc1(nrqr, &s_waits4));
1252: for (i = 0; i < nrqr; i++) {
1253: rbuf1_i = rbuf1[i];
1254: sbuf_aa_i = sbuf_aa[i];
1255: ct1 = 2 * rbuf1_i[0] + 1;
1256: ct2 = 0;
1257: for (j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
1258: kmax = rbuf1_i[2 * j];
1259: for (k = 0; k < kmax; k++, ct1++) {
1260: row = rbuf1_i[ct1] - rstart;
1261: nzA = a_i[row + 1] - a_i[row];
1262: nzB = b_i[row + 1] - b_i[row];
1263: ncols = nzA + nzB;
1264: cworkB = b_j + b_i[row];
1265: vworkA = a_a + a_i[row] * bs2;
1266: vworkB = b_a + b_i[row] * bs2;
1268: /* load the column values for this row into vals*/
1269: vals = sbuf_aa_i + ct2 * bs2;
1270: for (l = 0; l < nzB; l++) {
1271: if ((bmap[cworkB[l]]) < cstart) {
1272: PetscCall(PetscArraycpy(vals + l * bs2, vworkB + l * bs2, bs2));
1273: } else break;
1274: }
1275: imark = l;
1276: for (l = 0; l < nzA; l++) PetscCall(PetscArraycpy(vals + (imark + l) * bs2, vworkA + l * bs2, bs2));
1277: for (l = imark; l < nzB; l++) PetscCall(PetscArraycpy(vals + (nzA + l) * bs2, vworkB + l * bs2, bs2));
1279: ct2 += ncols;
1280: }
1281: }
1282: PetscCallMPI(MPI_Isend(sbuf_aa_i, req_size[i] * bs2, MPIU_SCALAR, req_source1[i], tag4, comm, s_waits4 + i));
1283: }
1284: }
1286: /* Assemble the matrices */
1287: /* First assemble the local rows */
1288: for (i = 0; i < ismax; i++) {
1289: row2proc_i = row2proc[i];
1290: subc = (Mat_SeqBAIJ *)submats[i]->data;
1291: imat_ilen = subc->ilen;
1292: imat_j = subc->j;
1293: imat_i = subc->i;
1294: imat_a = subc->a;
1296: if (!allcolumns[i]) cmap_i = cmap[i];
1297: rmap_i = rmap[i];
1298: irow_i = irow[i];
1299: jmax = nrow[i];
1300: for (j = 0; j < jmax; j++) {
1301: if (allrows[i]) row = j;
1302: else row = irow_i[j];
1303: proc = row2proc_i[j];
1305: if (proc == rank) {
1306: row = row - rstart;
1307: nzA = a_i[row + 1] - a_i[row];
1308: nzB = b_i[row + 1] - b_i[row];
1309: cworkA = a_j + a_i[row];
1310: cworkB = b_j + b_i[row];
1311: if (!ijonly) {
1312: vworkA = a_a + a_i[row] * bs2;
1313: vworkB = b_a + b_i[row] * bs2;
1314: }
1316: if (allrows[i]) {
1317: row = row + rstart;
1318: } else {
1319: #if defined(PETSC_USE_CTABLE)
1320: PetscCall(PetscHMapIGetWithDefault(rmap_i, row + rstart + 1, 0, &row));
1321: row--;
1323: PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
1324: #else
1325: row = rmap_i[row + rstart];
1326: #endif
1327: }
1328: mat_i = imat_i[row];
1329: if (!ijonly) mat_a = imat_a + mat_i * bs2;
1330: mat_j = imat_j + mat_i;
1331: ilen = imat_ilen[row];
1333: /* load the column indices for this row into cols*/
1334: if (!allcolumns[i]) {
1335: for (l = 0; l < nzB; l++) {
1336: if ((ctmp = bmap[cworkB[l]]) < cstart) {
1337: #if defined(PETSC_USE_CTABLE)
1338: PetscCall(PetscHMapIGetWithDefault(cmap_i, ctmp + 1, 0, &tcol));
1339: if (tcol) {
1340: #else
1341: if ((tcol = cmap_i[ctmp])) {
1342: #endif
1343: *mat_j++ = tcol - 1;
1344: PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1345: mat_a += bs2;
1346: ilen++;
1347: }
1348: } else break;
1349: }
1350: imark = l;
1351: for (l = 0; l < nzA; l++) {
1352: #if defined(PETSC_USE_CTABLE)
1353: PetscCall(PetscHMapIGetWithDefault(cmap_i, cstart + cworkA[l] + 1, 0, &tcol));
1354: if (tcol) {
1355: #else
1356: if ((tcol = cmap_i[cstart + cworkA[l]])) {
1357: #endif
1358: *mat_j++ = tcol - 1;
1359: if (!ijonly) {
1360: PetscCall(PetscArraycpy(mat_a, vworkA + l * bs2, bs2));
1361: mat_a += bs2;
1362: }
1363: ilen++;
1364: }
1365: }
1366: for (l = imark; l < nzB; l++) {
1367: #if defined(PETSC_USE_CTABLE)
1368: PetscCall(PetscHMapIGetWithDefault(cmap_i, bmap[cworkB[l]] + 1, 0, &tcol));
1369: if (tcol) {
1370: #else
1371: if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1372: #endif
1373: *mat_j++ = tcol - 1;
1374: if (!ijonly) {
1375: PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1376: mat_a += bs2;
1377: }
1378: ilen++;
1379: }
1380: }
1381: } else { /* allcolumns */
1382: for (l = 0; l < nzB; l++) {
1383: if ((ctmp = bmap[cworkB[l]]) < cstart) {
1384: *mat_j++ = ctmp;
1385: PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1386: mat_a += bs2;
1387: ilen++;
1388: } else break;
1389: }
1390: imark = l;
1391: for (l = 0; l < nzA; l++) {
1392: *mat_j++ = cstart + cworkA[l];
1393: if (!ijonly) {
1394: PetscCall(PetscArraycpy(mat_a, vworkA + l * bs2, bs2));
1395: mat_a += bs2;
1396: }
1397: ilen++;
1398: }
1399: for (l = imark; l < nzB; l++) {
1400: *mat_j++ = bmap[cworkB[l]];
1401: if (!ijonly) {
1402: PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1403: mat_a += bs2;
1404: }
1405: ilen++;
1406: }
1407: }
1408: imat_ilen[row] = ilen;
1409: }
1410: }
1411: }
1413: /* Now assemble the off proc rows */
1414: if (!ijonly) PetscCallMPI(MPI_Waitall(nrqs, r_waits4, MPI_STATUSES_IGNORE));
1415: for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
1416: sbuf1_i = sbuf1[pa[tmp2]];
1417: jmax = sbuf1_i[0];
1418: ct1 = 2 * jmax + 1;
1419: ct2 = 0;
1420: rbuf2_i = rbuf2[tmp2];
1421: rbuf3_i = rbuf3[tmp2];
1422: if (!ijonly) rbuf4_i = rbuf4[tmp2];
1423: for (j = 1; j <= jmax; j++) {
1424: is_no = sbuf1_i[2 * j - 1];
1425: rmap_i = rmap[is_no];
1426: if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1427: subc = (Mat_SeqBAIJ *)submats[is_no]->data;
1428: imat_ilen = subc->ilen;
1429: imat_j = subc->j;
1430: imat_i = subc->i;
1431: if (!ijonly) imat_a = subc->a;
1432: max1 = sbuf1_i[2 * j];
1433: for (k = 0; k < max1; k++, ct1++) { /* for each received block row */
1434: row = sbuf1_i[ct1];
1436: if (allrows[is_no]) {
1437: row = sbuf1_i[ct1];
1438: } else {
1439: #if defined(PETSC_USE_CTABLE)
1440: PetscCall(PetscHMapIGetWithDefault(rmap_i, row + 1, 0, &row));
1441: row--;
1442: PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
1443: #else
1444: row = rmap_i[row];
1445: #endif
1446: }
1447: ilen = imat_ilen[row];
1448: mat_i = imat_i[row];
1449: if (!ijonly) mat_a = imat_a + mat_i * bs2;
1450: mat_j = imat_j + mat_i;
1451: max2 = rbuf2_i[ct1];
1452: if (!allcolumns[is_no]) {
1453: for (l = 0; l < max2; l++, ct2++) {
1454: #if defined(PETSC_USE_CTABLE)
1455: PetscCall(PetscHMapIGetWithDefault(cmap_i, rbuf3_i[ct2] + 1, 0, &tcol));
1456: #else
1457: tcol = cmap_i[rbuf3_i[ct2]];
1458: #endif
1459: if (tcol) {
1460: *mat_j++ = tcol - 1;
1461: if (!ijonly) {
1462: PetscCall(PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2));
1463: mat_a += bs2;
1464: }
1465: ilen++;
1466: }
1467: }
1468: } else { /* allcolumns */
1469: for (l = 0; l < max2; l++, ct2++) {
1470: *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */
1471: if (!ijonly) {
1472: PetscCall(PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2));
1473: mat_a += bs2;
1474: }
1475: ilen++;
1476: }
1477: }
1478: imat_ilen[row] = ilen;
1479: }
1480: }
1481: }
1483: if (!iscsorted) { /* sort column indices of the rows */
1484: MatScalar *work;
1486: PetscCall(PetscMalloc1(bs2, &work));
1487: for (i = 0; i < ismax; i++) {
1488: subc = (Mat_SeqBAIJ *)submats[i]->data;
1489: imat_ilen = subc->ilen;
1490: imat_j = subc->j;
1491: imat_i = subc->i;
1492: if (!ijonly) imat_a = subc->a;
1493: if (allcolumns[i]) continue;
1495: jmax = nrow[i];
1496: for (j = 0; j < jmax; j++) {
1497: mat_i = imat_i[j];
1498: mat_j = imat_j + mat_i;
1499: ilen = imat_ilen[j];
1500: if (ijonly) {
1501: PetscCall(PetscSortInt(ilen, mat_j));
1502: } else {
1503: mat_a = imat_a + mat_i * bs2;
1504: PetscCall(PetscSortIntWithDataArray(ilen, mat_j, mat_a, bs2 * sizeof(MatScalar), work));
1505: }
1506: }
1507: }
1508: PetscCall(PetscFree(work));
1509: }
1511: if (!ijonly) {
1512: PetscCall(PetscFree(r_waits4));
1513: PetscCallMPI(MPI_Waitall(nrqr, s_waits4, MPI_STATUSES_IGNORE));
1514: PetscCall(PetscFree(s_waits4));
1515: }
1517: /* Restore the indices */
1518: for (i = 0; i < ismax; i++) {
1519: if (!allrows[i]) PetscCall(ISRestoreIndices(isrow[i], irow + i));
1520: if (!allcolumns[i]) PetscCall(ISRestoreIndices(iscol[i], icol + i));
1521: }
1523: for (i = 0; i < ismax; i++) {
1524: PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
1525: PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
1526: }
1528: PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, issorted));
1529: PetscCall(PetscFree5(row2proc, cmap, rmap, allcolumns, allrows));
1531: if (!ijonly) {
1532: if (sbuf_aa) {
1533: PetscCall(PetscFree(sbuf_aa[0]));
1534: PetscCall(PetscFree(sbuf_aa));
1535: }
1537: for (i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf4[i]));
1538: PetscCall(PetscFree(rbuf4));
1539: }
1540: c->ijonly = PETSC_FALSE; /* set back to the default */
1541: PetscFunctionReturn(PETSC_SUCCESS);
1542: }