Actual source code: sbaijov.c
2: /*
3: Routines to compute overlapping regions of a parallel MPI matrix.
4: Used for finding submatrices that are shared across processors.
5: */
6: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
7: #include <petscbt.h>
9: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat, PetscInt, IS *);
10: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat, PetscInt *, PetscInt, PetscInt *, PetscBT *);
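/*
   Illustration (not part of the original source): MatIncreaseOverlap() dispatches to the routine
   below for MATMPISBAIJ matrices. A hypothetical caller-side sketch:

     IS       is[1];         // subdomain index sets, built elsewhere (e.g. by a PCASM-style setup)
     PetscInt overlap = 2;    // number of overlap levels requested
     PetscCall(MatIncreaseOverlap(A, 1, is, overlap));   // A assumed to be of type MATMPISBAIJ

   On return each is[i] has been replaced by an enlarged set containing the original indices plus
   all indices reachable within 'overlap' levels in the matrix graph.
*/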
12: PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C, PetscInt is_max, IS is[], PetscInt ov)
13: {
14: PetscInt i, N = C->cmap->N, bs = C->rmap->bs, M = C->rmap->N, Mbs = M / bs, *nidx, isz, iov;
15: IS *is_new, *is_row;
16: Mat *submats;
17: Mat_MPISBAIJ *c = (Mat_MPISBAIJ *)C->data;
18: Mat_SeqSBAIJ *asub_i;
19: PetscBT table;
20: PetscInt *ai, brow, nz, nis, l, nmax, nstages_local, nstages, max_no, pos;
21: const PetscInt *idx;
22: PetscBool flg;
24: PetscFunctionBegin;
25: PetscCall(PetscMalloc1(is_max, &is_new));
26: /* Convert the indices into block format */
27: PetscCall(ISCompressIndicesGeneral(N, C->rmap->n, bs, is_max, is, is_new));
28: PetscCheck(ov >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Negative overlap specified");
30: /* check whether the user requested the previous non-scalable implementation */
31: flg = PETSC_FALSE;
32: PetscCall(PetscOptionsHasName(NULL, NULL, "-IncreaseOverlap_old", &flg));
33: if (flg) { /* previous non-scalable implementation */
34: printf("use previous non-scalable implementation...\n");
35: for (i = 0; i < ov; ++i) PetscCall(MatIncreaseOverlap_MPISBAIJ_Once(C, is_max, is_new));
36: } else { /* implementation using modified BAIJ routines */
38: PetscCall(PetscMalloc1(Mbs + 1, &nidx));
39: PetscCall(PetscBTCreate(Mbs, &table)); /* for column search */
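/*
   Explanatory note: PetscBT is PETSc's packed bit table. PetscBTLookupSet(table, k) returns the
   previous value of bit k and then sets it, so the idiom used throughout this file,

     if (!PetscBTLookupSet(table, k)) nidx[isz++] = k;   // append k only the first time it is seen

   appends each block index to the growing list at most once.
*/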
41: /* Create is_row */
42: PetscCall(PetscMalloc1(is_max, &is_row));
43: PetscCall(ISCreateStride(PETSC_COMM_SELF, Mbs, 0, 1, &is_row[0]));
45: for (i = 1; i < is_max; i++) { is_row[i] = is_row[0]; /* reuse is_row[0] */ }
47: /* Allocate memory to hold all the submatrices - Modified from MatCreateSubMatrices_MPIBAIJ() */
48: PetscCall(PetscMalloc1(is_max + 1, &submats));
50: /* Determine the number of stages through which submatrices are done */
51: nmax = 20 * 1000000 / (c->Nbs * sizeof(PetscInt));
52: if (!nmax) nmax = 1;
53: nstages_local = is_max / nmax + ((is_max % nmax) ? 1 : 0);
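/*
   Staging arithmetic, by example (illustration only): with is_max = 5 and nmax = 2 this gives
   nstages_local = 5/2 + 1 = 3, i.e. the submatrices are extracted in batches of at most nmax
   index sets so that the temporary storage per stage stays bounded.
*/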
55: /* Make sure every processor loops through the nstages */
56: PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));
58: {
59: const PetscObject obj = (PetscObject)c->A;
60: size_t new_len, cur_len, max_len;
62: PetscCall(PetscStrlen(MATSEQBAIJ, &new_len));
63: PetscCall(PetscStrlen(MATSEQSBAIJ, &cur_len));
64: max_len = PetscMax(cur_len, new_len) + 1;
65: PetscCall(PetscRealloc(max_len * sizeof(*(obj->type_name)), &obj->type_name));
66: /* The resulting submatrices should be BAIJ, not SBAIJ, hence we change this value to
67: trigger that */
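/* (Note: the type_name buffer is grown above because "seqbaij" and "seqsbaij" differ in length;
    the longer name must fit while it is temporarily swapped in and out below.) */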
68: for (iov = 0; iov < ov; ++iov) {
69: /* 1) Get submats for column search */
70: PetscCall(PetscStrncpy(obj->type_name, MATSEQBAIJ, max_len));
71: for (i = 0, pos = 0; i < nstages; i++) {
72: if (pos + nmax <= is_max) max_no = nmax;
73: else if (pos == is_max) max_no = 0;
74: else max_no = is_max - pos;
75: c->ijonly = PETSC_TRUE; /* only matrix data structures are requested */
77: PetscCall(MatCreateSubMatrices_MPIBAIJ_local(C, max_no, is_row + pos, is_new + pos, MAT_INITIAL_MATRIX, submats + pos));
78: pos += max_no;
79: }
80: PetscCall(PetscStrncpy(obj->type_name, MATSEQSBAIJ, max_len));
82: /* 2) Row search */
83: PetscCall(MatIncreaseOverlap_MPIBAIJ_Once(C, is_max, is_new));
85: /* 3) Column search */
86: for (i = 0; i < is_max; i++) {
87: asub_i = (Mat_SeqSBAIJ *)submats[i]->data;
88: ai = asub_i->i;
90: /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ_Once() into the table */
91: PetscCall(PetscBTMemzero(Mbs, table));
93: PetscCall(ISGetIndices(is_new[i], &idx));
94: PetscCall(ISGetLocalSize(is_new[i], &nis));
95: for (l = 0; l < nis; l++) {
96: PetscCall(PetscBTSet(table, idx[l]));
97: nidx[l] = idx[l];
98: }
99: isz = nis;
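/*
   Explanatory note: submats[i] holds all Mbs block rows of C restricted to the columns in
   is_new[i] (structure only, since c->ijonly was set). A block row with a nonzero in this
   submatrix is therefore coupled to the current set through a column, and is added below.
*/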
101: /* add block rows that have entries in the submatrix (column search) */
102: for (brow = 0; brow < Mbs; brow++) {
103: nz = ai[brow + 1] - ai[brow];
104: if (nz) {
105: if (!PetscBTLookupSet(table, brow)) nidx[isz++] = brow;
106: }
107: }
108: PetscCall(ISRestoreIndices(is_new[i], &idx));
109: PetscCall(ISDestroy(&is_new[i]));
111: /* create updated is_new */
112: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, isz, nidx, PETSC_COPY_VALUES, is_new + i));
113: }
115: /* Free tmp spaces */
116: for (i = 0; i < is_max; i++) PetscCall(MatDestroy(&submats[i]));
117: }
119: PetscCall(PetscBTDestroy(&table));
120: PetscCall(PetscFree(submats));
121: PetscCall(ISDestroy(&is_row[0]));
122: PetscCall(PetscFree(is_row));
123: PetscCall(PetscFree(nidx));
124: }
125: }
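/* (Explanatory note: the block-compressed sets in is_new[] are expanded back to point indices
    below; ISCreateBlock() with block size bs turns each block index into bs consecutive indices.) */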
126: for (i = 0; i < is_max; i++) {
127: PetscCall(ISDestroy(&is[i]));
128: PetscCall(ISGetLocalSize(is_new[i], &nis));
129: PetscCall(ISGetIndices(is_new[i], &idx));
130: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)is_new[i]), bs, nis, idx, PETSC_COPY_VALUES, &is[i]));
131: PetscCall(ISDestroy(&is_new[i]));
132: }
133: PetscCall(PetscFree(is_new));
134: PetscFunctionReturn(PETSC_SUCCESS);
135: }
137: typedef enum {
138: MINE,
139: OTHER
140: } WhoseOwner;
141: /* data1, odata1 and odata2 are packed in the format (for communication):
142: data[0] = is_max, no of is
143: data[1] = size of is[0]
144: ...
145: data[is_max] = size of is[is_max-1]
146: data[is_max + 1] = data(is[0])
147: ...
148: data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
149: ...
150: data2 is packed in the format (for creating output is[]):
151: data[0] = is_max, no of is
152: data[1] = size of is[0]
153: ...
154: data[is_max] = size of is[is_max-1]
155: data[is_max + 1] = data(is[0])
156: ...
157: data[is_max + 1 + Mbs*i] = data(is[i])
158: ...
159: */
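/*
   Concrete example of the first format (illustration only): with is_max = 2, is[0] = {3,7} and
   is[1] = {5}, the packed buffer is

     data = { 2,   2, 1,   3, 7,   5 }
            is_max sizes  is[0]  is[1]

   In the second format each is[i] instead begins at the fixed offset is_max + 1 + Mbs*i, leaving
   room for every set to grow to Mbs entries in place.
*/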
160: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C, PetscInt is_max, IS is[])
161: {
162: Mat_MPISBAIJ *c = (Mat_MPISBAIJ *)C->data;
163: PetscMPIInt size, rank, tag1, tag2, *len_s, nrqr, nrqs, *id_r1, *len_r1, flag, len, *iwork;
164: const PetscInt *idx_i;
165: PetscInt idx, isz, col, *n, *data1, **data1_start, *data2, *data2_i, *data, *data_i;
166: PetscInt Mbs, i, j, k, *odata1, *odata2;
167: PetscInt proc_id, **odata2_ptr, *ctable = NULL, *btable, len_max, len_est;
168: PetscInt proc_end = 0, len_unused, nodata2;
169: PetscInt ois_max; /* max number of is[] over all processors */
170: char *t_p;
171: MPI_Comm comm;
172: MPI_Request *s_waits1, *s_waits2, r_req;
173: MPI_Status *s_status, r_status;
174: PetscBT *table; /* mark indices of this processor's is[] */
175: PetscBT table_i;
176: PetscBT otable; /* mark indices of other processors' is[] */
177: PetscInt bs = C->rmap->bs, Bn = c->B->cmap->n, Bnbs = Bn / bs, *Bowners;
178: IS garray_local, garray_gl;
180: PetscFunctionBegin;
181: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
182: size = c->size;
183: rank = c->rank;
184: Mbs = c->Mbs;
186: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));
187: PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));
189: /* create tables used in
190: step 1: table[i] - mark c->garray of proc [i]
191: step 3: table[i] - mark indices of is[i] when whose=MINE
192: table[0] - mark indices of is[] when whose=OTHER */
193: len = PetscMax(is_max, size);
194: PetscCall(PetscMalloc2(len, &table, (Mbs / PETSC_BITS_PER_BYTE + 1) * len, &t_p));
195: for (i = 0; i < len; i++) table[i] = t_p + (Mbs / PETSC_BITS_PER_BYTE + 1) * i;
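/* (Explanatory note: all bit tables share the single allocation t_p; each table[i] gets
    Mbs/PETSC_BITS_PER_BYTE + 1 bytes, so one malloc serves the whole array of tables.) */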
197: PetscCall(MPIU_Allreduce(&is_max, &ois_max, 1, MPIU_INT, MPI_MAX, comm));
199: /* 1. Send this processor's is[] to other processors */
200: /* allocate spaces */
201: PetscCall(PetscMalloc1(is_max, &n));
202: len = 0;
203: for (i = 0; i < is_max; i++) {
204: PetscCall(ISGetLocalSize(is[i], &n[i]));
205: len += n[i];
206: }
207: if (!len) {
208: is_max = 0;
209: } else {
210: len += 1 + is_max; /* max length of data1 for one processor */
211: }
213: PetscCall(PetscMalloc1(size * len + 1, &data1));
214: PetscCall(PetscMalloc1(size, &data1_start));
215: for (i = 0; i < size; i++) data1_start[i] = data1 + i * len;
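/* (Explanatory note: data1 is one contiguous buffer of size*len entries; data1_start[proc_id]
    walks through the slice destined for proc_id while it is filled below, and is reset to the
    start of that slice once packing is finished.) */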
217: PetscCall(PetscMalloc4(size, &len_s, size, &btable, size, &iwork, size + 1, &Bowners));
219: /* gather c->garray from all processors */
220: PetscCall(ISCreateGeneral(comm, Bnbs, c->garray, PETSC_COPY_VALUES, &garray_local));
221: PetscCall(ISAllGather(garray_local, &garray_gl));
222: PetscCall(ISDestroy(&garray_local));
223: PetscCallMPI(MPI_Allgather(&Bnbs, 1, MPIU_INT, Bowners + 1, 1, MPIU_INT, comm));
225: Bowners[0] = 0;
226: for (i = 0; i < size; i++) Bowners[i + 1] += Bowners[i];
228: if (is_max) {
229: /* build ctable, which maps each block row to the proc_id that owns it */
230: PetscCall(PetscMalloc1(Mbs, &ctable));
231: for (proc_id = 0, j = 0; proc_id < size; proc_id++) {
232: for (; j < C->rmap->range[proc_id + 1] / bs; j++) ctable[j] = proc_id;
233: }
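/*
   Example (illustration only): with bs = 1, two processes and row ranges [0,4) and [4,8),
   ctable = {0,0,0,0,1,1,1,1}; ctable[brow] is the rank that owns block row brow.
*/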
235: /* bit tables marking the c->garray entries of each processor */
236: PetscCall(ISGetIndices(garray_gl, &idx_i));
237: for (i = 0; i < size; i++) {
238: table_i = table[i];
239: PetscCall(PetscBTMemzero(Mbs, table_i));
240: for (j = Bowners[i]; j < Bowners[i + 1]; j++) { /* go through B cols of proc[i]*/
241: PetscCall(PetscBTSet(table_i, idx_i[j]));
242: }
243: }
244: PetscCall(ISRestoreIndices(garray_gl, &idx_i));
245: } /* if (is_max) */
246: PetscCall(ISDestroy(&garray_gl));
248: /* evaluate communication - message destinations, lengths, and buffer space */
249: for (i = 0; i < size; i++) len_s[i] = 0;
251: /* header of data1 */
252: for (proc_id = 0; proc_id < size; proc_id++) {
253: iwork[proc_id] = 0;
254: *data1_start[proc_id] = is_max;
255: data1_start[proc_id]++;
256: for (j = 0; j < is_max; j++) {
257: if (proc_id == rank) {
258: *data1_start[proc_id] = n[j];
259: } else {
260: *data1_start[proc_id] = 0;
261: }
262: data1_start[proc_id]++;
263: }
264: }
266: for (i = 0; i < is_max; i++) {
267: PetscCall(ISGetIndices(is[i], &idx_i));
268: for (j = 0; j < n[i]; j++) {
269: idx = idx_i[j];
270: *data1_start[rank] = idx;
271: data1_start[rank]++; /* for local processing */
272: proc_end = ctable[idx];
273: for (proc_id = 0; proc_id <= proc_end; proc_id++) { /* for others to process */
274: if (proc_id == rank) continue; /* done before this loop */
275: if (proc_id < proc_end && !PetscBTLookup(table[proc_id], idx)) continue; /* no need for sending idx to [proc_id] */
276: *data1_start[proc_id] = idx;
277: data1_start[proc_id]++;
278: len_s[proc_id]++;
279: }
280: }
281: /* update header data */
282: for (proc_id = 0; proc_id < size; proc_id++) {
283: if (proc_id == rank) continue;
284: *(data1 + proc_id * len + 1 + i) = len_s[proc_id] - iwork[proc_id];
285: iwork[proc_id] = len_s[proc_id];
286: }
287: PetscCall(ISRestoreIndices(is[i], &idx_i));
288: }
290: nrqs = 0;
291: nrqr = 0;
292: for (i = 0; i < size; i++) {
293: data1_start[i] = data1 + i * len;
294: if (len_s[i]) {
295: nrqs++;
296: len_s[i] += 1 + is_max; /* add no. of header msg */
297: }
298: }
300: for (i = 0; i < is_max; i++) PetscCall(ISDestroy(&is[i]));
301: PetscCall(PetscFree(n));
302: PetscCall(PetscFree(ctable));
304: /* Determine the number of messages to expect, their lengths, and the ranks they come from */
305: PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrqr));
306: PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, len_s, &id_r1, &len_r1));
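/* (Explanatory note: PetscGatherNumberOfMessages()/PetscGatherMessageLengths() perform the usual
    PETSc handshake that tells each process how many messages to expect, how long they are, and
    which ranks send them, before the point-to-point traffic below.) */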
308: /* Now post the sends */
309: PetscCall(PetscMalloc2(size, &s_waits1, size, &s_waits2));
310: k = 0;
311: for (proc_id = 0; proc_id < size; proc_id++) { /* send data1 to processor [proc_id] */
312: if (len_s[proc_id]) {
313: PetscCallMPI(MPI_Isend(data1_start[proc_id], len_s[proc_id], MPIU_INT, proc_id, tag1, comm, s_waits1 + k));
314: k++;
315: }
316: }
318: /* 2. Receive other's is[] and process. Then send back */
319: len = 0;
320: for (i = 0; i < nrqr; i++) {
321: if (len_r1[i] > len) len = len_r1[i];
322: }
323: PetscCall(PetscFree(len_r1));
324: PetscCall(PetscFree(id_r1));
326: for (proc_id = 0; proc_id < size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;
328: PetscCall(PetscMalloc1(len + 1, &odata1));
329: PetscCall(PetscMalloc1(size, &odata2_ptr));
330: PetscCall(PetscBTCreate(Mbs, &otable));
332: len_max = ois_max * (Mbs + 1); /* max space storing all is[] for each receive */
333: len_est = 2 * len_max; /* estimated space of storing is[] for all receiving messages */
334: PetscCall(PetscMalloc1(len_est + 1, &odata2));
335: nodata2 = 0; /* nodata2+1 = number of malloc'd blocks stored in odata2_ptr[] */
337: odata2_ptr[nodata2] = odata2;
339: len_unused = len_est; /* unused space left in odata2_ptr[nodata2]; must stay >= len_max */
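/*
   Receive-loop sketch (explanatory note): MPI_Iprobe() is polled until all nrqr expected messages
   have arrived. Each incoming buffer odata1 is processed by MatIncreaseOverlap_MPISBAIJ_Local()
   with whose = OTHER, and the newly found indices, packed into odata2, are sent straight back to
   the sender with tag2.
*/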
341: k = 0;
342: while (k < nrqr) {
343: /* Receive messages */
344: PetscCallMPI(MPI_Iprobe(MPI_ANY_SOURCE, tag1, comm, &flag, &r_status));
345: if (flag) {
346: PetscCallMPI(MPI_Get_count(&r_status, MPIU_INT, &len));
347: proc_id = r_status.MPI_SOURCE;
348: PetscCallMPI(MPI_Irecv(odata1, len, MPIU_INT, proc_id, r_status.MPI_TAG, comm, &r_req));
349: PetscCallMPI(MPI_Wait(&r_req, &r_status));
351: /* Process messages */
352: /* make sure there is enough unused space in odata2 array */
353: if (len_unused < len_max) { /* allocate more space for odata2 */
354: PetscCall(PetscMalloc1(len_est + 1, &odata2));
356: odata2_ptr[++nodata2] = odata2;
358: len_unused = len_est;
359: }
361: PetscCall(MatIncreaseOverlap_MPISBAIJ_Local(C, odata1, OTHER, odata2, &otable));
362: len = 1 + odata2[0];
363: for (i = 0; i < odata2[0]; i++) len += odata2[1 + i];
365: /* Send messages back */
366: PetscCallMPI(MPI_Isend(odata2, len, MPIU_INT, proc_id, tag2, comm, s_waits2 + k));
367: k++;
368: odata2 += len;
369: len_unused -= len;
370: len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
371: }
372: }
373: PetscCall(PetscFree(odata1));
374: PetscCall(PetscBTDestroy(&otable));
376: /* 3. Do local work on this processor's is[] */
377: /* make sure there is enough unused space in odata2(=data) array */
378: len_max = is_max * (Mbs + 1); /* max space storing all is[] for this processor */
379: if (len_unused < len_max) { /* allocate more space for odata2 */
380: PetscCall(PetscMalloc1(len_est + 1, &odata2));
382: odata2_ptr[++nodata2] = odata2;
383: }
385: data = odata2;
386: PetscCall(MatIncreaseOverlap_MPISBAIJ_Local(C, data1_start[rank], MINE, data, table));
387: PetscCall(PetscFree(data1_start));
389: /* 4. Receive work done on other processors, then merge */
390: /* get the max length of the reply messages this processor expects to recv */
391: PetscCall(MPIU_Allreduce(len_s, iwork, size, MPI_INT, MPI_MAX, comm));
392: PetscCall(PetscMalloc1(iwork[rank] + 1, &data2));
393: PetscCall(PetscFree4(len_s, btable, iwork, Bowners));
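/*
   Merge step (explanatory note): each reply is unpacked by advancing data2_i by data2[1+i]
   entries per set, and the new indices are appended into the fixed-stride region
   data + 1 + is_max + Mbs*i, with table[i] guarding against duplicates; data[1+i] tracks the
   growing size of is[i].
*/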
395: k = 0;
396: while (k < nrqs) {
397: /* Receive messages */
398: PetscCallMPI(MPI_Iprobe(MPI_ANY_SOURCE, tag2, comm, &flag, &r_status));
399: if (flag) {
400: PetscCallMPI(MPI_Get_count(&r_status, MPIU_INT, &len));
402: proc_id = r_status.MPI_SOURCE;
404: PetscCallMPI(MPI_Irecv(data2, len, MPIU_INT, proc_id, r_status.MPI_TAG, comm, &r_req));
405: PetscCallMPI(MPI_Wait(&r_req, &r_status));
406: if (len > 1 + is_max) { /* Add data2 into data */
407: data2_i = data2 + 1 + is_max;
408: for (i = 0; i < is_max; i++) {
409: table_i = table[i];
410: data_i = data + 1 + is_max + Mbs * i;
411: isz = data[1 + i];
412: for (j = 0; j < data2[1 + i]; j++) {
413: col = data2_i[j];
414: if (!PetscBTLookupSet(table_i, col)) data_i[isz++] = col;
415: }
416: data[1 + i] = isz;
417: if (i < is_max - 1) data2_i += data2[1 + i];
418: }
419: }
420: k++;
421: }
422: }
423: PetscCall(PetscFree(data2));
424: PetscCall(PetscFree2(table, t_p));
426: /* wait for phase 1 sends to complete */
427: PetscCall(PetscMalloc1(size, &s_status));
428: if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status));
429: PetscCall(PetscFree(data1));
431: /* wait for phase 2 sends to complete */
432: if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status));
433: PetscCall(PetscFree2(s_waits1, s_waits2));
434: PetscCall(PetscFree(s_status));
436: /* 5. Create new is[] */
437: for (i = 0; i < is_max; i++) {
438: data_i = data + 1 + is_max + Mbs * i;
439: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, data[1 + i], data_i, PETSC_COPY_VALUES, is + i));
440: }
441: for (k = 0; k <= nodata2; k++) PetscCall(PetscFree(odata2_ptr[k]));
442: PetscCall(PetscFree(odata2_ptr));
443: PetscFunctionReturn(PETSC_SUCCESS);
444: }
446: /*
447: MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
448: the work on the local processor.
450: Inputs:
451: C - the MATMPISBAIJ matrix;
452: data - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
453: whose - whose is[] to be processed,
454: MINE: this processor's is[]
455: OTHER: other processor's is[]
456: Output:
457: nidx - whose = MINE:
458: holds input and newly found indices in the same format as data
459: whose = OTHER:
460: only holds the newly found indices
461: table - table[i] marks the indices of is[i], i=0,...,is_max-1. Used only in the case 'whose=MINE'.
462: */
463: /* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
464: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C, PetscInt *data, PetscInt whose, PetscInt *nidx, PetscBT *table)
465: {
466: Mat_MPISBAIJ *c = (Mat_MPISBAIJ *)C->data;
467: Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ *)(c->A)->data;
468: Mat_SeqBAIJ *b = (Mat_SeqBAIJ *)(c->B)->data;
469: PetscInt row, mbs, Mbs, *nidx_i, col, col_max, isz, isz0, *ai, *aj, *bi, *bj, *garray, rstart, l;
470: PetscInt a_start, a_end, b_start, b_end, i, j, k, is_max, *idx_i, n;
471: PetscBT table0; /* mark the indices of input is[] for look up */
472: PetscBT table_i; /* points to i-th table. When whose=OTHER, a single table is used for all is[] */
474: PetscFunctionBegin;
475: Mbs = c->Mbs;
476: mbs = a->mbs;
477: ai = a->i;
478: aj = a->j;
479: bi = b->i;
480: bj = b->j;
481: garray = c->garray;
482: rstart = c->rstartbs;
483: is_max = data[0];
485: PetscCall(PetscBTCreate(Mbs, &table0));
487: nidx[0] = is_max;
488: idx_i = data + is_max + 1; /* ptr to input is[0] array */
489: nidx_i = nidx + is_max + 1; /* ptr to output is[0] array */
490: for (i = 0; i < is_max; i++) { /* for each is */
491: isz = 0;
492: n = data[1 + i]; /* size of input is[i] */
494: /* initialize table_i (marks idx and nidx) and table0 (marks only idx) */
495: if (whose == MINE) { /* process this processor's is[] */
496: table_i = table[i];
497: nidx_i = nidx + 1 + is_max + Mbs * i;
498: } else { /* process other processor's is[] - only use one temp table */
499: table_i = table[0];
500: }
501: PetscCall(PetscBTMemzero(Mbs, table_i));
502: PetscCall(PetscBTMemzero(Mbs, table0));
503: if (n == 0) {
504: nidx[1 + i] = 0; /* size of new is[i] */
505: continue;
506: }
508: isz0 = 0;
509: col_max = 0;
510: for (j = 0; j < n; j++) {
511: col = idx_i[j];
512: PetscCheck(col < Mbs, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "index col %" PetscInt_FMT " >= Mbs %" PetscInt_FMT, col, Mbs);
513: if (!PetscBTLookupSet(table_i, col)) {
514: PetscCall(PetscBTSet(table0, col));
515: if (whose == MINE) nidx_i[isz0] = col;
516: if (col_max < col) col_max = col;
517: isz0++;
518: }
519: }
521: if (whose == MINE) isz = isz0;
522: k = 0; /* no. of indices from input is[i] that have been examined */
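/*
   Explanatory note: because SBAIJ stores only the upper triangle, a plain row search would miss
   neighbours below the diagonal. The loop below therefore does both searches: rows already in
   is[i] contribute their (stored) columns, and every other row is added if one of its stored
   columns lies in is[i].
*/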
523: for (row = 0; row < mbs; row++) {
524: a_start = ai[row];
525: a_end = ai[row + 1];
526: b_start = bi[row];
527: b_end = bi[row + 1];
528: if (PetscBTLookup(table0, row + rstart)) { /* row is on input is[i]:
529: do row search: collect all col in this row */
530: for (l = a_start; l < a_end; l++) { /* Amat */
531: col = aj[l] + rstart;
532: if (!PetscBTLookupSet(table_i, col)) nidx_i[isz++] = col;
533: }
534: for (l = b_start; l < b_end; l++) { /* Bmat */
535: col = garray[bj[l]];
536: if (!PetscBTLookupSet(table_i, col)) nidx_i[isz++] = col;
537: }
538: k++;
539: if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
540: } else { /* row is not on input is[i]:
541: do col search: add row onto nidx_i if one of its columns is on input is[i] */
542: for (l = a_start; l < a_end; l++) { /* Amat */
543: col = aj[l] + rstart;
544: if (col > col_max) break;
545: if (PetscBTLookup(table0, col)) {
546: if (!PetscBTLookupSet(table_i, row + rstart)) nidx_i[isz++] = row + rstart;
547: break; /* for l = start; l<end ; l++) */
548: }
549: }
550: for (l = b_start; l < b_end; l++) { /* Bmat */
551: col = garray[bj[l]];
552: if (col > col_max) break;
553: if (PetscBTLookup(table0, col)) {
554: if (!PetscBTLookupSet(table_i, row + rstart)) nidx_i[isz++] = row + rstart;
555: break; /* for l = start; l<end ; l++) */
556: }
557: }
558: }
559: }
561: if (i < is_max - 1) {
562: idx_i += n; /* ptr to input is[i+1] array */
563: nidx_i += isz; /* ptr to output is[i+1] array */
564: }
565: nidx[1 + i] = isz; /* size of new is[i] */
566: } /* for each is */
567: PetscCall(PetscBTDestroy(&table0));
568: PetscFunctionReturn(PETSC_SUCCESS);
569: }