Actual source code: vecstash.c


  2: #include <petsc/private/vecimpl.h>

  4: #define DEFAULT_STASH_SIZE 100

  6: /*
  7:   VecStashCreate_Private - Creates a stash, currently used for all the parallel
  8:   vector implementations. The stash is where elements of a vector destined
  9:   to be stored on other processes are kept until vector assembly is done.

 11:   This is a simple-minded stash; it simply adds entries to the end of the stash.

 13:   Input Parameters:
 14:   comm - communicator, required for scatters.
 15:   bs   - stash block size, used when stashing blocks of values

 17:   Output Parameter:
 18: . stash    - the newly created stash
 19: */
 20: PetscErrorCode VecStashCreate_Private(MPI_Comm comm, PetscInt bs, VecStash *stash)
 21: {
 22:   PetscInt  max, *opt, nopt;
 23:   PetscBool flg;

 25:   PetscFunctionBegin;
 26:   /* Require 2 tags, get the second using PetscCommGetNewTag() */
 27:   stash->comm = comm;
 28:   PetscCall(PetscCommGetNewTag(stash->comm, &stash->tag1));
 29:   PetscCall(PetscCommGetNewTag(stash->comm, &stash->tag2));
 30:   PetscCallMPI(MPI_Comm_size(stash->comm, &stash->size));
 31:   PetscCallMPI(MPI_Comm_rank(stash->comm, &stash->rank));

 33:   nopt = stash->size;
 34:   PetscCall(PetscMalloc1(nopt, &opt));
 35:   PetscCall(PetscOptionsGetIntArray(NULL, NULL, "-vecstash_initial_size", opt, &nopt, &flg));
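       /* Added note (not in the original source): -vecstash_initial_size 1000 gives every rank
          the same initial size, while a comma-separated list such as -vecstash_initial_size 500,2000
          assigns one value per rank; ranks beyond the end of the list fall back to the default. */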
 36:   if (flg) {
 37:     if (nopt == 1) max = opt[0];
 38:     else if (nopt == stash->size) max = opt[stash->rank];
 39:     else if (stash->rank < nopt) max = opt[stash->rank];
 40:     else max = 0; /* use default */
 41:     stash->umax = max;
 42:   } else {
 43:     stash->umax = 0;
 44:   }
 45:   PetscCall(PetscFree(opt));

 47:   if (bs <= 0) bs = 1;

 49:   stash->bs       = bs;
 50:   stash->nmax     = 0;
 51:   stash->oldnmax  = 0;
 52:   stash->n        = 0;
 53:   stash->reallocs = -1;
 54:   stash->idx      = NULL;
 55:   stash->array    = NULL;

 57:   stash->send_waits   = NULL;
 58:   stash->recv_waits   = NULL;
 59:   stash->send_status  = NULL;
 60:   stash->nsends       = 0;
 61:   stash->nrecvs       = 0;
 62:   stash->svalues      = NULL;
 63:   stash->rvalues      = NULL;
 64:   stash->rmax         = 0;
 65:   stash->nprocs       = NULL;
 66:   stash->nprocessed   = 0;
 67:   stash->donotstash   = PETSC_FALSE;
 68:   stash->ignorenegidx = PETSC_FALSE;
 69:   PetscFunctionReturn(PETSC_SUCCESS);
 70: }
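
/* A minimal usage sketch (added, not part of vecstash.c): a parallel vector implementation
   would typically create one scalar stash and one block stash while the vector is being
   built, assuming the stash/bstash fields of PETSc's MPI Vec. The initial sizes can then
   be tuned with -vecstash_initial_size or VecStashSetInitialSize_Private(). */
static PetscErrorCode VecCreateStashes_Sketch(Vec v, PetscInt bs)
{
  PetscFunctionBegin;
  PetscCall(VecStashCreate_Private(PetscObjectComm((PetscObject)v), 1, &v->stash));   /* single entries */
  PetscCall(VecStashCreate_Private(PetscObjectComm((PetscObject)v), bs, &v->bstash)); /* blocks of size bs */
  PetscFunctionReturn(PETSC_SUCCESS);
}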

 72: /*
 73:    VecStashDestroy_Private - Destroy the stash
 74: */
 75: PetscErrorCode VecStashDestroy_Private(VecStash *stash)
 76: {
 77:   PetscFunctionBegin;
 78:   PetscCall(PetscFree2(stash->array, stash->idx));
 79:   PetscCall(PetscFree(stash->bowners));
 80:   PetscFunctionReturn(PETSC_SUCCESS);
 81: }

 83: /*
 84:    VecStashScatterEnd_Private - This is called as the final stage of the
 85:    scatter. The final stages of message passing are done here, and
 86:    all the memory used for message passing is cleaned up. This
 87:    routine also resets the stash, and deallocates the memory used
 88:    for the stash. It also keeps track of the current memory usage
 89:    so that the same value can be used the next time through.
 90: */
 91: PetscErrorCode VecStashScatterEnd_Private(VecStash *stash)
 92: {
 93:   PetscInt    nsends = stash->nsends, oldnmax;
 94:   MPI_Status *send_status;

 96:   PetscFunctionBegin;
 97:   /* wait on sends */
 98:   if (nsends) {
 99:     PetscCall(PetscMalloc1(2 * nsends, &send_status));
100:     PetscCallMPI(MPI_Waitall(2 * nsends, stash->send_waits, send_status));
101:     PetscCall(PetscFree(send_status));
102:   }

104:   /* Now update oldnmax to be approximately 10% more than the current n; this way
105:      the wastage of space is reduced the next time this stash is used.
106:      Also update oldnmax only if it increases */
107:   if (stash->n) {
108:     oldnmax = ((PetscInt)(stash->n * 1.1) + 5) * stash->bs;
109:     if (oldnmax > stash->oldnmax) stash->oldnmax = oldnmax;
110:   }
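  /* Example (added, illustrative): with n = 100 stashed entries and bs = 1 the new oldnmax
     is (PetscInt)(100 * 1.1) + 5 = 115, i.e. roughly 10% of headroom for the next use. */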

112:   stash->nmax       = 0;
113:   stash->n          = 0;
114:   stash->reallocs   = -1;
115:   stash->rmax       = 0;
116:   stash->nprocessed = 0;

118:   PetscCall(PetscFree2(stash->array, stash->idx));
119:   stash->array = NULL;
120:   stash->idx   = NULL;
121:   PetscCall(PetscFree(stash->send_waits));
122:   PetscCall(PetscFree(stash->recv_waits));
123:   PetscCall(PetscFree2(stash->svalues, stash->sindices));
124:   PetscCall(PetscFree2(stash->rvalues, stash->rindices));
125:   PetscCall(PetscFree(stash->nprocs));
126:   PetscFunctionReturn(PETSC_SUCCESS);
127: }

129: /*
130:    VecStashGetInfo_Private - Gets the relevant statistics of the stash

132:    Input Parameter:
133:    stash    - the stash
134:    Output Parameters:
135:    nstash   - the size of the stash
136:    reallocs - the number of additional mallocs incurred.

137: */
138: PetscErrorCode VecStashGetInfo_Private(VecStash *stash, PetscInt *nstash, PetscInt *reallocs)
139: {
140:   PetscFunctionBegin;
141:   if (nstash) *nstash = stash->n * stash->bs;
142:   if (reallocs) {
143:     if (stash->reallocs < 0) *reallocs = 0;
144:     else *reallocs = stash->reallocs;
145:   }
146:   PetscFunctionReturn(PETSC_SUCCESS);
147: }

149: /*
150:    VecStashSetInitialSize_Private - Sets the initial size of the stash

152:    Input Parameters:
153:    stash  - the stash
154:    max    - the value that is used as the max size of the stash;
155:             this value is used while allocating memory. It specifies
156:             the number of values stored, even with the block-stash
157: */
158: PetscErrorCode VecStashSetInitialSize_Private(VecStash *stash, PetscInt max)
159: {
160:   PetscFunctionBegin;
161:   stash->umax = max;
162:   PetscFunctionReturn(PETSC_SUCCESS);
163: }

165: /* VecStashExpand_Private - Expand the stash. This function is called
166:    when the space in the stash is not sufficient to add the new values
167:    being inserted into the stash.

169:    Input Parameters:
170:    stash - the stash
171:    incr  - the minimum increase requested

173:    Notes:
174:    This routine doubles the currently used memory.
175: */
176: PetscErrorCode VecStashExpand_Private(VecStash *stash, PetscInt incr)
177: {
178:   PetscInt    *n_idx, newnmax, bs = stash->bs;
179:   PetscScalar *n_array;

181:   PetscFunctionBegin;
182:   /* allocate a larger stash. */
183:   if (!stash->oldnmax && !stash->nmax) { /* new stash */
184:     if (stash->umax) newnmax = stash->umax / bs;
185:     else newnmax = DEFAULT_STASH_SIZE / bs;
186:   } else if (!stash->nmax) { /* reusing stash */
187:     if (stash->umax > stash->oldnmax) newnmax = stash->umax / bs;
188:     else newnmax = stash->oldnmax / bs;
189:   } else newnmax = stash->nmax * 2;

191:   if (newnmax < (stash->nmax + incr)) newnmax += 2 * incr;

193:   PetscCall(PetscMalloc2(bs * newnmax, &n_array, newnmax, &n_idx));
194:   PetscCall(PetscMemcpy(n_array, stash->array, bs * stash->nmax * sizeof(PetscScalar)));
195:   PetscCall(PetscMemcpy(n_idx, stash->idx, stash->nmax * sizeof(PetscInt)));
196:   PetscCall(PetscFree2(stash->array, stash->idx));

198:   stash->array = n_array;
199:   stash->idx   = n_idx;
200:   stash->nmax  = newnmax;
201:   stash->reallocs++;
202:   PetscFunctionReturn(PETSC_SUCCESS);
203: }
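
/* Growth example (added, illustrative): a brand-new scalar stash (bs = 1, no user-set size)
   gets DEFAULT_STASH_SIZE / bs = 100 slots on its first expansion; after that each expansion
   doubles nmax (100 -> 200 -> 400 ...), and if even the doubled size cannot hold the requested
   increase, 2 * incr additional slots are added. */
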
204: /*
205:   VecStashScatterBegin_Private - Initiates the transfer of values to the
206:   correct owners. This function goes through the stash, checks the
207:   owner of each stashed value, and sends the values off to the owning
208:   processes.

210:   Input Parameters:
211:   stash  - the stash
212:   owners - an array of size 'no-of-procs' which gives the ownership range
213:            for each process.

215:   Notes:
216:     In the case of the blocked stash, the 'owners' array has the
217:   ranges specified in blocked global indices, and for the regular stash
218:   in the regular global indices.
219: */
220: PetscErrorCode VecStashScatterBegin_Private(VecStash *stash, PetscInt *owners)
221: {
222:   PetscMPIInt  size = stash->size, tag1 = stash->tag1, tag2 = stash->tag2;
223:   PetscInt    *owner, *start, *nprocs, nsends, nreceives;
224:   PetscInt     nmax, count, *sindices, *rindices, i, j, idx, bs = stash->bs, lastidx;
225:   PetscScalar *rvalues, *svalues;
226:   MPI_Comm     comm = stash->comm;
227:   MPI_Request *send_waits, *recv_waits;

229:   PetscFunctionBegin;
230:   /*  first count number of contributors to each processor */
231:   PetscCall(PetscCalloc1(2 * size, &nprocs));
232:   PetscCall(PetscMalloc1(stash->n, &owner));

234:   j       = 0;
235:   lastidx = -1;
236:   for (i = 0; i < stash->n; i++) {
237:     /* if indices are NOT locally sorted, need to start search at the beginning */
238:     if (lastidx > (idx = stash->idx[i])) j = 0;
239:     lastidx = idx;
240:     for (; j < size; j++) {
241:       if (idx >= owners[j] && idx < owners[j + 1]) {
242:         nprocs[2 * j]++;
243:         nprocs[2 * j + 1] = 1;
244:         owner[i]          = j;
245:         break;
246:       }
247:     }
248:   }
249:   nsends = 0;
250:   for (i = 0; i < size; i++) nsends += nprocs[2 * i + 1];

252:   /* inform other processes of the number of messages and max length */
253:   PetscCall(PetscMaxSum(comm, nprocs, &nmax, &nreceives));

255:   /* post receives:
256:      since we don't know how long each individual message is we
257:      allocate the largest needed buffer for each receive. Potentially
258:      this is a lot of wasted space.
259:   */
260:   PetscCall(PetscMalloc2(nreceives * nmax * bs, &rvalues, nreceives * nmax, &rindices));
261:   PetscCall(PetscMalloc1(2 * nreceives, &recv_waits));
262:   for (i = 0, count = 0; i < nreceives; i++) {
263:     PetscCallMPI(MPI_Irecv(rvalues + bs * nmax * i, bs * nmax, MPIU_SCALAR, MPI_ANY_SOURCE, tag1, comm, recv_waits + count++));
264:     PetscCallMPI(MPI_Irecv(rindices + nmax * i, nmax, MPIU_INT, MPI_ANY_SOURCE, tag2, comm, recv_waits + count++));
265:   }

267:   /* do sends:
268:       1) starts[i] gives the starting index in svalues for stuff going to
269:          the ith processor
270:   */
271:   PetscCall(PetscMalloc2(stash->n * bs, &svalues, stash->n, &sindices));
272:   PetscCall(PetscMalloc1(2 * nsends, &send_waits));
273:   PetscCall(PetscMalloc1(size, &start));
274:   /* use 2 sends: the first with all_v, the next with all_i */
275:   start[0] = 0;
276:   for (i = 1; i < size; i++) start[i] = start[i - 1] + nprocs[2 * i - 2];

278:   for (i = 0; i < stash->n; i++) {
279:     j = owner[i];
280:     if (bs == 1) svalues[start[j]] = stash->array[i];
281:     else PetscCall(PetscMemcpy(svalues + bs * start[j], stash->array + bs * i, bs * sizeof(PetscScalar)));
282:     sindices[start[j]] = stash->idx[i];
283:     start[j]++;
284:   }
285:   start[0] = 0;
286:   for (i = 1; i < size; i++) start[i] = start[i - 1] + nprocs[2 * i - 2];

288:   for (i = 0, count = 0; i < size; i++) {
289:     if (nprocs[2 * i + 1]) {
290:       PetscCallMPI(MPI_Isend(svalues + bs * start[i], bs * nprocs[2 * i], MPIU_SCALAR, i, tag1, comm, send_waits + count++));
291:       PetscCallMPI(MPI_Isend(sindices + start[i], nprocs[2 * i], MPIU_INT, i, tag2, comm, send_waits + count++));
292:     }
293:   }
294:   PetscCall(PetscFree(owner));
295:   PetscCall(PetscFree(start));
296:   /* This memory is reused in scatter end for a different purpose */
297:   for (i = 0; i < 2 * size; i++) nprocs[i] = -1;

299:   stash->nprocs     = nprocs;
300:   stash->svalues    = svalues;
301:   stash->sindices   = sindices;
302:   stash->rvalues    = rvalues;
303:   stash->rindices   = rindices;
304:   stash->nsends     = nsends;
305:   stash->nrecvs     = nreceives;
306:   stash->send_waits = send_waits;
307:   stash->recv_waits = recv_waits;
308:   stash->rmax       = nmax;
309:   PetscFunctionReturn(PETSC_SUCCESS);
310: }
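
/* Illustrative call site (added, not part of vecstash.c): during assembly an MPI vector would
   pass its layout's ownership ranges as 'owners'; a sketch assuming the Vec fields used by
   PETSc's MPI vector. For the block stash the ranges must be given in blocked indices. */
static PetscErrorCode VecStashBeginScatter_Sketch(Vec v)
{
  PetscFunctionBegin;
  PetscCall(VecStashScatterBegin_Private(&v->stash, v->map->range));
  PetscFunctionReturn(PETSC_SUCCESS);
}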

312: /*
313:    VecStashScatterGetMesg_Private - This function waits on the receives posted
314:    in the function VecStashScatterBegin_Private() and returns one message at
315:    a time to the calling function. If no messages are left, it indicates this
316:    by setting flg = 0, else it sets flg = 1.

318:    Input Parameters:
319:    stash - the stash

321:    Output Parameters:
322:    nvals - the number of entries in the current message.
323:    rows  - an array of indices (or blocked indices) corresponding to the values
325:    vals  - the values
326:    flg   - 0 indicates no more messages left, and the current call has no values associated.
327:            1 indicates that the current call successfully received a message, and the
328:              other output parameters nvals, rows, vals are set appropriately.
329: */
330: PetscErrorCode VecStashScatterGetMesg_Private(VecStash *stash, PetscMPIInt *nvals, PetscInt **rows, PetscScalar **vals, PetscInt *flg)
331: {
332:   PetscMPIInt i = 0; /* dummy value so MPI-Uni doesn't think it is not set */
333:   PetscInt   *flg_v;
334:   PetscInt    i1, i2, bs = stash->bs;
335:   MPI_Status  recv_status;
336:   PetscBool   match_found = PETSC_FALSE;

338:   PetscFunctionBegin;
339:   *flg = 0; /* When a message is discovered this is reset to 1 */
340:   /* Return if no more messages to process */
341:   if (stash->nprocessed == stash->nrecvs) PetscFunctionReturn(PETSC_SUCCESS);

343:   flg_v = stash->nprocs;
344:   /* If a matching pair of receives is found, process them and return the data to
345:      the calling function. Until then, keep receiving messages */
346:   while (!match_found) {
347:     PetscCallMPI(MPI_Waitany(2 * stash->nrecvs, stash->recv_waits, &i, &recv_status));
348:     /* Now pack the received message into a structure which is usable by others */
349:     if (i % 2) {
350:       PetscCallMPI(MPI_Get_count(&recv_status, MPIU_INT, nvals));
351:       flg_v[2 * recv_status.MPI_SOURCE + 1] = i / 2;
352:     } else {
353:       PetscCallMPI(MPI_Get_count(&recv_status, MPIU_SCALAR, nvals));
354:       flg_v[2 * recv_status.MPI_SOURCE] = i / 2;
355:       *nvals                            = *nvals / bs;
356:     }

358:     /* Check if we have both the messages from this proc */
359:     i1 = flg_v[2 * recv_status.MPI_SOURCE];
360:     i2 = flg_v[2 * recv_status.MPI_SOURCE + 1];
361:     if (i1 != -1 && i2 != -1) {
362:       *rows = stash->rindices + i2 * stash->rmax;
363:       *vals = stash->rvalues + i1 * bs * stash->rmax;
364:       *flg  = 1;
365:       stash->nprocessed++;
366:       match_found = PETSC_TRUE;
367:     }
368:   }
369:   PetscFunctionReturn(PETSC_SUCCESS);
370: }
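
/* Illustrative sketch (added, not part of vecstash.c): the assembly-end code of an MPI vector
   would drain the stash one message at a time and fold the received values into its local
   array; a sketch assuming ADD_VALUES and a local array that starts at global index rstart. */
static PetscErrorCode VecStashDrain_Sketch(VecStash *stash, PetscScalar *larray, PetscInt rstart)
{
  PetscMPIInt  nvals;
  PetscInt    *rows, flg, i;
  PetscScalar *vals;

  PetscFunctionBegin;
  for (;;) {
    PetscCall(VecStashScatterGetMesg_Private(stash, &nvals, &rows, &vals, &flg));
    if (!flg) break;                                                 /* no messages left */
    for (i = 0; i < nvals; i++) larray[rows[i] - rstart] += vals[i]; /* ADD_VALUES */
  }
  PetscCall(VecStashScatterEnd_Private(stash)); /* wait on sends and reset the stash */
  PetscFunctionReturn(PETSC_SUCCESS);
}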

372: /*
373:  * Sort the stash, removing duplicates (combining as appropriate).
374:  */
375: PetscErrorCode VecStashSortCompress_Private(VecStash *stash)
376: {
377:   PetscInt i, j, bs = stash->bs;

379:   PetscFunctionBegin;
380:   if (!stash->n) PetscFunctionReturn(PETSC_SUCCESS);
381:   if (bs == 1) {
382:     PetscCall(PetscSortIntWithScalarArray(stash->n, stash->idx, stash->array));
383:     for (i = 1, j = 0; i < stash->n; i++) {
384:       if (stash->idx[i] == stash->idx[j]) {
385:         switch (stash->insertmode) {
386:         case ADD_VALUES:
387:           stash->array[j] += stash->array[i];
388:           break;
389:         case INSERT_VALUES:
390:           stash->array[j] = stash->array[i];
391:           break;
392:         default:
393:           SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Insert mode not supported 0x%x", stash->insertmode);
394:         }
395:       } else {
396:         j++;
397:         stash->idx[j]   = stash->idx[i];
398:         stash->array[j] = stash->array[i];
399:       }
400:     }
401:     stash->n = j + 1;
402:   } else { /* block stash */
403:     PetscInt    *perm = NULL;
404:     PetscScalar *arr;
405:     PetscCall(PetscMalloc2(stash->n, &perm, stash->n * bs, &arr));
406:     for (i = 0; i < stash->n; i++) perm[i] = i;
407:     PetscCall(PetscSortIntWithArray(stash->n, stash->idx, perm));

409:     /* Out-of-place copy of arr */
410:     PetscCall(PetscMemcpy(arr, stash->array + perm[0] * bs, bs * sizeof(PetscScalar)));
411:     for (i = 1, j = 0; i < stash->n; i++) {
412:       PetscInt k;
413:       if (stash->idx[i] == stash->idx[j]) {
414:         switch (stash->insertmode) {
415:         case ADD_VALUES:
416:           for (k = 0; k < bs; k++) arr[j * bs + k] += stash->array[perm[i] * bs + k];
417:           break;
418:         case INSERT_VALUES:
419:           for (k = 0; k < bs; k++) arr[j * bs + k] = stash->array[perm[i] * bs + k];
420:           break;
421:         default:
422:           SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Insert mode not supported 0x%x", stash->insertmode);
423:         }
424:       } else {
425:         j++;
426:         stash->idx[j] = stash->idx[i];
427:         for (k = 0; k < bs; k++) arr[j * bs + k] = stash->array[perm[i] * bs + k];
428:       }
429:     }
430:     stash->n = j + 1;
431:     PetscCall(PetscMemcpy(stash->array, arr, stash->n * bs * sizeof(PetscScalar)));
432:     PetscCall(PetscFree2(perm, arr));
433:   }
434:   PetscFunctionReturn(PETSC_SUCCESS);
435: }
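
/* Example (added, illustrative): with ADD_VALUES and stashed (index, value) pairs
   (5, 1.0), (2, 3.0), (5, 2.0), sorting and compressing leaves (2, 3.0) and (5, 3.0);
   with INSERT_VALUES one of the duplicate values is kept instead of their sum. */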

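/*
   VecStashGetOwnerList_Private - Builds the list of ranks that own at least one stashed entry.

   Added note (not in the original source): this routine assumes the stash indices are already
   sorted in increasing order (e.g. by VecStashSortCompress_Private()), since a new owner is
   recorded only when an index passes the current owner's upper range bound.
*/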
437: PetscErrorCode VecStashGetOwnerList_Private(VecStash *stash, PetscLayout map, PetscMPIInt *nowners, PetscMPIInt **owners)
438: {
439:   PetscInt       i, bs = stash->bs;
440:   PetscMPIInt    r;
441:   PetscSegBuffer seg;

443:   PetscFunctionBegin;
444:   PetscCheck(bs == 1 || bs == map->bs, map->comm, PETSC_ERR_PLIB, "Stash block size %" PetscInt_FMT " does not match layout block size %" PetscInt_FMT, bs, map->bs);
445:   PetscCall(PetscSegBufferCreate(sizeof(PetscMPIInt), 50, &seg));
446:   *nowners = 0;
447:   for (i = 0, r = -1; i < stash->n; i++) {
448:     if (stash->idx[i] * bs >= map->range[r + 1]) {
449:       PetscMPIInt *rank;
450:       PetscCall(PetscSegBufferGet(seg, 1, &rank));
451:       PetscCall(PetscLayoutFindOwner(map, stash->idx[i] * bs, &r));
452:       *rank = r;
453:       (*nowners)++;
454:     }
455:   }
456:   PetscCall(PetscSegBufferExtractAlloc(seg, owners));
457:   PetscCall(PetscSegBufferDestroy(&seg));
458:   PetscFunctionReturn(PETSC_SUCCESS);
459: }