Actual source code: mcrl.c


/*
  Defines a matrix-vector product for the MATMPIAIJCRL matrix class.
  This class is derived from the MATMPIAIJ class and retains the
  compressed row storage (aka Yale sparse matrix format) but augments
  it with a column-oriented storage that is more efficient for
  matrix-vector products on vector machines.

  CRL stands for constant row length; that is, the same number of columns
  is kept (padded with zeros) for each row of the sparse matrix.

  See src/mat/impls/aij/seq/crl/crl.c for the sequential version.
*/
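/*
  Illustrative sketch (hypothetical, not part of this file): with the CRL layout
  built below, acols[j*m + i] holds stored entry j of row i and icols[j*m + i]
  its column index, so a matrix-vector product can run the row loop innermost
  with stride-1, fixed-trip-count accesses that vectorize well:

    for (j = 0; j < rmax; j++) {    // one "column slice" at a time
      for (i = 0; i < m; i++) {     // contiguous, stride-1 access
        y[i] += acols[j * m + i] * x[icols[j * m + i]];
      }
    }

  Here y must be zeroed first, and x is the nd local entries followed by the
  gathered ghost values (the aijcrl->array work space set up below). Padded
  entries contribute 0.0, and their column indices repeat a valid index, so
  the loads from x stay in bounds.
*/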

#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <../src/mat/impls/aij/seq/crl/crl.h>

PetscErrorCode MatDestroy_MPIAIJCRL(Mat A)
{
  Mat_AIJCRL *aijcrl = (Mat_AIJCRL *)A->spptr;

  PetscFunctionBegin;
  if (aijcrl) {
    PetscCall(PetscFree2(aijcrl->acols, aijcrl->icols));
    PetscCall(VecDestroy(&aijcrl->fwork));
    PetscCall(VecDestroy(&aijcrl->xwork));
    PetscCall(PetscFree(aijcrl->array));
  }
  PetscCall(PetscFree(A->spptr));

  PetscCall(PetscObjectChangeTypeName((PetscObject)A, MATMPIAIJ));
  PetscCall(MatDestroy_MPIAIJ(A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMPIAIJCRL_create_aijcrl(Mat A)
{
  Mat_MPIAIJ  *a   = (Mat_MPIAIJ *)(A)->data;
  Mat_SeqAIJ  *Aij = (Mat_SeqAIJ *)(a->A->data), *Bij = (Mat_SeqAIJ *)(a->B->data);
  Mat_AIJCRL  *aijcrl = (Mat_AIJCRL *)A->spptr;
  PetscInt     m      = A->rmap->n;       /* number of local rows in the matrix */
  PetscInt     nd     = a->A->cmap->n;    /* number of columns in the diagonal portion */
  PetscInt    *aj = Aij->j, *bj = Bij->j; /* column index arrays from the CSR representation */
  PetscInt     i, j, rmax = 0, *icols, *ailen = Aij->ilen, *bilen = Bij->ilen;
  PetscScalar *aa = Aij->a, *ba = Bij->a, *acols, *array;

  PetscFunctionBegin;
  /* determine the row with the most columns */
  for (i = 0; i < m; i++) rmax = PetscMax(rmax, ailen[i] + bilen[i]);
  aijcrl->nz   = Aij->nz + Bij->nz;
  aijcrl->m    = A->rmap->n;
  aijcrl->rmax = rmax;

  PetscCall(PetscFree2(aijcrl->acols, aijcrl->icols));
  PetscCall(PetscMalloc2(rmax * m, &aijcrl->acols, rmax * m, &aijcrl->icols));
  acols = aijcrl->acols;
  icols = aijcrl->icols;
  for (i = 0; i < m; i++) {
    for (j = 0; j < ailen[i]; j++) {
      acols[j * m + i] = *aa++;
      icols[j * m + i] = *aj++;
    }
    for (; j < ailen[i] + bilen[i]; j++) {
      acols[j * m + i] = *ba++;
      icols[j * m + i] = nd + *bj++;
    }
    for (; j < rmax; j++) { /* pad empty column entries */
      acols[j * m + i] = 0.0;
      icols[j * m + i] = (j) ? icols[(j - 1) * m + i] : 0; /* handle the case where the row is EMPTY */
    }
  }
  PetscCall(PetscInfo(A, "Fraction of zeros introduced for vectorized multiply %g\n", 1.0 - ((double)(aijcrl->nz)) / ((double)(rmax * m))));
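  /* e.g. (hypothetical numbers) with nz = 100 stored nonzeros, m = 10 rows and
     rmax = 15, the padded storage holds rmax*m = 150 entries, so the fraction
     of introduced zeros is 1 - 100/150 = 1/3 */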

  PetscCall(PetscFree(aijcrl->array));
  PetscCall(PetscMalloc1(a->B->cmap->n + nd, &array));
  /* array is a->B->cmap->n + nd entries long: xwork uses the first nd entries and
     fwork aliases the remainder, so the scatter can copy ghost values into place */
  PetscCall(VecDestroy(&aijcrl->xwork));
  PetscCall(VecCreateMPIWithArray(PetscObjectComm((PetscObject)A), 1, nd, PETSC_DECIDE, array, &aijcrl->xwork));
  PetscCall(VecDestroy(&aijcrl->fwork));
  PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, a->B->cmap->n, array + nd, &aijcrl->fwork));

  aijcrl->array = array;
  aijcrl->xscat = a->Mvctx;
  PetscFunctionReturn(PETSC_SUCCESS);
}
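/*
  Worked example (hypothetical numbers): on a rank with m = 2 local rows and
  nd = 3 diagonal columns, suppose row 0 has diagonal entries in columns {0, 2}
  (values a00, a02) and row 1 has one diagonal entry in column 1 (value a11)
  plus one off-diagonal entry in ghost column 0 (value b10). Then rmax = 2 and
  the loop above packs

    icols = [0, 1,  2, 3]        acols = [a00, a11,  a02, b10]
             ^slice j=0          (slice j = 0, then slice j = 1)

  i.e. entry j of every row is stored contiguously across rows, and the ghost
  column index 0 is shifted by nd = 3 to give the extended local index 3.
*/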

PetscErrorCode MatAssemblyEnd_MPIAIJCRL(Mat A, MatAssemblyType mode)
{
  Mat_MPIAIJ *a   = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ *Aij = (Mat_SeqAIJ *)(a->A->data), *Bij = (Mat_SeqAIJ *)(a->B->data);

  PetscFunctionBegin;
  Aij->inode.use = PETSC_FALSE;
  Bij->inode.use = PETSC_FALSE;

  PetscCall(MatAssemblyEnd_MPIAIJ(A, mode));
  if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(PETSC_SUCCESS);

  /* Now calculate the permutation and grouping information. */
  PetscCall(MatMPIAIJCRL_create_aijcrl(A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

extern PetscErrorCode MatMult_AIJCRL(Mat, Vec, Vec);
extern PetscErrorCode MatDuplicate_AIJCRL(Mat, MatDuplicateOption, Mat *);

/* MatConvert_MPIAIJ_MPIAIJCRL converts an MPIAIJ matrix into an
 * MPIAIJCRL matrix. This routine is called by the MatCreate_MPIAIJCRL()
 * routine, but can also be used to convert an assembled MPIAIJ matrix
 * into an MPIAIJCRL one. */

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat         B = *newmat;
  Mat_AIJCRL *aijcrl;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B));

  PetscCall(PetscNew(&aijcrl));
  B->spptr = (void *)aijcrl;

  /* Set function pointers for methods that we inherit from AIJ but override. */
  B->ops->duplicate   = MatDuplicate_AIJCRL;
  B->ops->assemblyend = MatAssemblyEnd_MPIAIJCRL;
  B->ops->destroy     = MatDestroy_MPIAIJCRL;
  B->ops->mult        = MatMult_AIJCRL;

  /* If A has already been assembled, compute the permutation. */
  if (A->assembled) PetscCall(MatMPIAIJCRL_create_aijcrl(B));
  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJCRL));
  *newmat = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}
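/*
  Usage sketch (hypothetical caller code): an assembled MPIAIJ matrix can be
  converted after the fact via the generic conversion interface, e.g.

    Mat Acrl;
    PetscCall(MatConvert(A, MATMPIAIJCRL, MAT_INITIAL_MATRIX, &Acrl));

  or converted in place with MAT_INPLACE_MATRIX and &A as the last argument.
*/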

/*@C
   MatCreateMPIAIJCRL - Creates a sparse matrix of type `MATMPIAIJCRL`.
   This type inherits from `MATAIJ`, but stores some additional
   information that is used to allow better vectorization of
   the matrix-vector product. At the cost of increased storage, the AIJ formatted
   matrix can be copied to a format in which pieces of the matrix are
   stored in ELLPACK format, allowing the vectorized matrix multiply
   routine to use stride-1 memory accesses.

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows) in the "diagonal" submatrix
.  nnz - array containing the number of nonzeros in the various rows (possibly different for each row) of the "diagonal" submatrix, or `NULL`
.  onz - number of nonzeros per row (same for all rows) in the "off-diagonal" submatrix
-  onnz - array containing the number of nonzeros in the various rows (possibly different for each row) of the "off-diagonal" submatrix, or `NULL`

   Output Parameter:
.  A - the matrix

   Level: intermediate

   Note:
   If `nnz` is given then `nz` is ignored; likewise, if `onnz` is given then `onz` is ignored.

.seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MATAIJ`, `MATAIJSELL`, `MATAIJPERM`, `MATAIJMKL`, `MatCreate()`, `MatCreateMPIAIJPERM()`, `MatSetValues()`
@*/
PetscErrorCode MatCreateMPIAIJCRL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], PetscInt onz, const PetscInt onnz[], Mat *A)
{
  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, m, n));
  PetscCall(MatSetType(*A, MATMPIAIJCRL));
  PetscCall(MatMPIAIJSetPreallocation_MPIAIJ(*A, nz, (PetscInt *)nnz, onz, (PetscInt *)onnz));
  PetscFunctionReturn(PETSC_SUCCESS);
}
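/*
  Usage sketch (hypothetical sizes): preallocate 5 nonzeros per row in the
  diagonal block and 2 per row in the off-diagonal block, then assemble as
  with any AIJ matrix:

    Mat A;
    PetscCall(MatCreateMPIAIJCRL(PETSC_COMM_WORLD, m, n, 5, NULL, 2, NULL, &A));
    // ... MatSetValues(A, ...) calls ...
    PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));

  The CRL data structure is (re)built at the end of each final assembly.
*/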

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCRL(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatSetType(A, MATMPIAIJ));
  PetscCall(MatConvert_MPIAIJ_MPIAIJCRL(A, MATMPIAIJCRL, MAT_INPLACE_MATRIX, &A));
  PetscFunctionReturn(PETSC_SUCCESS);
}