Actual source code: mpiaijsell.c

#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
   MatCreateMPIAIJSELL - Creates a sparse parallel matrix whose local
   portions are stored as `MATSEQAIJSELL` matrices (a matrix class that inherits
   from `MATSEQAIJ` but performs some operations in SELL format).  The same
   guidelines that apply to `MATMPIAIJ` matrices for preallocating the matrix
   storage apply here as well.

   Collective

   Input Parameters:
+  comm  - MPI communicator
.  m     - number of local rows (or `PETSC_DECIDE` to have it calculated if `M` is given).
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n     - number of local columns (or `PETSC_DECIDE` to have it calculated if `N` is given).
           This value should be the same as the local size used in creating the
           x vector for the matrix-vector product y = Ax. For square matrices `n` is almost always `m`.
.  M     - number of global rows (or `PETSC_DETERMINE` to have it calculated if `m` is given)
.  N     - number of global columns (or `PETSC_DETERMINE` to have it calculated if `n` is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (the same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row),
           or `NULL` if `d_nz` is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. `m`.
           For matrices you plan to factor you must leave room for the diagonal entry and
           put in the entry even if it is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
           submatrix (the same value is used for all local rows)
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row), or `NULL` if `o_nz` is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e. `m`.

   Output Parameter:
.  A - the matrix

   Options Database Key:
.  -mat_aijsell_eager_shadow - Construct shadow matrix upon matrix assembly; default is to take a "lazy" approach, performing this step the first
                               time the matrix is applied

   Level: intermediate

   Notes:
   If the `d_nnz` or `o_nnz` parameter is given, then the corresponding `d_nz` or `o_nz` parameter is ignored.

   The `m`, `n`, `M`, `N` parameters specify the size of the matrix and its partitioning across
   processors, while the `d_nz`, `d_nnz`, `o_nz`, `o_nnz` parameters specify the approximate
   storage requirements for this matrix.

   If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2, ... are the values of the input parameter `m` on each process.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix which is obtained by extracting the part corresponding
   to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
   first row that belongs to the processor and r2 is the last row belonging
   to that processor. This is a square m x m matrix. The remaining columns of
   the local rows constitute the OFF-DIAGONAL portion.
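
   For example, consider a 4x4 matrix distributed over two processes, each owning two rows and
   two columns (the numerical values here are purely illustrative):
.vb
                cols 0-1   cols 2-3
   rank 0:    [  1  2    |  0  3  ]      d_nnz = {2, 1},  o_nnz = {1, 1}
              [  0  4    |  5  0  ]
   rank 1:    [  6  0    |  7  0  ]      d_nnz = {1, 1},  o_nnz = {1, 1}
              [  0  8    |  0  9  ]
.ve
   On rank 0 the DIAGONAL portion consists of columns 0-1 of its two rows; on rank 1 it consists
   of columns 2-3 of its two rows. All other locally stored entries belong to the OFF-DIAGONAL portion.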

   When calling this routine with a single process communicator, a matrix of
   type `MATSEQAIJSELL` is returned.  If a matrix of type `MATMPIAIJSELL` is desired
   for this type of communicator, use the construction mechanism:
.vb
   MatCreate(..., &A);
   MatSetType(A, MATMPIAIJSELL);
   MatMPIAIJSetPreallocation(A, ...);
.ve
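
   For example, a rough sketch of typical use on a communicator with more than one process might
   look like the following, where the local sizes m and n, the preallocation values, and the
   variables row, ncols, cols, vals, x, and y are placeholders set up by the application:
.vb
   MatCreateMPIAIJSELL(PETSC_COMM_WORLD, m, n, PETSC_DETERMINE, PETSC_DETERMINE, 5, NULL, 2, NULL, &A);
   MatSetValues(A, 1, &row, ncols, cols, vals, INSERT_VALUES);  /* repeat for each locally owned row */
   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
   MatMult(A, x, y);                                            /* y = Ax, using SELL kernels where available */
.ve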

.seealso: [](chapter_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MATSEQAIJSELL`, `MATMPIAIJSELL`, `MATAIJSELL`, `MatCreate()`, `MatCreateSeqAIJSELL()`, `MatSetValues()`
@*/
PetscErrorCode MatCreateMPIAIJSELL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size > 1) {
    PetscCall(MatSetType(*A, MATMPIAIJSELL));
    PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
  } else {
    PetscCall(MatSetType(*A, MATSEQAIJSELL));
    PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJSELL(Mat, MatType, MatReuse, Mat *);

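/* Preallocation for MATMPIAIJSELL: perform the standard MPIAIJ preallocation, then convert the
   local diagonal (b->A) and off-diagonal (b->B) blocks in place to MATSEQAIJSELL. */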
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJSELL(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  PetscCall(MatMPIAIJSetPreallocation_MPIAIJ(B, d_nz, d_nnz, o_nz, o_nnz));
  PetscCall(MatConvert_SeqAIJ_SeqAIJSELL(b->A, MATSEQAIJSELL, MAT_INPLACE_MATRIX, &b->A));
  PetscCall(MatConvert_SeqAIJ_SeqAIJSELL(b->B, MATSEQAIJSELL, MAT_INPLACE_MATRIX, &b->B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

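/* Convert a MATMPIAIJ matrix to MATMPIAIJSELL: duplicate if a new matrix is requested, change the
   type name, and compose the AIJSELL-specific preallocation routine, which converts the local
   blocks when preallocation is performed. */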
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B));

  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJSELL));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJSELL));
  *newmat = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

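/* Constructor for MATMPIAIJSELL: build the matrix as MATMPIAIJ, then convert it in place. */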
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJSELL(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatSetType(A, MATMPIAIJ));
  PetscCall(MatConvert_MPIAIJ_MPIAIJSELL(A, MATMPIAIJSELL, MAT_INPLACE_MATRIX, &A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATAIJSELL - "AIJSELL" - A matrix type to be used for sparse matrices.

   This matrix type is identical to `MATSEQAIJSELL` when constructed with a single process communicator,
   and `MATMPIAIJSELL` otherwise.  As a result, for single process communicators,
   `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.
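
   For instance, a minimal sketch of this recommended pattern (the sizes and preallocation
   values shown are placeholders) is
.vb
   MatCreate(comm, &A);
   MatSetSizes(A, m, n, M, N);
   MatSetType(A, MATAIJSELL);
   MatSeqAIJSetPreallocation(A, 5, NULL);
   MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL);
.ve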

   Options Database Key:
. -mat_type aijsell - sets the matrix type to `MATAIJSELL`

   Level: beginner

.seealso: [](chapter_matrices), `Mat`, `MatCreateMPIAIJSELL()`, `MATSEQAIJSELL`, `MATMPIAIJSELL`, `MATSEQAIJ`, `MATMPIAIJ`, `MATSEQAIJPERM`, `MATMPIAIJPERM`, `MATSEQAIJMKL`, `MATMPIAIJMKL`
M*/