Actual source code: mpiaijperm.c
#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
  MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local
  portions are stored as `MATSEQAIJPERM` matrices (a matrix class that inherits
  from `MATSEQAIJ` but includes some optimizations to allow more effective
  vectorization). The same guidelines that apply to `MATMPIAIJ` matrices for
  preallocating the matrix storage apply here as well.

  Collective

  Input Parameters:
+ comm  - MPI communicator
. m     - number of local rows (or `PETSC_DECIDE` to have calculated if `M` is given)
          This value should be the same as the local size used in creating the
          y vector for the matrix-vector product y = Ax.
. n     - number of local columns (or `PETSC_DECIDE` to have calculated if `N` is given)
          This value should be the same as the local size used in creating the
          x vector for the matrix-vector product y = Ax. For square matrices `n` is almost always `m`.
. M     - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
. N     - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
. d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
          (same value is used for all local rows)
. d_nnz - array containing the number of nonzeros in the various rows of the
          DIAGONAL portion of the local submatrix (possibly different for each row)
          or `NULL`, if `d_nz` is used to specify the nonzero structure.
          The size of this array is equal to the number of local rows, i.e. `m`.
          For matrices you plan to factor you must leave room for the diagonal entry and
          put in the entry even if it is zero.
. o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
          submatrix (same value is used for all local rows).
- o_nnz - array containing the number of nonzeros in the various rows of the
          OFF-DIAGONAL portion of the local submatrix (possibly different for
          each row) or `NULL`, if `o_nz` is used to specify the nonzero
          structure. The size of this array is equal to the number
          of local rows, i.e. `m`.

  Output Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_no_inode            - Do not use inodes
- -mat_inode_limit <limit> - Sets inode limit (max limit=5)

  Level: intermediate

  Notes:
  If the *_nnz parameter is given, then the *_nz parameter is ignored.

  The `m`, `n`, `M`, `N` parameters specify the size of the matrix, and its partitioning across
  processors, while the `d_nz`, `d_nnz`, `o_nz`, `o_nnz` parameters specify the approximate
  storage requirements for this matrix.

  If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
  processor, then it must be used on all processors that share the object for
  that argument.

  The user MUST specify either the local or global matrix dimensions
  (possibly both).

  The parallel matrix is partitioned such that the first m0 rows belong to
  process 0, the next m1 rows belong to process 1, the next m2 rows belong
  to process 2, etc., where m0, m1, m2, ... are the values of the input
  parameter `m` on each process.

  The DIAGONAL portion of the local submatrix of a processor can be defined
  as the submatrix which is obtained by extracting the part corresponding
  to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
  first row that belongs to the processor, and r2 is the last row belonging
  to this processor. This is a square m x m matrix. The remaining portion of
  the local submatrix (which is m x N overall) constitutes the OFF-DIAGONAL portion.

  If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.

  When calling this routine with a single process communicator, a matrix of
  type `MATSEQAIJPERM` is returned. If a matrix of type `MATMPIAIJPERM` is desired
  for this type of communicator, use the construction mechanism
.vb
  MatCreate(...,&A);
  MatSetType(A,MATMPIAIJPERM);
  MatSetSizes(A, m,n,M,N);
  MatMPIAIJSetPreallocation(A,...);
.ve

  By default, this format uses inodes (identical nodes) when possible.
  We search for consecutive rows with the same nonzero structure, thereby
  reusing matrix information to achieve increased efficiency.
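
  Example Usage:
  A minimal sketch of creating such a matrix; the local size, the global sizes, and the
  per-row nonzero estimates below are illustrative only.
.vb
  Mat A;
  // allow 3 nonzeros per row in the diagonal block and 1 in the off-diagonal block
  MatCreateMPIAIJPERM(PETSC_COMM_WORLD, 4, 4, PETSC_DETERMINE, PETSC_DETERMINE, 3, NULL, 1, NULL, &A);
  // ... fill the matrix with MatSetValues() ...
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  MatDestroy(&A);
.ve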

.seealso: [](chapter_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MATMPIAIJPERM`, `MatCreate()`, `MatCreateSeqAIJPERM()`, `MatSetValues()`
@*/
PetscErrorCode MatCreateMPIAIJPERM(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCallMPI(MPI_Comm_size(comm, &size));
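  /* On a communicator with more than one process build the parallel AIJPERM matrix,
     otherwise fall back to the sequential AIJPERM format */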
  if (size > 1) {
    PetscCall(MatSetType(*A, MATMPIAIJPERM));
    PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
  } else {
    PetscCall(MatSetType(*A, MATSEQAIJPERM));
    PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJPERM(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  PetscCall(MatMPIAIJSetPreallocation_MPIAIJ(B, d_nz, d_nnz, o_nz, o_nnz));
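  /* After the standard MPIAIJ preallocation, convert the local diagonal (b->A) and
     off-diagonal (b->B) blocks in place to the SEQAIJPERM format */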
  PetscCall(MatConvert_SeqAIJ_SeqAIJPERM(b->A, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->A));
  PetscCall(MatConvert_SeqAIJ_SeqAIJPERM(b->B, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B));

  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJPERM));
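  /* Compose the type-specific preallocation routine so that later calls to
     MatMPIAIJSetPreallocation() on this matrix dispatch to the AIJPERM version above */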
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJPERM));
  *newmat = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJPERM(Mat A)
{
  PetscFunctionBegin;
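  /* Build a plain MPIAIJ matrix first, then convert it in place to the AIJPERM variant */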
  PetscCall(MatSetType(A, MATMPIAIJ));
  PetscCall(MatConvert_MPIAIJ_MPIAIJPERM(A, MATMPIAIJPERM, MAT_INPLACE_MATRIX, &A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  MATAIJPERM - "AIJPERM" - A matrix type to be used for sparse matrices.

  This matrix type is identical to `MATSEQAIJPERM` when constructed with a single process communicator,
  and `MATMPIAIJPERM` otherwise. As a result, for single process communicators,
  `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
  for communicators controlling multiple processes. It is recommended that you call both of
  the above preallocation routines for simplicity.
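
  A minimal sketch of this communicator-independent construction; `comm`, the sizes, and the
  nonzero estimates below are illustrative placeholders.
.vb
  MatCreate(comm, &A);
  MatSetSizes(A, m, n, PETSC_DETERMINE, PETSC_DETERMINE);
  MatSetType(A, MATAIJPERM);
  MatSeqAIJSetPreallocation(A, 5, NULL);          // takes effect when comm has a single process
  MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL); // takes effect when comm has multiple processes
.ve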

  Options Database Key:
. -mat_type aijperm - sets the matrix type to `MATAIJPERM`

  Level: beginner

.seealso: [](chapter_matrices), `Mat`, `MatCreateMPIAIJPERM()`, `MATSEQAIJPERM`, `MATMPIAIJPERM`, `MATSEQAIJ`, `MATMPIAIJ`, `MATSEQAIJMKL`, `MATMPIAIJMKL`, `MATSEQAIJSELL`, `MATMPIAIJSELL`
M*/