Actual source code: matrix.c
petsc-dev 2014-02-02
2: /*
3: This is where the abstract matrix operations are defined
4: */
6: #include <petsc-private/matimpl.h> /*I "petscmat.h" I*/
7: #include <petsc-private/vecimpl.h>
9: /* Logging support */
10: PetscClassId MAT_CLASSID;
11: PetscClassId MAT_COLORING_CLASSID;
12: PetscClassId MAT_FDCOLORING_CLASSID;
13: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
15: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
16: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
17: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
18: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
19: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
20: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_GetSubMatrices, MAT_GetOrdering, MAT_GetRedundantMatrix, MAT_GetSeqNonzeroStructure;
21: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
22: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction;
23: PetscLogEvent MAT_TransposeColoringCreate;
24: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
25: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
26: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
27: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
28: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
29: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
30: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
31: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
32: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
33: PetscLogEvent MAT_GetMultiProcBlock;
34: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
35: PetscLogEvent MAT_ViennaCLCopyToGPU;
36: PetscLogEvent MAT_Merge,MAT_Residual;
37: PetscLogEvent Mat_Coloring_Apply,Mat_Coloring_Comm,Mat_Coloring_Local,Mat_Coloring_ISCreate,Mat_Coloring_SetUp;
39: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};
43: /*@
44: MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.
46: Logically Collective on Mat
48: Input Parameters:
49: + x - the matrix
50: - rctx - the random number context, formed by PetscRandomCreate(), or NULL,
51: in which case one will be created internally.
53: Output Parameter:
54: . x - the matrix
56: Example of Usage:
57: .vb
58: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
59: MatSetRandom(x,rctx);
60: PetscRandomDestroy(&rctx);
61: .ve
63: Level: intermediate
65: Concepts: matrix^setting to random
66: Concepts: random^matrix
68: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
69: @*/
70: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
71: {
73: PetscRandom randObj = NULL;
80: if (!rctx) {
81: MPI_Comm comm;
82: PetscObjectGetComm((PetscObject)x,&comm);
83: PetscRandomCreate(comm,&randObj);
84: PetscRandomSetFromOptions(randObj);
85: rctx = randObj;
86: }
88: PetscLogEventBegin(VEC_SetRandom,x,rctx,0,0);
89: (*x->ops->setrandom)(x,rctx);
90: PetscLogEventEnd(VEC_SetRandom,x,rctx,0,0);
92: x->assembled = PETSC_TRUE;
93: PetscRandomDestroy(&randObj);
94: return(0);
95: }
100: /*@
101: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
103: Input Parameter:
104: . mat - the matrix
106: Output Parameter:
107: . keptrows - the rows that are not completely zero
109: Level: intermediate
111: @*/
112: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
113: {
118: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
119: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
120: if (!mat->ops->findnonzerorows) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not coded for this matrix type");
121: (*mat->ops->findnonzerorows)(mat,keptrows);
122: return(0);
123: }
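/*
   Illustrative usage sketch (not part of the original source): locating the nonzero
   rows of an already assembled matrix A and examining the resulting index set.

     IS       keptrows;
     PetscInt nkept;

     MatFindNonzeroRows(A,&keptrows);
     ISGetLocalSize(keptrows,&nkept);
     PetscPrintf(PETSC_COMM_SELF,"local rows kept: %D\n",nkept);
     ISDestroy(&keptrows);
*/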
127: /*@
128: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
130: Not Collective
132: Input Parameters:
133: . A - the matrix
135: Output Parameters:
136: . a - the diagonal part (which is a SEQUENTIAL matrix)
138: Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
140: Level: advanced
142: @*/
143: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
144: {
145: PetscErrorCode ierr,(*f)(Mat,Mat*);
146: PetscMPIInt size;
152: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
153: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
154: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
155: PetscObjectQueryFunction((PetscObject)A,"MatGetDiagonalBlock_C",&f);
156: if (f) {
157: (*f)(A,a);
158: return(0);
159: } else if (size == 1) {
160: *a = A;
161: } else {
162: MatType mattype;
163: MatGetType(A,&mattype);
164: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix type %s does not support getting diagonal block",mattype);
165: }
166: return(0);
167: }
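/*
   Illustrative usage sketch (not part of the original source): extracting the
   on-process (diagonal) block of a parallel matrix A. The returned matrix is a
   reference owned by A, so it is not destroyed here.

     Mat Adiag;

     MatGetDiagonalBlock(A,&Adiag);
     MatView(Adiag,PETSC_VIEWER_STDOUT_SELF);
*/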
171: /*@
172: MatGetTrace - Gets the trace of a matrix, that is, the sum of the diagonal entries.
174: Collective on Mat
176: Input Parameters:
177: . mat - the matrix
179: Output Parameter:
180: . trace - the sum of the diagonal entries
182: Level: advanced
184: @*/
185: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
186: {
188: Vec diag;
191: MatGetVecs(mat,&diag,NULL);
192: MatGetDiagonal(mat,diag);
193: VecSum(diag,trace);
194: VecDestroy(&diag);
195: return(0);
196: }
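/*
   Illustrative usage sketch (not part of the original source): computing the trace
   of an assembled square matrix A and printing its real part.

     PetscScalar trace;

     MatGetTrace(A,&trace);
     PetscPrintf(PETSC_COMM_WORLD,"trace = %g\n",(double)PetscRealPart(trace));
*/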
200: /*@
201: MatRealPart - Zeros out the imaginary part of the matrix
203: Logically Collective on Mat
205: Input Parameters:
206: . mat - the matrix
208: Level: advanced
211: .seealso: MatImaginaryPart()
212: @*/
213: PetscErrorCode MatRealPart(Mat mat)
214: {
220: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
221: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
222: if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
223: MatCheckPreallocated(mat,1);
224: (*mat->ops->realpart)(mat);
225: #if defined(PETSC_HAVE_CUSP)
226: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
227: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
228: }
229: #endif
230: #if defined(PETSC_HAVE_VIENNACL)
231: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
232: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
233: }
234: #endif
235: return(0);
236: }
240: /*@C
241: MatGetGhosts - Gets the global indices of all ghost nodes defined by the sparse matrix
243: Collective on Mat
245: Input Parameter:
246: . mat - the matrix
248: Output Parameters:
249: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
250: - ghosts - the global indices of the ghost points
252: Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()
254: Level: advanced
256: @*/
257: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
258: {
264: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
265: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
266: if (!mat->ops->getghosts) {
267: if (nghosts) *nghosts = 0;
268: if (ghosts) *ghosts = 0;
269: } else {
270: (*mat->ops->getghosts)(mat,nghosts,ghosts);
271: }
272: return(0);
273: }
278: /*@
279: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
281: Logically Collective on Mat
283: Input Parameters:
284: . mat - the matrix
286: Level: advanced
289: .seealso: MatRealPart()
290: @*/
291: PetscErrorCode MatImaginaryPart(Mat mat)
292: {
298: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
299: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
300: if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
301: MatCheckPreallocated(mat,1);
302: (*mat->ops->imaginarypart)(mat);
303: #if defined(PETSC_HAVE_CUSP)
304: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
305: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
306: }
307: #endif
308: #if defined(PETSC_HAVE_VIENNACL)
309: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
310: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
311: }
312: #endif
313: return(0);
314: }
318: /*@
319: MatMissingDiagonal - Determines whether a sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
321: Collective on Mat
323: Input Parameter:
324: . mat - the matrix
326: Output Parameters:
327: + missing - is any diagonal missing
328: - dd - first diagonal entry that is missing (optional)
330: Level: advanced
333: .seealso: MatRealPart()
334: @*/
335: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
336: {
342: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
343: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
344: if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
345: (*mat->ops->missingdiagonal)(mat,missing,dd);
346: return(0);
347: }
351: /*@C
352: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
353: for each row that you get to ensure that your application does
354: not leak memory.
356: Not Collective
358: Input Parameters:
359: + mat - the matrix
360: - row - the row to get
362: Output Parameters:
363: + ncols - if not NULL, the number of nonzeros in the row
364: . cols - if not NULL, the column numbers
365: - vals - if not NULL, the values
367: Notes:
368: This routine is provided for people who need to have direct access
369: to the structure of a matrix. We hope that we provide enough
370: high-level matrix routines that few users will need it.
372: MatGetRow() always returns 0-based column indices, regardless of
373: whether the internal representation is 0-based (default) or 1-based.
375: For better efficiency, set cols and/or vals to NULL if you do
376: not wish to extract these quantities.
378: The user can only examine the values extracted with MatGetRow();
379: the values cannot be altered. To change the matrix entries, one
380: must use MatSetValues().
382: You can only have one call to MatGetRow() outstanding for a particular
383: matrix at a time, per processor. MatGetRow() can only obtain rows
384: associated with the given processor, it cannot get rows from the
385: other processors; for that we suggest using MatGetSubMatrices(), then
386: MatGetRow() on the submatrix. The row index passed to MatGetRow()
387: is in the global numbering of rows.
389: Fortran Notes:
390: The calling sequence from Fortran is
391: .vb
392: MatGetRow(matrix,row,ncols,cols,values,ierr)
393: Mat matrix (input)
394: integer row (input)
395: integer ncols (output)
396: integer cols(maxcols) (output)
397: double precision (or double complex) values(maxcols) output
398: .ve
399: where maxcols >= maximum nonzeros in any row of the matrix.
402: Caution:
403: Do not try to change the contents of the output arrays (cols and vals).
404: In some cases, this may corrupt the matrix.
406: Level: advanced
408: Concepts: matrices^row access
410: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatGetSubMatrices(), MatGetDiagonal()
411: @*/
412: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
413: {
415: PetscInt incols;
420: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
421: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
422: if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
423: MatCheckPreallocated(mat,1);
424: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
425: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
426: if (ncols) *ncols = incols;
427: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
428: return(0);
429: }
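/*
   Illustrative usage sketch (not part of the original source): looping over the
   locally owned rows of an assembled matrix A with MatGetRow()/MatRestoreRow().

     PetscInt          row,rstart,rend,ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;

     MatGetOwnershipRange(A,&rstart,&rend);
     for (row=rstart; row<rend; row++) {
       MatGetRow(A,row,&ncols,&cols,&vals);
       ... examine (but do not modify) cols[] and vals[] here ...
       MatRestoreRow(A,row,&ncols,&cols,&vals);
     }
*/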
433: /*@
434: MatConjugate - replaces the matrix values with their complex conjugates
436: Logically Collective on Mat
438: Input Parameters:
439: . mat - the matrix
441: Level: advanced
443: .seealso: VecConjugate()
444: @*/
445: PetscErrorCode MatConjugate(Mat mat)
446: {
447: #if defined(PETSC_USE_COMPLEX)
452: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
453: if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
454: (*mat->ops->conjugate)(mat);
455: #if defined(PETSC_HAVE_CUSP)
456: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
457: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
458: }
459: #endif
460: #if defined(PETSC_HAVE_VIENNACL)
461: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
462: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
463: }
464: #endif
465: return(0);
466: #else
467: return 0;
468: #endif
469: }
473: /*@C
474: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
476: Not Collective
478: Input Parameters:
479: + mat - the matrix
480: . row - the row to get
481: . ncols, cols - the number of nonzeros and their columns
482: - vals - if nonzero the column values
484: Notes:
485: This routine should be called after you have finished examining the entries.
487: This routine zeros out ncols, cols, and vals. This is to prevent accidental
488: use of the array after it has been restored. If you pass NULL, it will
489: not zero the pointers. Use of cols or vals after MatRestoreRow is invalid.
491: Fortran Notes:
492: The calling sequence from Fortran is
493: .vb
494: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
495: Mat matrix (input)
496: integer row (input)
497: integer ncols (output)
498: integer cols(maxcols) (output)
499: double precision (or double complex) values(maxcols) output
500: .ve
501: Where maxcols >= maximum nonzeros in any row of the matrix.
503: In Fortran MatRestoreRow() MUST be called after MatGetRow()
504: before another call to MatGetRow() can be made.
506: Level: advanced
508: .seealso: MatGetRow()
509: @*/
510: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
511: {
517: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
518: if (!mat->ops->restorerow) return(0);
519: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
520: if (ncols) *ncols = 0;
521: if (cols) *cols = NULL;
522: if (vals) *vals = NULL;
523: return(0);
524: }
528: /*@
529: MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrix in MATSBAIJ format.
530: You should call MatRestoreRowUpperTriangular() after calling MatGetRow/MatRestoreRow() to disable the flag.
532: Not Collective
534: Input Parameters:
535: + mat - the matrix
537: Notes:
538: The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
540: Level: advanced
542: Concepts: matrices^row access
544: .seealso: MatRestoreRowUpperTriangular()
545: @*/
546: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
547: {
553: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
554: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
555: if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
556: MatCheckPreallocated(mat,1);
557: (*mat->ops->getrowuppertriangular)(mat);
558: return(0);
559: }
563: /*@
564: MatRestoreRowUpperTriangular - Disables calls to MatGetRow() for matrices in MATSBAIJ format.
566: Not Collective
568: Input Parameters:
569: + mat - the matrix
571: Notes:
572: This routine should be called after you have finished calling MatGetRow()/MatRestoreRow().
575: Level: advanced
577: .seealso: MatGetRowUpperTriangular()
578: @*/
579: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
580: {
585: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
586: if (!mat->ops->restorerowuppertriangular) return(0);
587: (*mat->ops->restorerowuppertriangular)(mat);
588: return(0);
589: }
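/*
   Illustrative usage sketch (not part of the original source): accessing a row of a
   MATSBAIJ matrix, which requires bracketing the MatGetRow()/MatRestoreRow() calls
   with MatGetRowUpperTriangular()/MatRestoreRowUpperTriangular(). The variable row
   is an assumed locally owned row index.

     PetscInt          ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;

     MatGetRowUpperTriangular(A);
     MatGetRow(A,row,&ncols,&cols,&vals);
     ... examine the upper triangular part of the row ...
     MatRestoreRow(A,row,&ncols,&cols,&vals);
     MatRestoreRowUpperTriangular(A);
*/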
593: /*@C
594: MatSetOptionsPrefix - Sets the prefix used for searching for all
595: Mat options in the database.
597: Logically Collective on Mat
599: Input Parameters:
600: + A - the Mat context
601: - prefix - the prefix to prepend to all option names
603: Notes:
604: A hyphen (-) must NOT be given at the beginning of the prefix name.
605: The first character of all runtime options is AUTOMATICALLY the hyphen.
607: Level: advanced
609: .keywords: Mat, set, options, prefix, database
611: .seealso: MatSetFromOptions()
612: @*/
613: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
614: {
619: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
620: return(0);
621: }
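/*
   Illustrative usage sketch (not part of the original source): giving a matrix its own
   options prefix so it can be configured separately on the command line, e.g. with
   -sub_mat_type baij. The prefix name "sub_" is an arbitrary example.

     MatSetOptionsPrefix(A,"sub_");
     MatSetFromOptions(A);
*/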
625: /*@C
626: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
627: Mat options in the database.
629: Logically Collective on Mat
631: Input Parameters:
632: + A - the Mat context
633: - prefix - the prefix to prepend to all option names
635: Notes:
636: A hyphen (-) must NOT be given at the beginning of the prefix name.
637: The first character of all runtime options is AUTOMATICALLY the hyphen.
639: Level: advanced
641: .keywords: Mat, append, options, prefix, database
643: .seealso: MatGetOptionsPrefix()
644: @*/
645: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
646: {
651: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
652: return(0);
653: }
657: /*@C
658: MatGetOptionsPrefix - Gets the prefix used for searching for all
659: Mat options in the database.
661: Not Collective
663: Input Parameter:
664: . A - the Mat context
666: Output Parameter:
667: . prefix - pointer to the prefix string used
669: Notes: On the Fortran side, the user should pass in a string 'prefix' of
670: sufficient length to hold the prefix.
672: Level: advanced
674: .keywords: Mat, get, options, prefix, database
676: .seealso: MatAppendOptionsPrefix()
677: @*/
678: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
679: {
684: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
685: return(0);
686: }
690: /*@
691: MatSetUp - Sets up the internal matrix data structures for later use.
693: Collective on Mat
695: Input Parameters:
696: . A - the Mat context
698: Notes:
699: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
701: If a suitable preallocation routine is used, this function does not need to be called.
703: See the Performance chapter of the PETSc users manual for how to preallocate matrices
705: Level: beginner
707: .keywords: Mat, setup
709: .seealso: MatCreate(), MatDestroy()
710: @*/
711: PetscErrorCode MatSetUp(Mat A)
712: {
713: PetscMPIInt size;
718: if (!((PetscObject)A)->type_name) {
719: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
720: if (size == 1) {
721: MatSetType(A, MATSEQAIJ);
722: } else {
723: MatSetType(A, MATMPIAIJ);
724: }
725: }
726: if (!A->preallocated && A->ops->setup) {
727: PetscInfo(A,"Warning not preallocating matrix storage\n");
728: (*A->ops->setup)(A);
729: }
730: A->preallocated = PETSC_TRUE;
731: return(0);
732: }
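/*
   Illustrative usage sketch (not part of the original source): creating a matrix with
   default preallocation via MatSetUp() instead of a type-specific preallocation
   routine. The global sizes (100 x 100) are arbitrary example values.

     Mat A;

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
     MatSetFromOptions(A);
     MatSetUp(A);
     ... insert values with MatSetValues() ...
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/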
734: #if defined(PETSC_HAVE_SAWS)
735: #include <petscviewersaws.h>
736: #endif
739: /*@C
740: MatView - Visualizes a matrix object.
742: Collective on Mat
744: Input Parameters:
745: + mat - the matrix
746: - viewer - visualization context
748: Notes:
749: The available visualization contexts include
750: + PETSC_VIEWER_STDOUT_SELF - standard output (default)
751: . PETSC_VIEWER_STDOUT_WORLD - synchronized standard
752: output where only the first processor opens
753: the file. All other processors send their
754: data to the first processor to print.
755: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
757: The user can open alternative visualization contexts with
758: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
759: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
760: specified file; corresponding input uses MatLoad()
761: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
762: an X window display
763: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
764: Currently only the sequential dense and AIJ
765: matrix types support the Socket viewer.
767: The user can call PetscViewerSetFormat() to specify the output
768: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
769: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
770: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
771: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
772: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
773: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
774: format common among all matrix types
775: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
776: format (which is in many cases the same as the default)
777: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
778: size and structure (not the matrix entries)
779: . PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
780: the matrix structure
782: Options Database Keys:
783: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
784: . -mat_view ::ascii_info_detail - Prints more detailed info
785: . -mat_view - Prints matrix in ASCII format
786: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
787: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
788: . -display <name> - Sets display name (default is host)
789: . -draw_pause <sec> - Sets number of seconds to pause after display
790: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see the users manual for details).
791: . -viewer_socket_machine <machine>
792: . -viewer_socket_port <port>
793: . -mat_view binary - save matrix to file in binary format
794: - -viewer_binary_filename <name>
795: Level: beginner
797: Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
798: viewer is used.
800: See bin/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
801: viewer is used.
803: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
804: And then use the following mouse functions:
805: left mouse: zoom in
806: middle mouse: zoom out
807: right mouse: continue with the simulation
809: Concepts: matrices^viewing
810: Concepts: matrices^plotting
811: Concepts: matrices^printing
813: .seealso: PetscViewerSetFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
814: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
815: @*/
816: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
817: {
818: PetscErrorCode ierr;
819: PetscInt rows,cols,bs;
820: PetscBool iascii;
821: PetscViewerFormat format;
822: #if defined(PETSC_HAVE_SAWS)
823: PetscBool isams;
824: #endif
829: if (!viewer) {
830: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
831: }
834: MatCheckPreallocated(mat,1);
836: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
837: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
838: #if defined(PETSC_HAVE_SAWS)
839: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&isams);
840: #endif
841: if (iascii) {
842: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
843: PetscViewerGetFormat(viewer,&format);
844: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
845: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
846: PetscViewerASCIIPushTab(viewer);
847: MatGetSize(mat,&rows,&cols);
848: MatGetBlockSize(mat,&bs);
849: if (bs != 1) {
850: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,bs);
851: } else {
852: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
853: }
854: if (mat->factortype) {
855: const MatSolverPackage solver;
856: MatFactorGetSolverPackage(mat,&solver);
857: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
858: }
859: if (mat->ops->getinfo) {
860: MatInfo info;
861: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
862: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%lld, allocated nonzeros=%lld\n",(Petsc64bitInt)info.nz_used,(Petsc64bitInt)info.nz_allocated);
863: PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
864: }
865: if (mat->nullsp) {PetscViewerASCIIPrintf(viewer," has attached null space\n");}
866: if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer," has attached near null space\n");}
867: }
868: #if defined(PETSC_HAVE_SAWS)
869: } else if (isams) {
870: PetscMPIInt rank;
872: PetscObjectName((PetscObject)mat);
873: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
874: if (!((PetscObject)mat)->amsmem && !rank) {
875: PetscObjectViewSAWs((PetscObject)mat,viewer);
876: }
877: #endif
878: }
879: if (mat->ops->view) {
880: PetscViewerASCIIPushTab(viewer);
881: (*mat->ops->view)(mat,viewer);
882: PetscViewerASCIIPopTab(viewer);
883: } else if (!iascii) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported",((PetscObject)viewer)->type_name);
884: if (iascii) {
885: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
886: PetscViewerGetFormat(viewer,&format);
887: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
888: PetscViewerASCIIPopTab(viewer);
889: }
890: }
891: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
892: return(0);
893: }
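/*
   Illustrative usage sketch (not part of the original source): viewing a matrix to an
   ASCII file in MATLAB format. The file name "A.m" is an arbitrary example.

     PetscViewer viewer;

     PetscViewerASCIIOpen(PETSC_COMM_WORLD,"A.m",&viewer);
     PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);
     MatView(A,viewer);
     PetscViewerDestroy(&viewer);
*/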
895: #if defined(PETSC_USE_DEBUG)
896: #include <../src/sys/totalview/tv_data_display.h>
897: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
898: {
899: TV_add_row("Local rows", "int", &mat->rmap->n);
900: TV_add_row("Local columns", "int", &mat->cmap->n);
901: TV_add_row("Global rows", "int", &mat->rmap->N);
902: TV_add_row("Global columns", "int", &mat->cmap->N);
903: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
904: return TV_format_OK;
905: }
906: #endif
910: /*@C
911: MatLoad - Loads a matrix that has been stored in binary format
912: with MatView(). The matrix format is determined from the options database.
913: Generates a parallel MPI matrix if the communicator has more than one
914: processor. The default matrix type is AIJ.
916: Collective on PetscViewer
918: Input Parameters:
919: + newmat - the newly loaded matrix, this needs to have been created with MatCreate()
920: or some related function before a call to MatLoad()
921: - viewer - binary file viewer, created with PetscViewerBinaryOpen()
923: Options Database Keys:
924: Used with block matrix formats (MATSEQBAIJ, ...) to specify
925: block size
926: . -matload_block_size <bs>
928: Level: beginner
930: Notes:
931: If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
932: Mat before calling this routine if you wish to set it from the options database.
934: MatLoad() automatically loads into the options database any options
935: given in the file filename.info where filename is the name of the file
936: that was passed to the PetscViewerBinaryOpen(). The options in the info
937: file will be ignored if you use the -viewer_binary_skip_info option.
939: If the type or size of newmat is not set before a call to MatLoad, PETSc
940: sets the default matrix type AIJ and sets the local and global sizes.
941: If type and/or size is already set, then the same are used.
943: In parallel, each processor can load a subset of rows (or the
944: entire matrix). This routine is especially useful when a large
945: matrix is stored on disk and only part of it is desired on each
946: processor. For example, a parallel solver may access only some of
947: the rows from each processor. The algorithm used here reads
948: relatively small blocks of data rather than reading the entire
949: matrix and then subsetting it.
951: Notes for advanced users:
952: Most users should not need to know the details of the binary storage
953: format, since MatLoad() and MatView() completely hide these details.
954: But for anyone who's interested, the standard binary matrix storage
955: format is
957: $ int MAT_FILE_CLASSID
958: $ int number of rows
959: $ int number of columns
960: $ int total number of nonzeros
961: $ int *number nonzeros in each row
962: $ int *column indices of all nonzeros (starting index is zero)
963: $ PetscScalar *values of all nonzeros
965: PETSc automatically does the byte swapping for
966: machines that store the bytes reversed, e.g. DEC alpha, freebsd,
967: linux, Windows and the paragon; thus if you write your own binary
968: read/write routines you have to swap the bytes; see PetscBinaryRead()
969: and PetscBinaryWrite() to see how this may be done.
971: .keywords: matrix, load, binary, input
973: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()
975: @*/
976: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
977: {
979: PetscBool isbinary,flg;
984: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
985: if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");
987: if (!((PetscObject)newmat)->type_name) {
988: MatSetType(newmat,MATAIJ);
989: }
991: if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
992: PetscLogEventBegin(MAT_Load,viewer,0,0,0);
993: (*newmat->ops->load)(newmat,viewer);
994: PetscLogEventEnd(MAT_Load,viewer,0,0,0);
996: flg = PETSC_FALSE;
997: PetscOptionsGetBool(((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
998: if (flg) {
999: MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1000: MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1001: }
1002: flg = PETSC_FALSE;
1003: PetscOptionsGetBool(((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1004: if (flg) {
1005: MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1006: }
1007: return(0);
1008: }
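/*
   Illustrative usage sketch (not part of the original source): loading a matrix from a
   binary file previously written with MatView(). The file name "matrix.dat" is an
   arbitrary example.

     Mat         A;
     PetscViewer viewer;

     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetFromOptions(A);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);
*/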
1012: /*@
1013: MatDestroy - Frees space taken by a matrix.
1015: Collective on Mat
1017: Input Parameter:
1018: . A - the matrix
1020: Level: beginner
1022: @*/
1023: PetscErrorCode MatDestroy(Mat *A)
1024: {
1028: if (!*A) return(0);
1030: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}
1032: /* if memory was published with SAWs then destroy it */
1033: PetscObjectSAWsViewOff((PetscObject)*A);
1034: if ((*A)->ops->destroy) {
1035: (*(*A)->ops->destroy)(*A);
1036: }
1037: MatNullSpaceDestroy(&(*A)->nullsp);
1038: MatNullSpaceDestroy(&(*A)->nearnullsp);
1039: PetscLayoutDestroy(&(*A)->rmap);
1040: PetscLayoutDestroy(&(*A)->cmap);
1041: PetscHeaderDestroy(A);
1042: return(0);
1043: }
1047: /*@
1048: MatSetValues - Inserts or adds a block of values into a matrix.
1049: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1050: MUST be called after all calls to MatSetValues() have been completed.
1052: Not Collective
1054: Input Parameters:
1055: + mat - the matrix
1056: . v - a logically two-dimensional array of values
1057: . m, idxm - the number of rows and their global indices
1058: . n, idxn - the number of columns and their global indices
1059: - addv - either ADD_VALUES or INSERT_VALUES, where
1060: ADD_VALUES adds values to any existing entries, and
1061: INSERT_VALUES replaces existing entries with new values
1063: Notes:
1064: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1065: MatSetUp() before using this routine
1067: By default the values, v, are row-oriented. See MatSetOption() for other options.
1069: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1070: options cannot be mixed without intervening calls to the assembly
1071: routines.
1073: MatSetValues() uses 0-based row and column numbers in Fortran
1074: as well as in C.
1076: Negative indices may be passed in idxm and idxn; these rows and columns are
1077: simply ignored. This allows easily inserting element stiffness matrices
1078: with homogeneous Dirichlet boundary conditions that you don't want represented
1079: in the matrix.
1081: Efficiency Alert:
1082: The routine MatSetValuesBlocked() may offer much better efficiency
1083: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1085: Level: beginner
1087: Concepts: matrices^putting entries in
1089: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1090: InsertMode, INSERT_VALUES, ADD_VALUES
1091: @*/
1092: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1093: {
1095: #if defined(PETSC_USE_DEBUG)
1096: PetscInt i,j;
1097: #endif
1102: if (!m || !n) return(0); /* no values to insert */
1106: MatCheckPreallocated(mat,1);
1107: if (mat->insertmode == NOT_SET_VALUES) {
1108: mat->insertmode = addv;
1109: }
1110: #if defined(PETSC_USE_DEBUG)
1111: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1112: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1113: if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1115: if (v) {
1116: for (i=0; i<m; i++) {
1117: for (j=0; j<n; j++) {
1118: if (PetscIsInfOrNanScalar(v[i*n+j]))
1119: #if defined(PETSC_USE_COMPLEX)
1120: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1121: #else
1122: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1123: #endif
1124: }
1125: }
1126: }
1127: #endif
1129: if (mat->assembled) {
1130: mat->was_assembled = PETSC_TRUE;
1131: mat->assembled = PETSC_FALSE;
1132: }
1133: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1134: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1135: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1136: #if defined(PETSC_HAVE_CUSP)
1137: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1138: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1139: }
1140: #endif
1141: #if defined(PETSC_HAVE_VIENNACL)
1142: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1143: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1144: }
1145: #endif
1146: return(0);
1147: }
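/*
   Illustrative usage sketch (not part of the original source): inserting a dense 2x2
   block of values into global rows 0,1 and columns 0,2 and then assembling. The
   indices and values are arbitrary example data; vals is row-oriented.

     PetscInt    rows[2] = {0,1},cols[2] = {0,2};
     PetscScalar vals[4] = {1.0,2.0,3.0,4.0};

     MatSetValues(A,2,rows,2,cols,vals,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/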
1152: /*@
1153: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1154: values into a matrix
1156: Not Collective
1158: Input Parameters:
1159: + mat - the matrix
1160: . row - the (block) row to set
1161: - v - a logically two-dimensional array of values
1163: Notes:
1164: The values, v, are column-oriented (for the block version) and sorted
1166: All the nonzeros in the row must be provided
1168: The matrix must have previously had its column indices set
1170: The row must belong to this process
1172: Level: intermediate
1174: Concepts: matrices^putting entries in
1176: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1177: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1178: @*/
1179: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1180: {
1182: PetscInt globalrow;
1188: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1189: MatSetValuesRow(mat,globalrow,v);
1190: #if defined(PETSC_HAVE_CUSP)
1191: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1192: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1193: }
1194: #endif
1195: #if defined(PETSC_HAVE_VIENNACL)
1196: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1197: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1198: }
1199: #endif
1200: return(0);
1201: }
1205: /*@
1206: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1207: values into a matrix
1209: Not Collective
1211: Input Parameters:
1212: + mat - the matrix
1213: . row - the (block) row to set
1214: - v - a logically two-dimensional array of values
1216: Notes:
1217: The values, v, are column-oriented for the block version.
1219: All the nonzeros in the row must be provided
1221: THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED, usually MatSetValues() is used.
1223: The row must belong to this process
1225: Level: advanced
1227: Concepts: matrices^putting entries in
1229: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1230: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1231: @*/
1232: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1233: {
1239: MatCheckPreallocated(mat,1);
1241: #if defined(PETSC_USE_DEBUG)
1242: if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1243: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1244: #endif
1245: mat->insertmode = INSERT_VALUES;
1247: if (mat->assembled) {
1248: mat->was_assembled = PETSC_TRUE;
1249: mat->assembled = PETSC_FALSE;
1250: }
1251: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1252: if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1253: (*mat->ops->setvaluesrow)(mat,row,v);
1254: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1255: #if defined(PETSC_HAVE_CUSP)
1256: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1257: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1258: }
1259: #endif
1260: #if defined(PETSC_HAVE_VIENNACL)
1261: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1262: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1263: }
1264: #endif
1265: return(0);
1266: }
1270: /*@
1271: MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1272: Using structured grid indexing
1274: Not Collective
1276: Input Parameters:
1277: + mat - the matrix
1278: . m - number of rows being entered
1279: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1280: . n - number of columns being entered
1281: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1282: . v - a logically two-dimensional array of values
1283: - addv - either ADD_VALUES or INSERT_VALUES, where
1284: ADD_VALUES adds values to any existing entries, and
1285: INSERT_VALUES replaces existing entries with new values
1287: Notes:
1288: By default the values, v, are row-oriented. See MatSetOption() for other options.
1290: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1291: options cannot be mixed without intervening calls to the assembly
1292: routines.
1294: The grid coordinates are across the entire grid, not just the local portion
1296: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1297: as well as in C.
1299: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1301: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1302: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1304: The columns and rows in the stencil passed in MUST be contained within the
1305: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1306: if you create a DMDA with an overlap of one grid level and on a particular process its first
1307: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1308: first i index you can use in your column and row indices in MatSetStencil() is 5.
1310: In Fortran idxm and idxn should be declared as
1311: $ MatStencil idxm(4,m),idxn(4,n)
1312: and the values inserted using
1313: $ idxm(MatStencil_i,1) = i
1314: $ idxm(MatStencil_j,1) = j
1315: $ idxm(MatStencil_k,1) = k
1316: $ idxm(MatStencil_c,1) = c
1317: etc
1319: For periodic boundary conditions, use negative indices for values to the left (below 0), which are
1320: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1321: etc., to obtain values wrapped from the left edge. This works only for the
1322: DMDA_BOUNDARY_PERIODIC boundary type.
1324: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1325: a single value per point) you can skip filling those indices.
1327: Inspired by the structured grid interface to the HYPRE package
1328: (http://www.llnl.gov/CASC/hypre)
1330: Efficiency Alert:
1331: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1332: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1334: Level: beginner
1336: Concepts: matrices^putting entries in
1338: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1339: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1340: @*/
1341: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1342: {
1344: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1345: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1346: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1349: if (!m || !n) return(0); /* no values to insert */
1356: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1357: jdxm = buf; jdxn = buf+m;
1358: } else {
1359: PetscMalloc2(m,&bufm,n,&bufn);
1360: jdxm = bufm; jdxn = bufn;
1361: }
1362: for (i=0; i<m; i++) {
1363: for (j=0; j<3-sdim; j++) dxm++;
1364: tmp = *dxm++ - starts[0];
1365: for (j=0; j<dim-1; j++) {
1366: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1367: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1368: }
1369: if (mat->stencil.noc) dxm++;
1370: jdxm[i] = tmp;
1371: }
1372: for (i=0; i<n; i++) {
1373: for (j=0; j<3-sdim; j++) dxn++;
1374: tmp = *dxn++ - starts[0];
1375: for (j=0; j<dim-1; j++) {
1376: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1377: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1378: }
1379: if (mat->stencil.noc) dxn++;
1380: jdxn[i] = tmp;
1381: }
1382: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1383: PetscFree2(bufm,bufn);
1384: return(0);
1385: }
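/*
   Illustrative usage sketch (not part of the original source): inserting one row of a
   standard 5-point Laplacian stencil on a 2d structured grid. Assumes A was obtained
   from DMCreateMatrix() for a 2d DMDA and that (i,j) is a locally owned grid point
   away from the boundary; the values are arbitrary example data.

     MatStencil  row,col[5];
     PetscScalar v[5] = {-1.0,-1.0,4.0,-1.0,-1.0};

     row.i = i;      row.j = j;
     col[0].i = i;   col[0].j = j-1;
     col[1].i = i-1; col[1].j = j;
     col[2].i = i;   col[2].j = j;
     col[3].i = i+1; col[3].j = j;
     col[4].i = i;   col[4].j = j+1;
     MatSetValuesStencil(A,1,&row,5,col,v,INSERT_VALUES);
*/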
1389: /*@
1390: MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1391: Using structured grid indexing
1393: Not Collective
1395: Input Parameters:
1396: + mat - the matrix
1397: . m - number of rows being entered
1398: . idxm - grid coordinates for matrix rows being entered
1399: . n - number of columns being entered
1400: . idxn - grid coordinates for matrix columns being entered
1401: . v - a logically two-dimensional array of values
1402: - addv - either ADD_VALUES or INSERT_VALUES, where
1403: ADD_VALUES adds values to any existing entries, and
1404: INSERT_VALUES replaces existing entries with new values
1406: Notes:
1407: By default the values, v, are row-oriented and unsorted.
1408: See MatSetOption() for other options.
1410: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1411: options cannot be mixed without intervening calls to the assembly
1412: routines.
1414: The grid coordinates are across the entire grid, not just the local portion
1416: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1417: as well as in C.
1419: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1421: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1422: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1424: The columns and rows in the stencil passed in MUST be contained within the
1425: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1426: if you create a DMDA with an overlap of one grid level and on a particular process its first
1427: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1428: first i index you can use in your column and row indices in MatSetStencil() is 5.
1430: In Fortran idxm and idxn should be declared as
1431: $ MatStencil idxm(4,m),idxn(4,n)
1432: and the values inserted using
1433: $ idxm(MatStencil_i,1) = i
1434: $ idxm(MatStencil_j,1) = j
1435: $ idxm(MatStencil_k,1) = k
1436: etc
1438: Negative indices may be passed in idxm and idxn; these rows and columns are
1439: simply ignored. This allows easily inserting element stiffness matrices
1440: with homogeneous Dirichlet boundary conditions that you don't want represented
1441: in the matrix.
1443: Inspired by the structured grid interface to the HYPRE package
1444: (http://www.llnl.gov/CASC/hypre)
1446: Level: beginner
1448: Concepts: matrices^putting entries in
1450: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1451: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1452: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1453: @*/
1454: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1455: {
1457: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1458: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1459: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1462: if (!m || !n) return(0); /* no values to insert */
1469: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1470: jdxm = buf; jdxn = buf+m;
1471: } else {
1472: PetscMalloc2(m,&bufm,n,&bufn);
1473: jdxm = bufm; jdxn = bufn;
1474: }
1475: for (i=0; i<m; i++) {
1476: for (j=0; j<3-sdim; j++) dxm++;
1477: tmp = *dxm++ - starts[0];
1478: for (j=0; j<sdim-1; j++) {
1479: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1480: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1481: }
1482: dxm++;
1483: jdxm[i] = tmp;
1484: }
1485: for (i=0; i<n; i++) {
1486: for (j=0; j<3-sdim; j++) dxn++;
1487: tmp = *dxn++ - starts[0];
1488: for (j=0; j<sdim-1; j++) {
1489: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1490: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1491: }
1492: dxn++;
1493: jdxn[i] = tmp;
1494: }
1495: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1496: PetscFree2(bufm,bufn);
1497: #if defined(PETSC_HAVE_CUSP)
1498: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1499: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1500: }
1501: #endif
1502: #if defined(PETSC_HAVE_VIENNACL)
1503: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1504: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1505: }
1506: #endif
1507: return(0);
1508: }
1512: /*@
1513: MatSetStencil - Sets the grid information for setting values into a matrix via
1514: MatSetValuesStencil()
1516: Not Collective
1518: Input Parameters:
1519: + mat - the matrix
1520: . dim - dimension of the grid 1, 2, or 3
1521: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1522: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1523: - dof - number of degrees of freedom per node
1526: Inspired by the structured grid interface to the HYPRE package
1527: (http://www.llnl.gov/CASC/hypre)
1529: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1530: user.
1532: Level: beginner
1534: Concepts: matrices^putting entries in
1536: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1537: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1538: @*/
1539: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1540: {
1541: PetscInt i;
1548: mat->stencil.dim = dim + (dof > 1);
1549: for (i=0; i<dim; i++) {
1550: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1551: mat->stencil.starts[i] = starts[dim-i-1];
1552: }
1553: mat->stencil.dims[dim] = dof;
1554: mat->stencil.starts[dim] = 0;
1555: mat->stencil.noc = (PetscBool)(dof == 1);
1556: return(0);
1557: }
1561: /*@
1562: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1564: Not Collective
1566: Input Parameters:
1567: + mat - the matrix
1568: . v - a logically two-dimensional array of values
1569: . m, idxm - the number of block rows and their global block indices
1570: . n, idxn - the number of block columns and their global block indices
1571: - addv - either ADD_VALUES or INSERT_VALUES, where
1572: ADD_VALUES adds values to any existing entries, and
1573: INSERT_VALUES replaces existing entries with new values
1575: Notes:
1576: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1577: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1579: The m and n count the NUMBER of blocks in the row direction and column direction,
1580: NOT the total number of rows/columns; for example, if the block size is 2 and
1581: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1582: The values in idxm would be 1 2; that is the first index for each block divided by
1583: the block size.
1585: Note that you must call MatSetBlockSize() when constructing this matrix (after
1586: preallocating it).
1588: By default the values, v, are row-oriented, so the layout of
1589: v is the same as for MatSetValues(). See MatSetOption() for other options.
1591: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1592: options cannot be mixed without intervening calls to the assembly
1593: routines.
1595: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1596: as well as in C.
1598: Negative indices may be passed in idxm and idxn; these rows and columns are
1599: simply ignored. This allows easily inserting element stiffness matrices
1600: with homogeneous Dirichlet boundary conditions that you don't want represented
1601: in the matrix.
1603: Each time an entry is set within a sparse matrix via MatSetValues(),
1604: internal searching must be done to determine where to place the
1605: data in the matrix storage space. By instead inserting blocks of
1606: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1607: reduced.
1609: Example:
1610: $ Suppose m=n=2 and block size(bs) = 2 The array is
1611: $
1612: $ 1 2 | 3 4
1613: $ 5 6 | 7 8
1614: $ - - - | - - -
1615: $ 9 10 | 11 12
1616: $ 13 14 | 15 16
1617: $
1618: $ v[] should be passed in like
1619: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1620: $
1621: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1622: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
1624: Level: intermediate
1626: Concepts: matrices^putting entries in blocked
1628: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1629: @*/
1630: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1631: {
1637: if (!m || !n) return(0); /* no values to insert */
1641: MatCheckPreallocated(mat,1);
1642: if (mat->insertmode == NOT_SET_VALUES) {
1643: mat->insertmode = addv;
1644: }
1645: #if defined(PETSC_USE_DEBUG)
1646: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1647: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1648: if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1649: #endif
1651: if (mat->assembled) {
1652: mat->was_assembled = PETSC_TRUE;
1653: mat->assembled = PETSC_FALSE;
1654: }
1655: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1656: if (mat->ops->setvaluesblocked) {
1657: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1658: } else {
1659: PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1660: PetscInt i,j,bs = mat->rmap->bs,cbs = mat->cmap->bs;
1661: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1662: iidxm = buf; iidxn = buf + m*bs;
1663: } else {
1664: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1665: iidxm = bufr; iidxn = bufc;
1666: }
1667: for (i=0; i<m; i++) {
1668: for (j=0; j<bs; j++) {
1669: iidxm[i*bs+j] = bs*idxm[i] + j;
1670: }
1671: }
1672: for (i=0; i<n; i++) {
1673: for (j=0; j<cbs; j++) {
1674: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1675: }
1676: }
1677: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1678: PetscFree2(bufr,bufc);
1679: }
1680: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1681: #if defined(PETSC_HAVE_CUSP)
1682: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1683: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1684: }
1685: #endif
1686: #if defined(PETSC_HAVE_VIENNACL)
1687: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1688: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1689: }
1690: #endif
1691: return(0);
1692: }
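/*
   Illustrative usage sketch (not part of the original source): inserting one 2x2 block
   into block row 1 and block column 2 of a matrix with block size 2 (so scalar rows
   2,3 and columns 4,5). The values are arbitrary example data, row-oriented within
   the block.

     PetscInt    brow = 1,bcol = 2;
     PetscScalar vals[4] = {1.0,2.0,3.0,4.0};

     MatSetValuesBlocked(A,1,&brow,1,&bcol,vals,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/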
1696: /*@
1697: MatGetValues - Gets a block of values from a matrix.
1699: Not Collective; currently only returns a local block
1701: Input Parameters:
1702: + mat - the matrix
1703: . v - a logically two-dimensional array for storing the values
1704: . m, idxm - the number of rows and their global indices
1705: - n, idxn - the number of columns and their global indices
1707: Notes:
1708: The user must allocate space (m*n PetscScalars) for the values, v.
1709: The values, v, are then returned in a row-oriented format,
1710: analogous to that used by default in MatSetValues().
1712: MatGetValues() uses 0-based row and column numbers in
1713: Fortran as well as in C.
1715: MatGetValues() requires that the matrix has been assembled
1716: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1717: MatSetValues() and MatGetValues() CANNOT be made in succession
1718: without intermediate matrix assembly.
1720: Negative row or column indices will be ignored and those locations in v[] will be
1721: left unchanged.
1723: Level: advanced
1725: Concepts: matrices^accessing values
1727: .seealso: MatGetRow(), MatGetSubMatrices(), MatSetValues()
1728: @*/
1729: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1730: {
1736: if (!m || !n) return(0);
1740: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1741: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1742: if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1743: MatCheckPreallocated(mat,1);
1745: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1746: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1747: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1748: return(0);
1749: }
1753: /*@
1754: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1755: the same size. Currently, this can only be called once and creates the given matrix.
1757: Not Collective
1759: Input Parameters:
1760: + mat - the matrix
1761: . nb - the number of blocks
1762: . bs - the number of rows (and columns) in each block
1763: . rows - a concatenation of the rows for each block
1764: - v - a concatenation of logically two-dimensional arrays of values
1766: Notes:
1767: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
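   Example of Usage (a minimal sketch for two 2x2 blocks; the matrix A and the arrays are illustrative):
.vb
     PetscInt    rows[] = {0,1, 2,3};             /* nb*bs row (= column) indices, grouped by block */
     PetscScalar v[]    = {1,2,3,4, 5,6,7,8};     /* nb blocks of bs*bs values each */
     MatSetValuesBatch(A,2,2,rows,v);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve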
1769: Level: advanced
1771: Concepts: matrices^putting entries in
1773: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1774: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1775: @*/
1776: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1777: {
1785: #if defined(PETSC_USE_DEBUG)
1786: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1787: #endif
1789: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1790: if (mat->ops->setvaluesbatch) {
1791: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1792: } else {
1793: PetscInt b;
1794: for (b = 0; b < nb; ++b) {
1795: MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1796: }
1797: }
1798: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1799: return(0);
1800: }
1804: /*@
1805: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1806: the routine MatSetValuesLocal() to allow users to insert matrix entries
1807: using a local (per-processor) numbering.
1809: Not Collective
1811: Input Parameters:
1812: + x - the matrix
1813: . rmapping - row mapping created with ISLocalToGlobalMappingCreate()
1814: or ISLocalToGlobalMappingCreateIS()
1815: - cmapping - column mapping
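   Example of Usage (a minimal sketch; nlocal and globalindices are illustrative names describing this process's local-to-global numbering):
.vb
     IS                     is;
     ISLocalToGlobalMapping map;
     ISCreateGeneral(PETSC_COMM_SELF,nlocal,globalindices,PETSC_COPY_VALUES,&is);
     ISLocalToGlobalMappingCreateIS(is,&map);
     ISDestroy(&is);
     MatSetLocalToGlobalMapping(A,map,map);       /* same mapping for rows and columns */
     ISLocalToGlobalMappingDestroy(&map);         /* the matrix keeps its own reference */
.ve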
1817: Level: intermediate
1819: Concepts: matrices^local to global mapping
1820: Concepts: local to global mapping^for matrices
1822: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1823: @*/
1824: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1825: {
1834: if (x->ops->setlocaltoglobalmapping) {
1835: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1836: } else {
1837: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1838: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1839: }
1840: return(0);
1841: }
1845: /*@
1846: MatSetLocalToGlobalMappingBlock - Sets a local-to-global numbering for use
1847: by the routine MatSetValuesBlockedLocal() to allow users to insert matrix
1848: entries using a local (per-processor) numbering.
1850: Not Collective
1852: Input Parameters:
1853: + x - the matrix
1854: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or
1855: ISLocalToGlobalMappingCreateIS()
1856: - cmapping - column mapping
1858: Level: intermediate
1860: Concepts: matrices^local to global mapping blocked
1861: Concepts: local to global mapping^for matrices, blocked
1863: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal(),
1864: MatSetValuesBlocked(), MatSetValuesLocal()
1865: @*/
1866: PetscErrorCode MatSetLocalToGlobalMappingBlock(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1867: {
1876: PetscLayoutSetISLocalToGlobalMappingBlock(x->rmap,rmapping);
1877: PetscLayoutSetISLocalToGlobalMappingBlock(x->cmap,cmapping);
1878: return(0);
1879: }
1883: /*@
1884: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
1886: Not Collective
1888: Input Parameters:
1889: . A - the matrix
1891: Output Parameters:
1892: + rmapping - row mapping
1893: - cmapping - column mapping
1895: Level: advanced
1897: Concepts: matrices^local to global mapping
1898: Concepts: local to global mapping^for matrices
1900: .seealso: MatSetValuesLocal(), MatGetLocalToGlobalMappingBlock()
1901: @*/
1902: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1903: {
1909: if (rmapping) *rmapping = A->rmap->mapping;
1910: if (cmapping) *cmapping = A->cmap->mapping;
1911: return(0);
1912: }
1916: /*@
1917: MatGetLocalToGlobalMappingBlock - Gets the local-to-global numbering set by MatSetLocalToGlobalMappingBlock()
1919: Not Collective
1921: Input Parameters:
1922: . A - the matrix
1924: Output Parameters:
1925: + rmapping - row mapping
1926: - cmapping - column mapping
1928: Level: advanced
1930: Concepts: matrices^local to global mapping blocked
1931: Concepts: local to global mapping^for matrices, blocked
1933: .seealso: MatSetValuesBlockedLocal(), MatGetLocalToGlobalMapping()
1934: @*/
1935: PetscErrorCode MatGetLocalToGlobalMappingBlock(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1936: {
1942: if (rmapping) *rmapping = A->rmap->bmapping;
1943: if (cmapping) *cmapping = A->cmap->bmapping;
1944: return(0);
1945: }
1949: /*@
1950: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
1951: using a local ordering of the nodes.
1953: Not Collective
1955: Input Parameters:
1956: + mat - the matrix
1957: . nrow, irow - number of rows and their local indices
1958: . ncol, icol - number of columns and their local indices
1959: . y - a logically two-dimensional array of values
1960: - addv - either INSERT_VALUES or ADD_VALUES, where
1961: ADD_VALUES adds values to any existing entries, and
1962: INSERT_VALUES replaces existing entries with new values
1964: Notes:
1965: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1966: MatSetUp() before using this routine
1968: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine
1970: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
1971: options cannot be mixed without intervening calls to the assembly
1972: routines.
1974: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1975: MUST be called after all calls to MatSetValuesLocal() have been completed.
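   Example of Usage (a minimal sketch; assumes the local-to-global mapping and preallocation have already been set on A):
.vb
     PetscInt    lrow[] = {0,1}, lcol[] = {0,1};  /* local indices */
     PetscScalar v[]    = {4.0,-1.0,-1.0,4.0};    /* row-oriented 2x2 set of values */
     MatSetValuesLocal(A,2,lrow,2,lcol,v,ADD_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve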
1977: Level: intermediate
1979: Concepts: matrices^putting entries in with local numbering
1981: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
1982: MatSetValueLocal()
1983: @*/
1984: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
1985: {
1991: MatCheckPreallocated(mat,1);
1992: if (!nrow || !ncol) return(0); /* no values to insert */
1996: if (mat->insertmode == NOT_SET_VALUES) {
1997: mat->insertmode = addv;
1998: }
1999: #if defined(PETSC_USE_DEBUG)
2000: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2001: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2002: if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2003: #endif
2005: if (mat->assembled) {
2006: mat->was_assembled = PETSC_TRUE;
2007: mat->assembled = PETSC_FALSE;
2008: }
2009: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2010: if (mat->ops->setvalueslocal) {
2011: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2012: } else {
2013: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2014: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2015: irowm = buf; icolm = buf+nrow;
2016: } else {
2017: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2018: irowm = bufr; icolm = bufc;
2019: }
2020: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2021: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2022: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2023: PetscFree2(bufr,bufc);
2024: }
2025: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2026: #if defined(PETSC_HAVE_CUSP)
2027: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2028: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2029: }
2030: #endif
2031: #if defined(PETSC_HAVE_VIENNACL)
2032: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2033: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2034: }
2035: #endif
2036: return(0);
2037: }
2041: /*@
2042: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2043: using a local ordering of the nodes a block at a time.
2045: Not Collective
2047: Input Parameters:
2048: + mat - the matrix
2049: . nrow, irow - number of rows and their local indices
2050: . ncol, icol - number of columns and their local indices
2051: . y - a logically two-dimensional array of values
2052: - addv - either INSERT_VALUES or ADD_VALUES, where
2053: ADD_VALUES adds values to any existing entries, and
2054: INSERT_VALUES replaces existing entries with new values
2056: Notes:
2057: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2058: MatSetUp() before using this routine
2060: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMappingBlock()
2061: before using this routine.
2063: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2064: options cannot be mixed without intervening calls to the assembly
2065: routines.
2067: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2068: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
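   Example of Usage (a minimal sketch assuming block size 2 and that MatSetLocalToGlobalMappingBlock() has already been called on A):
.vb
     PetscInt    lrow[] = {0}, lcol[] = {0};      /* local block indices */
     PetscScalar v[]    = {1.0,2.0,3.0,4.0};      /* one 2x2 block, row-oriented */
     MatSetValuesBlockedLocal(A,1,lrow,1,lcol,v,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve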
2070: Level: intermediate
2072: Concepts: matrices^putting blocked values in with local numbering
2074: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMappingBlock(), MatAssemblyBegin(), MatAssemblyEnd(),
2075: MatSetValuesLocal(), MatSetLocalToGlobalMappingBlock(), MatSetValuesBlocked()
2076: @*/
2077: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2078: {
2084: MatCheckPreallocated(mat,1);
2085: if (!nrow || !ncol) return(0); /* no values to insert */
2089: if (mat->insertmode == NOT_SET_VALUES) {
2090: mat->insertmode = addv;
2091: }
2092: #if defined(PETSC_USE_DEBUG)
2093: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2094: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2095: if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2096: #endif
2098: if (mat->assembled) {
2099: mat->was_assembled = PETSC_TRUE;
2100: mat->assembled = PETSC_FALSE;
2101: }
2102: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2103: if (mat->ops->setvaluesblockedlocal) {
2104: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2105: } else {
2106: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2107: if (mat->rmap->bmapping && mat->cmap->bmapping) {
2108: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2109: irowm = buf; icolm = buf + nrow;
2110: } else {
2111: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2112: irowm = bufr; icolm = bufc;
2113: }
2114: ISLocalToGlobalMappingApply(mat->rmap->bmapping,nrow,irow,irowm);
2115: ISLocalToGlobalMappingApply(mat->cmap->bmapping,ncol,icol,icolm);
2116: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2117: PetscFree2(bufr,bufc);
2118: } else {
2119: PetscInt i,j,bs = mat->rmap->bs,cbs = mat->cmap->bs;
2120: if (nrow*bs+ncol*cbs <=(PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2121: irowm = buf; icolm = buf + nrow*bs;
2122: } else {
2123: PetscMalloc2(nrow*bs,&bufr,ncol*cbs,&bufc);
2124: irowm = bufr; icolm = bufc;
2125: }
2126: for (i=0; i<nrow; i++) {
2127: for (j=0; j<bs; j++) irowm[i*bs+j] = irow[i]*bs+j;
2128: }
2129: for (i=0; i<ncol; i++) {
2130: for (j=0; j<cbs; j++) icolm[i*cbs+j] = icol[i]*cbs+j;
2131: }
2132: MatSetValuesLocal(mat,nrow*bs,irowm,ncol*cbs,icolm,y,addv);
2133: PetscFree2(bufr,bufc);
2134: }
2135: }
2136: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2137: #if defined(PETSC_HAVE_CUSP)
2138: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2139: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2140: }
2141: #endif
2142: #if defined(PETSC_HAVE_VIENNACL)
2143: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2144: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2145: }
2146: #endif
2147: return(0);
2148: }
2152: /*@
2153: MatMultDiagonalBlock - Computes the matrix-vector product y = Dx, where D is defined by the inode or block structure of the diagonal
2155: Collective on Mat and Vec
2157: Input Parameters:
2158: + mat - the matrix
2159: - x - the vector to be multiplied
2161: Output Parameters:
2162: . y - the result
2164: Notes:
2165: The vectors x and y cannot be the same. I.e., one cannot
2166: call MatMultDiagonalBlock(A,y,y).
2168: Level: developer
2170: Concepts: matrix-vector product
2172: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2173: @*/
2174: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2175: {
2184: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2185: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2186: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2187: MatCheckPreallocated(mat,1);
2189: if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a diagonal block multiply defined");
2190: (*mat->ops->multdiagonalblock)(mat,x,y);
2191: PetscObjectStateIncrease((PetscObject)y);
2192: return(0);
2193: }
2195: /* --------------------------------------------------------*/
2198: /*@
2199: MatMult - Computes the matrix-vector product, y = Ax.
2201: Neighbor-wise Collective on Mat and Vec
2203: Input Parameters:
2204: + mat - the matrix
2205: - x - the vector to be multiplied
2207: Output Parameters:
2208: . y - the result
2210: Notes:
2211: The vectors x and y cannot be the same. I.e., one cannot
2212: call MatMult(A,y,y).
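   Example of Usage (a minimal sketch; A is an assembled matrix):
.vb
     Vec x,y;
     MatGetVecs(A,&x,&y);        /* x compatible with the columns of A, y with its rows */
     VecSet(x,1.0);
     MatMult(A,x,y);             /* y = A x */
     VecDestroy(&x);
     VecDestroy(&y);
.ve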
2214: Level: beginner
2216: Concepts: matrix-vector product
2218: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2219: @*/
2220: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2221: {
2229: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2230: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2231: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2232: #if !defined(PETSC_HAVE_CONSTRAINTS)
2233: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2234: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2235: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2236: #endif
2237: VecValidValues(x,2,PETSC_TRUE);
2238: MatCheckPreallocated(mat,1);
2240: if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2241: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2242: (*mat->ops->mult)(mat,x,y);
2243: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2244: VecValidValues(y,3,PETSC_FALSE);
2245: return(0);
2246: }
2250: /*@
2251: MatMultTranspose - Computes matrix transpose times a vector.
2253: Neighbor-wise Collective on Mat and Vec
2255: Input Parameters:
2256: + mat - the matrix
2257: - x - the vector to be multiplied
2259: Output Parameters:
2260: . y - the result
2262: Notes:
2263: The vectors x and y cannot be the same. I.e., one cannot
2264: call MatMultTranspose(A,y,y).
2266: For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiply,
2267: use MatMultHermitianTranspose()
2269: Level: beginner
2271: Concepts: matrix vector product^transpose
2273: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2274: @*/
2275: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2276: {
2285: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2286: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2287: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2288: #if !defined(PETSC_HAVE_CONSTRAINTS)
2289: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2290: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2291: #endif
2292: VecValidValues(x,2,PETSC_TRUE);
2293: MatCheckPreallocated(mat,1);
2295: if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2296: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2297: (*mat->ops->multtranspose)(mat,x,y);
2298: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2299: PetscObjectStateIncrease((PetscObject)y);
2300: VecValidValues(y,3,PETSC_FALSE);
2301: return(0);
2302: }
2306: /*@
2307: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2309: Neighbor-wise Collective on Mat and Vec
2311: Input Parameters:
2312: + mat - the matrix
2313: - x - the vector to be multiplied
2315: Output Parameters:
2316: . y - the result
2318: Notes:
2319: The vectors x and y cannot be the same. I.e., one cannot
2320: call MatMultHermitianTranspose(A,y,y).
2322: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2324: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2326: Level: beginner
2328: Concepts: matrix vector product^transpose
2330: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2331: @*/
2332: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2333: {
2335: Vec w;
2343: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2344: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2345: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2346: #if !defined(PETSC_HAVE_CONSTRAINTS)
2347: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2348: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2349: #endif
2350: MatCheckPreallocated(mat,1);
2352: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2353: if (mat->ops->multhermitiantranspose) {
2354: (*mat->ops->multhermitiantranspose)(mat,x,y);
2355: } else {
2356: VecDuplicate(x,&w);
2357: VecCopy(x,w);
2358: VecConjugate(w);
2359: MatMultTranspose(mat,w,y);
2360: VecDestroy(&w);
2361: VecConjugate(y);
2362: }
2363: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2364: PetscObjectStateIncrease((PetscObject)y);
2365: return(0);
2366: }
2370: /*@
2371: MatMultAdd - Computes v3 = v2 + A * v1.
2373: Neighbor-wise Collective on Mat and Vec
2375: Input Parameters:
2376: + mat - the matrix
2377: - v1, v2 - the vectors
2379: Output Parameters:
2380: . v3 - the result
2382: Notes:
2383: The vectors v1 and v3 cannot be the same. I.e., one cannot
2384: call MatMultAdd(A,v1,v2,v1).
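   Example of Usage (a minimal sketch; A is an assembled matrix and the vector contents are illustrative):
.vb
     Vec v1,v2,v3;
     MatGetVecs(A,&v1,&v2);
     VecDuplicate(v2,&v3);
     VecSet(v1,1.0);
     VecSet(v2,2.0);
     MatMultAdd(A,v1,v2,v3);     /* v3 = v2 + A*v1 */
.ve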
2386: Level: beginner
2388: Concepts: matrix vector product^addition
2390: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2391: @*/
2392: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2393: {
2403: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2404: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2405: if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2406: /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2407: if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2408: if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2409: if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2410: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2411: MatCheckPreallocated(mat,1);
2413: if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2414: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2415: (*mat->ops->multadd)(mat,v1,v2,v3);
2416: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2417: PetscObjectStateIncrease((PetscObject)v3);
2418: return(0);
2419: }
2423: /*@
2424: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2426: Neighbor-wise Collective on Mat and Vec
2428: Input Parameters:
2429: + mat - the matrix
2430: - v1, v2 - the vectors
2432: Output Parameters:
2433: . v3 - the result
2435: Notes:
2436: The vectors v1 and v3 cannot be the same. I.e., one cannot
2437: call MatMultTransposeAdd(A,v1,v2,v1).
2439: Level: beginner
2441: Concepts: matrix vector product^transpose and addition
2443: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2444: @*/
2445: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2446: {
2456: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2457: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2458: if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2459: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2460: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2461: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2462: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2463: MatCheckPreallocated(mat,1);
2465: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2466: (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2467: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2468: PetscObjectStateIncrease((PetscObject)v3);
2469: return(0);
2470: }
2474: /*@
2475: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2477: Neighbor-wise Collective on Mat and Vec
2479: Input Parameters:
2480: + mat - the matrix
2481: - v1, v2 - the vectors
2483: Output Parameters:
2484: . v3 - the result
2486: Notes:
2487: The vectors v1 and v3 cannot be the same. I.e., one cannot
2488: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2490: Level: beginner
2492: Concepts: matrix vector product^transpose and addition
2494: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2495: @*/
2496: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2497: {
2507: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2508: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2509: if (!mat->ops->multhermitiantransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2510: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2511: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2512: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2513: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2514: MatCheckPreallocated(mat,1);
2516: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2517: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2518: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2519: PetscObjectStateIncrease((PetscObject)v3);
2520: return(0);
2521: }
2525: /*@
2526: MatMultConstrained - The inner multiplication routine for a
2527: constrained matrix P^T A P.
2529: Neighbor-wise Collective on Mat and Vec
2531: Input Parameters:
2532: + mat - the matrix
2533: - x - the vector to be multiplied
2535: Output Parameters:
2536: . y - the result
2538: Notes:
2539: The vectors x and y cannot be the same. I.e., one cannot
2540: call MatMultConstrained(A,y,y).
2542: Level: beginner
2544: .keywords: matrix, multiply, matrix-vector product, constraint
2545: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2546: @*/
2547: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2548: {
2555: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2556: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2557: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2558: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2559: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2560: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2562: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2563: (*mat->ops->multconstrained)(mat,x,y);
2564: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2565: PetscObjectStateIncrease((PetscObject)y);
2566: return(0);
2567: }
2571: /*@
2572: MatMultTransposeConstrained - The inner multiplication routine for a
2573: constrained matrix P^T A^T P.
2575: Neighbor-wise Collective on Mat and Vec
2577: Input Parameters:
2578: + mat - the matrix
2579: - x - the vector to be multiplied
2581: Output Parameters:
2582: . y - the result
2584: Notes:
2585: The vectors x and y cannot be the same. I.e., one cannot
2586: call MatMultTransposeConstrained(A,y,y).
2588: Level: beginner
2590: .keywords: matrix, multiply, matrix-vector product, constraint
2591: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2592: @*/
2593: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2594: {
2601: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2602: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2603: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2604: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2605: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2607: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2608: (*mat->ops->multtransposeconstrained)(mat,x,y);
2609: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2610: PetscObjectStateIncrease((PetscObject)y);
2611: return(0);
2612: }
2616: /*@C
2617: MatGetFactorType - gets the type of factorization of a matrix
2619: Not Collective
2622: Input Parameters:
2623: . mat - the matrix
2625: Output Parameters:
2626: . t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT
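   Example of Usage (a minimal sketch; A is any matrix):
.vb
     MatFactorType ftype;
     MatGetFactorType(A,&ftype);
     if (ftype == MAT_FACTOR_NONE) {
       /* A is not factored, so routines such as MatSetValues() may be used */
     }
.ve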
2628: Level: intermediate
2630: .seealso: MatFactorType, MatGetFactor()
2631: @*/
2632: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2633: {
2637: *t = mat->factortype;
2638: return(0);
2639: }
2641: /* ------------------------------------------------------------*/
2644: /*@C
2645: MatGetInfo - Returns information about matrix storage (number of
2646: nonzeros, memory, etc.).
2648: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2650: Input Parameters:
2651: + mat - the matrix
2653: - flag - flag indicating the type of parameters to be returned
2654: (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2655: MAT_GLOBAL_SUM - sum over all processors)
2656: Output Parameter:
2657: . info - matrix information context
2659: Notes:
2660: The MatInfo context contains a variety of matrix data, including
2661: number of nonzeros allocated and used, number of mallocs during
2662: matrix assembly, etc. Additional information for factored matrices
2663: is provided (such as the fill ratio, number of mallocs during
2664: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2665: when using the runtime options
2666: $ -info -mat_view ::ascii_info
2668: Example for C/C++ Users:
2669: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2670: data within the MatInfo context. For example,
2671: .vb
2672: MatInfo info;
2673: Mat A;
2674: double mal, nz_a, nz_u;
2676: MatGetInfo(A,MAT_LOCAL,&info);
2677: mal = info.mallocs;
2678: nz_a = info.nz_allocated;
2679: .ve
2681: Example for Fortran Users:
2682: Fortran users should declare info as a double precision
2683: array of dimension MAT_INFO_SIZE, and then extract the parameters
2684: of interest. See the file ${PETSC_DIR}/include/finclude/petscmat.h
2685: for a complete list of parameter names.
2686: .vb
2687: double precision info(MAT_INFO_SIZE)
2688: double precision mal, nz_a
2689: Mat A
2690: integer ierr
2692: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2693: mal = info(MAT_INFO_MALLOCS)
2694: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2695: .ve
2697: Level: intermediate
2699: Concepts: matrices^getting information on
2701: Developer Note: fortran interface is not autogenerated as the f90
2702: interface definition cannot be generated correctly [due to MatInfo]
2704: .seealso: MatStashGetInfo()
2706: @*/
2707: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2708: {
2715: if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2716: MatCheckPreallocated(mat,1);
2717: (*mat->ops->getinfo)(mat,flag,info);
2718: return(0);
2719: }
2721: /* ----------------------------------------------------------*/
2725: /*@C
2726: MatLUFactor - Performs in-place LU factorization of matrix.
2728: Collective on Mat
2730: Input Parameters:
2731: + mat - the matrix
2732: . row - row permutation
2733: . col - column permutation
2734: - info - options for factorization, includes
2735: $ fill - expected fill as ratio of original fill.
2736: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2737: $ Run with the option -info to determine an optimal value to use
2739: Notes:
2740: Most users should employ the simplified KSP interface for linear solvers
2741: instead of working directly with matrix algebra routines such as this.
2742: See, e.g., KSPCreate().
2744: This changes the state of the matrix to a factored matrix; it cannot be used
2745: for example with MatSetValues() unless one first calls MatSetUnfactored().
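   Example of Usage (a minimal sketch; A, b and x are assumed to exist, and the ordering choice is illustrative):
.vb
     IS            row,col;
     MatFactorInfo info;
     MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
     MatFactorInfoInitialize(&info);
     info.fill = 2.0;
     MatLUFactor(A,row,col,&info);    /* A now holds its LU factors */
     MatSolve(A,b,x);
     ISDestroy(&row);
     ISDestroy(&col);
.ve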
2747: Level: developer
2749: Concepts: matrices^LU factorization
2751: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2752: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2754: Developer Note: fortran interface is not autogenerated as the f90
2755: interface definition cannot be generated correctly [due to MatFactorInfo]
2757: @*/
2758: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2759: {
2761: MatFactorInfo tinfo;
2769: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2770: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2771: if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2772: MatCheckPreallocated(mat,1);
2773: if (!info) {
2774: MatFactorInfoInitialize(&tinfo);
2775: info = &tinfo;
2776: }
2778: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2779: (*mat->ops->lufactor)(mat,row,col,info);
2780: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2781: PetscObjectStateIncrease((PetscObject)mat);
2782: return(0);
2783: }
2787: /*@C
2788: MatILUFactor - Performs in-place ILU factorization of matrix.
2790: Collective on Mat
2792: Input Parameters:
2793: + mat - the matrix
2794: . row - row permutation
2795: . col - column permutation
2796: - info - structure containing
2797: $ levels - number of levels of fill.
2798: $ expected fill - as ratio of original fill.
2799: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2800: missing diagonal entries)
2802: Notes:
2803: In practice this is in-place only when the level of fill is zero; otherwise new space is allocated
2804: to store the factored matrix and the previous storage is freed.
2806: Most users should employ the simplified KSP interface for linear solvers
2807: instead of working directly with matrix algebra routines such as this.
2808: See, e.g., KSPCreate().
2810: Level: developer
2812: Concepts: matrices^ILU factorization
2814: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2816: Developer Note: fortran interface is not autogenerated as the f90
2817: interface definition cannot be generated correctly [due to MatFactorInfo]
2819: @*/
2820: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2821: {
2830: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2831: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2832: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2833: if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2834: MatCheckPreallocated(mat,1);
2836: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2837: (*mat->ops->ilufactor)(mat,row,col,info);
2838: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2839: PetscObjectStateIncrease((PetscObject)mat);
2840: return(0);
2841: }
2845: /*@C
2846: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2847: Call this routine before calling MatLUFactorNumeric().
2849: Collective on Mat
2851: Input Parameters:
2852: + fact - the factor matrix obtained with MatGetFactor()
2853: . mat - the matrix
2854: . row, col - row and column permutations
2855: - info - options for factorization, includes
2856: $ fill - expected fill as ratio of original fill.
2857: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2858: $ Run with the option -info to determine an optimal value to use
2861: Notes:
2862: See the <a href="../../docs/manual.pdf">users manual</a> for additional information about
2863: choosing the fill factor for better efficiency.
2865: Most users should employ the simplified KSP interface for linear solvers
2866: instead of working directly with matrix algebra routines such as this.
2867: See, e.g., KSPCreate().
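   Example of Usage (a minimal sketch of the symbolic/numeric/solve sequence; A, b and x are assumed to exist and the PETSc solver package is used for illustration):
.vb
     Mat           F;
     IS            row,col;
     MatFactorInfo info;
     MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatFactorInfoInitialize(&info);
     MatLUFactorSymbolic(F,A,row,col,&info);
     MatLUFactorNumeric(F,A,&info);
     MatSolve(F,b,x);
.ve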
2869: Level: developer
2871: Concepts: matrices^LU symbolic factorization
2873: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2875: Developer Note: fortran interface is not autogenerated as the f90
2876: interface definition cannot be generated correctly [due to MatFactorInfo]
2878: @*/
2879: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2880: {
2890: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2891: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2892: if (!(fact)->ops->lufactorsymbolic) {
2893: const MatSolverPackage spackage;
2894: MatFactorGetSolverPackage(fact,&spackage);
2895: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
2896: }
2897: MatCheckPreallocated(mat,2);
2899: PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2900: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2901: PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2902: PetscObjectStateIncrease((PetscObject)fact);
2903: return(0);
2904: }
2908: /*@C
2909: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2910: Call this routine after first calling MatLUFactorSymbolic().
2912: Collective on Mat
2914: Input Parameters:
2915: + fact - the factor matrix obtained with MatGetFactor()
2916: . mat - the matrix
2917: - info - options for factorization
2919: Notes:
2920: See MatLUFactor() for in-place factorization. See
2921: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
2923: Most users should employ the simplified KSP interface for linear solvers
2924: instead of working directly with matrix algebra routines such as this.
2925: See, e.g., KSPCreate().
2927: Level: developer
2929: Concepts: matrices^LU numeric factorization
2931: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
2933: Developer Note: fortran interface is not autogenerated as the f90
2934: interface definition cannot be generated correctly [due to MatFactorInfo]
2936: @*/
2937: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
2938: {
2946: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2947: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
2949: if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
2950: MatCheckPreallocated(mat,2);
2951: PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
2952: (fact->ops->lufactornumeric)(fact,mat,info);
2953: PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
2954: MatViewFromOptions(fact,NULL,"-mat_view");
2955: PetscObjectStateIncrease((PetscObject)fact);
2956: return(0);
2957: }
2961: /*@C
2962: MatCholeskyFactor - Performs in-place Cholesky factorization of a
2963: symmetric matrix.
2965: Collective on Mat
2967: Input Parameters:
2968: + mat - the matrix
2969: . perm - row and column permutations
2970: - info - options for factorization, including the expected fill as a ratio of the original fill
2972: Notes:
2973: See MatLUFactor() for the nonsymmetric case. See also
2974: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
2976: Most users should employ the simplified KSP interface for linear solvers
2977: instead of working directly with matrix algebra routines such as this.
2978: See, e.g., KSPCreate().
2980: Level: developer
2982: Concepts: matrices^Cholesky factorization
2984: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
2985: MatGetOrdering()
2987: Developer Note: fortran interface is not autogenerated as the f90
2988: interface definition cannot be generated correctly [due to MatFactorInfo]
2990: @*/
2991: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
2992: {
3000: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3001: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3002: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3003: if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3004: MatCheckPreallocated(mat,1);
3006: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3007: (*mat->ops->choleskyfactor)(mat,perm,info);
3008: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3009: PetscObjectStateIncrease((PetscObject)mat);
3010: return(0);
3011: }
3015: /*@C
3016: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3017: of a symmetric matrix.
3019: Collective on Mat
3021: Input Parameters:
3022: + fact - the factor matrix obtained with MatGetFactor()
3023: . mat - the matrix
3024: . perm - row and column permutations
3025: - info - options for factorization, includes
3026: $ fill - expected fill as ratio of original fill.
3027: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3028: $ Run with the option -info to determine an optimal value to use
3030: Notes:
3031: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3032: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3034: Most users should employ the simplified KSP interface for linear solvers
3035: instead of working directly with matrix algebra routines such as this.
3036: See, e.g., KSPCreate().
3038: Level: developer
3040: Concepts: matrices^Cholesky symbolic factorization
3042: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3043: MatGetOrdering()
3045: Developer Note: fortran interface is not autogenerated as the f90
3046: interface definition cannot be generated correctly [due to MatFactorInfo]
3048: @*/
3049: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3050: {
3059: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3060: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3061: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3062: if (!(fact)->ops->choleskyfactorsymbolic) {
3063: const MatSolverPackage spackage;
3064: MatFactorGetSolverPackage(fact,&spackage);
3065: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3066: }
3067: MatCheckPreallocated(mat,2);
3069: PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3070: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3071: PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3072: PetscObjectStateIncrease((PetscObject)fact);
3073: return(0);
3074: }
3078: /*@C
3079: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3080: of a symmetric matrix. Call this routine after first calling
3081: MatCholeskyFactorSymbolic().
3083: Collective on Mat
3085: Input Parameters:
3086: + fact - the factor matrix obtained with MatGetFactor(), holding the symbolic factor of mat computed by MatCholeskyFactorSymbolic()
3087: . mat - the initial matrix
3088: - info - options for factorization
3092: Notes:
3093: Most users should employ the simplified KSP interface for linear solvers
3094: instead of working directly with matrix algebra routines such as this.
3095: See, e.g., KSPCreate().
3097: Level: developer
3099: Concepts: matrices^Cholesky numeric factorization
3101: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3103: Developer Note: fortran interface is not autogenerated as the f90
3104: interface definition cannot be generated correctly [due to MatFactorInfo]
3106: @*/
3107: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3108: {
3116: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3117: if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3118: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3119: MatCheckPreallocated(mat,2);
3121: PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3122: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3123: PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3124: MatViewFromOptions(fact,NULL,"-mat_view");
3125: PetscObjectStateIncrease((PetscObject)fact);
3126: return(0);
3127: }
3129: /* ----------------------------------------------------------------*/
3132: /*@
3133: MatSolve - Solves A x = b, given a factored matrix.
3135: Neighbor-wise Collective on Mat and Vec
3137: Input Parameters:
3138: + mat - the factored matrix
3139: - b - the right-hand-side vector
3141: Output Parameter:
3142: . x - the result vector
3144: Notes:
3145: The vectors b and x cannot be the same. I.e., one cannot
3146: call MatSolve(A,x,x).
3149: Most users should employ the simplified KSP interface for linear solvers
3150: instead of working directly with matrix algebra routines such as this.
3151: See, e.g., KSPCreate().
3153: Level: developer
3155: Concepts: matrices^triangular solves
3157: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3158: @*/
3159: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3160: {
3170: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3171: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3172: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3173: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3174: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3175: if (!mat->rmap->N && !mat->cmap->N) return(0);
3176: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3177: MatCheckPreallocated(mat,1);
3179: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3180: (*mat->ops->solve)(mat,b,x);
3181: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3182: PetscObjectStateIncrease((PetscObject)x);
3183: return(0);
3184: }
3188: PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X)
3189: {
3191: Vec b,x;
3192: PetscInt m,N,i;
3193: PetscScalar *bb,*xx;
3194: PetscBool flg;
3197: PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3198: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3199: PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3200: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
3202: MatDenseGetArray(B,&bb);
3203: MatDenseGetArray(X,&xx);
3204: MatGetLocalSize(B,&m,NULL); /* number local rows */
3205: MatGetSize(B,NULL,&N); /* total columns in dense matrix */
3206: MatGetVecs(A,&x,&b);
3207: for (i=0; i<N; i++) {
3208: VecPlaceArray(b,bb + i*m);
3209: VecPlaceArray(x,xx + i*m);
3210: MatSolve(A,b,x);
3211: VecResetArray(x);
3212: VecResetArray(b);
3213: }
3214: VecDestroy(&b);
3215: VecDestroy(&x);
3216: MatDenseRestoreArray(B,&bb);
3217: MatDenseRestoreArray(X,&xx);
3218: return(0);
3219: }
3223: /*@
3224: MatMatSolve - Solves A X = B, given a factored matrix.
3226: Neighbor-wise Collective on Mat
3228: Input Parameters:
3229: + mat - the factored matrix
3230: - B - the right-hand-side matrix (dense matrix)
3232: Output Parameter:
3233: . X - the result matrix (dense matrix)
3235: Notes:
3236: The matrices B and X cannot be the same. I.e., one cannot
3237: call MatMatSolve(A,X,X).
3240: Most users should usually employ the simplified KSP interface for linear solvers
3241: instead of working directly with matrix algebra routines such as this.
3242: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3243: at a time.
3245: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3246: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3248: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
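   Example of Usage (a minimal sequential sketch; F is a factored matrix and n and nrhs are illustrative sizes):
.vb
     Mat B,X;
     MatCreateSeqDense(PETSC_COMM_SELF,n,nrhs,NULL,&B);
     MatCreateSeqDense(PETSC_COMM_SELF,n,nrhs,NULL,&X);
     /* ... fill B with MatSetValues() and assemble it ... */
     MatMatSolve(F,B,X);         /* each column of X solves with the corresponding column of B */
.ve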
3250: Level: developer
3252: Concepts: matrices^triangular solves
3254: .seealso: MatMatSolveAdd(), MatMatSolveTranspose(), MatMatSolveTransposeAdd(), MatLUFactor(), MatCholeskyFactor()
3255: @*/
3256: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3257: {
3267: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3268: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3269: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3270: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3271: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3272: if (!A->rmap->N && !A->cmap->N) return(0);
3273: MatCheckPreallocated(A,1);
3275: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3276: if (!A->ops->matsolve) {
3277: PetscInfo1(A,"Mat type %s using basic MatMatSolve",((PetscObject)A)->type_name);
3278: MatMatSolve_Basic(A,B,X);
3279: } else {
3280: (*A->ops->matsolve)(A,B,X);
3281: }
3282: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3283: PetscObjectStateIncrease((PetscObject)X);
3284: return(0);
3285: }
3290: /*@
3291: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3292: U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.
3294: Neighbor-wise Collective on Mat and Vec
3296: Input Parameters:
3297: + mat - the factored matrix
3298: - b - the right-hand-side vector
3300: Output Parameter:
3301: . x - the result vector
3303: Notes:
3304: MatSolve() should be used for most applications, as it performs
3305: a forward solve followed by a backward solve.
3307: The vectors b and x cannot be the same, i.e., one cannot
3308: call MatForwardSolve(A,x,x).
3310: For matrices in seqsbaij format with block size larger than 1,
3311: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3312: MatForwardSolve() solves U^T*D y = b, and
3313: MatBackwardSolve() solves U x = y.
3314: Thus they do not provide a symmetric preconditioner.
3316: Most users should employ the simplified KSP interface for linear solvers
3317: instead of working directly with matrix algebra routines such as this.
3318: See, e.g., KSPCreate().
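   Example of Usage (a minimal sketch; F is an LU-factored matrix and b, y, x are existing compatible vectors):
.vb
     MatForwardSolve(F,b,y);     /* y solves L y = b */
     MatBackwardSolve(F,y,x);    /* x solves U x = y, hence A x = b */
.ve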
3320: Level: developer
3322: Concepts: matrices^forward solves
3324: .seealso: MatSolve(), MatBackwardSolve()
3325: @*/
3326: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3327: {
3337: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3338: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3339: if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3340: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3341: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3342: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3343: MatCheckPreallocated(mat,1);
3344: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3345: (*mat->ops->forwardsolve)(mat,b,x);
3346: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3347: PetscObjectStateIncrease((PetscObject)x);
3348: return(0);
3349: }
3353: /*@
3354: MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3355: D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.
3357: Neighbor-wise Collective on Mat and Vec
3359: Input Parameters:
3360: + mat - the factored matrix
3361: - b - the right-hand-side vector
3363: Output Parameter:
3364: . x - the result vector
3366: Notes:
3367: MatSolve() should be used for most applications, as it performs
3368: a forward solve followed by a backward solve.
3370: The vectors b and x cannot be the same. I.e., one cannot
3371: call MatBackwardSolve(A,x,x).
3373: For matrices in seqsbaij format with block size larger than 1,
3374: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3375: MatForwardSolve() solves U^T*D y = b, and
3376: MatBackwardSolve() solves U x = y.
3377: Thus they do not provide a symmetric preconditioner.
3379: Most users should employ the simplified KSP interface for linear solvers
3380: instead of working directly with matrix algebra routines such as this.
3381: See, e.g., KSPCreate().
3383: Level: developer
3385: Concepts: matrices^backward solves
3387: .seealso: MatSolve(), MatForwardSolve()
3388: @*/
3389: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3390: {
3400: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3401: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3402: if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3403: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3404: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3405: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3406: MatCheckPreallocated(mat,1);
3408: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3409: (*mat->ops->backwardsolve)(mat,b,x);
3410: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3411: PetscObjectStateIncrease((PetscObject)x);
3412: return(0);
3413: }
3417: /*@
3418: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3420: Neighbor-wise Collective on Mat and Vec
3422: Input Parameters:
3423: + mat - the factored matrix
3424: . b - the right-hand-side vector
3425: - y - the vector to be added to
3427: Output Parameter:
3428: . x - the result vector
3430: Notes:
3431: The vectors b and x cannot be the same. I.e., one cannot
3432: call MatSolveAdd(A,x,y,x).
3434: Most users should employ the simplified KSP interface for linear solvers
3435: instead of working directly with matrix algebra routines such as this.
3436: See, e.g., KSPCreate().
3438: Level: developer
3440: Concepts: matrices^triangular solves
3442: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3443: @*/
3444: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3445: {
3446: PetscScalar one = 1.0;
3447: Vec tmp;
3459: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3460: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3461: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3462: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3463: if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3464: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3465: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3466: MatCheckPreallocated(mat,1);
3468: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3469: if (mat->ops->solveadd) {
3470: (*mat->ops->solveadd)(mat,b,y,x);
3471: } else {
3472: /* do the solve then the add manually */
3473: if (x != y) {
3474: MatSolve(mat,b,x);
3475: VecAXPY(x,one,y);
3476: } else {
3477: VecDuplicate(x,&tmp);
3478: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3479: VecCopy(x,tmp);
3480: MatSolve(mat,b,x);
3481: VecAXPY(x,one,tmp);
3482: VecDestroy(&tmp);
3483: }
3484: }
3485: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3486: PetscObjectStateIncrease((PetscObject)x);
3487: return(0);
3488: }
3492: /*@
3493: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3495: Neighbor-wise Collective on Mat and Vec
3497: Input Parameters:
3498: + mat - the factored matrix
3499: - b - the right-hand-side vector
3501: Output Parameter:
3502: . x - the result vector
3504: Notes:
3505: The vectors b and x cannot be the same. I.e., one cannot
3506: call MatSolveTranspose(A,x,x).
3508: Most users should employ the simplified KSP interface for linear solvers
3509: instead of working directly with matrix algebra routines such as this.
3510: See, e.g., KSPCreate().
3512: Level: developer
3514: Concepts: matrices^triangular solves
3516: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3517: @*/
3518: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3519: {
3529: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3530: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3531: if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3532: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3533: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3534: MatCheckPreallocated(mat,1);
3535: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3536: (*mat->ops->solvetranspose)(mat,b,x);
3537: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3538: PetscObjectStateIncrease((PetscObject)x);
3539: return(0);
3540: }
3544: /*@
3545: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3546: factored matrix.
3548: Neighbor-wise Collective on Mat and Vec
3550: Input Parameters:
3551: + mat - the factored matrix
3552: . b - the right-hand-side vector
3553: - y - the vector to be added to
3555: Output Parameter:
3556: . x - the result vector
3558: Notes:
3559: The vectors b and x cannot be the same. I.e., one cannot
3560: call MatSolveTransposeAdd(A,x,y,x).
3562: Most users should employ the simplified KSP interface for linear solvers
3563: instead of working directly with matrix algebra routines such as this.
3564: See, e.g., KSPCreate().
3566: Level: developer
3568: Concepts: matrices^triangular solves
3570: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3571: @*/
3572: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3573: {
3574: PetscScalar one = 1.0;
3576: Vec tmp;
3587: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3588: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3589: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3590: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3591: if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3592: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3593: MatCheckPreallocated(mat,1);
3595: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3596: if (mat->ops->solvetransposeadd) {
3597: (*mat->ops->solvetransposeadd)(mat,b,y,x);
3598: } else {
3599: /* do the solve then the add manually */
3600: if (x != y) {
3601: MatSolveTranspose(mat,b,x);
3602: VecAXPY(x,one,y);
3603: } else {
3604: VecDuplicate(x,&tmp);
3605: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3606: VecCopy(x,tmp);
3607: MatSolveTranspose(mat,b,x);
3608: VecAXPY(x,one,tmp);
3609: VecDestroy(&tmp);
3610: }
3611: }
3612: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3613: PetscObjectStateIncrease((PetscObject)x);
3614: return(0);
3615: }
3616: /* ----------------------------------------------------------------*/
3620: /*@
3621: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3623: Neighbor-wise Collective on Mat and Vec
3625: Input Parameters:
3626: + mat - the matrix
3627: . b - the right hand side
3628: . omega - the relaxation factor
3629: . flag - flag indicating the type of SOR (see below)
3630: . shift - diagonal shift
3631: . its - the number of iterations
3632: - lits - the number of local iterations
3634: Output Parameters:
3635: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3637: SOR Flags:
3638: . SOR_FORWARD_SWEEP - forward SOR
3639: . SOR_BACKWARD_SWEEP - backward SOR
3640: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3641: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3642: . SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3643: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3644: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3645: upper/lower triangular part of matrix to
3646: vector (with omega)
3647: . SOR_ZERO_INITIAL_GUESS - zero initial guess
3649: Notes:
3650: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3651: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3652: on each processor.
3654: Application programmers will not generally use MatSOR() directly,
3655: but instead will employ the KSP/PC interface.
3657: Notes: For BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing; otherwise it does a pointwise smoothing
3659: Notes for Advanced Users:
3660: The flags are implemented as bitwise inclusive or operations.
3661: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3662: to specify a zero initial guess for SSOR.
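   For example, the following sketch applies one SSOR sweep with omega = 1, no diagonal shift, and a zero
   initial guess (mat, b, and x are assumed to be an assembled matrix and compatible vectors):
.vb
      MatSOR(mat,b,1.0,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP),0.0,1,1,x);
.ve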
3664: Most users should employ the simplified KSP interface for linear solvers
3665: instead of working directly with matrix algebra routines such as this.
3666: See, e.g., KSPCreate().
3668: Vectors x and b CANNOT be the same
3670: Developer Note: We should add block SOR support for AIJ matrices with block size set to greater than one and no inodes
3672: Level: developer
3674: Concepts: matrices^relaxation
3675: Concepts: matrices^SOR
3676: Concepts: matrices^Gauss-Seidel
3678: @*/
3679: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3680: {
3690: if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3691: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3692: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3693: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3694: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3695: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3696: if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3697: if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3698: if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");
3700: MatCheckPreallocated(mat,1);
3701: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3702: (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3703: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3704: PetscObjectStateIncrease((PetscObject)x);
3705: return(0);
3706: }
3710: /*
3711: Default matrix copy routine.
3712: */
3713: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3714: {
3715: PetscErrorCode ierr;
3716: PetscInt i,rstart = 0,rend = 0,nz;
3717: const PetscInt *cwork;
3718: const PetscScalar *vwork;
3721: if (B->assembled) {
3722: MatZeroEntries(B);
3723: }
3724: MatGetOwnershipRange(A,&rstart,&rend);
3725: for (i=rstart; i<rend; i++) {
3726: MatGetRow(A,i,&nz,&cwork,&vwork);
3727: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3728: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3729: }
3730: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3731: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3732: PetscObjectStateIncrease((PetscObject)B);
3733: return(0);
3734: }
3738: /*@
3739: MatCopy - Copies a matrix to another matrix.
3741: Collective on Mat
3743: Input Parameters:
3744: + A - the matrix
3745: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3747: Output Parameter:
3748: . B - where the copy is put
3750: Notes:
3751: If you use SAME_NONZERO_PATTERN then the two matrices had better have the
3752: same nonzero pattern or the routine will crash.
3754: MatCopy() copies the matrix entries of a matrix to another existing
3755: matrix (after first zeroing the second matrix). A related routine is
3756: MatConvert(), which first creates a new matrix and then copies the data.
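   Example usage (a sketch; here B is created with MatDuplicate() so it is guaranteed to have the same
   nonzero pattern as A):
.vb
      Mat B;
      MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);
      MatCopy(A,B,SAME_NONZERO_PATTERN);
.ve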
3758: Level: intermediate
3760: Concepts: matrices^copying
3762: .seealso: MatConvert(), MatDuplicate()
3764: @*/
3765: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3766: {
3768: PetscInt i;
3776: MatCheckPreallocated(B,2);
3777: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3778: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3779: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3780: MatCheckPreallocated(A,1);
3782: PetscLogEventBegin(MAT_Copy,A,B,0,0);
3783: if (A->ops->copy) {
3784: (*A->ops->copy)(A,B,str);
3785: } else { /* generic conversion */
3786: MatCopy_Basic(A,B,str);
3787: }
3789: B->stencil.dim = A->stencil.dim;
3790: B->stencil.noc = A->stencil.noc;
3791: for (i=0; i<=A->stencil.dim; i++) {
3792: B->stencil.dims[i] = A->stencil.dims[i];
3793: B->stencil.starts[i] = A->stencil.starts[i];
3794: }
3796: PetscLogEventEnd(MAT_Copy,A,B,0,0);
3797: PetscObjectStateIncrease((PetscObject)B);
3798: return(0);
3799: }
3803: /*@C
3804: MatConvert - Converts a matrix to another matrix, either of the same
3805: or different type.
3807: Collective on Mat
3809: Input Parameters:
3810: + mat - the matrix
3811: . newtype - new matrix type. Use MATSAME to create a new matrix of the
3812: same type as the original matrix.
3813: - reuse - denotes if the destination matrix is to be created or reused. Currently
3814: MAT_REUSE_MATRIX is only supported for in-place conversion; otherwise use
3815: MAT_INITIAL_MATRIX.
3817: Output Parameter:
3818: . M - pointer to place new matrix
3820: Notes:
3821: MatConvert() first creates a new matrix and then copies the data from
3822: the first matrix. A related routine is MatCopy(), which copies the matrix
3823: entries of one matrix to another already existing matrix context.
3825: Cannot be used to convert a sequential matrix to parallel or parallel to sequential, since
3826: the MPI communicator of the generated matrix is always the same as the communicator
3827: of the input matrix.
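   Example usage (a sketch; MATDENSE is just one possible target type):
.vb
      Mat B;
      MatConvert(mat,MATDENSE,MAT_INITIAL_MATRIX,&B);
.ve
   Passing MAT_REUSE_MATRIX together with &mat (the input matrix itself) performs the in-place conversion
   mentioned above.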
3829: Level: intermediate
3831: Concepts: matrices^converting between storage formats
3833: .seealso: MatCopy(), MatDuplicate()
3834: @*/
3835: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
3836: {
3838: PetscBool sametype,issame,flg;
3839: char convname[256],mtype[256];
3840: Mat B;
3846: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3847: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3848: MatCheckPreallocated(mat,1);
3849: MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
3851: PetscOptionsGetString(((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
3852: if (flg) {
3853: newtype = mtype;
3854: }
3855: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
3856: PetscStrcmp(newtype,"same",&issame);
3857: if ((reuse == MAT_REUSE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX only supported for in-place conversion currently");
3859: if ((reuse == MAT_REUSE_MATRIX) && (issame || sametype)) return(0);
3861: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
3862: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
3863: } else {
3864: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
3865: const char *prefix[3] = {"seq","mpi",""};
3866: PetscInt i;
3867: /*
3868: Order of precedence:
3869: 1) See if a specialized converter is known to the current matrix.
3870: 2) See if a specialized converter is known to the desired matrix class.
3871: 3) See if a good general converter is registered for the desired class
3872: (as of 6/27/03 only MATMPIADJ falls into this category).
3873: 4) See if a good general converter is known for the current matrix.
3874: 5) Use a really basic converter.
3875: */
3877: /* 1) See if a specialized converter is known to the current matrix and the desired class */
3878: for (i=0; i<3; i++) {
3879: PetscStrcpy(convname,"MatConvert_");
3880: PetscStrcat(convname,((PetscObject)mat)->type_name);
3881: PetscStrcat(convname,"_");
3882: PetscStrcat(convname,prefix[i]);
3883: PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
3884: PetscStrcat(convname,"_C");
3885: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
3886: if (conv) goto foundconv;
3887: }
3889: /* 2) See if a specialized converter is known to the desired matrix class. */
3890: MatCreate(PetscObjectComm((PetscObject)mat),&B);
3891: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
3892: MatSetType(B,newtype);
3893: for (i=0; i<3; i++) {
3894: PetscStrcpy(convname,"MatConvert_");
3895: PetscStrcat(convname,((PetscObject)mat)->type_name);
3896: PetscStrcat(convname,"_");
3897: PetscStrcat(convname,prefix[i]);
3898: PetscStrcat(convname,newtype);
3899: PetscStrcat(convname,"_C");
3900: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
3901: if (conv) {
3902: MatDestroy(&B);
3903: goto foundconv;
3904: }
3905: }
3907: /* 3) See if a good general converter is registered for the desired class */
3908: conv = B->ops->convertfrom;
3909: MatDestroy(&B);
3910: if (conv) goto foundconv;
3912: /* 4) See if a good general converter is known for the current matrix */
3913: if (mat->ops->convert) {
3914: conv = mat->ops->convert;
3915: }
3916: if (conv) goto foundconv;
3918: /* 5) Use a really basic converter. */
3919: conv = MatConvert_Basic;
3921: foundconv:
3922: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
3923: (*conv)(mat,newtype,reuse,M);
3924: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
3925: }
3926: PetscObjectStateIncrease((PetscObject)*M);
3928: /* Copy Mat options */
3929: if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
3930: if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
3931: return(0);
3932: }
3936: /*@C
3937: MatFactorGetSolverPackage - Returns name of the package providing the factorization routines
3939: Not Collective
3941: Input Parameter:
3942: . mat - the matrix, must be a factored matrix
3944: Output Parameter:
3945: . type - the string name of the package (do not free this string)
3947: Notes:
3948: In Fortran you pass in an empty string and the package name will be copied into it.
3949: (Make sure the string is long enough)
3951: Level: intermediate
3953: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
3954: @*/
3955: PetscErrorCode MatFactorGetSolverPackage(Mat mat, const MatSolverPackage *type)
3956: {
3957: PetscErrorCode ierr, (*conv)(Mat,const MatSolverPackage*);
3962: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
3963: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverPackage_C",&conv);
3964: if (!conv) {
3965: *type = MATSOLVERPETSC;
3966: } else {
3967: (*conv)(mat,type);
3968: }
3969: return(0);
3970: }
3974: /*@C
3975: MatGetFactor - Returns a matrix suitable to calls to MatXXFactorSymbolic()
3977: Collective on Mat
3979: Input Parameters:
3980: + mat - the matrix
3981: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
3982: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
3984: Output Parameters:
3985: . f - the factor matrix used with MatXXFactorSymbolic() calls
3987: Notes:
3988: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
3989: such as pastix, superlu, mumps etc.
3991: PETSc must have been configured to use the external solver, using the option --download-package
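   Example usage for an LU factorization with the built-in PETSc factorization (a sketch; A is assumed to be
   an assembled matrix and b, x compatible vectors):
.vb
      Mat           F;
      IS            rperm,cperm;
      MatFactorInfo info;
      MatGetOrdering(A,MATORDERINGNATURAL,&rperm,&cperm);
      MatFactorInfoInitialize(&info);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
      MatLUFactorSymbolic(F,A,rperm,cperm,&info);
      MatLUFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
.ve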
3993: Level: intermediate
3995: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
3996: @*/
3997: PetscErrorCode MatGetFactor(Mat mat, const MatSolverPackage type,MatFactorType ftype,Mat *f)
3998: {
3999: PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4000: char convname[256];
4006: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4007: MatCheckPreallocated(mat,1);
4009: PetscStrcpy(convname,"MatGetFactor_");
4010: PetscStrcat(convname,type);
4011: PetscStrcat(convname,"_C");
4012: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4013: if (!conv) {
4014: PetscBool flag;
4015: MPI_Comm comm;
4017: PetscObjectGetComm((PetscObject)mat,&comm);
4018: PetscStrcasecmp(MATSOLVERPETSC,type,&flag);
4019: if (flag) SETERRQ2(comm,PETSC_ERR_SUP,"Matrix format %s does not have a built-in PETSc %s",((PetscObject)mat)->type_name,MatFactorTypes[ftype]);
4020: else SETERRQ4(comm,PETSC_ERR_SUP,"Matrix format %s does not have a solver package %s for %s. Perhaps you must ./configure with --download-%s",((PetscObject)mat)->type_name,type,MatFactorTypes[ftype],type);
4021: }
4022: (*conv)(mat,ftype,f);
4023: return(0);
4024: }
4028: /*@C
4029: MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular solver package and factor type
4031: Not Collective
4033: Input Parameters:
4034: + mat - the matrix
4035: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4036: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4038: Output Parameter:
4039: . flg - PETSC_TRUE if the factorization is available
4041: Notes:
4042: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4043: such as pastix, superlu, mumps etc.
4045: PETSc must have been configured to use the external solver, using the option --download-package
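   Example usage (a sketch; the package name "superlu" is only an example, any registered solver package can be tested):
.vb
      PetscBool flg;
      Mat       F;
      MatGetFactorAvailable(A,"superlu",MAT_FACTOR_LU,&flg);
      if (flg) {
        MatGetFactor(A,"superlu",MAT_FACTOR_LU,&F);
      }
.ve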
4047: Level: intermediate
4049: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4050: @*/
4051: PetscErrorCode MatGetFactorAvailable(Mat mat, const MatSolverPackage type,MatFactorType ftype,PetscBool *flg)
4052: {
4053: PetscErrorCode ierr, (*conv)(Mat,MatFactorType,PetscBool*);
4054: char convname[256];
4060: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4061: MatCheckPreallocated(mat,1);
4063: PetscStrcpy(convname,"MatGetFactorAvailable_");
4064: PetscStrcat(convname,type);
4065: PetscStrcat(convname,"_C");
4066: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4067: if (!conv) {
4068: *flg = PETSC_FALSE;
4069: } else {
4070: (*conv)(mat,ftype,flg);
4071: }
4072: return(0);
4073: }
4078: /*@
4079: MatDuplicate - Duplicates a matrix including the non-zero structure.
4081: Collective on Mat
4083: Input Parameters:
4084: + mat - the matrix
4085: - op - either MAT_DO_NOT_COPY_VALUES or MAT_COPY_VALUES, which causes the numerical values in the matrix to be copied,
4086: or MAT_SHARE_NONZERO_PATTERN to share the nonzero pattern with the previous matrix and not copy the values.
4088: Output Parameter:
4089: . M - pointer to place new matrix
4091: Level: intermediate
4093: Concepts: matrices^duplicating
4095: Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
4097: .seealso: MatCopy(), MatConvert()
4098: @*/
4099: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4100: {
4102: Mat B;
4103: PetscInt i;
4109: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4110: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4111: MatCheckPreallocated(mat,1);
4113: *M = 0;
4114: if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4115: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4116: (*mat->ops->duplicate)(mat,op,M);
4117: B = *M;
4119: B->stencil.dim = mat->stencil.dim;
4120: B->stencil.noc = mat->stencil.noc;
4121: for (i=0; i<=mat->stencil.dim; i++) {
4122: B->stencil.dims[i] = mat->stencil.dims[i];
4123: B->stencil.starts[i] = mat->stencil.starts[i];
4124: }
4126: B->nooffproczerorows = mat->nooffproczerorows;
4127: B->nooffprocentries = mat->nooffprocentries;
4129: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4130: PetscObjectStateIncrease((PetscObject)B);
4131: return(0);
4132: }
4136: /*@
4137: MatGetDiagonal - Gets the diagonal of a matrix.
4139: Logically Collective on Mat and Vec
4141: Input Parameters:
4142: + mat - the matrix
4143: - v - the vector for storing the diagonal
4145: Output Parameter:
4146: . v - the diagonal of the matrix
4148: Level: intermediate
4150: Note:
4151: Currently only correct in parallel for square matrices.
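   Example usage (a sketch; MatGetVecs() is one convenient way to obtain a vector with the correct layout):
.vb
      Vec diag;
      MatGetVecs(mat,NULL,&diag);
      MatGetDiagonal(mat,diag);
.ve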
4153: Concepts: matrices^accessing diagonals
4155: .seealso: MatGetRow(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs()
4156: @*/
4157: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4158: {
4165: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4166: if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4167: MatCheckPreallocated(mat,1);
4169: (*mat->ops->getdiagonal)(mat,v);
4170: PetscObjectStateIncrease((PetscObject)v);
4171: return(0);
4172: }
4176: /*@
4177: MatGetRowMin - Gets the minimum value (of the real part) of each
4178: row of the matrix
4180: Logically Collective on Mat and Vec
4182: Input Parameters:
4183: . mat - the matrix
4185: Output Parameter:
4186: + v - the vector for storing the minimums
4187: - idx - the indices of the column found for each row (optional)
4189: Level: intermediate
4191: Notes: The results of this call are the same as if one converted the matrix to dense format
4192: and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).
4194: This code is only implemented for a couple of matrix formats.
4196: Concepts: matrices^getting row minimums
4198: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(),
4199: MatGetRowMax()
4200: @*/
4201: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4202: {
4209: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4210: if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4211: MatCheckPreallocated(mat,1);
4213: (*mat->ops->getrowmin)(mat,v,idx);
4214: PetscObjectStateIncrease((PetscObject)v);
4215: return(0);
4216: }
4220: /*@
4221: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4222: row of the matrix
4224: Logically Collective on Mat and Vec
4226: Input Parameters:
4227: . mat - the matrix
4229: Output Parameter:
4230: + v - the vector for storing the minimums
4231: - idx - the indices of the column found for each row (or NULL if not needed)
4233: Level: intermediate
4235: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4236: row is 0 (the first column).
4238: This code is only implemented for a couple of matrix formats.
4240: Concepts: matrices^getting row minimums
4242: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4243: @*/
4244: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4245: {
4252: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4253: if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4254: MatCheckPreallocated(mat,1);
4255: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4257: (*mat->ops->getrowminabs)(mat,v,idx);
4258: PetscObjectStateIncrease((PetscObject)v);
4259: return(0);
4260: }
4264: /*@
4265: MatGetRowMax - Gets the maximum value (of the real part) of each
4266: row of the matrix
4268: Logically Collective on Mat and Vec
4270: Input Parameters:
4271: . mat - the matrix
4273: Output Parameter:
4274: + v - the vector for storing the maximums
4275: - idx - the indices of the column found for each row (optional)
4277: Level: intermediate
4279: Notes: The results of this call are the same as if one converted the matrix to dense format
4280: and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).
4282: This code is only implemented for a couple of matrix formats.
4284: Concepts: matrices^getting row maximums
4286: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4287: @*/
4288: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4289: {
4296: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4297: if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4298: MatCheckPreallocated(mat,1);
4300: (*mat->ops->getrowmax)(mat,v,idx);
4301: PetscObjectStateIncrease((PetscObject)v);
4302: return(0);
4303: }
4307: /*@
4308: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4309: row of the matrix
4311: Logically Collective on Mat and Vec
4313: Input Parameters:
4314: . mat - the matrix
4316: Output Parameter:
4317: + v - the vector for storing the maximums
4318: - idx - the indices of the column found for each row (or NULL if not needed)
4320: Level: intermediate
4322: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4323: row is 0 (the first column).
4325: This code is only implemented for a couple of matrix formats.
4327: Concepts: matrices^getting row maximums
4329: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4330: @*/
4331: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4332: {
4339: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4340: if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4341: MatCheckPreallocated(mat,1);
4342: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4344: (*mat->ops->getrowmaxabs)(mat,v,idx);
4345: PetscObjectStateIncrease((PetscObject)v);
4346: return(0);
4347: }
4351: /*@
4352: MatGetRowSum - Gets the sum of each row of the matrix
4354: Logically Collective on Mat and Vec
4356: Input Parameters:
4357: . mat - the matrix
4359: Output Parameter:
4360: . v - the vector for storing the sum of rows
4362: Level: intermediate
4364: Notes: This code is slow since it is not currently specialized for different formats
4366: Concepts: matrices^getting row sums
4368: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4369: @*/
4370: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4371: {
4372: PetscInt start = 0, end = 0, row;
4373: PetscScalar *array;
4380: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4381: MatCheckPreallocated(mat,1);
4382: MatGetOwnershipRange(mat, &start, &end);
4383: VecGetArray(v, &array);
4384: for (row = start; row < end; ++row) {
4385: PetscInt ncols, col;
4386: const PetscInt *cols;
4387: const PetscScalar *vals;
4389: array[row - start] = 0.0;
4391: MatGetRow(mat, row, &ncols, &cols, &vals);
4392: for (col = 0; col < ncols; col++) {
4393: array[row - start] += vals[col];
4394: }
4395: MatRestoreRow(mat, row, &ncols, &cols, &vals);
4396: }
4397: VecRestoreArray(v, &array);
4398: PetscObjectStateIncrease((PetscObject) v);
4399: return(0);
4400: }
4404: /*@
4405: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4407: Collective on Mat
4409: Input Parameters:
4410: + mat - the matrix to transpose
4411: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4413: Output Parameters:
4414: . B - the transpose
4416: Notes:
4417: If you pass in &mat for B the transpose will be done in place, for example MatTranspose(mat,MAT_REUSE_MATRIX,&mat);
4419: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
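   Example usage (a sketch):
.vb
      Mat At;
      MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* creates a new matrix At = A^T  */
      MatTranspose(A,MAT_REUSE_MATRIX,&A);      /* in place, A is replaced by A^T */
.ve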
4421: Level: intermediate
4423: Concepts: matrices^transposing
4425: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4426: @*/
4427: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4428: {
4434: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4435: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4436: if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4437: MatCheckPreallocated(mat,1);
4439: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4440: (*mat->ops->transpose)(mat,reuse,B);
4441: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4442: if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4443: return(0);
4444: }
4448: /*@
4449: MatIsTranspose - Test whether a matrix is another one's transpose,
4450: or its own, in which case it tests symmetry.
4452: Collective on Mat
4454: Input Parameters:
4455: + A - the matrix to test
4456: - B - the matrix to test against, this can equal the first parameter
4458: Output Parameters:
4459: . flg - the result
4461: Notes:
4462: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4463: has a running time of the order of the number of nonzeros; the parallel
4464: test involves parallel copies of the block-offdiagonal parts of the matrix.
4466: Level: intermediate
4468: Concepts: matrices^transposing, matrix^symmetry
4470: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4471: @*/
4472: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4473: {
4474: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4480: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4481: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4482: *flg = PETSC_FALSE;
4483: if (f && g) {
4484: if (f == g) {
4485: (*f)(A,B,tol,flg);
4486: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4487: } else {
4488: MatType mattype;
4489: if (!f) {
4490: MatGetType(A,&mattype);
4491: } else {
4492: MatGetType(B,&mattype);
4493: }
4494: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4495: }
4496: return(0);
4497: }
4501: /*@
4502: MatHermitianTranspose - Computes the complex conjugate (Hermitian) transpose of a matrix, either in place or out of place.
4504: Collective on Mat
4506: Input Parameters:
4507: + mat - the matrix to transpose and complex conjugate
4508: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (to store the result in the provided B)
4510: Output Parameters:
4511: . B - the Hermitian transpose
4513: Notes:
4514: If you pass in &mat for B the Hermitian transpose will be done in place
4516: Level: intermediate
4518: Concepts: matrices^transposing, complex conjugate
4520: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4521: @*/
4522: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4523: {
4527: MatTranspose(mat,reuse,B);
4528: #if defined(PETSC_USE_COMPLEX)
4529: MatConjugate(*B);
4530: #endif
4531: return(0);
4532: }
4536: /*@
4537: MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.
4539: Collective on Mat
4541: Input Parameters:
4542: + A - the matrix to test
4543: - B - the matrix to test against, this can equal the first parameter
4545: Output Parameters:
4546: . flg - the result
4548: Notes:
4549: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4550: has a running time of the order of the number of nonzeros; the parallel
4551: test involves parallel copies of the block-offdiagonal parts of the matrix.
4553: Level: intermediate
4555: Concepts: matrices^transposing, matrix^symmetry
4557: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4558: @*/
4559: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4560: {
4561: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4567: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4568: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4569: if (f && g) {
4570: if (f==g) {
4571: (*f)(A,B,tol,flg);
4572: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4573: }
4574: return(0);
4575: }
4579: /*@
4580: MatPermute - Creates a new matrix with rows and columns permuted from the
4581: original.
4583: Collective on Mat
4585: Input Parameters:
4586: + mat - the matrix to permute
4587: . row - row permutation, each processor supplies only the permutation for its rows
4588: - col - column permutation, each processor supplies only the permutation for its columns
4590: Output Parameters:
4591: . B - the permuted matrix
4593: Level: advanced
4595: Note:
4596: The index sets map from row/col of permuted matrix to row/col of original matrix.
4597: The index sets should be on the same communicator as Mat and have the same local sizes.
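   One common use, sketched below, is to permute a matrix with an ordering computed by MatGetOrdering()
   (MATORDERINGRCM is only an example ordering):
.vb
      IS  rperm,cperm;
      Mat B;
      MatGetOrdering(mat,MATORDERINGRCM,&rperm,&cperm);
      MatPermute(mat,rperm,cperm,&B);
      ISDestroy(&rperm);
      ISDestroy(&cperm);
.ve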
4599: Concepts: matrices^permuting
4601: .seealso: MatGetOrdering(), ISAllGather()
4603: @*/
4604: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4605: {
4614: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4615: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4616: if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4617: MatCheckPreallocated(mat,1);
4619: (*mat->ops->permute)(mat,row,col,B);
4620: PetscObjectStateIncrease((PetscObject)*B);
4621: return(0);
4622: }
4626: /*@
4627: MatEqual - Compares two matrices.
4629: Collective on Mat
4631: Input Parameters:
4632: + A - the first matrix
4633: - B - the second matrix
4635: Output Parameter:
4636: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
4638: Level: intermediate
4640: Concepts: matrices^equality between
4641: @*/
4642: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
4643: {
4653: MatCheckPreallocated(B,2);
4654: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4655: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4656: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4657: if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4658: if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4659: if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4660: MatCheckPreallocated(A,1);
4662: (*A->ops->equal)(A,B,flg);
4663: return(0);
4664: }
4668: /*@
4669: MatDiagonalScale - Scales a matrix on the left and right by diagonal
4670: matrices that are stored as vectors. Either of the two scaling
4671: matrices can be NULL.
4673: Collective on Mat
4675: Input Parameters:
4676: + mat - the matrix to be scaled
4677: . l - the left scaling vector (or NULL)
4678: - r - the right scaling vector (or NULL)
4680: Notes:
4681: MatDiagonalScale() computes A = LAR, where
4682: L = a diagonal matrix (stored as a vector), R = a diagonal matrix (stored as a vector).
4683: The L scales the rows of the matrix, the R scales the columns of the matrix.
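   For example, the following sketch scales each row of a square matrix by the inverse of its diagonal entry
   (it assumes all diagonal entries are nonzero and uses MatGetVecs() to obtain a compatible work vector):
.vb
      Vec d;
      MatGetVecs(mat,NULL,&d);
      MatGetDiagonal(mat,d);
      VecReciprocal(d);
      MatDiagonalScale(mat,d,NULL);   /* row i is scaled by 1/mat(i,i), columns are untouched */
      VecDestroy(&d);
.ve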
4685: Level: intermediate
4687: Concepts: matrices^diagonal scaling
4688: Concepts: diagonal scaling of matrices
4690: .seealso: MatScale()
4691: @*/
4692: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4693: {
4699: if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4702: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4703: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4704: MatCheckPreallocated(mat,1);
4706: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4707: (*mat->ops->diagonalscale)(mat,l,r);
4708: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4709: PetscObjectStateIncrease((PetscObject)mat);
4710: #if defined(PETSC_HAVE_CUSP)
4711: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4712: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4713: }
4714: #endif
4715: #if defined(PETSC_HAVE_VIENNACL)
4716: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4717: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4718: }
4719: #endif
4720: return(0);
4721: }
4725: /*@
4726: MatScale - Scales all elements of a matrix by a given number.
4728: Logically Collective on Mat
4730: Input Parameters:
4731: + mat - the matrix to be scaled
4732: - a - the scaling value
4734: Output Parameter:
4735: . mat - the scaled matrix
4737: Level: intermediate
4739: Concepts: matrices^scaling all entries
4741: .seealso: MatDiagonalScale()
4742: @*/
4743: PetscErrorCode MatScale(Mat mat,PetscScalar a)
4744: {
4750: if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4751: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4752: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4754: MatCheckPreallocated(mat,1);
4756: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4757: if (a != (PetscScalar)1.0) {
4758: (*mat->ops->scale)(mat,a);
4759: PetscObjectStateIncrease((PetscObject)mat);
4760: }
4761: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4762: #if defined(PETSC_HAVE_CUSP)
4763: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4764: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4765: }
4766: #endif
4767: #if defined(PETSC_HAVE_VIENNACL)
4768: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4769: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4770: }
4771: #endif
4772: return(0);
4773: }
4777: /*@
4778: MatNorm - Calculates various norms of a matrix.
4780: Collective on Mat
4782: Input Parameters:
4783: + mat - the matrix
4784: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
4786: Output Parameters:
4787: . nrm - the resulting norm
4789: Level: intermediate
4791: Concepts: matrices^norm
4792: Concepts: norm^of matrix
4793: @*/
4794: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
4795: {
4803: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4804: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4805: if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4806: MatCheckPreallocated(mat,1);
4808: (*mat->ops->norm)(mat,type,nrm);
4809: return(0);
4810: }
4812: /*
4813: This variable is used to prevent counting of MatAssemblyBegin() that
4814: are called from within a MatAssemblyEnd().
4815: */
4816: static PetscInt MatAssemblyEnd_InUse = 0;
4819: /*@
4820: MatAssemblyBegin - Begins assembling the matrix. This routine should
4821: be called after completing all calls to MatSetValues().
4823: Collective on Mat
4825: Input Parameters:
4826: + mat - the matrix
4827: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
4829: Notes:
4830: MatSetValues() generally caches the values. The matrix is ready to
4831: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
4832: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
4833: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
4834: using the matrix.
4836: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
4837: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
4838: a global collective operation requiring all processes that share the matrix.
4840: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
4841: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
4842: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
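   Typical usage, sketched below, flushes the assembly when switching from INSERT_VALUES to ADD_VALUES and
   finishes with MAT_FINAL_ASSEMBLY (i, j, and v are hypothetical index and value variables):
.vb
      PetscInt    i = 0, j = 0;
      PetscScalar v = 1.0;
      MatSetValues(mat,1,&i,1,&j,&v,INSERT_VALUES);
      MatAssemblyBegin(mat,MAT_FLUSH_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FLUSH_ASSEMBLY);
      MatSetValues(mat,1,&i,1,&j,&v,ADD_VALUES);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve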
4844: Level: beginner
4846: Concepts: matrices^assembling
4848: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
4849: @*/
4850: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
4851: {
4857: MatCheckPreallocated(mat,1);
4858: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
4859: if (mat->assembled) {
4860: mat->was_assembled = PETSC_TRUE;
4861: mat->assembled = PETSC_FALSE;
4862: }
4863: if (!MatAssemblyEnd_InUse) {
4864: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
4865: if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
4866: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
4867: } else if (mat->ops->assemblybegin) {
4868: (*mat->ops->assemblybegin)(mat,type);
4869: }
4870: return(0);
4871: }
4875: /*@
4876: MatAssembled - Indicates if a matrix has been assembled and is ready for
4877: use; for example, in matrix-vector product.
4879: Not Collective
4881: Input Parameter:
4882: . mat - the matrix
4884: Output Parameter:
4885: . assembled - PETSC_TRUE or PETSC_FALSE
4887: Level: advanced
4889: Concepts: matrices^assembled?
4891: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
4892: @*/
4893: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
4894: {
4899: *assembled = mat->assembled;
4900: return(0);
4901: }
4905: /*@
4906: MatAssemblyEnd - Completes assembling the matrix. This routine should
4907: be called after MatAssemblyBegin().
4909: Collective on Mat
4911: Input Parameters:
4912: + mat - the matrix
4913: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
4915: Options Database Keys:
4916: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
4917: . -mat_view ::ascii_info_detail - Prints more detailed info
4918: . -mat_view - Prints matrix in ASCII format
4919: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
4920: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
4921: . -display <name> - Sets display name (default is host)
4922: . -draw_pause <sec> - Sets number of seconds to pause after display
4923: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See the users manual)
4924: . -viewer_socket_machine <machine>
4925: . -viewer_socket_port <port>
4926: . -mat_view binary - save matrix to file in binary format
4927: - -viewer_binary_filename <name>
4929: Notes:
4930: MatSetValues() generally caches the values. The matrix is ready to
4931: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
4932: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
4933: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
4934: using the matrix.
4936: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
4937: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
4938: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
4940: Level: beginner
4942: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
4943: @*/
4944: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
4945: {
4946: PetscErrorCode ierr;
4947: static PetscInt inassm = 0;
4948: PetscBool flg = PETSC_FALSE;
4954: inassm++;
4955: MatAssemblyEnd_InUse++;
4956: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
4957: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
4958: if (mat->ops->assemblyend) {
4959: (*mat->ops->assemblyend)(mat,type);
4960: }
4961: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
4962: } else if (mat->ops->assemblyend) {
4963: (*mat->ops->assemblyend)(mat,type);
4964: }
4966: /* Flush assembly is not a true assembly */
4967: if (type != MAT_FLUSH_ASSEMBLY) {
4968: mat->assembled = PETSC_TRUE; mat->num_ass++;
4969: }
4970: mat->insertmode = NOT_SET_VALUES;
4971: MatAssemblyEnd_InUse--;
4972: PetscObjectStateIncrease((PetscObject)mat);
4973: if (!mat->symmetric_eternal) {
4974: mat->symmetric_set = PETSC_FALSE;
4975: mat->hermitian_set = PETSC_FALSE;
4976: mat->structurally_symmetric_set = PETSC_FALSE;
4977: }
4978: #if defined(PETSC_HAVE_CUSP)
4979: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4980: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4981: }
4982: #endif
4983: #if defined(PETSC_HAVE_VIENNACL)
4984: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4985: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4986: }
4987: #endif
4988: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
4989: MatViewFromOptions(mat,NULL,"-mat_view");
4991: if (mat->checksymmetryonassembly) {
4992: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
4993: if (flg) {
4994: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
4995: } else {
4996: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
4997: }
4998: }
4999: if (mat->nullsp && mat->checknullspaceonassembly) {
5000: MatNullSpaceTest(mat->nullsp,mat,NULL);
5001: }
5002: }
5003: inassm--;
5004: return(0);
5005: }
5009: /*@
5010: MatSetOption - Sets a parameter option for a matrix. Some options
5011: may be specific to certain storage formats. Some options
5012: determine how values will be inserted (or added). Sorted,
5013: row-oriented input will generally assemble the fastest. The default
5014: is row-oriented.
5016: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5018: Input Parameters:
5019: + mat - the matrix
5020: . option - the option, one of those listed below (and possibly others),
5021: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5023: Options Describing Matrix Structure:
5024: + MAT_SPD - symmetric positive definite
5025: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5026: . MAT_HERMITIAN - transpose is the complex conjugation
5027: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5028: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5029: you set to be kept with all future use of the matrix
5030: including after MatAssemblyBegin/End() which could
5031: potentially change the symmetry structure, i.e. you
5032: KNOW the matrix will ALWAYS have the property you set.
5035: Options For Use with MatSetValues():
5036: Insert a logically dense subblock, which can be
5037: . MAT_ROW_ORIENTED - row-oriented (default)
5039: Note these options reflect the data you pass in with MatSetValues(); it has
5040: nothing to do with how the data is stored internally in the matrix
5041: data structure.
5043: When (re)assembling a matrix, we can restrict the input for
5044: efficiency/debugging purposes. These options include
5045: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be
5046: allowed if they generate a new nonzero
5047: . MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5048: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5049: . MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5050: . MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5051: - MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5052: any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5053: performance for very large process counts.
5055: Notes:
5056: Some options are relevant only for particular matrix types and
5057: are thus ignored by others. Other options are not supported by
5058: certain matrix types and will generate an error message if set.
5060: If using a Fortran 77 module to compute a matrix, one may need to
5061: use the column-oriented option (or convert to the row-oriented
5062: format).
5064: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5065: that would generate a new entry in the nonzero structure is instead
5066: ignored. Thus, if memory has not already been allocated for this particular
5067: data, then the insertion is ignored. For dense matrices, in which
5068: the entire array is allocated, no entries are ever ignored.
5069: Set after the first MatAssemblyEnd()
5071: MAT_NEW_NONZERO_LOCATION_ERR indicates that any add or insertion
5072: that would generate a new entry in the nonzero structure instead produces
5073: an error. (Currently supported for AIJ and BAIJ formats only.)
5074: This is a useful flag when using SAME_NONZERO_PATTERN in calling
5075: KSPSetOperators() to ensure that the nonzero pattern truly does
5076: remain unchanged. Set after the first MatAssemblyEnd()
5078: MAT_NEW_NONZERO_ALLOCATION_ERR indicates that any add or insertion
5079: that would generate a new entry that has not been preallocated will
5080: instead produce an error. (Currently supported for AIJ and BAIJ formats
5081: only.) This is a useful flag when debugging matrix memory preallocation.
5083: MAT_IGNORE_OFF_PROC_ENTRIES indicates entries destined for
5084: other processors should be dropped, rather than stashed.
5085: This is useful if you know that the "owning" processor is also
5086: always generating the correct matrix entries, so that PETSc need
5087: not transfer duplicate entries generated on another processor.
5089: MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5090: searches during matrix assembly. When this flag is set, the hash table
5091: is created during the first Matrix Assembly. This hash table is
5092: used the next time through, during MatSetValues()/MatSetValuesBlocked()
5093: to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5094: should be used with MAT_USE_HASH_TABLE flag. This option is currently
5095: supported by MATMPIBAIJ format only.
5097: MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5098: are kept in the nonzero structure
5100: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5101: a zero location in the matrix
5103: MAT_USE_INODES - indicates using inode version of the code - works with AIJ and
5104: ROWBS matrix types
5106: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5107: zero row routines and thus improves performance for very large process counts.
5109: MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5110: part of the matrix (since they should match the upper triangular part).
5112: Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
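   Example usage (a sketch; which options are appropriate depends entirely on the application):
.vb
      MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE);
      MatSetOption(mat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
      MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);  /* typically set after the first MatAssemblyEnd() */
.ve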
5114: Level: intermediate
5116: Concepts: matrices^setting options
5118: .seealso: MatOption, Mat
5120: @*/
5121: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5122: {
5128: if (op > 0) {
5131: }
5133: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5134: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");
5136: switch (op) {
5137: case MAT_NO_OFF_PROC_ENTRIES:
5138: mat->nooffprocentries = flg;
5139: return(0);
5140: break;
5141: case MAT_NO_OFF_PROC_ZERO_ROWS:
5142: mat->nooffproczerorows = flg;
5143: return(0);
5144: break;
5145: case MAT_SPD:
5146: mat->spd_set = PETSC_TRUE;
5147: mat->spd = flg;
5148: if (flg) {
5149: mat->symmetric = PETSC_TRUE;
5150: mat->structurally_symmetric = PETSC_TRUE;
5151: mat->symmetric_set = PETSC_TRUE;
5152: mat->structurally_symmetric_set = PETSC_TRUE;
5153: }
5154: break;
5155: case MAT_SYMMETRIC:
5156: mat->symmetric = flg;
5157: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5158: mat->symmetric_set = PETSC_TRUE;
5159: mat->structurally_symmetric_set = flg;
5160: break;
5161: case MAT_HERMITIAN:
5162: mat->hermitian = flg;
5163: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5164: mat->hermitian_set = PETSC_TRUE;
5165: mat->structurally_symmetric_set = flg;
5166: break;
5167: case MAT_STRUCTURALLY_SYMMETRIC:
5168: mat->structurally_symmetric = flg;
5169: mat->structurally_symmetric_set = PETSC_TRUE;
5170: break;
5171: case MAT_SYMMETRY_ETERNAL:
5172: mat->symmetric_eternal = flg;
5173: break;
5174: default:
5175: break;
5176: }
5177: if (mat->ops->setoption) {
5178: (*mat->ops->setoption)(mat,op,flg);
5179: }
5180: return(0);
5181: }
5185: /*@
5186: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5187: this routine retains the old nonzero structure.
5189: Logically Collective on Mat
5191: Input Parameters:
5192: . mat - the matrix
5194: Level: intermediate
5196: Notes: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5197: See the Performance chapter of the users manual for information on preallocating matrices.
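   Example of Usage (a minimal sketch; A is assumed to be an already assembled matrix whose nonzero pattern is to be reused):
.vb
      MatZeroEntries(A);                       /* values set to 0.0, nonzero pattern retained */
      /* ... refill the same locations with MatSetValues() ... */
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve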
5199: Concepts: matrices^zeroing
5201: .seealso: MatZeroRows()
5202: @*/
5203: PetscErrorCode MatZeroEntries(Mat mat)
5204: {
5210: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5211: if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5212: if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5213: MatCheckPreallocated(mat,1);
5215: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5216: (*mat->ops->zeroentries)(mat);
5217: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5218: PetscObjectStateIncrease((PetscObject)mat);
5219: #if defined(PETSC_HAVE_CUSP)
5220: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5221: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5222: }
5223: #endif
5224: #if defined(PETSC_HAVE_VIENNACL)
5225: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5226: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5227: }
5228: #endif
5229: return(0);
5230: }
5234: /*@C
5235: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5236: of a set of rows and columns of a matrix.
5238: Collective on Mat
5240: Input Parameters:
5241: + mat - the matrix
5242: . numRows - the number of rows to remove
5243: . rows - the global row indices
5244: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5245: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5246: - b - optional vector of right hand side, that will be adjusted by provided solution
5248: Notes:
5249: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5251: The user can set a value in the diagonal entry (or for the AIJ and
5252: row formats can optionally remove the main diagonal entry from the
5253: nonzero structure as well, by passing 0.0 as the final argument).
5255: For the parallel case, all processes that share the matrix (i.e.,
5256: those in the communicator used for matrix creation) MUST call this
5257: routine, regardless of whether any rows being zeroed are owned by
5258: them.
5260: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5261: list only rows local to itself).
5263: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
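   Example of Usage (an illustrative sketch for imposing Dirichlet boundary conditions; nbc, bcrows, x and b are hypothetical names for data the application already owns):
.vb
      /* zero the rows and columns of the boundary unknowns, put 1.0 on their diagonals,
         and adjust the right hand side b using the known boundary values stored in x */
      MatZeroRowsColumns(A,nbc,bcrows,1.0,x,b);
.ve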
5265: Level: intermediate
5267: Concepts: matrices^zeroing rows
5269: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumnsIS()
5270: @*/
5271: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5272: {
5279: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5280: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5281: if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5282: MatCheckPreallocated(mat,1);
5284: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5285: MatViewFromOptions(mat,NULL,"-mat_view");
5286: PetscObjectStateIncrease((PetscObject)mat);
5287: #if defined(PETSC_HAVE_CUSP)
5288: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5289: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5290: }
5291: #endif
5292: #if defined(PETSC_HAVE_VIENNACL)
5293: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5294: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5295: }
5296: #endif
5297: return(0);
5298: }
5302: /*@C
5303: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5304: of a set of rows and columns of a matrix.
5306: Collective on Mat
5308: Input Parameters:
5309: + mat - the matrix
5310: . is - the rows to zero
5311: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5312: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5313: - b - optional vector of right hand side, that will be adjusted by provided solution
5315: Notes:
5316: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5318: The user can set a value in the diagonal entry (or for the AIJ and
5319: row formats can optionally remove the main diagonal entry from the
5320: nonzero structure as well, by passing 0.0 as the final argument).
5322: For the parallel case, all processes that share the matrix (i.e.,
5323: those in the communicator used for matrix creation) MUST call this
5324: routine, regardless of whether any rows being zeroed are owned by
5325: them.
5327: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5328: list only rows local to itself).
5330: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5332: Level: intermediate
5334: Concepts: matrices^zeroing rows
5336: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumns()
5337: @*/
5338: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5339: {
5341: PetscInt numRows;
5342: const PetscInt *rows;
5349: ISGetLocalSize(is,&numRows);
5350: ISGetIndices(is,&rows);
5351: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5352: ISRestoreIndices(is,&rows);
5353: return(0);
5354: }
5358: /*@C
5359: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5360: of a set of rows of a matrix.
5362: Collective on Mat
5364: Input Parameters:
5365: + mat - the matrix
5366: . numRows - the number of rows to remove
5367: . rows - the global row indices
5368: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5369: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5370: - b - optional vector of right hand side, that will be adjusted by provided solution
5372: Notes:
5373: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5374: but does not release memory. For the dense and block diagonal
5375: formats this does not alter the nonzero structure.
5377:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5378:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5379:    merely zeroed.
5381: The user can set a value in the diagonal entry (or for the AIJ and
5382: row formats can optionally remove the main diagonal entry from the
5383: nonzero structure as well, by passing 0.0 as the final argument).
5385: For the parallel case, all processes that share the matrix (i.e.,
5386: those in the communicator used for matrix creation) MUST call this
5387: routine, regardless of whether any rows being zeroed are owned by
5388: them.
5390: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5391: list only rows local to itself).
5393: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5394: owns that are to be zeroed. This saves a global synchronization in the implementation.
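   Example of Usage (a minimal sketch; the row indices shown are arbitrary global indices):
.vb
      PetscInt rows[] = {0,1,2};
      MatZeroRows(A,3,rows,1.0,NULL,NULL);     /* zero three rows, keeping 1.0 on each diagonal */
.ve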
5396: Level: intermediate
5398: Concepts: matrices^zeroing rows
5400: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5401: @*/
5402: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5403: {
5410: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5411: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5412: if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5413: MatCheckPreallocated(mat,1);
5415: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5416: MatViewFromOptions(mat,NULL,"-mat_view");
5417: PetscObjectStateIncrease((PetscObject)mat);
5418: #if defined(PETSC_HAVE_CUSP)
5419: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5420: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5421: }
5422: #endif
5423: #if defined(PETSC_HAVE_VIENNACL)
5424: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5425: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5426: }
5427: #endif
5428: return(0);
5429: }
5433: /*@C
5434: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5435: of a set of rows of a matrix.
5437: Collective on Mat
5439: Input Parameters:
5440: + mat - the matrix
5441: . is - index set of rows to remove
5442: . diag - value put in all diagonals of eliminated rows
5443: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5444: - b - optional vector of right hand side, that will be adjusted by provided solution
5446: Notes:
5447: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5448: but does not release memory. For the dense and block diagonal
5449: formats this does not alter the nonzero structure.
5451:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5452:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5453:    merely zeroed.
5455: The user can set a value in the diagonal entry (or for the AIJ and
5456: row formats can optionally remove the main diagonal entry from the
5457: nonzero structure as well, by passing 0.0 as the final argument).
5459: For the parallel case, all processes that share the matrix (i.e.,
5460: those in the communicator used for matrix creation) MUST call this
5461: routine, regardless of whether any rows being zeroed are owned by
5462: them.
5464: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5465: list only rows local to itself).
5467: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5468: owns that are to be zeroed. This saves a global synchronization in the implementation.
5470: Level: intermediate
5472: Concepts: matrices^zeroing rows
5474: .seealso: MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5475: @*/
5476: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5477: {
5478: PetscInt numRows;
5479: const PetscInt *rows;
5486: ISGetLocalSize(is,&numRows);
5487: ISGetIndices(is,&rows);
5488: MatZeroRows(mat,numRows,rows,diag,x,b);
5489: ISRestoreIndices(is,&rows);
5490: return(0);
5491: }
5495: /*@C
5496: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5497: of a set of rows of a matrix. These rows must be local to the process.
5499: Collective on Mat
5501: Input Parameters:
5502: + mat - the matrix
5503: . numRows - the number of rows to remove
5504: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5505: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5506: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5507: - b - optional vector of right hand side, that will be adjusted by provided solution
5509: Notes:
5510: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5511: but does not release memory. For the dense and block diagonal
5512: formats this does not alter the nonzero structure.
5514:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5515:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5516:    merely zeroed.
5518: The user can set a value in the diagonal entry (or for the AIJ and
5519: row formats can optionally remove the main diagonal entry from the
5520: nonzero structure as well, by passing 0.0 as the final argument).
5522: For the parallel case, all processes that share the matrix (i.e.,
5523: those in the communicator used for matrix creation) MUST call this
5524: routine, regardless of whether any rows being zeroed are owned by
5525: them.
5527: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5528: list only rows local to itself).
5530: The grid coordinates are across the entire grid, not just the local portion
5532: In Fortran idxm and idxn should be declared as
5533: $ MatStencil idxm(4,m)
5534: and the values inserted using
5535: $ idxm(MatStencil_i,1) = i
5536: $ idxm(MatStencil_j,1) = j
5537: $ idxm(MatStencil_k,1) = k
5538: $ idxm(MatStencil_c,1) = c
5539: etc
5541:    For periodic boundary conditions use negative indices for values to the left (below 0), which are
5542:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5543:    etc., to obtain values wrapped from the left edge. This does not work for anything but the
5544:    DMDA_BOUNDARY_PERIODIC boundary type.
5546:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you
5547:    have a single value per point) you can skip filling those indices; see also the example below.
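   In C the rows are passed as an array of MatStencil entries; as an illustrative sketch for a 2d problem with one degree of freedom per point (i and j are hypothetical global grid indices of a boundary point):
.vb
      MatStencil row = {0};                    /* unused slots (k and c here) may be left at zero */
      row.i = i; row.j = j;
      MatZeroRowsStencil(A,1,&row,1.0,NULL,NULL);
.ve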
5549: Level: intermediate
5551: Concepts: matrices^zeroing rows
5553: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5554: @*/
5555: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5556: {
5557: PetscInt dim = mat->stencil.dim;
5558: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5559: PetscInt *dims = mat->stencil.dims+1;
5560: PetscInt *starts = mat->stencil.starts;
5561: PetscInt *dxm = (PetscInt*) rows;
5562: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5570: PetscMalloc1(numRows, &jdxm);
5571: for (i = 0; i < numRows; ++i) {
5572: /* Skip unused dimensions (they are ordered k, j, i, c) */
5573: for (j = 0; j < 3-sdim; ++j) dxm++;
5574: /* Local index in X dir */
5575: tmp = *dxm++ - starts[0];
5576: /* Loop over remaining dimensions */
5577: for (j = 0; j < dim-1; ++j) {
5578: /* If nonlocal, set index to be negative */
5579: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5580: /* Update local index */
5581: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5582: }
5583: /* Skip component slot if necessary */
5584: if (mat->stencil.noc) dxm++;
5585: /* Local row number */
5586: if (tmp >= 0) {
5587: jdxm[numNewRows++] = tmp;
5588: }
5589: }
5590: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5591: PetscFree(jdxm);
5592: return(0);
5593: }
5597: /*@C
5598: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5599: of a set of rows and columns of a matrix.
5601: Collective on Mat
5603: Input Parameters:
5604: + mat - the matrix
5605: . numRows - the number of rows/columns to remove
5606: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5607: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5608: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5609: - b - optional vector of right hand side, that will be adjusted by provided solution
5611: Notes:
5612: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5613: but does not release memory. For the dense and block diagonal
5614: formats this does not alter the nonzero structure.
5616:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5617:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5618:    merely zeroed.
5620: The user can set a value in the diagonal entry (or for the AIJ and
5621: row formats can optionally remove the main diagonal entry from the
5622: nonzero structure as well, by passing 0.0 as the final argument).
5624: For the parallel case, all processes that share the matrix (i.e.,
5625: those in the communicator used for matrix creation) MUST call this
5626: routine, regardless of whether any rows being zeroed are owned by
5627: them.
5629: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5630: list only rows local to itself, but the row/column numbers are given in local numbering).
5632: The grid coordinates are across the entire grid, not just the local portion
5634: In Fortran idxm and idxn should be declared as
5635: $ MatStencil idxm(4,m)
5636: and the values inserted using
5637: $ idxm(MatStencil_i,1) = i
5638: $ idxm(MatStencil_j,1) = j
5639: $ idxm(MatStencil_k,1) = k
5640: $ idxm(MatStencil_c,1) = c
5641: etc
5643:    For periodic boundary conditions use negative indices for values to the left (below 0), which are
5644:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5645:    etc., to obtain values wrapped from the left edge. This does not work for anything but the
5646:    DMDA_BOUNDARY_PERIODIC boundary type.
5648:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you
5649:    have a single value per point) you can skip filling those indices.
5651: Level: intermediate
5653: Concepts: matrices^zeroing rows
5655: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5656: @*/
5657: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5658: {
5659: PetscInt dim = mat->stencil.dim;
5660: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5661: PetscInt *dims = mat->stencil.dims+1;
5662: PetscInt *starts = mat->stencil.starts;
5663: PetscInt *dxm = (PetscInt*) rows;
5664: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5672: PetscMalloc1(numRows, &jdxm);
5673: for (i = 0; i < numRows; ++i) {
5674: /* Skip unused dimensions (they are ordered k, j, i, c) */
5675: for (j = 0; j < 3-sdim; ++j) dxm++;
5676: /* Local index in X dir */
5677: tmp = *dxm++ - starts[0];
5678: /* Loop over remaining dimensions */
5679: for (j = 0; j < dim-1; ++j) {
5680: /* If nonlocal, set index to be negative */
5681: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5682: /* Update local index */
5683: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5684: }
5685: /* Skip component slot if necessary */
5686: if (mat->stencil.noc) dxm++;
5687: /* Local row number */
5688: if (tmp >= 0) {
5689: jdxm[numNewRows++] = tmp;
5690: }
5691: }
5692: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
5693: PetscFree(jdxm);
5694: return(0);
5695: }
5699: /*@C
5700: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
5701: of a set of rows of a matrix; using local numbering of rows.
5703: Collective on Mat
5705: Input Parameters:
5706: + mat - the matrix
5707: . numRows - the number of rows to remove
5708: .  rows - the local row indices
5709: . diag - value put in all diagonals of eliminated rows
5710: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5711: - b - optional vector of right hand side, that will be adjusted by provided solution
5713: Notes:
5714: Before calling MatZeroRowsLocal(), the user must first set the
5715: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5717: For the AIJ matrix formats this removes the old nonzero structure,
5718: but does not release memory. For the dense and block diagonal
5719: formats this does not alter the nonzero structure.
5721:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5722:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5723:    merely zeroed.
5725: The user can set a value in the diagonal entry (or for the AIJ and
5726: row formats can optionally remove the main diagonal entry from the
5727: nonzero structure as well, by passing 0.0 as the final argument).
5729: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5730: owns that are to be zeroed. This saves a global synchronization in the implementation.
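   Example of Usage (a sketch that assumes the local-to-global mapping has already been attached with MatSetLocalToGlobalMapping(), for instance because the matrix was obtained from a DM; nlocal and lrows are hypothetical names for the local row data):
.vb
      /* lrows[] contains row indices in the LOCAL numbering of this process */
      MatZeroRowsLocal(A,nlocal,lrows,1.0,NULL,NULL);
.ve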
5732: Level: intermediate
5734: Concepts: matrices^zeroing
5736: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
5737: @*/
5738: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5739: {
5741: PetscMPIInt size;
5747: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5748: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5749: MatCheckPreallocated(mat,1);
5751: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
5752: if (mat->ops->zerorowslocal) {
5753: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
5754: } else if (size == 1) {
5755: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5756: } else {
5757: IS is, newis;
5758: const PetscInt *newRows;
5760: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
5761: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
5762: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
5763: ISGetIndices(newis,&newRows);
5764: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
5765: ISRestoreIndices(newis,&newRows);
5766: ISDestroy(&newis);
5767: ISDestroy(&is);
5768: }
5769: PetscObjectStateIncrease((PetscObject)mat);
5770: #if defined(PETSC_HAVE_CUSP)
5771: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5772: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5773: }
5774: #endif
5775: #if defined(PETSC_HAVE_VIENNACL)
5776: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5777: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5778: }
5779: #endif
5780: return(0);
5781: }
5785: /*@C
5786: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
5787: of a set of rows of a matrix; using local numbering of rows.
5789: Collective on Mat
5791: Input Parameters:
5792: + mat - the matrix
5793: . is - index set of rows to remove
5794: . diag - value put in all diagonals of eliminated rows
5795: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5796: - b - optional vector of right hand side, that will be adjusted by provided solution
5798: Notes:
5799: Before calling MatZeroRowsLocalIS(), the user must first set the
5800: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5802: For the AIJ matrix formats this removes the old nonzero structure,
5803: but does not release memory. For the dense and block diagonal
5804: formats this does not alter the nonzero structure.
5806:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5807:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5808:    merely zeroed.
5810: The user can set a value in the diagonal entry (or for the AIJ and
5811: row formats can optionally remove the main diagonal entry from the
5812: nonzero structure as well, by passing 0.0 as the final argument).
5814: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5815: owns that are to be zeroed. This saves a global synchronization in the implementation.
5817: Level: intermediate
5819: Concepts: matrices^zeroing
5821: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
5822: @*/
5823: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5824: {
5826: PetscInt numRows;
5827: const PetscInt *rows;
5833: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5834: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5835: MatCheckPreallocated(mat,1);
5837: ISGetLocalSize(is,&numRows);
5838: ISGetIndices(is,&rows);
5839: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
5840: ISRestoreIndices(is,&rows);
5841: return(0);
5842: }
5846: /*@C
5847: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
5848: of a set of rows and columns of a matrix; using local numbering of rows.
5850: Collective on Mat
5852: Input Parameters:
5853: + mat - the matrix
5854: . numRows - the number of rows to remove
5855: .  rows - the local row indices
5856: . diag - value put in all diagonals of eliminated rows
5857: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5858: - b - optional vector of right hand side, that will be adjusted by provided solution
5860: Notes:
5861: Before calling MatZeroRowsColumnsLocal(), the user must first set the
5862: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5864: The user can set a value in the diagonal entry (or for the AIJ and
5865: row formats can optionally remove the main diagonal entry from the
5866: nonzero structure as well, by passing 0.0 as the final argument).
5868: Level: intermediate
5870: Concepts: matrices^zeroing
5872: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
5873: @*/
5874: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5875: {
5877: PetscMPIInt size;
5883: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5884: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5885: MatCheckPreallocated(mat,1);
5887: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
5888: if (size == 1) {
5889: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5890: } else {
5891: IS is, newis;
5892: const PetscInt *newRows;
5894: if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
5895: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
5896: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
5897: ISGetIndices(newis,&newRows);
5898: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
5899: ISRestoreIndices(newis,&newRows);
5900: ISDestroy(&newis);
5901: ISDestroy(&is);
5902: }
5903: PetscObjectStateIncrease((PetscObject)mat);
5904: #if defined(PETSC_HAVE_CUSP)
5905: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5906: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5907: }
5908: #endif
5909: #if defined(PETSC_HAVE_VIENNACL)
5910: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5911: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5912: }
5913: #endif
5914: return(0);
5915: }
5919: /*@C
5920: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
5921: of a set of rows and columns of a matrix; using local numbering of rows.
5923: Collective on Mat
5925: Input Parameters:
5926: + mat - the matrix
5927: . is - index set of rows to remove
5928: . diag - value put in all diagonals of eliminated rows
5929: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5930: - b - optional vector of right hand side, that will be adjusted by provided solution
5932: Notes:
5933: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
5934: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5936: The user can set a value in the diagonal entry (or for the AIJ and
5937: row formats can optionally remove the main diagonal entry from the
5938: nonzero structure as well, by passing 0.0 as the final argument).
5940: Level: intermediate
5942: Concepts: matrices^zeroing
5944: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
5945: @*/
5946: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5947: {
5949: PetscInt numRows;
5950: const PetscInt *rows;
5956: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5957: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5958: MatCheckPreallocated(mat,1);
5960: ISGetLocalSize(is,&numRows);
5961: ISGetIndices(is,&rows);
5962: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
5963: ISRestoreIndices(is,&rows);
5964: return(0);
5965: }
5969: /*@
5970: MatGetSize - Returns the numbers of rows and columns in a matrix.
5972: Not Collective
5974: Input Parameter:
5975: . mat - the matrix
5977: Output Parameters:
5978: + m - the number of global rows
5979: - n - the number of global columns
5981: Note: both output parameters can be NULL on input.
5983: Level: beginner
5985: Concepts: matrices^size
5987: .seealso: MatGetLocalSize()
5988: @*/
5989: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
5990: {
5993: if (m) *m = mat->rmap->N;
5994: if (n) *n = mat->cmap->N;
5995: return(0);
5996: }
6000: /*@
6001: MatGetLocalSize - Returns the number of rows and columns in a matrix
6002: stored locally. This information may be implementation dependent, so
6003: use with care.
6005: Not Collective
6007: Input Parameters:
6008: . mat - the matrix
6010: Output Parameters:
6011: + m - the number of local rows
6012: - n - the number of local columns
6014: Note: both output parameters can be NULL on input.
6016: Level: beginner
6018: Concepts: matrices^local size
6020: .seealso: MatGetSize()
6021: @*/
6022: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6023: {
6028: if (m) *m = mat->rmap->n;
6029: if (n) *n = mat->cmap->n;
6030: return(0);
6031: }
6035: /*@
6036:    MatGetOwnershipRangeColumn - Returns the range of matrix columns that correspond to the locally owned portion
6037:    of the vector one multiplies this matrix by. (The columns of the "diagonal block")
6039: Not Collective, unless matrix has not been allocated, then collective on Mat
6041: Input Parameters:
6042: . mat - the matrix
6044: Output Parameters:
6045: + m - the global index of the first local column
6046: - n - one more than the global index of the last local column
6048: Notes: both output parameters can be NULL on input.
6050: Level: developer
6052: Concepts: matrices^column ownership
6054: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6056: @*/
6057: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6058: {
6064: MatCheckPreallocated(mat,1);
6065: if (m) *m = mat->cmap->rstart;
6066: if (n) *n = mat->cmap->rend;
6067: return(0);
6068: }
6072: /*@
6073: MatGetOwnershipRange - Returns the range of matrix rows owned by
6074: this processor, assuming that the matrix is laid out with the first
6075: n1 rows on the first processor, the next n2 rows on the second, etc.
6076: For certain parallel layouts this range may not be well defined.
6078: Not Collective
6080: Input Parameters:
6081: . mat - the matrix
6083: Output Parameters:
6084: + m - the global index of the first local row
6085: - n - one more than the global index of the last local row
6087: Note: Both output parameters can be NULL on input.
6088: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6089: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6090: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
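   Example of Usage (a minimal sketch looping over the locally owned rows):
.vb
      PetscInt rstart,rend,i;
      MatGetOwnershipRange(A,&rstart,&rend);
      for (i=rstart; i<rend; i++) {
        /* insert or access entries of the locally owned row i here */
      }
.ve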
6092: Level: beginner
6094: Concepts: matrices^row ownership
6096: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6098: @*/
6099: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6100: {
6106: MatCheckPreallocated(mat,1);
6107: if (m) *m = mat->rmap->rstart;
6108: if (n) *n = mat->rmap->rend;
6109: return(0);
6110: }
6114: /*@C
6115: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6116: each process
6118: Not Collective, unless matrix has not been allocated, then collective on Mat
6120: Input Parameters:
6121: . mat - the matrix
6123: Output Parameters:
6124: .  ranges - start of each process's portion, plus one additional entry at the end containing the total length
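   Example of Usage (a minimal sketch; the layout array is indexed by process rank):
.vb
      const PetscInt *ranges;
      PetscMPIInt    rank;
      MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
      MatGetOwnershipRanges(A,&ranges);
      /* this process owns rows ranges[rank] up to, but not including, ranges[rank+1] */
.ve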
6126: Level: beginner
6128: Concepts: matrices^row ownership
6130: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6132: @*/
6133: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6134: {
6140: MatCheckPreallocated(mat,1);
6141: PetscLayoutGetRanges(mat->rmap,ranges);
6142: return(0);
6143: }
6147: /*@C
6148:    MatGetOwnershipRangesColumn - Returns, for each process, the range of matrix columns that correspond to that process's
6149:    locally owned portion of the vector one multiplies this matrix by. (The columns of the "diagonal blocks" for each process)
6151: Not Collective, unless matrix has not been allocated, then collective on Mat
6153: Input Parameters:
6154: . mat - the matrix
6156: Output Parameters:
6157: .  ranges - start of each process's portion, plus one additional entry at the end containing the total length
6159: Level: beginner
6161: Concepts: matrices^column ownership
6163: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6165: @*/
6166: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6167: {
6173: MatCheckPreallocated(mat,1);
6174: PetscLayoutGetRanges(mat->cmap,ranges);
6175: return(0);
6176: }
6180: /*@C
6181: MatGetOwnershipIS - Get row and column ownership as index sets
6183: Not Collective
6185: Input Arguments:
6186: . A - matrix of type Elemental
6188: Output Arguments:
6189: + rows - rows in which this process owns elements
6190: -  cols - columns in which this process owns elements
6192: Level: intermediate
6194: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6195: @*/
6196: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6197: {
6198: PetscErrorCode ierr,(*f)(Mat,IS*,IS*);
6201: MatCheckPreallocated(A,1);
6202: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6203: if (f) {
6204: (*f)(A,rows,cols);
6205: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6206: if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6207: if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6208: }
6209: return(0);
6210: }
6214: /*@C
6215: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6216: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6217: to complete the factorization.
6219: Collective on Mat
6221: Input Parameters:
6222: + mat - the matrix
6223: . row - row permutation
6224: . column - column permutation
6225: - info - structure containing
6226: $ levels - number of levels of fill.
6227: $ expected fill - as ratio of original fill.
6228: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6229: missing diagonal entries)
6231: Output Parameters:
6232: . fact - new matrix that has been symbolically factored
6234: Notes:
6235: See the <a href="../../docs/manual.pdf">users manual</a> for additional information about
6236: choosing the fill factor for better efficiency.
6238: Most users should employ the simplified KSP interface for linear solvers
6239: instead of working directly with matrix algebra routines such as this.
6240: See, e.g., KSPCreate().
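   Example of Usage (an illustrative sketch of the low-level factorization sequence using the built-in PETSc solver; in practice most users let KSP/PC drive these calls):
.vb
      Mat           F;
      IS            row,col;
      MatFactorInfo info;
      MatFactorInfoInitialize(&info);
      info.levels = 1; info.fill = 1.0;
      MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,&F);
      MatILUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);
      ISDestroy(&row);
      ISDestroy(&col);
.ve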
6242: Level: developer
6244: Concepts: matrices^symbolic LU factorization
6245: Concepts: matrices^factorization
6246: Concepts: LU^symbolic factorization
6248: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6249: MatGetOrdering(), MatFactorInfo
6251:     Developer Note: fortran interface is not autogenerated as the f90
6252:     interface definition cannot be generated correctly [due to MatFactorInfo]
6254: @*/
6255: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6256: {
6266: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6267: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6268: if (!(fact)->ops->ilufactorsymbolic) {
6269: const MatSolverPackage spackage;
6270: MatFactorGetSolverPackage(fact,&spackage);
6271: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6272: }
6273: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6274: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6275: MatCheckPreallocated(mat,2);
6277: PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6278: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6279: PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6280: return(0);
6281: }
6285: /*@C
6286: MatICCFactorSymbolic - Performs symbolic incomplete
6287: Cholesky factorization for a symmetric matrix. Use
6288: MatCholeskyFactorNumeric() to complete the factorization.
6290: Collective on Mat
6292: Input Parameters:
6293: + mat - the matrix
6294: . perm - row and column permutation
6295: - info - structure containing
6296: $ levels - number of levels of fill.
6297: $ expected fill - as ratio of original fill.
6299: Output Parameter:
6300: . fact - the factored matrix
6302: Notes:
6303: Most users should employ the KSP interface for linear solvers
6304: instead of working directly with matrix algebra routines such as this.
6305: See, e.g., KSPCreate().
6307: Level: developer
6309: Concepts: matrices^symbolic incomplete Cholesky factorization
6310: Concepts: matrices^factorization
6311:    Concepts: Cholesky^symbolic factorization
6313: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6315:     Developer Note: fortran interface is not autogenerated as the f90
6316:     interface definition cannot be generated correctly [due to MatFactorInfo]
6318: @*/
6319: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6320: {
6329: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6330: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6331: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6332: if (!(fact)->ops->iccfactorsymbolic) {
6333: const MatSolverPackage spackage;
6334: MatFactorGetSolverPackage(fact,&spackage);
6335: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6336: }
6337: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6338: MatCheckPreallocated(mat,2);
6340: PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6341: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6342: PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6343: return(0);
6344: }
6348: /*@C
6349: MatGetSubMatrices - Extracts several submatrices from a matrix. If submat
6350: points to an array of valid matrices, they may be reused to store the new
6351: submatrices.
6353: Collective on Mat
6355: Input Parameters:
6356: + mat - the matrix
6357: .  n   - the number of submatrices to be extracted (on this processor, may be zero)
6358: . irow, icol - index sets of rows and columns to extract (must be sorted)
6359: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6361: Output Parameter:
6362: . submat - the array of submatrices
6364: Notes:
6365: MatGetSubMatrices() can extract ONLY sequential submatrices
6366: (from both sequential and parallel matrices). Use MatGetSubMatrix()
6367: to extract a parallel submatrix.
6369: Currently both row and column indices must be sorted to guarantee
6370: correctness with all matrix types.
6372: When extracting submatrices from a parallel matrix, each processor can
6373: form a different submatrix by setting the rows and columns of its
6374: individual index sets according to the local submatrix desired.
6376: When finished using the submatrices, the user should destroy
6377: them with MatDestroyMatrices().
6379: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6380: original matrix has not changed from that last call to MatGetSubMatrices().
6382: This routine creates the matrices in submat; you should NOT create them before
6383: calling it. It also allocates the array of matrix pointers submat.
6385: For BAIJ matrices the index sets must respect the block structure, that is if they
6386: request one row/column in a block, they must request all rows/columns that are in
6387: that block. For example, if the block size is 2 you cannot request just row 0 and
6388: column 0.
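   Example of Usage (an illustrative sketch in which every process extracts the same small sequential submatrix; the matrix is assumed to have at least 5 rows and columns):
.vb
      IS  isrow,iscol;
      Mat *submat;
      ISCreateStride(PETSC_COMM_SELF,5,0,1,&isrow);
      ISCreateStride(PETSC_COMM_SELF,5,0,1,&iscol);
      MatGetSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);
      /* ... use submat[0], a sequential matrix containing rows and columns 0..4 ... */
      MatDestroyMatrices(1,&submat);
      ISDestroy(&isrow);
      ISDestroy(&iscol);
.ve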
6390: Fortran Note:
6391: The Fortran interface is slightly different from that given below; it
6392:    requires one to pass in as submat a Mat (integer) array of size at least n.
6394: Level: advanced
6396: Concepts: matrices^accessing submatrices
6397: Concepts: submatrices
6399: .seealso: MatDestroyMatrices(), MatGetSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6400: @*/
6401: PetscErrorCode MatGetSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6402: {
6404: PetscInt i;
6405: PetscBool eq;
6410: if (n) {
6415: }
6417: if (n && scall == MAT_REUSE_MATRIX) {
6420: }
6421: if (!mat->ops->getsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6422: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6423: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6424: MatCheckPreallocated(mat,1);
6426: PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6427: (*mat->ops->getsubmatrices)(mat,n,irow,icol,scall,submat);
6428: PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6429: for (i=0; i<n; i++) {
6430: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6431: ISEqual(irow[i],icol[i],&eq);
6432: if (eq) {
6433: if (mat->symmetric) {
6434: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6435: } else if (mat->hermitian) {
6436: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6437: } else if (mat->structurally_symmetric) {
6438: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6439: }
6440: }
6441: }
6442: }
6443: return(0);
6444: }
6448: PetscErrorCode MatGetSubMatricesParallel(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6449: {
6451: PetscInt i;
6452: PetscBool eq;
6457: if (n) {
6462: }
6464: if (n && scall == MAT_REUSE_MATRIX) {
6467: }
6468: if (!mat->ops->getsubmatricesparallel) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6469: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6470: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6471: MatCheckPreallocated(mat,1);
6473: PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6474: (*mat->ops->getsubmatricesparallel)(mat,n,irow,icol,scall,submat);
6475: PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6476: for (i=0; i<n; i++) {
6477: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6478: ISEqual(irow[i],icol[i],&eq);
6479: if (eq) {
6480: if (mat->symmetric) {
6481: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6482: } else if (mat->hermitian) {
6483: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6484: } else if (mat->structurally_symmetric) {
6485: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6486: }
6487: }
6488: }
6489: }
6490: return(0);
6491: }
6495: /*@C
6496: MatDestroyMatrices - Destroys a set of matrices obtained with MatGetSubMatrices().
6498: Collective on Mat
6500: Input Parameters:
6501: + n - the number of local matrices
6502: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6503: sequence of MatGetSubMatrices())
6505: Level: advanced
6507:     Notes: Frees not only the matrices, but also the array that contains them.
6508:            In Fortran this routine does not free the array.
6510: .seealso: MatGetSubMatrices()
6511: @*/
6512: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6513: {
6515: PetscInt i;
6518: if (!*mat) return(0);
6519: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6521: for (i=0; i<n; i++) {
6522: MatDestroy(&(*mat)[i]);
6523: }
6524: /* memory is allocated even if n = 0 */
6525: PetscFree(*mat);
6526: *mat = NULL;
6527: return(0);
6528: }
6532: /*@C
6533: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6535: Collective on Mat
6537: Input Parameters:
6538: . mat - the matrix
6540: Output Parameter:
6541: . matstruct - the sequential matrix with the nonzero structure of mat
6543: Level: intermediate
6545: .seealso: MatDestroySeqNonzeroStructure(), MatGetSubMatrices(), MatDestroyMatrices()
6546: @*/
6547: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6548: {
6556: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6557: MatCheckPreallocated(mat,1);
6559: if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6560: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6561: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6562: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6563: return(0);
6564: }
6568: /*@C
6569: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
6571: Collective on Mat
6573: Input Parameters:
6574: .  mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6575:          sequence of MatGetSeqNonzeroStructure())
6577: Level: advanced
6579:     Notes: Frees the matrix obtained with MatGetSeqNonzeroStructure()
6581: .seealso: MatGetSeqNonzeroStructure()
6582: @*/
6583: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6584: {
6589: MatDestroy(mat);
6590: return(0);
6591: }
6595: /*@
6596: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6597: replaces the index sets by larger ones that represent submatrices with
6598: additional overlap.
6600: Collective on Mat
6602: Input Parameters:
6603: + mat - the matrix
6604: . n - the number of index sets
6605: .  is  - the array of index sets (these index sets will be changed during the call)
6606: - ov - the additional overlap requested
6608: Level: developer
6610: Concepts: overlap
6611: Concepts: ASM^computing overlap
6613: .seealso: MatGetSubMatrices()
6614: @*/
6615: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6616: {
6622: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6623: if (n) {
6626: }
6627: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6628: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6629: MatCheckPreallocated(mat,1);
6631: if (!ov) return(0);
6632: if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6633: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6634: (*mat->ops->increaseoverlap)(mat,n,is,ov);
6635: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6636: return(0);
6637: }
6641: /*@
6642: MatGetBlockSize - Returns the matrix block size; useful especially for the
6643: block row and block diagonal formats.
6645: Not Collective
6647: Input Parameter:
6648: . mat - the matrix
6650: Output Parameter:
6651: . bs - block size
6653: Notes:
6654: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ.
6656: If the block size has not been set yet this routine returns -1.
6658: Level: intermediate
6660: Concepts: matrices^block size
6662: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
6663: @*/
6664: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
6665: {
6669: *bs = mat->rmap->bs;
6670: return(0);
6671: }
6675: /*@
6676: MatGetBlockSizes - Returns the matrix block row and column sizes;
6677: useful especially for the block row and block diagonal formats.
6679: Not Collective
6681: Input Parameter:
6682: . mat - the matrix
6684: Output Parameter:
6685: +  rbs - row block size
6686: -  cbs - column block size
6688: Notes:
6689: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ.
6691: If a block size has not been set yet this routine returns -1.
6693: Level: intermediate
6695: Concepts: matrices^block size
6697: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize()
6698: @*/
6699: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
6700: {
6705: if (rbs) *rbs = mat->rmap->bs;
6706: if (cbs) *cbs = mat->cmap->bs;
6707: return(0);
6708: }
6712: /*@
6713: MatSetBlockSize - Sets the matrix block size.
6715: Logically Collective on Mat
6717: Input Parameters:
6718: + mat - the matrix
6719: - bs - block size
6721: Notes:
6722: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
6724: Level: intermediate
6726: Concepts: matrices^block size
6728: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize()
6729: @*/
6730: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
6731: {
6737: PetscLayoutSetBlockSize(mat->rmap,bs);
6738: PetscLayoutSetBlockSize(mat->cmap,bs);
6739: return(0);
6740: }
6744: /*@
6745: MatSetBlockSizes - Sets the matrix block row and column sizes.
6747: Logically Collective on Mat
6749: Input Parameters:
6750: + mat - the matrix
6751: .  rbs - row block size
6752: - cbs - column block size
6754: Notes:
6755: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
6757: Level: intermediate
6759: Concepts: matrices^block size
6761: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize()
6762: @*/
6763: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
6764: {
6771: PetscLayoutSetBlockSize(mat->rmap,rbs);
6772: PetscLayoutSetBlockSize(mat->cmap,cbs);
6773: return(0);
6774: }
6778: /*@
6779: MatResidual - Default routine to calculate the residual.
6781: Collective on Mat and Vec
6783: Input Parameters:
6784: + mat - the matrix
6785: . b - the right-hand-side
6786: - x - the approximate solution
6788: Output Parameter:
6789: . r - location to store the residual
6791: Level: developer
6793: .keywords: MG, default, multigrid, residual
6795: .seealso: PCMGSetResidual()
6796: @*/
6797: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
6798: {
6807: MatCheckPreallocated(mat,1);
6808: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
6809: if (!mat->ops->residual) {
6810: MatMult(mat,x,r);
6811: VecAYPX(r,-1.0,b);
6812: } else {
6813: (*mat->ops->residual)(mat,b,x,r);
6814: }
6815: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
6816: return(0);
6817: }
6821: /*@C
6822: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
6824: Collective on Mat
6826: Input Parameters:
6827: + mat - the matrix
6828: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
6829: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
6830: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
6831: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
6832: always used.
6834: Output Parameters:
6835: + n - number of rows in the (possibly compressed) matrix
6836: . ia - the row pointers [of length n+1]
6837: . ja - the column indices
6838: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
6839: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
6841: Level: developer
6843: Notes: You CANNOT change any of the ia[] or ja[] values.
6845: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values
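    Example of Usage in C (a minimal sketch for a sequential AIJ matrix, with 0-based indices and no symmetrization or inode compression):
.vb
      PetscInt       nrows;
      const PetscInt *ia,*ja;
      PetscBool      done;
      MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&nrows,&ia,&ja,&done);
      if (done) {
        /* the column indices of row i are ja[ia[i]] ... ja[ia[i+1]-1] */
      }
      MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&nrows,&ia,&ja,&done);
.ve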
6847:     Fortran Note:
6849: In Fortran use
6850: $ PetscInt ia(1), ja(1)
6851: $ PetscOffset iia, jja
6852: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
6853: $
6854: $ or
6855: $
6856: $ PetscScalar, pointer :: xx_v(:)
6857: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
6860:        Access the ith and jth entries via ia(iia + i) and ja(jja + j)
6862: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
6863: @*/
6864: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
6865: {
6875: MatCheckPreallocated(mat,1);
6876: if (!mat->ops->getrowij) *done = PETSC_FALSE;
6877: else {
6878: *done = PETSC_TRUE;
6879: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
6880: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
6881: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
6882: }
6883: return(0);
6884: }
6888: /*@C
6889: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
6891: Collective on Mat
6893: Input Parameters:
6894: + mat - the matrix
6895: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
6896: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
6897: symmetrized
6898: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
6899: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
6900: always used.
6901: Output Parameters:
6902: + n - number of columns in the (possibly compressed) matrix
6903: . ia - the column pointers
6905: . ja - the row indices
6906: - done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned
6908: Note:
6909: MatRestoreColumnIJ() zeros out n, ia, and ja. This is to prevent accidental
6910: use of the array after it has been restored. If you pass NULL, it will
6911: not zero the pointers. Use of ia or ja after MatRestoreColumnIJ() is invalid.
6913: Level: developer
6915: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
6916: @*/
6917: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
6918: {
6928: MatCheckPreallocated(mat,1);
6929: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
6930: else {
6931: *done = PETSC_TRUE;
6932: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
6933: }
6934: return(0);
6935: }
6939: /*@C
6940: MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
6941: MatGetRowIJ().
6943: Collective on Mat
6945: Input Parameters:
6946: + mat - the matrix
6947: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
6948: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
6949: symmetrized
6950: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
6951: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
6952: always used.
6953: . n - size of (possibly compressed) matrix
6954: . ia - the row pointers
6955: - ja - the column indices
6957: Output Parameters:
6958: . done - PETSC_TRUE or PETSC_FALSE indicating that the values have been returned
6960: Note:
6961: This routine zeros out n, ia, and ja. This is to prevent accidental
6962: use of the array after it has been restored. If you pass NULL, it will
6963: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
6965: Level: developer
6967: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
6968: @*/
6969: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
6970: {
6979: MatCheckPreallocated(mat,1);
6981: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
6982: else {
6983: *done = PETSC_TRUE;
6984: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
6985: if (n) *n = 0;
6986: if (ia) *ia = NULL;
6987: if (ja) *ja = NULL;
6988: }
6989: return(0);
6990: }
6994: /*@C
6995: MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
6996: MatGetColumnIJ().
6998: Collective on Mat
7000: Input Parameters:
7001: + mat - the matrix
7002: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7003: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7004: symmetrized
7005: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7006: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7007: always used.
7009: Output Parameters:
7010: + n - size of (possibly compressed) matrix
7011: . ia - the column pointers
7012: . ja - the row indices
7013: - done - PETSC_TRUE or PETSC_FALSE indicating that the values have been returned
7015: Level: developer
7017: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7018: @*/
7019: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7020: {
7029: MatCheckPreallocated(mat,1);
7031: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7032: else {
7033: *done = PETSC_TRUE;
7034: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7035: if (n) *n = 0;
7036: if (ia) *ia = NULL;
7037: if (ja) *ja = NULL;
7038: }
7039: return(0);
7040: }
7044: /*@C
7045: MatColoringPatch - Used inside matrix coloring routines that
7046: use MatGetRowIJ() and/or MatGetColumnIJ().
7048: Collective on Mat
7050: Input Parameters:
7051: + mat - the matrix
7052: . ncolors - max color value
7053: . n - number of entries in colorarray
7054: - colorarray - array indicating color for each column
7056: Output Parameters:
7057: . iscoloring - coloring generated using colorarray information
7059: Level: developer
7061: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7063: @*/
7064: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7065: {
7073: MatCheckPreallocated(mat,1);
7075: if (!mat->ops->coloringpatch) {
7076: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,iscoloring);
7077: } else {
7078: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7079: }
7080: return(0);
7081: }
7086: /*@
7087: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7089: Logically Collective on Mat
7091: Input Parameter:
7092: . mat - the factored matrix to be reset
7094: Notes:
7095: This routine should be used only with factored matrices formed by in-place
7096: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7097: format). This option can save memory, for example, when solving nonlinear
7098: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7099: ILU(0) preconditioner.
7101: Note that one can specify in-place ILU(0) factorization by calling
7102: .vb
7103: PCSetType(pc,PCILU);
7104: PCFactorSetUseInPlace(pc);
7105: .ve
7106: or by using the options -pc_type ilu -pc_factor_in_place
7108: In-place factorization ILU(0) can also be used as a local
7109: solver for the blocks within the block Jacobi or additive Schwarz
7110: methods (runtime option: -sub_pc_factor_in_place). See the discussion
7111: of these preconditioners in the <a href="../../docs/manual.pdf#ch_pc">PC chapter of the users manual</a> for details on setting
7112: local solver options.
7114: Most users should employ the simplified KSP interface for linear solvers
7115: instead of working directly with matrix algebra routines such as this.
7116: See, e.g., KSPCreate().
7118: Level: developer
7120: .seealso: PCFactorSetUseInPlace()
7122: Concepts: matrices^unfactored
7124: @*/
7125: PetscErrorCode MatSetUnfactored(Mat mat)
7126: {
7132: MatCheckPreallocated(mat,1);
7133: mat->factortype = MAT_FACTOR_NONE;
7134: if (!mat->ops->setunfactored) return(0);
7135: (*mat->ops->setunfactored)(mat);
7136: return(0);
7137: }
7139: /*MC
7140: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7142: Synopsis:
7143: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7145: Not collective
7147: Input Parameter:
7148: . x - matrix
7150: Output Parameters:
7151: + xx_v - the Fortran90 pointer to the array
7152: - ierr - error code
7154: Example of Usage:
7155: .vb
7156: PetscScalar, pointer :: xx_v(:,:)
7157: ....
7158: call MatDenseGetArrayF90(x,xx_v,ierr)
7159: a = xx_v(3,1)
7160: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7161: .ve
7163: Level: advanced
7165: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7167: Concepts: matrices^accessing array
7169: M*/
7171: /*MC
7172: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7173: accessed with MatDenseGetArrayF90().
7175: Synopsis:
7176: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7178: Not collective
7180: Input Parameters:
7181: + x - matrix
7182: - xx_v - the Fortran90 pointer to the array
7184: Output Parameter:
7185: . ierr - error code
7187: Example of Usage:
7188: .vb
7189: PetscScalar, pointer :: xx_v(:,:)
7190: ....
7191: call MatDenseGetArrayF90(x,xx_v,ierr)
7192: a = xx_v(3,1)
7193: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7194: .ve
7196: Level: advanced
7198: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7200: M*/
7203: /*MC
7204: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7206: Synopsis:
7207: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7209: Not collective
7211: Input Parameter:
7212: . x - matrix
7214: Output Parameters:
7215: + xx_v - the Fortran90 pointer to the array
7216: - ierr - error code
7218: Example of Usage:
7219: .vb
7220: PetscScalar, pointer :: xx_v(:)
7221: ....
7222: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7223: a = xx_v(3)
7224: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7225: .ve
7227: Level: advanced
7229: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7231: Concepts: matrices^accessing array
7233: M*/
7235: /*MC
7236: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7237: accessed with MatSeqAIJGetArrayF90().
7239: Synopsis:
7240: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7242: Not collective
7244: Input Parameters:
7245: + x - matrix
7246: - xx_v - the Fortran90 pointer to the array
7248: Output Parameter:
7249: . ierr - error code
7251: Example of Usage:
7252: .vb
7253: PetscScalar, pointer :: xx_v(:)
7254: ....
7255: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7256: a = xx_v(3)
7257: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7258: .ve
7260: Level: advanced
7262: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7264: M*/
7269: /*@
7270: MatGetSubMatrix - Gets a single submatrix on the same number of processors
7271: as the original matrix.
7273: Collective on Mat
7275: Input Parameters:
7276: + mat - the original matrix
7277: . isrow - parallel IS containing the rows this processor should obtain
7278: . iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7279: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7281: Output Parameter:
7282: . newmat - the new submatrix, of the same type as the old
7284: Level: advanced
7286: Notes:
7287: The submatrix will be able to be multiplied with vectors using the same layout as iscol.
7289: The rows in isrow will be sorted into the same order as the original matrix on each process.
7291: The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7292: the MatGetSubMatrix() routine will create the newmat for you. Any additional calls
7293: to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7294: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7295: you are finished using it.
7297: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7298: the input matrix.
7300: If iscol is NULL then all columns are obtained (not supported in Fortran).
7302: Example usage:
7303: Consider the following 8x8 matrix with 34 non-zero values, that is
7304: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7305: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7306: as follows:
7308: .vb
7309: 1 2 0 | 0 3 0 | 0 4
7310: Proc0 0 5 6 | 7 0 0 | 8 0
7311: 9 0 10 | 11 0 0 | 12 0
7312: -------------------------------------
7313: 13 0 14 | 15 16 17 | 0 0
7314: Proc1 0 18 0 | 19 20 21 | 0 0
7315: 0 0 0 | 22 23 0 | 24 0
7316: -------------------------------------
7317: Proc2 25 26 27 | 0 0 28 | 29 0
7318: 30 0 0 | 31 32 33 | 0 34
7319: .ve
7321: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7323: .vb
7324: 2 0 | 0 3 0 | 0
7325: Proc0 5 6 | 7 0 0 | 8
7326: -------------------------------
7327: Proc1 18 0 | 19 20 21 | 0
7328: -------------------------------
7329: Proc2 26 27 | 0 0 28 | 29
7330: 0 0 | 31 32 33 | 0
7331: .ve
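   A minimal sketch of the call sequence (error checking is omitted; nlocalrows/localrows and nlocalcols/localcols are illustrative names for each process's pieces of isrow and iscol):
.vb
      IS  isrow,iscol;
      Mat S;
      ISCreateGeneral(PETSC_COMM_WORLD,nlocalrows,localrows,PETSC_COPY_VALUES,&isrow);
      ISCreateGeneral(PETSC_COMM_WORLD,nlocalcols,localcols,PETSC_COPY_VALUES,&iscol);
      MatGetSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&S);
      /* ... later, after the entries of mat change but not its nonzero structure ... */
      MatGetSubMatrix(mat,isrow,iscol,MAT_REUSE_MATRIX,&S);
      MatDestroy(&S);
      ISDestroy(&isrow);
      ISDestroy(&iscol);
.ve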
7334: Concepts: matrices^submatrices
7336: .seealso: MatGetSubMatrices()
7337: @*/
7338: PetscErrorCode MatGetSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7339: {
7341: PetscMPIInt size;
7342: Mat *local;
7343: IS iscoltmp;
7352: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7353: MatCheckPreallocated(mat,1);
7354: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7356: if (!iscol) {
7357: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7358: } else {
7359: iscoltmp = iscol;
7360: }
7362: /* if original matrix is on just one processor then use submatrix generated */
7363: if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7364: MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7365: if (!iscol) {ISDestroy(&iscoltmp);}
7366: return(0);
7367: } else if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1) {
7368: MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7369: *newmat = *local;
7370: PetscFree(local);
7371: if (!iscol) {ISDestroy(&iscoltmp);}
7372: return(0);
7373: } else if (!mat->ops->getsubmatrix) {
7374: /* Create a new matrix type that implements the operation using the full matrix */
7375: switch (cll) {
7376: case MAT_INITIAL_MATRIX:
7377: MatCreateSubMatrix(mat,isrow,iscoltmp,newmat);
7378: break;
7379: case MAT_REUSE_MATRIX:
7380: MatSubMatrixUpdate(*newmat,mat,isrow,iscoltmp);
7381: break;
7382: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7383: }
7384: if (!iscol) {ISDestroy(&iscoltmp);}
7385: return(0);
7386: }
7388: if (!mat->ops->getsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7389: (*mat->ops->getsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7390: if (!iscol) {ISDestroy(&iscoltmp);}
7391: if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7392: return(0);
7393: }
7397: /*@
7398: MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7399: used during the assembly process to store values that belong to
7400: other processors.
7402: Not Collective
7404: Input Parameters:
7405: + mat - the matrix
7406: . size - the initial size of the stash.
7407: - bsize - the initial size of the block-stash(if used).
7409: Options Database Keys:
7410: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
7411: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
7413: Level: intermediate
7415: Notes:
7416: The block-stash is used for values set with MatSetValuesBlocked() while
7417: the stash is used for values set with MatSetValues()
7419: Run with the option -info and look for output of the form
7420: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7421: to determine the appropriate value, MM, to use for size and
7422: MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7423: to determine the appropriate value, BMM, to use for bsize
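   Example of Usage (a minimal sketch; the sizes are illustrative and would normally be chosen from the -info output described above; error checking is omitted):
.vb
      MatStashSetInitialSize(mat,10000,1000);
      /* ... MatSetValues()/MatSetValuesBlocked() calls that generate off-process entries ... */
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve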
7425: Concepts: stash^setting matrix size
7426: Concepts: matrices^stash
7428: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
7430: @*/
7431: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7432: {
7438: MatStashSetInitialSize_Private(&mat->stash,size);
7439: MatStashSetInitialSize_Private(&mat->bstash,bsize);
7440: return(0);
7441: }
7445: /*@
7446: MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
7447: the matrix
7449: Neighbor-wise Collective on Mat
7451: Input Parameters:
7452: + A - the matrix
7453: . x,y - the vectors
7454: - w - where the result is stored
7456: Level: intermediate
7458: Notes:
7459: w may be the same vector as y.
7461: This allows one to use either the restriction or interpolation (its transpose)
7462: matrix to do the interpolation
7464: Concepts: interpolation
7466: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7468: @*/
7469: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7470: {
7472: PetscInt M,N,Ny;
7480: MatCheckPreallocated(A,1);
7481: MatGetSize(A,&M,&N);
7482: VecGetSize(y,&Ny);
7483: if (M == Ny) {
7484: MatMultAdd(A,x,y,w);
7485: } else {
7486: MatMultTransposeAdd(A,x,y,w);
7487: }
7488: return(0);
7489: }
7493: /*@
7494: MatInterpolate - y = A*x or A'*x depending on the shape of
7495: the matrix
7497: Neighbor-wise Collective on Mat
7499: Input Parameters:
7500: + A - the matrix
7501: - x,y - the vectors
7503: Level: intermediate
7505: Notes:
7506: This allows one to use either the restriction or interpolation (its transpose)
7507: matrix to do the interpolation
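   Example of Usage (a minimal sketch assuming A is an interpolation operator whose rows correspond to the fine grid and whose columns correspond to the coarse grid; error checking is omitted):
.vb
      Vec xc,xf;
      MatGetVecs(A,&xc,&xf);       /* xc conforms to the columns (coarse), xf to the rows (fine) */
      MatInterpolate(A,xc,xf);     /* applies A, since xf matches the row dimension */
      MatRestrict(A,xf,xc);        /* applies A', since xc does not match the row dimension */
      VecDestroy(&xc);
      VecDestroy(&xf);
.ve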
7509: Concepts: matrices^interpolation
7511: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7513: @*/
7514: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
7515: {
7517: PetscInt M,N,Ny;
7524: MatCheckPreallocated(A,1);
7525: MatGetSize(A,&M,&N);
7526: VecGetSize(y,&Ny);
7527: if (M == Ny) {
7528: MatMult(A,x,y);
7529: } else {
7530: MatMultTranspose(A,x,y);
7531: }
7532: return(0);
7533: }
7537: /*@
7538: MatRestrict - y = A*x or A'*x
7540: Neighbor-wise Collective on Mat
7542: Input Parameters:
7543: + A - the matrix
7544: - x,y - the vectors
7546: Level: intermediate
7548: Notes:
7549: This allows one to use either the restriction or interpolation (its transpose)
7550: matrix to do the restriction
7552: Concepts: matrices^restriction
7554: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
7556: @*/
7557: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
7558: {
7560: PetscInt M,N,Ny;
7567: MatCheckPreallocated(A,1);
7569: MatGetSize(A,&M,&N);
7570: VecGetSize(y,&Ny);
7571: if (M == Ny) {
7572: MatMult(A,x,y);
7573: } else {
7574: MatMultTranspose(A,x,y);
7575: }
7576: return(0);
7577: }
7581: /*@
7582: MatGetNullSpace - retrieves the null space attached to a matrix.
7584: Logically Collective on Mat and MatNullSpace
7586: Input Parameter:
7587: . mat - the matrix
7588: Output Parameter:
      . nullsp - the null space object attached to the matrix, or NULL if none has been set
7590: Level: developer
7592: Notes:
7593: This returns the null space attached with MatSetNullSpace(); the reference count is not increased, so the caller should not destroy the returned object.
7595: Concepts: null space^attaching to matrix
7597: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace()
7598: @*/
7599: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
7600: {
7605: *nullsp = mat->nullsp;
7606: return(0);
7607: }
7611: /*@
7612: MatSetNullSpace - attaches a null space to a matrix.
7613: This null space will be removed from the resulting vector whenever
7614: MatMult() is called
7616: Logically Collective on Mat and MatNullSpace
7618: Input Parameters:
7619: + mat - the matrix
7620: - nullsp - the null space object
7622: Level: advanced
7624: Notes:
7625: This null space is used by solvers. Overwrites any previous null space that may have been attached
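   Example of Usage (a minimal sketch attaching the constant null space, as arises for example in a pure Neumann problem; error checking is omitted):
.vb
      MatNullSpace nullsp;
      MatNullSpaceCreate(PetscObjectComm((PetscObject)mat),PETSC_TRUE,0,NULL,&nullsp);
      MatSetNullSpace(mat,nullsp);
      MatNullSpaceDestroy(&nullsp);   /* the matrix keeps its own reference */
.ve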
7627: Concepts: null space^attaching to matrix
7629: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace()
7630: @*/
7631: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
7632: {
7639: MatCheckPreallocated(mat,1);
7640: PetscObjectReference((PetscObject)nullsp);
7641: MatNullSpaceDestroy(&mat->nullsp);
7643: mat->nullsp = nullsp;
7644: return(0);
7645: }
7649: /*@
7650: MatSetNearNullSpace - attaches a near null space to a matrix.
7651: This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
7653: Logically Collective on Mat and MatNullSpace
7655: Input Parameters:
7656: + mat - the matrix
7657: - nullsp - the null space object
7659: Level: advanced
7661: Notes:
7662: Overwrites any previous near null space that may have been attached
7664: Concepts: null space^attaching to matrix
7666: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace()
7667: @*/
7668: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
7669: {
7676: MatCheckPreallocated(mat,1);
7677: PetscObjectReference((PetscObject)nullsp);
7678: MatNullSpaceDestroy(&mat->nearnullsp);
7680: mat->nearnullsp = nullsp;
7681: return(0);
7682: }
7686: /*@
7687: MatGetNearNullSpace - Gets the near null space attached with MatSetNearNullSpace()
7689: Not Collective
7691: Input Parameters:
7692: . mat - the matrix
7694: Output Parameters:
7695: . nullsp - the null space object, NULL if not set
7697: Level: developer
7699: Concepts: null space^attaching to matrix
7701: .seealso: MatSetNearNullSpace(), MatGetNullSpace()
7702: @*/
7703: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
7704: {
7709: MatCheckPreallocated(mat,1);
7710: *nullsp = mat->nearnullsp;
7711: return(0);
7712: }
7716: /*@C
7717: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
7719: Collective on Mat
7721: Input Parameters:
7722: + mat - the matrix
7723: . row - row/column permutation
7724: - info - options for the factorization, including the expected fill factor (>= 1.0) and the level of fill for ICC(k); see MatFactorInfo
7727: Notes:
7728: Probably really in-place only when level of fill is zero, otherwise allocates
7729: new space to store factored matrix and deletes previous memory.
7731: Most users should employ the simplified KSP interface for linear solvers
7732: instead of working directly with matrix algebra routines such as this.
7733: See, e.g., KSPCreate().
7735: Level: developer
7737: Concepts: matrices^incomplete Cholesky factorization
7738: Concepts: Cholesky factorization
7740: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
7742: Developer Note: fortran interface is not autogenerated as the f90
7743: interface definition cannot be generated correctly [due to MatFactorInfo]
7745: @*/
7746: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
7747: {
7755: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
7756: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7757: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7758: if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7759: MatCheckPreallocated(mat,1);
7760: (*mat->ops->iccfactor)(mat,row,info);
7761: PetscObjectStateIncrease((PetscObject)mat);
7762: return(0);
7763: }
7767: /*@
7768: MatSetValuesAdifor - Sets values computed with automatic differentiation into a matrix.
7770: Not Collective
7772: Input Parameters:
7773: + mat - the matrix
7774: . nl - leading dimension of v
7775: - v - the values compute with ADIFOR
7777: Level: developer
7779: Notes:
7780: Must call MatSetColoring() before using this routine. Also this matrix must already
7781: have its nonzero pattern determined.
7783: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
7784: MatSetValues(), MatSetColoring()
7785: @*/
7786: PetscErrorCode MatSetValuesAdifor(Mat mat,PetscInt nl,void *v)
7787: {
7795: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
7796: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
7797: if (!mat->ops->setvaluesadifor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7798: (*mat->ops->setvaluesadifor)(mat,nl,v);
7799: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
7800: PetscObjectStateIncrease((PetscObject)mat);
7801: return(0);
7802: }
7806: /*@
7807: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
7808: ghosted ones.
7810: Not Collective
7812: Input Parameters:
7813: + mat - the matrix
7814: - diag - the diagonal values, including ghost ones
7816: Level: developer
7818: Notes: Works only for MPIAIJ and MPIBAIJ matrices
7820: .seealso: MatDiagonalScale()
7821: @*/
7822: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
7823: {
7825: PetscMPIInt size;
7832: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
7833: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
7834: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7835: if (size == 1) {
7836: PetscInt n,m;
7837: VecGetSize(diag,&n);
7838: MatGetSize(mat,0,&m);
7839: if (m == n) {
7840: MatDiagonalScale(mat,0,diag);
7841: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
7842: } else {
7843: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
7844: }
7845: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
7846: PetscObjectStateIncrease((PetscObject)mat);
7847: return(0);
7848: }
7852: /*@
7853: MatGetInertia - Gets the inertia from a factored matrix
7855: Collective on Mat
7857: Input Parameter:
7858: . mat - the matrix
7860: Output Parameters:
7861: + nneg - number of negative eigenvalues
7862: . nzero - number of zero eigenvalues
7863: - npos - number of positive eigenvalues
7865: Level: advanced
7867: Notes: Matrix must have been factored by MatCholeskyFactor()
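   Example of Usage (a minimal sketch assuming mat is a symmetric matrix of a type that supports in-place Cholesky factorization, e.g. MATSEQSBAIJ or MATSEQDENSE; error checking is omitted and the factorization overwrites mat):
.vb
      IS            rperm,cperm;
      MatFactorInfo info;
      PetscInt      nneg,nzero,npos;
      MatFactorInfoInitialize(&info);
      MatGetOrdering(mat,MATORDERINGNATURAL,&rperm,&cperm);
      MatCholeskyFactor(mat,rperm,&info);
      MatGetInertia(mat,&nneg,&nzero,&npos);
      ISDestroy(&rperm);
      ISDestroy(&cperm);
.ve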
7870: @*/
7871: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
7872: {
7878: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
7879: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
7880: if (!mat->ops->getinertia) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7881: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
7882: return(0);
7883: }
7885: /* ----------------------------------------------------------------*/
7888: /*@C
7889: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
7891: Neighbor-wise Collective on Mat and Vecs
7893: Input Parameters:
7894: + mat - the factored matrix
7895: - b - the right-hand-side vectors
7897: Output Parameter:
7898: . x - the result vectors
7900: Notes:
7901: The vectors b and x cannot be the same. I.e., one cannot
7902: call MatSolves(A,x,x).
7904: Notes:
7905: Most users should employ the simplified KSP interface for linear solvers
7906: instead of working directly with matrix algebra routines such as this.
7907: See, e.g., KSPCreate().
7909: Level: developer
7911: Concepts: matrices^triangular solves
7913: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
7914: @*/
7915: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
7916: {
7922: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
7923: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
7924: if (!mat->rmap->N && !mat->cmap->N) return(0);
7926: if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7927: MatCheckPreallocated(mat,1);
7928: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
7929: (*mat->ops->solves)(mat,b,x);
7930: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
7931: return(0);
7932: }
7936: /*@
7937: MatIsSymmetric - Test whether a matrix is symmetric
7939: Collective on Mat
7941: Input Parameters:
7942: + A - the matrix to test
7943: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)
7945: Output Parameters:
7946: . flg - the result
7948: Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
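   Example of Usage (a minimal sketch; the tolerance 1.e-10 is illustrative and error checking is omitted):
.vb
      PetscBool symm;
      MatIsSymmetric(A,1.e-10,&symm);
      if (symm) {
        MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);   /* let solvers exploit the symmetry */
      }
.ve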
7950: Level: intermediate
7952: Concepts: matrix^symmetry
7954: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
7955: @*/
7956: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
7957: {
7964: if (!A->symmetric_set) {
7965: if (!A->ops->issymmetric) {
7966: MatType mattype;
7967: MatGetType(A,&mattype);
7968: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
7969: }
7970: (*A->ops->issymmetric)(A,tol,flg);
7971: if (!tol) {
7972: A->symmetric_set = PETSC_TRUE;
7973: A->symmetric = *flg;
7974: if (A->symmetric) {
7975: A->structurally_symmetric_set = PETSC_TRUE;
7976: A->structurally_symmetric = PETSC_TRUE;
7977: }
7978: }
7979: } else if (A->symmetric) {
7980: *flg = PETSC_TRUE;
7981: } else if (!tol) {
7982: *flg = PETSC_FALSE;
7983: } else {
7984: if (!A->ops->issymmetric) {
7985: MatType mattype;
7986: MatGetType(A,&mattype);
7987: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
7988: }
7989: (*A->ops->issymmetric)(A,tol,flg);
7990: }
7991: return(0);
7992: }
7996: /*@
7997: MatIsHermitian - Test whether a matrix is Hermitian
7999: Collective on Mat
8001: Input Parameters:
8002: + A - the matrix to test
8003: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact Hermitian)
8005: Output Parameters:
8006: . flg - the result
8008: Level: intermediate
8010: Concepts: matrix^symmetry
8012: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8013: MatIsSymmetricKnown(), MatIsSymmetric()
8014: @*/
8015: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8016: {
8023: if (!A->hermitian_set) {
8024: if (!A->ops->ishermitian) {
8025: MatType mattype;
8026: MatGetType(A,&mattype);
8027: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8028: }
8029: (*A->ops->ishermitian)(A,tol,flg);
8030: if (!tol) {
8031: A->hermitian_set = PETSC_TRUE;
8032: A->hermitian = *flg;
8033: if (A->hermitian) {
8034: A->structurally_symmetric_set = PETSC_TRUE;
8035: A->structurally_symmetric = PETSC_TRUE;
8036: }
8037: }
8038: } else if (A->hermitian) {
8039: *flg = PETSC_TRUE;
8040: } else if (!tol) {
8041: *flg = PETSC_FALSE;
8042: } else {
8043: if (!A->ops->ishermitian) {
8044: MatType mattype;
8045: MatGetType(A,&mattype);
8046: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8047: }
8048: (*A->ops->ishermitian)(A,tol,flg);
8049: }
8050: return(0);
8051: }
8055: /*@
8056: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8058: Not Collective
8060: Input Parameter:
8061: . A - the matrix to check
8063: Output Parameters:
8064: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8065: - flg - the result
8067: Level: advanced
8069: Concepts: matrix^symmetry
8071: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8072: if you want it explicitly checked
8074: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8075: @*/
8076: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8077: {
8082: if (A->symmetric_set) {
8083: *set = PETSC_TRUE;
8084: *flg = A->symmetric;
8085: } else {
8086: *set = PETSC_FALSE;
8087: }
8088: return(0);
8089: }
8093: /*@
8094: MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.
8096: Not Collective
8098: Input Parameter:
8099: . A - the matrix to check
8101: Output Parameters:
8102: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8103: - flg - the result
8105: Level: advanced
8107: Concepts: matrix^symmetry
8109: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8110: if you want it explicitly checked
8112: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8113: @*/
8114: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8115: {
8120: if (A->hermitian_set) {
8121: *set = PETSC_TRUE;
8122: *flg = A->hermitian;
8123: } else {
8124: *set = PETSC_FALSE;
8125: }
8126: return(0);
8127: }
8131: /*@
8132: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8134: Collective on Mat
8136: Input Parameter:
8137: . A - the matrix to test
8139: Output Parameters:
8140: . flg - the result
8142: Level: intermediate
8144: Concepts: matrix^symmetry
8146: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8147: @*/
8148: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8149: {
8155: if (!A->structurally_symmetric_set) {
8156: if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetry");
8157: (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);
8159: A->structurally_symmetric_set = PETSC_TRUE;
8160: }
8161: *flg = A->structurally_symmetric;
8162: return(0);
8163: }
8167: extern PetscErrorCode MatStashGetInfo_Private(MatStash*,PetscInt*,PetscInt*);
8168: /*@
8169: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8170: to be communicated to other processors during the MatAssemblyBegin/End() process
8172: Not collective
8174: Input Parameter:
8175: . mat - the matrix
8177: Output Parameters:
8178: + nstash - the size of the stash
8179: . reallocs - the number of additional mallocs incurred.
8180: . bnstash - the size of the block stash
8181: - breallocs - the number of additional mallocs incurred in the block stash
8183: Level: advanced
8185: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8187: @*/
8188: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8189: {
8193: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8194: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8195: return(0);
8196: }
8200: /*@C
8201: MatGetVecs - Get vector(s) compatible with the matrix, i.e. with the same
8202: parallel layout
8204: Collective on Mat
8206: Input Parameter:
8207: . mat - the matrix
8209: Output Parameter:
8210: + right - (optional) vector that the matrix can be multiplied against
8211: - left - (optional) vector that the matrix vector product can be stored in
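   Example of Usage (a minimal sketch; error checking is omitted):
.vb
      Vec x,y;
      MatGetVecs(mat,&x,&y);   /* x conforms to the columns of mat, y to its rows */
      MatMult(mat,x,y);
      VecDestroy(&x);
      VecDestroy(&y);
.ve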
8213: Level: advanced
8215: .seealso: MatCreate()
8216: @*/
8217: PetscErrorCode MatGetVecs(Mat mat,Vec *right,Vec *left)
8218: {
8224: MatCheckPreallocated(mat,1);
8225: if (mat->ops->getvecs) {
8226: (*mat->ops->getvecs)(mat,right,left);
8227: } else {
8228: PetscMPIInt size;
8229: MPI_Comm_size(PetscObjectComm((PetscObject)mat), &size);
8230: if (right) {
8231: VecCreate(PetscObjectComm((PetscObject)mat),right);
8232: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8233: VecSetBlockSize(*right,mat->cmap->bs);
8234: VecSetType(*right,VECSTANDARD);
8235: PetscLayoutReference(mat->cmap,&(*right)->map);
8236: }
8237: if (left) {
8238: VecCreate(PetscObjectComm((PetscObject)mat),left);
8239: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8240: VecSetBlockSize(*left,mat->rmap->bs);
8241: VecSetType(*left,VECSTANDARD);
8242: PetscLayoutReference(mat->rmap,&(*left)->map);
8243: }
8244: }
8245: return(0);
8246: }
8250: /*@C
8251: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8252: with default values.
8254: Not Collective
8256: Input Parameters:
8257: . info - the MatFactorInfo data structure
8260: Notes: The solvers are generally used through the KSP and PC objects, for example
8261: PCLU, PCILU, PCCHOLESKY, PCICC
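   Example of Usage (a minimal sketch; rowperm and colperm are assumed to be index sets obtained earlier, e.g. with MatGetOrdering(), the fill value is illustrative, and error checking is omitted):
.vb
      MatFactorInfo info;
      MatFactorInfoInitialize(&info);
      info.fill = 2.0;                     /* expected fill as a ratio of nonzeros */
      MatLUFactor(mat,rowperm,colperm,&info);
.ve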
8263: Level: developer
8265: .seealso: MatFactorInfo
8267: Developer Note: fortran interface is not autogenerated as the f90
8268: interface definition cannot be generated correctly [due to MatFactorInfo]
8270: @*/
8272: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8273: {
8277: PetscMemzero(info,sizeof(MatFactorInfo));
8278: return(0);
8279: }
8283: /*@
8284: MatPtAP - Creates the matrix product C = P^T * A * P
8286: Neighbor-wise Collective on Mat
8288: Input Parameters:
8289: + A - the matrix
8290: . P - the projection matrix
8291: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8292: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P))
8294: Output Parameters:
8295: . C - the product matrix
8297: Notes:
8298: C will be created and must be destroyed by the user with MatDestroy().
8300: This routine is currently only implemented for pairs of AIJ matrices and classes
8301: which inherit from AIJ.
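   Example of Usage (a minimal sketch computing a Galerkin coarse-grid operator; A and P are assumed to be assembled AIJ matrices of compatible sizes and error checking is omitted):
.vb
      Mat C;
      MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);
      /* ... later, after the entries of A change but not its nonzero pattern ... */
      MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);
      MatDestroy(&C);
.ve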
8303: Level: intermediate
8305: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
8306: @*/
8307: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
8308: {
8310: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
8311: PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
8312: PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
8313: PetscBool viatranspose=PETSC_FALSE,viamatmatmatmult=PETSC_FALSE;
8316: PetscOptionsGetBool(((PetscObject)A)->prefix,"-matptap_viatranspose",&viatranspose,NULL);
8317: PetscOptionsGetBool(((PetscObject)A)->prefix,"-matptap_viamatmatmatmult",&viamatmatmatmult,NULL);
8321: MatCheckPreallocated(A,1);
8322: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8323: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8326: MatCheckPreallocated(P,2);
8327: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8328: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8330: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8331: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8333: if (scall == MAT_REUSE_MATRIX) {
8336: if (viatranspose || viamatmatmatmult) {
8337: Mat Pt;
8338: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
8339: if (viamatmatmatmult) {
8340: MatMatMatMult(Pt,A,P,scall,fill,C);
8341: } else {
8342: Mat AP;
8343: MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
8344: MatMatMult(Pt,AP,scall,fill,C);
8345: MatDestroy(&AP);
8346: }
8347: MatDestroy(&Pt);
8348: } else {
8349: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
8350: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
8351: (*(*C)->ops->ptapnumeric)(A,P,*C);
8352: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
8353: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
8354: }
8355: return(0);
8356: }
8358: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
8359: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8361: fA = A->ops->ptap;
8362: fP = P->ops->ptap;
8363: if (fP == fA) {
8364: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
8365: ptap = fA;
8366: } else {
8367: /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
8368: char ptapname[256];
8369: PetscStrcpy(ptapname,"MatPtAP_");
8370: PetscStrcat(ptapname,((PetscObject)A)->type_name);
8371: PetscStrcat(ptapname,"_");
8372: PetscStrcat(ptapname,((PetscObject)P)->type_name);
8373: PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
8374: PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
8375: if (!ptap) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
8376: }
8378: if (viatranspose || viamatmatmatmult) {
8379: Mat Pt;
8380: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
8381: if (viamatmatmatmult) {
8382: MatMatMatMult(Pt,A,P,scall,fill,C);
8383: PetscInfo(*C,"MatPtAP via MatMatMatMult\n");
8384: } else {
8385: Mat AP;
8386: MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
8387: MatMatMult(Pt,AP,scall,fill,C);
8388: MatDestroy(&AP);
8389: PetscInfo(*C,"MatPtAP via MatTranspose and MatMatMult\n");
8390: }
8391: MatDestroy(&Pt);
8392: } else {
8393: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
8394: (*ptap)(A,P,scall,fill,C);
8395: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
8396: }
8397: return(0);
8398: }
8402: /*@
8403: MatPtAPNumeric - Computes the matrix product C = P^T * A * P
8405: Neighbor-wise Collective on Mat
8407: Input Parameters:
8408: + A - the matrix
8409: - P - the projection matrix
8411: Output Parameters:
8412: . C - the product matrix
8414: Notes:
8415: C must have been created by calling MatPtAPSymbolic and must be destroyed by
8416: the user using MatDestroy().
8418: This routine is currently only implemented for pairs of AIJ matrices and classes
8419: which inherit from AIJ. C will be of type MATAIJ.
8421: Level: intermediate
8423: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
8424: @*/
8425: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
8426: {
8432: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8433: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8436: MatCheckPreallocated(P,2);
8437: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8438: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8441: MatCheckPreallocated(C,3);
8442: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8443: if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
8444: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8445: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8446: if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
8447: MatCheckPreallocated(A,1);
8449: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
8450: (*C->ops->ptapnumeric)(A,P,C);
8451: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
8452: return(0);
8453: }
8457: /*@
8458: MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P
8460: Neighbor-wise Collective on Mat
8462: Input Parameters:
8463: + A - the matrix
8464: . P - the projection matrix
      - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P))
8466: Output Parameters:
8467: . C - the (i,j) structure of the product matrix
8469: Notes:
8470: C will be created and must be destroyed by the user with MatDestroy().
8472: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
8473: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
8474: this (i,j) structure by calling MatPtAPNumeric().
8476: Level: intermediate
8478: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
8479: @*/
8480: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
8481: {
8487: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8488: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8489: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8492: MatCheckPreallocated(P,2);
8493: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8494: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8497: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8498: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8499: MatCheckPreallocated(A,1);
8500: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
8501: (*A->ops->ptapsymbolic)(A,P,fill,C);
8502: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
8504: /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
8505: return(0);
8506: }
8510: /*@
8511: MatRARt - Creates the matrix product C = R * A * R^T
8513: Neighbor-wise Collective on Mat
8515: Input Parameters:
8516: + A - the matrix
8517: . R - the projection matrix
8518: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8519: - fill - expected fill as ratio of nnz(C)/nnz(A)
8521: Output Parameters:
8522: . C - the product matrix
8524: Notes:
8525: C will be created and must be destroyed by the user with MatDestroy().
8527: This routine is currently only implemented for pairs of AIJ matrices and classes
8528: which inherit from AIJ.
8530: Level: intermediate
8532: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
8533: @*/
8534: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
8535: {
8541: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8542: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8545: MatCheckPreallocated(R,2);
8546: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8547: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8549: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8550: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8551: MatCheckPreallocated(A,1);
8553: if (!A->ops->rart) {
8554: MatType mattype;
8555: MatGetType(A,&mattype);
8556: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix of type <%s> does not support RARt",mattype);
8557: }
8558: PetscLogEventBegin(MAT_RARt,A,R,0,0);
8559: (*A->ops->rart)(A,R,scall,fill,C);
8560: PetscLogEventEnd(MAT_RARt,A,R,0,0);
8561: return(0);
8562: }
8566: /*@
8567: MatRARtNumeric - Computes the matrix product C = R * A * R^T
8569: Neighbor-wise Collective on Mat
8571: Input Parameters:
8572: + A - the matrix
8573: - R - the projection matrix
8575: Output Parameters:
8576: . C - the product matrix
8578: Notes:
8579: C must have been created by calling MatRARtSymbolic and must be destroyed by
8580: the user using MatDestroy().
8582: This routine is currently only implemented for pairs of AIJ matrices and classes
8583: which inherit from AIJ. C will be of type MATAIJ.
8585: Level: intermediate
8587: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
8588: @*/
8589: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
8590: {
8596: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8597: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8600: MatCheckPreallocated(R,2);
8601: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8602: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8605: MatCheckPreallocated(C,3);
8606: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8607: if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
8608: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8609: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8610: if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
8611: MatCheckPreallocated(A,1);
8613: PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
8614: (*A->ops->rartnumeric)(A,R,C);
8615: PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
8616: return(0);
8617: }
8621: /*@
8622: MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T
8624: Neighbor-wise Collective on Mat
8626: Input Parameters:
8627: + A - the matrix
8628: . R - the projection matrix
      - fill - expected fill as ratio of nnz(C)/nnz(A)
8630: Output Parameters:
8631: . C - the (i,j) structure of the product matrix
8633: Notes:
8634: C will be created and must be destroyed by the user with MatDestroy().
8636: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
8637: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
8638: this (i,j) structure by calling MatRARtNumeric().
8640: Level: intermediate
8642: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
8643: @*/
8644: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
8645: {
8651: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8652: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8653: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8656: MatCheckPreallocated(R,2);
8657: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8658: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8661: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8662: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8663: MatCheckPreallocated(A,1);
8664: PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
8665: (*A->ops->rartsymbolic)(A,R,fill,C);
8666: PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);
8668: MatSetBlockSize(*C,A->rmap->bs);
8669: return(0);
8670: }
8674: /*@
8675: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
8677: Neighbor-wise Collective on Mat
8679: Input Parameters:
8680: + A - the left matrix
8681: . B - the right matrix
8682: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8683: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate;
8684: if the result is a dense matrix this is irrelevant
8686: Output Parameters:
8687: . C - the product matrix
8689: Notes:
8690: Unless scall is MAT_REUSE_MATRIX C will be created.
8692: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
8694: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
8695: actually needed.
8697: If you have many matrices with the same non-zero structure to multiply, you
8698: should either
8699: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
8700: $ 2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
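   Example of Usage (a minimal sketch of option 1) above; A and B are assumed to be assembled and dimensionally compatible and error checking is omitted):
.vb
      Mat C;
      MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
      /* ... change the entries of A and/or B, keeping their nonzero patterns ... */
      MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
      MatDestroy(&C);
.ve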
8702: Level: intermediate
8704: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP()
8705: @*/
8706: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
8707: {
8709: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
8710: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
8711: PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
8716: MatCheckPreallocated(A,1);
8717: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8718: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8721: MatCheckPreallocated(B,2);
8722: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8723: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8725: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
8726: if (scall == MAT_REUSE_MATRIX) {
8729: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
8730: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
8731: (*(*C)->ops->matmultnumeric)(A,B,*C);
8732: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
8733: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
8734: return(0);
8735: }
8736: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
8737: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8739: fA = A->ops->matmult;
8740: fB = B->ops->matmult;
8741: if (fB == fA) {
8742: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
8743: mult = fB;
8744: } else {
8745: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
8746: char multname[256];
8747: PetscStrcpy(multname,"MatMatMult_");
8748: PetscStrcat(multname,((PetscObject)A)->type_name);
8749: PetscStrcat(multname,"_");
8750: PetscStrcat(multname,((PetscObject)B)->type_name);
8751: PetscStrcat(multname,"_C"); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
8752: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
8753: if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
8754: }
8755: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
8756: (*mult)(A,B,scall,fill,C);
8757: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
8758: return(0);
8759: }
8763: /*@
8764: MatMatMultSymbolic - Performs construction, preallocation, and computes the ij structure
8765: of the matrix-matrix product C=A*B. Call this routine before calling MatMatMultNumeric().
8767: Neighbor-wise Collective on Mat
8769: Input Parameters:
8770: + A - the left matrix
8771: . B - the right matrix
8772: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate,
8773: if C is a dense matrix this is irrelevant
8775: Output Parameters:
8776: . C - the product matrix
8778: Notes:
8779: C will be created and must be destroyed by the user with MatDestroy().
8781: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
8782: actually needed.
8784: This routine is currently implemented for
8785: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
8786: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
8787: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
8789: Level: intermediate
8791: Developers Note: There are ways to estimate the number of nonzeros in the resulting product, see for example, http://arxiv.org/abs/1006.4173
8792: We should incorporate them into PETSc.
8794: .seealso: MatMatMult(), MatMatMultNumeric()
8795: @*/
8796: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
8797: {
8799: PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
8800: PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
8801: PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;
8806: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8807: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8811: MatCheckPreallocated(B,2);
8812: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8813: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8816: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
8817: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
8818: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8819: MatCheckPreallocated(A,1);
8821: Asymbolic = A->ops->matmultsymbolic;
8822: Bsymbolic = B->ops->matmultsymbolic;
8823: if (Asymbolic == Bsymbolic) {
8824: if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
8825: symbolic = Bsymbolic;
8826: } else { /* dispatch based on the type of A and B */
8827: char symbolicname[256];
8828: PetscStrcpy(symbolicname,"MatMatMultSymbolic_");
8829: PetscStrcat(symbolicname,((PetscObject)A)->type_name);
8830: PetscStrcat(symbolicname,"_");
8831: PetscStrcat(symbolicname,((PetscObject)B)->type_name);
8832: PetscStrcat(symbolicname,"_C");
8833: PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
8834: if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
8835: }
8836: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
8837: (*symbolic)(A,B,fill,C);
8838: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
8839: return(0);
8840: }
8844: /*@
8845: MatMatMultNumeric - Performs the numeric matrix-matrix product.
8846: Call this routine after first calling MatMatMultSymbolic().
8848: Neighbor-wise Collective on Mat
8850: Input Parameters:
8851: + A - the left matrix
8852: - B - the right matrix
8854: Output Parameters:
8855: . C - the product matrix, which was created by MatMatMultSymbolic() or a call to MatMatMult().
8857: Notes:
8858: C must have been created with MatMatMultSymbolic().
8860: This routine is currently implemented for
8861: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
8862: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
8863: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
8865: Level: intermediate
8867: .seealso: MatMatMult(), MatMatMultSymbolic()
8868: @*/
8869: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
8870: {
8874: MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
8875: return(0);
8876: }
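/*
   Example of the symbolic/numeric split (a minimal sketch, assuming A and B are assembled AIJ matrices
   whose numerical values change between products but whose nonzero patterns do not; nsteps is a
   placeholder loop count):

      Mat      C;
      PetscInt i;
      MatMatMultSymbolic(A,B,PETSC_DEFAULT,&C);  // determine and preallocate the structure of C once
      for (i=0; i<nsteps; i++) {
        // ... update the entries of A and/or B ...
        MatMatMultNumeric(A,B,C);                // recompute only the numerical values of C
      }
      MatDestroy(&C);
*/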
8880: /*@
8881: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
8883: Neighbor-wise Collective on Mat
8885: Input Parameters:
8886: + A - the left matrix
8887: . B - the right matrix
8888: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8889: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
8891: Output Parameters:
8892: . C - the product matrix
8894: Notes:
8895: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
8897: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
8899: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
8900: actually needed.
8902: This routine is currently only implemented for pairs of SeqAIJ matrices. C will be of type MATSEQAIJ.
8904: Level: intermediate
8906: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult(), MatPtAP()
8907: @*/
8908: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
8909: {
8911: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
8912: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
8917: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8918: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8921: MatCheckPreallocated(B,2);
8922: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8923: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8925: if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
8926: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
8927: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8928: MatCheckPreallocated(A,1);
8930: fA = A->ops->mattransposemult;
8931: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
8932: fB = B->ops->mattransposemult;
8933: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
8934: if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
8936: PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
8937: if (scall == MAT_INITIAL_MATRIX) {
8938: PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
8939: (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
8940: PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
8941: }
8942: PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
8943: (*A->ops->mattransposemultnumeric)(A,B,*C);
8944: PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
8945: PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
8946: return(0);
8947: }
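/*
   Example of usage of MatMatTransposeMult() (a minimal sketch, assuming A and B are assembled MATSEQAIJ
   matrices with the same number of columns):

      Mat C;
      MatMatTransposeMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);  // C = A*B^T, C is created
      MatDestroy(&C);
*/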
8951: /*@
8952: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
8954: Neighbor-wise Collective on Mat
8956: Input Parameters:
8957: + A - the left matrix
8958: . B - the right matrix
8959: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8960: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
8962: Output Parameters:
8963: . C - the product matrix
8965: Notes:
8966: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
8968: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
8970: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
8971: actually needed.
8973: This routine is currently implemented for pairs of AIJ matrices (and classes which inherit from SeqAIJ)
8974: and for pairs of SeqDense matrices. C will be of the same type as the input matrices.
8976: Level: intermediate
8978: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
8979: @*/
8980: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
8981: {
8983: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
8984: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
8985: PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;
8990: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8991: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8994: MatCheckPreallocated(B,2);
8995: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8996: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8998: if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
8999: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9000: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9001: MatCheckPreallocated(A,1);
9003: fA = A->ops->transposematmult;
9004: fB = B->ops->transposematmult;
9005: if (fB==fA) {
9006: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9007: transposematmult = fA;
9008: } else {
9009: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9010: char multname[256];
9011: PetscStrcpy(multname,"MatTransposeMatMult_");
9012: PetscStrcat(multname,((PetscObject)A)->type_name);
9013: PetscStrcat(multname,"_");
9014: PetscStrcat(multname,((PetscObject)B)->type_name);
9015: PetscStrcat(multname,"_C"); /* e.g., multname = "MatTransposeMatMult_seqdense_seqaij_C" */
9016: PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9017: if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9018: }
9019: PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9020: (*transposematmult)(A,B,scall,fill,C);
9021: PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9022: return(0);
9023: }
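/*
   Example of usage of MatTransposeMatMult() (a minimal sketch, assuming A and B are assembled AIJ matrices
   with the same number of rows; MAT_REUSE_MATRIX requires that the nonzero patterns are unchanged):

      Mat C;
      MatTransposeMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);  // C = A^T*B, C is created
      // ... change the numerical values of A and/or B, keeping their nonzero patterns ...
      MatTransposeMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);    // recompute into the existing C
      MatDestroy(&C);
*/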
9027: /*@
9028: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9030: Neighbor-wise Collective on Mat
9032: Input Parameters:
9033: + A - the left matrix
9034: . B - the middle matrix
9035: . C - the right matrix
9036: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9037: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B) + nnz(C)), use PETSC_DEFAULT if you do not have a good estimate;
9038: if the result is a dense matrix this is irrelevant
9040: Output Parameters:
9041: . D - the product matrix
9043: Notes:
9044: Unless scall is MAT_REUSE_MATRIX D will be created.
9046: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
9048: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9049: actually needed.
9051: If you have many matrices with the same non-zero structure to multiply, you
9052: should either
9053: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9054: $ 2) call MatMatMatMultSymbolic() once and then MatMatMatMultNumeric() for each product needed
9056: Level: intermediate
9058: .seealso: MatMatMult, MatPtAP()
9059: @*/
9060: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
9061: {
9063: PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9064: PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9065: PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9066: PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9071: MatCheckPreallocated(A,1);
9072: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9073: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9076: MatCheckPreallocated(B,2);
9077: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9078: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9081: MatCheckPreallocated(C,3);
9082: if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9083: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9084: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9085: if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
9086: if (scall == MAT_REUSE_MATRIX) {
9089: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9090: (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
9091: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9092: return(0);
9093: }
9094: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9095: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9097: fA = A->ops->matmatmult;
9098: fB = B->ops->matmatmult;
9099: fC = C->ops->matmatmult;
9100: if (fA == fB && fA == fC) {
9101: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9102: mult = fA;
9103: } else {
9104: /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
9105: char multname[256];
9106: PetscStrcpy(multname,"MatMatMatMult_");
9107: PetscStrcat(multname,((PetscObject)A)->type_name);
9108: PetscStrcat(multname,"_");
9109: PetscStrcat(multname,((PetscObject)B)->type_name);
9110: PetscStrcat(multname,"_");
9111: PetscStrcat(multname,((PetscObject)C)->type_name);
9112: PetscStrcat(multname,"_C");
9113: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9114: if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
9115: }
9116: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9117: (*mult)(A,B,C,scall,fill,D);
9118: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9119: return(0);
9120: }
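/*
   Example of usage of MatMatMatMult() following option 1) above (a minimal sketch, assuming A, B and C are
   assembled AIJ matrices with compatible dimensions whose nonzero patterns do not change between calls):

      Mat D;
      MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);  // D = A*B*C, D is created
      // ... change the numerical values of A, B and/or C ...
      MatMatMatMult(A,B,C,MAT_REUSE_MATRIX,PETSC_DEFAULT,&D);    // recompute into the existing D
      MatDestroy(&D);
*/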
9124: /*@C
9125: MatGetRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.
9127: Collective on Mat
9129: Input Parameters:
9130: + mat - the matrix
9131: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
9132: . subcomm - MPI communicator split from the communicator in which mat resides (or MPI_COMM_NULL if nsubcomm is used)
9133: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9135: Output Parameter:
9136: . matredundant - redundant matrix
9138: Notes:
9139: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
9140: original matrix has not changed since the last call to MatGetRedundantMatrix().
9142: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
9143: calling it.
9145: Only MPIAIJ matrices are supported.
9147: Level: advanced
9149: Concepts: subcommunicator
9150: Concepts: duplicate matrix
9152: .seealso: MatDestroy()
9153: @*/
9154: PetscErrorCode MatGetRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
9155: {
9160: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
9163: }
9164: if (!mat->ops->getredundantmatrix) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
9165: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9166: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9167: MatCheckPreallocated(mat,1);
9169: PetscLogEventBegin(MAT_GetRedundantMatrix,mat,0,0,0);
9170: (*mat->ops->getredundantmatrix)(mat,nsubcomm,subcomm,reuse,matredundant);
9171: PetscLogEventEnd(MAT_GetRedundantMatrix,mat,0,0,0);
9172: return(0);
9173: }
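/*
   Example of usage of MatGetRedundantMatrix() (a minimal sketch, assuming mat is an assembled MPIAIJ matrix
   and nsubcomm is a placeholder count that divides the size of mat's communicator; passing MPI_COMM_NULL
   lets the routine form the subcommunicators itself):

      Mat matredundant;
      MatGetRedundantMatrix(mat,nsubcomm,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&matredundant);
      // ... each subcommunicator now holds a full copy of mat ...
      MatDestroy(&matredundant);
*/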
9177: /*@C
9178: MatGetMultiProcBlock - Create multiple [bjacobi] 'parallel submatrices' from
9179: a given 'mat' object. Each submatrix can span multiple procs.
9181: Collective on Mat
9183: Input Parameters:
9184: + mat - the matrix
9185: . subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
9186: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9188: Output Parameter:
9189: . subMat - parallel submatrices, each spanning a given subcomm
9191: Notes:
9192: The submatrix partition across processors is dictated by 'subComm', a
9193: communicator obtained by MPI_Comm_split(comm). The split
9194: is not restricted to groups of consecutive original ranks.
9196: Due to the MPI_Comm_split() usage, the parallel layout of the submatrices
9197: maps directly to the layout of the original matrix [with respect to the local
9198: row,col partitioning]. So the original 'DiagonalMat' naturally maps
9199: into the 'DiagonalMat' of the subMat, hence it is used directly from
9200: the subMat. However, the offDiagMat loses some columns - and these are
9201: reconstructed with MatSetValues().
9203: Level: advanced
9205: Concepts: subcommunicator
9206: Concepts: submatrices
9208: .seealso: MatGetSubMatrices()
9209: @*/
9210: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
9211: {
9213: PetscMPIInt commsize,subCommSize;
9216: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
9217: MPI_Comm_size(subComm,&subCommSize);
9218: if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);
9220: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
9221: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
9222: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
9223: return(0);
9224: }
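/*
   Example of usage of MatGetMultiProcBlock() (a minimal sketch, assuming nsub is a placeholder group size
   that divides the number of ranks in mat's communicator; the color rank/nsub groups consecutive ranks):

      MPI_Comm    subComm;
      Mat         subMat;
      PetscMPIInt rank;
      MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
      MPI_Comm_split(PetscObjectComm((PetscObject)mat),rank/nsub,rank,&subComm);
      MatGetMultiProcBlock(mat,subComm,MAT_INITIAL_MATRIX,&subMat);
      // ... use subMat, e.g. as the local block of a block Jacobi preconditioner ...
      MatDestroy(&subMat);
      MPI_Comm_free(&subComm);
*/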
9228: /*@
9229: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
9231: Not Collective
9233: Input Arguments:
9234: mat - matrix to extract local submatrix from
9235: isrow - local row indices for submatrix
9236: iscol - local column indices for submatrix
9238: Output Arguments:
9239: submat - the submatrix
9241: Level: intermediate
9243: Notes:
9244: The submat should be returned with MatRestoreLocalSubMatrix().
9246: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
9247: the same as that of mat, it may be PETSC_COMM_SELF, or it may be some other subcommunicator of mat's.
9249: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
9250: MatSetValuesBlockedLocal() will also be implemented.
9252: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef()
9253: @*/
9254: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9255: {
9265: if (mat->ops->getlocalsubmatrix) {
9266: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
9267: } else {
9268: MatCreateLocalRef(mat,isrow,iscol,submat);
9269: }
9270: return(0);
9271: }
9275: /*@
9276: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
9278: Not Collective
9280: Input Arguments:
9281: mat - matrix to extract local submatrix from
9282: isrow - local row indices for submatrix
9283: iscol - local column indices for submatrix
9284: submat - the submatrix
9286: Level: intermediate
9288: .seealso: MatGetLocalSubMatrix()
9289: @*/
9290: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9291: {
9300: if (*submat) {
9302: }
9304: if (mat->ops->restorelocalsubmatrix) {
9305: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
9306: } else {
9307: MatDestroy(submat);
9308: }
9309: *submat = NULL;
9310: return(0);
9311: }
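/*
   Example of usage of MatGetLocalSubMatrix()/MatRestoreLocalSubMatrix() (a minimal sketch, assuming isrow
   and iscol are index sets in the local numbering of mat and row, col, val are placeholder values
   describing one entry to add in that numbering):

      Mat submat;
      MatGetLocalSubMatrix(mat,isrow,iscol,&submat);
      MatSetValuesLocal(submat,1,&row,1,&col,&val,ADD_VALUES);  // submat always supports MatSetValuesLocal()
      MatRestoreLocalSubMatrix(mat,isrow,iscol,&submat);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
*/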
9313: /* --------------------------------------------------------*/
9316: /*@
9317: MatFindZeroDiagonals - Finds all the rows of a matrix that have a zero or missing diagonal entry
9319: Collective on Mat
9321: Input Parameter:
9322: . mat - the matrix
9324: Output Parameter:
9325: . is - if any rows have zero diagonals this contains the list of them
9327: Level: developer
9329: Concepts: matrix^zero diagonal entries
9331: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9332: @*/
9333: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
9334: {
9340: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9341: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9343: if (!mat->ops->findzerodiagonals) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find zero diagonals defined");
9344: (*mat->ops->findzerodiagonals)(mat,is);
9345: return(0);
9346: }
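/*
   Example of usage of MatFindZeroDiagonals() (a minimal sketch, assuming mat is an assembled matrix of a
   type that implements the operation):

      IS       zerodiags;
      PetscInt nzd;
      MatFindZeroDiagonals(mat,&zerodiags);
      ISGetLocalSize(zerodiags,&nzd);  // number of local rows with a zero or missing diagonal entry
      ISDestroy(&zerodiags);
*/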
9350: /*@C
9351: MatInvertBlockDiagonal - Inverts the block diagonal entries.
9353: Collective on Mat
9355: Input Parameters:
9356: . mat - the matrix
9358: Output Parameters:
9359: . values - the block inverses in column major order (FORTRAN-like)
9361: Note:
9362: This routine is not available from Fortran.
9364: Level: advanced
9365: @*/
9366: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
9367: {
9372: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9373: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9374: if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
9375: (*mat->ops->invertblockdiagonal)(mat,values);
9376: return(0);
9377: }
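/*
   Example of usage of MatInvertBlockDiagonal() (a minimal sketch, assuming mat is an assembled matrix whose
   type supports the operation, e.g. BAIJ; the returned array points to storage owned by mat and should not
   be freed by the caller):

      const PetscScalar *values;
      PetscInt          bs;
      MatGetBlockSize(mat,&bs);
      MatInvertBlockDiagonal(mat,&values);
      // values holds the bs-by-bs inverses of the diagonal blocks, each stored in column-major order
*/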
9381: /*@C
9382: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
9383: via MatTransposeColoringCreate().
9385: Collective on MatTransposeColoring
9387: Input Parameter:
9388: . c - coloring context
9390: Level: intermediate
9392: .seealso: MatTransposeColoringCreate()
9393: @*/
9394: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
9395: {
9396: PetscErrorCode ierr;
9397: MatTransposeColoring matcolor=*c;
9400: if (!matcolor) return(0);
9401: if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}
9403: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
9404: PetscFree(matcolor->rows);
9405: PetscFree(matcolor->den2sp);
9406: PetscFree(matcolor->colorforcol);
9407: PetscFree(matcolor->columns);
9408: if (matcolor->brows>0) {
9409: PetscFree(matcolor->lstart);
9410: }
9411: PetscHeaderDestroy(c);
9412: return(0);
9413: }
9417: /*@C
9418: MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
9419: a MatTransposeColoring context has been created, computes a dense B^T by applying
9420: the MatTransposeColoring to the sparse matrix B.
9422: Collective on MatTransposeColoring
9424: Input Parameters:
9425: + coloring - coloring context created with MatTransposeColoringCreate()
9426: . B - sparse matrix B
9427: - Btdense - symbolic dense matrix B^T
9429: Output Parameter:
9430: . Btdense - dense matrix B^T
9432: Options Database Keys:
9433: + -mat_transpose_coloring_view - Activates basic viewing of the coloring
9434: . -mat_transpose_coloring_view_draw - Activates drawing of coloring
9435: - -mat_transpose_coloring_view_info - Activates viewing of coloring info
9437: Level: intermediate
9439: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy()
9441: .keywords: coloring
9442: @*/
9443: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
9444: {
9452: if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
9453: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
9454: return(0);
9455: }
9459: /*@C
9460: MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
9461: a MatTransposeColoring context has been created and a dense matrix Cden=A*Btdense
9462: in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse
9463: matrix Csp from Cden.
9465: Collective on MatTransposeColoring
9467: Input Parameters:
9468: + matcoloring - coloring context created with MatTransposeColoringCreate()
9469: - Cden - matrix product of a sparse matrix and the dense matrix Btdense
9471: Output Parameter:
9472: . Csp - sparse matrix
9474: Options Database Keys:
9475: + -mat_multtranspose_coloring_view - Activates basic viewing of the coloring
9476: . -mat_multtranspose_coloring_view_draw - Activates drawing of coloring
9477: - -mat_multtranspose_coloring_view_info - Activates viewing of coloring info
9479: Level: intermediate
9481: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
9483: .keywords: coloring
9484: @*/
9485: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
9486: {
9494: if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
9495: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
9496: return(0);
9497: }
9501: /*@C
9502: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
9504: Collective on Mat
9506: Input Parameters:
9507: + mat - the matrix product C
9508: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
9510: Output Parameter:
9511: . color - the new coloring context
9513: Level: intermediate
9515: .seealso: MatTransposeColoringDestroy(), MatTransposeColoringSetFromOptions(), MatTransColoringApplySpToDen(),
9516: MatTransColoringApplyDenToSp(), MatTransposeColoringView()
9517: @*/
9518: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
9519: {
9520: MatTransposeColoring c;
9521: MPI_Comm comm;
9522: PetscErrorCode ierr;
9525: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
9526: PetscObjectGetComm((PetscObject)mat,&comm);
9527: PetscHeaderCreate(c,_p_MatTransposeColoring,int,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,0);
9529: c->ctype = iscoloring->ctype;
9530: if (mat->ops->transposecoloringcreate) {
9531: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
9532: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");
9534: *color = c;
9535: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
9536: return(0);
9537: }
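/*
   Example of the MatTransposeColoring workflow for Csp=A*B^T (a minimal sketch, assuming the sparse product
   Csp has already been created symbolically, iscoloring is a coloring of Csp obtained e.g. with
   MatColoringCreate()/MatColoringApply(), and Btdense and Cden are suitably sized dense work matrices):

      MatTransposeColoring matcoloring;
      MatTransposeColoringCreate(Csp,iscoloring,&matcoloring);
      MatTransColoringApplySpToDen(matcoloring,B,Btdense);   // compress sparse B into the dense B^T
      // ... compute the dense product Cden = A*Btdense ...
      MatTransColoringApplyDenToSp(matcoloring,Cden,Csp);    // scatter the dense result back into sparse Csp
      MatTransposeColoringDestroy(&matcoloring);
*/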