Actual source code: matrix.c
petsc-3.6.0 2015-06-09
2: /*
3: This is where the abstract matrix operations are defined
4: */
6: #include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
7: #include <petsc/private/vecimpl.h>
8: #include <petsc/private/isimpl.h>
10: /* Logging support */
11: PetscClassId MAT_CLASSID;
12: PetscClassId MAT_COLORING_CLASSID;
13: PetscClassId MAT_FDCOLORING_CLASSID;
14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_GetSubMatrices, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_GetSubMatrix;
24: PetscLogEvent MAT_TransposeColoringCreate;
25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
34: PetscLogEvent MAT_GetMultiProcBlock;
35: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
36: PetscLogEvent MAT_ViennaCLCopyToGPU;
37: PetscLogEvent MAT_Merge,MAT_Residual;
38: PetscLogEvent Mat_Coloring_Apply,Mat_Coloring_Comm,Mat_Coloring_Local,Mat_Coloring_ISCreate,Mat_Coloring_SetUp,Mat_Coloring_Weights;
40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};
44: /*@
45: MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated it randomly selects appropriate locations
47:    Logically Collective on Mat
49: Input Parameters:
50: +  x  - the matrix
51: -  rctx - the random number context, formed by PetscRandomCreate(), or NULL,
52:           in which case one will be created internally.
54:    Output Parameter:
55: .  x  - the matrix
57: Example of Usage:
58: .vb
59: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
60:      MatSetRandom(x,rctx);
61:      PetscRandomDestroy(&rctx);
62: .ve
64: Level: intermediate
66:    Concepts: matrices^setting to random
67:    Concepts: random^matrix
69: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
70: @*/
71: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
72: {
74: PetscRandom randObj = NULL;
81: if (!rctx) {
82: MPI_Comm comm;
83: PetscObjectGetComm((PetscObject)x,&comm);
84: PetscRandomCreate(comm,&randObj);
85: PetscRandomSetFromOptions(randObj);
86: rctx = randObj;
87: }
89: PetscLogEventBegin(VEC_SetRandom,x,rctx,0,0);
90: (*x->ops->setrandom)(x,rctx);
91: PetscLogEventEnd(VEC_SetRandom,x,rctx,0,0);
93: x->assembled = PETSC_TRUE;
94: PetscRandomDestroy(&randObj);
95: return(0);
96: }
101: /*@
102: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
104: Input Parameter:
105: .  mat - the matrix
107: Output Parameter:
108: . keptrows - the rows that are not completely zero
110: Level: intermediate
112: @*/
113: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
114: {
119: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
120: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
121: if (!mat->ops->findnonzerorows) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not coded for this matrix type");
122: (*mat->ops->findnonzerorows)(mat,keptrows);
123: return(0);
124: }
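/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): finds the rows
   of an assembled matrix that contain at least one nonzero and reports how many there are on
   this process.  The helper name ExampleCountNonzeroRows is hypothetical.
*/
static PetscErrorCode ExampleCountNonzeroRows(Mat A)
{
  PetscErrorCode ierr;
  IS             keptrows;
  PetscInt       nkept;

  ierr = MatFindNonzeroRows(A,&keptrows);CHKERRQ(ierr);
  if (keptrows) {            /* some implementations may return NULL when no row is completely zero */
    ierr = ISGetLocalSize(keptrows,&nkept);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_SELF,"%D rows with nonzeros on this process\n",nkept);CHKERRQ(ierr);
    ierr = ISDestroy(&keptrows);CHKERRQ(ierr);
  }
  return 0;
}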
128: /*@
129: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
131: Not Collective
133: Input Parameters:
134: . A - the matrix
136: Output Parameters:
137: . a - the diagonal part (which is a SEQUENTIAL matrix)
139: Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
140: Use caution, as the reference count on the returned matrix is not incremented and it is used as
141: part of the containing MPI Mat's normal operation.
143: Level: advanced
145: @*/
146: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
147: {
148: PetscErrorCode ierr,(*f)(Mat,Mat*);
149: PetscMPIInt size;
155: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
156: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
157: PetscObjectQueryFunction((PetscObject)A,"MatGetDiagonalBlock_C",&f);
158: if (f) {
159: (*f)(A,a);
160: return(0);
161: } else if (size == 1) {
162: *a = A;
163: } else {
164: MatType mattype;
165: MatGetType(A,&mattype);
166: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix type %s does not support getting diagonal block",mattype);
167: }
168: return(0);
169: }
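/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): obtains the
   on-process (sequential) diagonal block of a matrix and prints its dimensions.  The returned
   matrix is not reference counted and must not be destroyed by the caller.  The helper name
   ExampleDiagonalBlockSize is hypothetical.
*/
static PetscErrorCode ExampleDiagonalBlockSize(Mat A)
{
  PetscErrorCode ierr;
  Mat            Ad;
  PetscInt       m,n;

  ierr = MatGetDiagonalBlock(A,&Ad);CHKERRQ(ierr);
  ierr = MatGetSize(Ad,&m,&n);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"diagonal block is %D x %D\n",m,n);CHKERRQ(ierr);
  return 0;
}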
173: /*@
174:    MatGetTrace - Gets the trace of a matrix, i.e., the sum of the diagonal entries.
176: Collective on Mat
178: Input Parameters:
179: . mat - the matrix
181: Output Parameter:
182: . trace - the sum of the diagonal entries
184: Level: advanced
186: @*/
187: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
188: {
190: Vec diag;
193: MatCreateVecs(mat,&diag,NULL);
194: MatGetDiagonal(mat,diag);
195: VecSum(diag,trace);
196: VecDestroy(&diag);
197: return(0);
198: }
202: /*@
203: MatRealPart - Zeros out the imaginary part of the matrix
205: Logically Collective on Mat
207: Input Parameters:
208: . mat - the matrix
210: Level: advanced
213: .seealso: MatImaginaryPart()
214: @*/
215: PetscErrorCode MatRealPart(Mat mat)
216: {
222: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
223: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
224: if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
225: MatCheckPreallocated(mat,1);
226: (*mat->ops->realpart)(mat);
227: #if defined(PETSC_HAVE_CUSP)
228: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
229: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
230: }
231: #endif
232: #if defined(PETSC_HAVE_VIENNACL)
233: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
234: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
235: }
236: #endif
237: return(0);
238: }
242: /*@C
243: MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix
245: Collective on Mat
247: Input Parameter:
248: . mat - the matrix
250: Output Parameters:
251: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
252: - ghosts - the global indices of the ghost points
254: Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()
256: Level: advanced
258: @*/
259: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
260: {
266: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
267: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
268: if (!mat->ops->getghosts) {
269: if (nghosts) *nghosts = 0;
270: if (ghosts) *ghosts = 0;
271: } else {
272: (*mat->ops->getghosts)(mat,nghosts,ghosts);
273: }
274: return(0);
275: }
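/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): uses the ghost
   information of a parallel matrix to create a compatible ghosted vector with VecCreateGhost(),
   as suggested in the notes above.  The helper name ExampleGhostedVector is hypothetical, and
   the matrix is assumed to be a parallel AIJ matrix that provides ghost information.
*/
static PetscErrorCode ExampleGhostedVector(Mat A,Vec *x)
{
  PetscErrorCode ierr;
  PetscInt       n,N,nghosts;
  const PetscInt *ghosts;

  ierr = MatGetLocalSize(A,NULL,&n);CHKERRQ(ierr);   /* local number of columns */
  ierr = MatGetSize(A,NULL,&N);CHKERRQ(ierr);        /* global number of columns */
  ierr = MatGetGhosts(A,&nghosts,&ghosts);CHKERRQ(ierr);
  ierr = VecCreateGhost(PetscObjectComm((PetscObject)A),n,N,nghosts,ghosts,x);CHKERRQ(ierr);
  return 0;
}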
280: /*@
281: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
283: Logically Collective on Mat
285: Input Parameters:
286: . mat - the matrix
288: Level: advanced
291: .seealso: MatRealPart()
292: @*/
293: PetscErrorCode MatImaginaryPart(Mat mat)
294: {
300: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
301: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
302: if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
303: MatCheckPreallocated(mat,1);
304: (*mat->ops->imaginarypart)(mat);
305: #if defined(PETSC_HAVE_CUSP)
306: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
307: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
308: }
309: #endif
310: #if defined(PETSC_HAVE_VIENNACL)
311: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
312: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
313: }
314: #endif
315: return(0);
316: }
320: /*@
321:    MatMissingDiagonal - Determines whether a sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
323: Collective on Mat
325: Input Parameter:
326: . mat - the matrix
328: Output Parameters:
329: + missing - is any diagonal missing
330: - dd - first diagonal entry that is missing (optional)
332: Level: advanced
335: .seealso: MatRealPart()
336: @*/
337: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
338: {
344: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
345: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
346: if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
347: (*mat->ops->missingdiagonal)(mat,missing,dd);
348: return(0);
349: }
353: /*@C
354: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
355: for each row that you get to ensure that your application does
356:    not leak memory.
358: Not Collective
360: Input Parameters:
361: + mat - the matrix
362: - row - the row to get
364: Output Parameters:
365: + ncols - if not NULL, the number of nonzeros in the row
366: . cols - if not NULL, the column numbers
367: - vals - if not NULL, the values
369: Notes:
370: This routine is provided for people who need to have direct access
371: to the structure of a matrix. We hope that we provide enough
372: high-level matrix routines that few users will need it.
374: MatGetRow() always returns 0-based column indices, regardless of
375: whether the internal representation is 0-based (default) or 1-based.
377: For better efficiency, set cols and/or vals to NULL if you do
378: not wish to extract these quantities.
380: The user can only examine the values extracted with MatGetRow();
381: the values cannot be altered. To change the matrix entries, one
382: must use MatSetValues().
384: You can only have one call to MatGetRow() outstanding for a particular
385: matrix at a time, per processor. MatGetRow() can only obtain rows
386: associated with the given processor, it cannot get rows from the
387: other processors; for that we suggest using MatGetSubMatrices(), then
388:    MatGetRow() on the submatrix. The row index passed to MatGetRow()
389:    is in the global numbering of rows.
391: Fortran Notes:
392: The calling sequence from Fortran is
393: .vb
394: MatGetRow(matrix,row,ncols,cols,values,ierr)
395: Mat matrix (input)
396: integer row (input)
397: integer ncols (output)
398: integer cols(maxcols) (output)
399:         double precision (or double complex) values(maxcols) (output)
400: .ve
401: where maxcols >= maximum nonzeros in any row of the matrix.
404: Caution:
405: Do not try to change the contents of the output arrays (cols and vals).
406: In some cases, this may corrupt the matrix.
408: Level: advanced
410: Concepts: matrices^row access
412: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatGetSubMatrices(), MatGetDiagonal()
413: @*/
414: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
415: {
417: PetscInt incols;
422: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
423: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
424: if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
425: MatCheckPreallocated(mat,1);
426: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
427: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
428: if (ncols) *ncols = incols;
429: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
430: return(0);
431: }
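/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): loops over the
   locally owned rows of an assembled matrix with MatGetRow()/MatRestoreRow(), summing the
   absolute values in each row.  Every MatGetRow() is paired with a MatRestoreRow(), as the
   manual page above requires.  The helper name ExampleRowSums is hypothetical.
*/
static PetscErrorCode ExampleRowSums(Mat A)
{
  PetscErrorCode    ierr;
  PetscInt          i,j,rstart,rend,ncols;
  const PetscInt    *cols;
  const PetscScalar *vals;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    PetscReal rowsum = 0.0;
    ierr = MatGetRow(A,i,&ncols,&cols,&vals);CHKERRQ(ierr);
    for (j=0; j<ncols; j++) rowsum += PetscAbsScalar(vals[j]);
    ierr = PetscPrintf(PETSC_COMM_SELF,"row %D: %D nonzeros, absolute row sum %g\n",i,ncols,(double)rowsum);CHKERRQ(ierr);
    ierr = MatRestoreRow(A,i,&ncols,&cols,&vals);CHKERRQ(ierr);
  }
  return 0;
}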
435: /*@
436: MatConjugate - replaces the matrix values with their complex conjugates
438: Logically Collective on Mat
440: Input Parameters:
441: . mat - the matrix
443: Level: advanced
445: .seealso: VecConjugate()
446: @*/
447: PetscErrorCode MatConjugate(Mat mat)
448: {
449: #if defined(PETSC_USE_COMPLEX)
454: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
455: if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
456: (*mat->ops->conjugate)(mat);
457: #if defined(PETSC_HAVE_CUSP)
458: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
459: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
460: }
461: #endif
462: #if defined(PETSC_HAVE_VIENNACL)
463: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
464: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
465: }
466: #endif
467: return(0);
468: #else
469: return 0;
470: #endif
471: }
475: /*@C
476: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
478: Not Collective
480: Input Parameters:
481: + mat - the matrix
482: . row - the row to get
483: . ncols, cols - the number of nonzeros and their columns
484: -  vals - if not NULL, the values
486: Notes:
487: This routine should be called after you have finished examining the entries.
489:    This routine zeros out ncols, cols, and vals. This is to prevent accidental
490:    use of the arrays after they have been restored. If you pass NULL, it will
491:    not zero the pointers. Any use of cols or vals after MatRestoreRow() is invalid.
493: Fortran Notes:
494: The calling sequence from Fortran is
495: .vb
496: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
497: Mat matrix (input)
498: integer row (input)
499: integer ncols (output)
500: integer cols(maxcols) (output)
501:         double precision (or double complex) values(maxcols) (output)
502: .ve
503: Where maxcols >= maximum nonzeros in any row of the matrix.
505: In Fortran MatRestoreRow() MUST be called after MatGetRow()
506: before another call to MatGetRow() can be made.
508: Level: advanced
510: .seealso: MatGetRow()
511: @*/
512: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
513: {
519: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
520: if (!mat->ops->restorerow) return(0);
521: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
522: if (ncols) *ncols = 0;
523: if (cols) *cols = NULL;
524: if (vals) *vals = NULL;
525: return(0);
526: }
530: /*@
531:    MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrices in MATSBAIJ format.
532:    You should call MatRestoreRowUpperTriangular() after calling MatGetRow()/MatRestoreRow() to disable the flag.
534: Not Collective
536:    Input Parameter:
537: .  mat - the matrix
539: Notes:
540:    The flag ensures that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
542: Level: advanced
544: Concepts: matrices^row access
546: .seealso: MatRestoreRowUpperTriangular()
547: @*/
548: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
549: {
555: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
556: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
557: if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
558: MatCheckPreallocated(mat,1);
559: (*mat->ops->getrowuppertriangular)(mat);
560: return(0);
561: }
565: /*@
566:    MatRestoreRowUpperTriangular - Disables calls to MatGetRow() for matrices in MATSBAIJ format.
568: Not Collective
570:    Input Parameter:
571: .  mat - the matrix
573: Notes:
574:    This routine should be called after you have finished calling MatGetRow()/MatRestoreRow().
577: Level: advanced
579: .seealso: MatGetRowUpperTriangular()
580: @*/
581: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
582: {
587: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
588: if (!mat->ops->restorerowuppertriangular) return(0);
589: (*mat->ops->restorerowuppertriangular)(mat);
590: return(0);
591: }
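/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): brackets row
   access on a MATSBAIJ matrix with MatGetRowUpperTriangular()/MatRestoreRowUpperTriangular(),
   as required for that format; only the upper triangular part of the row is returned.  The
   helper name ExampleSBAIJRowAccess is hypothetical.
*/
static PetscErrorCode ExampleSBAIJRowAccess(Mat A,PetscInt row)
{
  PetscErrorCode    ierr;
  PetscInt          ncols;
  const PetscInt    *cols;
  const PetscScalar *vals;

  ierr = MatGetRowUpperTriangular(A);CHKERRQ(ierr);
  ierr = MatGetRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);      /* upper triangular part of the row */
  ierr = MatRestoreRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
  ierr = MatRestoreRowUpperTriangular(A);CHKERRQ(ierr);
  return 0;
}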
595: /*@C
596: MatSetOptionsPrefix - Sets the prefix used for searching for all
597: Mat options in the database.
599: Logically Collective on Mat
601:    Input Parameters:
602: + A - the Mat context
603: - prefix - the prefix to prepend to all option names
605: Notes:
606: A hyphen (-) must NOT be given at the beginning of the prefix name.
607: The first character of all runtime options is AUTOMATICALLY the hyphen.
609: Level: advanced
611: .keywords: Mat, set, options, prefix, database
613: .seealso: MatSetFromOptions()
614: @*/
615: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
616: {
621: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
622: return(0);
623: }
627: /*@C
628: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
629: Mat options in the database.
631: Logically Collective on Mat
633: Input Parameters:
634: + A - the Mat context
635: - prefix - the prefix to prepend to all option names
637: Notes:
638: A hyphen (-) must NOT be given at the beginning of the prefix name.
639: The first character of all runtime options is AUTOMATICALLY the hyphen.
641: Level: advanced
643: .keywords: Mat, append, options, prefix, database
645: .seealso: MatGetOptionsPrefix()
646: @*/
647: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
648: {
653: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
654: return(0);
655: }
659: /*@C
660:    MatGetOptionsPrefix - Gets the prefix used for searching for all
661: Mat options in the database.
663: Not Collective
665: Input Parameter:
666: . A - the Mat context
668: Output Parameter:
669: . prefix - pointer to the prefix string used
671:    Notes: On the Fortran side, the user should pass in a string 'prefix' of
672: sufficient length to hold the prefix.
674: Level: advanced
676: .keywords: Mat, get, options, prefix, database
678: .seealso: MatAppendOptionsPrefix()
679: @*/
680: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
681: {
686: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
687: return(0);
688: }
692: /*@
693:    MatSetUp - Sets up the internal matrix data structures for later use.
695: Collective on Mat
697: Input Parameters:
698: . A - the Mat context
700: Notes:
701: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
703: If a suitable preallocation routine is used, this function does not need to be called.
705: See the Performance chapter of the PETSc users manual for how to preallocate matrices
707: Level: beginner
709: .keywords: Mat, setup
711: .seealso: MatCreate(), MatDestroy()
712: @*/
713: PetscErrorCode MatSetUp(Mat A)
714: {
715: PetscMPIInt size;
720: if (!((PetscObject)A)->type_name) {
721: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
722: if (size == 1) {
723: MatSetType(A, MATSEQAIJ);
724: } else {
725: MatSetType(A, MATMPIAIJ);
726: }
727: }
728: if (!A->preallocated && A->ops->setup) {
729: PetscInfo(A,"Warning not preallocating matrix storage\n");
730: (*A->ops->setup)(A);
731: }
732: A->preallocated = PETSC_TRUE;
733: return(0);
734: }
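/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): a typical
   creation sequence in which MatSetUp() is used instead of a type-specific preallocation
   routine.  The helper name ExampleCreateMatrix and the square size n are placeholders.
*/
static PetscErrorCode ExampleCreateMatrix(MPI_Comm comm,PetscInt n,Mat *A)
{
  PetscErrorCode ierr;

  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  ierr = MatSetUp(*A);CHKERRQ(ierr);      /* default (possibly inefficient) preallocation */
  return 0;
}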
736: #if defined(PETSC_HAVE_SAWS)
737: #include <petscviewersaws.h>
738: #endif
741: /*@C
742: MatView - Visualizes a matrix object.
744: Collective on Mat
746: Input Parameters:
747: + mat - the matrix
748: - viewer - visualization context
750: Notes:
751: The available visualization contexts include
752: + PETSC_VIEWER_STDOUT_SELF - standard output (default)
753: . PETSC_VIEWER_STDOUT_WORLD - synchronized standard
754: output where only the first processor opens
755: the file. All other processors send their
756: data to the first processor to print.
757: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
759: The user can open alternative visualization contexts with
760: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
761: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
762: specified file; corresponding input uses MatLoad()
763: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
764: an X window display
765: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
766: Currently only the sequential dense and AIJ
767: matrix types support the Socket viewer.
769: The user can call PetscViewerSetFormat() to specify the output
770: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
771: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
772: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
773: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
774: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
775: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
776: format common among all matrix types
777: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
778: format (which is in many cases the same as the default)
779: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
780: size and structure (not the matrix entries)
781: . PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
782: the matrix structure
784: Options Database Keys:
785: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
786: . -mat_view ::ascii_info_detail - Prints more detailed info
787: . -mat_view - Prints matrix in ASCII format
788: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
789: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
790: . -display <name> - Sets display name (default is host)
791: . -draw_pause <sec> - Sets number of seconds to pause after display
792: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 11 Using MATLAB with PETSc for details)
793: . -viewer_socket_machine <machine> -
794: . -viewer_socket_port <port> -
795: . -mat_view binary - save matrix to file in binary format
796: - -viewer_binary_filename <name> -
797: Level: beginner
799: Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
800: viewer is used.
802: See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
803: viewer is used.
805: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
806: And then use the following mouse functions:
807: left mouse: zoom in
808: middle mouse: zoom out
809: right mouse: continue with the simulation
811: Concepts: matrices^viewing
812: Concepts: matrices^plotting
813: Concepts: matrices^printing
815: .seealso: PetscViewerSetFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
816: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
817: @*/
818: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
819: {
820: PetscErrorCode ierr;
821: PetscInt rows,cols,rbs,cbs;
822: PetscBool iascii;
823: PetscViewerFormat format;
824: #if defined(PETSC_HAVE_SAWS)
825: PetscBool issaws;
826: #endif
831: if (!viewer) {
832: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
833: }
836: MatCheckPreallocated(mat,1);
838: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
839: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
840: PetscViewerGetFormat(viewer,&format);
841: if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
842: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
843: }
845: #if defined(PETSC_HAVE_SAWS)
846: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
847: #endif
848: if (iascii) {
849: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
850: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
851: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
852: PetscViewerASCIIPushTab(viewer);
853: MatGetSize(mat,&rows,&cols);
854: MatGetBlockSizes(mat,&rbs,&cbs);
855: if (rbs != 1 || cbs != 1) {
856: if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
857: else {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
858: } else {
859: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
860: }
861: if (mat->factortype) {
862: const MatSolverPackage solver;
863: MatFactorGetSolverPackage(mat,&solver);
864: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
865: }
866: if (mat->ops->getinfo) {
867: MatInfo info;
868: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
869: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%g, allocated nonzeros=%g\n",info.nz_used,info.nz_allocated);
870: PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
871: }
872: if (mat->nullsp) {PetscViewerASCIIPrintf(viewer," has attached null space\n");}
873: if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer," has attached near null space\n");}
874: }
875: #if defined(PETSC_HAVE_SAWS)
876: } else if (issaws) {
877: PetscMPIInt rank;
879: PetscObjectName((PetscObject)mat);
880: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
881: if (!((PetscObject)mat)->amsmem && !rank) {
882: PetscObjectViewSAWs((PetscObject)mat,viewer);
883: }
884: #endif
885: }
886: if (mat->ops->view) {
887: PetscViewerASCIIPushTab(viewer);
888: (*mat->ops->view)(mat,viewer);
889: PetscViewerASCIIPopTab(viewer);
890: }
891: if (iascii) {
892: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
893: PetscViewerGetFormat(viewer,&format);
894: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
895: PetscViewerASCIIPopTab(viewer);
896: }
897: }
898: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
899: return(0);
900: }
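/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): prints summary
   information about a matrix to standard output using the ASCII_INFO format discussed above.
   The helper name ExampleViewInfo is hypothetical.
*/
static PetscErrorCode ExampleViewInfo(Mat A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  ierr = PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)A),&viewer);CHKERRQ(ierr);
  ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);
  return 0;
}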
902: #if defined(PETSC_USE_DEBUG)
903: #include <../src/sys/totalview/tv_data_display.h>
904: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
905: {
906: TV_add_row("Local rows", "int", &mat->rmap->n);
907: TV_add_row("Local columns", "int", &mat->cmap->n);
908: TV_add_row("Global rows", "int", &mat->rmap->N);
909: TV_add_row("Global columns", "int", &mat->cmap->N);
910: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
911: return TV_format_OK;
912: }
913: #endif
917: /*@C
918: MatLoad - Loads a matrix that has been stored in binary format
919: with MatView(). The matrix format is determined from the options database.
920: Generates a parallel MPI matrix if the communicator has more than one
921: processor. The default matrix type is AIJ.
923: Collective on PetscViewer
925: Input Parameters:
926: +  newmat - the newly loaded matrix; this needs to have been created with MatCreate()
927: or some related function before a call to MatLoad()
928: - viewer - binary file viewer, created with PetscViewerBinaryOpen()
930: Options Database Keys:
931: .  -matload_block_size <bs> - used with block matrix formats (MATSEQBAIJ, ...) to specify the
932:                               block size
935: Level: beginner
937: Notes:
938:    If the Mat type has not yet been given then MATAIJ is used; call MatSetFromOptions() on the
939:    Mat before calling this routine if you wish to set the type from the options database.
941: MatLoad() automatically loads into the options database any options
942: given in the file filename.info where filename is the name of the file
943: that was passed to the PetscViewerBinaryOpen(). The options in the info
944: file will be ignored if you use the -viewer_binary_skip_info option.
946: If the type or size of newmat is not set before a call to MatLoad, PETSc
947: sets the default matrix type AIJ and sets the local and global sizes.
948: If type and/or size is already set, then the same are used.
950: In parallel, each processor can load a subset of rows (or the
951: entire matrix). This routine is especially useful when a large
952: matrix is stored on disk and only part of it is desired on each
953: processor. For example, a parallel solver may access only some of
954: the rows from each processor. The algorithm used here reads
955: relatively small blocks of data rather than reading the entire
956: matrix and then subsetting it.
958: Notes for advanced users:
959: Most users should not need to know the details of the binary storage
960: format, since MatLoad() and MatView() completely hide these details.
961: But for anyone who's interested, the standard binary matrix storage
962: format is
964: $ int MAT_FILE_CLASSID
965: $ int number of rows
966: $ int number of columns
967: $ int total number of nonzeros
968: $ int *number nonzeros in each row
969: $ int *column indices of all nonzeros (starting index is zero)
970: $ PetscScalar *values of all nonzeros
972: PETSc automatically does the byte swapping for
973: machines that store the bytes reversed, e.g. DEC alpha, freebsd,
974: linux, Windows and the paragon; thus if you write your own binary
975: read/write routines you have to swap the bytes; see PetscBinaryRead()
976: and PetscBinaryWrite() to see how this may be done.
978: .keywords: matrix, load, binary, input
980: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()
982: @*/
983: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
984: {
986: PetscBool isbinary,flg;
991: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
992: if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");
994: if (!((PetscObject)newmat)->type_name) {
995: MatSetType(newmat,MATAIJ);
996: }
998: if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
999: PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1000: (*newmat->ops->load)(newmat,viewer);
1001: PetscLogEventEnd(MAT_Load,viewer,0,0,0);
1003: flg = PETSC_FALSE;
1004: PetscOptionsGetBool(((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1005: if (flg) {
1006: MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1007: MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1008: }
1009: flg = PETSC_FALSE;
1010: PetscOptionsGetBool(((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1011: if (flg) {
1012: MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1013: }
1014: return(0);
1015: }
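/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): loads a matrix
   from a binary file written with MatView().  The helper name ExampleLoadMatrix and the file
   name argument are placeholders.
*/
static PetscErrorCode ExampleLoadMatrix(MPI_Comm comm,const char filename[],Mat *A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  ierr = PetscViewerBinaryOpen(comm,filename,FILE_MODE_READ,&viewer);CHKERRQ(ierr);
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);    /* optionally pick the type from -mat_type */
  ierr = MatLoad(*A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  return 0;
}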
1019: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1020: {
1022: Mat_Redundant *redund = *redundant;
1023: PetscInt i;
1026: if (redund){
1027: if (redund->matseq) { /* via MatGetSubMatrices() */
1028: ISDestroy(&redund->isrow);
1029: ISDestroy(&redund->iscol);
1030: MatDestroy(&redund->matseq[0]);
1031: PetscFree(redund->matseq);
1032: } else {
1033: PetscFree2(redund->send_rank,redund->recv_rank);
1034: PetscFree(redund->sbuf_j);
1035: PetscFree(redund->sbuf_a);
1036: for (i=0; i<redund->nrecvs; i++) {
1037: PetscFree(redund->rbuf_j[i]);
1038: PetscFree(redund->rbuf_a[i]);
1039: }
1040: PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1041: }
1043: if (redund->subcomm) {
1044: PetscCommDestroy(&redund->subcomm);
1045: }
1046: PetscFree(redund);
1047: }
1048: return(0);
1049: }
1053: /*@
1054: MatDestroy - Frees space taken by a matrix.
1056: Collective on Mat
1058: Input Parameter:
1059: . A - the matrix
1061: Level: beginner
1063: @*/
1064: PetscErrorCode MatDestroy(Mat *A)
1065: {
1069: if (!*A) return(0);
1071: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}
1073: /* if memory was published with SAWs then destroy it */
1074: PetscObjectSAWsViewOff((PetscObject)*A);
1075: if ((*A)->ops->destroy) {
1076: (*(*A)->ops->destroy)(*A);
1077: }
1078: MatDestroy_Redundant(&(*A)->redundant);
1079: MatNullSpaceDestroy(&(*A)->nullsp);
1080: MatNullSpaceDestroy(&(*A)->transnullsp);
1081: MatNullSpaceDestroy(&(*A)->nearnullsp);
1082: PetscLayoutDestroy(&(*A)->rmap);
1083: PetscLayoutDestroy(&(*A)->cmap);
1084: PetscHeaderDestroy(A);
1085: return(0);
1086: }
1090: /*@
1091: MatSetValues - Inserts or adds a block of values into a matrix.
1092: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1093: MUST be called after all calls to MatSetValues() have been completed.
1095: Not Collective
1097: Input Parameters:
1098: + mat - the matrix
1099: . v - a logically two-dimensional array of values
1100: . m, idxm - the number of rows and their global indices
1101: . n, idxn - the number of columns and their global indices
1102: - addv - either ADD_VALUES or INSERT_VALUES, where
1103: ADD_VALUES adds values to any existing entries, and
1104: INSERT_VALUES replaces existing entries with new values
1106: Notes:
1107: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1108: MatSetUp() before using this routine
1110: By default the values, v, are row-oriented. See MatSetOption() for other options.
1112: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1113: options cannot be mixed without intervening calls to the assembly
1114: routines.
1116: MatSetValues() uses 0-based row and column numbers in Fortran
1117: as well as in C.
1119: Negative indices may be passed in idxm and idxn, these rows and columns are
1120: simply ignored. This allows easily inserting element stiffness matrices
1121:    with homogeneous Dirichlet boundary conditions that you don't want represented
1122: in the matrix.
1124: Efficiency Alert:
1125: The routine MatSetValuesBlocked() may offer much better efficiency
1126: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1128: Level: beginner
1130: Concepts: matrices^putting entries in
1132: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1133: InsertMode, INSERT_VALUES, ADD_VALUES
1134: @*/
1135: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1136: {
1138: #if defined(PETSC_USE_DEBUG)
1139: PetscInt i,j;
1140: #endif
1145: if (!m || !n) return(0); /* no values to insert */
1149: MatCheckPreallocated(mat,1);
1150: if (mat->insertmode == NOT_SET_VALUES) {
1151: mat->insertmode = addv;
1152: }
1153: #if defined(PETSC_USE_DEBUG)
1154: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1155: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1156: if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1158: for (i=0; i<m; i++) {
1159: for (j=0; j<n; j++) {
1160: if (mat->erroriffpe && PetscIsInfOrNanScalar(v[i*n+j]))
1161: #if defined(PETSC_USE_COMPLEX)
1162: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1163: #else
1164: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1165: #endif
1166: }
1167: }
1168: #endif
1170: if (mat->assembled) {
1171: mat->was_assembled = PETSC_TRUE;
1172: mat->assembled = PETSC_FALSE;
1173: }
1174: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1175: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1176: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1177: #if defined(PETSC_HAVE_CUSP)
1178: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1179: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1180: }
1181: #endif
1182: #if defined(PETSC_HAVE_VIENNACL)
1183: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1184: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1185: }
1186: #endif
1187: return(0);
1188: }
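/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): adds a 2x2
   element "stiffness" block with MatSetValues() and completes assembly.  The helper name
   ExampleInsertElement, the indices i and j, and the values are placeholders; the matrix is
   assumed to have been preallocated or set up with MatSetUp().
*/
static PetscErrorCode ExampleInsertElement(Mat A,PetscInt i,PetscInt j)
{
  PetscErrorCode ierr;
  PetscInt       rows[2],cols[2];
  PetscScalar    v[4] = {4.0,-1.0,-1.0,4.0};    /* row-oriented by default */

  rows[0] = i; rows[1] = j;
  cols[0] = i; cols[1] = j;
  ierr = MatSetValues(A,2,rows,2,cols,v,ADD_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}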
1193: /*@
1194: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1195: values into a matrix
1197: Not Collective
1199: Input Parameters:
1200: + mat - the matrix
1201: . row - the (block) row to set
1202: - v - a logically two-dimensional array of values
1204: Notes:
1205:    The values, v, are column-oriented (for the block version) and sorted
1207: All the nonzeros in the row must be provided
1209: The matrix must have previously had its column indices set
1211: The row must belong to this process
1213: Level: intermediate
1215: Concepts: matrices^putting entries in
1217: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1218: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1219: @*/
1220: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1221: {
1223: PetscInt globalrow;
1229: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1230: MatSetValuesRow(mat,globalrow,v);
1231: #if defined(PETSC_HAVE_CUSP)
1232: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1233: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1234: }
1235: #endif
1236: #if defined(PETSC_HAVE_VIENNACL)
1237: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1238: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1239: }
1240: #endif
1241: return(0);
1242: }
1246: /*@
1247: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1248: values into a matrix
1250: Not Collective
1252: Input Parameters:
1253: + mat - the matrix
1254: . row - the (block) row to set
1255: - v - a logically two-dimensional array of values
1257: Notes:
1258: The values, v, are column-oriented for the block version.
1260: All the nonzeros in the row must be provided
1262:    THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED; usually MatSetValues() is used.
1264: The row must belong to this process
1266: Level: advanced
1268: Concepts: matrices^putting entries in
1270: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1271: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1272: @*/
1273: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1274: {
1280: MatCheckPreallocated(mat,1);
1282: #if defined(PETSC_USE_DEBUG)
1283: if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1284: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1285: #endif
1286: mat->insertmode = INSERT_VALUES;
1288: if (mat->assembled) {
1289: mat->was_assembled = PETSC_TRUE;
1290: mat->assembled = PETSC_FALSE;
1291: }
1292: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1293: if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1294: (*mat->ops->setvaluesrow)(mat,row,v);
1295: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1296: #if defined(PETSC_HAVE_CUSP)
1297: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1298: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1299: }
1300: #endif
1301: #if defined(PETSC_HAVE_VIENNACL)
1302: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1303: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1304: }
1305: #endif
1306: return(0);
1307: }
1311: /*@
1312:    MatSetValuesStencil - Inserts or adds a block of values into a matrix,
1313:    using structured grid indexing.
1315: Not Collective
1317: Input Parameters:
1318: + mat - the matrix
1319: . m - number of rows being entered
1320: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1321: . n - number of columns being entered
1322: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1323: . v - a logically two-dimensional array of values
1324: - addv - either ADD_VALUES or INSERT_VALUES, where
1325: ADD_VALUES adds values to any existing entries, and
1326: INSERT_VALUES replaces existing entries with new values
1328: Notes:
1329: By default the values, v, are row-oriented. See MatSetOption() for other options.
1331: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1332: options cannot be mixed without intervening calls to the assembly
1333: routines.
1335: The grid coordinates are across the entire grid, not just the local portion
1337: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1338: as well as in C.
1340: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1342: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1343: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1345: The columns and rows in the stencil passed in MUST be contained within the
1346: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1347: if you create a DMDA with an overlap of one grid level and on a particular process its first
1348: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1349: first i index you can use in your column and row indices in MatSetStencil() is 5.
1351: In Fortran idxm and idxn should be declared as
1352: $ MatStencil idxm(4,m),idxn(4,n)
1353: and the values inserted using
1354: $ idxm(MatStencil_i,1) = i
1355: $ idxm(MatStencil_j,1) = j
1356: $ idxm(MatStencil_k,1) = k
1357: $ idxm(MatStencil_c,1) = c
1358: etc
1360:    For periodic boundary conditions, use negative indices for values to the left (below 0); these are
1361:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1362:    etc., to obtain values wrapped from the left edge. This works only with the
1363:    DM_BOUNDARY_PERIODIC boundary type.
1365:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1366: a single value per point) you can skip filling those indices.
1368: Inspired by the structured grid interface to the HYPRE package
1369: (http://www.llnl.gov/CASC/hypre)
1371: Efficiency Alert:
1372: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1373: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1375: Level: beginner
1377: Concepts: matrices^putting entries in
1379: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1380: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1381: @*/
1382: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1383: {
1385: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1386: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1387: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1390: if (!m || !n) return(0); /* no values to insert */
1397: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1398: jdxm = buf; jdxn = buf+m;
1399: } else {
1400: PetscMalloc2(m,&bufm,n,&bufn);
1401: jdxm = bufm; jdxn = bufn;
1402: }
1403: for (i=0; i<m; i++) {
1404: for (j=0; j<3-sdim; j++) dxm++;
1405: tmp = *dxm++ - starts[0];
1406: for (j=0; j<dim-1; j++) {
1407: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1408: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1409: }
1410: if (mat->stencil.noc) dxm++;
1411: jdxm[i] = tmp;
1412: }
1413: for (i=0; i<n; i++) {
1414: for (j=0; j<3-sdim; j++) dxn++;
1415: tmp = *dxn++ - starts[0];
1416: for (j=0; j<dim-1; j++) {
1417: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1418: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1419: }
1420: if (mat->stencil.noc) dxn++;
1421: jdxn[i] = tmp;
1422: }
1423: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1424: PetscFree2(bufm,bufn);
1425: return(0);
1426: }
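/*
   Illustrative usage sketch (editorial example, not part of the PETSc source): sets the
   diagonal entry of grid point (i,j) with MatSetValuesStencil().  The helper name
   ExampleStencilDiagonal is hypothetical; the matrix is assumed to come from DMCreateMatrix()
   on a 2d DMDA with one degree of freedom per node, and (i,j) must lie within this process's
   ghosted region.
*/
static PetscErrorCode ExampleStencilDiagonal(Mat A,PetscInt i,PetscInt j,PetscScalar value)
{
  PetscErrorCode ierr;
  MatStencil     row;

  row.i = i; row.j = j; row.k = 0; row.c = 0;
  ierr = MatSetValuesStencil(A,1,&row,1,&row,&value,INSERT_VALUES);CHKERRQ(ierr);
  return 0;
}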
1430: /*@
1431:    MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix,
1432:    using structured grid indexing.
1434: Not Collective
1436: Input Parameters:
1437: + mat - the matrix
1438: . m - number of rows being entered
1439: . idxm - grid coordinates for matrix rows being entered
1440: . n - number of columns being entered
1441: . idxn - grid coordinates for matrix columns being entered
1442: . v - a logically two-dimensional array of values
1443: - addv - either ADD_VALUES or INSERT_VALUES, where
1444: ADD_VALUES adds values to any existing entries, and
1445: INSERT_VALUES replaces existing entries with new values
1447: Notes:
1448: By default the values, v, are row-oriented and unsorted.
1449: See MatSetOption() for other options.
1451: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1452: options cannot be mixed without intervening calls to the assembly
1453: routines.
1455: The grid coordinates are across the entire grid, not just the local portion
1457: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1458: as well as in C.
1460: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1462: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1463: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1465: The columns and rows in the stencil passed in MUST be contained within the
1466: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1467: if you create a DMDA with an overlap of one grid level and on a particular process its first
1468: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1469: first i index you can use in your column and row indices in MatSetStencil() is 5.
1471: In Fortran idxm and idxn should be declared as
1472: $ MatStencil idxm(4,m),idxn(4,n)
1473: and the values inserted using
1474: $ idxm(MatStencil_i,1) = i
1475: $ idxm(MatStencil_j,1) = j
1476: $ idxm(MatStencil_k,1) = k
1477: etc
1479: Negative indices may be passed in idxm and idxn, these rows and columns are
1480: simply ignored. This allows easily inserting element stiffness matrices
1481:    with homogeneous Dirichlet boundary conditions that you don't want represented
1482: in the matrix.
1484: Inspired by the structured grid interface to the HYPRE package
1485: (http://www.llnl.gov/CASC/hypre)
1487: Level: beginner
1489: Concepts: matrices^putting entries in
1491: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1492: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1493: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1494: @*/
1495: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1496: {
1498: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1499: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1500: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1503: if (!m || !n) return(0); /* no values to insert */
1510: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1511: jdxm = buf; jdxn = buf+m;
1512: } else {
1513: PetscMalloc2(m,&bufm,n,&bufn);
1514: jdxm = bufm; jdxn = bufn;
1515: }
1516: for (i=0; i<m; i++) {
1517: for (j=0; j<3-sdim; j++) dxm++;
1518: tmp = *dxm++ - starts[0];
1519: for (j=0; j<sdim-1; j++) {
1520: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1521: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1522: }
1523: dxm++;
1524: jdxm[i] = tmp;
1525: }
1526: for (i=0; i<n; i++) {
1527: for (j=0; j<3-sdim; j++) dxn++;
1528: tmp = *dxn++ - starts[0];
1529: for (j=0; j<sdim-1; j++) {
1530: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1531: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1532: }
1533: dxn++;
1534: jdxn[i] = tmp;
1535: }
1536: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1537: PetscFree2(bufm,bufn);
1538: #if defined(PETSC_HAVE_CUSP)
1539: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1540: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1541: }
1542: #endif
1543: #if defined(PETSC_HAVE_VIENNACL)
1544: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1545: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1546: }
1547: #endif
1548: return(0);
1549: }
1553: /*@
1554: MatSetStencil - Sets the grid information for setting values into a matrix via
1555: MatSetValuesStencil()
1557: Not Collective
1559: Input Parameters:
1560: + mat - the matrix
1561: . dim - dimension of the grid 1, 2, or 3
1562: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1563: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1564: - dof - number of degrees of freedom per node
1567: Inspired by the structured grid interface to the HYPRE package
1568:    (http://www.llnl.gov/CASC/hypre)
1570: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1571: user.
1573: Level: beginner
1575: Concepts: matrices^putting entries in
1577: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1578: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1579: @*/
1580: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1581: {
1582: PetscInt i;
1589: mat->stencil.dim = dim + (dof > 1);
1590: for (i=0; i<dim; i++) {
1591: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1592: mat->stencil.starts[i] = starts[dim-i-1];
1593: }
1594: mat->stencil.dims[dim] = dof;
1595: mat->stencil.starts[dim] = 0;
1596: mat->stencil.noc = (PetscBool)(dof == 1);
1597: return(0);
1598: }
1602: /*@
1603: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1605: Not Collective
1607: Input Parameters:
1608: + mat - the matrix
1609: . v - a logically two-dimensional array of values
1610: . m, idxm - the number of block rows and their global block indices
1611: . n, idxn - the number of block columns and their global block indices
1612: - addv - either ADD_VALUES or INSERT_VALUES, where
1613: ADD_VALUES adds values to any existing entries, and
1614: INSERT_VALUES replaces existing entries with new values
1616: Notes:
1617: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1618: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1620: The m and n count the NUMBER of blocks in the row direction and column direction,
1621: NOT the total number of rows/columns; for example, if the block size is 2 and
1622: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1623: The values in idxm would be 1 2; that is the first index for each block divided by
1624: the block size.
1626: Note that you must call MatSetBlockSize() when constructing this matrix (before
1627: preallocating it).
1629: By default the values, v, are row-oriented, so the layout of
1630: v is the same as for MatSetValues(). See MatSetOption() for other options.
1632: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1633: options cannot be mixed without intervening calls to the assembly
1634: routines.
1636: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1637: as well as in C.
1639: Negative indices may be passed in idxm and idxn, these rows and columns are
1640: simply ignored. This allows easily inserting element stiffness matrices
1641:    with homogeneous Dirichlet boundary conditions that you don't want represented
1642: in the matrix.
1644: Each time an entry is set within a sparse matrix via MatSetValues(),
1645:    internal searching must be done to determine where to place the
1646: data in the matrix storage space. By instead inserting blocks of
1647: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1648: reduced.
1650: Example:
1651: $ Suppose m=n=2 and block size(bs) = 2 The array is
1652: $
1653: $ 1 2 | 3 4
1654: $ 5 6 | 7 8
1655: $ - - - | - - -
1656: $ 9 10 | 11 12
1657: $ 13 14 | 15 16
1658: $
1659: $ v[] should be passed in like
1660: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1661: $
1662: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1663: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
1665: Level: intermediate
1667: Concepts: matrices^putting entries in blocked
1669: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1670: @*/
1671: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1672: {
1678: if (!m || !n) return(0); /* no values to insert */
1682: MatCheckPreallocated(mat,1);
1683: if (mat->insertmode == NOT_SET_VALUES) {
1684: mat->insertmode = addv;
1685: }
1686: #if defined(PETSC_USE_DEBUG)
1687: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1688: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1689: if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1690: #endif
1692: if (mat->assembled) {
1693: mat->was_assembled = PETSC_TRUE;
1694: mat->assembled = PETSC_FALSE;
1695: }
1696: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1697: if (mat->ops->setvaluesblocked) {
1698: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1699: } else {
1700: PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1701: PetscInt i,j,bs,cbs;
1702: MatGetBlockSizes(mat,&bs,&cbs);
1703: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1704: iidxm = buf; iidxn = buf + m*bs;
1705: } else {
1706: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1707: iidxm = bufr; iidxn = bufc;
1708: }
1709: for (i=0; i<m; i++) {
1710: for (j=0; j<bs; j++) {
1711: iidxm[i*bs+j] = bs*idxm[i] + j;
1712: }
1713: }
1714: for (i=0; i<n; i++) {
1715: for (j=0; j<cbs; j++) {
1716: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1717: }
1718: }
1719: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1720: PetscFree2(bufr,bufc);
1721: }
1722: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1723: #if defined(PETSC_HAVE_CUSP)
1724: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1725: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1726: }
1727: #endif
1728: #if defined(PETSC_HAVE_VIENNACL)
1729: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1730: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1731: }
1732: #endif
1733: return(0);
1734: }
1738: /*@
1739: MatGetValues - Gets a block of values from a matrix.
1741: Not Collective; currently only returns a local block
1743: Input Parameters:
1744: + mat - the matrix
1745: . v - a logically two-dimensional array for storing the values
1746: . m, idxm - the number of rows and their global indices
1747: - n, idxn - the number of columns and their global indices
1749: Notes:
1750: The user must allocate space (m*n PetscScalars) for the values, v.
1751: The values, v, are then returned in a row-oriented format,
1752: analogous to that used by default in MatSetValues().
1754: MatGetValues() uses 0-based row and column numbers in
1755: Fortran as well as in C.
1757: MatGetValues() requires that the matrix has been assembled
1758: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1759: MatSetValues() and MatGetValues() CANNOT be made in succession
1760: without intermediate matrix assembly.
1762: Negative row or column indices will be ignored and those locations in v[] will be
1763: left unchanged.
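   Example usage (a minimal sketch, assuming mat has been assembled and the requested rows and columns are stored locally):
.vb
      PetscInt    idxm[2] = {0,1},idxn[3] = {0,1,2};
      PetscScalar v[6];                                /* room for m*n = 2*3 values, returned row-oriented */
      MatGetValues(mat,2,idxm,3,idxn,v);               /* v[i*3+j] = mat(idxm[i],idxn[j]) */
.ve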
1765: Level: advanced
1767: Concepts: matrices^accessing values
1769: .seealso: MatGetRow(), MatGetSubMatrices(), MatSetValues()
1770: @*/
1771: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1772: {
1778: if (!m || !n) return(0);
1782: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1783: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1784: if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1785: MatCheckPreallocated(mat,1);
1787: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1788: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1789: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1790: return(0);
1791: }
1795: /*@
1796: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1797: the same size. Currently, this can only be called once and creates the given matrix.
1799: Not Collective
1801: Input Parameters:
1802: + mat - the matrix
1803: . nb - the number of blocks
1804: . bs - the number of rows (and columns) in each block
1805: . rows - a concatenation of the rows for each block
1806: - v - a concatenation of logically two-dimensional arrays of values
1808: Notes:
1809: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
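   Example usage (a minimal sketch with two 2x2 element blocks; the sizes and values are illustrative only):
.vb
      PetscInt    rows[4] = {0,1, 1,2};                /* rows of block 0 followed by rows of block 1 */
      PetscScalar v[8]    = {1,2,3,4, 5,6,7,8};        /* nb*bs*bs values, one block after another */
      MatSetValuesBatch(mat,2,2,rows,v);
.ve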
1811: Level: advanced
1813: Concepts: matrices^putting entries in
1815: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1816: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1817: @*/
1818: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1819: {
1827: #if defined(PETSC_USE_DEBUG)
1828: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1829: #endif
1831: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1832: if (mat->ops->setvaluesbatch) {
1833: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1834: } else {
1835: PetscInt b;
1836: for (b = 0; b < nb; ++b) {
1837: MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1838: }
1839: }
1840: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1841: return(0);
1842: }
1846: /*@
1847: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1848: the routine MatSetValuesLocal() to allow users to insert matrix entries
1849: using a local (per-processor) numbering.
1851: Not Collective
1853: Input Parameters:
1854: + x - the matrix
1855: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or ISLocalToGlobalMappingCreateIS()
1856: - cmapping - column mapping
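   Example usage (a minimal sketch; nlocal and globalindices describe the local-to-global numbering and are assumed to exist):
.vb
      ISLocalToGlobalMapping map;
      ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,nlocal,globalindices,PETSC_COPY_VALUES,&map);
      MatSetLocalToGlobalMapping(x,map,map);           /* use the same mapping for rows and columns */
      ISLocalToGlobalMappingDestroy(&map);
.ve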
1858: Level: intermediate
1860: Concepts: matrices^local to global mapping
1861: Concepts: local to global mapping^for matrices
1863: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1864: @*/
1865: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1866: {
1875: if (x->ops->setlocaltoglobalmapping) {
1876: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1877: } else {
1878: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1879: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1880: }
1881: return(0);
1882: }
1887: /*@
1888: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
1890: Not Collective
1892: Input Parameters:
1893: . A - the matrix
1895: Output Parameters:
1896: + rmapping - row mapping
1897: - cmapping - column mapping
1899: Level: advanced
1901: Concepts: matrices^local to global mapping
1902: Concepts: local to global mapping^for matrices
1904: .seealso: MatSetValuesLocal()
1905: @*/
1906: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1907: {
1913: if (rmapping) *rmapping = A->rmap->mapping;
1914: if (cmapping) *cmapping = A->cmap->mapping;
1915: return(0);
1916: }
1920: /*@
1921: MatGetLayouts - Gets the PetscLayout objects for rows and columns
1923: Not Collective
1925: Input Parameters:
1926: . A - the matrix
1928: Output Parameters:
1929: + rmap - row layout
1930: - cmap - column layout
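   Example usage (a minimal sketch querying the locally owned row range):
.vb
      PetscLayout rmap,cmap;
      PetscInt    rstart,rend;
      MatGetLayouts(A,&rmap,&cmap);
      PetscLayoutGetRange(rmap,&rstart,&rend);         /* rows rstart..rend-1 are stored on this process */
.ve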
1932: Level: advanced
1934: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping()
1935: @*/
1936: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
1937: {
1943: if (rmap) *rmap = A->rmap;
1944: if (cmap) *cmap = A->cmap;
1945: return(0);
1946: }
1950: /*@
1951: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
1952: using a local ordering of the nodes.
1954: Not Collective
1956: Input Parameters:
1957: + mat - the matrix
1958: . nrow, irow - number of rows and their local indices
1959: . ncol, icol - number of columns and their local indices
1960: . y - a logically two-dimensional array of values
1961: - addv - either INSERT_VALUES or ADD_VALUES, where
1962: ADD_VALUES adds values to any existing entries, and
1963: INSERT_VALUES replaces existing entries with new values
1965: Notes:
1966: If you create the matrix yourself (that is, not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1967: MatSetUp() before using this routine.
1969: If you create the matrix yourself (that is, not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine.
1971: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
1972: options cannot be mixed without intervening calls to the assembly
1973: routines.
1975: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1976: MUST be called after all calls to MatSetValuesLocal() have been completed.
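   Example usage (a minimal sketch; ilocal is an assumed local index covered by the mapping set with MatSetLocalToGlobalMapping()):
.vb
      PetscInt    irow[1] = {ilocal},icol[1] = {ilocal};
      PetscScalar v       = 1.0;
      MatSetValuesLocal(mat,1,irow,1,icol,&v,ADD_VALUES);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve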
1978: Level: intermediate
1980: Concepts: matrices^putting entries in with local numbering
1982: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
1983: MatSetValueLocal()
1984: @*/
1985: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
1986: {
1992: MatCheckPreallocated(mat,1);
1993: if (!nrow || !ncol) return(0); /* no values to insert */
1997: if (mat->insertmode == NOT_SET_VALUES) {
1998: mat->insertmode = addv;
1999: }
2000: #if defined(PETSC_USE_DEBUG)
2001: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2002: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2003: if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2004: #endif
2006: if (mat->assembled) {
2007: mat->was_assembled = PETSC_TRUE;
2008: mat->assembled = PETSC_FALSE;
2009: }
2010: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2011: if (mat->ops->setvalueslocal) {
2012: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2013: } else {
2014: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2015: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2016: irowm = buf; icolm = buf+nrow;
2017: } else {
2018: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2019: irowm = bufr; icolm = bufc;
2020: }
2021: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2022: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2023: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2024: PetscFree2(bufr,bufc);
2025: }
2026: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2027: #if defined(PETSC_HAVE_CUSP)
2028: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2029: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2030: }
2031: #endif
2032: #if defined(PETSC_HAVE_VIENNACL)
2033: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2034: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2035: }
2036: #endif
2037: return(0);
2038: }
2042: /*@
2043: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2044: using a local ordering of the nodes, a block at a time.
2046: Not Collective
2048: Input Parameters:
2049: + mat - the matrix
2050: . nrow, irow - number of rows and their local indices
2051: . ncol, icol - number of columns and their local indices
2052: . y - a logically two-dimensional array of values
2053: - addv - either INSERT_VALUES or ADD_VALUES, where
2054: ADD_VALUES adds values to any existing entries, and
2055: INSERT_VALUES replaces existing entries with new values
2057: Notes:
2058: If you create the matrix yourself (that is, not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2059: MatSetUp() before using this routine.
2061: If you create the matrix yourself (that is, not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2062: before using this routine.
2064: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2065: options cannot be mixed without intervening calls to the assembly
2066: routines.
2068: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2069: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
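   Example usage (a minimal sketch; the block size bs, the mapping map, the local block index iblock and the bs*bs value array v are assumed to exist, and the matrix is assumed preallocated):
.vb
      MatSetBlockSize(mat,bs);
      MatSetLocalToGlobalMapping(mat,map,map);
      MatSetValuesBlockedLocal(mat,1,&iblock,1,&iblock,v,ADD_VALUES);   /* v holds one bs x bs block, row-oriented */
.ve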
2071: Level: intermediate
2073: Concepts: matrices^putting blocked values in with local numbering
2075: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2076: MatSetValuesLocal(), MatSetValuesBlocked()
2077: @*/
2078: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2079: {
2085: MatCheckPreallocated(mat,1);
2086: if (!nrow || !ncol) return(0); /* no values to insert */
2090: if (mat->insertmode == NOT_SET_VALUES) {
2091: mat->insertmode = addv;
2092: }
2093: #if defined(PETSC_USE_DEBUG)
2094: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2095: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2096: if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2097: #endif
2099: if (mat->assembled) {
2100: mat->was_assembled = PETSC_TRUE;
2101: mat->assembled = PETSC_FALSE;
2102: }
2103: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2104: if (mat->ops->setvaluesblockedlocal) {
2105: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2106: } else {
2107: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2108: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2109: irowm = buf; icolm = buf + nrow;
2110: } else {
2111: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2112: irowm = bufr; icolm = bufc;
2113: }
2114: ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2115: ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2116: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2117: PetscFree2(bufr,bufc);
2118: }
2119: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2120: #if defined(PETSC_HAVE_CUSP)
2121: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2122: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2123: }
2124: #endif
2125: #if defined(PETSC_HAVE_VIENNACL)
2126: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2127: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2128: }
2129: #endif
2130: return(0);
2131: }
2135: /*@
2136: MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal
2138: Collective on Mat and Vec
2140: Input Parameters:
2141: + mat - the matrix
2142: - x - the vector to be multiplied
2144: Output Parameters:
2145: . y - the result
2147: Notes:
2148: The vectors x and y cannot be the same, i.e., one cannot
2149: call MatMultDiagonalBlock(A,y,y).
2151: Level: developer
2153: Concepts: matrix-vector product
2155: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2156: @*/
2157: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2158: {
2167: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2168: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2169: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2170: MatCheckPreallocated(mat,1);
2172: if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2173: (*mat->ops->multdiagonalblock)(mat,x,y);
2174: PetscObjectStateIncrease((PetscObject)y);
2175: return(0);
2176: }
2178: /* --------------------------------------------------------*/
2181: /*@
2182: MatMult - Computes the matrix-vector product, y = Ax.
2184: Neighbor-wise Collective on Mat and Vec
2186: Input Parameters:
2187: + mat - the matrix
2188: - x - the vector to be multiplied
2190: Output Parameters:
2191: . y - the result
2193: Notes:
2194: The vectors x and y cannot be the same. I.e., one cannot
2195: call MatMult(A,y,y).
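   Example usage (a minimal sketch; mat is an assembled matrix):
.vb
      Vec x,y;
      MatCreateVecs(mat,&x,&y);        /* x conforms to the columns of mat, y to its rows */
      VecSet(x,1.0);
      MatMult(mat,x,y);                /* y = mat*x */
.ve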
2197: Level: beginner
2199: Concepts: matrix-vector product
2201: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2202: @*/
2203: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2204: {
2212: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2213: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2214: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2215: #if !defined(PETSC_HAVE_CONSTRAINTS)
2216: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2217: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2218: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2219: #endif
2220: VecLocked(y,3);
2221: if (mat->erroriffpe) {VecValidValues(x,2,PETSC_TRUE);}
2222: MatCheckPreallocated(mat,1);
2224: VecLockPush(x);
2225: if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2226: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2227: (*mat->ops->mult)(mat,x,y);
2228: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2229: if (mat->erroriffpe) {VecValidValues(y,3,PETSC_FALSE);}
2230: VecLockPop(x);
2231: return(0);
2232: }
2236: /*@
2237: MatMultTranspose - Computes matrix transpose times a vector.
2239: Neighbor-wise Collective on Mat and Vec
2241: Input Parameters:
2242: + mat - the matrix
2243: - x - the vector to be multiplied
2245: Output Parameters:
2246: . y - the result
2248: Notes:
2249: The vectors x and y cannot be the same. I.e., one cannot
2250: call MatMultTranspose(A,y,y).
2252: For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiply;
2253: use MatMultHermitianTranspose() for that.
2255: Level: beginner
2257: Concepts: matrix vector product^transpose
2259: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2260: @*/
2261: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2262: {
2271: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2272: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2273: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2274: #if !defined(PETSC_HAVE_CONSTRAINTS)
2275: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2276: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2277: #endif
2278: if (mat->erroriffpe) {VecValidValues(x,2,PETSC_TRUE);}
2279: MatCheckPreallocated(mat,1);
2281: if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2282: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2283: VecLockPush(x);
2284: (*mat->ops->multtranspose)(mat,x,y);
2285: VecLockPop(x);
2286: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2287: PetscObjectStateIncrease((PetscObject)y);
2288: if (mat->erroriffpe) {VecValidValues(y,3,PETSC_FALSE);}
2289: return(0);
2290: }
2294: /*@
2295: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2297: Neighbor-wise Collective on Mat and Vec
2299: Input Parameters:
2300: + mat - the matrix
2301: - x - the vector to be multiplied
2303: Output Parameters:
2304: . y - the result
2306: Notes:
2307: The vectors x and y cannot be the same. I.e., one cannot
2308: call MatMultHermitianTranspose(A,y,y).
2310: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2312: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2314: Level: beginner
2316: Concepts: matrix vector product^transpose
2318: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2319: @*/
2320: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2321: {
2323: Vec w;
2331: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2332: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2333: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2334: #if !defined(PETSC_HAVE_CONSTRAINTS)
2335: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2336: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2337: #endif
2338: MatCheckPreallocated(mat,1);
2340: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2341: if (mat->ops->multhermitiantranspose) {
2342: VecLockPush(x);
2343: (*mat->ops->multhermitiantranspose)(mat,x,y);
2344: VecLockPop(x);
2345: } else {
2346: VecDuplicate(x,&w);
2347: VecCopy(x,w);
2348: VecConjugate(w);
2349: MatMultTranspose(mat,w,y);
2350: VecDestroy(&w);
2351: VecConjugate(y);
2352: }
2353: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2354: PetscObjectStateIncrease((PetscObject)y);
2355: return(0);
2356: }
2360: /*@
2361: MatMultAdd - Computes v3 = v2 + A * v1.
2363: Neighbor-wise Collective on Mat and Vec
2365: Input Parameters:
2366: + mat - the matrix
2367: - v1, v2 - the vectors
2369: Output Parameters:
2370: . v3 - the result
2372: Notes:
2373: The vectors v1 and v3 cannot be the same. I.e., one cannot
2374: call MatMultAdd(A,v1,v2,v1).
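   Example usage (a minimal sketch; v1, v2 and v3 are assumed to be vectors conforming to mat):
.vb
      /* equivalent to MatMult(mat,v1,v3) followed by VecAXPY(v3,1.0,v2), done in one fused call */
      MatMultAdd(mat,v1,v2,v3);
.ve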
2376: Level: beginner
2378: Concepts: matrix vector product^addition
2380: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2381: @*/
2382: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2383: {
2393: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2394: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2395: if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2396: /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2397: if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2398: if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2399: if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2400: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2401: MatCheckPreallocated(mat,1);
2403: if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2404: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2405: VecLockPush(v1);
2406: (*mat->ops->multadd)(mat,v1,v2,v3);
2407: VecLockPop(v1);
2408: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2409: PetscObjectStateIncrease((PetscObject)v3);
2410: return(0);
2411: }
2415: /*@
2416: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2418: Neighbor-wise Collective on Mat and Vec
2420: Input Parameters:
2421: + mat - the matrix
2422: - v1, v2 - the vectors
2424: Output Parameters:
2425: . v3 - the result
2427: Notes:
2428: The vectors v1 and v3 cannot be the same. I.e., one cannot
2429: call MatMultTransposeAdd(A,v1,v2,v1).
2431: Level: beginner
2433: Concepts: matrix vector product^transpose and addition
2435: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2436: @*/
2437: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2438: {
2448: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2449: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2450: if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2451: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2452: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2453: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2454: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2455: MatCheckPreallocated(mat,1);
2457: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2458: VecLockPush(v1);
2459: (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2460: VecLockPop(v1);
2461: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2462: PetscObjectStateIncrease((PetscObject)v3);
2463: return(0);
2464: }
2468: /*@
2469: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2471: Neighbor-wise Collective on Mat and Vec
2473: Input Parameters:
2474: + mat - the matrix
2475: - v1, v2 - the vectors
2477: Output Parameters:
2478: . v3 - the result
2480: Notes:
2481: The vectors v1 and v3 cannot be the same. I.e., one cannot
2482: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2484: Level: beginner
2486: Concepts: matrix vector product^transpose and addition
2488: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2489: @*/
2490: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2491: {
2501: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2502: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2503: if (!mat->ops->multhermitiantransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2504: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2505: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2506: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2507: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2508: MatCheckPreallocated(mat,1);
2510: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2511: VecLockPush(v1);
2512: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2513: VecLockPop(v1);
2514: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2515: PetscObjectStateIncrease((PetscObject)v3);
2516: return(0);
2517: }
2521: /*@
2522: MatMultConstrained - The inner multiplication routine for a
2523: constrained matrix P^T A P.
2525: Neighbor-wise Collective on Mat and Vec
2527: Input Parameters:
2528: + mat - the matrix
2529: - x - the vector to be multiplied
2531: Output Parameters:
2532: . y - the result
2534: Notes:
2535: The vectors x and y cannot be the same, i.e., one cannot
2536: call MatMultConstrained(A,y,y).
2538: Level: beginner
2540: .keywords: matrix, multiply, matrix-vector product, constraint
2541: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2542: @*/
2543: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2544: {
2551: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2552: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2553: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2554: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2555: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2556: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2558: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2559: VecLockPush(x);
2560: (*mat->ops->multconstrained)(mat,x,y);
2561: VecLockPop(x);
2562: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2563: PetscObjectStateIncrease((PetscObject)y);
2564: return(0);
2565: }
2569: /*@
2570: MatMultTransposeConstrained - The inner multiplication routine for a
2571: constrained matrix P^T A^T P.
2573: Neighbor-wise Collective on Mat and Vec
2575: Input Parameters:
2576: + mat - the matrix
2577: - x - the vector to be multiplied
2579: Output Parameters:
2580: . y - the result
2582: Notes:
2583: The vectors x and y cannot be the same, i.e., one cannot
2584: call MatMultTransposeConstrained(A,y,y).
2586: Level: beginner
2588: .keywords: matrix, multiply, matrix-vector product, constraint
2589: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2590: @*/
2591: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2592: {
2599: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2600: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2601: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2602: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2603: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2605: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2606: (*mat->ops->multtransposeconstrained)(mat,x,y);
2607: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2608: PetscObjectStateIncrease((PetscObject)y);
2609: return(0);
2610: }
2614: /*@C
2615: MatGetFactorType - gets the type of factorization a matrix holds
2617: Not Collective; the factor type is the same on all processes that share the matrix
2620: Input Parameters:
2621: . mat - the matrix
2623: Output Parameters:
2624: . t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT
2626: Level: intermediate
2628: .seealso: MatFactorType, MatGetFactor()
2629: @*/
2630: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2631: {
2635: *t = mat->factortype;
2636: return(0);
2637: }
2639: /* ------------------------------------------------------------*/
2642: /*@C
2643: MatGetInfo - Returns information about matrix storage (number of
2644: nonzeros, memory, etc.).
2646: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2648: Input Parameters:
2649: + mat - the matrix
2651: - flag - flag indicating the type of parameters to be returned
2652:          (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2653:           MAT_GLOBAL_SUM - sum over all processors)
2654: Output Parameter:
2655: . info - matrix information context
2657: Notes:
2658: The MatInfo context contains a variety of matrix data, including
2659: number of nonzeros allocated and used, number of mallocs during
2660: matrix assembly, etc. Additional information for factored matrices
2661: is provided (such as the fill ratio, number of mallocs during
2662: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2663: when using the runtime options
2664: $ -info -mat_view ::ascii_info
2666: Example for C/C++ Users:
2667: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2668: data within the MatInfo context. For example,
2669: .vb
2670: MatInfo info;
2671: Mat A;
2672: double mal, nz_a, nz_u;
2674: MatGetInfo(A,MAT_LOCAL,&info);
2675: mal = info.mallocs;
2676: nz_a = info.nz_allocated;
2677: .ve
2679: Example for Fortran Users:
2680: Fortran users should declare info as a double precision
2681: array of dimension MAT_INFO_SIZE, and then extract the parameters
2682: of interest. See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h for
2683: a complete list of parameter names.
2684: .vb
2685: double precision info(MAT_INFO_SIZE)
2686: double precision mal, nz_a
2687: Mat A
2688: integer ierr
2690: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2691: mal = info(MAT_INFO_MALLOCS)
2692: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2693: .ve
2695: Level: intermediate
2697: Concepts: matrices^getting information on
2699: Developer Note: fortran interface is not autogenerated as the f90
2700: interface definition cannot be generated correctly [due to MatInfo]
2702: .seealso: MatStashGetInfo()
2704: @*/
2705: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2706: {
2713: if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2714: MatCheckPreallocated(mat,1);
2715: (*mat->ops->getinfo)(mat,flag,info);
2716: return(0);
2717: }
2719: /* ----------------------------------------------------------*/
2723: /*@C
2724: MatLUFactor - Performs in-place LU factorization of matrix.
2726: Collective on Mat
2728: Input Parameters:
2729: + mat - the matrix
2730: . row - row permutation
2731: . col - column permutation
2732: - info - options for factorization, includes
2733: $ fill - expected fill as ratio of original fill.
2734: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2735: $ Run with the option -info to determine an optimal value to use
2737: Notes:
2738: Most users should employ the simplified KSP interface for linear solvers
2739: instead of working directly with matrix algebra routines such as this.
2740: See, e.g., KSPCreate().
2742: This changes the state of the matrix to a factored matrix; it cannot be used
2743: for example with MatSetValues() unless one first calls MatSetUnfactored().
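   Example usage (a minimal sketch of an in-place LU factorization; mat is an assembled square matrix):
.vb
      IS            row,col;
      MatFactorInfo info;
      MatGetOrdering(mat,MATORDERINGNATURAL,&row,&col);
      MatFactorInfoInitialize(&info);
      MatLUFactor(mat,row,col,&info);      /* mat now holds its own LU factors */
      ISDestroy(&row);
      ISDestroy(&col);
.ve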
2745: Level: developer
2747: Concepts: matrices^LU factorization
2749: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2750: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2752: Developer Note: fortran interface is not autogenerated as the f90
2753: interface definition cannot be generated correctly [due to MatFactorInfo]
2755: @*/
2756: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2757: {
2759: MatFactorInfo tinfo;
2767: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2768: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2769: if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2770: MatCheckPreallocated(mat,1);
2771: if (!info) {
2772: MatFactorInfoInitialize(&tinfo);
2773: info = &tinfo;
2774: }
2776: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2777: (*mat->ops->lufactor)(mat,row,col,info);
2778: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2779: PetscObjectStateIncrease((PetscObject)mat);
2780: return(0);
2781: }
2785: /*@C
2786: MatILUFactor - Performs in-place ILU factorization of matrix.
2788: Collective on Mat
2790: Input Parameters:
2791: + mat - the matrix
2792: . row - row permutation
2793: . col - column permutation
2794: - info - structure containing
2795: $ levels - number of levels of fill.
2796: $ expected fill - as ratio of original fill.
2797: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2798: missing diagonal entries)
2800: Notes:
2801: Probably really in-place only when the level of fill is zero; otherwise this allocates
2802: new space to store the factored matrix and releases the previous memory.
2804: Most users should employ the simplified KSP interface for linear solvers
2805: instead of working directly with matrix algebra routines such as this.
2806: See, e.g., KSPCreate().
2808: Level: developer
2810: Concepts: matrices^ILU factorization
2812: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2814: Developer Note: fortran interface is not autogenerated as the f90
2815: interface definition cannot be generated correctly [due to MatFactorInfo]
2817: @*/
2818: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2819: {
2828: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2829: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2830: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2831: if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2832: MatCheckPreallocated(mat,1);
2834: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2835: (*mat->ops->ilufactor)(mat,row,col,info);
2836: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2837: PetscObjectStateIncrease((PetscObject)mat);
2838: return(0);
2839: }
2843: /*@C
2844: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2845: Call this routine before calling MatLUFactorNumeric().
2847: Collective on Mat
2849: Input Parameters:
2850: + fact - the factor matrix obtained with MatGetFactor()
2851: . mat - the matrix
2852: . row, col - row and column permutations
2853: - info - options for factorization, includes
2854: $ fill - expected fill as ratio of original fill.
2855: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2856: $ Run with the option -info to determine an optimal value to use
2859: Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
2861: Most users should employ the simplified KSP interface for linear solvers
2862: instead of working directly with matrix algebra routines such as this.
2863: See, e.g., KSPCreate().
2865: Level: developer
2867: Concepts: matrices^LU symbolic factorization
2869: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()
2871: Developer Note: fortran interface is not autogenerated as the f90
2872: interface definition cannot be generated correctly [due to MatFactorInfo]
2874: @*/
2875: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2876: {
2886: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2887: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2888: if (!(fact)->ops->lufactorsymbolic) {
2889: const MatSolverPackage spackage;
2890: MatFactorGetSolverPackage(fact,&spackage);
2891: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
2892: }
2893: MatCheckPreallocated(mat,2);
2895: PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2896: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2897: PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2898: PetscObjectStateIncrease((PetscObject)fact);
2899: return(0);
2900: }
2904: /*@C
2905: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2906: Call this routine after first calling MatLUFactorSymbolic().
2908: Collective on Mat
2910: Input Parameters:
2911: + fact - the factor matrix obtained with MatGetFactor()
2912: . mat - the matrix
2913: - info - options for factorization
2915: Notes:
2916: See MatLUFactor() for in-place factorization. See
2917: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
2919: Most users should employ the simplified KSP interface for linear solvers
2920: instead of working directly with matrix algebra routines such as this.
2921: See, e.g., KSPCreate().
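   Example usage (a minimal sketch of the out-of-place LU sequence; the orderings row and col, the MatFactorInfo info, and the vectors b and x are assumed to exist):
.vb
      Mat F;
      MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
      MatLUFactorSymbolic(F,mat,row,col,&info);
      MatLUFactorNumeric(F,mat,&info);
      MatSolve(F,b,x);
      MatDestroy(&F);
.ve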
2923: Level: developer
2925: Concepts: matrices^LU numeric factorization
2927: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
2929: Developer Note: fortran interface is not autogenerated as the f90
2930: interface definition cannot be generated correctly [due to MatFactorInfo]
2932: @*/
2933: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
2934: {
2942: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2943: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
2945: if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
2946: MatCheckPreallocated(mat,2);
2947: PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
2948: (fact->ops->lufactornumeric)(fact,mat,info);
2949: PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
2950: MatViewFromOptions(fact,NULL,"-mat_factor_view");
2951: PetscObjectStateIncrease((PetscObject)fact);
2952: return(0);
2953: }
2957: /*@C
2958: MatCholeskyFactor - Performs in-place Cholesky factorization of a
2959: symmetric matrix.
2961: Collective on Mat
2963: Input Parameters:
2964: + mat - the matrix
2965: . perm - row and column permutations
2966: - info - options for factorization, including expected fill as ratio of original fill
2968: Notes:
2969: See MatLUFactor() for the nonsymmetric case. See also
2970: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
2972: Most users should employ the simplified KSP interface for linear solvers
2973: instead of working directly with matrix algebra routines such as this.
2974: See, e.g., KSPCreate().
2976: Level: developer
2978: Concepts: matrices^Cholesky factorization
2980: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
2981: MatGetOrdering()
2983: Developer Note: fortran interface is not autogenerated as the f90
2984: interface definition cannot be generated correctly [due to MatFactorInfo]
2986: @*/
2987: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
2988: {
2996: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
2997: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2998: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2999: if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3000: MatCheckPreallocated(mat,1);
3002: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3003: (*mat->ops->choleskyfactor)(mat,perm,info);
3004: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3005: PetscObjectStateIncrease((PetscObject)mat);
3006: return(0);
3007: }
3011: /*@C
3012: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3013: of a symmetric matrix.
3015: Collective on Mat
3017: Input Parameters:
3018: + fact - the factor matrix obtained with MatGetFactor()
3019: . mat - the matrix
3020: . perm - row and column permutations
3021: - info - options for factorization, includes
3022: $ fill - expected fill as ratio of original fill.
3023: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3024: $ Run with the option -info to determine an optimal value to use
3026: Notes:
3027: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3028: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3030: Most users should employ the simplified KSP interface for linear solvers
3031: instead of working directly with matrix algebra routines such as this.
3032: See, e.g., KSPCreate().
3034: Level: developer
3036: Concepts: matrices^Cholesky symbolic factorization
3038: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3039: MatGetOrdering()
3041: Developer Note: fortran interface is not autogenerated as the f90
3042: interface definition cannot be generated correctly [due to MatFactorInfo]
3044: @*/
3045: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3046: {
3055: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3056: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3057: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3058: if (!(fact)->ops->choleskyfactorsymbolic) {
3059: const MatSolverPackage spackage;
3060: MatFactorGetSolverPackage(fact,&spackage);
3061: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3062: }
3063: MatCheckPreallocated(mat,2);
3065: PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3066: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3067: PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3068: PetscObjectStateIncrease((PetscObject)fact);
3069: return(0);
3070: }
3074: /*@C
3075: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3076: of a symmetric matrix. Call this routine after first calling
3077: MatCholeskyFactorSymbolic().
3079: Collective on Mat
3081: Input Parameters:
3082: + fact - the factor matrix obtained with MatGetFactor(), holding the symbolic factor of mat
3083: . mat - the initial matrix
3084: - info - options for factorization
3088: Notes:
3089: Most users should employ the simplified KSP interface for linear solvers
3090: instead of working directly with matrix algebra routines such as this.
3091: See, e.g., KSPCreate().
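   Example usage (a minimal sketch of the out-of-place Cholesky sequence; the ordering perm, the MatFactorInfo info, and the vectors b and x are assumed to exist):
.vb
      Mat F;
      MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
      MatCholeskyFactorSymbolic(F,mat,perm,&info);
      MatCholeskyFactorNumeric(F,mat,&info);
      MatSolve(F,b,x);
      MatDestroy(&F);
.ve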
3093: Level: developer
3095: Concepts: matrices^Cholesky numeric factorization
3097: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3099: Developer Note: fortran interface is not autogenerated as the f90
3100: interface definition cannot be generated correctly [due to MatFactorInfo]
3102: @*/
3103: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3104: {
3112: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3113: if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3114: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3115: MatCheckPreallocated(mat,2);
3117: PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3118: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3119: PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3120: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3121: PetscObjectStateIncrease((PetscObject)fact);
3122: return(0);
3123: }
3125: /* ----------------------------------------------------------------*/
3128: /*@
3129: MatSolve - Solves A x = b, given a factored matrix.
3131: Neighbor-wise Collective on Mat and Vec
3133: Input Parameters:
3134: + mat - the factored matrix
3135: - b - the right-hand-side vector
3137: Output Parameter:
3138: . x - the result vector
3140: Notes:
3141: The vectors b and x cannot be the same. I.e., one cannot
3142: call MatSolve(A,x,x).
3145: Most users should employ the simplified KSP interface for linear solvers
3146: instead of working directly with matrix algebra routines such as this.
3147: See, e.g., KSPCreate().
3149: Level: developer
3151: Concepts: matrices^triangular solves
3153: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3154: @*/
3155: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3156: {
3166: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3167: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3168: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3169: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3170: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3171: if (!mat->rmap->N && !mat->cmap->N) return(0);
3172: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3173: MatCheckPreallocated(mat,1);
3175: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3176: (*mat->ops->solve)(mat,b,x);
3177: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3178: PetscObjectStateIncrease((PetscObject)x);
3179: return(0);
3180: }
3184: PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X)
3185: {
3187: Vec b,x;
3188: PetscInt m,N,i;
3189: PetscScalar *bb,*xx;
3190: PetscBool flg;
3193: PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3194: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3195: PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3196: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
3198: MatDenseGetArray(B,&bb);
3199: MatDenseGetArray(X,&xx);
3200: MatGetLocalSize(B,&m,NULL); /* number local rows */
3201: MatGetSize(B,NULL,&N); /* total columns in dense matrix */
3202: MatCreateVecs(A,&x,&b);
3203: for (i=0; i<N; i++) {
3204: VecPlaceArray(b,bb + i*m);
3205: VecPlaceArray(x,xx + i*m);
3206: MatSolve(A,b,x);
3207: VecResetArray(x);
3208: VecResetArray(b);
3209: }
3210: VecDestroy(&b);
3211: VecDestroy(&x);
3212: MatDenseRestoreArray(B,&bb);
3213: MatDenseRestoreArray(X,&xx);
3214: return(0);
3215: }
3219: /*@
3220: MatMatSolve - Solves A X = B, given a factored matrix.
3222: Neighbor-wise Collective on Mat
3224: Input Parameters:
3225: + mat - the factored matrix
3226: - B - the right-hand-side matrix (dense matrix)
3228: Output Parameter:
3229: . X - the result matrix (dense matrix)
3231: Notes:
3232: The matrices B and X cannot be the same, i.e., one cannot
3233: call MatMatSolve(A,X,X).
3236: Most users should usually employ the simplified KSP interface for linear solvers
3237: instead of working directly with matrix algebra routines such as this.
3238: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3239: at a time.
3241: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3242: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3244: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
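   Example usage (a minimal sketch; A is the factored matrix, and the global size N and number of right-hand sides nrhs are assumed to be defined):
.vb
      Mat B,X;
      MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,nrhs,NULL,&B);
      MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,nrhs,NULL,&X);
      /* ... fill B with the right-hand sides and assemble it ... */
      MatMatSolve(A,B,X);
.ve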
3246: Level: developer
3248: Concepts: matrices^triangular solves
3250: .seealso: MatMatSolveAdd(), MatMatSolveTranspose(), MatMatSolveTransposeAdd(), MatLUFactor(), MatCholeskyFactor()
3251: @*/
3252: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3253: {
3263: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3264: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3265: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3266: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3267: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3268: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3269: if (!A->rmap->N && !A->cmap->N) return(0);
3270: MatCheckPreallocated(A,1);
3272: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3273: if (!A->ops->matsolve) {
3274: PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3275: MatMatSolve_Basic(A,B,X);
3276: } else {
3277: (*A->ops->matsolve)(A,B,X);
3278: }
3279: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3280: PetscObjectStateIncrease((PetscObject)X);
3281: return(0);
3282: }
3287: /*@
3288: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3289: U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.
3291: Neighbor-wise Collective on Mat and Vec
3293: Input Parameters:
3294: + mat - the factored matrix
3295: - b - the right-hand-side vector
3297: Output Parameter:
3298: . x - the result vector
3300: Notes:
3301: MatSolve() should be used for most applications, as it performs
3302: a forward solve followed by a backward solve.
3304: The vectors b and x cannot be the same, i.e., one cannot
3305: call MatForwardSolve(A,x,x).
3307: For matrix in seqsbaij format with block size larger than 1,
3308: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3309: MatForwardSolve() solves U^T*D y = b, and
3310: MatBackwardSolve() solves U x = y.
3311: Thus they do not provide a symmetric preconditioner.
3313: Most users should employ the simplified KSP interface for linear solvers
3314: instead of working directly with matrix algebra routines such as this.
3315: See, e.g., KSPCreate().
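   Example usage (a minimal sketch of a split triangular solve; mat is the factored matrix and y is an assumed work vector):
.vb
      MatForwardSolve(mat,b,y);        /* y = inv(L) b */
      MatBackwardSolve(mat,y,x);       /* x = inv(U) y, so x = inv(A) b overall */
.ve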
3317: Level: developer
3319: Concepts: matrices^forward solves
3321: .seealso: MatSolve(), MatBackwardSolve()
3322: @*/
3323: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3324: {
3334: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3335: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3336: if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3337: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3338: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3339: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3340: MatCheckPreallocated(mat,1);
3341: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3342: (*mat->ops->forwardsolve)(mat,b,x);
3343: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3344: PetscObjectStateIncrease((PetscObject)x);
3345: return(0);
3346: }
3350: /*@
3351: MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3352: D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.
3354: Neighbor-wise Collective on Mat and Vec
3356: Input Parameters:
3357: + mat - the factored matrix
3358: - b - the right-hand-side vector
3360: Output Parameter:
3361: . x - the result vector
3363: Notes:
3364: MatSolve() should be used for most applications, as it performs
3365: a forward solve followed by a backward solve.
3367: The vectors b and x cannot be the same. I.e., one cannot
3368: call MatBackwardSolve(A,x,x).
3370: For matrix in seqsbaij format with block size larger than 1,
3371: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3372: MatForwardSolve() solves U^T*D y = b, and
3373: MatBackwardSolve() solves U x = y.
3374: Thus they do not provide a symmetric preconditioner.
3376: Most users should employ the simplified KSP interface for linear solvers
3377: instead of working directly with matrix algebra routines such as this.
3378: See, e.g., KSPCreate().
3380: Level: developer
3382: Concepts: matrices^backward solves
3384: .seealso: MatSolve(), MatForwardSolve()
3385: @*/
3386: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3387: {
3397: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3398: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3399: if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3400: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3401: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3402: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3403: MatCheckPreallocated(mat,1);
3405: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3406: (*mat->ops->backwardsolve)(mat,b,x);
3407: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3408: PetscObjectStateIncrease((PetscObject)x);
3409: return(0);
3410: }
3414: /*@
3415: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3417: Neighbor-wise Collective on Mat and Vec
3419: Input Parameters:
3420: + mat - the factored matrix
3421: . b - the right-hand-side vector
3422: - y - the vector to be added to
3424: Output Parameter:
3425: . x - the result vector
3427: Notes:
3428: The vectors b and x cannot be the same. I.e., one cannot
3429: call MatSolveAdd(A,x,y,x).
3431: Most users should employ the simplified KSP interface for linear solvers
3432: instead of working directly with matrix algebra routines such as this.
3433: See, e.g., KSPCreate().
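   Example of Usage (a sketch only; fact is assumed to be a factored matrix obtained with
   MatGetFactor() and the numeric factorization routines, and error checking is omitted):
.vb
      MatSolveAdd(fact,b,y,x);      /* x = y + inv(A)*b */
.ve
   When no type-specific implementation is available, this is equivalent to MatSolve(fact,b,x)
   followed by VecAXPY(x,1.0,y), which is what the fallback code below does.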
3435: Level: developer
3437: Concepts: matrices^triangular solves
3439: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3440: @*/
3441: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3442: {
3443: PetscScalar one = 1.0;
3444: Vec tmp;
3456: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3457: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3458: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3459: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3460: if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3461: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3462: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3463: MatCheckPreallocated(mat,1);
3465: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3466: if (mat->ops->solveadd) {
3467: (*mat->ops->solveadd)(mat,b,y,x);
3468: } else {
3469: /* do the solve then the add manually */
3470: if (x != y) {
3471: MatSolve(mat,b,x);
3472: VecAXPY(x,one,y);
3473: } else {
3474: VecDuplicate(x,&tmp);
3475: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3476: VecCopy(x,tmp);
3477: MatSolve(mat,b,x);
3478: VecAXPY(x,one,tmp);
3479: VecDestroy(&tmp);
3480: }
3481: }
3482: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3483: PetscObjectStateIncrease((PetscObject)x);
3484: return(0);
3485: }
3489: /*@
3490: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3492: Neighbor-wise Collective on Mat and Vec
3494: Input Parameters:
3495: + mat - the factored matrix
3496: - b - the right-hand-side vector
3498: Output Parameter:
3499: . x - the result vector
3501: Notes:
3502: The vectors b and x cannot be the same. I.e., one cannot
3503: call MatSolveTranspose(A,x,x).
3505: Most users should employ the simplified KSP interface for linear solvers
3506: instead of working directly with matrix algebra routines such as this.
3507: See, e.g., KSPCreate().
3509: Level: developer
3511: Concepts: matrices^triangular solves
3513: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3514: @*/
3515: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3516: {
3526: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3527: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3528: if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3529: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3530: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3531: MatCheckPreallocated(mat,1);
3532: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3533: (*mat->ops->solvetranspose)(mat,b,x);
3534: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3535: PetscObjectStateIncrease((PetscObject)x);
3536: return(0);
3537: }
3541: /*@
3542: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3543: factored matrix.
3545: Neighbor-wise Collective on Mat and Vec
3547: Input Parameters:
3548: + mat - the factored matrix
3549: . b - the right-hand-side vector
3550: - y - the vector to be added to
3552: Output Parameter:
3553: . x - the result vector
3555: Notes:
3556: The vectors b and x cannot be the same. I.e., one cannot
3557: call MatSolveTransposeAdd(A,x,y,x).
3559: Most users should employ the simplified KSP interface for linear solvers
3560: instead of working directly with matrix algebra routines such as this.
3561: See, e.g., KSPCreate().
3563: Level: developer
3565: Concepts: matrices^triangular solves
3567: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3568: @*/
3569: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3570: {
3571: PetscScalar one = 1.0;
3573: Vec tmp;
3584: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3585: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3586: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3587: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3588: if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3589: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3590: MatCheckPreallocated(mat,1);
3592: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3593: if (mat->ops->solvetransposeadd) {
3594: (*mat->ops->solvetransposeadd)(mat,b,y,x);
3595: } else {
3596: /* do the solve then the add manually */
3597: if (x != y) {
3598: MatSolveTranspose(mat,b,x);
3599: VecAXPY(x,one,y);
3600: } else {
3601: VecDuplicate(x,&tmp);
3602: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3603: VecCopy(x,tmp);
3604: MatSolveTranspose(mat,b,x);
3605: VecAXPY(x,one,tmp);
3606: VecDestroy(&tmp);
3607: }
3608: }
3609: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3610: PetscObjectStateIncrease((PetscObject)x);
3611: return(0);
3612: }
3613: /* ----------------------------------------------------------------*/
3617: /*@
3618: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3620: Neighbor-wise Collective on Mat and Vec
3622: Input Parameters:
3623: + mat - the matrix
3624: . b - the right hand side
3625: . omega - the relaxation factor
3626: . flag - flag indicating the type of SOR (see below)
3627: . shift - diagonal shift
3628: . its - the number of iterations
3629: - lits - the number of local iterations
3631: Output Parameters:
3632: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3634: SOR Flags:
3635: . SOR_FORWARD_SWEEP - forward SOR
3636: . SOR_BACKWARD_SWEEP - backward SOR
3637: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3638: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3639: . SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3640: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3641: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3642: upper/lower triangular part of matrix to
3643: vector (with omega)
3644: . SOR_ZERO_INITIAL_GUESS - zero initial guess
3646: Notes:
3647: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3648: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3649: on each processor.
3651: Application programmers will not generally use MatSOR() directly,
3652: but instead will employ the KSP/PC interface.
3654: Notes: for BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing
3656: Notes for Advanced Users:
3657: The flags are implemented as bitwise inclusive or operations.
3658: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3659: to specify a zero initial guess for SSOR.
3661: Most users should employ the simplified KSP interface for linear solvers
3662: instead of working directly with matrix algebra routines such as this.
3663: See, e.g., KSPCreate().
3665: Vectors x and b CANNOT be the same
3667: Developer Note: We should add block SOR support for AIJ matrices with block size set to greater than one and no inodes
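   Example of Usage (a sketch of one local SSOR sweep with unit relaxation factor and a zero
   initial guess; error checking is omitted):
.vb
      MatSOR(A,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,1,1,x);
.ve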
3669: Level: developer
3671: Concepts: matrices^relaxation
3672: Concepts: matrices^SOR
3673: Concepts: matrices^Gauss-Seidel
3675: @*/
3676: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3677: {
3687: if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3688: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3689: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3690: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3691: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3692: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3693: if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3694: if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3695: if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");
3697: MatCheckPreallocated(mat,1);
3698: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3699: (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3700: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3701: PetscObjectStateIncrease((PetscObject)x);
3702: return(0);
3703: }
3707: /*
3708: Default matrix copy routine.
3709: */
3710: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3711: {
3712: PetscErrorCode ierr;
3713: PetscInt i,rstart = 0,rend = 0,nz;
3714: const PetscInt *cwork;
3715: const PetscScalar *vwork;
3718: if (B->assembled) {
3719: MatZeroEntries(B);
3720: }
3721: MatGetOwnershipRange(A,&rstart,&rend);
3722: for (i=rstart; i<rend; i++) {
3723: MatGetRow(A,i,&nz,&cwork,&vwork);
3724: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3725: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3726: }
3727: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3728: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3729: PetscObjectStateIncrease((PetscObject)B);
3730: return(0);
3731: }
3735: /*@
3736: MatCopy - Copies a matrix to another matrix.
3738: Collective on Mat
3740: Input Parameters:
3741: + A - the matrix
3742: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3744: Output Parameter:
3745: . B - where the copy is put
3747: Notes:
3748: If you use SAME_NONZERO_PATTERN then the two matrices had better have the
3749: same nonzero pattern or the routine will crash.
3751: MatCopy() copies the matrix entries of a matrix to another existing
3752: matrix (after first zeroing the second matrix). A related routine is
3753: MatConvert(), which first creates a new matrix and then copies the data.
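   Example of Usage (a sketch; B is first created with the same layout and nonzero pattern as A,
   then kept up to date with A's numerical values; error checking is omitted):
.vb
      Mat B;
      MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);
      MatCopy(A,B,SAME_NONZERO_PATTERN);
.ve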
3755: Level: intermediate
3757: Concepts: matrices^copying
3759: .seealso: MatConvert(), MatDuplicate()
3761: @*/
3762: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3763: {
3765: PetscInt i;
3773: MatCheckPreallocated(B,2);
3774: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3775: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3776: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3777: MatCheckPreallocated(A,1);
3779: PetscLogEventBegin(MAT_Copy,A,B,0,0);
3780: if (A->ops->copy) {
3781: (*A->ops->copy)(A,B,str);
3782: } else { /* generic conversion */
3783: MatCopy_Basic(A,B,str);
3784: }
3786: B->stencil.dim = A->stencil.dim;
3787: B->stencil.noc = A->stencil.noc;
3788: for (i=0; i<=A->stencil.dim; i++) {
3789: B->stencil.dims[i] = A->stencil.dims[i];
3790: B->stencil.starts[i] = A->stencil.starts[i];
3791: }
3793: PetscLogEventEnd(MAT_Copy,A,B,0,0);
3794: PetscObjectStateIncrease((PetscObject)B);
3795: return(0);
3796: }
3800: /*@C
3801: MatConvert - Converts a matrix to another matrix, either of the same
3802: or different type.
3804: Collective on Mat
3806: Input Parameters:
3807: + mat - the matrix
3808: . newtype - new matrix type. Use MATSAME to create a new matrix of the
3809: same type as the original matrix.
3810: - reuse - denotes whether the destination matrix is to be created or reused. Currently
3811: MAT_REUSE_MATRIX is only supported for in-place conversion, otherwise use
3812: MAT_INITIAL_MATRIX.
3814: Output Parameter:
3815: . M - pointer to place new matrix
3817: Notes:
3818: MatConvert() first creates a new matrix and then copies the data from
3819: the first matrix. A related routine is MatCopy(), which copies the matrix
3820: entries of one matrix to another already existing matrix context.
3822: Cannot be used to convert a sequential matrix to parallel or a parallel matrix to sequential;
3823: the MPI communicator of the generated matrix is always the same as the communicator
3824: of the input matrix.
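   Example of Usage (a sketch converting a matrix to a dense copy, e.g. for debugging;
   error checking is omitted):
.vb
      Mat Adense;
      MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&Adense);
.ve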
3826: Level: intermediate
3828: Concepts: matrices^converting between storage formats
3830: .seealso: MatCopy(), MatDuplicate()
3831: @*/
3832: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
3833: {
3835: PetscBool sametype,issame,flg;
3836: char convname[256],mtype[256];
3837: Mat B;
3843: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3844: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3845: MatCheckPreallocated(mat,1);
3846: MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
3848: PetscOptionsGetString(((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
3849: if (flg) {
3850: newtype = mtype;
3851: }
3852: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
3853: PetscStrcmp(newtype,"same",&issame);
3854: if ((reuse == MAT_REUSE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX only supported for in-place conversion currently");
3856: if ((reuse == MAT_REUSE_MATRIX) && (issame || sametype)) return(0);
3858: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
3859: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
3860: } else {
3861: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
3862: const char *prefix[3] = {"seq","mpi",""};
3863: PetscInt i;
3864: /*
3865: Order of precedence:
3866: 1) See if a specialized converter is known to the current matrix.
3867: 2) See if a specialized converter is known to the desired matrix class.
3868: 3) See if a good general converter is registered for the desired class
3869: (as of 6/27/03 only MATMPIADJ falls into this category).
3870: 4) See if a good general converter is known for the current matrix.
3871: 5) Use a really basic converter.
3872: */
3874: /* 1) See if a specialized converter is known to the current matrix and the desired class */
3875: for (i=0; i<3; i++) {
3876: PetscStrcpy(convname,"MatConvert_");
3877: PetscStrcat(convname,((PetscObject)mat)->type_name);
3878: PetscStrcat(convname,"_");
3879: PetscStrcat(convname,prefix[i]);
3880: PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
3881: PetscStrcat(convname,"_C");
3882: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
3883: if (conv) goto foundconv;
3884: }
3886: /* 2) See if a specialized converter is known to the desired matrix class. */
3887: MatCreate(PetscObjectComm((PetscObject)mat),&B);
3888: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
3889: MatSetType(B,newtype);
3890: for (i=0; i<3; i++) {
3891: PetscStrcpy(convname,"MatConvert_");
3892: PetscStrcat(convname,((PetscObject)mat)->type_name);
3893: PetscStrcat(convname,"_");
3894: PetscStrcat(convname,prefix[i]);
3895: PetscStrcat(convname,newtype);
3896: PetscStrcat(convname,"_C");
3897: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
3898: if (conv) {
3899: MatDestroy(&B);
3900: goto foundconv;
3901: }
3902: }
3904: /* 3) See if a good general converter is registered for the desired class */
3905: conv = B->ops->convertfrom;
3906: MatDestroy(&B);
3907: if (conv) goto foundconv;
3909: /* 4) See if a good general converter is known for the current matrix */
3910: if (mat->ops->convert) {
3911: conv = mat->ops->convert;
3912: }
3913: if (conv) goto foundconv;
3915: /* 5) Use a really basic converter. */
3916: conv = MatConvert_Basic;
3918: foundconv:
3919: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
3920: (*conv)(mat,newtype,reuse,M);
3921: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
3922: }
3923: PetscObjectStateIncrease((PetscObject)*M);
3925: /* Copy Mat options */
3926: if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
3927: if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
3928: return(0);
3929: }
3933: /*@C
3934: MatFactorGetSolverPackage - Returns name of the package providing the factorization routines
3936: Not Collective
3938: Input Parameter:
3939: . mat - the matrix, must be a factored matrix
3941: Output Parameter:
3942: . type - the string name of the package (do not free this string)
3944: Notes:
3945: In Fortran you pass in an empty string and the package name will be copied into it.
3946: (Make sure the string is long enough.)
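   Example of Usage (a sketch, assuming fact is a factored matrix obtained with MatGetFactor()
   or extracted from a PC; error checking is omitted):
.vb
      const MatSolverPackage pkg;
      MatFactorGetSolverPackage(fact,&pkg);
      PetscPrintf(PETSC_COMM_WORLD,"Factored with package %s\n",pkg);
.ve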
3948: Level: intermediate
3950: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
3951: @*/
3952: PetscErrorCode MatFactorGetSolverPackage(Mat mat, const MatSolverPackage *type)
3953: {
3954: PetscErrorCode ierr, (*conv)(Mat,const MatSolverPackage*);
3959: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
3960: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverPackage_C",&conv);
3961: if (!conv) {
3962: *type = MATSOLVERPETSC;
3963: } else {
3964: (*conv)(mat,type);
3965: }
3966: return(0);
3967: }
3969: typedef struct _MatSolverPackageForSpecifcType* MatSolverPackageForSpecifcType;
3970: struct _MatSolverPackageForSpecifcType {
3971: MatType mtype;
3972: PetscErrorCode (*getfactor[4])(Mat,MatFactorType,Mat*);
3973: MatSolverPackageForSpecifcType next;
3974: };
3976: typedef struct _MatSolverPackageHolder* MatSolverPackageHolder;
3977: struct _MatSolverPackageHolder {
3978: char *name;
3979: MatSolverPackageForSpecifcType handlers;
3980: MatSolverPackageHolder next;
3981: };
3983: static MatSolverPackageHolder MatSolverPackageHolders = NULL;
3987: /*@C
3988: MatSolverPackageRegister - Registers a MatSolverPackage that works for a particular matrix type
3990: Input Parameters:
3991: + package - name of the package, for example petsc or superlu
3992: . mtype - the matrix type that works with this package
3993: . ftype - the type of factorization supported by the package
3994: - getfactor - routine that will create the factored matrix ready to be used
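   For example, a package could register an LU factorization for SeqAIJ matrices as follows
   (a sketch only; MatGetFactor_MyPkg_SeqAIJ is a hypothetical routine with the getfactor
   signature described above):
.vb
      MatSolverPackageRegister("mypkg",MATSEQAIJ,MAT_FACTOR_LU,MatGetFactor_MyPkg_SeqAIJ);
.ve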
3996: Level: intermediate
3998: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
3999: @*/
4000: PetscErrorCode MatSolverPackageRegister(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4001: {
4002: PetscErrorCode ierr;
4003: MatSolverPackageHolder next = MatSolverPackageHolders,prev;
4004: PetscBool flg;
4005: MatSolverPackageForSpecifcType inext,iprev = NULL;
4008: if (!MatSolverPackageHolders) {
4009: PetscNew(&MatSolverPackageHolders);
4010: PetscStrallocpy(package,&MatSolverPackageHolders->name);
4011: PetscNew(&MatSolverPackageHolders->handlers);
4012: PetscStrallocpy(mtype,(char **)&MatSolverPackageHolders->handlers->mtype);
4013: MatSolverPackageHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4014: return(0);
4015: }
4016: while (next) {
4017: PetscStrcasecmp(package,next->name,&flg);
4018: if (flg) {
4019: inext = next->handlers;
4020: while (inext) {
4021: PetscStrcasecmp(mtype,inext->mtype,&flg);
4022: if (flg) {
4023: inext->getfactor[(int)ftype-1] = getfactor;
4024: return(0);
4025: }
4026: iprev = inext;
4027: inext = inext->next;
4028: }
4029: PetscNew(&iprev->next);
4030: PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4031: iprev->next->getfactor[(int)ftype-1] = getfactor;
4032: return(0);
4033: }
4034: prev = next;
4035: next = next->next;
4036: }
4037: PetscNew(&prev->next);
4038: PetscStrallocpy(package,&prev->next->name);
4039: PetscNew(&prev->next->handlers);
4040: PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4041: prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4042: return(0);
4043: }
4047: /*@C
4048: MatSolverPackageGet - Gets the function that creates the factored matrix, if it exists
4050: Input Parameters:
4051: + package - name of the package, for example petsc or superlu
4052: . mtype - the matrix type that works with this package
4053: - ftype - the type of factorization supported by the package
4055: Output Parameters:
4056: + foundpackage - PETSC_TRUE if the package was registered
4057: . foundmtype - PETSC_TRUE if the package supports the requested mtype
4058: - getfactor - routine that will create the factored matrix ready to be used or NULL if not found
4060: Level: intermediate
4062: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4063: @*/
4064: PetscErrorCode MatSolverPackageGet(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4065: {
4066: PetscErrorCode ierr;
4067: MatSolverPackageHolder next = MatSolverPackageHolders;
4068: PetscBool flg;
4069: MatSolverPackageForSpecifcType inext;
4072: if (foundpackage) *foundpackage = PETSC_FALSE;
4073: if (foundmtype) *foundmtype = PETSC_FALSE;
4074: if (getfactor) *getfactor = NULL;
4075: while (next) {
4076: PetscStrcasecmp(package,next->name,&flg);
4077: if (flg) {
4078: if (foundpackage) *foundpackage = PETSC_TRUE;
4079: inext = next->handlers;
4080: while (inext) {
4081: PetscStrcasecmp(mtype,inext->mtype,&flg);
4082: if (flg) {
4083: if (foundmtype) *foundmtype = PETSC_TRUE;
4084: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4085: return(0);
4086: }
4087: inext = inext->next;
4088: }
4089: }
4090: next = next->next;
4091: }
4092: return(0);
4093: }
4097: PetscErrorCode MatSolverPackageDestroy(void)
4098: {
4099: PetscErrorCode ierr;
4100: MatSolverPackageHolder next = MatSolverPackageHolders,prev;
4101: MatSolverPackageForSpecifcType inext,iprev;
4104: while (next) {
4105: PetscFree(next->name);
4106: inext = next->handlers;
4107: while (inext) {
4108: PetscFree(inext->mtype);
4109: iprev = inext;
4110: inext = inext->next;
4111: PetscFree(iprev);
4112: }
4113: prev = next;
4114: next = next->next;
4115: PetscFree(prev);
4116: }
4117: MatSolverPackageHolders = NULL;
4118: return(0);
4119: }
4123: /*@C
4124: MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()
4126: Collective on Mat
4128: Input Parameters:
4129: + mat - the matrix
4130: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4131: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,
4133: Output Parameters:
4134: . f - the factor matrix used with MatXXFactorSymbolic() calls
4136: Notes:
4137: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4138: such as pastix, superlu, mumps etc.
4140: PETSc must have been configured (./configure) to use the external solver, using the option --download-package
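   Example of Usage (a sketch of a direct LU solve using the built-in PETSc factorization for
   a sequential AIJ matrix; error checking is omitted):
.vb
      Mat           F;
      IS            row,col;
      MatFactorInfo info;
      MatGetOrdering(A,MATORDERINGND,&row,&col);
      MatFactorInfoInitialize(&info);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
      MatLUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
      ISDestroy(&row);
      ISDestroy(&col);
.ve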
4142: Level: intermediate
4144: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4145: @*/
4146: PetscErrorCode MatGetFactor(Mat mat, const MatSolverPackage type,MatFactorType ftype,Mat *f)
4147: {
4148: PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4149: PetscBool foundpackage,foundmtype;
4155: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4156: MatCheckPreallocated(mat,1);
4158: MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4159: if (!foundpackage) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4160: if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4161: if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support factorization type %s for matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);
4163: (*conv)(mat,ftype,f);
4164: return(0);
4165: }
4169: /*@C
4170: MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular solver package and factor type
4172: Not Collective
4174: Input Parameters:
4175: + mat - the matrix
4176: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4177: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,
4179: Output Parameter:
4180: . flg - PETSC_TRUE if the factorization is available
4182: Notes:
4183: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4184: such as pastix, superlu, mumps etc.
4186: PETSc must have been configured (./configure) to use the external solver, using the option --download-package
4188: Level: intermediate
4190: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4191: @*/
4192: PetscErrorCode MatGetFactorAvailable(Mat mat, const MatSolverPackage type,MatFactorType ftype,PetscBool *flg)
4193: {
4194: PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);
4200: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4201: MatCheckPreallocated(mat,1);
4203: *flg = PETSC_FALSE;
4204: MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4205: if (gconv) {
4206: *flg = PETSC_TRUE;
4207: }
4208: return(0);
4209: }
4211: #include <petscdmtypes.h>
4215: /*@
4216: MatDuplicate - Duplicates a matrix including the non-zero structure.
4218: Collective on Mat
4220: Input Parameters:
4221: + mat - the matrix
4222: - op - either MAT_DO_NOT_COPY_VALUES or MAT_COPY_VALUES, which causes the numerical values in the matrix to be copied, or
4223: MAT_SHARE_NONZERO_PATTERN to share the nonzero pattern with the original matrix and not copy the values.
4225: Output Parameter:
4226: . M - pointer to place new matrix
4228: Level: intermediate
4230: Concepts: matrices^duplicating
4232: Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
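   Example of Usage (a sketch; B gets the same layout and nonzero structure as A, and here also
   its numerical values):
.vb
      Mat B;
      MatDuplicate(A,MAT_COPY_VALUES,&B);
.ve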
4234: .seealso: MatCopy(), MatConvert()
4235: @*/
4236: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4237: {
4239: Mat B;
4240: PetscInt i;
4241: DM dm;
4247: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4248: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4249: MatCheckPreallocated(mat,1);
4251: *M = 0;
4252: if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4253: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4254: (*mat->ops->duplicate)(mat,op,M);
4255: B = *M;
4257: B->stencil.dim = mat->stencil.dim;
4258: B->stencil.noc = mat->stencil.noc;
4259: for (i=0; i<=mat->stencil.dim; i++) {
4260: B->stencil.dims[i] = mat->stencil.dims[i];
4261: B->stencil.starts[i] = mat->stencil.starts[i];
4262: }
4264: B->nooffproczerorows = mat->nooffproczerorows;
4265: B->nooffprocentries = mat->nooffprocentries;
4267: PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4268: if (dm) {
4269: PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4270: }
4271: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4272: PetscObjectStateIncrease((PetscObject)B);
4273: return(0);
4274: }
4278: /*@
4279: MatGetDiagonal - Gets the diagonal of a matrix.
4281: Logically Collective on Mat and Vec
4283: Input Parameters:
4284: + mat - the matrix
4285: - v - the vector for storing the diagonal
4287: Output Parameter:
4288: . v - the diagonal of the matrix
4290: Level: intermediate
4292: Note:
4293: Currently only correct in parallel for square matrices.
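   Example of Usage (a sketch; the vector must have a layout compatible with the matrix rows,
   e.g. as produced by MatCreateVecs(); error checking is omitted):
.vb
      Vec d;
      MatCreateVecs(A,NULL,&d);
      MatGetDiagonal(A,d);
.ve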
4295: Concepts: matrices^accessing diagonals
4297: .seealso: MatGetRow(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs()
4298: @*/
4299: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4300: {
4307: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4308: if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4309: MatCheckPreallocated(mat,1);
4311: (*mat->ops->getdiagonal)(mat,v);
4312: PetscObjectStateIncrease((PetscObject)v);
4313: return(0);
4314: }
4318: /*@C
4319: MatGetRowMin - Gets the minimum value (of the real part) of each
4320: row of the matrix
4322: Logically Collective on Mat and Vec
4324: Input Parameters:
4325: . mat - the matrix
4327: Output Parameter:
4328: + v - the vector for storing the minimums
4329: - idx - the indices of the column found for each row (optional)
4331: Level: intermediate
4333: Notes: The results of this call are the same as if one converted the matrix to dense format
4334: and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).
4336: This code is only implemented for a couple of matrix formats.
4338: Concepts: matrices^getting row minimums
4340: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(),
4341: MatGetRowMax()
4342: @*/
4343: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4344: {
4351: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4352: if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4353: MatCheckPreallocated(mat,1);
4355: (*mat->ops->getrowmin)(mat,v,idx);
4356: PetscObjectStateIncrease((PetscObject)v);
4357: return(0);
4358: }
4362: /*@C
4363: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4364: row of the matrix
4366: Logically Collective on Mat and Vec
4368: Input Parameters:
4369: . mat - the matrix
4371: Output Parameter:
4372: + v - the vector for storing the minimums
4373: - idx - the indices of the column found for each row (or NULL if not needed)
4375: Level: intermediate
4377: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4378: row is 0 (the first column).
4380: This code is only implemented for a couple of matrix formats.
4382: Concepts: matrices^getting row minimums
4384: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4385: @*/
4386: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4387: {
4394: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4395: if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4396: MatCheckPreallocated(mat,1);
4397: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4399: (*mat->ops->getrowminabs)(mat,v,idx);
4400: PetscObjectStateIncrease((PetscObject)v);
4401: return(0);
4402: }
4406: /*@C
4407: MatGetRowMax - Gets the maximum value (of the real part) of each
4408: row of the matrix
4410: Logically Collective on Mat and Vec
4412: Input Parameters:
4413: . mat - the matrix
4415: Output Parameter:
4416: + v - the vector for storing the maximums
4417: - idx - the indices of the column found for each row (optional)
4419: Level: intermediate
4421: Notes: The results of this call are the same as if one converted the matrix to dense format
4422: and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).
4424: This code is only implemented for a couple of matrix formats.
4426: Concepts: matrices^getting row maximums
4428: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4429: @*/
4430: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4431: {
4438: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4439: if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4440: MatCheckPreallocated(mat,1);
4442: (*mat->ops->getrowmax)(mat,v,idx);
4443: PetscObjectStateIncrease((PetscObject)v);
4444: return(0);
4445: }
4449: /*@C
4450: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4451: row of the matrix
4453: Logically Collective on Mat and Vec
4455: Input Parameters:
4456: . mat - the matrix
4458: Output Parameter:
4459: + v - the vector for storing the maximums
4460: - idx - the indices of the column found for each row (or NULL if not needed)
4462: Level: intermediate
4464: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4465: row is 0 (the first column).
4467: This code is only implemented for a couple of matrix formats.
4469: Concepts: matrices^getting row maximums
4471: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4472: @*/
4473: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4474: {
4481: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4482: if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4483: MatCheckPreallocated(mat,1);
4484: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4486: (*mat->ops->getrowmaxabs)(mat,v,idx);
4487: PetscObjectStateIncrease((PetscObject)v);
4488: return(0);
4489: }
4493: /*@
4494: MatGetRowSum - Gets the sum of each row of the matrix
4496: Logically Collective on Mat and Vec
4498: Input Parameters:
4499: . mat - the matrix
4501: Output Parameter:
4502: . v - the vector for storing the sum of rows
4504: Level: intermediate
4506: Notes: This code is slow since it is not currently specialized for different formats
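   For reference, the same result can be obtained by multiplying with a vector of ones, which is
   what the loop below effectively computes (a sketch; v is assumed to have the row layout of mat,
   as required by MatGetRowSum(), and error checking is omitted):
.vb
      Vec ones;
      MatCreateVecs(mat,&ones,NULL);
      VecSet(ones,1.0);
      MatMult(mat,ones,v);        /* v[i] = sum_j a(i,j) */
      VecDestroy(&ones);
.ve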
4508: Concepts: matrices^getting row sums
4510: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4511: @*/
4512: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4513: {
4514: PetscInt start = 0, end = 0, row;
4515: PetscScalar *array;
4522: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4523: MatCheckPreallocated(mat,1);
4524: MatGetOwnershipRange(mat, &start, &end);
4525: VecGetArray(v, &array);
4526: for (row = start; row < end; ++row) {
4527: PetscInt ncols, col;
4528: const PetscInt *cols;
4529: const PetscScalar *vals;
4531: array[row - start] = 0.0;
4533: MatGetRow(mat, row, &ncols, &cols, &vals);
4534: for (col = 0; col < ncols; col++) {
4535: array[row - start] += vals[col];
4536: }
4537: MatRestoreRow(mat, row, &ncols, &cols, &vals);
4538: }
4539: VecRestoreArray(v, &array);
4540: PetscObjectStateIncrease((PetscObject) v);
4541: return(0);
4542: }
4546: /*@
4547: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4549: Collective on Mat
4551: Input Parameter:
4552: + mat - the matrix to transpose
4553: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4555: Output Parameters:
4556: . B - the transpose
4558: Notes:
4559: If you pass in &mat for B the transpose will be done in place, for example MatTranspose(mat,MAT_REUSE_MATRIX,&mat);
4561: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
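   Example of Usage (a sketch; the first call creates a new matrix holding the transpose, the
   second replaces A by its transpose in place):
.vb
      Mat At;
      MatTranspose(A,MAT_INITIAL_MATRIX,&At);
      MatTranspose(A,MAT_REUSE_MATRIX,&A);
.ve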
4563: Level: intermediate
4565: Concepts: matrices^transposing
4567: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4568: @*/
4569: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4570: {
4576: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4577: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4578: if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4579: MatCheckPreallocated(mat,1);
4581: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4582: (*mat->ops->transpose)(mat,reuse,B);
4583: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4584: if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4585: return(0);
4586: }
4590: /*@
4591: MatIsTranspose - Test whether a matrix is another one's transpose,
4592: or its own, in which case it tests symmetry.
4594: Collective on Mat
4596: Input Parameter:
4597: + A - the matrix to test
4598: - B - the matrix to test against, this can equal the first parameter
4600: Output Parameters:
4601: . flg - the result
4603: Notes:
4604: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4605: has a running time of the order of the number of nonzeros; the parallel
4606: test involves parallel copies of the block-offdiagonal parts of the matrix.
4608: Level: intermediate
4610: Concepts: matrices^transposing, matrix^symmetry
4612: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4613: @*/
4614: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4615: {
4616: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4622: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4623: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4624: *flg = PETSC_FALSE;
4625: if (f && g) {
4626: if (f == g) {
4627: (*f)(A,B,tol,flg);
4628: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4629: } else {
4630: MatType mattype;
4631: if (!f) {
4632: MatGetType(A,&mattype);
4633: } else {
4634: MatGetType(B,&mattype);
4635: }
4636: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4637: }
4638: return(0);
4639: }
4643: /*@
4644: MatHermitianTranspose - Computes the in-place or out-of-place Hermitian (conjugate) transpose of a matrix.
4646: Collective on Mat
4648: Input Parameter:
4649: + mat - the matrix to transpose and complex conjugate
4650: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4652: Output Parameters:
4653: . B - the Hermitian transpose
4655: Notes:
4656: If you pass in &mat for B the Hermitian will be done in place
4658: Level: intermediate
4660: Concepts: matrices^transposing, complex conjugate
4662: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4663: @*/
4664: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4665: {
4669: MatTranspose(mat,reuse,B);
4670: #if defined(PETSC_USE_COMPLEX)
4671: MatConjugate(*B);
4672: #endif
4673: return(0);
4674: }
4678: /*@
4679: MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.
4681: Collective on Mat
4683: Input Parameter:
4684: + A - the matrix to test
4685: - B - the matrix to test against, this can equal the first parameter
4687: Output Parameters:
4688: . flg - the result
4690: Notes:
4691: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4692: has a running time of the order of the number of nonzeros; the parallel
4693: test involves parallel copies of the block-offdiagonal parts of the matrix.
4695: Level: intermediate
4697: Concepts: matrices^transposing, matrix^symmetry
4699: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4700: @*/
4701: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4702: {
4703: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4709: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4710: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4711: if (f && g) {
4712: if (f==g) {
4713: (*f)(A,B,tol,flg);
4714: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4715: }
4716: return(0);
4717: }
4721: /*@
4722: MatPermute - Creates a new matrix with rows and columns permuted from the
4723: original.
4725: Collective on Mat
4727: Input Parameters:
4728: + mat - the matrix to permute
4729: . row - row permutation, each processor supplies only the permutation for its rows
4730: - col - column permutation, each processor supplies only the permutation for its columns
4732: Output Parameters:
4733: . B - the permuted matrix
4735: Level: advanced
4737: Note:
4738: The index sets map from row/col of permuted matrix to row/col of original matrix.
4739: The index sets should be on the same communicator as Mat and have the same local sizes.
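   Example of Usage (a sketch using a reverse Cuthill-McKee ordering computed by MatGetOrdering();
   error checking is omitted):
.vb
      IS  rperm,cperm;
      Mat Aperm;
      MatGetOrdering(A,MATORDERINGRCM,&rperm,&cperm);
      MatPermute(A,rperm,cperm,&Aperm);
      ISDestroy(&rperm);
      ISDestroy(&cperm);
.ve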
4741: Concepts: matrices^permuting
4743: .seealso: MatGetOrdering(), ISAllGather()
4745: @*/
4746: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4747: {
4756: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4757: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4758: if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4759: MatCheckPreallocated(mat,1);
4761: (*mat->ops->permute)(mat,row,col,B);
4762: PetscObjectStateIncrease((PetscObject)*B);
4763: return(0);
4764: }
4768: /*@
4769: MatEqual - Compares two matrices.
4771: Collective on Mat
4773: Input Parameters:
4774: + A - the first matrix
4775: - B - the second matrix
4777: Output Parameter:
4778: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
4780: Level: intermediate
4782: Concepts: matrices^equality between
4783: @*/
4784: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
4785: {
4795: MatCheckPreallocated(B,2);
4796: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4797: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4798: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4799: if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4800: if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4801: if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4802: MatCheckPreallocated(A,1);
4804: (*A->ops->equal)(A,B,flg);
4805: return(0);
4806: }
4810: /*@
4811: MatDiagonalScale - Scales a matrix on the left and right by diagonal
4812: matrices that are stored as vectors. Either of the two scaling
4813: matrices can be NULL.
4815: Collective on Mat
4817: Input Parameters:
4818: + mat - the matrix to be scaled
4819: . l - the left scaling vector (or NULL)
4820: - r - the right scaling vector (or NULL)
4822: Notes:
4823: MatDiagonalScale() computes A = LAR, where
4824: L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
4825: L scales the rows of the matrix and R scales the columns of the matrix.
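   Example of Usage (a sketch that scales the rows of A by the inverse of its diagonal; it assumes
   a square matrix with no zero diagonal entries and omits error checking):
.vb
      Vec d;
      MatCreateVecs(A,NULL,&d);
      MatGetDiagonal(A,d);
      VecReciprocal(d);
      MatDiagonalScale(A,d,NULL);   /* rows of A are scaled by 1/diag(A) */
.ve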
4827: Level: intermediate
4829: Concepts: matrices^diagonal scaling
4830: Concepts: diagonal scaling of matrices
4832: .seealso: MatScale()
4833: @*/
4834: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4835: {
4841: if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4844: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4845: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4846: MatCheckPreallocated(mat,1);
4848: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4849: (*mat->ops->diagonalscale)(mat,l,r);
4850: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4851: PetscObjectStateIncrease((PetscObject)mat);
4852: #if defined(PETSC_HAVE_CUSP)
4853: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4854: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4855: }
4856: #endif
4857: #if defined(PETSC_HAVE_VIENNACL)
4858: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4859: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4860: }
4861: #endif
4862: return(0);
4863: }
4867: /*@
4868: MatScale - Scales all elements of a matrix by a given number.
4870: Logically Collective on Mat
4872: Input Parameters:
4873: + mat - the matrix to be scaled
4874: - a - the scaling value
4876: Output Parameter:
4877: . mat - the scaled matrix
4879: Level: intermediate
4881: Concepts: matrices^scaling all entries
4883: .seealso: MatDiagonalScale()
4884: @*/
4885: PetscErrorCode MatScale(Mat mat,PetscScalar a)
4886: {
4892: if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4893: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4894: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4896: MatCheckPreallocated(mat,1);
4898: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4899: if (a != (PetscScalar)1.0) {
4900: (*mat->ops->scale)(mat,a);
4901: PetscObjectStateIncrease((PetscObject)mat);
4902: }
4903: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4904: #if defined(PETSC_HAVE_CUSP)
4905: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4906: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4907: }
4908: #endif
4909: #if defined(PETSC_HAVE_VIENNACL)
4910: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4911: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4912: }
4913: #endif
4914: return(0);
4915: }
4919: /*@
4920: MatNorm - Calculates various norms of a matrix.
4922: Collective on Mat
4924: Input Parameters:
4925: + mat - the matrix
4926: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
4928: Output Parameters:
4929: . nrm - the resulting norm
4931: Level: intermediate
4933: Concepts: matrices^norm
4934: Concepts: norm^of matrix
4935: @*/
4936: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
4937: {
4945: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4946: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4947: if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4948: MatCheckPreallocated(mat,1);
4950: (*mat->ops->norm)(mat,type,nrm);
4951: return(0);
4952: }
4954: /*
4955: This variable is used to prevent counting of MatAssemblyBegin() that
4956: are called from within a MatAssemblyEnd().
4957: */
4958: static PetscInt MatAssemblyEnd_InUse = 0;
4961: /*@
4962: MatAssemblyBegin - Begins assembling the matrix. This routine should
4963: be called after completing all calls to MatSetValues().
4965: Collective on Mat
4967: Input Parameters:
4968: + mat - the matrix
4969: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
4971: Notes:
4972: MatSetValues() generally caches the values. The matrix is ready to
4973: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
4974: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
4975: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
4976: using the matrix.
4978: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
4979: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
4980: a global collective operation requiring all processes that share the matrix.
4982: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
4983: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
4984: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
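   A typical assembly sequence looks like the following (a sketch; the values inserted here are
   illustrative only and error checking is omitted):
.vb
      PetscInt    i,rstart,rend;
      PetscScalar v = 1.0;
      MatGetOwnershipRange(A,&rstart,&rend);
      for (i=rstart; i<rend; i++) {
        MatSetValues(A,1,&i,1,&i,&v,INSERT_VALUES);   /* insert a diagonal entry in each local row */
      }
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve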
4986: Level: beginner
4988: Concepts: matrices^assembling
4990: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
4991: @*/
4992: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
4993: {
4999: MatCheckPreallocated(mat,1);
5000: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5001: if (mat->assembled) {
5002: mat->was_assembled = PETSC_TRUE;
5003: mat->assembled = PETSC_FALSE;
5004: }
5005: if (!MatAssemblyEnd_InUse) {
5006: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5007: if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5008: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5009: } else if (mat->ops->assemblybegin) {
5010: (*mat->ops->assemblybegin)(mat,type);
5011: }
5012: return(0);
5013: }
5017: /*@
5018: MatAssembled - Indicates if a matrix has been assembled and is ready for
5019: use; for example, in matrix-vector product.
5021: Not Collective
5023: Input Parameter:
5024: . mat - the matrix
5026: Output Parameter:
5027: . assembled - PETSC_TRUE or PETSC_FALSE
5029: Level: advanced
5031: Concepts: matrices^assembled?
5033: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5034: @*/
5035: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
5036: {
5041: *assembled = mat->assembled;
5042: return(0);
5043: }
5047: /*@
5048: MatAssemblyEnd - Completes assembling the matrix. This routine should
5049: be called after MatAssemblyBegin().
5051: Collective on Mat
5053: Input Parameters:
5054: + mat - the matrix
5055: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5057: Options Database Keys:
5058: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5059: . -mat_view ::ascii_info_detail - Prints more detailed info
5060: . -mat_view - Prints matrix in ASCII format
5061: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
5062: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5063: . -display <name> - Sets display name (default is host)
5064: . -draw_pause <sec> - Sets number of seconds to pause after display
5065: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 11 Using MATLAB with PETSc )
5066: . -viewer_socket_machine <machine> - Machine to use for socket
5067: . -viewer_socket_port <port> - Port number to use for socket
5068: - -mat_view binary:filename[:append] - Save matrix to file in binary format
5070: Notes:
5071: MatSetValues() generally caches the values. The matrix is ready to
5072: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5073: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5074: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5075: using the matrix.
5077: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5078: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5079: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
5081: Level: beginner
5083: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5084: @*/
5085: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5086: {
5087: PetscErrorCode ierr;
5088: static PetscInt inassm = 0;
5089: PetscBool flg = PETSC_FALSE;
5095: inassm++;
5096: MatAssemblyEnd_InUse++;
5097: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5098: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5099: if (mat->ops->assemblyend) {
5100: (*mat->ops->assemblyend)(mat,type);
5101: }
5102: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5103: } else if (mat->ops->assemblyend) {
5104: (*mat->ops->assemblyend)(mat,type);
5105: }
5107: /* Flush assembly is not a true assembly */
5108: if (type != MAT_FLUSH_ASSEMBLY) {
5109: mat->assembled = PETSC_TRUE; mat->num_ass++;
5110: }
5111: mat->insertmode = NOT_SET_VALUES;
5112: MatAssemblyEnd_InUse--;
5113: PetscObjectStateIncrease((PetscObject)mat);
5114: if (!mat->symmetric_eternal) {
5115: mat->symmetric_set = PETSC_FALSE;
5116: mat->hermitian_set = PETSC_FALSE;
5117: mat->structurally_symmetric_set = PETSC_FALSE;
5118: }
5119: #if defined(PETSC_HAVE_CUSP)
5120: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5121: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5122: }
5123: #endif
5124: #if defined(PETSC_HAVE_VIENNACL)
5125: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5126: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5127: }
5128: #endif
5129: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5130: MatViewFromOptions(mat,NULL,"-mat_view");
5132: if (mat->checksymmetryonassembly) {
5133: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5134: if (flg) {
5135: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5136: } else {
5137: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5138: }
5139: }
5140: if (mat->nullsp && mat->checknullspaceonassembly) {
5141: MatNullSpaceTest(mat->nullsp,mat,NULL);
5142: }
5143: }
5144: inassm--;
5145: return(0);
5146: }
5150: /*@
5151: MatSetOption - Sets a parameter option for a matrix. Some options
5152: may be specific to certain storage formats. Some options
5153: determine how values will be inserted (or added). Sorted,
5154: row-oriented input will generally assemble the fastest. The default
5155: is row-oriented.
5157: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5159: Input Parameters:
5160: + mat - the matrix
5161: . option - the option, one of those listed below (and possibly others),
5162: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5164: Options Describing Matrix Structure:
5165: + MAT_SPD - symmetric positive definite
5166: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5167: . MAT_HERMITIAN - transpose is the complex conjugation
5168: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5169: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5170: you set to be kept with all future use of the matrix
5171: including after MatAssemblyBegin/End() which could
5172: potentially change the symmetry structure, i.e. you
5173: KNOW the matrix will ALWAYS have the property you set.
5176: Options For Use with MatSetValues():
5177: Insert a logically dense subblock, which can be
5178: . MAT_ROW_ORIENTED - row-oriented (default)
5180: Note these options reflect the data you pass in with MatSetValues(); it has
5181: nothing to do with how the data is stored internally in the matrix
5182: data structure.
5184: When (re)assembling a matrix, we can restrict the input for
5185: efficiency/debugging purposes. These options include:
5186: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5187: . MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5188: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5189: . MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5190: . MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5191: - MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows; an error will be generated if
5192: any process sets values for another process's rows. This avoids all reductions in the MatAssembly routines and thus improves
5193: performance for very large process counts.
5195: Notes:
5196: Some options are relevant only for particular matrix types and
5197: are thus ignored by others. Other options are not supported by
5198: certain matrix types and will generate an error message if set.
5200: If using a Fortran 77 module to compute a matrix, one may need to
5201: use the column-oriented option (or convert to the row-oriented
5202: format).
5204: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5205: that would generate a new entry in the nonzero structure is instead
5206: ignored. Thus, if memory has not already been allocated for this particular
5207: data, then the insertion is ignored. For dense matrices, in which
5208: the entire array is allocated, no entries are ever ignored.
5209: This option should be set after the first MatAssemblyEnd().
5211: MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5212: that would generate a new entry in the nonzero structure instead produces
5213: an error. (Currently supported for AIJ and BAIJ formats only.)
5215: MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5216: that would generate a new entry that has not been preallocated will
5217: instead produce an error. (Currently supported for AIJ and BAIJ formats
5218: only.) This is a useful flag when debugging matrix memory preallocation.
5220: MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5221: other processors should be dropped, rather than stashed.
5222: This is useful if you know that the "owning" processor is also
5223: always generating the correct matrix entries, so that PETSc need
5224: not transfer duplicate entries generated on another processor.
5226: MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5227: searches during matrix assembly. When this flag is set, the hash table
5228: is created during the first matrix assembly. This hash table is
5229: used the next time through, during MatSetValues()/MatSetValuesBlocked(),
5230: to improve the searching of indices. The MAT_NEW_NONZERO_LOCATIONS flag
5231: should be used with the MAT_USE_HASH_TABLE flag. This option is currently
5232: supported only by the MATMPIBAIJ format.
5234: MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5235: are kept in the nonzero structure
5237: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5238: a zero location in the matrix
5240: MAT_USE_INODES - indicates using inode version of the code - works with AIJ and
5241: ROWBS matrix types
5243: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5244: zero row routines and thus improves performance for very large process counts.
5246: MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5247: part of the matrix (since they should match the upper triangular part).
5249: Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
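   For example, a sketch of typical usage when debugging preallocation (error checking omitted; A, m, n, M, N are assumed to be a Mat
   created with MatCreate() and its local/global sizes); the first option turns any insertion outside the preallocated pattern into an
   error, the second declares that the assembled matrix will be symmetric:
.vb
      MatSetSizes(A,m,n,M,N);
      MatSetType(A,MATAIJ);
      MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
      MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
.ve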
5251: Level: intermediate
5253: Concepts: matrices^setting options
5255: .seealso: MatOption, Mat
5257: @*/
5258: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5259: {
5265: if (op > 0) {
5268: }
5270: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5271: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");
5273: switch (op) {
5274: case MAT_NO_OFF_PROC_ENTRIES:
5275: mat->nooffprocentries = flg;
5276: return(0);
5277: break;
5278: case MAT_NO_OFF_PROC_ZERO_ROWS:
5279: mat->nooffproczerorows = flg;
5280: return(0);
5281: break;
5282: case MAT_SPD:
5283: mat->spd_set = PETSC_TRUE;
5284: mat->spd = flg;
5285: if (flg) {
5286: mat->symmetric = PETSC_TRUE;
5287: mat->structurally_symmetric = PETSC_TRUE;
5288: mat->symmetric_set = PETSC_TRUE;
5289: mat->structurally_symmetric_set = PETSC_TRUE;
5290: }
5291: break;
5292: case MAT_SYMMETRIC:
5293: mat->symmetric = flg;
5294: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5295: mat->symmetric_set = PETSC_TRUE;
5296: mat->structurally_symmetric_set = flg;
5297: break;
5298: case MAT_HERMITIAN:
5299: mat->hermitian = flg;
5300: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5301: mat->hermitian_set = PETSC_TRUE;
5302: mat->structurally_symmetric_set = flg;
5303: break;
5304: case MAT_STRUCTURALLY_SYMMETRIC:
5305: mat->structurally_symmetric = flg;
5306: mat->structurally_symmetric_set = PETSC_TRUE;
5307: break;
5308: case MAT_SYMMETRY_ETERNAL:
5309: mat->symmetric_eternal = flg;
5310: break;
5311: default:
5312: break;
5313: }
5314: if (mat->ops->setoption) {
5315: (*mat->ops->setoption)(mat,op,flg);
5316: }
5317: return(0);
5318: }
5322: /*@
5323: MatGetOption - Gets a parameter option that has been set for a matrix.
5325: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5327: Input Parameters:
5328: + mat - the matrix
5329: - option - the option; this only responds to certain options, check the code to see which ones are supported
5331: Output Parameter:
5332: . flg - the current value of the option (PETSC_TRUE or PETSC_FALSE)
5334: Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
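   For example, a sketch of querying whether the matrix has been flagged as symmetric, e.g. to choose a symmetry-exploiting
   algorithm (A is assumed to be a Mat whose type and sizes have been set):
.vb
      PetscBool issym;
      MatGetOption(A,MAT_SYMMETRIC,&issym);
.ve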
5336: Level: intermediate
5338: Concepts: matrices^setting options
5340: .seealso: MatOption, MatSetOption()
5342: @*/
5343: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5344: {
5349: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5350: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");
5352: switch (op) {
5353: case MAT_NO_OFF_PROC_ENTRIES:
5354: *flg = mat->nooffprocentries;
5355: break;
5356: case MAT_NO_OFF_PROC_ZERO_ROWS:
5357: *flg = mat->nooffproczerorows;
5358: break;
5359: case MAT_SYMMETRIC:
5360: *flg = mat->symmetric;
5361: break;
5362: case MAT_HERMITIAN:
5363: *flg = mat->hermitian;
5364: break;
5365: case MAT_STRUCTURALLY_SYMMETRIC:
5366: *flg = mat->structurally_symmetric;
5367: break;
5368: case MAT_SYMMETRY_ETERNAL:
5369: *flg = mat->symmetric_eternal;
5370: break;
5371: default:
5372: break;
5373: }
5374: return(0);
5375: }
5379: /*@
5380: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5381: this routine retains the old nonzero structure.
5383: Logically Collective on Mat
5385: Input Parameters:
5386: . mat - the matrix
5388: Level: intermediate
5390: Notes: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5391: See the Performance chapter of the users manual for information on preallocating matrices.
5393: Concepts: matrices^zeroing
5395: .seealso: MatZeroRows()
5396: @*/
5397: PetscErrorCode MatZeroEntries(Mat mat)
5398: {
5404: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5405: if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5406: if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5407: MatCheckPreallocated(mat,1);
5409: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5410: (*mat->ops->zeroentries)(mat);
5411: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5412: PetscObjectStateIncrease((PetscObject)mat);
5413: #if defined(PETSC_HAVE_CUSP)
5414: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5415: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5416: }
5417: #endif
5418: #if defined(PETSC_HAVE_VIENNACL)
5419: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5420: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5421: }
5422: #endif
5423: return(0);
5424: }
5428: /*@C
5429: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5430: of a set of rows and columns of a matrix.
5432: Collective on Mat
5434: Input Parameters:
5435: + mat - the matrix
5436: . numRows - the number of rows to remove
5437: . rows - the global row indices
5438: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5439: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5440: - b - optional vector of right hand side, that will be adjusted by provided solution
5442: Notes:
5443: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5445: The user can set a value in the diagonal entry (or for the AIJ and
5446: row formats can optionally remove the main diagonal entry from the
5447: nonzero structure as well, by passing 0.0 as the diag value).
5449: For the parallel case, all processes that share the matrix (i.e.,
5450: those in the communicator used for matrix creation) MUST call this
5451: routine, regardless of whether any rows being zeroed are owned by
5452: them.
5454: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5455: list only rows local to itself).
5457: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5459: Level: intermediate
5461: Concepts: matrices^zeroing rows
5463: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumnsIS()
5464: @*/
5465: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5466: {
5473: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5474: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5475: if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5476: MatCheckPreallocated(mat,1);
5478: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5479: MatViewFromOptions(mat,NULL,"-mat_view");
5480: PetscObjectStateIncrease((PetscObject)mat);
5481: #if defined(PETSC_HAVE_CUSP)
5482: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5483: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5484: }
5485: #endif
5486: #if defined(PETSC_HAVE_VIENNACL)
5487: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5488: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5489: }
5490: #endif
5491: return(0);
5492: }
5496: /*@C
5497: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5498: of a set of rows and columns of a matrix.
5500: Collective on Mat
5502: Input Parameters:
5503: + mat - the matrix
5504: . is - the rows to zero
5505: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5506: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5507: - b - optional vector of right hand side, that will be adjusted by provided solution
5509: Notes:
5510: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5512: The user can set a value in the diagonal entry (or for the AIJ and
5513: row formats can optionally remove the main diagonal entry from the
5514: nonzero structure as well, by passing 0.0 as the diag value).
5516: For the parallel case, all processes that share the matrix (i.e.,
5517: those in the communicator used for matrix creation) MUST call this
5518: routine, regardless of whether any rows being zeroed are owned by
5519: them.
5521: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5522: list only rows local to itself).
5524: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5526: Level: intermediate
5528: Concepts: matrices^zeroing rows
5530: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumns()
5531: @*/
5532: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5533: {
5535: PetscInt numRows;
5536: const PetscInt *rows;
5543: ISGetLocalSize(is,&numRows);
5544: ISGetIndices(is,&rows);
5545: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5546: ISRestoreIndices(is,&rows);
5547: return(0);
5548: }
5552: /*@C
5553: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5554: of a set of rows of a matrix.
5556: Collective on Mat
5558: Input Parameters:
5559: + mat - the matrix
5560: . numRows - the number of rows to remove
5561: . rows - the global row indices
5562: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5563: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5564: - b - optional vector of right hand side, that will be adjusted by provided solution
5566: Notes:
5567: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5568: but does not release memory. For the dense and block diagonal
5569: formats this does not alter the nonzero structure.
5571: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5572: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5573: merely zeroed.
5575: The user can set a value in the diagonal entry (or for the AIJ and
5576: row formats can optionally remove the main diagonal entry from the
5577: nonzero structure as well, by passing 0.0 as the diag value).
5579: For the parallel case, all processes that share the matrix (i.e.,
5580: those in the communicator used for matrix creation) MUST call this
5581: routine, regardless of whether any rows being zeroed are owned by
5582: them.
5584: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5585: list only rows local to itself).
5587: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5588: owns that are to be zeroed. This saves a global synchronization in the implementation.
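   For example, a sketch of eliminating a few Dirichlet rows while keeping the linear system consistent (error checking omitted;
   A, x, b are assumed to be an assembled Mat, a Vec holding the known boundary values, and the right hand side Vec; the row
   numbers are purely illustrative, and NULL may be passed for x and b if no right hand side correction is wanted):
.vb
      PetscInt rows[] = {0, 5, 42};
      MatZeroRows(A,3,rows,1.0,x,b);
.ve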
5590: Level: intermediate
5592: Concepts: matrices^zeroing rows
5594: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5595: @*/
5596: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5597: {
5604: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5605: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5606: if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5607: MatCheckPreallocated(mat,1);
5609: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5610: MatViewFromOptions(mat,NULL,"-mat_view");
5611: PetscObjectStateIncrease((PetscObject)mat);
5612: #if defined(PETSC_HAVE_CUSP)
5613: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5614: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5615: }
5616: #endif
5617: #if defined(PETSC_HAVE_VIENNACL)
5618: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5619: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5620: }
5621: #endif
5622: return(0);
5623: }
5627: /*@C
5628: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5629: of a set of rows of a matrix.
5631: Collective on Mat
5633: Input Parameters:
5634: + mat - the matrix
5635: . is - index set of rows to remove
5636: . diag - value put in all diagonals of eliminated rows
5637: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5638: - b - optional vector of right hand side, that will be adjusted by provided solution
5640: Notes:
5641: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5642: but does not release memory. For the dense and block diagonal
5643: formats this does not alter the nonzero structure.
5645: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5646: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5647: merely zeroed.
5649: The user can set a value in the diagonal entry (or for the AIJ and
5650: row formats can optionally remove the main diagonal entry from the
5651: nonzero structure as well, by passing 0.0 as the diag value).
5653: For the parallel case, all processes that share the matrix (i.e.,
5654: those in the communicator used for matrix creation) MUST call this
5655: routine, regardless of whether any rows being zeroed are owned by
5656: them.
5658: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5659: list only rows local to itself).
5661: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5662: owns that are to be zeroed. This saves a global synchronization in the implementation.
5664: Level: intermediate
5666: Concepts: matrices^zeroing rows
5668: .seealso: MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5669: @*/
5670: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5671: {
5672: PetscInt numRows;
5673: const PetscInt *rows;
5680: ISGetLocalSize(is,&numRows);
5681: ISGetIndices(is,&rows);
5682: MatZeroRows(mat,numRows,rows,diag,x,b);
5683: ISRestoreIndices(is,&rows);
5684: return(0);
5685: }
5689: /*@C
5690: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5691: of a set of rows of a matrix. These rows must be local to the process.
5693: Collective on Mat
5695: Input Parameters:
5696: + mat - the matrix
5697: . numRows - the number of rows to remove
5698: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5699: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5700: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5701: - b - optional vector of right hand side, that will be adjusted by provided solution
5703: Notes:
5704: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5705: but does not release memory. For the dense and block diagonal
5706: formats this does not alter the nonzero structure.
5708: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5709: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5710: merely zeroed.
5712: The user can set a value in the diagonal entry (or for the AIJ and
5713: row formats can optionally remove the main diagonal entry from the
5714: nonzero structure as well, by passing 0.0 as the diag value).
5716: For the parallel case, all processes that share the matrix (i.e.,
5717: those in the communicator used for matrix creation) MUST call this
5718: routine, regardless of whether any rows being zeroed are owned by
5719: them.
5721: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5722: list only rows local to itself).
5724: The grid coordinates are across the entire grid, not just the local portion
5726: In Fortran idxm and idxn should be declared as
5727: $ MatStencil idxm(4,m)
5728: and the values inserted using
5729: $ idxm(MatStencil_i,1) = i
5730: $ idxm(MatStencil_j,1) = j
5731: $ idxm(MatStencil_k,1) = k
5732: $ idxm(MatStencil_c,1) = c
5733: etc
5735: For periodic boundary conditions use negative indices for values to the left (below 0); these are
5736: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5737: etc.; these are obtained by wrapping the values from the left edge. This does not work for anything but the
5738: DM_BOUNDARY_PERIODIC boundary type.
5740: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5741: a single value per point) you can skip filling those indices.
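   For example, a sketch in C for a matrix obtained from a 2d DMDA (error checking omitted; A is assumed to come from
   DMCreateMatrix() so the stencil information is already attached to it):
.vb
      MatStencil row[2];
      row[0].i = 0; row[0].j = 3; row[0].k = 0; row[0].c = 0;
      row[1].i = 0; row[1].j = 4; row[1].k = 0; row[1].c = 0;
      MatZeroRowsStencil(A,2,row,1.0,NULL,NULL);
.ve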
5743: Level: intermediate
5745: Concepts: matrices^zeroing rows
5747: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5748: @*/
5749: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5750: {
5751: PetscInt dim = mat->stencil.dim;
5752: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5753: PetscInt *dims = mat->stencil.dims+1;
5754: PetscInt *starts = mat->stencil.starts;
5755: PetscInt *dxm = (PetscInt*) rows;
5756: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5764: PetscMalloc1(numRows, &jdxm);
5765: for (i = 0; i < numRows; ++i) {
5766: /* Skip unused dimensions (they are ordered k, j, i, c) */
5767: for (j = 0; j < 3-sdim; ++j) dxm++;
5768: /* Local index in X dir */
5769: tmp = *dxm++ - starts[0];
5770: /* Loop over remaining dimensions */
5771: for (j = 0; j < dim-1; ++j) {
5772: /* If nonlocal, set index to be negative */
5773: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5774: /* Update local index */
5775: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5776: }
5777: /* Skip component slot if necessary */
5778: if (mat->stencil.noc) dxm++;
5779: /* Local row number */
5780: if (tmp >= 0) {
5781: jdxm[numNewRows++] = tmp;
5782: }
5783: }
5784: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5785: PetscFree(jdxm);
5786: return(0);
5787: }
5791: /*@C
5792: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5793: of a set of rows and columns of a matrix.
5795: Collective on Mat
5797: Input Parameters:
5798: + mat - the matrix
5799: . numRows - the number of rows/columns to remove
5800: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5801: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5802: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5803: - b - optional vector of right hand side, that will be adjusted by provided solution
5805: Notes:
5806: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5807: but does not release memory. For the dense and block diagonal
5808: formats this does not alter the nonzero structure.
5810: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5811: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5812: merely zeroed.
5814: The user can set a value in the diagonal entry (or for the AIJ and
5815: row formats can optionally remove the main diagonal entry from the
5816: nonzero structure as well, by passing 0.0 as the diag value).
5818: For the parallel case, all processes that share the matrix (i.e.,
5819: those in the communicator used for matrix creation) MUST call this
5820: routine, regardless of whether any rows being zeroed are owned by
5821: them.
5823: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5824: list only rows local to itself, but the row/column numbers are given in local numbering).
5826: The grid coordinates are across the entire grid, not just the local portion
5828: In Fortran idxm and idxn should be declared as
5829: $ MatStencil idxm(4,m)
5830: and the values inserted using
5831: $ idxm(MatStencil_i,1) = i
5832: $ idxm(MatStencil_j,1) = j
5833: $ idxm(MatStencil_k,1) = k
5834: $ idxm(MatStencil_c,1) = c
5835: etc
5837: For periodic boundary conditions use negative indices for values to the left (below 0); these are
5838: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5839: etc.; these are obtained by wrapping the values from the left edge. This does not work for anything but the
5840: DM_BOUNDARY_PERIODIC boundary type.
5842: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5843: a single value per point) you can skip filling those indices.
5845: Level: intermediate
5847: Concepts: matrices^zeroing rows
5849: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5850: @*/
5851: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5852: {
5853: PetscInt dim = mat->stencil.dim;
5854: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5855: PetscInt *dims = mat->stencil.dims+1;
5856: PetscInt *starts = mat->stencil.starts;
5857: PetscInt *dxm = (PetscInt*) rows;
5858: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5866: PetscMalloc1(numRows, &jdxm);
5867: for (i = 0; i < numRows; ++i) {
5868: /* Skip unused dimensions (they are ordered k, j, i, c) */
5869: for (j = 0; j < 3-sdim; ++j) dxm++;
5870: /* Local index in X dir */
5871: tmp = *dxm++ - starts[0];
5872: /* Loop over remaining dimensions */
5873: for (j = 0; j < dim-1; ++j) {
5874: /* If nonlocal, set index to be negative */
5875: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5876: /* Update local index */
5877: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5878: }
5879: /* Skip component slot if necessary */
5880: if (mat->stencil.noc) dxm++;
5881: /* Local row number */
5882: if (tmp >= 0) {
5883: jdxm[numNewRows++] = tmp;
5884: }
5885: }
5886: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
5887: PetscFree(jdxm);
5888: return(0);
5889: }
5893: /*@C
5894: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
5895: of a set of rows of a matrix; using local numbering of rows.
5897: Collective on Mat
5899: Input Parameters:
5900: + mat - the matrix
5901: . numRows - the number of rows to remove
5902: . rows - the global row indices
5903: . diag - value put in all diagonals of eliminated rows
5904: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5905: - b - optional vector of right hand side, that will be adjusted by provided solution
5907: Notes:
5908: Before calling MatZeroRowsLocal(), the user must first set the
5909: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5911: For the AIJ matrix formats this removes the old nonzero structure,
5912: but does not release memory. For the dense and block diagonal
5913: formats this does not alter the nonzero structure.
5915: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5916: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5917: merely zeroed.
5919: The user can set a value in the diagonal entry (or for the AIJ and
5920: row formats can optionally remove the main diagonal entry from the
5921: nonzero structure as well, by passing 0.0 as the diag value).
5923: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5924: owns that are to be zeroed. This saves a global synchronization in the implementation.
5926: Level: intermediate
5928: Concepts: matrices^zeroing
5930: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
5931: @*/
5932: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5933: {
5940: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5941: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5942: MatCheckPreallocated(mat,1);
5944: if (mat->ops->zerorowslocal) {
5945: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
5946: } else {
5947: IS is, newis;
5948: const PetscInt *newRows;
5950: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
5951: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
5952: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
5953: ISGetIndices(newis,&newRows);
5954: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
5955: ISRestoreIndices(newis,&newRows);
5956: ISDestroy(&newis);
5957: ISDestroy(&is);
5958: }
5959: PetscObjectStateIncrease((PetscObject)mat);
5960: #if defined(PETSC_HAVE_CUSP)
5961: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5962: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5963: }
5964: #endif
5965: #if defined(PETSC_HAVE_VIENNACL)
5966: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5967: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5968: }
5969: #endif
5970: return(0);
5971: }
5975: /*@C
5976: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
5977: of a set of rows of a matrix; using local numbering of rows.
5979: Collective on Mat
5981: Input Parameters:
5982: + mat - the matrix
5983: . is - index set of rows to remove
5984: . diag - value put in all diagonals of eliminated rows
5985: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5986: - b - optional vector of right hand side, that will be adjusted by provided solution
5988: Notes:
5989: Before calling MatZeroRowsLocalIS(), the user must first set the
5990: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5992: For the AIJ matrix formats this removes the old nonzero structure,
5993: but does not release memory. For the dense and block diagonal
5994: formats this does not alter the nonzero structure.
5996: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5997: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5998: merely zeroed.
6000: The user can set a value in the diagonal entry (or for the AIJ and
6001: row formats can optionally remove the main diagonal entry from the
6002: nonzero structure as well, by passing 0.0 as the diag value).
6004: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6005: owns that are to be zeroed. This saves a global synchronization in the implementation.
6007: Level: intermediate
6009: Concepts: matrices^zeroing
6011: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6012: @*/
6013: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6014: {
6016: PetscInt numRows;
6017: const PetscInt *rows;
6023: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6024: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6025: MatCheckPreallocated(mat,1);
6027: ISGetLocalSize(is,&numRows);
6028: ISGetIndices(is,&rows);
6029: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6030: ISRestoreIndices(is,&rows);
6031: return(0);
6032: }
6036: /*@C
6037: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6038: of a set of rows and columns of a matrix; using local numbering of rows.
6040: Collective on Mat
6042: Input Parameters:
6043: + mat - the matrix
6044: . numRows - the number of rows to remove
6045: . rows - the global row indices
6046: . diag - value put in all diagonals of eliminated rows
6047: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6048: - b - optional vector of right hand side, that will be adjusted by provided solution
6050: Notes:
6051: Before calling MatZeroRowsColumnsLocal(), the user must first set the
6052: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6054: The user can set a value in the diagonal entry (or for the AIJ and
6055: row formats can optionally remove the main diagonal entry from the
6056: nonzero structure as well, by passing 0.0 as the diag value).
6058: Level: intermediate
6060: Concepts: matrices^zeroing
6062: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6063: @*/
6064: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6065: {
6067: IS is, newis;
6068: const PetscInt *newRows;
6074: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6075: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6076: MatCheckPreallocated(mat,1);
6078: if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6079: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6080: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6081: ISGetIndices(newis,&newRows);
6082: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6083: ISRestoreIndices(newis,&newRows);
6084: ISDestroy(&newis);
6085: ISDestroy(&is);
6086: PetscObjectStateIncrease((PetscObject)mat);
6087: #if defined(PETSC_HAVE_CUSP)
6088: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6089: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6090: }
6091: #endif
6092: #if defined(PETSC_HAVE_VIENNACL)
6093: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6094: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6095: }
6096: #endif
6097: return(0);
6098: }
6102: /*@C
6103: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6104: of a set of rows and columns of a matrix; using local numbering of rows.
6106: Collective on Mat
6108: Input Parameters:
6109: + mat - the matrix
6110: . is - index set of rows to remove
6111: . diag - value put in all diagonals of eliminated rows
6112: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6113: - b - optional vector of right hand side, that will be adjusted by provided solution
6115: Notes:
6116: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6117: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6119: The user can set a value in the diagonal entry (or for the AIJ and
6120: row formats can optionally remove the main diagonal entry from the
6121: nonzero structure as well, by passing 0.0 as the diag value).
6123: Level: intermediate
6125: Concepts: matrices^zeroing
6127: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6128: @*/
6129: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6130: {
6132: PetscInt numRows;
6133: const PetscInt *rows;
6139: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6140: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6141: MatCheckPreallocated(mat,1);
6143: ISGetLocalSize(is,&numRows);
6144: ISGetIndices(is,&rows);
6145: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6146: ISRestoreIndices(is,&rows);
6147: return(0);
6148: }
6152: /*@
6153: MatGetSize - Returns the numbers of rows and columns in a matrix.
6155: Not Collective
6157: Input Parameter:
6158: . mat - the matrix
6160: Output Parameters:
6161: + m - the number of global rows
6162: - n - the number of global columns
6164: Note: both output parameters can be NULL on input.
6166: Level: beginner
6168: Concepts: matrices^size
6170: .seealso: MatGetLocalSize()
6171: @*/
6172: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6173: {
6176: if (m) *m = mat->rmap->N;
6177: if (n) *n = mat->cmap->N;
6178: return(0);
6179: }
6183: /*@
6184: MatGetLocalSize - Returns the number of rows and columns in a matrix
6185: stored locally. This information may be implementation dependent, so
6186: use with care.
6188: Not Collective
6190: Input Parameters:
6191: . mat - the matrix
6193: Output Parameters:
6194: + m - the number of local rows
6195: - n - the number of local columns
6197: Note: both output parameters can be NULL on input.
6199: Level: beginner
6201: Concepts: matrices^local size
6203: .seealso: MatGetSize()
6204: @*/
6205: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6206: {
6211: if (m) *m = mat->rmap->n;
6212: if (n) *n = mat->cmap->n;
6213: return(0);
6214: }
6218: /*@
6219: MatGetOwnershipRangeColumn - Returns the range of matrix columns associated with the portion, owned by this processor, of a vector
6220: that one multiplies the matrix by (the columns of the "diagonal block").
6222: Not Collective, unless matrix has not been allocated, then collective on Mat
6224: Input Parameters:
6225: . mat - the matrix
6227: Output Parameters:
6228: + m - the global index of the first local column
6229: - n - one more than the global index of the last local column
6231: Notes: both output parameters can be NULL on input.
6233: Level: developer
6235: Concepts: matrices^column ownership
6237: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6239: @*/
6240: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6241: {
6247: MatCheckPreallocated(mat,1);
6248: if (m) *m = mat->cmap->rstart;
6249: if (n) *n = mat->cmap->rend;
6250: return(0);
6251: }
6255: /*@
6256: MatGetOwnershipRange - Returns the range of matrix rows owned by
6257: this processor, assuming that the matrix is laid out with the first
6258: n1 rows on the first processor, the next n2 rows on the second, etc.
6259: For certain parallel layouts this range may not be well defined.
6261: Not Collective
6263: Input Parameters:
6264: . mat - the matrix
6266: Output Parameters:
6267: + m - the global index of the first local row
6268: - n - one more than the global index of the last local row
6270: Note: Both output parameters can be NULL on input.
6271: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6272: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6273: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
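   For example, a sketch that sets the locally owned part of the diagonal (error checking omitted; A is assumed to be a
   preallocated Mat):
.vb
      PetscInt rstart,rend,row;
      MatGetOwnershipRange(A,&rstart,&rend);
      for (row=rstart; row<rend; row++) {
        MatSetValue(A,row,row,1.0,INSERT_VALUES);
      }
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve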
6275: Level: beginner
6277: Concepts: matrices^row ownership
6279: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6281: @*/
6282: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6283: {
6289: MatCheckPreallocated(mat,1);
6290: if (m) *m = mat->rmap->rstart;
6291: if (n) *n = mat->rmap->rend;
6292: return(0);
6293: }
6297: /*@C
6298: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6299: each process
6301: Not Collective, unless matrix has not been allocated, then collective on Mat
6303: Input Parameters:
6304: . mat - the matrix
6306: Output Parameters:
6307: . ranges - start of each processor's portion, plus one more entry at the end containing the total length
6309: Level: beginner
6311: Concepts: matrices^row ownership
6313: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6315: @*/
6316: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6317: {
6323: MatCheckPreallocated(mat,1);
6324: PetscLayoutGetRanges(mat->rmap,ranges);
6325: return(0);
6326: }
6330: /*@C
6331: MatGetOwnershipRangesColumn - Returns, for each process, the range of matrix columns associated with the portion, owned by that process, of a vector
6332: that one multiplies the matrix by (the columns of the "diagonal blocks" for each process).
6334: Not Collective, unless matrix has not been allocated, then collective on Mat
6336: Input Parameters:
6337: . mat - the matrix
6339: Output Parameters:
6340: . ranges - start of each processor's portion, plus one more entry at the end containing the total length
6342: Level: beginner
6344: Concepts: matrices^column ownership
6346: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6348: @*/
6349: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6350: {
6356: MatCheckPreallocated(mat,1);
6357: PetscLayoutGetRanges(mat->cmap,ranges);
6358: return(0);
6359: }
6363: /*@C
6364: MatGetOwnershipIS - Get row and column ownership as index sets
6366: Not Collective
6368: Input Arguments:
6369: . A - matrix of type Elemental
6371: Output Arguments:
6372: + rows - rows in which this process owns elements
6373: . cols - columns in which this process owns elements
6375: Level: intermediate
6377: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL, MatSetValues()
6378: @*/
6379: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6380: {
6381: PetscErrorCode ierr,(*f)(Mat,IS*,IS*);
6384: MatCheckPreallocated(A,1);
6385: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6386: if (f) {
6387: (*f)(A,rows,cols);
6388: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6389: if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6390: if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6391: }
6392: return(0);
6393: }
6397: /*@C
6398: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6399: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6400: to complete the factorization.
6402: Collective on Mat
6404: Input Parameters:
6405: + mat - the matrix
6406: . row - row permutation
6407: . column - column permutation
6408: - info - structure containing
6409: $ levels - number of levels of fill.
6410: $ expected fill - as ratio of original fill.
6411: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6412: missing diagonal entries)
6414: Output Parameters:
6415: . fact - new matrix that has been symbolically factored
6417: Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
6419: Most users should employ the simplified KSP interface for linear solvers
6420: instead of working directly with matrix algebra routines such as this.
6421: See, e.g., KSPCreate().
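   For example, a sketch of the low-level factorization sequence (error checking omitted; A is assumed to be an assembled
   AIJ matrix; most users should instead select ILU through the KSP/PC interface, e.g. PCILU):
.vb
      Mat           F;
      MatFactorInfo info;
      IS            row,col;
      MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
      MatFactorInfoInitialize(&info);
      info.levels = 1;
      info.fill   = 1.0;
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,&F);
      MatILUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);
.ve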
6423: Level: developer
6425: Concepts: matrices^symbolic LU factorization
6426: Concepts: matrices^factorization
6427: Concepts: LU^symbolic factorization
6429: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6430: MatGetOrdering(), MatFactorInfo
6432: Developer Note: the Fortran interface is not autogenerated as the f90
6433: interface definition cannot be generated correctly [due to MatFactorInfo]
6435: @*/
6436: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6437: {
6447: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6448: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6449: if (!(fact)->ops->ilufactorsymbolic) {
6450: const MatSolverPackage spackage;
6451: MatFactorGetSolverPackage(fact,&spackage);
6452: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6453: }
6454: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6455: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6456: MatCheckPreallocated(mat,2);
6458: PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6459: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6460: PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6461: return(0);
6462: }
6466: /*@C
6467: MatICCFactorSymbolic - Performs symbolic incomplete
6468: Cholesky factorization for a symmetric matrix. Use
6469: MatCholeskyFactorNumeric() to complete the factorization.
6471: Collective on Mat
6473: Input Parameters:
6474: + mat - the matrix
6475: . perm - row and column permutation
6476: - info - structure containing
6477: $ levels - number of levels of fill.
6478: $ expected fill - as ratio of original fill.
6480: Output Parameter:
6481: . fact - the factored matrix
6483: Notes:
6484: Most users should employ the KSP interface for linear solvers
6485: instead of working directly with matrix algebra routines such as this.
6486: See, e.g., KSPCreate().
6488: Level: developer
6490: Concepts: matrices^symbolic incomplete Cholesky factorization
6491: Concepts: matrices^factorization
6492: Concepts: Cholesky^symbolic factorization
6494: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6496: Developer Note: the Fortran interface is not autogenerated as the f90
6497: interface definition cannot be generated correctly [due to MatFactorInfo]
6499: @*/
6500: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6501: {
6510: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6511: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6512: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6513: if (!(fact)->ops->iccfactorsymbolic) {
6514: const MatSolverPackage spackage;
6515: MatFactorGetSolverPackage(fact,&spackage);
6516: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6517: }
6518: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6519: MatCheckPreallocated(mat,2);
6521: PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6522: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6523: PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6524: return(0);
6525: }
6529: /*@C
6530: MatGetSubMatrices - Extracts several submatrices from a matrix. If submat
6531: points to an array of valid matrices, they may be reused to store the new
6532: submatrices.
6534: Collective on Mat
6536: Input Parameters:
6537: + mat - the matrix
6538: . n - the number of submatrices to be extracted (on this processor, may be zero)
6539: . irow, icol - index sets of rows and columns to extract
6540: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6542: Output Parameter:
6543: . submat - the array of submatrices
6545: Notes:
6546: MatGetSubMatrices() can extract ONLY sequential submatrices
6547: (from both sequential and parallel matrices). Use MatGetSubMatrix()
6548: to extract a parallel submatrix.
6550: Some matrix types place restrictions on the row and column
6551: indices, such as that they be sorted or that they be equal to each other.
6553: The index sets may not have duplicate entries.
6555: When extracting submatrices from a parallel matrix, each processor can
6556: form a different submatrix by setting the rows and columns of its
6557: individual index sets according to the local submatrix desired.
6559: When finished using the submatrices, the user should destroy
6560: them with MatDestroyMatrices().
6562: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6563: original matrix has not changed since the last call to MatGetSubMatrices().
6565: This routine creates the matrices in submat; you should NOT create them before
6566: calling it. It also allocates the array of matrix pointers submat.
6568: For BAIJ matrices the index sets must respect the block structure, that is if they
6569: request one row/column in a block, they must request all rows/columns that are in
6570: that block. For example, if the block size is 2 you cannot request just row 0 and
6571: column 0.
6573: Fortran Note:
6574: The Fortran interface is slightly different from that given below; it
6575: requires one to pass in as submat a Mat (integer) array of size at least n.
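   For example, a sketch in which each process extracts the leading 5x5 sequential submatrix (error checking omitted; A is
   assumed to be an assembled parallel Mat with at least 5 global rows and columns):
.vb
      IS  irow[1],icol[1];
      Mat *submat;
      ISCreateStride(PETSC_COMM_SELF,5,0,1,&irow[0]);
      ISCreateStride(PETSC_COMM_SELF,5,0,1,&icol[0]);
      MatGetSubMatrices(A,1,irow,icol,MAT_INITIAL_MATRIX,&submat);
      MatDestroyMatrices(1,&submat);
      ISDestroy(&irow[0]);
      ISDestroy(&icol[0]);
.ve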
6577: Level: advanced
6579: Concepts: matrices^accessing submatrices
6580: Concepts: submatrices
6582: .seealso: MatDestroyMatrices(), MatGetSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6583: @*/
6584: PetscErrorCode MatGetSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6585: {
6587: PetscInt i;
6588: PetscBool eq;
6593: if (n) {
6598: }
6600: if (n && scall == MAT_REUSE_MATRIX) {
6603: }
6604: if (!mat->ops->getsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6605: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6606: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6607: MatCheckPreallocated(mat,1);
6609: PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6610: (*mat->ops->getsubmatrices)(mat,n,irow,icol,scall,submat);
6611: PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6612: for (i=0; i<n; i++) {
6613: (*submat)[i]->factortype = MAT_FACTOR_NONE; /* in case in place factorization was previously done on submatrix */
6614: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6615: ISEqual(irow[i],icol[i],&eq);
6616: if (eq) {
6617: if (mat->symmetric) {
6618: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6619: } else if (mat->hermitian) {
6620: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6621: } else if (mat->structurally_symmetric) {
6622: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6623: }
6624: }
6625: }
6626: }
6627: return(0);
6628: }
6632: PetscErrorCode MatGetSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6633: {
6635: PetscInt i;
6636: PetscBool eq;
6641: if (n) {
6646: }
6648: if (n && scall == MAT_REUSE_MATRIX) {
6651: }
6652: if (!mat->ops->getsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6653: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6654: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6655: MatCheckPreallocated(mat,1);
6657: PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6658: (*mat->ops->getsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6659: PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6660: for (i=0; i<n; i++) {
6661: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6662: ISEqual(irow[i],icol[i],&eq);
6663: if (eq) {
6664: if (mat->symmetric) {
6665: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6666: } else if (mat->hermitian) {
6667: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6668: } else if (mat->structurally_symmetric) {
6669: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6670: }
6671: }
6672: }
6673: }
6674: return(0);
6675: }
6679: /*@C
6680: MatDestroyMatrices - Destroys a set of matrices obtained with MatGetSubMatrices().
6682: Collective on Mat
6684: Input Parameters:
6685: + n - the number of local matrices
6686: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6687: sequence of MatGetSubMatrices())
6689: Level: advanced
6691: Notes: Frees not only the matrices, but also the array that contains the matrices.
6692: In Fortran this routine will not free the array.
6694: .seealso: MatGetSubMatrices()
6695: @*/
6696: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6697: {
6699: PetscInt i;
6702: if (!*mat) return(0);
6703: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6705: for (i=0; i<n; i++) {
6706: MatDestroy(&(*mat)[i]);
6707: }
6708: /* memory is allocated even if n = 0 */
6709: PetscFree(*mat);
6710: *mat = NULL;
6711: return(0);
6712: }
6716: /*@C
6717: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6719: Collective on Mat
6721: Input Parameters:
6722: . mat - the matrix
6724: Output Parameter:
6725: . matstruct - the sequential matrix with the nonzero structure of mat
6727: Level: intermediate
6729: .seealso: MatDestroySeqNonzeroStructure(), MatGetSubMatrices(), MatDestroyMatrices()
6730: @*/
6731: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6732: {
6740: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6741: MatCheckPreallocated(mat,1);
6743: if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6744: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6745: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6746: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6747: return(0);
6748: }
6752: /*@C
6753: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
6755: Collective on Mat
6757: Input Parameters:
6758: . mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6759: sequence of MatGetSeqNonzeroStructure())
6761: Level: advanced
6763: Notes: Frees the matrix obtained with MatGetSeqNonzeroStructure().
6765: .seealso: MatGetSeqNonzeroStructure()
6766: @*/
6767: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6768: {
6773: MatDestroy(mat);
6774: return(0);
6775: }
6779: /*@
6780: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6781: replaces the index sets by larger ones that represent submatrices with
6782: additional overlap.
6784: Collective on Mat
6786: Input Parameters:
6787: + mat - the matrix
6788: . n - the number of index sets
6789: . is - the array of index sets (these index sets will changed during the call)
6790: - ov - the additional overlap requested
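   For example, a sketch of growing an array of local index sets by one level of overlap and then extracting the corresponding
   (sequential) overlapping submatrices, as done by ASM-type methods (error checking omitted; A, nsub, is and submats are assumed
   to be an assembled Mat, the number of local subdomains, an array of ISs describing them, and a Mat** for the result):
.vb
      MatIncreaseOverlap(A,nsub,is,1);
      MatGetSubMatrices(A,nsub,is,is,MAT_INITIAL_MATRIX,&submats);
.ve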
6792: Level: developer
6794: Concepts: overlap
6795: Concepts: ASM^computing overlap
6797: .seealso: MatGetSubMatrices()
6798: @*/
6799: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6800: {
6806: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6807: if (n) {
6810: }
6811: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6812: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6813: MatCheckPreallocated(mat,1);
6815: if (!ov) return(0);
6816: if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6817: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6818: (*mat->ops->increaseoverlap)(mat,n,is,ov);
6819: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6820: return(0);
6821: }
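/*
   Example of Usage (an illustrative sketch with hypothetical variables): grow an array of
   non-overlapping subdomain index sets by one level of overlap, as done for additive
   Schwarz, and then extract the corresponding overlapping submatrices.
.vb
      MatIncreaseOverlap(A,n,is,1);       each is[i] now contains one extra layer of rows
      MatGetSubMatrices(A,n,is,is,MAT_INITIAL_MATRIX,&submats);
.ve
*/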
6825: /*@
6826: MatGetBlockSize - Returns the matrix block size.
6828: Not Collective
6830: Input Parameter:
6831: . mat - the matrix
6833: Output Parameter:
6834: . bs - block size
6836: Notes:
6837: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6839: If the block size has not been set yet this routine returns 1.
6841: Level: intermediate
6843: Concepts: matrices^block size
6845: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
6846: @*/
6847: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
6848: {
6852: *bs = PetscAbs(mat->rmap->bs);
6853: return(0);
6854: }
6858: /*@
6859: MatGetBlockSizes - Returns the matrix block row and column sizes.
6861: Not Collective
6863: Input Parameter:
6864: . mat - the matrix
6866: Output Parameters:
6867: + rbs - row block size
6868: - cbs - column block size
6870: Notes:
6871: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6872: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
6874: If a block size has not been set yet this routine returns 1.
6876: Level: intermediate
6878: Concepts: matrices^block size
6880: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
6881: @*/
6882: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
6883: {
6888: if (rbs) *rbs = PetscAbs(mat->rmap->bs);
6889: if (cbs) *cbs = PetscAbs(mat->cmap->bs);
6890: return(0);
6891: }
6895: /*@
6896: MatSetBlockSize - Sets the matrix block size.
6898: Logically Collective on Mat
6900: Input Parameters:
6901: + mat - the matrix
6902: - bs - block size
6904: Notes:
6905: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6907: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
6909: Level: intermediate
6911: Concepts: matrices^block size
6913: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
6914: @*/
6915: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
6916: {
6922: PetscLayoutSetBlockSize(mat->rmap,bs);
6923: PetscLayoutSetBlockSize(mat->cmap,bs);
6924: return(0);
6925: }
6929: /*@
6930: MatSetBlockSizes - Sets the matrix block row and column sizes.
6932: Logically Collective on Mat
6934: Input Parameters:
6935: + mat - the matrix
6936: . rbs - row block size
6937: - cbs - column block size
6939: Notes:
6940: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6941: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
6943: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
6945: The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().
6947: Level: intermediate
6949: Concepts: matrices^block size
6951: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
6952: @*/
6953: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
6954: {
6961: PetscLayoutSetBlockSize(mat->rmap,rbs);
6962: PetscLayoutSetBlockSize(mat->cmap,cbs);
6963: return(0);
6964: }
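/*
   Example of Usage (an illustrative sketch with hypothetical sizes M, N and bs): the block
   sizes must be set before MatSetUp() or the type-specific preallocation routine is called.
.vb
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATBAIJ);
      MatSetBlockSizes(A,bs,bs);          must precede preallocation/MatSetUp()
      MatSetUp(A);
.ve
*/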
6968: /*@
6969: MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices
6971: Logically Collective on Mat
6973: Input Parameters:
6974: + mat - the matrix
6975: . fromRow - matrix from which to copy row block size
6976: - fromCol - matrix from which to copy column block size (can be same as fromRow)
6978: Level: developer
6980: Concepts: matrices^block size
6982: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
6983: @*/
6984: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
6985: {
6992: if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
6993: if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
6994: return(0);
6995: }
6999: /*@
7000: MatResidual - Default routine to calculate the residual.
7002: Collective on Mat and Vec
7004: Input Parameters:
7005: + mat - the matrix
7006: . b - the right-hand-side
7007: - x - the approximate solution
7009: Output Parameter:
7010: . r - location to store the residual
7012: Level: developer
7014: .keywords: MG, default, multigrid, residual
7016: .seealso: PCMGSetResidual()
7017: @*/
7018: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7019: {
7028: MatCheckPreallocated(mat,1);
7029: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7030: if (!mat->ops->residual) {
7031: MatMult(mat,x,r);
7032: VecAYPX(r,-1.0,b);
7033: } else {
7034: (*mat->ops->residual)(mat,b,x,r);
7035: }
7036: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7037: return(0);
7038: }
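/*
   Equivalence sketch: when a matrix type provides no specialized residual routine,
   MatResidual(mat,b,x,r) performs exactly the two operations below.
.vb
      MatMult(mat,x,r);        r = A*x
      VecAYPX(r,-1.0,b);       r = b - A*x
.ve
*/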
7042: /*@C
7043: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
7045: Collective on Mat
7047: Input Parameters:
7048: + mat - the matrix
7049: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7050: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7051: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7052: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7053: always used.
7055: Output Parameters:
7056: + n - number of rows in the (possibly compressed) matrix
7057: . ia - the row pointers [of length n+1]
7058: . ja - the column indices
7059: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7060: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
7062: Level: developer
7064: Notes: You CANNOT change any of the ia[] or ja[] values.
7066: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values
7068: Fortran Notes:
7070: In Fortran use
7071: $ PetscInt ia(1), ja(1)
7072: $ PetscOffset iia, jja
7073: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7074: $
7075: $ or
7076: $
7077: $ PetscScalar, pointer :: xx_v(:)
7078: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7081: Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7083: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7084: @*/
7085: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7086: {
7096: MatCheckPreallocated(mat,1);
7097: if (!mat->ops->getrowij) *done = PETSC_FALSE;
7098: else {
7099: *done = PETSC_TRUE;
7100: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7101: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7102: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7103: }
7104: return(0);
7105: }
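/*
   Example of Usage in C (an illustrative sketch with hypothetical names): access the
   compressed row structure of a sequential matrix read-only and restore it when done;
   always check the done flag since not all matrix types support this.
.vb
      PetscInt       nrows;
      const PetscInt *ia,*ja;
      PetscBool      done;

      MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&nrows,&ia,&ja,&done);
      if (done) {
        ... row i has column indices ja[ia[i]] .. ja[ia[i+1]-1] ...
      }
      MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&nrows,&ia,&ja,&done);
.ve
*/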
7109: /*@C
7110: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
7112: Collective on Mat
7114: Input Parameters:
7115: + mat - the matrix
7116: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7117: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7118: symmetrized
7119: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7120: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7121: always used.
7122: Output Parameters:
7123: + n - number of columns in the (possibly compressed) matrix
7124: . ia - the column pointers
7126: . ja - the row indices
7127: - done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7129: Note:
7130: This routine zeros out n, ia, and ja. This is to prevent accidental
7131: use of the arrays after they have been restored. If you pass NULL, it will
7132: not zero the pointers. Use of ia or ja after MatRestoreColumnIJ() is invalid.
7134: Level: developer
7136: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7137: @*/
7138: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7139: {
7149: MatCheckPreallocated(mat,1);
7150: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7151: else {
7152: *done = PETSC_TRUE;
7153: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7154: }
7155: return(0);
7156: }
7160: /*@C
7161: MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7162: MatGetRowIJ().
7164: Collective on Mat
7166: Input Parameters:
7167: + mat - the matrix
7168: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7169: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7170: symmetrized
7171: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7172: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7173: always used.
7174: . n - size of (possibly compressed) matrix
7175: . ia - the row pointers
7176: - ja - the column indices
7178: Output Parameters:
7179: . done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7181: Note:
7182: This routine zeros out n, ia, and ja. This is to prevent accidental
7183: use of the arrays after they have been restored. If you pass NULL, it will
7184: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
7186: Level: developer
7188: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7189: @*/
7190: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7191: {
7200: MatCheckPreallocated(mat,1);
7202: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7203: else {
7204: *done = PETSC_TRUE;
7205: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7206: if (n) *n = 0;
7207: if (ia) *ia = NULL;
7208: if (ja) *ja = NULL;
7209: }
7210: return(0);
7211: }
7215: /*@C
7216: MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7217: MatGetColumnIJ().
7219: Collective on Mat
7221: Input Parameters:
7222: + mat - the matrix
7223: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7224: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7225: symmetrized
7226: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7227: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7228: always used.
7230: Output Parameters:
7231: + n - size of (possibly compressed) matrix
7232: . ia - the column pointers
7233: . ja - the row indices
7234: - done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7236: Level: developer
7238: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7239: @*/
7240: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7241: {
7250: MatCheckPreallocated(mat,1);
7252: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7253: else {
7254: *done = PETSC_TRUE;
7255: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7256: if (n) *n = 0;
7257: if (ia) *ia = NULL;
7258: if (ja) *ja = NULL;
7259: }
7260: return(0);
7261: }
7265: /*@C
7266: MatColoringPatch - Used inside matrix coloring routines that
7267: use MatGetRowIJ() and/or MatGetColumnIJ().
7269: Collective on Mat
7271: Input Parameters:
7272: + mat - the matrix
7273: . ncolors - max color value
7274: . n - number of entries in colorarray
7275: - colorarray - array indicating color for each column
7277: Output Parameters:
7278: . iscoloring - coloring generated using colorarray information
7280: Level: developer
7282: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7284: @*/
7285: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7286: {
7294: MatCheckPreallocated(mat,1);
7296: if (!mat->ops->coloringpatch) {
7297: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7298: } else {
7299: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7300: }
7301: return(0);
7302: }
7307: /*@
7308: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7310: Logically Collective on Mat
7312: Input Parameter:
7313: . mat - the factored matrix to be reset
7315: Notes:
7316: This routine should be used only with factored matrices formed by in-place
7317: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7318: format). This option can save memory, for example, when solving nonlinear
7319: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7320: ILU(0) preconditioner.
7322: Note that one can specify in-place ILU(0) factorization by calling
7323: .vb
7324: PCSetType(pc,PCILU);
7325: PCFactorSetUseInPlace(pc);
7326: .ve
7327: or by using the options -pc_type ilu -pc_factor_in_place
7329: In-place factorization ILU(0) can also be used as a local
7330: solver for the blocks within the block Jacobi or additive Schwarz
7331: methods (runtime option: -sub_pc_factor_in_place). See Users-Manual: ch_pc
7332: for details on setting local solver options.
7334: Most users should employ the simplified KSP interface for linear solvers
7335: instead of working directly with matrix algebra routines such as this.
7336: See, e.g., KSPCreate().
7338: Level: developer
7340: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()
7342: Concepts: matrices^unfactored
7344: @*/
7345: PetscErrorCode MatSetUnfactored(Mat mat)
7346: {
7352: MatCheckPreallocated(mat,1);
7353: mat->factortype = MAT_FACTOR_NONE;
7354: if (!mat->ops->setunfactored) return(0);
7355: (*mat->ops->setunfactored)(mat);
7356: return(0);
7357: }
7359: /*MC
7360: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7362: Synopsis:
7363: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7365: Not collective
7367: Input Parameter:
7368: . x - matrix
7370: Output Parameters:
7371: + xx_v - the Fortran90 pointer to the array
7372: - ierr - error code
7374: Example of Usage:
7375: .vb
7376: PetscScalar, pointer :: xx_v(:,:)
7377: ....
7378: call MatDenseGetArrayF90(x,xx_v,ierr)
7379: a = xx_v(3)
7380: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7381: .ve
7383: Level: advanced
7385: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7387: Concepts: matrices^accessing array
7389: M*/
7391: /*MC
7392: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7393: accessed with MatDenseGetArrayF90().
7395: Synopsis:
7396: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7398: Not collective
7400: Input Parameters:
7401: + x - matrix
7402: - xx_v - the Fortran90 pointer to the array
7404: Output Parameter:
7405: . ierr - error code
7407: Example of Usage:
7408: .vb
7409: PetscScalar, pointer :: xx_v(:)
7410: ....
7411: call MatDenseGetArrayF90(x,xx_v,ierr)
7412: a = xx_v(3)
7413: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7414: .ve
7416: Level: advanced
7418: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7420: M*/
7423: /*MC
7424: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7426: Synopsis:
7427: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7429: Not collective
7431: Input Parameter:
7432: . x - matrix
7434: Output Parameters:
7435: + xx_v - the Fortran90 pointer to the array
7436: - ierr - error code
7438: Example of Usage:
7439: .vb
7440: PetscScalar, pointer :: xx_v(:,:)
7441: ....
7442: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7443: a = xx_v(3)
7444: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7445: .ve
7447: Level: advanced
7449: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7451: Concepts: matrices^accessing array
7453: M*/
7455: /*MC
7456: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7457: accessed with MatSeqAIJGetArrayF90().
7459: Synopsis:
7460: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7462: Not collective
7464: Input Parameters:
7465: + x - matrix
7466: - xx_v - the Fortran90 pointer to the array
7468: Output Parameter:
7469: . ierr - error code
7471: Example of Usage:
7472: .vb
7473: PetscScalar, pointer :: xx_v(:)
7474: ....
7475: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7476: a = xx_v(3)
7477: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7478: .ve
7480: Level: advanced
7482: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7484: M*/
7489: /*@
7490: MatGetSubMatrix - Gets a single submatrix on the same number of processors
7491: as the original matrix.
7493: Collective on Mat
7495: Input Parameters:
7496: + mat - the original matrix
7497: . isrow - parallel IS containing the rows this processor should obtain
7498: . iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in ITS "diagonal part" in the new matrix.
7499: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7501: Output Parameter:
7502: . newmat - the new submatrix, of the same type as the old
7504: Level: advanced
7506: Notes:
7507: The submatrix will be able to be multiplied with vectors using the same layout as iscol.
7509: Some matrix types place restrictions on the row and column indices, such
7510: as that they be sorted or that they be equal to each other.
7512: The index sets may not have duplicate entries.
7514: The first time this is called you should use a cll of MAT_INITIAL_MATRIX;
7515: the MatGetSubMatrix() routine will create the newmat for you. Any additional calls
7516: to this routine with a mat of the same nonzero structure and with a cll of MAT_REUSE_MATRIX
7517: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7518: you are finished using it.
7520: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7521: the input matrix.
7523: If iscol is NULL then all columns are obtained (not supported in Fortran).
7525: Example usage:
7526: Consider the following 8x8 matrix with 34 non-zero values, that is
7527: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7528: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7529: as follows:
7531: .vb
7532: 1 2 0 | 0 3 0 | 0 4
7533: Proc0 0 5 6 | 7 0 0 | 8 0
7534: 9 0 10 | 11 0 0 | 12 0
7535: -------------------------------------
7536: 13 0 14 | 15 16 17 | 0 0
7537: Proc1 0 18 0 | 19 20 21 | 0 0
7538: 0 0 0 | 22 23 0 | 24 0
7539: -------------------------------------
7540: Proc2 25 26 27 | 0 0 28 | 29 0
7541: 30 0 0 | 31 32 33 | 0 34
7542: .ve
7544: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7546: .vb
7547: 2 0 | 0 3 0 | 0
7548: Proc0 5 6 | 7 0 0 | 8
7549: -------------------------------
7550: Proc1 18 0 | 19 20 21 | 0
7551: -------------------------------
7552: Proc2 26 27 | 0 0 28 | 29
7553: 0 0 | 31 32 33 | 0
7554: .ve
7557: Concepts: matrices^submatrices
7559: .seealso: MatGetSubMatrices()
7560: @*/
7561: PetscErrorCode MatGetSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7562: {
7564: PetscMPIInt size;
7565: Mat *local;
7566: IS iscoltmp;
7575: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7576: if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");
7578: MatCheckPreallocated(mat,1);
7579: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7581: if (!iscol || isrow == iscol) {
7582: PetscBool stride;
7583: PetscMPIInt grabentirematrix = 0,grab;
7584: PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7585: if (stride) {
7586: PetscInt first,step,n,rstart,rend;
7587: ISStrideGetInfo(isrow,&first,&step);
7588: if (step == 1) {
7589: MatGetOwnershipRange(mat,&rstart,&rend);
7590: if (rstart == first) {
7591: ISGetLocalSize(isrow,&n);
7592: if (n == rend-rstart) {
7593: grabentirematrix = 1;
7594: }
7595: }
7596: }
7597: }
7598: MPI_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7599: if (grab) {
7600: PetscInfo(mat,"Getting entire matrix as submatrix\n");
7601: if (cll == MAT_INITIAL_MATRIX) {
7602: *newmat = mat;
7603: PetscObjectReference((PetscObject)mat);
7604: }
7605: return(0);
7606: }
7607: }
7609: if (!iscol) {
7610: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7611: } else {
7612: iscoltmp = iscol;
7613: }
7615: /* if original matrix is on just one processor then use submatrix generated */
7616: if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7617: MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7618: if (!iscol) {ISDestroy(&iscoltmp);}
7619: return(0);
7620: } else if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1) {
7621: MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7622: *newmat = *local;
7623: PetscFree(local);
7624: if (!iscol) {ISDestroy(&iscoltmp);}
7625: return(0);
7626: } else if (!mat->ops->getsubmatrix) {
7627: /* Create a new matrix type that implements the operation using the full matrix */
7628: PetscLogEventBegin(MAT_GetSubMatrix,mat,0,0,0);
7629: switch (cll) {
7630: case MAT_INITIAL_MATRIX:
7631: MatCreateSubMatrix(mat,isrow,iscoltmp,newmat);
7632: break;
7633: case MAT_REUSE_MATRIX:
7634: MatSubMatrixUpdate(*newmat,mat,isrow,iscoltmp);
7635: break;
7636: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7637: }
7638: PetscLogEventEnd(MAT_GetSubMatrix,mat,0,0,0);
7639: if (!iscol) {ISDestroy(&iscoltmp);}
7640: return(0);
7641: }
7643: if (!mat->ops->getsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7644: PetscLogEventBegin(MAT_GetSubMatrix,mat,0,0,0);
7645: (*mat->ops->getsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7646: PetscLogEventEnd(MAT_GetSubMatrix,mat,0,0,0);
7647: if (!iscol) {ISDestroy(&iscoltmp);}
7648: if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7649: return(0);
7650: }
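/*
   Example of Usage (an illustrative sketch with hypothetical ranges): extract every other
   locally owned row and all columns as a parallel submatrix, then reuse it after the values
   of the matrix change without altering its nonzero structure.
.vb
      IS  isrow;
      Mat B;

      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PetscObjectComm((PetscObject)A),(rend-rstart)/2,rstart,2,&isrow);
      MatGetSubMatrix(A,isrow,NULL,MAT_INITIAL_MATRIX,&B);    NULL iscol keeps all columns
      ... values of A change, same nonzero structure ...
      MatGetSubMatrix(A,isrow,NULL,MAT_REUSE_MATRIX,&B);
      MatDestroy(&B);
      ISDestroy(&isrow);
.ve
*/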
7654: /*@
7655: MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7656: used during the assembly process to store values that belong to
7657: other processors.
7659: Not Collective
7661: Input Parameters:
7662: + mat - the matrix
7663: . size - the initial size of the stash.
7664: - bsize - the initial size of the block-stash (if used).
7666: Options Database Keys:
7667: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
7668: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
7670: Level: intermediate
7672: Notes:
7673: The block-stash is used for values set with MatSetValuesBlocked() while
7674: the stash is used for values set with MatSetValues()
7676: Run with the option -info and look for output of the form
7677: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7678: to determine the appropriate value, MM, to use for size and
7679: MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7680: to determine the value, BMM, to use for bsize
7682: Concepts: stash^setting matrix size
7683: Concepts: matrices^stash
7685: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
7687: @*/
7688: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7689: {
7695: MatStashSetInitialSize_Private(&mat->stash,size);
7696: MatStashSetInitialSize_Private(&mat->bstash,bsize);
7697: return(0);
7698: }
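/*
   Example of Usage (an illustrative sketch with sizes assumed to come from a previous -info
   run): presize the stashes before setting off-process values so that assembly does not
   need to reallocate them repeatedly.
.vb
      MatStashSetInitialSize(A,10000,1000);
      ... MatSetValues()/MatSetValuesBlocked() including off-process entries ...
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve
*/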
7702: /*@
7703: MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
7704: the matrix
7706: Neighbor-wise Collective on Mat
7708: Input Parameters:
7709: + mat - the matrix
7710: . x,y - the vectors
7711: - w - where the result is stored
7713: Level: intermediate
7715: Notes:
7716: w may be the same vector as y.
7718: This allows one to use either the restriction or interpolation (its transpose)
7719: matrix to do the interpolation
7721: Concepts: interpolation
7723: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7725: @*/
7726: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7727: {
7729: PetscInt M,N,Ny;
7737: MatCheckPreallocated(A,1);
7738: MatGetSize(A,&M,&N);
7739: VecGetSize(y,&Ny);
7740: if (M == Ny) {
7741: MatMultAdd(A,x,y,w);
7742: } else {
7743: MatMultTransposeAdd(A,x,y,w);
7744: }
7745: return(0);
7746: }
7750: /*@
7751: MatInterpolate - y = A*x or A'*x depending on the shape of
7752: the matrix
7754: Neighbor-wise Collective on Mat
7756: Input Parameters:
7757: + mat - the matrix
7758: - x,y - the vectors
7760: Level: intermediate
7762: Notes:
7763: This allows one to use either the restriction or interpolation (its transpose)
7764: matrix to do the interpolation
7766: Concepts: matrices^interpolation
7768: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7770: @*/
7771: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
7772: {
7774: PetscInt M,N,Ny;
7781: MatCheckPreallocated(A,1);
7782: MatGetSize(A,&M,&N);
7783: VecGetSize(y,&Ny);
7784: if (M == Ny) {
7785: MatMult(A,x,y);
7786: } else {
7787: MatMultTranspose(A,x,y);
7788: }
7789: return(0);
7790: }
7794: /*@
7795: MatRestrict - y = A*x or A'*x
7797: Neighbor-wise Collective on Mat
7799: Input Parameters:
7800: + mat - the matrix
7801: - x,y - the vectors
7803: Level: intermediate
7805: Notes:
7806: This allows one to use either the restriction or interpolation (its transpose)
7807: matrix to do the restriction
7809: Concepts: matrices^restriction
7811: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
7813: @*/
7814: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
7815: {
7817: PetscInt M,N,Ny;
7824: MatCheckPreallocated(A,1);
7826: MatGetSize(A,&M,&N);
7827: VecGetSize(y,&Ny);
7828: if (M == Ny) {
7829: MatMult(A,x,y);
7830: } else {
7831: MatMultTranspose(A,x,y);
7832: }
7833: return(0);
7834: }
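/*
   Example of Usage (an illustrative multigrid-style sketch with a hypothetical interpolation
   matrix P that has fine-grid rows and coarse-grid columns): the same P serves both
   directions, the size of the output vector selecting MatMult() or MatMultTranspose().
.vb
      MatInterpolate(P,xcoarse,xfine);     xfine   = P *xcoarse
      MatRestrict(P,rfine,rcoarse);        rcoarse = P'*rfine
.ve
*/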
7838: /*@
7839: MatGetNullSpace - retrieves the null space attached to a matrix.
7841: Logically Collective on Mat and MatNullSpace
7843: Input Parameters:
7844: + mat - the matrix
7845: - nullsp - the null space object
7847: Level: developer
7849: Notes:
7850: Returns the null space attached with MatSetNullSpace(), or NULL if no null space has been attached
7852: Concepts: null space^attaching to matrix
7854: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace()
7855: @*/
7856: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
7857: {
7862: *nullsp = mat->nullsp;
7863: return(0);
7864: }
7868: /*@
7869: MatSetNullSpace - attaches a null space to a matrix.
7871: Logically Collective on Mat and MatNullSpace
7873: Input Parameters:
7874: + mat - the matrix
7875: - nullsp - the null space object
7877: Level: advanced
7879: Notes:
7880: This null space is used by the linear solvers. Overwrites any previous null space that may have been attached
7882: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
7883: call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.
7886: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
7887: for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
7888: Similarly, R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
7889: n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
7890: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).
7892: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
7894: Concepts: null space^attaching to matrix
7896: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
7897: @*/
7898: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
7899: {
7906: MatCheckPreallocated(mat,1);
7907: PetscObjectReference((PetscObject)nullsp);
7908: MatNullSpaceDestroy(&mat->nullsp);
7909: mat->nullsp = nullsp;
7910: return(0);
7911: }
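/*
   Example of Usage (an illustrative sketch): attach the constant null space, as arises for
   example for a pure Neumann Laplacian, so that Krylov solvers can handle the singular
   system.
.vb
      MatNullSpace nullsp;

      MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,0,NULL,&nullsp);
      MatSetNullSpace(A,nullsp);
      MatNullSpaceDestroy(&nullsp);        the matrix keeps its own reference
.ve
*/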
7915: /*@
7916: MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.
7918: Logically Collective on Mat and MatNullSpace
7920: Input Parameters:
7921: + mat - the matrix
7922: - nullsp - the null space object
7924: Level: developer
7926: Notes:
7927: Returns the null space attached with MatSetTransposeNullSpace(), or NULL if none has been attached
7929: Concepts: null space^attaching to matrix
7931: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace()
7932: @*/
7933: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
7934: {
7939: *nullsp = mat->transnullsp;
7940: return(0);
7941: }
7945: /*@
7946: MatSetTransposeNullSpace - attaches the null space of the transpose of a matrix to the matrix.
7948: Logically Collective on Mat and MatNullSpace
7950: Input Parameters:
7951: + mat - the matrix
7952: - nullsp - the null space object
7954: Level: advanced
7956: Notes:
7957: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
7958: You must also call MatSetNullSpace()
7961: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
7962: for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
7963: Similarly, R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
7964: n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
7965: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).
7967: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
7969: Concepts: null space^attaching to matrix
7971: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetNullSpace(), MatNullSpaceRemove()
7972: @*/
7973: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
7974: {
7981: MatCheckPreallocated(mat,1);
7982: PetscObjectReference((PetscObject)nullsp);
7983: MatNullSpaceDestroy(&mat->transnullsp);
7984: mat->transnullsp = nullsp;
7985: return(0);
7986: }
7990: /*@
7991: MatSetNearNullSpace - attaches a null space to a matrix.
7992: This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
7994: Logically Collective on Mat and MatNullSpace
7996: Input Parameters:
7997: + mat - the matrix
7998: - nullsp - the null space object
8000: Level: advanced
8002: Notes:
8003: Overwrites any previous near null space that may have been attached
8005: Concepts: null space^attaching to matrix
8007: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace()
8008: @*/
8009: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8010: {
8017: MatCheckPreallocated(mat,1);
8018: PetscObjectReference((PetscObject)nullsp);
8019: MatNullSpaceDestroy(&mat->nearnullsp);
8021: mat->nearnullsp = nullsp;
8022: return(0);
8023: }
8027: /*@
8028: MatGetNearNullSpace - Get the near null space attached with MatSetNearNullSpace()
8030: Not Collective
8032: Input Parameters:
8033: . mat - the matrix
8035: Output Parameters:
8036: . nullsp - the null space object, NULL if not set
8038: Level: developer
8040: Concepts: null space^attaching to matrix
8042: .seealso: MatSetNearNullSpace(), MatGetNullSpace()
8043: @*/
8044: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8045: {
8050: MatCheckPreallocated(mat,1);
8051: *nullsp = mat->nearnullsp;
8052: return(0);
8053: }
8057: /*@C
8058: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
8060: Collective on Mat
8062: Input Parameters:
8063: + mat - the matrix
8064: . row - row/column permutation
8065: - info - options for factorization, including the expected fill factor (>= 1.0)
8066: and the level of fill for ICC(k)
8068: Notes:
8069: Probably really in-place only when level of fill is zero, otherwise allocates
8070: new space to store factored matrix and deletes previous memory.
8072: Most users should employ the simplified KSP interface for linear solvers
8073: instead of working directly with matrix algebra routines such as this.
8074: See, e.g., KSPCreate().
8076: Level: developer
8078: Concepts: matrices^incomplete Cholesky factorization
8079: Concepts: Cholesky factorization
8081: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
8083: Developer Note: fortran interface is not autogenerated as the f90
8084: interface definition cannot be generated correctly [due to MatFactorInfo]
8086: @*/
8087: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8088: {
8096: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8097: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8098: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8099: if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8100: MatCheckPreallocated(mat,1);
8101: (*mat->ops->iccfactor)(mat,row,info);
8102: PetscObjectStateIncrease((PetscObject)mat);
8103: return(0);
8104: }
8108: /*@
8109: MatSetValuesAdifor - Sets values computed with automatic differentiation into a matrix.
8111: Not Collective
8113: Input Parameters:
8114: + mat - the matrix
8115: . nl - leading dimension of v
8116: - v - the values compute with ADIFOR
8118: Level: developer
8120: Notes:
8121: Must call MatSetColoring() before using this routine. Also this matrix must already
8122: have its nonzero pattern determined.
8124: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
8125: MatSetValues(), MatSetColoring()
8126: @*/
8127: PetscErrorCode MatSetValuesAdifor(Mat mat,PetscInt nl,void *v)
8128: {
8136: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8137: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
8138: if (!mat->ops->setvaluesadifor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8139: (*mat->ops->setvaluesadifor)(mat,nl,v);
8140: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
8141: PetscObjectStateIncrease((PetscObject)mat);
8142: return(0);
8143: }
8147: /*@
8148: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8149: ghosted ones.
8151: Not Collective
8153: Input Parameters:
8154: + mat - the matrix
8155: - diag - the diagonal values, including ghost ones
8157: Level: developer
8159: Notes: Works only for MPIAIJ and MPIBAIJ matrices
8161: .seealso: MatDiagonalScale()
8162: @*/
8163: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8164: {
8166: PetscMPIInt size;
8173: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8174: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8175: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8176: if (size == 1) {
8177: PetscInt n,m;
8178: VecGetSize(diag,&n);
8179: MatGetSize(mat,0,&m);
8180: if (m == n) {
8181: MatDiagonalScale(mat,0,diag);
8182: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8183: } else {
8184: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8185: }
8186: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8187: PetscObjectStateIncrease((PetscObject)mat);
8188: return(0);
8189: }
8193: /*@
8194: MatGetInertia - Gets the inertia from a factored matrix
8196: Collective on Mat
8198: Input Parameter:
8199: . mat - the matrix
8201: Output Parameters:
8202: + nneg - number of negative eigenvalues
8203: . nzero - number of zero eigenvalues
8204: - npos - number of positive eigenvalues
8206: Level: advanced
8208: Notes: Matrix must have been factored by MatCholeskyFactor()
8211: @*/
8212: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8213: {
8219: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8220: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8221: if (!mat->ops->getinertia) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8222: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8223: return(0);
8224: }
8226: /* ----------------------------------------------------------------*/
8229: /*@C
8230: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
8232: Neighbor-wise Collective on Mat and Vecs
8234: Input Parameters:
8235: + mat - the factored matrix
8236: - b - the right-hand-side vectors
8238: Output Parameter:
8239: . x - the result vectors
8241: Notes:
8242: The vectors b and x cannot be the same. I.e., one cannot
8243: call MatSolves(A,x,x).
8245: Notes:
8246: Most users should employ the simplified KSP interface for linear solvers
8247: instead of working directly with matrix algebra routines such as this.
8248: See, e.g., KSPCreate().
8250: Level: developer
8252: Concepts: matrices^triangular solves
8254: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8255: @*/
8256: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8257: {
8263: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8264: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8265: if (!mat->rmap->N && !mat->cmap->N) return(0);
8267: if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8268: MatCheckPreallocated(mat,1);
8269: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8270: (*mat->ops->solves)(mat,b,x);
8271: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8272: return(0);
8273: }
8277: /*@
8278: MatIsSymmetric - Test whether a matrix is symmetric
8280: Collective on Mat
8282: Input Parameter:
8283: + A - the matrix to test
8284: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)
8286: Output Parameters:
8287: . flg - the result
8289: Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
8291: Level: intermediate
8293: Concepts: matrix^symmetry
8295: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8296: @*/
8297: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
8298: {
8305: if (!A->symmetric_set) {
8306: if (!A->ops->issymmetric) {
8307: MatType mattype;
8308: MatGetType(A,&mattype);
8309: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8310: }
8311: (*A->ops->issymmetric)(A,tol,flg);
8312: if (!tol) {
8313: A->symmetric_set = PETSC_TRUE;
8314: A->symmetric = *flg;
8315: if (A->symmetric) {
8316: A->structurally_symmetric_set = PETSC_TRUE;
8317: A->structurally_symmetric = PETSC_TRUE;
8318: }
8319: }
8320: } else if (A->symmetric) {
8321: *flg = PETSC_TRUE;
8322: } else if (!tol) {
8323: *flg = PETSC_FALSE;
8324: } else {
8325: if (!A->ops->issymmetric) {
8326: MatType mattype;
8327: MatGetType(A,&mattype);
8328: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8329: }
8330: (*A->ops->issymmetric)(A,tol,flg);
8331: }
8332: return(0);
8333: }
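/*
   Example of Usage (an illustrative sketch): test symmetry exactly and branch on the
   result; with tol = 0.0 the answer is cached on the matrix.
.vb
      PetscBool symm;

      MatIsSymmetric(A,0.0,&symm);
      if (symm) {
        ... e.g. select a symmetric preconditioner ...
      }
.ve
*/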
8337: /*@
8338: MatIsHermitian - Test whether a matrix is Hermitian
8340: Collective on Mat
8342: Input Parameter:
8343: + A - the matrix to test
8344: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact Hermitian)
8346: Output Parameters:
8347: . flg - the result
8349: Level: intermediate
8351: Concepts: matrix^symmetry
8353: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8354: MatIsSymmetricKnown(), MatIsSymmetric()
8355: @*/
8356: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8357: {
8364: if (!A->hermitian_set) {
8365: if (!A->ops->ishermitian) {
8366: MatType mattype;
8367: MatGetType(A,&mattype);
8368: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8369: }
8370: (*A->ops->ishermitian)(A,tol,flg);
8371: if (!tol) {
8372: A->hermitian_set = PETSC_TRUE;
8373: A->hermitian = *flg;
8374: if (A->hermitian) {
8375: A->structurally_symmetric_set = PETSC_TRUE;
8376: A->structurally_symmetric = PETSC_TRUE;
8377: }
8378: }
8379: } else if (A->hermitian) {
8380: *flg = PETSC_TRUE;
8381: } else if (!tol) {
8382: *flg = PETSC_FALSE;
8383: } else {
8384: if (!A->ops->ishermitian) {
8385: MatType mattype;
8386: MatGetType(A,&mattype);
8387: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8388: }
8389: (*A->ops->ishermitian)(A,tol,flg);
8390: }
8391: return(0);
8392: }
8396: /*@
8397: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8399: Not Collective
8401: Input Parameter:
8402: . A - the matrix to check
8404: Output Parameters:
8405: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8406: - flg - the result
8408: Level: advanced
8410: Concepts: matrix^symmetry
8412: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8413: if you want it explicitly checked
8415: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8416: @*/
8417: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8418: {
8423: if (A->symmetric_set) {
8424: *set = PETSC_TRUE;
8425: *flg = A->symmetric;
8426: } else {
8427: *set = PETSC_FALSE;
8428: }
8429: return(0);
8430: }
8434: /*@
8435: MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.
8437: Not Collective
8439: Input Parameter:
8440: . A - the matrix to check
8442: Output Parameters:
8443: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8444: - flg - the result
8446: Level: advanced
8448: Concepts: matrix^symmetry
8450: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8451: if you want it explicitly checked
8453: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8454: @*/
8455: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8456: {
8461: if (A->hermitian_set) {
8462: *set = PETSC_TRUE;
8463: *flg = A->hermitian;
8464: } else {
8465: *set = PETSC_FALSE;
8466: }
8467: return(0);
8468: }
8472: /*@
8473: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8475: Collective on Mat
8477: Input Parameter:
8478: . A - the matrix to test
8480: Output Parameters:
8481: . flg - the result
8483: Level: intermediate
8485: Concepts: matrix^symmetry
8487: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8488: @*/
8489: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8490: {
8496: if (!A->structurally_symmetric_set) {
8497: if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetric");
8498: (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);
8500: A->structurally_symmetric_set = PETSC_TRUE;
8501: }
8502: *flg = A->structurally_symmetric;
8503: return(0);
8504: }
8508: extern PetscErrorCode MatStashGetInfo_Private(MatStash*,PetscInt*,PetscInt*);
8509: /*@
8510: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8511: to be communicated to other processors during the MatAssemblyBegin/End() process
8513: Not collective
8515: Input Parameter:
8516: . mat - the matrix
8518: Output Parameters:
8519: + nstash - the size of the stash
8520: . reallocs - the number of additional mallocs incurred.
8521: . bnstash - the size of the block stash
8522: - breallocs - the number of additional mallocs incurred in the block stash
8524: Level: advanced
8526: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8528: @*/
8529: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8530: {
8534: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8535: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8536: return(0);
8537: }
8541: /*@C
8542: MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8543: parallel layout
8545: Collective on Mat
8547: Input Parameter:
8548: . mat - the matrix
8550: Output Parameter:
8551: + right - (optional) vector that the matrix can be multiplied against
8552: - left - (optional) vector that the matrix vector product can be stored in
8554: Notes:
8555: The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().
8557: These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed
8559: Level: advanced
8561: .seealso: MatCreate(), VecDestroy()
8562: @*/
8563: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8564: {
8570: MatCheckPreallocated(mat,1);
8571: if (mat->ops->getvecs) {
8572: (*mat->ops->getvecs)(mat,right,left);
8573: } else {
8574: PetscMPIInt size;
8575: PetscInt rbs,cbs;
8576: MPI_Comm_size(PetscObjectComm((PetscObject)mat), &size);
8577: MatGetBlockSizes(mat,&rbs,&cbs);
8578: if (right) {
8579: VecCreate(PetscObjectComm((PetscObject)mat),right);
8580: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8581: VecSetBlockSize(*right,cbs);
8582: VecSetType(*right,VECSTANDARD);
8583: PetscLayoutReference(mat->cmap,&(*right)->map);
8584: }
8585: if (left) {
8586: VecCreate(PetscObjectComm((PetscObject)mat),left);
8587: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8588: VecSetBlockSize(*left,rbs);
8589: VecSetType(*left,VECSTANDARD);
8590: PetscLayoutReference(mat->rmap,&(*left)->map);
8591: }
8592: }
8593: return(0);
8594: }
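/*
   Example of Usage (an illustrative sketch): obtain layout-compatible work vectors for
   y = A*x and release them afterwards.
.vb
      Vec x,y;

      MatCreateVecs(A,&x,&y);      x is compatible with A's columns, y with A's rows
      MatMult(A,x,y);
      VecDestroy(&x);
      VecDestroy(&y);
.ve
*/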
8598: /*@C
8599: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8600: with default values.
8602: Not Collective
8604: Input Parameters:
8605: . info - the MatFactorInfo data structure
8608: Notes: The solvers are generally used through the KSP and PC objects, for example
8609: PCLU, PCILU, PCCHOLESKY, PCICC
8611: Level: developer
8613: .seealso: MatFactorInfo
8615: Developer Note: fortran interface is not autogenerated as the f90
8616: interface definition cannot be generated correctly [due to MatFactorInfo]
8618: @*/
8620: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8621: {
8625: PetscMemzero(info,sizeof(MatFactorInfo));
8626: return(0);
8627: }
8631: /*@
8632: MatPtAP - Creates the matrix product C = P^T * A * P
8634: Neighbor-wise Collective on Mat
8636: Input Parameters:
8637: + A - the matrix
8638: . P - the projection matrix
8639: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8640: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P))
8642: Output Parameters:
8643: . C - the product matrix
8645: Notes:
8646: C will be created and must be destroyed by the user with MatDestroy().
8648: This routine is currently only implemented for pairs of AIJ matrices and classes
8649: which inherit from AIJ.
8651: Level: intermediate
8653: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
8654: @*/
8655: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
8656: {
8658: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
8659: PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
8660: PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
8661: PetscBool viatranspose=PETSC_FALSE,viamatmatmatmult=PETSC_FALSE;
8664: PetscOptionsGetBool(((PetscObject)A)->prefix,"-matptap_viatranspose",&viatranspose,NULL);
8665: PetscOptionsGetBool(((PetscObject)A)->prefix,"-matptap_viamatmatmatmult",&viamatmatmatmult,NULL);
8669: MatCheckPreallocated(A,1);
8670: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8671: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8674: MatCheckPreallocated(P,2);
8675: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8676: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8678: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8679: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8681: if (scall == MAT_REUSE_MATRIX) {
8684: if (viatranspose || viamatmatmatmult) {
8685: Mat Pt;
8686: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
8687: if (viamatmatmatmult) {
8688: MatMatMatMult(Pt,A,P,scall,fill,C);
8689: } else {
8690: Mat AP;
8691: MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
8692: MatMatMult(Pt,AP,scall,fill,C);
8693: MatDestroy(&AP);
8694: }
8695: MatDestroy(&Pt);
8696: } else {
8697: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
8698: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
8699: (*(*C)->ops->ptapnumeric)(A,P,*C);
8700: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
8701: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
8702: }
8703: return(0);
8704: }
8706: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
8707: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8709: fA = A->ops->ptap;
8710: fP = P->ops->ptap;
8711: if (fP == fA) {
8712: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
8713: ptap = fA;
8714: } else {
8715: /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
8716: char ptapname[256];
8717: PetscStrcpy(ptapname,"MatPtAP_");
8718: PetscStrcat(ptapname,((PetscObject)A)->type_name);
8719: PetscStrcat(ptapname,"_");
8720: PetscStrcat(ptapname,((PetscObject)P)->type_name);
8721: PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
8722: PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
8723: if (!ptap) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
8724: }
8726: if (viatranspose || viamatmatmatmult) {
8727: Mat Pt;
8728: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
8729: if (viamatmatmatmult) {
8730: MatMatMatMult(Pt,A,P,scall,fill,C);
8731: PetscInfo(*C,"MatPtAP via MatMatMatMult\n");
8732: } else {
8733: Mat AP;
8734: MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
8735: MatMatMult(Pt,AP,scall,fill,C);
8736: MatDestroy(&AP);
8737: PetscInfo(*C,"MatPtAP via MatTranspose and MatMatMult\n");
8738: }
8739: MatDestroy(&Pt);
8740: } else {
8741: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
8742: (*ptap)(A,P,scall,fill,C);
8743: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
8744: }
8745: return(0);
8746: }
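/*
   Example of Usage (an illustrative sketch for a Galerkin coarse operator): form
   C = P^T*A*P once, then recompute it numerically when the values of A change but the
   nonzero structure does not.
.vb
      Mat C;

      MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);
      ... values of A change, same nonzero structure ...
      MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);
      MatDestroy(&C);
.ve
*/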
8750: /*@
8751: MatPtAPNumeric - Computes the matrix product C = P^T * A * P
8753: Neighbor-wise Collective on Mat
8755: Input Parameters:
8756: + A - the matrix
8757: - P - the projection matrix
8759: Output Parameters:
8760: . C - the product matrix
8762: Notes:
8763: C must have been created by calling MatPtAPSymbolic and must be destroyed by
8764: the user using MatDeatroy().
8766: This routine is currently only implemented for pairs of AIJ matrices and classes
8767: which inherit from AIJ. C will be of type MATAIJ.
8769: Level: intermediate
8771: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
8772: @*/
8773: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
8774: {
8780: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8781: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8784: MatCheckPreallocated(P,2);
8785: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8786: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8789: MatCheckPreallocated(C,3);
8790: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8791: if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
8792: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8793: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8794: if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
8795: MatCheckPreallocated(A,1);
8797: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
8798: (*C->ops->ptapnumeric)(A,P,C);
8799: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
8800: return(0);
8801: }
8805: /*@
8806: MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P
8808: Neighbor-wise Collective on Mat
8810: Input Parameters:
8811: + A - the matrix
8812: . P - the projection matrix
8813: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P))
8814: Output Parameters:
8815: . C - the (i,j) structure of the product matrix
8817: Notes:
8818: C will be created and must be destroyed by the user with MatDestroy().
8820: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
8821: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
8822: this (i,j) structure by calling MatPtAPNumeric().
8824: Level: intermediate
8826: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
8827: @*/
8828: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
8829: {
8835: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8836: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8837: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8840: MatCheckPreallocated(P,2);
8841: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8842: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8845: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8846: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8847: MatCheckPreallocated(A,1);
8848: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
8849: (*A->ops->ptapsymbolic)(A,P,fill,C);
8850: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
8852: /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
8853: return(0);
8854: }
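/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): the explicit
   symbolic/numeric split of the PtAP product, useful when A changes values many times on a fixed
   nonzero pattern. ExamplePtAPSymbolicNumeric, A, P and nsteps are hypothetical; error checking
   is omitted.
*/
static PetscErrorCode ExamplePtAPSymbolicNumeric(Mat A,Mat P,PetscInt nsteps)
{
  Mat      C;
  PetscInt k;

  MatPtAPSymbolic(A,P,2.0,&C);      /* fill must be >= 1.0; creates the (i,j) structure of C */
  for (k=0; k<nsteps; k++) {
    /* ... update the values of A, keeping its nonzero pattern ... */
    MatPtAPNumeric(A,P,C);          /* fills in the numerical values of C = P^T*A*P */
  }
  MatDestroy(&C);
  return(0);
}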
8858: /*@
8859: MatRARt - Creates the matrix product C = R * A * R^T
8861: Neighbor-wise Collective on Mat
8863: Input Parameters:
8864: + A - the matrix
8865: . R - the projection matrix
8866: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8867: - fill - expected fill as ratio of nnz(C)/nnz(A)
8869: Output Parameters:
8870: . C - the product matrix
8872: Notes:
8873: C will be created and must be destroyed by the user with MatDestroy().
8875: This routine is currently only implemented for pairs of AIJ matrices and classes
8876: which inherit from AIJ.
8878: Level: intermediate
8880: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
8881: @*/
8882: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
8883: {
8889: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8890: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8893: MatCheckPreallocated(R,2);
8894: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8895: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8897: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8898: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8899: MatCheckPreallocated(A,1);
8901: if (!A->ops->rart) {
8902: MatType mattype;
8903: MatGetType(A,&mattype);
8904: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix of type <%s> does not support RARt",mattype);
8905: }
8906: PetscLogEventBegin(MAT_RARt,A,R,0,0);
8907: (*A->ops->rart)(A,R,scall,fill,C);
8908: PetscLogEventEnd(MAT_RARt,A,R,0,0);
8909: return(0);
8910: }
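/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): computing
   C = R*A*R^T. Note that, unlike MatMatMult(), this routine (as listed above) does not translate
   PETSC_DEFAULT into a default fill, so an explicit fill >= 1.0 is passed. ExampleRARtUsage, A
   and R are hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleRARtUsage(Mat A,Mat R)
{
  Mat C;

  MatRARt(A,R,MAT_INITIAL_MATRIX,2.0,&C);
  /* ... update the values of A, keeping its nonzero pattern ... */
  MatRARt(A,R,MAT_REUSE_MATRIX,2.0,&C);
  MatDestroy(&C);
  return(0);
}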
8914: /*@
8915: MatRARtNumeric - Computes the matrix product C = R * A * R^T
8917: Neighbor-wise Collective on Mat
8919: Input Parameters:
8920: + A - the matrix
8921: - R - the projection matrix
8923: Output Parameters:
8924: . C - the product matrix
8926: Notes:
8927: C must have been created by calling MatRARtSymbolic and must be destroyed by
8928: the user using MatDestroy().
8930: This routine is currently only implemented for pairs of AIJ matrices and classes
8931: which inherit from AIJ. C will be of type MATAIJ.
8933: Level: intermediate
8935: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
8936: @*/
8937: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
8938: {
8944: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8945: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8948: MatCheckPreallocated(R,2);
8949: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8950: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8953: MatCheckPreallocated(C,3);
8954: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8955: if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
8956: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8957: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8958: if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
8959: MatCheckPreallocated(A,1);
8961: PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
8962: (*A->ops->rartnumeric)(A,R,C);
8963: PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
8964: return(0);
8965: }
8969: /*@
8970: MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T
8972: Neighbor-wise Collective on Mat
8974: Input Parameters:
8975: + A - the matrix
8976: - R - the projection matrix
8978: Output Parameters:
8979: . C - the (i,j) structure of the product matrix
8981: Notes:
8982: C will be created and must be destroyed by the user with MatDestroy().
8984: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
8985: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
8986: this (i,j) structure by calling MatRARtNumeric().
8988: Level: intermediate
8990: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
8991: @*/
8992: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
8993: {
8999: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9000: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9001: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9004: MatCheckPreallocated(R,2);
9005: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9006: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9009: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9010: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9011: MatCheckPreallocated(A,1);
9012: PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
9013: (*A->ops->rartsymbolic)(A,R,fill,C);
9014: PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);
9016: MatSetBlockSizes(*C,PetscAbs(R->rmap->bs),PetscAbs(R->rmap->bs));
9017: return(0);
9018: }
9022: /*@
9023: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
9025: Neighbor-wise Collective on Mat
9027: Input Parameters:
9028: + A - the left matrix
9029: . B - the right matrix
9030: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9031: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate
9032: if the result is a dense matrix this is irrelevant
9034: Output Parameters:
9035: . C - the product matrix
9037: Notes:
9038: Unless scall is MAT_REUSE_MATRIX C will be created.
9040: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9042: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9043: actually needed.
9045: If you have many matrices with the same non-zero structure to multiply, you
9046: should either
9047: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9048: $ 2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
9050: Level: intermediate
9052: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP()
9053: @*/
9054: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9055: {
9057: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9058: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9059: PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9064: MatCheckPreallocated(A,1);
9065: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9066: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9069: MatCheckPreallocated(B,2);
9070: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9071: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9073: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9074: if (scall == MAT_REUSE_MATRIX) {
9077: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9078: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
9079: (*(*C)->ops->matmultnumeric)(A,B,*C);
9080: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
9081: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9082: return(0);
9083: }
9084: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9085: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9087: fA = A->ops->matmult;
9088: fB = B->ops->matmult;
9089: if (fB == fA) {
9090: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
9091: mult = fB;
9092: } else {
9093: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9094: char multname[256];
9095: PetscStrcpy(multname,"MatMatMult_");
9096: PetscStrcat(multname,((PetscObject)A)->type_name);
9097: PetscStrcat(multname,"_");
9098: PetscStrcat(multname,((PetscObject)B)->type_name);
9099: PetscStrcat(multname,"_C"); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
9100: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9101: if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9102: }
9103: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9104: (*mult)(A,B,scall,fill,C);
9105: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9106: return(0);
9107: }
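/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): computing
   C = A*B once and then reusing C when only the values of A and/or B change. ExampleMatMatMultUsage,
   A and B are hypothetical assembled matrices of compatible sizes; error checking is omitted.
*/
static PetscErrorCode ExampleMatMatMultUsage(Mat A,Mat B)
{
  Mat C;

  MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);  /* PETSC_DEFAULT fill is mapped to 2.0 above */
  /* ... update the values of A and/or B, keeping their nonzero patterns ... */
  MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);    /* only the numeric phase runs */
  MatDestroy(&C);
  return(0);
}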
9111: /*@
9112: MatMatMultSymbolic - Performs construction and preallocation, and computes the (i,j) structure
9113: of the matrix-matrix product C=A*B. Call this routine before calling MatMatMultNumeric().
9115: Neighbor-wise Collective on Mat
9117: Input Parameters:
9118: + A - the left matrix
9119: . B - the right matrix
9120: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate,
9121: if C is a dense matrix this is irrelevant
9123: Output Parameters:
9124: . C - the product matrix
9126: Notes:
9127: C will be created and must be destroyed by the user with MatDestroy().
9129: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9130: actually needed.
9132: This routine is currently implemented for
9133: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
9134: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9135: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
9137: Level: intermediate
9139: Developers Note: There are ways to estimate the number of nonzeros in the resulting product; see, for example, http://arxiv.org/abs/1006.4173
9140: We should incorporate them into PETSc.
9142: .seealso: MatMatMult(), MatMatMultNumeric()
9143: @*/
9144: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
9145: {
9147: PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
9148: PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
9149: PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;
9154: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9155: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9159: MatCheckPreallocated(B,2);
9160: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9161: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9164: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9165: if (fill == PETSC_DEFAULT) fill = 2.0;
9166: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9167: MatCheckPreallocated(A,1);
9169: Asymbolic = A->ops->matmultsymbolic;
9170: Bsymbolic = B->ops->matmultsymbolic;
9171: if (Asymbolic == Bsymbolic) {
9172: if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
9173: symbolic = Bsymbolic;
9174: } else { /* dispatch based on the type of A and B */
9175: char symbolicname[256];
9176: PetscStrcpy(symbolicname,"MatMatMultSymbolic_");
9177: PetscStrcat(symbolicname,((PetscObject)A)->type_name);
9178: PetscStrcat(symbolicname,"_");
9179: PetscStrcat(symbolicname,((PetscObject)B)->type_name);
9180: PetscStrcat(symbolicname,"_C");
9181: PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
9182: if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9183: }
9184: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
9185: (*symbolic)(A,B,fill,C);
9186: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
9187: return(0);
9188: }
9192: /*@
9193: MatMatMultNumeric - Performs the numeric matrix-matrix product.
9194: Call this routine after first calling MatMatMultSymbolic().
9196: Neighbor-wise Collective on Mat
9198: Input Parameters:
9199: + A - the left matrix
9200: - B - the right matrix
9202: Output Parameters:
9203: . C - the product matrix, which was created by MatMatMultSymbolic() or a call to MatMatMult().
9205: Notes:
9206: C must have been created with MatMatMultSymbolic().
9208: This routine is currently implemented for
9209: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
9210: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9211: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
9213: Level: intermediate
9215: .seealso: MatMatMult(), MatMatMultSymbolic()
9216: @*/
9217: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
9218: {
9222: MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
9223: return(0);
9224: }
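/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): the explicit
   symbolic/numeric split for many products sharing one fixed nonzero structure, as suggested in the
   MatMatMult() notes above. ExampleMatMatMultSymbolicNumeric, A, B and nsteps are hypothetical;
   error checking is omitted.
*/
static PetscErrorCode ExampleMatMatMultSymbolicNumeric(Mat A,Mat B,PetscInt nsteps)
{
  Mat      C;
  PetscInt k;

  MatMatMultSymbolic(A,B,PETSC_DEFAULT,&C);  /* creates C with the structure of A*B */
  for (k=0; k<nsteps; k++) {
    /* ... update the values of A and/or B, keeping their nonzero patterns ... */
    MatMatMultNumeric(A,B,C);                /* equivalent to MatMatMult() with MAT_REUSE_MATRIX */
  }
  MatDestroy(&C);
  return(0);
}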
9228: /*@
9229: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
9231: Neighbor-wise Collective on Mat
9233: Input Parameters:
9234: + A - the left matrix
9235: . B - the right matrix
9236: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9237: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9239: Output Parameters:
9240: . C - the product matrix
9242: Notes:
9243: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9245: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9247: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9248: actually needed.
9250: This routine is currently only implemented for pairs of SeqAIJ matrices. C will be of type MATSEQAIJ.
9252: Level: intermediate
9254: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult() MatPtAP()
9255: @*/
9256: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9257: {
9259: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9260: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9265: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9266: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9269: MatCheckPreallocated(B,2);
9270: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9271: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9273: if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
9274: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9275: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9276: MatCheckPreallocated(A,1);
9278: fA = A->ops->mattransposemult;
9279: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
9280: fB = B->ops->mattransposemult;
9281: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
9282: if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9284: PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
9285: if (scall == MAT_INITIAL_MATRIX) {
9286: PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
9287: (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
9288: PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
9289: }
9290: PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
9291: (*A->ops->mattransposemultnumeric)(A,B,*C);
9292: PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
9293: PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
9294: return(0);
9295: }
9299: /*@
9300: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
9302: Neighbor-wise Collective on Mat
9304: Input Parameters:
9305: + A - the left matrix
9306: . B - the right matrix
9307: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9308: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9310: Output Parameters:
9311: . C - the product matrix
9313: Notes:
9314: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9316: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9318: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9319: actually needed.
9321: This routine is currently implemented for pairs of AIJ matrices and pairs of SeqDense matrices and classes
9322: which inherit from SeqAIJ. C will be of the same type as the input matrices.
9324: Level: intermediate
9326: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
9327: @*/
9328: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9329: {
9331: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9332: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9333: PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;
9338: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9339: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9342: MatCheckPreallocated(B,2);
9343: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9344: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9346: if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
9347: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9348: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9349: MatCheckPreallocated(A,1);
9351: fA = A->ops->transposematmult;
9352: fB = B->ops->transposematmult;
9353: if (fB==fA) {
9354: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9355: transposematmult = fA;
9356: } else {
9357: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9358: char multname[256];
9359: PetscStrcpy(multname,"MatTransposeMatMult_");
9360: PetscStrcat(multname,((PetscObject)A)->type_name);
9361: PetscStrcat(multname,"_");
9362: PetscStrcat(multname,((PetscObject)B)->type_name);
9363: PetscStrcat(multname,"_C"); /* e.g., multname = "MatTransposeMatMult_seqaij_seqdense_C" */
9364: PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9365: if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9366: }
9367: PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9368: (*transposematmult)(A,B,scall,fill,C);
9369: PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9370: return(0);
9371: }
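/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): forming the
   normal-equations matrix N = A^T*A, a common application of MatTransposeMatMult(). The names
   ExampleNormalEquations and A are hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleNormalEquations(Mat A)
{
  Mat N;

  MatTransposeMatMult(A,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&N);
  /* ... use N, for example as the operator of a linear solve ... */
  MatDestroy(&N);
  return(0);
}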
9375: /*@
9376: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9378: Neighbor-wise Collective on Mat
9380: Input Parameters:
9381: + A - the left matrix
9382: . B - the middle matrix
9383: . C - the right matrix
9384: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9385: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B)+nnz(C)), use PETSC_DEFAULT if you do not have a good estimate
9386: if the result is a dense matrix this is irrelevant
9388: Output Parameters:
9389: . D - the product matrix
9391: Notes:
9392: Unless scall is MAT_REUSE_MATRIX D will be created.
9394: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
9396: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9397: actually needed.
9399: If you have many matrices with the same non-zero structure to multiply, you
9400: should either
9401: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9402: $ 2) call MatMatMatMultSymbolic() once and then MatMatMatMultNumeric() for each product needed
9404: Level: intermediate
9406: .seealso: MatMatMult(), MatPtAP()
9407: @*/
9408: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
9409: {
9411: PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9412: PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9413: PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9414: PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9419: MatCheckPreallocated(A,1);
9420: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9421: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9424: MatCheckPreallocated(B,2);
9425: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9426: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9429: MatCheckPreallocated(C,3);
9430: if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9431: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9432: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9433: if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
9434: if (scall == MAT_REUSE_MATRIX) {
9437: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9438: (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
9439: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9440: return(0);
9441: }
9442: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9443: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9445: fA = A->ops->matmatmult;
9446: fB = B->ops->matmatmult;
9447: fC = C->ops->matmatmult;
9448: if (fA == fB && fA == fC) {
9449: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9450: mult = fA;
9451: } else {
9452: /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
9453: char multname[256];
9454: PetscStrcpy(multname,"MatMatMatMult_");
9455: PetscStrcat(multname,((PetscObject)A)->type_name);
9456: PetscStrcat(multname,"_");
9457: PetscStrcat(multname,((PetscObject)B)->type_name);
9458: PetscStrcat(multname,"_");
9459: PetscStrcat(multname,((PetscObject)C)->type_name);
9460: PetscStrcat(multname,"_C");
9461: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9462: if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
9463: }
9464: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9465: (*mult)(A,B,C,scall,fill,D);
9466: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9467: return(0);
9468: }
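/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): a triple product
   D = R*A*P, for instance a coarse operator where the restriction R is not P^T. ExampleTripleProduct,
   R, A and P are hypothetical assembled matrices with chained compatible sizes; error checking is
   omitted.
*/
static PetscErrorCode ExampleTripleProduct(Mat R,Mat A,Mat P)
{
  Mat D;

  MatMatMatMult(R,A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);
  /* ... update the values of A, keeping the nonzero patterns, then recompute ... */
  MatMatMatMult(R,A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&D);
  MatDestroy(&D);
  return(0);
}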
9472: /*@C
9473: MatCreateRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.
9475: Collective on Mat
9477: Input Parameters:
9478: + mat - the matrix
9479: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
9480: . subcomm - MPI communicator split from the communicator where mat resides (or MPI_COMM_NULL if nsubcomm is used)
9481: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9483: Output Parameter:
9484: . matredundant - redundant matrix
9486: Notes:
9487: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
9488: original matrix has not changed since the last call to MatCreateRedundantMatrix().
9490: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
9491: calling it.
9493: Level: advanced
9495: Concepts: subcommunicator
9496: Concepts: duplicate matrix
9498: .seealso: MatDestroy()
9499: @*/
9500: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
9501: {
9503: MPI_Comm comm;
9504: PetscMPIInt size;
9505: PetscInt mloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
9506: Mat_Redundant *redund=NULL;
9507: PetscSubcomm psubcomm=NULL;
9508: MPI_Comm subcomm_in=subcomm;
9509: Mat *matseq;
9510: IS isrow,iscol;
9511: PetscBool newsubcomm=PETSC_FALSE;
9514: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
9515: if (size == 1 || nsubcomm == 1) {
9516: if (reuse == MAT_INITIAL_MATRIX) {
9517: MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
9518: } else {
9519: MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
9520: }
9521: return(0);
9522: }
9525: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
9528: }
9529: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9530: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9531: MatCheckPreallocated(mat,1);
9533: PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
9534: if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
9535: /* create psubcomm, then get subcomm */
9536: PetscObjectGetComm((PetscObject)mat,&comm);
9537: MPI_Comm_size(comm,&size);
9538: if (nsubcomm < 1 || nsubcomm > size) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"nsubcomm must be between 1 and %D",size);
9540: PetscSubcommCreate(comm,&psubcomm);
9541: PetscSubcommSetNumber(psubcomm,nsubcomm);
9542: PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
9543: PetscSubcommSetFromOptions(psubcomm);
9544: PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
9545: newsubcomm = PETSC_TRUE;
9546: PetscSubcommDestroy(&psubcomm);
9547: }
9549: /* get isrow, iscol and a local sequential matrix matseq[0] */
9550: if (reuse == MAT_INITIAL_MATRIX) {
9551: mloc_sub = PETSC_DECIDE;
9552: if (bs < 1) {
9553: PetscSplitOwnership(subcomm,&mloc_sub,&M);
9554: } else {
9555: PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
9556: }
9557: MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
9558: rstart = rend - mloc_sub;
9559: ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
9560: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
9561: } else { /* reuse == MAT_REUSE_MATRIX */
9562: /* retrieve subcomm */
9563: PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
9564: redund = (*matredundant)->redundant;
9565: isrow = redund->isrow;
9566: iscol = redund->iscol;
9567: matseq = redund->matseq;
9568: }
9569: MatGetSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);
9571: /* get matredundant over subcomm */
9572: if (reuse == MAT_INITIAL_MATRIX) {
9573: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],mloc_sub,reuse,matredundant);
9575: /* create a supporting struct and attach it to C for reuse */
9576: PetscNewLog(*matredundant,&redund);
9577: (*matredundant)->redundant = redund;
9578: redund->isrow = isrow;
9579: redund->iscol = iscol;
9580: redund->matseq = matseq;
9581: if (newsubcomm) {
9582: redund->subcomm = subcomm;
9583: } else {
9584: redund->subcomm = MPI_COMM_NULL;
9585: }
9586: } else {
9587: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
9588: }
9589: PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
9590: return(0);
9591: }
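/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): duplicating a
   parallel matrix onto 2 subcommunicators so that each group of ranks holds its own redundant copy.
   Passing MPI_COMM_NULL lets the routine split the communicator itself. ExampleRedundantMatrix and
   A are hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleRedundantMatrix(Mat A)
{
  Mat Ared;

  MatCreateRedundantMatrix(A,2,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&Ared);
  /* ... later, after the values of A change on the same nonzero pattern ... */
  MatCreateRedundantMatrix(A,2,MPI_COMM_NULL,MAT_REUSE_MATRIX,&Ared);
  MatDestroy(&Ared);
  return(0);
}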
9595: /*@C
9596: MatGetMultiProcBlock - Creates multiple [bjacobi] 'parallel submatrices' from
9597: a given 'mat' object. Each submatrix can span multiple procs.
9599: Collective on Mat
9601: Input Parameters:
9602: + mat - the matrix
9603: . subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
9604: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9606: Output Parameter:
9607: . subMat - parallel submatrices, each spanning a given subComm
9609: Notes:
9610: The submatrix partition across processors is dictated by 'subComm', a
9611: communicator obtained by MPI_Comm_split(comm). The split
9612: is not restricted to groups of consecutive original ranks.
9614: Because of the MPI_Comm_split() usage, the parallel layout of the submatrices
9615: maps directly to the layout of the original matrix [wrt the local
9616: row,col partitioning]. So the original 'DiagonalMat' naturally maps
9617: into the 'DiagonalMat' of the subMat, hence it is used directly from
9618: the subMat. However the offDiagMat loses some columns, and these are
9619: reconstructed with MatSetValues().
9621: Level: advanced
9623: Concepts: subcommunicator
9624: Concepts: submatrices
9626: .seealso: MatGetSubMatrices()
9627: @*/
9628: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
9629: {
9631: PetscMPIInt commsize,subCommSize;
9634: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
9635: MPI_Comm_size(subComm,&subCommSize);
9636: if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);
9638: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
9639: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
9640: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
9641: return(0);
9642: }
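/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): splitting the
   matrix communicator into pairs of ranks and extracting the corresponding parallel submatrix on
   each pair, for matrix types that support this operation. ExampleMultiProcBlock and A are
   hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleMultiProcBlock(Mat A)
{
  MPI_Comm    comm,subcomm;
  PetscMPIInt rank;
  Mat         subA;

  PetscObjectGetComm((PetscObject)A,&comm);
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_split(comm,rank/2,rank,&subcomm);          /* ranks {0,1},{2,3},... share a color */
  MatGetMultiProcBlock(A,subcomm,MAT_INITIAL_MATRIX,&subA);
  /* ... use subA, e.g. inside a block-Jacobi style preconditioner ... */
  MatDestroy(&subA);
  MPI_Comm_free(&subcomm);
  return(0);
}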
9646: /*@
9647: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
9649: Not Collective
9651: Input Arguments:
9652: + mat - matrix to extract local submatrix from
9653: . isrow - local row indices for submatrix
9654: - iscol - local column indices for submatrix
9656: Output Arguments:
9657: . submat - the submatrix
9659: Level: intermediate
9661: Notes:
9662: The submat should be returned with MatRestoreLocalSubMatrix().
9664: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
9665: the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.
9667: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
9668: MatSetValuesBlockedLocal() will also be implemented.
9670: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef()
9671: @*/
9672: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9673: {
9683: if (mat->ops->getlocalsubmatrix) {
9684: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
9685: } else {
9686: MatCreateLocalRef(mat,isrow,iscol,submat);
9687: }
9688: return(0);
9689: }
9693: /*@
9694: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
9696: Not Collective
9698: Input Arguments:
9699: + mat - matrix to extract local submatrix from
9700: . isrow - local row indices for submatrix
9701: . iscol - local column indices for submatrix
9702: - submat - the submatrix
9704: Level: intermediate
9706: .seealso: MatGetLocalSubMatrix()
9707: @*/
9708: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9709: {
9718: if (*submat) {
9720: }
9722: if (mat->ops->restorelocalsubmatrix) {
9723: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
9724: } else {
9725: MatDestroy(submat);
9726: }
9727: *submat = NULL;
9728: return(0);
9729: }
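/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): assembling into a
   local submatrix of A. A is assumed to already have a local-to-global mapping set (e.g. with
   MatSetLocalToGlobalMapping()); ExampleLocalSubMatrix, the index sets and the inserted entry are
   hypothetical, and error checking is omitted.
*/
static PetscErrorCode ExampleLocalSubMatrix(Mat A,IS isrow,IS iscol)
{
  Mat         subA;
  PetscInt    i = 0,j = 0;
  PetscScalar v = 1.0;

  MatGetLocalSubMatrix(A,isrow,iscol,&subA);
  MatSetValuesLocal(subA,1,&i,1,&j,&v,ADD_VALUES);    /* indices are local to isrow/iscol */
  MatRestoreLocalSubMatrix(A,isrow,iscol,&subA);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  return(0);
}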
9731: /* --------------------------------------------------------*/
9734: /*@
9735: MatFindZeroDiagonals - Finds all the rows of a matrix that have zero or no entry in the matrix
9737: Collective on Mat
9739: Input Parameter:
9740: . mat - the matrix
9742: Output Parameter:
9743: . is - if any rows have zero diagonals this contains the list of them
9745: Level: developer
9747: Concepts: matrix-vector product
9749: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9750: @*/
9751: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
9752: {
9758: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9759: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9761: if (!mat->ops->findzerodiagonals) {
9762: Vec diag;
9763: const PetscScalar *a;
9764: PetscInt *rows;
9765: PetscInt rStart, rEnd, r, nrow = 0;
9767: MatCreateVecs(mat, &diag, NULL);
9768: MatGetDiagonal(mat, diag);
9769: MatGetOwnershipRange(mat, &rStart, &rEnd);
9770: VecGetArrayRead(diag, &a);
9771: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
9772: PetscMalloc1(nrow, &rows);
9773: nrow = 0;
9774: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
9775: VecRestoreArrayRead(diag, &a);
9776: VecDestroy(&diag);
9777: ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
9778: } else {
9779: (*mat->ops->findzerodiagonals)(mat, is);
9780: }
9781: return(0);
9782: }
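/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): listing the rows
   of a matrix whose diagonal entry is zero or absent, e.g. before choosing a factorization or
   relaxation strategy. ExampleFindZeroDiagonals and A are hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleFindZeroDiagonals(Mat A)
{
  IS       zerorows;
  PetscInt nzero;

  MatFindZeroDiagonals(A,&zerorows);
  ISGetLocalSize(zerorows,&nzero);
  PetscPrintf(PETSC_COMM_SELF,"local rows with zero diagonal: %D\n",nzero);
  ISDestroy(&zerorows);
  return(0);
}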
9786: /*@
9787: MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)
9789: Collective on Mat
9791: Input Parameter:
9792: . mat - the matrix
9794: Output Parameter:
9795: . is - contains the list of rows with off block diagonal entries
9797: Level: developer
9799: Concepts: matrix-vector product
9801: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9802: @*/
9803: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
9804: {
9810: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9811: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9813: if (!mat->ops->findoffblockdiagonalentries) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find off block diagonal entries defined");
9814: (*mat->ops->findoffblockdiagonalentries)(mat,is);
9815: return(0);
9816: }
9820: /*@C
9821: MatInvertBlockDiagonal - Inverts the block diagonal entries.
9823: Collective on Mat
9825: Input Parameters:
9826: . mat - the matrix
9828: Output Parameters:
9829: . values - the block inverses in column major order (FORTRAN-like)
9831: Note:
9832: This routine is not available from Fortran.
9834: Level: advanced
9835: @*/
9836: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
9837: {
9842: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9843: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9844: if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
9845: (*mat->ops->invertblockdiagonal)(mat,values);
9846: return(0);
9847: }
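/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): reading the
   inverted diagonal blocks. The returned array holds one bs*bs block per local block row, each in
   column-major (FORTRAN-like) order. ExampleInvertBlockDiagonal and A are hypothetical, with A of a
   type that supports this operation; error checking is omitted.
*/
static PetscErrorCode ExampleInvertBlockDiagonal(Mat A)
{
  const PetscScalar *v;
  PetscInt          bs,m,nblocks;

  MatGetBlockSize(A,&bs);
  MatGetLocalSize(A,&m,NULL);
  nblocks = m/bs;
  MatInvertBlockDiagonal(A,&v);
  /* entry (row,col) of local block b is v[b*bs*bs + col*bs + row] */
  if (nblocks) {PetscPrintf(PETSC_COMM_SELF,"first inverse block entry: %g\n",(double)PetscRealPart(v[0]));}
  return(0);
}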
9851: /*@C
9852: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
9853: via MatTransposeColoringCreate().
9855: Collective on MatTransposeColoring
9857: Input Parameter:
9858: . c - coloring context
9860: Level: intermediate
9862: .seealso: MatTransposeColoringCreate()
9863: @*/
9864: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
9865: {
9866: PetscErrorCode ierr;
9867: MatTransposeColoring matcolor=*c;
9870: if (!matcolor) return(0);
9871: if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}
9873: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
9874: PetscFree(matcolor->rows);
9875: PetscFree(matcolor->den2sp);
9876: PetscFree(matcolor->colorforcol);
9877: PetscFree(matcolor->columns);
9878: if (matcolor->brows>0) {
9879: PetscFree(matcolor->lstart);
9880: }
9881: PetscHeaderDestroy(c);
9882: return(0);
9883: }
9887: /*@C
9888: MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
9889: a MatTransposeColoring context has been created, computes a dense B^T by applying
9890: the MatTransposeColoring to the sparse B.
9892: Collective on MatTransposeColoring
9894: Input Parameters:
9895: + coloring - coloring context created with MatTransposeColoringCreate()
9896: . B - sparse matrix B
9897: - Btdense - symbolic dense matrix B^T
9899: Output Parameter:
9900: . Btdense - dense matrix B^T
9902: Options Database Keys:
9903: + -mat_transpose_coloring_view - Activates basic viewing of the coloring
9904: . -mat_transpose_coloring_view_draw - Activates drawing of coloring
9905: - -mat_transpose_coloring_view_info - Activates viewing of coloring info
9907: Level: intermediate
9909: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy()
9911: .keywords: coloring
9912: @*/
9913: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
9914: {
9922: if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
9923: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
9924: return(0);
9925: }
9929: /*@C
9930: MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
9931: a MatTransposeColoring context has been created and a dense matrix Cden=A*Btdense
9932: in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse matrix
9933: Csp from Cden.
9935: Collective on MatTransposeColoring
9937: Input Parameters:
9938: + coloring - coloring context created with MatTransposeColoringCreate()
9939: - Cden - matrix product of a sparse matrix and a dense matrix Btdense
9941: Output Parameter:
9942: . Csp - sparse matrix
9944: Options Database Keys:
9945: + -mat_multtranspose_coloring_view - Activates basic viewing of the coloring
9946: . -mat_multtranspose_coloring_view_draw - Activates drawing of coloring
9947: - -mat_multtranspose_coloring_view_info - Activates viewing of coloring info
9949: Level: intermediate
9951: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
9953: .keywords: coloring
9954: @*/
9955: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
9956: {
9964: if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
9965: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
9966: return(0);
9967: }
9971: /*@C
9972: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
9974: Collective on Mat
9976: Input Parameters:
9977: + mat - the matrix product C
9978: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
9980: Output Parameter:
9981: . color - the new coloring context
9983: Level: intermediate
9985: .seealso: MatTransposeColoringDestroy(), MatTransposeColoringSetFromOptions(), MatTransColoringApplySpToDen(),
9986: MatTransColoringApplyDenToSp(), MatTransposeColoringView(),
9987: @*/
9988: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
9989: {
9990: MatTransposeColoring c;
9991: MPI_Comm comm;
9992: PetscErrorCode ierr;
9995: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
9996: PetscObjectGetComm((PetscObject)mat,&comm);
9997: PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);
9999: c->ctype = iscoloring->ctype;
10000: if (mat->ops->transposecoloringcreate) {
10001: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
10002: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");
10004: *color = c;
10005: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10006: return(0);
10007: }
10011: /*@
10012: MatGetNonzeroState - Returns a 64 bit integer representing the current state of nonzeros in the matrix. If the
10013: matrix has had no new nonzero locations added to the matrix since the previous call then the value will be the
10014: same; otherwise it will be larger.
10016: Not Collective
10018: Input Parameter:
10019: . A - the matrix
10021: Output Parameter:
10022: . state - the current state
10024: Notes: You can only compare states from two different calls to the SAME matrix; you cannot compare calls between
10025: different matrices.
10027: Level: intermediate
10029: @*/
10030: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10031: {
10034: *state = mat->nonzerostate;
10035: return(0);
10036: }
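/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): detecting whether
   new nonzero locations were introduced into a matrix between two points in a computation, e.g. to
   decide whether structure-dependent setup must be redone. ExampleNonzeroState and A are
   hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleNonzeroState(Mat A)
{
  PetscObjectState before,after;

  MatGetNonzeroState(A,&before);
  /* ... insert values into A and reassemble ... */
  MatGetNonzeroState(A,&after);
  if (after > before) {
    /* new nonzero locations appeared: redo any structure-dependent setup here */
  }
  return(0);
}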
10040: /*@
10041: MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10042: matrices from each processor
10044: Collective on MPI_Comm
10046: Input Parameters:
10047: + comm - the communicator the parallel matrix will live on
10048: . seqmat - the input sequential matrix
10049: . n - number of local columns (or PETSC_DECIDE)
10050: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10052: Output Parameter:
10053: . mpimat - the parallel matrix generated
10055: Level: advanced
10057: Notes: The number of columns of the matrix in EACH processor MUST be the same.
10059: @*/
10060: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10061: {
10063: PetscMPIInt size;
10066: MPI_Comm_size(comm,&size);
10067: if (size == 1) {
10068: if (reuse == MAT_INITIAL_MATRIX) {
10069: MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
10070: } else {
10071: MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
10072: }
10073: return(0);
10074: }
10076: if (!seqmat->ops->creatempimatconcatenateseqmat) SETERRQ1(PetscObjectComm((PetscObject)seqmat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)seqmat)->type_name);
10077: PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10078: (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10079: PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10080: return(0);
10081: }
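/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): stacking one
   sequential matrix per rank into a single parallel matrix; every rank's sequential matrix must
   have the same number of columns. ExampleConcatenateSeqMats and seqA are hypothetical; error
   checking is omitted.
*/
static PetscErrorCode ExampleConcatenateSeqMats(Mat seqA,Mat *Ampi)
{
  MatCreateMPIMatConcatenateSeqMat(PETSC_COMM_WORLD,seqA,PETSC_DECIDE,MAT_INITIAL_MATRIX,Ampi);
  return(0);
}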
10085: /*@
10086: MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10087: ranks' ownership ranges.
10089: Collective on A
10091: Input Parameters:
10092: + A - the matrix to create subdomains from
10093: - N - requested number of subdomains
10096: Output Parameters:
10097: + n - number of subdomains resulting on this rank
10098: - iss - IS list with indices of subdomains on this rank
10100: Level: advanced
10102: Notes: number of subdomains must be smaller than the communicator size
10103: @*/
10104: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10105: {
10106: MPI_Comm comm,subcomm;
10107: PetscMPIInt size,rank,color,subsize,subrank;
10108: PetscInt rstart,rend,k;
10109: PetscErrorCode ierr;
10112: PetscObjectGetComm((PetscObject)A,&comm);
10113: MPI_Comm_size(comm,&size);
10114: MPI_Comm_rank(comm,&rank);
10115: if (N < 1 || N >= (PetscInt)size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and < %D, got N = %D",size,N);
10116: *n = 1;
10117: k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10118: color = rank/k;
10119: MPI_Comm_split(comm,color,rank,&subcomm);
10120: MPI_Comm_size(subcomm,&subsize);
10121: MPI_Comm_rank(subcomm,&subrank);
10122: PetscMalloc1(1,iss);
10123: MatGetOwnershipRange(A,&rstart,&rend);
10124: ISCreateStride(subcomm,rend-rstart,rstart,1,*iss);
10125: return(0);
10126: }
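/*
   A minimal usage sketch (editor's addition, not part of the PETSc source above): coalescing the
   ownership ranges of a matrix into 2 subdomains (the communicator must have more than 2 ranks),
   for example to hand the resulting index sets to a domain-decomposition preconditioner.
   ExampleCoalescedSubdomains and A are hypothetical; error checking is omitted.
*/
static PetscErrorCode ExampleCoalescedSubdomains(Mat A)
{
  PetscInt n,i;
  IS       *iss;

  MatSubdomainsCreateCoalesce(A,2,&n,&iss);
  /* ... pass (n,iss) to a preconditioner that accepts subdomain index sets ... */
  for (i=0; i<n; i++) {ISDestroy(&iss[i]);}
  PetscFree(iss);
  return(0);
}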