Actual source code: mumps.c
2: /*
3: Provides an interface to the MUMPS_4.3.1 sparse solver
4: */
5: #include "src/mat/impls/aij/seq/aij.h"
6: #include "src/mat/impls/aij/mpi/mpiaij.h"
7: #include "src/mat/impls/sbaij/seq/sbaij.h"
8: #include "src/mat/impls/sbaij/mpi/mpisbaij.h"
11: #if defined(PETSC_USE_COMPLEX)
12: #include "zmumps_c.h"
13: #else
14: #include "dmumps_c.h"
15: #endif
17: #define JOB_INIT -1
18: #define JOB_END -2
19: /* macros s.t. indices match MUMPS documentation */
20: #define ICNTL(I) icntl[(I)-1]
21: #define CNTL(I) cntl[(I)-1]
22: #define INFOG(I) infog[(I)-1]
23: #define INFO(I) info[(I)-1]
24: #define RINFOG(I) rinfog[(I)-1]
25: #define RINFO(I) rinfo[(I)-1]
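/* For example, lu->id.ICNTL(7) expands to lu->id.icntl[6], so the parameter numbers used
   throughout this file can be compared one-to-one with the MUMPS User's Guide. */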
27: typedef struct {
28: #if defined(PETSC_USE_COMPLEX)
29: ZMUMPS_STRUC_C id;
30: #else
31: DMUMPS_STRUC_C id;
32: #endif
33: MatStructure matstruc;
34: int myid,size,*irn,*jcn,sym;
35: PetscScalar *val;
36: MPI_Comm comm_mumps;
38: PetscTruth isAIJ,CleanUpMUMPS;
39: PetscErrorCode (*MatDuplicate)(Mat,MatDuplicateOption,Mat*);
40: PetscErrorCode (*MatView)(Mat,PetscViewer);
41: PetscErrorCode (*MatAssemblyEnd)(Mat,MatAssemblyType);
42: PetscErrorCode (*MatLUFactorSymbolic)(Mat,IS,IS,MatFactorInfo*,Mat*);
43: PetscErrorCode (*MatCholeskyFactorSymbolic)(Mat,IS,MatFactorInfo*,Mat*);
44: PetscErrorCode (*MatDestroy)(Mat);
45: PetscErrorCode (*specialdestroy)(Mat);
46: PetscErrorCode (*MatPreallocate)(Mat,int,int,int*,int,int*);
47: } Mat_MUMPS;
49: EXTERN PetscErrorCode MatDuplicate_MUMPS(Mat,MatDuplicateOption,Mat*);
51: PetscErrorCode MatConvert_SBAIJ_SBAIJMUMPS(Mat,const MatType,Mat*);
53: /* convert Petsc mpiaij matrix to triples: row[nz], col[nz], val[nz] */
54: /*
55: input:
56: A - matrix in mpiaij or mpisbaij (bs=1) format
57: shift - 0: C style output triple; 1: Fortran style output triple.
58: valOnly - FALSE: space is allocated and the row, col, and value arrays are filled
59: TRUE: only the values in the v array are updated
60: output:
61: nnz - dim of r, c, and v (number of local nonzero entries of A)
62: r, c, v - row and col index, matrix values (matrix triples)
63: */
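/* A small illustration with hypothetical values (shift=1, so the triples are 1-based for MUMPS):

     local rows of A = [ 10  0 ]        r = {1, 2, 2}
                       [  3  4 ]   ->   c = {1, 1, 2}
                                        v = {10, 3, 4}

   For an MPI matrix the routine below emits, for each row, first the off-process (B-part) entries
   whose global column index is smaller than the first A-part column of that row, then the A-part
   entries, then the remaining B-part entries. */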
64: PetscErrorCode MatConvertToTriples(Mat A,int shift,PetscTruth valOnly,int *nnz,int **r, int **c, PetscScalar **v) {
65: int *ai, *aj, *bi, *bj, rstart,nz, *garray;
67: int i,j,jj,jB,irow,m=A->m,*ajj,*bjj,countA,countB,colA_start,jcol;
68: int *row,*col;
69: PetscScalar *av, *bv,*val;
70: Mat_MUMPS *mumps=(Mat_MUMPS*)A->spptr;
73: if (mumps->isAIJ){
74: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
75: Mat_SeqAIJ *aa=(Mat_SeqAIJ*)(mat->A)->data;
76: Mat_SeqAIJ *bb=(Mat_SeqAIJ*)(mat->B)->data;
77: nz = aa->nz + bb->nz;
78: ai=aa->i; aj=aa->j; bi=bb->i; bj=bb->j; rstart= mat->rstart;
79: garray = mat->garray;
80: av=aa->a; bv=bb->a;
81:
82: } else {
83: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)A->data;
84: Mat_SeqSBAIJ *aa=(Mat_SeqSBAIJ*)(mat->A)->data;
85: Mat_SeqBAIJ *bb=(Mat_SeqBAIJ*)(mat->B)->data;
86: if (A->bs > 1) SETERRQ1(PETSC_ERR_SUP," bs=%d is not supported yet\n", A->bs);
87: nz = aa->nz + bb->nz;
88: ai=aa->i; aj=aa->j; bi=bb->i; bj=bb->j; rstart= mat->rstart;
89: garray = mat->garray;
90: av=aa->a; bv=bb->a;
91: }
93: if (!valOnly){
94: PetscMalloc(nz*sizeof(int),&row);
95: PetscMalloc(nz*sizeof(int),&col);
96: PetscMalloc(nz*sizeof(PetscScalar),&val);
97: *r = row; *c = col; *v = val;
98: } else {
99: row = *r; col = *c; val = *v;
100: }
101: *nnz = nz;
103: jj = 0; irow = rstart;
104: for ( i=0; i<m; i++ ) {
105: ajj = aj + ai[i]; /* ptr to the beginning of this row */
106: countA = ai[i+1] - ai[i];
107: countB = bi[i+1] - bi[i];
108: bjj = bj + bi[i];
110: /* get jB, the starting local col index for the 2nd B-part */
111: colA_start = rstart + ajj[0]; /* the smallest col index for A */
112: j=-1;
113: do {
114: j++;
115: if (j == countB) break;
116: jcol = garray[bjj[j]];
117: } while (jcol < colA_start);
118: jB = j;
119:
120: /* B-part, smaller col index */
121: colA_start = rstart + ajj[0]; /* the smallest col index for A */
122: for (j=0; j<jB; j++){
123: jcol = garray[bjj[j]];
124: if (!valOnly){
125: row[jj] = irow + shift; col[jj] = jcol + shift;
127: }
128: val[jj++] = *bv++;
129: }
130: /* A-part */
131: for (j=0; j<countA; j++){
132: if (!valOnly){
133: row[jj] = irow + shift; col[jj] = rstart + ajj[j] + shift;
134: }
135: val[jj++] = *av++;
136: }
137: /* B-part, larger col index */
138: for (j=jB; j<countB; j++){
139: if (!valOnly){
140: row[jj] = irow + shift; col[jj] = garray[bjj[j]] + shift;
141: }
142: val[jj++] = *bv++;
143: }
144: irow++;
145: }
146:
147: return(0);
148: }
153: PetscErrorCode MatConvert_MUMPS_Base(Mat A,const MatType type,Mat *newmat)
154: {
156: Mat B=*newmat;
157: Mat_MUMPS *mumps=(Mat_MUMPS*)A->spptr;
158: void (*f)(void);
161: if (B != A) {
162: MatDuplicate(A,MAT_COPY_VALUES,&B);
163: }
164: B->ops->duplicate = mumps->MatDuplicate;
165: B->ops->view = mumps->MatView;
166: B->ops->assemblyend = mumps->MatAssemblyEnd;
167: B->ops->lufactorsymbolic = mumps->MatLUFactorSymbolic;
168: B->ops->choleskyfactorsymbolic = mumps->MatCholeskyFactorSymbolic;
169: B->ops->destroy = mumps->MatDestroy;
171: PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",&f);
172: if (f) {
173: PetscObjectComposeFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C","",(FCNVOID)mumps->MatPreallocate);
174: }
175: PetscFree(mumps);
177: PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqaij_aijmumps_C","",PETSC_NULL);
178: PetscObjectComposeFunction((PetscObject)B,"MatConvert_aijmumps_seqaij_C","",PETSC_NULL);
179: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_aijmumps_C","",PETSC_NULL);
180: PetscObjectComposeFunction((PetscObject)B,"MatConvert_aijmumps_mpiaij_C","",PETSC_NULL);
181: PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqsbaij_sbaijmumps_C","",PETSC_NULL);
182: PetscObjectComposeFunction((PetscObject)B,"MatConvert_sbaijmumps_seqsbaij_C","",PETSC_NULL);
183: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpisbaij_sbaijmumps_C","",PETSC_NULL);
184: PetscObjectComposeFunction((PetscObject)B,"MatConvert_sbaijmumps_mpisbaij_C","",PETSC_NULL);
186: PetscObjectChangeTypeName((PetscObject)B,type);
187: *newmat = B;
188: return(0);
189: }
194: PetscErrorCode MatDestroy_MUMPS(Mat A)
195: {
196: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
198: int size=lu->size;
199: PetscErrorCode (*specialdestroy)(Mat);
201: if (lu->CleanUpMUMPS) {
202: /* Terminate instance, deallocate memories */
203: lu->id.job=JOB_END;
204: #if defined(PETSC_USE_COMPLEX)
205: zmumps_c(&lu->id);
206: #else
207: dmumps_c(&lu->id);
208: #endif
209: if (lu->irn) {
210: PetscFree(lu->irn);
211: }
212: if (lu->jcn) {
213: PetscFree(lu->jcn);
214: }
215: if (size>1 && lu->val) {
216: PetscFree(lu->val);
217: }
218: MPI_Comm_free(&(lu->comm_mumps));
219: }
220: specialdestroy = lu->specialdestroy;
221: (*specialdestroy)(A);
222: (*A->ops->destroy)(A);
223: return(0);
224: }
228: PetscErrorCode MatDestroy_AIJMUMPS(Mat A)
229: {
231: int size;
234: MPI_Comm_size(A->comm,&size);
235: if (size==1) {
236: MatConvert_MUMPS_Base(A,MATSEQAIJ,&A);
237: } else {
238: MatConvert_MUMPS_Base(A,MATMPIAIJ,&A);
239: }
240: return(0);
241: }
245: PetscErrorCode MatDestroy_SBAIJMUMPS(Mat A)
246: {
248: int size;
251: MPI_Comm_size(A->comm,&size);
252: if (size==1) {
253: MatConvert_MUMPS_Base(A,MATSEQSBAIJ,&A);
254: } else {
255: MatConvert_MUMPS_Base(A,MATMPISBAIJ,&A);
256: }
257: return(0);
258: }
262: PetscErrorCode MatFactorInfo_MUMPS(Mat A,PetscViewer viewer) {
263: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
267: PetscViewerASCIIPrintf(viewer,"MUMPS run parameters:\n");
268: PetscViewerASCIIPrintf(viewer," SYM (matrix type): %d \n",lu->id.sym);
269: PetscViewerASCIIPrintf(viewer," PAR (host participation): %d \n",lu->id.par);
270: PetscViewerASCIIPrintf(viewer," ICNTL(4) (level of printing): %d \n",lu->id.ICNTL(4));
271: PetscViewerASCIIPrintf(viewer," ICNTL(5) (input mat struct): %d \n",lu->id.ICNTL(5));
272: PetscViewerASCIIPrintf(viewer," ICNTL(6) (matrix prescaling): %d \n",lu->id.ICNTL(6));
273: PetscViewerASCIIPrintf(viewer," ICNTL(7) (matrix ordering): %d \n",lu->id.ICNTL(7));
274: PetscViewerASCIIPrintf(viewer," ICNTL(9) (A/A^T x=b is solved): %d \n",lu->id.ICNTL(9));
275: PetscViewerASCIIPrintf(viewer," ICNTL(10) (max num of refinements): %d \n",lu->id.ICNTL(10));
276: PetscViewerASCIIPrintf(viewer," ICNTL(11) (error analysis): %d \n",lu->id.ICNTL(11));
277: if (!lu->myid && lu->id.ICNTL(11)>0) {
278: PetscPrintf(PETSC_COMM_SELF," RINFOG(4) (inf norm of input mat): %g\n",lu->id.RINFOG(4));
279: PetscPrintf(PETSC_COMM_SELF," RINFOG(5) (inf norm of solution): %g\n",lu->id.RINFOG(5));
280: PetscPrintf(PETSC_COMM_SELF," RINFOG(6) (inf norm of residual): %g\n",lu->id.RINFOG(6));
281: PetscPrintf(PETSC_COMM_SELF," RINFOG(7),RINFOG(8) (backward error est): %g, %g\n",lu->id.RINFOG(7),lu->id.RINFOG(8));
282: PetscPrintf(PETSC_COMM_SELF," RINFOG(9) (error estimate): %g \n",lu->id.RINFOG(9));
283: PetscPrintf(PETSC_COMM_SELF," RINFOG(10),RINFOG(11)(condition numbers): %g, %g\n",lu->id.RINFOG(10),lu->id.RINFOG(11));
284:
285: }
286: PetscViewerASCIIPrintf(viewer," ICNTL(12) (efficiency control): %d \n",lu->id.ICNTL(12));
287: PetscViewerASCIIPrintf(viewer," ICNTL(13) (efficiency control): %d \n",lu->id.ICNTL(13));
288: PetscViewerASCIIPrintf(viewer," ICNTL(14) (percentage of estimated workspace increase): %d \n",lu->id.ICNTL(14));
289: PetscViewerASCIIPrintf(viewer," ICNTL(15) (efficiency control): %d \n",lu->id.ICNTL(15));
290: PetscViewerASCIIPrintf(viewer," ICNTL(18) (input mat struct): %d \n",lu->id.ICNTL(18));
292: PetscViewerASCIIPrintf(viewer," CNTL(1) (relative pivoting threshold): %g \n",lu->id.CNTL(1));
293: PetscViewerASCIIPrintf(viewer," CNTL(2) (stopping criterion of refinement): %g \n",lu->id.CNTL(2));
294: PetscViewerASCIIPrintf(viewer," CNTL(3) (absolute pivoting threshold): %g \n",lu->id.CNTL(3));
296: /* information local to each processor */
297: if (!lu->myid) PetscPrintf(PETSC_COMM_SELF, " RINFO(1) (local estimated flops for the elimination after analysis): \n");
298: PetscSynchronizedPrintf(A->comm," [%d] %g \n",lu->myid,lu->id.RINFO(1));
299: PetscSynchronizedFlush(A->comm);
300: if (!lu->myid) PetscPrintf(PETSC_COMM_SELF, " RINFO(2) (local estimated flops for the assembly after factorization): \n");
301: PetscSynchronizedPrintf(A->comm," [%d] %g \n",lu->myid,lu->id.RINFO(2));
302: PetscSynchronizedFlush(A->comm);
303: if (!lu->myid) PetscPrintf(PETSC_COMM_SELF, " RINFO(3) (local estimated flops for the elimination after factorization): \n");
304: PetscSynchronizedPrintf(A->comm," [%d] %g \n",lu->myid,lu->id.RINFO(3));
305: PetscSynchronizedFlush(A->comm);
307: if (!lu->myid){ /* information from the host */
308: PetscViewerASCIIPrintf(viewer," RINFOG(1) (global estimated flops for the elimination after analysis): %g \n",lu->id.RINFOG(1));
309: PetscViewerASCIIPrintf(viewer," RINFOG(2) (global estimated flops for the assembly after factorization): %g \n",lu->id.RINFOG(2));
310: PetscViewerASCIIPrintf(viewer," RINFOG(3) (global estimated flops for the elimination after factorization): %g \n",lu->id.RINFOG(3));
312: PetscViewerASCIIPrintf(viewer," INFOG(3) (estimated real workspace for factors on all processors after analysis): %d \n",lu->id.INFOG(3));
313: PetscViewerASCIIPrintf(viewer," INFOG(4) (estimated integer workspace for factors on all processors after analysis): %d \n",lu->id.INFOG(4));
314: PetscViewerASCIIPrintf(viewer," INFOG(5) (estimated maximum front size in the complete tree): %d \n",lu->id.INFOG(5));
315: PetscViewerASCIIPrintf(viewer," INFOG(6) (number of nodes in the complete tree): %d \n",lu->id.INFOG(6));
316: PetscViewerASCIIPrintf(viewer," INFOG(7) (ordering option effectively uese after analysis): %d \n",lu->id.INFOG(7));
317: PetscViewerASCIIPrintf(viewer," INFOG(8) (structural symmetry in percent of the permuted matrix after analysis): %d \n",lu->id.INFOG(8));
318: PetscViewerASCIIPrintf(viewer," INFOG(9) (total real space store the matrix factors after analysis): %d \n",lu->id.INFOG(9));
319: PetscViewerASCIIPrintf(viewer," INFOG(10) (total integer space store the matrix factors after analysis): %d \n",lu->id.INFOG(10));
320: PetscViewerASCIIPrintf(viewer," INFOG(11) (order of largest frontal matrix): %d \n",lu->id.INFOG(11));
321: PetscViewerASCIIPrintf(viewer," INFOG(12) (number of off-diagonal pivots): %d \n",lu->id.INFOG(12));
322: PetscViewerASCIIPrintf(viewer," INFOG(13) (number of delayed pivots after factorization): %d \n",lu->id.INFOG(13));
323: PetscViewerASCIIPrintf(viewer," INFOG(14) (number of memory compress after factorization): %d \n",lu->id.INFOG(14));
324: PetscViewerASCIIPrintf(viewer," INFOG(15) (number of steps of iterative refinement after solution): %d \n",lu->id.INFOG(15));
325: PetscViewerASCIIPrintf(viewer," INFOG(16) (estimated size (in million of bytes) of all MUMPS internal data for factorization after analysis: value on the most memory consuming processor): %d \n",lu->id.INFOG(16));
326: PetscViewerASCIIPrintf(viewer," INFOG(17) (estimated size of all MUMPS internal data for factorization after analysis: sum over all processors): %d \n",lu->id.INFOG(17));
327: PetscViewerASCIIPrintf(viewer," INFOG(18) (size of all MUMPS internal data allocated during factorization: value on the most memory consuming processor): %d \n",lu->id.INFOG(18));
328: PetscViewerASCIIPrintf(viewer," INFOG(19) (size of all MUMPS internal data allocated during factorization: sum over all processors): %d \n",lu->id.INFOG(19));
329: PetscViewerASCIIPrintf(viewer," INFOG(20) (estimated number of entries in the factors): %d \n",lu->id.INFOG(20));
330: }
332: return(0);
333: }
337: PetscErrorCode MatView_AIJMUMPS(Mat A,PetscViewer viewer) {
339: PetscTruth iascii;
340: PetscViewerFormat format;
341: Mat_MUMPS *mumps=(Mat_MUMPS*)(A->spptr);
344: (*mumps->MatView)(A,viewer);
346: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
347: if (iascii) {
348: PetscViewerGetFormat(viewer,&format);
349: if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
350: MatFactorInfo_MUMPS(A,viewer);
351: }
352: }
353: return(0);
354: }
358: PetscErrorCode MatSolve_AIJMUMPS(Mat A,Vec b,Vec x) {
359: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
360: PetscScalar *array;
361: Vec x_seq;
362: IS iden;
363: VecScatter scat;
367: if (lu->size > 1){
368: if (!lu->myid){
369: VecCreateSeq(PETSC_COMM_SELF,A->N,&x_seq);
370: ISCreateStride(PETSC_COMM_SELF,A->N,0,1,&iden);
371: } else {
372: VecCreateSeq(PETSC_COMM_SELF,0,&x_seq);
373: ISCreateStride(PETSC_COMM_SELF,0,0,1,&iden);
374: }
375: VecScatterCreate(b,iden,x_seq,iden,&scat);
376: ISDestroy(iden);
378: VecScatterBegin(b,x_seq,INSERT_VALUES,SCATTER_FORWARD,scat);
379: VecScatterEnd(b,x_seq,INSERT_VALUES,SCATTER_FORWARD,scat);
380: if (!lu->myid) {VecGetArray(x_seq,&array);}
381: } else { /* size == 1 */
382: VecCopy(b,x);
383: VecGetArray(x,&array);
384: }
385: if (!lu->myid) { /* define rhs on the host */
386: #if defined(PETSC_USE_COMPLEX)
387: lu->id.rhs = (mumps_double_complex*)array;
388: #else
389: lu->id.rhs = array;
390: #endif
391: }
393: /* solve phase */
394: lu->id.job=3;
395: #if defined(PETSC_USE_COMPLEX)
396: zmumps_c(&lu->id);
397: #else
398: dmumps_c(&lu->id);
399: #endif
400: if (lu->id.INFOG(1) < 0) {
401: SETERRQ1(PETSC_ERR_LIB,"Error reported by MUMPS in solve phase: INFOG(1)=%d\n",lu->id.INFOG(1));
402: }
404: /* convert mumps solution x_seq to petsc mpi x */
405: if (lu->size > 1) {
406: if (!lu->myid){
407: VecRestoreArray(x_seq,&array);
408: }
409: VecScatterBegin(x_seq,x,INSERT_VALUES,SCATTER_REVERSE,scat);
410: VecScatterEnd(x_seq,x,INSERT_VALUES,SCATTER_REVERSE,scat);
411: VecScatterDestroy(scat);
412: VecDestroy(x_seq);
413: } else {
414: VecRestoreArray(x,&array);
415: }
416:
417: return(0);
418: }
420: /*
421: input:
422: F: numeric factor
423: output:
424: nneg: total number of negative pivots
425: nzero: 0
426: npos: (global dimension of F) - nneg
427: */
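/* For instance (hypothetical values): a factored matrix of global dimension 100 with
   INFOG(12)=3 negative pivots yields nneg=3, nzero=0, npos=97 on every process,
   since nneg is broadcast from the host below. */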
431: PetscErrorCode MatGetInertia_SBAIJMUMPS(Mat F,int *nneg,int *nzero,int *npos)
432: {
433: Mat_MUMPS *lu =(Mat_MUMPS*)F->spptr;
435: int size;
438: MPI_Comm_size(F->comm,&size);
439: /* MUMPS 4.3.1 calls ScaLAPACK when ICNTL(13)=0 (default), which cannot compute the inertia of a dense matrix. Set ICNTL(13)=1 to skip ScaLAPACK */
440: if (size > 1 && lu->id.ICNTL(13) != 1){
441: SETERRQ1(PETSC_ERR_ARG_WRONG,"ICNTL(13)=%d. -mat_mumps_icntl_13 must be set as 1 for correct global matrix inertia\n",lu->id.ICNTL(13));
442: }
443: if (nneg){
444: if (!lu->myid){
445: *nneg = lu->id.INFOG(12);
446: }
447: MPI_Bcast(nneg,1,MPI_INT,0,lu->comm_mumps);
448: }
449: if (nzero) *nzero = 0;
450: if (npos) *npos = F->M - (*nneg);
451: return(0);
452: }
456: PetscErrorCode MatFactorNumeric_AIJMUMPS(Mat A,Mat *F) {
457: Mat_MUMPS *lu =(Mat_MUMPS*)(*F)->spptr;
458: Mat_MUMPS *lua=(Mat_MUMPS*)(A)->spptr;
460: int rnz,nnz,nz,i,M=A->M,*ai,*aj,icntl;
461: PetscTruth valOnly,flg;
464: if (lu->matstruc == DIFFERENT_NONZERO_PATTERN){
465: (*F)->ops->solve = MatSolve_AIJMUMPS;
467: /* Initialize a MUMPS instance */
468: MPI_Comm_rank(A->comm, &lu->myid);
469: MPI_Comm_size(A->comm,&lu->size);
470: lua->myid = lu->myid; lua->size = lu->size;
471: lu->id.job = JOB_INIT;
472: MPI_Comm_dup(A->comm,&(lu->comm_mumps));
473: lu->id.comm_fortran = lu->comm_mumps;
475: /* Set mumps options */
476: PetscOptionsBegin(A->comm,A->prefix,"MUMPS Options","Mat");
477: lu->id.par=1; /* host participates in factorization and solve */
478: lu->id.sym=lu->sym;
479: if (lu->sym == 2){
480: PetscOptionsInt("-mat_mumps_sym","SYM: (1,2)","None",lu->id.sym,&icntl,&flg);
481: if (flg && icntl == 1) lu->id.sym=icntl; /* matrix is spd */
482: }
483: #if defined(PETSC_USE_COMPLEX)
484: zmumps_c(&lu->id);
485: #else
486: dmumps_c(&lu->id);
487: #endif
488:
489: if (lu->size == 1){
490: lu->id.ICNTL(18) = 0; /* centralized assembled matrix input */
491: } else {
492: lu->id.ICNTL(18) = 3; /* distributed assembled matrix input */
493: }
495: icntl=-1;
496: PetscOptionsInt("-mat_mumps_icntl_4","ICNTL(4): level of printing (0 to 4)","None",lu->id.ICNTL(4),&icntl,&flg);
497: if ((flg && icntl > 0) || PetscLogPrintInfo) {
498: lu->id.ICNTL(4)=icntl; /* and use mumps default icntl(i), i=1,2,3 */
499: } else { /* no output */
500: lu->id.ICNTL(1) = 0; /* error message, default= 6 */
501: lu->id.ICNTL(2) = -1; /* output stream for diagnostic printing, statistics, and warning. default=0 */
502: lu->id.ICNTL(3) = -1; /* output stream for global information, default=6 */
503: lu->id.ICNTL(4) = 0; /* level of printing, 0,1,2,3,4, default=2 */
504: }
505: PetscOptionsInt("-mat_mumps_icntl_6","ICNTL(6): matrix prescaling (0 to 7)","None",lu->id.ICNTL(6),&lu->id.ICNTL(6),PETSC_NULL);
506: icntl=-1;
507: PetscOptionsInt("-mat_mumps_icntl_7","ICNTL(7): matrix ordering (0 to 7)","None",lu->id.ICNTL(7),&icntl,&flg);
508: if (flg) {
509: if (icntl== 1){
510: SETERRQ(PETSC_ERR_SUP,"pivot order be set by the user in PERM_IN -- not supported by the PETSc/MUMPS interface\n");
511: } else {
512: lu->id.ICNTL(7) = icntl;
513: }
514: }
515: PetscOptionsInt("-mat_mumps_icntl_9","ICNTL(9): A or A^T x=b to be solved. 1: A; otherwise: A^T","None",lu->id.ICNTL(9),&lu->id.ICNTL(9),PETSC_NULL);
516: PetscOptionsInt("-mat_mumps_icntl_10","ICNTL(10): max num of refinements","None",lu->id.ICNTL(10),&lu->id.ICNTL(10),PETSC_NULL);
517: PetscOptionsInt("-mat_mumps_icntl_11","ICNTL(11): error analysis, a positive value returns statistics (by -ksp_view)","None",lu->id.ICNTL(11),&lu->id.ICNTL(11),PETSC_NULL);
518: PetscOptionsInt("-mat_mumps_icntl_12","ICNTL(12): efficiency control","None",lu->id.ICNTL(12),&lu->id.ICNTL(12),PETSC_NULL);
519: PetscOptionsInt("-mat_mumps_icntl_13","ICNTL(13): efficiency control","None",lu->id.ICNTL(13),&lu->id.ICNTL(13),PETSC_NULL);
520: PetscOptionsInt("-mat_mumps_icntl_14","ICNTL(14): percentage of estimated workspace increase","None",lu->id.ICNTL(14),&lu->id.ICNTL(14),PETSC_NULL);
521: PetscOptionsInt("-mat_mumps_icntl_15","ICNTL(15): efficiency control","None",lu->id.ICNTL(15),&lu->id.ICNTL(15),PETSC_NULL);
523: /*
524: PetscOptionsInt("-mat_mumps_icntl_16","ICNTL(16): 1: rank detection; 2: rank detection and nullspace","None",lu->id.ICNTL(16),&icntl,&flg);
525: if (flg){
526: if (icntl >-1 && icntl <3 ){
527: if (lu->myid==0) lu->id.ICNTL(16) = icntl;
528: } else {
529: SETERRQ1(PETSC_ERR_SUP,"ICNTL(16)=%d -- not supported\n",icntl);
530: }
531: }
532: */
534: PetscOptionsReal("-mat_mumps_cntl_1","CNTL(1): relative pivoting threshold","None",lu->id.CNTL(1),&lu->id.CNTL(1),PETSC_NULL);
535: PetscOptionsReal("-mat_mumps_cntl_2","CNTL(2): stopping criterion of refinement","None",lu->id.CNTL(2),&lu->id.CNTL(2),PETSC_NULL);
536: PetscOptionsReal("-mat_mumps_cntl_3","CNTL(3): absolute pivoting threshold","None",lu->id.CNTL(3),&lu->id.CNTL(3),PETSC_NULL);
537: PetscOptionsEnd();
538: }
540: /* define matrix A */
541: switch (lu->id.ICNTL(18)){
542: case 0: /* centralized assembled matrix input (size=1) */
543: if (!lu->myid) {
544: if (lua->isAIJ){
545: Mat_SeqAIJ *aa = (Mat_SeqAIJ*)A->data;
546: nz = aa->nz;
547: ai = aa->i; aj = aa->j; lu->val = aa->a;
548: } else {
549: Mat_SeqSBAIJ *aa = (Mat_SeqSBAIJ*)A->data;
550: nz = aa->nz;
551: ai = aa->i; aj = aa->j; lu->val = aa->a;
552: }
553: if (lu->matstruc == DIFFERENT_NONZERO_PATTERN){ /* first numeric factorization, get irn and jcn */
554: PetscMalloc(nz*sizeof(int),&lu->irn);
555: PetscMalloc(nz*sizeof(int),&lu->jcn);
556: nz = 0;
557: for (i=0; i<M; i++){
558: rnz = ai[i+1] - ai[i];
559: while (rnz--) { /* Fortran row/col index! */
560: lu->irn[nz] = i+1; lu->jcn[nz] = (*aj)+1; aj++; nz++;
561: }
562: }
563: }
564: }
565: break;
566: case 3: /* distributed assembled matrix input (size>1) */
567: if (lu->matstruc == DIFFERENT_NONZERO_PATTERN){
568: valOnly = PETSC_FALSE;
569: } else {
570: valOnly = PETSC_TRUE; /* only update mat values, not row and col index */
571: }
572: MatConvertToTriples(A,1,valOnly, &nnz, &lu->irn, &lu->jcn, &lu->val);
573: break;
574: default: SETERRQ(PETSC_ERR_SUP,"Matrix input format is not supported by MUMPS.");
575: }
577: /* analysis phase */
578: if (lu->matstruc == DIFFERENT_NONZERO_PATTERN){
579: lu->id.n = M;
580: switch (lu->id.ICNTL(18)){
581: case 0: /* centralized assembled matrix input */
582: if (!lu->myid) {
583: lu->id.nz =nz; lu->id.irn=lu->irn; lu->id.jcn=lu->jcn;
584: if (lu->id.ICNTL(6)>1){
585: #if defined(PETSC_USE_COMPLEX)
586: lu->id.a = (mumps_double_complex*)lu->val;
587: #else
588: lu->id.a = lu->val;
589: #endif
590: }
591: }
592: break;
593: case 3: /* distributed assembled matrix input (size>1) */
594: lu->id.nz_loc = nnz;
595: lu->id.irn_loc=lu->irn; lu->id.jcn_loc=lu->jcn;
596: if (lu->id.ICNTL(6)>1) {
597: #if defined(PETSC_USE_COMPLEX)
598: lu->id.a_loc = (mumps_double_complex*)lu->val;
599: #else
600: lu->id.a_loc = lu->val;
601: #endif
602: }
603: break;
604: }
605: lu->id.job=1;
606: #if defined(PETSC_USE_COMPLEX)
607: zmumps_c(&lu->id);
608: #else
609: dmumps_c(&lu->id);
610: #endif
611: if (lu->id.INFOG(1) < 0) {
612: SETERRQ1(PETSC_ERR_LIB,"Error reported by MUMPS in analysis phase: INFOG(1)=%d\n",lu->id.INFOG(1));
613: }
614: }
616: /* numerical factorization phase */
617: if(!lu->id.ICNTL(18)) {
618: if (!lu->myid) {
619: #if defined(PETSC_USE_COMPLEX)
620: lu->id.a = (mumps_double_complex*)lu->val;
621: #else
622: lu->id.a = lu->val;
623: #endif
624: }
625: } else {
626: #if defined(PETSC_USE_COMPLEX)
627: lu->id.a_loc = (mumps_double_complex*)lu->val;
628: #else
629: lu->id.a_loc = lu->val;
630: #endif
631: }
632: lu->id.job=2;
633: #if defined(PETSC_USE_COMPLEX)
634: zmumps_c(&lu->id);
635: #else
636: dmumps_c(&lu->id);
637: #endif
638: if (lu->id.INFOG(1) < 0) {
639: if (lu->id.INFO(1) == -13) {
640: SETERRQ1(PETSC_ERR_LIB,"Error reported by MUMPS in numerical factorization phase: Cannot allocate required memory %d megabytes\n",lu->id.INFO(2));
641: } else {
642: SETERRQ2(PETSC_ERR_LIB,"Error reported by MUMPS in numerical factorization phase: INFO(1)=%d, INFO(2)=%d\n",lu->id.INFO(1),lu->id.INFO(2));
643: }
644: }
646: if (!lu->myid && lu->id.ICNTL(16) > 0){
647: SETERRQ1(PETSC_ERR_LIB," lu->id.ICNTL(16):=%d\n",lu->id.INFOG(16));
648: }
649:
650: (*F)->assembled = PETSC_TRUE;
651: lu->matstruc = SAME_NONZERO_PATTERN;
652: lu->CleanUpMUMPS = PETSC_TRUE;
653: return(0);
654: }
656: /* Note the Petsc r and c permutations are ignored */
659: PetscErrorCode MatLUFactorSymbolic_AIJMUMPS(Mat A,IS r,IS c,MatFactorInfo *info,Mat *F) {
660: Mat B;
661: Mat_MUMPS *lu;
666: /* Create the factorization matrix */
667: MatCreate(A->comm,A->m,A->n,A->M,A->N,&B);
668: MatSetType(B,A->type_name);
669: MatSeqAIJSetPreallocation(B,0,PETSC_NULL);
670: MatMPIAIJSetPreallocation(B,0,PETSC_NULL,0,PETSC_NULL);
672: B->ops->lufactornumeric = MatFactorNumeric_AIJMUMPS;
673: B->factor = FACTOR_LU;
674: lu = (Mat_MUMPS*)B->spptr;
675: lu->sym = 0;
676: lu->matstruc = DIFFERENT_NONZERO_PATTERN;
678: *F = B;
679: return(0);
680: }
682: /* Note the Petsc r permutation is ignored */
685: PetscErrorCode MatCholeskyFactorSymbolic_SBAIJMUMPS(Mat A,IS r,MatFactorInfo *info,Mat *F) {
686: Mat B;
687: Mat_MUMPS *lu;
691: /* Create the factorization matrix */
692: MatCreate(A->comm,A->m,A->n,A->M,A->N,&B);
693: MatSetType(B,A->type_name);
694: MatSeqSBAIJSetPreallocation(B,1,0,PETSC_NULL);
695: MatMPISBAIJSetPreallocation(B,1,0,PETSC_NULL,0,PETSC_NULL);
697: B->ops->choleskyfactornumeric = MatFactorNumeric_AIJMUMPS;
698: B->ops->getinertia = MatGetInertia_SBAIJMUMPS;
699: B->factor = FACTOR_CHOLESKY;
700: lu = (Mat_MUMPS*)B->spptr;
701: lu->sym = 2;
702: lu->matstruc = DIFFERENT_NONZERO_PATTERN;
704: *F = B;
705: return(0);
706: }
710: PetscErrorCode MatAssemblyEnd_AIJMUMPS(Mat A,MatAssemblyType mode) {
712: Mat_MUMPS *mumps=(Mat_MUMPS*)A->spptr;
715: (*mumps->MatAssemblyEnd)(A,mode);
717: mumps->MatLUFactorSymbolic = A->ops->lufactorsymbolic;
718: mumps->MatCholeskyFactorSymbolic = A->ops->choleskyfactorsymbolic;
719: A->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMUMPS;
720: return(0);
721: }
726: PetscErrorCode MatConvert_AIJ_AIJMUMPS(Mat A,const MatType newtype,Mat *newmat)
727: {
729: PetscMPIInt size;
730: MPI_Comm comm;
731: Mat B=*newmat;
732: Mat_MUMPS *mumps;
735: if (B != A) {
736: MatDuplicate(A,MAT_COPY_VALUES,&B);
737: }
739: PetscObjectGetComm((PetscObject)A,&comm);
740: PetscNew(Mat_MUMPS,&mumps);
742: mumps->MatDuplicate = A->ops->duplicate;
743: mumps->MatView = A->ops->view;
744: mumps->MatAssemblyEnd = A->ops->assemblyend;
745: mumps->MatLUFactorSymbolic = A->ops->lufactorsymbolic;
746: mumps->MatCholeskyFactorSymbolic = A->ops->choleskyfactorsymbolic;
747: mumps->MatDestroy = A->ops->destroy;
748: mumps->specialdestroy = MatDestroy_AIJMUMPS;
749: mumps->CleanUpMUMPS = PETSC_FALSE;
750: mumps->isAIJ = PETSC_TRUE;
752: B->spptr = (void*)mumps;
753: B->ops->duplicate = MatDuplicate_MUMPS;
754: B->ops->view = MatView_AIJMUMPS;
755: B->ops->assemblyend = MatAssemblyEnd_AIJMUMPS;
756: B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMUMPS;
757: B->ops->destroy = MatDestroy_MUMPS;
759: MPI_Comm_size(comm,&size);
760: if (size == 1) {
761: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_seqaij_aijmumps_C",
762: "MatConvert_AIJ_AIJMUMPS",MatConvert_AIJ_AIJMUMPS);
763: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_aijmumps_seqaij_C",
764: "MatConvert_MUMPS_Base",MatConvert_MUMPS_Base);
765: } else {
766: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_aijmumps_C",
767: "MatConvert_AIJ_AIJMUMPS",MatConvert_AIJ_AIJMUMPS);
768: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_aijmumps_mpiaij_C",
769: "MatConvert_MUMPS_Base",MatConvert_MUMPS_Base);
770: }
772: PetscLogInfo(0,"Using MUMPS for LU factorization and solves.");
773: PetscObjectChangeTypeName((PetscObject)B,newtype);
774: *newmat = B;
775: return(0);
776: }
779: /*MC
780: MATAIJMUMPS - MATAIJMUMPS = "aijmumps" - A matrix type providing direct solvers (LU) for distributed
781: and sequential matrices via the external package MUMPS.
783: If MUMPS is installed (see the manual for instructions
784: on how to declare the existence of external packages),
785: a matrix type can be constructed which invokes MUMPS solvers.
786: After calling MatCreate(...,A), simply call MatSetType(A,MATAIJMUMPS).
787: This matrix type is only supported for double precision real.
789: If created with a single process communicator, this matrix type inherits from MATSEQAIJ.
790: Otherwise, this matrix type inherits from MATMPIAIJ. Hence for single process communicators,
791: MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported
792: for communicators controlling multiple processes. It is recommended that you call both of
793: the above preallocation routines for simplicity. One can also call MatConvert for an inplace
794: conversion to or from the MATSEQAIJ or MATMPIAIJ type (depending on the communicator size)
795: without data copy.
797: Options Database Keys:
798: + -mat_type aijmumps - sets the matrix type to "aijmumps" during a call to MatSetFromOptions()
799: . -mat_mumps_sym <0,1,2> - 0 the matrix is unsymmetric, 1 symmetric positive definite, 2 symmetric
800: . -mat_mumps_icntl_4 <0,1,2,3,4> - print level
801: . -mat_mumps_icntl_6 <0,...,7> - matrix prescaling options (see MUMPS User's Guide)
802: . -mat_mumps_icntl_7 <0,...,7> - matrix orderings (see MUMPS User's Guide)
803: . -mat_mumps_icntl_9 <1,2> - A or A^T x=b to be solved: 1 denotes A, 2 denotes A^T
804: . -mat_mumps_icntl_10 <n> - maximum number of iterative refinements
805: . -mat_mumps_icntl_11 <n> - error analysis, a positive value returns statistics during -ksp_view
806: . -mat_mumps_icntl_12 <n> - efficiency control (see MUMPS User's Guide)
807: . -mat_mumps_icntl_13 <n> - efficiency control (see MUMPS User's Guide)
808: . -mat_mumps_icntl_14 <n> - efficiency control (see MUMPS User's Guide)
809: . -mat_mumps_icntl_15 <n> - efficiency control (see MUMPS User's Guide)
810: . -mat_mumps_cntl_1 <delta> - relative pivoting threshold
811: . -mat_mumps_cntl_2 <tol> - stopping criterion for refinement
812: - -mat_mumps_cntl_3 <adelta> - absolute pivoting threshold
814: Level: beginner
816: .seealso: MATSBAIJMUMPS
817: M*/
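/*
   Example usage (a sketch, not part of the original interface): solve A x = b with KSP, letting
   PCLU dispatch to the MUMPS LU factorization implemented above.  Assumes a MUMPS-enabled PETSc
   build of the same vintage as this file; matrix assembly and error checking (CHKERRQ) are abridged.

     #include "petscksp.h"

     Mat A;
     Vec x,b;
     KSP ksp;
     PC  pc;

     MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,n,&A);
     MatSetType(A,MATAIJMUMPS);
     (call both preallocation routines, as recommended above; the one matching the communicator size takes effect)
     MatSeqAIJSetPreallocation(A,5,PETSC_NULL);
     MatMPIAIJSetPreallocation(A,5,PETSC_NULL,5,PETSC_NULL);
     ... set entries with MatSetValues(), then MatAssemblyBegin/End(A,MAT_FINAL_ASSEMBLY) ...

     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCLU);
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);

   The -mat_mumps_* options listed above are read inside MatFactorNumeric_AIJMUMPS() at the
   first factorization, e.g. -mat_mumps_icntl_4 2 -mat_mumps_cntl_1 0.01.
*/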
822: PetscErrorCode MatCreate_AIJMUMPS(Mat A)
823: {
825: int size;
826: Mat A_diag;
827: MPI_Comm comm;
828:
830: /* Change type name before calling MatSetType to force proper construction of SeqAIJ or MPIAIJ */
831: /* and AIJMUMPS types */
832: PetscObjectChangeTypeName((PetscObject)A,MATAIJMUMPS);
833: PetscObjectGetComm((PetscObject)A,&comm);
834: MPI_Comm_size(comm,&size);
835: if (size == 1) {
836: MatSetType(A,MATSEQAIJ);
837: } else {
838: MatSetType(A,MATMPIAIJ);
839: A_diag = ((Mat_MPIAIJ *)A->data)->A;
840: MatConvert_AIJ_AIJMUMPS(A_diag,MATAIJMUMPS,&A_diag);
841: }
842: MatConvert_AIJ_AIJMUMPS(A,MATAIJMUMPS,&A);
843: return(0);
844: }
849: PetscErrorCode MatAssemblyEnd_SBAIJMUMPS(Mat A,MatAssemblyType mode)
850: {
852: Mat_MUMPS *mumps=(Mat_MUMPS*)A->spptr;
855: (*mumps->MatAssemblyEnd)(A,mode);
856: mumps->MatLUFactorSymbolic = A->ops->lufactorsymbolic;
857: mumps->MatCholeskyFactorSymbolic = A->ops->choleskyfactorsymbolic;
858: A->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SBAIJMUMPS;
859: return(0);
860: }
865: PetscErrorCode MatMPISBAIJSetPreallocation_MPISBAIJMUMPS(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
866: {
867: Mat A;
868: Mat_MUMPS *mumps=(Mat_MUMPS*)B->spptr;
872: /*
873: After performing the MPISBAIJ Preallocation, we need to convert the local diagonal block matrix
874: into MUMPS type so that the block jacobi preconditioner (for example) can use MUMPS. I would
875: like this to be done in the MatCreate routine, but the creation of this inner matrix requires
876: block size info so that PETSc can determine the local size properly. The block size info is set
877: in the preallocation routine.
878: */
879: (*mumps->MatPreallocate)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
880: A = ((Mat_MPISBAIJ *)B->data)->A;
881: MatConvert_SBAIJ_SBAIJMUMPS(A,MATSBAIJMUMPS,&A);
882: return(0);
883: }
889: PetscErrorCode MatConvert_SBAIJ_SBAIJMUMPS(Mat A,const MatType newtype,Mat *newmat)
890: {
892: PetscMPIInt size;
893: MPI_Comm comm;
894: Mat B=*newmat;
895: Mat_MUMPS *mumps;
896: void (*f)(void);
899: if (B != A) {
900: MatDuplicate(A,MAT_COPY_VALUES,&B);
901: }
903: PetscObjectGetComm((PetscObject)A,&comm);
904: PetscNew(Mat_MUMPS,&mumps);
906: mumps->MatDuplicate = A->ops->duplicate;
907: mumps->MatView = A->ops->view;
908: mumps->MatAssemblyEnd = A->ops->assemblyend;
909: mumps->MatLUFactorSymbolic = A->ops->lufactorsymbolic;
910: mumps->MatCholeskyFactorSymbolic = A->ops->choleskyfactorsymbolic;
911: mumps->MatDestroy = A->ops->destroy;
912: mumps->specialdestroy = MatDestroy_SBAIJMUMPS;
913: mumps->CleanUpMUMPS = PETSC_FALSE;
914: mumps->isAIJ = PETSC_FALSE;
915:
916: B->spptr = (void*)mumps;
917: B->ops->duplicate = MatDuplicate_MUMPS;
918: B->ops->view = MatView_AIJMUMPS;
919: B->ops->assemblyend = MatAssemblyEnd_SBAIJMUMPS;
920: B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SBAIJMUMPS;
921: B->ops->destroy = MatDestroy_MUMPS;
923: MPI_Comm_size(comm,&size);
924: if (size == 1) {
925: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_seqsbaij_sbaijmumps_C",
926: "MatConvert_SBAIJ_SBAIJMUMPS",MatConvert_SBAIJ_SBAIJMUMPS);
927: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_sbaijmumps_seqsbaij_C",
928: "MatConvert_MUMPS_Base",MatConvert_MUMPS_Base);
929: } else {
930: /* I really don't like needing to know the tag: MatMPISBAIJSetPreallocation_C */
931: PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",&f);
932: if (f) { /* This case should always be true when this routine is called */
933: mumps->MatPreallocate = (PetscErrorCode (*)(Mat,int,int,int*,int,int*))f;
934: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
935: "MatMPISBAIJSetPreallocation_MPISBAIJMUMPS",
936: MatMPISBAIJSetPreallocation_MPISBAIJMUMPS);
937: }
938: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpisbaij_sbaijmumps_C",
939: "MatConvert_SBAIJ_SBAIJMUMPS",MatConvert_SBAIJ_SBAIJMUMPS);
940: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_sbaijmumps_mpisbaij_C",
941: "MatConvert_MUMPS_Base",MatConvert_MUMPS_Base);
942: }
944: PetscLogInfo(0,"Using MUMPS for Cholesky factorization and solves.");
945: PetscObjectChangeTypeName((PetscObject)B,newtype);
946: *newmat = B;
947: return(0);
948: }
953: PetscErrorCode MatDuplicate_MUMPS(Mat A, MatDuplicateOption op, Mat *M) {
955: Mat_MUMPS *lu=(Mat_MUMPS *)A->spptr;
958: (*lu->MatDuplicate)(A,op,M);
959: PetscMemcpy((*M)->spptr,lu,sizeof(Mat_MUMPS));
960: return(0);
961: }
963: /*MC
964: MATSBAIJMUMPS - MATSBAIJMUMPS = "sbaijmumps" - A symmetric matrix type providing direct solvers (Cholesky) for
965: distributed and sequential matrices via the external package MUMPS.
967: If MUMPS is installed (see the manual for instructions
968: on how to declare the existence of external packages),
969: a matrix type can be constructed which invokes MUMPS solvers.
970: After calling MatCreate(...,A), simply call MatSetType(A,MATSBAIJMUMPS).
971: This matrix type is only supported for double precision real.
973: If created with a single process communicator, this matrix type inherits from MATSEQSBAIJ.
974: Otherwise, this matrix type inherits from MATMPISBAIJ. Hence for single process communicators,
975: MatSeqSBAIJSetPreallocation is supported, and similarly MatMPISBAIJSetPreallocation is supported
976: for communicators controlling multiple processes. It is recommended that you call both of
977: the above preallocation routines for simplicity. One can also call MatConvert for an inplace
978: conversion to or from the MATSEQSBAIJ or MATMPISBAIJ type (depending on the communicator size)
979: without data copy.
981: Options Database Keys:
982: + -mat_type sbaijmumps - sets the matrix type to "sbaijmumps" during a call to MatSetFromOptions()
983: . -mat_mumps_sym <0,1,2> - 0 the matrix is unsymmetric, 1 symmetric positive definite, 2 symmetric
984: . -mat_mumps_icntl_4 <0,...,4> - print level
985: . -mat_mumps_icntl_6 <0,...,7> - matrix prescaling options (see MUMPS User's Guide)
986: . -mat_mumps_icntl_7 <0,...,7> - matrix orderings (see MUMPS User's Guide)
987: . -mat_mumps_icntl_9 <1,2> - A or A^T x=b to be solved: 1 denotes A, 2 denotes A^T
988: . -mat_mumps_icntl_10 <n> - maximum number of iterative refinements
989: . -mat_mumps_icntl_11 <n> - error analysis, a positive value returns statistics during -ksp_view
990: . -mat_mumps_icntl_12 <n> - efficiency control (see MUMPS User's Guide)
991: . -mat_mumps_icntl_13 <n> - efficiency control (see MUMPS User's Guide)
992: . -mat_mumps_icntl_14 <n> - efficiency control (see MUMPS User's Guide)
993: . -mat_mumps_icntl_15 <n> - efficiency control (see MUMPS User's Guide)
994: . -mat_mumps_cntl_1 <delta> - relative pivoting threshold
995: . -mat_mumps_cntl_2 <tol> - stopping criterion for refinement
996: - -mat_mumps_cntl_3 <adelta> - absolute pivoting threshold
998: Level: beginner
1000: .seealso: MATAIJMUMPS
1001: M*/
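/*
   Example usage (a sketch, not part of the original interface): Cholesky factorization of a
   symmetric matrix of type MATSBAIJMUMPS (block size 1 only, as enforced in MatConvertToTriples
   above).  Assembly and error checking are abridged.

     Mat A;
     Vec x,b;
     KSP ksp;
     PC  pc;

     MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,n,&A);
     MatSetType(A,MATSBAIJMUMPS);
     MatSeqSBAIJSetPreallocation(A,1,5,PETSC_NULL);
     MatMPISBAIJSetPreallocation(A,1,5,PETSC_NULL,5,PETSC_NULL);
     ... set the upper triangular entries with MatSetValues(), then MatAssemblyBegin/End() ...

     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCCHOLESKY);
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);

   In parallel, add -mat_mumps_icntl_13 1 if the inertia of the factor is needed, as required by
   MatGetInertia_SBAIJMUMPS() above.
*/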
1006: PetscErrorCode MatCreate_SBAIJMUMPS(Mat A)
1007: {
1009: int size;
1012: /* Change type name before calling MatSetType to force proper construction of SeqSBAIJ or MPISBAIJ */
1013: /* and SBAIJMUMPS types */
1014: PetscObjectChangeTypeName((PetscObject)A,MATSBAIJMUMPS);
1015: MPI_Comm_size(A->comm,&size);
1016: if (size == 1) {
1017: MatSetType(A,MATSEQSBAIJ);
1018: } else {
1019: MatSetType(A,MATMPISBAIJ);
1020: }
1021: MatConvert_SBAIJ_SBAIJMUMPS(A,MATSBAIJMUMPS,&A);
1022: return(0);
1023: }