#define PETSCKSP_DLL

/*
   Provides an interface to the ML 5.0 smoothed aggregation preconditioner
*/
#include "private/pcimpl.h"
#include "src/ksp/pc/impls/mg/mgimpl.h"
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include <math.h>
#include "ml_config.h"
#include "ml_include.h"

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;      /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y;
} Mat_MLShell;
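
/*
   How Mat_MLShell is used (a minimal sketch; every call below appears later in this
   file in MatWrapML_SHELL() and MatMult_ML()): an ML_Operator is wrapped as a PETSc
   shell matrix by storing a Mat_MLShell as the shell context and routing MatMult()
   to ML_Operator_Apply():

     Mat_MLShell *ctx;
     Mat         S;
     PetscNew(Mat_MLShell,&ctx);
     ctx->mlmat = mlmat;                              // the ML operator to wrap
     MatCreateShell(comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,ctx,&S);
     MatShellSetOperation(S,MATOP_MULT,(void(*)(void))MatMult_ML);
     // MatMult_ML() recovers ctx via MatShellGetContext() and applies ctx->mlmat
*/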

/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  GridCtx        *gridctx;
  FineGridCtx    *PetscMLdata;
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal      Threshold,DampingFactor;
  PetscTruth     SpectralNormScheme_Anorm;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  MatReuse       reuse = MAT_INITIAL_MATRIX;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  if (pc->setupcalled){
    if (pc->flag == SAME_NONZERO_PATTERN){
      reuse       = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else {
      PC_ML          *pc_ml_new = PETSC_NULL;
      PetscContainer container_new;
      PetscNewLog(pc,PC_ML,&pc_ml_new);
      PetscContainerCreate(PETSC_COMM_SELF,&container_new);
      PetscContainerSetPointer(container_new,pc_ml_new);
      PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);
      PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);

      PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));
      PetscContainerDestroy(container);
      pc_ml = pc_ml_new;
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A = pc->pmat;
  MPI_Comm_size(((PetscObject)A)->comm,&size);
  pc_ml->size = size;
  if (size > 1){
    if (reuse) Aloc = PetscMLdata->Aloc;
    MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);
  } else {
    Aloc = A;
  }

  /* create and initialize struct 'PetscMLdata' */
  if (!reuse){
    PetscNewLog(pc,FineGridCtx,&PetscMLdata);
    pc_ml->PetscMLdata = PetscMLdata;
    PetscMalloc((Aloc->cmap.n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);

    VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);
    VecSetSizes(PetscMLdata->x,Aloc->cmap.n,Aloc->cmap.n);
    VecSetType(PetscMLdata->x,VECSEQ);

    VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);
    VecSetSizes(PetscMLdata->y,A->rmap.n,PETSC_DECIDE);
    VecSetType(PetscMLdata->y,VECSEQ);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create ML discretization matrix at the fine grid */
  /* ML requires the fine-grid matrix as input; from it ML determines the number of levels. */
  MatGetSize(Aloc,&m,&nlocal_allcols);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %D must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %D must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  if (!pc->setupcalled){
    PCMGSetLevels(pc,Nlevels,PETSC_NULL);
    PCSetFromOptions_MG(pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse){
    PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);
    pc_ml->gridctx = gridctx;
  }
  fine_level = Nlevels - 1;

  /* wrap ML matrices with PETSc matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        /* the ML matrix A changes its sparsity pattern although the PETSc A doesn't, thus gridctx[level].A must be recreated! */
        MatDestroy(gridctx[level].A);
      }
      MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        MatDestroy(gridctx[level].A);
      }
      MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse){
    for (level=0; level<fine_level; level++){
      level1 = level + 1;
      VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);
      VecSetSizes(gridctx[level].x,gridctx[level].A->cmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level].x,VECMPI);
      PCMGSetX(pc,level,gridctx[level].x);

      VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);
      VecSetSizes(gridctx[level].b,gridctx[level].A->rmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level].b,VECMPI);
      PCMGSetRhs(pc,level,gridctx[level].b);

      VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);
      VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level1].r,VECMPI);
      PCMGSetR(pc,level1,gridctx[level1].r);

      if (level == 0){
        PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
      } else {
        PCMGGetSmoother(pc,level,&gridctx[level].ksp);
      }
    }
    PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);
  }

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    PCMGSetInterpolation(pc,level1,gridctx[level].P);
    PCMGSetRestriction(pc,level1,gridctx[level].R);
    if (level > 0){
      PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
    }
    KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);
  }
  PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
  KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);

  /* now call PCSetUp_MG() */
  /*-------------------------------*/
  (*pc_ml->PCSetUp)(pc);
  return(0);
}

PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    PetscFree(pc_ml->PetscMLdata->pwork);
    if (pc_ml->size > 1) {MatDestroy(pc_ml->PetscMLdata->Aloc);}
    if (pc_ml->PetscMLdata->x){VecDestroy(pc_ml->PetscMLdata->x);}
    if (pc_ml->PetscMLdata->y){VecDestroy(pc_ml->PetscMLdata->y);}
  }
  PetscFree(pc_ml->PetscMLdata);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){MatDestroy(pc_ml->gridctx[level].A);}
    if (pc_ml->gridctx[level].P){MatDestroy(pc_ml->gridctx[level].P);}
    if (pc_ml->gridctx[level].R){MatDestroy(pc_ml->gridctx[level].R);}
    if (pc_ml->gridctx[level].x){VecDestroy(pc_ml->gridctx[level].x);}
    if (pc_ml->gridctx[level].b){VecDestroy(pc_ml->gridctx[level].b);}
    if (pc_ml->gridctx[level+1].r){VecDestroy(pc_ml->gridctx[level+1].r);}
  }
  PetscFree(pc_ml->gridctx);
  PetscFree(pc_ml);
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  /* detach pc and PC_ML and dereference container */
  PetscContainerDestroy(container);
  PetscObjectCompose((PetscObject)pc,"PC_ML",0);
  if (pc->ops->destroy) {
    (*pc->ops->destroy)(pc);
  }
  return(0);
}

PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,m,PrintLevel;
  PetscTruth     flg;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  PCMGType       mgtype;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* inherited MG options */
  PetscOptionsHead("Multigrid options (inherited)");
  PetscOptionsInt("-pc_mg_cycles","1 for V-cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);
  PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);
  PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);
  PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);
  PetscOptionsTail();

  /* ML options */
  PetscOptionsHead("ML options");
  /* set defaults */
  PrintLevel = 0;
  indx       = 0;
  PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);
  ML_Set_PrintLevel(PrintLevel);
  PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);
  PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);
  PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL); /* ??? */
  pc_ml->CoarsenScheme = indx;

  PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);
  PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);
  PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);

  PetscOptionsTail();
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine-grid discretization matrix. The coarser-grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)

   Level: intermediate

   Concepts: multigrid

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
          PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
          PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
          PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
          PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
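
/*
   Typical use (a minimal sketch; assumes PETSc was configured with ML so the "ml"
   type is registered, and that A, b, x are an assembled AIJ matrix and vectors):

     KSP ksp;
     PC  pc;
     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,"ml");        // selects PCCreate_ML() below
     KSPSetFromOptions(ksp);    // picks up e.g. -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS
     KSPSolve(ksp,b,x);         // the first solve triggers PCSetUp_ML()

   or equivalently run with -pc_type ml plus any of the options listed above.
*/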

PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PetscContainer container;

  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  PetscNewLog(pc,PC_ML,&pc_ml);
  PetscContainerCreate(PETSC_COMM_SELF,&container);
  PetscContainerSetPointer(container,pc_ml);
  PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);
  PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1; /* ??? */
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* override the PCMG function pointers with the PCML versions */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  return(0);
}

int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                   int allocated_space,int columns[],double values[],int row_lengths[])
{
  Mat         Aloc;
  Mat_SeqAIJ  *a;
  PetscInt    m,i,j,k=0,row,*aj;
  PetscScalar *aa;
  FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  MatGetSize(Aloc,&m,PETSC_NULL);

  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if (row >= 0 && row <= m-1) { /* copy the row only when it is in the local range */
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat         A=ml->A, Aloc=ml->Aloc;
  PetscMPIInt size;
  PetscScalar *pwork=ml->pwork;
  PetscInt    i;

  MPI_Comm_size(((PetscObject)A)->comm,&size);
  if (size == 1){
    VecPlaceArray(ml->x,p);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    VecPlaceArray(ml->x,pwork);
  }
  VecPlaceArray(ml->y,ap);
  MatMult(Aloc,ml->x,ml->y);
  VecResetArray(ml->x);
  VecResetArray(ml->y);
  return 0;
}

int PetscML_comm(double p[],void *ML_data)
{
  FineGridCtx *ml=(FineGridCtx*)ML_data;
  Mat         A=ml->A;
  Mat_MPIAIJ  *a=(Mat_MPIAIJ*)A->data;
  PetscMPIInt size;
  PetscInt    i,in_length=A->rmap.n,out_length=ml->Aloc->cmap.n;
  PetscScalar *array;

  MPI_Comm_size(((PetscObject)A)->comm,&size);
  if (size == 1) return 0;

  VecPlaceArray(ml->y,p);
  VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecResetArray(ml->y);
  VecGetArray(a->lvec,&array);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  VecRestoreArray(a->lvec,&array);
  return 0;
}
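
/*
   Vector layout assumed by PetscML_matvec() and PetscML_comm() (a sketch inferred
   from the code above): p[0..in_length) holds this process's owned entries, and
   PetscML_comm() appends the ghost entries gathered through a->Mvctx into
   p[in_length..out_length), matching the shifted column indices that
   MatConvert_MPIAIJ_ML() assigns to the off-diagonal block.
*/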

PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  return(0);
}

/* MatMultAdd_ML - Compute y = w + A*x */
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  VecAXPY(y,1.0,w);
  return(0);
}

/* newtype is ignored because "ml" is not listed as a PETSc MatType yet */
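/*
   Column layout of the resulting Aloc (an illustrative sketch; the values are
   hypothetical): the diagonal block keeps its local column indices [0,an), and each
   off-diagonal column index of B is shifted by an ("cj[k] = an + (*bj)" below).
   E.g. with am=an=2 local rows/columns and one ghost column:

     A (this process's rows):  [ a00 a01 | b00 ]   ->  Aloc row 0: cols {0,1,2}, vals {a00,a01,b00}
                               [ a10 a11 | b10 ]   ->  Aloc row 1: cols {0,1,2}, vals {a10,a11,b10}

   Aloc is then an am x (an+Bn) SeqAIJ matrix that ML can apply as a purely local
   operator; the matching ghost vector entries are supplied by PetscML_comm().
*/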
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap.n,an=A->cmap.n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    PetscMalloc((1+am)*sizeof(PetscInt),&ci);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
    PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap.n+mpimat->B->cmap.n;
    MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  return(0);
}

PetscErrorCode MatDestroy_ML(Mat A)
{
  Mat_MLShell *shell;

  MatShellGetContext(A,(void **)&shell);
  VecDestroy(shell->y);
  PetscFree(shell);
  MatDestroy_Shell(A);
  PetscObjectChangeTypeName((PetscObject)A,0);
  return(0);
}

PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = matdata->rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);
    }
    return(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
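  /*
     MSR layout, for reference (as used by the loops below): ml_vals[0..m-1] holds the
     diagonal entries, ml_cols[0..m] holds row pointers into the off-diagonal section,
     and ml_cols[j], ml_vals[j] for j in [ml_cols[i],ml_cols[i+1]) are row i's
     off-diagonal column indices and values. Hence row i carries
     nnz[i] = (ml_cols[i+1]-ml_cols[i]) + 1 entries, the +1 being the diagonal.
  */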
  MatCreate(PETSC_COMM_SELF,newmat);
  MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
  MatSetType(*newmat,MATSEQAIJ);

  PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; /* +1 for the diagonal entry */
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  MatSeqAIJSetPreallocation(*newmat,0,nnz);
  PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);
  aa = (PetscScalar*)(aj + nz_max);

  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    PetscSortIntWithScalarArray(nnz[i],aj,aa);
    MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);

  PetscFree(aj);
  PetscFree(nnz);
  return(0);
}

PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt    m,n;
  ML_Comm     *MLcomm;
  Mat_MLShell *shellctx;

  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    return(0);
  }

  if (reuse){
    MatShellGetContext(*newmat,(void **)&shellctx);
    shellctx->mlmat = mlmat;
    return(0);
  }

  MLcomm = mlmat->comm;
  PetscNew(Mat_MLShell,&shellctx);
  MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
  MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
  MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  VecCreate(PETSC_COMM_WORLD,&shellctx->y);
  VecSetSizes(shellctx->y,m,PETSC_DECIDE);
  VecSetFromOptions(shellctx->y);
  (*newmat)->ops->destroy = MatDestroy_ML;
  return(0);
}

PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  MatCreate(mlmat->comm->USR_comm,&A);
  MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
  MatSetType(A,MATMPIAIJ);
  PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);
  aa = (PetscScalar*)(aj + nz_max);
  /* create global row numbering for an ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  *newmat = A;

  PetscFree3(nnzA,nnzB,nnz);
  PetscFree(aj);
  return(0);
}