Actual source code: ml.c

#define PETSCKSP_DLL

/*
   Provides an interface to the ML 5.0 smoothed aggregation preconditioner
*/
 #include "private/pcimpl.h"
 #include "src/ksp/pc/impls/mg/mgimpl.h"
 #include "src/mat/impls/aij/seq/aij.h"
 #include "src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
#include "ml_config.h"
#include "ml_include.h"

/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors */
  Mat        A,P,R;
  KSP        ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat          A;      /* PETSc matrix in AIJ format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;
  ML_Operator  *mlmat;
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  GridCtx        *gridctx;
  FineGridCtx    *PetscMLdata;
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal      Threshold,DampingFactor;
  PetscTruth     SpectralNormScheme_Anorm;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

/* Forward declarations; the signatures match the definitions later in this file
   (PCSetFromOptions_MG() and MatDestroy_Shell() are defined elsewhere in PETSc) */
extern PetscErrorCode PetscContainerDestroy_PC_ML(void*);
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode MatDestroy_Shell(Mat);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);
extern int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]);
extern int PetscML_comm(double p[],void *ML_data);

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
   by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode  ierr;
  PetscMPIInt     size;
  FineGridCtx     *PetscMLdata;
  ML              *ml_object;
  ML_Aggregate    *agg_object;
  ML_Operator     *mlmat;
  PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
  Mat             A,Aloc;
  GridCtx         *gridctx;
  PC_ML           *pc_ml=PETSC_NULL;
  PetscContainer  container;
  MatReuse        reuse = MAT_INITIAL_MATRIX;

  /* retrieve the PC_ML context attached to pc by PCCreate_ML() */
  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  if (pc->setupcalled){
    if (pc->flag == SAME_NONZERO_PATTERN){
      reuse = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else {
      /* the nonzero pattern changed: start over with a fresh PC_ML context */
      PC_ML           *pc_ml_new = PETSC_NULL;
      PetscContainer  container_new;
      PetscNewLog(pc,PC_ML,&pc_ml_new);
      PetscContainerCreate(PETSC_COMM_SELF,&container_new);
      PetscContainerSetPointer(container_new,pc_ml_new);
      PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);
      PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);

      PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));
      PetscContainerDestroy(container);
      pc_ml = pc_ml_new;
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A = pc->pmat;
  MPI_Comm_size(((PetscObject)A)->comm,&size);
  pc_ml->size = size;
  if (size > 1){
    if (reuse) Aloc = PetscMLdata->Aloc;
    MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);
  } else {
    Aloc = A;
  }

  /* create and initialize struct 'PetscMLdata' */
  if (!reuse){
    PetscNewLog(pc,FineGridCtx,&PetscMLdata);
    pc_ml->PetscMLdata = PetscMLdata;
    PetscMalloc((Aloc->cmap.n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);

    VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);
    VecSetSizes(PetscMLdata->x,Aloc->cmap.n,Aloc->cmap.n);
    VecSetType(PetscMLdata->x,VECSEQ);

    VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);
    VecSetSizes(PetscMLdata->y,A->rmap.n,PETSC_DECIDE);
    VecSetType(PetscMLdata->y,VECSEQ);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires the fine-grid matrix as input; from it ML determines Nlevels. */
  MatGetSize(Aloc,&m,&nlocal_allcols);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %D must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %D must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level = Nlevels - 1;
  if (!pc->setupcalled){
    KSP smoother;
    PC  subpc;
    PCMGSetLevels(pc,Nlevels,PETSC_NULL);
    /* set default smoothers: Richardson with SOR on every level */
    for (level=1; level<=fine_level; level++){
      PCMGGetSmoother(pc,level,&smoother);
      KSPSetType(smoother,KSPRICHARDSON);
      KSPGetPC(smoother,&subpc);
      PCSetType(subpc,PCSOR);
    }
    PCSetFromOptions_MG(pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse){
    PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);
    pc_ml->gridctx = gridctx;
  }

  /* wrap the ML matrices in PETSc matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        /* the ML matrix A changes its sparsity pattern even though the PETSc A does not, so gridctx[level].A must be recreated! */
        MatDestroy(gridctx[level].A);
      }
      MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat  = &(ml_object->Pmat[mllevel]);
      MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);
      mlmat  = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);

      mlmat  = &(ml_object->Amat[mllevel]);
      if (reuse){
        MatDestroy(gridctx[level].A);
      }
      MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse){
    for (level=0; level<fine_level; level++){
      level1 = level + 1;
      VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);
      VecSetSizes(gridctx[level].x,gridctx[level].A->cmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level].x,VECMPI);
      PCMGSetX(pc,level,gridctx[level].x);

      VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);
      VecSetSizes(gridctx[level].b,gridctx[level].A->rmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level].b,VECMPI);
      PCMGSetRhs(pc,level,gridctx[level].b);

      VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);
      VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level1].r,VECMPI);
      PCMGSetR(pc,level1,gridctx[level1].r);

      if (level == 0){
        PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
      } else {
        PCMGGetSmoother(pc,level,&gridctx[level].ksp);
      }
    }
    PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);
  }

  /* set the interpolation/restriction operators and residual routines between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    PCMGSetInterpolation(pc,level1,gridctx[level].P);
    PCMGSetRestriction(pc,level1,gridctx[level].R);
    if (level > 0){
      PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
    }
    KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);
  }
  PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
  KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);

  /* now call PCSetUp_MG()         */
  /*-------------------------------*/
  (*pc_ml->PCSetUp)(pc);
  return(0);
}

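/* PetscContainerDestroy_PC_ML - Frees the PC_ML context and all objects it
   references; installed as the user-destroy routine of the container that
   PCCreate_ML() attaches to the PC. */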
PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode  ierr;
  PC_ML           *pc_ml = (PC_ML*)ptr;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    PetscFree(pc_ml->PetscMLdata->pwork);
    if (pc_ml->size > 1)      {MatDestroy(pc_ml->PetscMLdata->Aloc);}
    if (pc_ml->PetscMLdata->x){VecDestroy(pc_ml->PetscMLdata->x);}
    if (pc_ml->PetscMLdata->y){VecDestroy(pc_ml->PetscMLdata->y);}
  }
  PetscFree(pc_ml->PetscMLdata);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){MatDestroy(pc_ml->gridctx[level].A);}
    if (pc_ml->gridctx[level].P){MatDestroy(pc_ml->gridctx[level].P);}
    if (pc_ml->gridctx[level].R){MatDestroy(pc_ml->gridctx[level].R);}
    if (pc_ml->gridctx[level].x){VecDestroy(pc_ml->gridctx[level].x);}
    if (pc_ml->gridctx[level].b){VecDestroy(pc_ml->gridctx[level].b);}
    if (pc_ml->gridctx[level+1].r){VecDestroy(pc_ml->gridctx[level+1].r);}
  }
  PetscFree(pc_ml->gridctx);
  PetscFree(pc_ml);
  return(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_ML           *pc_ml=PETSC_NULL;
  PetscContainer  container;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  /* detach PC_ML from pc and dereference the container */
  PetscContainerDestroy(container);
  PetscObjectCompose((PetscObject)pc,"PC_ML",0);
  if (pc->ops->destroy) {
    (*pc->ops->destroy)(pc);
  }
  return(0);
}

PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode  ierr;
  PetscInt        indx,m,PrintLevel;
  PetscTruth      flg;
  const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML           *pc_ml=PETSC_NULL;
  PetscContainer  container;
  PCMGType        mgtype;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* inherited MG options */
  PetscOptionsHead("Multigrid options (inherited)");
    PetscOptionsInt("-pc_mg_cycles","1 for V-cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);
    PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);
    PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);
    PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);
  PetscOptionsTail();

  /* ML options */
  PetscOptionsHead("ML options");
  /* set defaults */
  PrintLevel    = 0;
  indx          = 0;
  PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);
  ML_Set_PrintLevel(PrintLevel);
  PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);
  PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);
  PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate coarsening scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL); /* ??? */
  pc_ml->CoarsenScheme = indx;

  PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);

  PetscOptionsReal("-pc_ml_Threshold","Smoother drop tolerance","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);

  PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);

  PetscOptionsTail();
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you
       provide the fine-grid discretization matrix. The coarser-grid matrices and the
       restriction/interpolation operators are computed by ML, with the matrices converted
       to PETSc matrices in AIJ format and the restriction/interpolation operators wrapped
       as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tolerance (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
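
/* Example usage (a minimal sketch; the matrix A, the vectors b and x, the example
   executable ex2, and the option values below are illustrative, and PETSc must have
   been configured with ML for the "ml" type to be registered):

       KSP ksp;
       PC  pc;
       KSPCreate(PETSC_COMM_WORLD,&ksp);
       KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
       KSPGetPC(ksp,&pc);
       PCSetType(pc,"ml");
       KSPSetFromOptions(ksp);
       KSPSolve(ksp,b,x);

   or, equivalently, at the command line:

       mpiexec -n 4 ./ex2 -ksp_type cg -pc_type ml -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS
*/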

PetscErrorCode  PCCreate_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_ML           *pc_ml;
  PetscContainer  container;

  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  PetscNewLog(pc,PC_ML,&pc_ml);
  PetscContainerCreate(PETSC_COMM_SELF,&container);
  PetscContainerSetPointer(container,pc_ml);
  PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);
  PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);

  pc_ml->ml_object     = 0;
  pc_ml->agg_object    = 0;
  pc_ml->gridctx       = 0;
  pc_ml->PetscMLdata   = 0;
  pc_ml->Nlevels       = -1;
  pc_ml->MaxNlevels    = 10;
  pc_ml->MaxCoarseSize = 1;
  pc_ml->CoarsenScheme = 1; /* ??? */
  pc_ml->Threshold     = 0.0;
  pc_ml->DampingFactor = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size          = 0;

  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  return(0);
}

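/* PetscML_getrow - getrow routine handed to ML via ML_Set_Amatrix_Getrow().
   Copies the requested rows of the local AIJ matrix into ML's columns/values
   arrays; per ML's getrow convention it returns 0 when allocated_space is
   too small and 1 on success. */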
int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],
   int allocated_space, int columns[], double values[], int row_lengths[])
{
  Mat            Aloc;
  Mat_SeqAIJ     *a;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  MatGetSize(Aloc,&m,PETSC_NULL);

  for (i = 0; i<N_requested_rows; i++) {
    row   = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ( (row >= 0) && (row <= (m-1)) ) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

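/* PetscML_matvec - matrix-vector product handed to ML via ML_Set_Amatrix_Matvec().
   Wraps ML's raw arrays p and ap in PETSc work vectors (gathering ghost values
   through PetscML_comm() in parallel) and applies MatMult() with Aloc. */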
int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  MPI_Comm_size(((PetscObject)A)->comm,&size);
  if (size == 1){
    VecPlaceArray(ml->x,p);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    VecPlaceArray(ml->x,pwork);
  }
  VecPlaceArray(ml->y,ap);
  MatMult(Aloc,ml->x,ml->y);
  VecResetArray(ml->x);
  VecResetArray(ml->y);
  return 0;
}

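/* PetscML_comm - communication routine handed to ML via ML_Set_Amatrix_Getrow().
   In parallel it appends the ghost (off-processor) entries needed by Aloc to the
   end of the local array p, using the scatter context of the MPIAIJ matrix. */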
int PetscML_comm(double p[],void *ML_data)
{
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap.n,out_length=ml->Aloc->cmap.n;
  PetscScalar    *array;

  MPI_Comm_size(((PetscObject)A)->comm,&size);
  if (size == 1) return 0;

  VecPlaceArray(ml->y,p);
  VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecResetArray(ml->y);
  VecGetArray(a->lvec,&array);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  VecRestoreArray(a->lvec,&array);
  return 0;
}
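
/* MatMult_ML - MatMult for the shell matrices created by MatWrapML_SHELL();
   applies the underlying ML_Operator to the array of x and stores the result in y. */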
PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode   ierr;
  Mat_MLShell      *shell;
  PetscScalar      *xarray,*yarray;
  PetscInt         x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  return(0);
}
/* MatMultAdd_ML - Compute y = w + A*x */
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode    ierr;
  Mat_MLShell       *shell;
  PetscScalar       *xarray,*yarray;
  PetscInt          x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);

  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  VecAXPY(y,1.0,w);

  return(0);
}

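/* MatConvert_MPIAIJ_ML - Merges the diagonal and off-diagonal blocks of a parallel
   MPIAIJ matrix into a single sequential AIJ matrix, numbering the off-processor
   columns after the local ones; this is the local matrix ML works with. */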
/* newtype is ignored because "ml" is not listed under PETSc MatType yet */
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap.n,an=A->cmap.n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    PetscMalloc((1+am)*sizeof(PetscInt),&ci);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
    PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap.n+mpimat->B->cmap.n;
    MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  return(0);
}
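
/* MatDestroy_ML - Destroys a shell matrix created by MatWrapML_SHELL() together
   with its Mat_MLShell context, then calls MatDestroy_Shell(). */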
PetscErrorCode MatDestroy_ML(Mat A)
{
  Mat_MLShell    *shell;

  MatShellGetContext(A,(void **)&shell);
  VecDestroy(shell->y);
  PetscFree(shell);
  MatDestroy_Shell(A);
  PetscObjectChangeTypeName((PetscObject)A,0);
  return(0);
}

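/* MatWrapML_SeqAIJ - Converts an ML matrix to a SeqAIJ matrix. P and R (stored
   in CSR format) have their array pointers passed in directly; A (stored in MSR
   format) is copied entry by entry. */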
PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        PetscSortIntWithScalarArray(nnz[i],aj,aa);
        aj += nnz[i]; aa += nnz[i];
      }
      MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);
      PetscFree(nnz);
    }
    return(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  MatCreate(PETSC_COMM_SELF,newmat);
  MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
  MatSetType(*newmat,MATSEQAIJ);

  PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
  nz_max = 1; /* track the longest row; the work arrays below hold one row at a time */
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  MatSeqAIJSetPreallocation(*newmat,0,nnz);
  PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);
  aa = (PetscScalar*)(aj + nz_max);

  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    PetscSortIntWithScalarArray(nnz[i],aj,aa);
    MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);

  PetscFree(aj);
  PetscFree(nnz);
  return(0);
}

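/* MatWrapML_SHELL - Wraps an ML matrix in a PETSc shell matrix whose MatMult()
   and MatMultAdd() call ML_Operator_Apply(); used for the parallel P and R. */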
PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    return(0);
  }

  if (reuse){
    MatShellGetContext(*newmat,(void **)&shellctx);
    shellctx->mlmat = mlmat;
    return(0);
  }

  MLcomm = mlmat->comm;
  PetscNew(Mat_MLShell,&shellctx);
  MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
  MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
  MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
  shellctx->A         = *newmat;
  shellctx->mlmat     = mlmat;
  VecCreate(PETSC_COMM_WORLD,&shellctx->y);
  VecSetSizes(shellctx->y,m,PETSC_DECIDE);
  VecSetFromOptions(shellctx->y);
  (*newmat)->ops->destroy = MatDestroy_ML;
  return(0);
}

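/* MatWrapML_MPIAIJ - Copies an ML coarse-grid matrix (MSR format) into a new
   parallel MPIAIJ matrix, remapping the local row/column indices to a global
   numbering obtained from ML_build_global_numbering(). */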
PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  MatCreate(mlmat->comm->USR_comm,&A);
  MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
  MatSetType(A,MATMPIAIJ);
  PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);
  aa = (PetscScalar*)(aj + nz_max);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  *newmat = A;

  PetscFree3(nnzA,nnzB,nnz);
  PetscFree(aj);
  return(0);
}