Actual source code: pmetis.c

#include "src/mat/impls/adj/mpi/mpiadj.h"

/*
   Currently using the ParMETIS 3 interface (ParMETIS_V3_PartKway below); older
   ParMETIS releases used a different include file and routine names.
*/
#include "parmetis.h"

/*
      The first 5 elements of this structure are the input control array to Metis
*/
typedef struct {
  int cuts;         /* number of cuts made (output) */
  int foldfactor;   /* folding factor (default set in MatPartitioningCreate_Parmetis()) */
  int parallel;     /* use parallel partitioner for coarse problem */
  int indexing;     /* 0 indicates C indexing, 1 Fortran */
  int printout;     /* indicates if one wishes Metis to print info */
  MPI_Comm comm_pmetis;  /* communicator duplicated from the partitioning context */
} MatPartitioning_Parmetis;

/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static PetscErrorCode MatPartitioningApply_Parmetis(MatPartitioning part,IS *partitioning)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  int                      *locals,size,rank;
  int                      *vtxdist,*xadj,*adjncy,itmp = 0;
  int                      wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[3], i, j;
  Mat                      mat = part->adj,newmat;
  Mat_MPIAdj               *adj = (Mat_MPIAdj *)mat->data;
  PetscTruth               flg;
  float                    *tpwgts,*ubvec;

  MPI_Comm_size(mat->comm,&size);

  PetscTypeCompare((PetscObject)mat,MATMPIADJ,&flg);
  if (!flg) {
    MatConvert(mat,MATMPIADJ,&newmat);
    adj  = (Mat_MPIAdj *)newmat->data;
  }

  vtxdist = adj->rowners;
  xadj    = adj->i;
  adjncy  = adj->j;
  MPI_Comm_rank(part->comm,&rank);
  if (!(vtxdist[rank+1] - vtxdist[rank])) {
    SETERRQ(PETSC_ERR_LIB,"ParMETIS does not support a processor that owns no rows of the matrix");
  }
#if defined(PETSC_USE_BOPT_g)
  /* check that matrix has no diagonal entries */
  {
    int i,j,rstart;
    MatGetOwnershipRange(mat,&rstart,PETSC_NULL);
    for (i=0; i<mat->m; i++) {
      for (j=xadj[i]; j<xadj[i+1]; j++) {
        if (adjncy[j] == i+rstart) SETERRQ1(PETSC_ERR_ARG_WRONG,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
      }
    }
  }
#endif

  PetscMalloc((mat->m+1)*sizeof(int),&locals);

  if (PetscLogPrintInfo) {itmp = parmetis->printout; parmetis->printout = 127;}
  PetscMalloc(ncon*nparts*sizeof(float),&tpwgts);
  for (i=0; i<ncon; i++) {
    for (j=0; j<nparts; j++) {
      if (part->part_weights) {
        tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
      } else {
        tpwgts[i*nparts+j] = 1./nparts;
      }
    }
  }
  PetscMalloc(ncon*sizeof(float),&ubvec);
  for (i=0; i<ncon; i++) {
    ubvec[i] = 1.05;
  }
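  /*
     Argument mapping for the ParMETIS_V3_PartKway() call below (editorial summary
     based on the ParMETIS 3.x interface; not part of the original source):
       vtxdist, xadj, adjncy             - distributed CSR graph; vtxdist gives the vertex range owned by each process
       part->vertex_weights, adj->values - optional vertex and edge weights, ignored by ParMETIS because wgtflag is 0
       &wgtflag, &numflag                - which weights are supplied; numflag 0 selects C-style numbering
       &ncon, &nparts                    - number of weights per vertex and number of parts requested
       tpwgts, ubvec                     - target fraction of each part and allowed imbalance per constraint
       options                           - ParMETIS options array; options[0] = 0 selects the defaults
       &parmetis->cuts, locals           - output: number of edges cut and the local partition assignments
       &parmetis->comm_pmetis            - communicator duplicated in MatPartitioningCreate_Parmetis()
  */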
  options[0] = 0;
  /* ParMETIS_V3_PartKway() provides no error conditions to check */
  ParMETIS_V3_PartKway(vtxdist,xadj,adjncy,part->vertex_weights,adj->values,&wgtflag,&numflag,&ncon,&nparts,tpwgts,ubvec,
                       options,&parmetis->cuts,locals,&parmetis->comm_pmetis);
  PetscFree(tpwgts);
  PetscFree(ubvec);
  if (PetscLogPrintInfo) {parmetis->printout = itmp;}

  ISCreateGeneral(part->comm,mat->m,locals,partitioning);
  PetscFree(locals);

  if (!flg) {
    MatDestroy(newmat);
  }
  return(0);
}
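/*
   Usage sketch (editorial addition, not part of the original pmetis.c): the apply
   routine above is normally reached through the public MatPartitioning interface.
   The calling sequence and the type name string "parmetis" are assumptions based on
   this file and the PETSc API of the same generation (where MatPartitioningDestroy()
   and ISDestroy() still take the object rather than its address); comm is the
   communicator of the adjacency matrix adj, which should be of type MATMPIADJ or
   convertible to it.

     MatPartitioning part;
     IS              is;
     PetscErrorCode  ierr;

     ierr = MatPartitioningCreate(comm,&part);CHKERRQ(ierr);
     ierr = MatPartitioningSetAdjacency(part,adj);CHKERRQ(ierr);
     ierr = MatPartitioningSetType(part,"parmetis");CHKERRQ(ierr);
     ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
     ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
     ierr = MatPartitioningDestroy(part);CHKERRQ(ierr);
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/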


PetscErrorCode MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
  int                      rank;
  PetscTruth               iascii;

  MPI_Comm_rank(part->comm,&rank);
  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
  if (iascii) {
    if (parmetis->parallel == 2) {
      PetscViewerASCIIPrintf(viewer,"  Using parallel coarse grid partitioner\n");
    } else {
      PetscViewerASCIIPrintf(viewer,"  Using sequential coarse grid partitioner\n");
    }
    PetscViewerASCIIPrintf(viewer,"  Using %d fold factor\n",parmetis->foldfactor);
    PetscViewerASCIISynchronizedPrintf(viewer,"  [%d]Number of cuts found %d\n",rank,parmetis->cuts);
    PetscViewerFlush(viewer);
  } else {
    SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for this Parmetis partitioner",((PetscObject)viewer)->type_name);
  }

  return(0);
}

/*@
   MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
   do the partitioning of the coarse grid.

   Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;

  parmetis->parallel = 1;
  return(0);
}
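/*
   Usage note (editorial addition, not part of the original source): this routine is
   meant to be called on a partitioning context that already has the ParMETIS type;
   the type name string below is an assumption.

     MatPartitioningSetType(part,"parmetis");
     MatPartitioningParmetisSetCoarseSequential(part);
*/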
PetscErrorCode MatPartitioningSetFromOptions_Parmetis(MatPartitioning part)
{
  PetscTruth flag;

  PetscOptionsHead("Set ParMETIS partitioning options");
    PetscOptionsName("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",&flag);
    if (flag) {
      MatPartitioningParmetisSetCoarseSequential(part);
    }
  PetscOptionsTail();
  return(0);
}
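/*
   Example (editorial sketch): the option registered above can be combined with the
   generic type selection on the command line of a PETSc program, e.g.

     ./app -mat_partitioning_type parmetis -mat_partitioning_parmetis_coarse_sequential

   Here ./app is a hypothetical executable, and -mat_partitioning_type is assumed to
   be handled by the generic MatPartitioningSetFromOptions() outside this file.
*/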


PetscErrorCode MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;

  MPI_Comm_free(&(parmetis->comm_pmetis));
  PetscFree(parmetis);
  return(0);
}

PetscErrorCode MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis;

  PetscNew(MatPartitioning_Parmetis,&parmetis);

  parmetis->cuts       = 0;   /* output variable */
  parmetis->foldfactor = 150; /* folding factor */
  parmetis->parallel   = 2;   /* use parallel partitioner for coarse grid */
  parmetis->indexing   = 0;   /* index numbering starts from 0 */
  parmetis->printout   = 0;   /* print no output while running */

  MPI_Comm_dup(part->comm,&(parmetis->comm_pmetis));

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  part->data                = (void*)parmetis;
  return(0);
}