Actual source code: chaco.c

#include "src/mat/impls/adj/mpi/mpiadj.h"

#ifdef PETSC_HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef PETSC_HAVE_STDLIB_H
#include <stdlib.h>
#endif

#include "petscfix.h"

/* Chaco does not have an include file */
extern int interface(int nvtxs, int *start, int *adjacency, int *vwgts,
    float *ewgts, float *x, float *y, float *z, char *outassignname,
    char *outfilename, short *assignment, int architecture, int ndims_tot,
    int mesh_dims[3], double *goal, int global_method, int local_method,
    int rqi_flag, int vmax, int ndims, double eigtol, long seed);

extern int FREE_GRAPH; /* Chaco global: nonzero lets Chaco free the graph arrays itself */


/*
int       nvtxs;                number of vertices in full graph
int      *start;                start of edge list for each vertex
int      *adjacency;            edge list data
int      *vwgts;                weights for all vertices
float    *ewgts;                weights for all edges
float    *x, *y, *z;            coordinates for inertial method
char     *outassignname;        name of assignment output file
char     *outfilename;          output file name
short    *assignment;           set number of each vtx (length n)
int       architecture;         0 => hypercube, d => d-dimensional mesh
int       ndims_tot;            total number of cube dimensions to divide
int       mesh_dims[3];         dimensions of mesh of processors
double   *goal;                 desired set sizes for each set
int       global_method;        global partitioning algorithm
int       local_method;         local partitioning algorithm
int       rqi_flag;             should I use RQI/Symmlq eigensolver?
int       vmax;                 how many vertices to coarsen down to?
int       ndims;                number of eigenvectors (2^d sets)
double    eigtol;               tolerance on eigenvectors
long      seed;                 for random graph mutations
*/
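/*
   An illustrative mapping of the architecture parameters (values here are
   hypothetical, not taken from this file): to obtain 4 parts one could
   target either a hypercube,

       architecture = 0;  ndims_tot = 2;            2^2 = 4 sets

   or a 4x1x1 processor mesh,

       architecture = 1;  mesh_dims[0] = 4;  mesh_dims[1] = mesh_dims[2] = 1;

   MatPartitioningCreate_Chaco below defaults to architecture = 1 with
   mesh_dims[0] = part->n, the requested number of parts.
*/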


typedef struct {
    int architecture;
    int ndims_tot;
    int mesh_dims[3];
    int rqi_flag;
    int numbereigen;
    double eigtol;
    int global_method;          /* global method */
    int local_method;           /* local method */
    int nbvtxcoarsed;           /* number of vertices for the coarse graph */
    char *mesg_log;
} MatPartitioning_Chaco;

#define SIZE_LOG 10000          /* size of buffer for mesg_log */

static PetscErrorCode MatPartitioningApply_Chaco(MatPartitioning part, IS *partitioning)
{
    PetscErrorCode ierr;
    int  *parttab, *locals, i, size, rank;
    Mat mat = part->adj, matMPI, matSeq;
    int nb_locals;
    Mat_MPIAdj *adj;
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;
    PetscTruth flg;
#ifdef PETSC_HAVE_UNISTD_H
    int fd_stdout, fd_pipe[2], count;
#endif


    FREE_GRAPH = 0; /* otherwise Chaco will attempt to free memory for adjacency graph */

    MPI_Comm_size(mat->comm, &size);

    PetscTypeCompare((PetscObject) mat, MATMPIADJ, &flg);

    /* if the matrix is distributed, gather a sequential copy via MatGetSubMatrices */
    if (size > 1) {
        int M, N;
        IS isrow, iscol;
        Mat *A;

        if (flg) {
            SETERRQ(PETSC_ERR_SUP, "Distributed matrix format MPIAdj is not supported for sequential partitioners");
        }
        PetscPrintf(part->comm, "Converting distributed matrix to sequential: this could be a performance loss\n");

        MatGetSize(mat, &M, &N);
        ISCreateStride(PETSC_COMM_SELF, M, 0, 1, &isrow);
        ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &iscol);
        MatGetSubMatrices(mat, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, &A);
        ISDestroy(isrow);
        ISDestroy(iscol);
        matSeq = *A;
    } else
        matSeq = mat;

    /* convert to the MPIAdj format if necessary, keeping the result in matMPI */
    if (!flg) {
        MatConvert(matSeq, MATMPIADJ, &matMPI);
    } else
        matMPI = matSeq;

    adj = (Mat_MPIAdj *) matMPI->data;  /* finally adj contains the adjacency graph */

    {
        /* arguments for Chaco library */
        int nvtxs = mat->M;                     /* number of vertices in full graph */
        int *start = adj->i;                    /* start of edge list for each vertex */
        int *adjacency;                         /* = adj->j; edge list data */
        int *vwgts = NULL;                      /* weights for all vertices */
        float *ewgts = NULL;                    /* weights for all edges */
        float *x = NULL, *y = NULL, *z = NULL;  /* coordinates for inertial method */
        char *outassignname = NULL;             /* name of assignment output file */
        char *outfilename = NULL;               /* output file name */
        short *assignment;                      /* set number of each vtx (length n) */
        int architecture = chaco->architecture; /* 0 => hypercube, d => d-dimensional mesh */
        int ndims_tot = chaco->ndims_tot;       /* total number of cube dimensions to divide */
        int *mesh_dims = chaco->mesh_dims;      /* dimensions of mesh of processors */
        double *goal = NULL;                    /* desired set sizes for each set */
        int global_method = chaco->global_method; /* global partitioning algorithm */
        int local_method = chaco->local_method; /* local partitioning algorithm */
        int rqi_flag = chaco->rqi_flag;         /* should I use RQI/Symmlq eigensolver? */
        int vmax = chaco->nbvtxcoarsed;         /* how many vertices to coarsen down to? */
        int ndims = chaco->numbereigen;         /* number of eigenvectors (2^d sets) */
        double eigtol = chaco->eigtol;          /* tolerance on eigenvectors */
        long seed = 123636512;                  /* for random graph mutations */

        /* allocate the array that will receive Chaco's result */
        PetscMalloc((mat->M) * sizeof(short), &assignment);

        /* shift to 1-based indices for libraries with a Fortran implementation */
        PetscMalloc(sizeof(int) * start[nvtxs], &adjacency);
        for (i = 0; i < start[nvtxs]; i++)
            adjacency[i] = (adj->j)[i] + 1;

        /* redirect output to buffer: chaco -> mesg_log */
#ifdef PETSC_HAVE_UNISTD_H
        fd_stdout = dup(1);
        pipe(fd_pipe);
        close(1);
        dup2(fd_pipe[1], 1);
        PetscMalloc(SIZE_LOG * sizeof(char), &(chaco->mesg_log));
#endif

        /* library call; the return value is checked once stdout is restored */
        ierr = interface(nvtxs, start, adjacency, vwgts, ewgts, x, y, z,
            outassignname, outfilename, assignment, architecture, ndims_tot,
            mesh_dims, goal, global_method, local_method, rqi_flag, vmax, ndims,
            eigtol, seed);

#ifdef PETSC_HAVE_UNISTD_H
        fflush(stdout);
        count = read(fd_pipe[0], chaco->mesg_log, (SIZE_LOG - 1) * sizeof(char));
        if (count < 0)
            count = 0;
        chaco->mesg_log[count] = 0;
        close(1);
        dup2(fd_stdout, 1);
        close(fd_stdout);
        close(fd_pipe[0]);
        close(fd_pipe[1]);
#endif

        if (ierr) { SETERRQ(PETSC_ERR_LIB, chaco->mesg_log); }

        PetscFree(adjacency);

        PetscMalloc((mat->M) * sizeof(int), &parttab);
        for (i = 0; i < nvtxs; i++) {
            parttab[i] = assignment[i];
        }
        PetscFree(assignment);
    }

    /* creation of the index set */
    MPI_Comm_rank(part->comm, &rank);
    MPI_Comm_size(part->comm, &size);
    nb_locals = mat->M / size;
    locals = parttab + rank * nb_locals;
    if (rank < mat->M % size) {
        nb_locals++;
        locals += rank;
    } else
        locals += mat->M % size;
    ISCreateGeneral(part->comm, nb_locals, locals, partitioning);

    /* destroy temporary objects */
    PetscFree(parttab);
    if (matSeq != mat) {
        MatDestroy(matSeq);
    }
    if (matMPI != mat) {
        MatDestroy(matMPI);
    }
    return(0);
}
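/*
   A minimal usage sketch of this partitioner from application code (not
   part of this file; error checking omitted, and the communicator `comm'
   and adjacency matrix `adjmat' are assumed to exist; MAT_PARTITIONING_CHACO
   is the conventional name under which this type is registered):

       MatPartitioning part;
       IS              is;

       MatPartitioningCreate(comm, &part);
       MatPartitioningSetAdjacency(part, adjmat);
       MatPartitioningSetType(part, MAT_PARTITIONING_CHACO);
       MatPartitioningSetFromOptions(part);
       MatPartitioningApply(part, &is);    <-- invokes MatPartitioningApply_Chaco above
       ISDestroy(is);
       MatPartitioningDestroy(part);
*/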


PetscErrorCode MatPartitioningView_Chaco(MatPartitioning part, PetscViewer viewer)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;
    PetscErrorCode        ierr;
    PetscMPIInt           rank;
    PetscTruth            iascii;

    MPI_Comm_rank(part->comm, &rank);
    PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &iascii);
    if (iascii) {
        if (!rank && chaco->mesg_log) {
            PetscViewerASCIIPrintf(viewer, "%s\n", chaco->mesg_log);
        }
    } else {
        SETERRQ1(PETSC_ERR_SUP, "Viewer type %s not supported for this Chaco partitioner", ((PetscObject) viewer)->type_name);
    }

    return(0);
}

/*@
     MatPartitioningChacoSetGlobal - Sets the method for global partitioning.

  Input Parameters:
+  part - the partitioning context
-  method - MP_CHACO_MULTILEVEL_KL, MP_CHACO_SPECTRAL, MP_CHACO_LINEAR,
    MP_CHACO_RANDOM or MP_CHACO_SCATTERED

   Level: advanced

@*/
PetscErrorCode MatPartitioningChacoSetGlobal(MatPartitioning part, MPChacoGlobalType method)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    switch (method) {
    case MP_CHACO_MULTILEVEL_KL:
        chaco->global_method = 1;
        break;
    case MP_CHACO_SPECTRAL:
        chaco->global_method = 2;
        break;
    case MP_CHACO_LINEAR:
        chaco->global_method = 4;
        break;
    case MP_CHACO_RANDOM:
        chaco->global_method = 5;
        break;
    case MP_CHACO_SCATTERED:
        chaco->global_method = 6;
        break;
    default:
        SETERRQ(PETSC_ERR_SUP, "Chaco: Unknown or unsupported option");
    }
    return(0);
}
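/*
   For example (illustrative):

       MatPartitioningChacoSetGlobal(part, MP_CHACO_SPECTRAL);

   selects Chaco's spectral method (global_method = 2); the equivalent
   command-line option, handled by MatPartitioningSetFromOptions_Chaco
   below, is -mat_partitioning_chaco_global spectral.
*/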

/*@
     MatPartitioningChacoSetLocal - Sets the method for local partitioning.

  Input Parameters:
+  part - the partitioning context
-  method - MP_CHACO_KERNIGHAN_LIN or MP_CHACO_NONE

   Level: advanced

@*/
PetscErrorCode MatPartitioningChacoSetLocal(MatPartitioning part, MPChacoLocalType method)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    switch (method) {
    case MP_CHACO_KERNIGHAN_LIN:
        chaco->local_method = 1;
        break;
    case MP_CHACO_NONE:
        chaco->local_method = 2;
        break;
    default:
        SETERRQ(PETSC_ERR_ARG_CORRUPT, "Chaco: Unknown or unsupported option");
    }

    return(0);
}

/*@
    MatPartitioningChacoSetCoarseLevel - Sets the coarse level.

  Input Parameters:
+  part - the partitioning context
-  level - the coarse level, in the range [0.0,1.0]

   Level: advanced

@*/
PetscErrorCode MatPartitioningChacoSetCoarseLevel(MatPartitioning part, PetscReal level)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    if (level < 0 || level > 1.0) {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,
            "Chaco: level of coarsening out of range [0.0,1.0]");
    } else
        chaco->nbvtxcoarsed = (int)(part->adj->N * level);

    if (chaco->nbvtxcoarsed < 20)
        chaco->nbvtxcoarsed = 20;

    return(0);
}
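/*
   Worked example (illustrative numbers): with part->adj->N = 1000 vertices,
   level = 0.1 gives nbvtxcoarsed = 100; any level below 0.02 falls under
   the floor above and is clamped to 20 coarse vertices.
*/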

/*@
     MatPartitioningChacoSetEigenSolver - Sets the eigensolver method.

  Input Parameters:
+  part - the partitioning context
-  method - MP_CHACO_LANCZOS or MP_CHACO_RQI_SYMMLQ

   Level: advanced

@*/
PetscErrorCode MatPartitioningChacoSetEigenSolver(MatPartitioning part,
    MPChacoEigenType method)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    switch (method) {
    case MP_CHACO_LANCZOS:
        chaco->rqi_flag = 0;
        break;
    case MP_CHACO_RQI_SYMMLQ:
        chaco->rqi_flag = 1;
        break;
    default:
        SETERRQ(PETSC_ERR_ARG_CORRUPT, "Chaco: Unknown or unsupported option");
    }

    return(0);
}

/*@
     MatPartitioningChacoSetEigenTol - Sets the tolerance for the eigensolver.

  Input Parameters:
+  part - the partitioning context
-  tol - the requested tolerance

   Level: advanced

@*/
PetscErrorCode MatPartitioningChacoSetEigenTol(MatPartitioning part, PetscReal tol)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    if (tol <= 0.0) {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,
            "Chaco: Eigensolver tolerance out of range");
    } else
        chaco->eigtol = tol;

    return(0);
}

/*@
     MatPartitioningChacoSetEigenNumber - Sets the number of eigenvectors used for partitioning.

  Input Parameters:
+  part - the partitioning context
-  num - 1, 2 or 3, indicating partitioning by bisection, quadrisection, or octosection

   Level: advanced

@*/
PetscErrorCode MatPartitioningChacoSetEigenNumber(MatPartitioning part, int num)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    if (num > 3 || num < 1) {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,
            "Chaco: number of eigenvectors out of range");
    } else
        chaco->numbereigen = num;

    return(0);
}
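/*
   For example (illustrative): num = 2 asks Chaco for two eigenvectors, so
   each partitioning step splits into 2^2 = 4 sets (quadrisection); the
   value is passed to Chaco as the ndims argument in
   MatPartitioningApply_Chaco above.
*/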

PetscErrorCode MatPartitioningSetFromOptions_Chaco(MatPartitioning part)
{
    int  i;
    PetscReal r;
    PetscTruth flag;
    const char *global[] =
        { "multilevel-kl", "spectral", "linear", "random", "scattered" };
    const char *local[] = { "kernighan-lin", "none" };
    const char *eigen[] = { "lanczos", "rqi_symmlq" };

    PetscOptionsHead("Set Chaco partitioning options");

    PetscOptionsEList("-mat_partitioning_chaco_global",
        "Global method to use", "MatPartitioningChacoSetGlobal", global, 5,
        global[0], &i, &flag);
    if (flag)
        MatPartitioningChacoSetGlobal(part, (MPChacoGlobalType)i);

    PetscOptionsEList("-mat_partitioning_chaco_local",
        "Local method to use", "MatPartitioningChacoSetLocal", local, 2,
        local[0], &i, &flag);
    if (flag)
        MatPartitioningChacoSetLocal(part, (MPChacoLocalType)i);

    PetscOptionsReal("-mat_partitioning_chaco_coarse_level",
        "Coarse level", "MatPartitioningChacoSetCoarseLevel", 0, &r,
        &flag);
    if (flag)
        MatPartitioningChacoSetCoarseLevel(part, r);

    PetscOptionsEList("-mat_partitioning_chaco_eigen_solver",
        "Eigensolver to use in spectral method", "MatPartitioningChacoSetEigenSolver",
        eigen, 2, eigen[0], &i, &flag);
    if (flag)
        MatPartitioningChacoSetEigenSolver(part, (MPChacoEigenType)i);

    PetscOptionsReal("-mat_partitioning_chaco_eigen_tol",
        "Tolerance for eigensolver", "MatPartitioningChacoSetEigenTol", 0.001,
        &r, &flag);
    if (flag)
        MatPartitioningChacoSetEigenTol(part, r);

    PetscOptionsInt("-mat_partitioning_chaco_eigen_number",
        "Number of eigenvectors: 1, 2, or 3 (bi-, quadri-, or octosection)",
        "MatPartitioningChacoSetEigenNumber", 1, &i, &flag);
    if (flag)
        MatPartitioningChacoSetEigenNumber(part, i);

    PetscOptionsTail();
    return(0);
}
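/*
   An illustrative command line combining the options above (the executable
   name is hypothetical):

       ./app -mat_partitioning_type chaco
             -mat_partitioning_chaco_global spectral
             -mat_partitioning_chaco_eigen_solver rqi_symmlq
             -mat_partitioning_chaco_eigen_tol 1e-4
*/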


PetscErrorCode MatPartitioningDestroy_Chaco(MatPartitioning part)
{
    MatPartitioning_Chaco *chaco = (MatPartitioning_Chaco *) part->data;

    if (chaco->mesg_log) {
        PetscFree(chaco->mesg_log);
    }

    PetscFree(chaco);

    return(0);
}

PetscErrorCode MatPartitioningCreate_Chaco(MatPartitioning part)
{
    MatPartitioning_Chaco *chaco;

    PetscNew(MatPartitioning_Chaco, &chaco);

    chaco->architecture = 1;
    chaco->ndims_tot = 0;
    chaco->mesh_dims[0] = part->n;
    chaco->global_method = 1;
    chaco->local_method = 1;
    chaco->rqi_flag = 0;
    chaco->nbvtxcoarsed = 200;
    chaco->numbereigen = 1;
    chaco->eigtol = 0.001;
    chaco->mesg_log = NULL;

    part->ops->apply = MatPartitioningApply_Chaco;
    part->ops->view = MatPartitioningView_Chaco;
    part->ops->destroy = MatPartitioningDestroy_Chaco;
    part->ops->setfromoptions = MatPartitioningSetFromOptions_Chaco;
    part->data = (void*) chaco;

    return(0);
}