Actual source code: iscoloring.c
petsc-dev 2014-02-02
2: #include <petsc-private/isimpl.h> /*I "petscis.h" I*/
3: #include <petscviewer.h>
5: const char *const ISColoringTypes[] = {"global","ghosted","ISColoringType","IS_COLORING_",0};
9: PetscErrorCode ISColoringReference(ISColoring coloring)
10: {
12: (coloring)->refct++;
13: return(0);
14: }
18: PetscErrorCode ISColoringSetType(ISColoring coloring,ISColoringType type)
19: {
21: (coloring)->ctype = type;
22: return(0);
23: }
27: /*@
28: ISColoringDestroy - Destroys a coloring context.
30: Collective on ISColoring
32: Input Parameter:
33: . iscoloring - the coloring context
35: Level: advanced
37: .seealso: ISColoringView(), MatColoring
38: @*/
39: PetscErrorCode ISColoringDestroy(ISColoring *iscoloring)
40: {
41: PetscInt i;
45: if (!*iscoloring) return(0);
47: if (--(*iscoloring)->refct > 0) {*iscoloring = 0; return(0);}
49: if ((*iscoloring)->is) {
50: for (i=0; i<(*iscoloring)->n; i++) {
51: ISDestroy(&(*iscoloring)->is[i]);
52: }
53: PetscFree((*iscoloring)->is);
54: }
55: PetscFree((*iscoloring)->colors);
56: PetscCommDestroy(&(*iscoloring)->comm);
57: PetscFree((*iscoloring));
58: return(0);
59: }
63: /*
64: ISColoringViewFromOptions - Processes command line options to determine if/how an ISColoring object is to be viewed.
66: Collective on ISColoring
68: Input Parameters:
69: + obj - the ISColoring object
70: . prefix - prefix to use for viewing, or NULL to use prefix of 'mat'
71: - optionname - option to activate viewing
73: Level: intermediate
75: Developer Note: This cannot use PetscObjectViewFromOptions() because ISColoring is not a PetscObject
77: */
78: PetscErrorCode ISColoringViewFromOptions(ISColoring obj,const char prefix[],const char optionname[])
79: {
80: PetscErrorCode ierr;
81: PetscViewer viewer;
82: PetscBool flg;
83: static PetscBool incall = PETSC_FALSE;
84: PetscViewerFormat format;
87: if (incall) return(0);
88: incall = PETSC_TRUE;
89: PetscOptionsGetViewer(obj->comm,prefix,optionname,&viewer,&format,&flg);
90: if (flg) {
91: PetscViewerPushFormat(viewer,format);
92: ISColoringView(obj,viewer);
93: PetscViewerPopFormat(viewer);
94: PetscViewerDestroy(&viewer);
95: }
96: incall = PETSC_FALSE;
97: return(0);
98: }
102: /*@C
103: ISColoringView - Views a coloring context.
105: Collective on ISColoring
107: Input Parameters:
108: + iscoloring - the coloring context
109: - viewer - the viewer
111: Level: advanced
113: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatColoring
114: @*/
115: PetscErrorCode ISColoringView(ISColoring iscoloring,PetscViewer viewer)
116: {
117: PetscInt i;
119: PetscBool iascii;
120: IS *is;
124: if (!viewer) {
125: PetscViewerASCIIGetStdout(iscoloring->comm,&viewer);
126: }
129: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
130: if (iascii) {
131: MPI_Comm comm;
132: PetscMPIInt rank;
133: PetscObjectGetComm((PetscObject)viewer,&comm);
134: MPI_Comm_rank(comm,&rank);
135: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
136: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %d\n",rank,iscoloring->n);
137: PetscViewerFlush(viewer);
138: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
139: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISColoring",((PetscObject)viewer)->type_name);
141: ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
142: for (i=0; i<iscoloring->n; i++) {
143: ISView(iscoloring->is[i],viewer);
144: }
145: ISColoringRestoreIS(iscoloring,&is);
146: return(0);
147: }
151: /*@C
152: ISColoringGetIS - Extracts index sets from the coloring context
154: Collective on ISColoring
156: Input Parameter:
157: . iscoloring - the coloring context
159: Output Parameters:
160: + nn - number of index sets in the coloring context
161: - is - array of index sets
163: Level: advanced
165: .seealso: ISColoringRestoreIS(), ISColoringView()
166: @*/
167: PetscErrorCode ISColoringGetIS(ISColoring iscoloring,PetscInt *nn,IS *isis[])
168: {
174: if (nn) *nn = iscoloring->n;
175: if (isis) {
176: if (!iscoloring->is) {
177: PetscInt *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
178: ISColoringValue *colors = iscoloring->colors;
179: IS *is;
181: #if defined(PETSC_USE_DEBUG)
182: for (i=0; i<n; i++) {
183:       if (((PetscInt)colors[i]) >= nc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coloring is out of range: index %d value %d number of colors %d",(int)i,(int)colors[i],(int)nc);
184: }
185: #endif
187: /* generate the lists of nodes for each color */
188: PetscCalloc1(nc,&mcolors);
189: for (i=0; i<n; i++) mcolors[colors[i]]++;
191: PetscMalloc1(nc,&ii);
192: PetscMalloc1(n,&ii[0]);
193: for (i=1; i<nc; i++) ii[i] = ii[i-1] + mcolors[i-1];
194: PetscMemzero(mcolors,nc*sizeof(PetscInt));
196: if (iscoloring->ctype == IS_COLORING_GLOBAL) {
197: MPI_Scan(&iscoloring->N,&base,1,MPIU_INT,MPI_SUM,iscoloring->comm);
198: base -= iscoloring->N;
199: for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i + base; /* global idx */
200: } else if (iscoloring->ctype == IS_COLORING_GHOSTED) {
201: for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i; /* local idx */
202: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not provided for this ISColoringType type");
204: PetscMalloc1(nc,&is);
205: for (i=0; i<nc; i++) {
206: ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],PETSC_COPY_VALUES,is+i);
207: }
209: iscoloring->is = is;
210: PetscFree(ii[0]);
211: PetscFree(ii);
212: PetscFree(mcolors);
213: }
214: *isis = iscoloring->is;
215: }
216: return(0);
217: }
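/*
   Illustrative usage sketch (not part of iscoloring.c): pull the per-color index
   sets out of an existing ISColoring, view each one, and hand them back.  The
   ISColoring 'coloring' and the PetscViewer 'viewer' are assumed to be supplied
   by the caller; ViewColoringISes is a hypothetical helper name.
*/
PetscErrorCode ViewColoringISes(ISColoring coloring,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscInt       nis,i;
  IS             *isa;

  ierr = ISColoringGetIS(coloring,&nis,&isa);CHKERRQ(ierr);   /* one IS per color */
  for (i=0; i<nis; i++) {
    ierr = ISView(isa[i],viewer);CHKERRQ(ierr);
  }
  ierr = ISColoringRestoreIS(coloring,&isa);CHKERRQ(ierr);
  return 0;
}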
221: /*@C
222: ISColoringRestoreIS - Restores the index sets extracted from the coloring context
224: Collective on ISColoring
226:    Input Parameters:
227: + iscoloring - the coloring context
228: - is - array of index sets
230: Level: advanced
232: .seealso: ISColoringGetIS(), ISColoringView()
233: @*/
234: PetscErrorCode ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
235: {
239: /* currently nothing is done here */
240: return(0);
241: }
246: /*@C
247: ISColoringCreate - Generates an ISColoring context from lists (provided
248: by each processor) of colors for each node.
250: Collective on MPI_Comm
252: Input Parameters:
253: + comm - communicator for the processors creating the coloring
254: . ncolors - max color value
255: . n - number of nodes on this processor
256: -  colors - array containing the colors for this processor; color
257:             numbers begin at 0. In C/C++ this array must have been obtained with PetscMalloc()
258:             and should NOT be freed (ISColoringDestroy() will free it).
260: Output Parameter:
261: . iscoloring - the resulting coloring data structure
263: Options Database Key:
264: . -is_coloring_view - Activates ISColoringView()
266: Level: advanced
268: Notes: By default sets coloring type to IS_COLORING_GLOBAL
270: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()
272: @*/
273: PetscErrorCode ISColoringCreate(MPI_Comm comm,PetscInt ncolors,PetscInt n,const ISColoringValue colors[],ISColoring *iscoloring)
274: {
276: PetscMPIInt size,rank,tag;
277: PetscInt base,top,i;
278: PetscInt nc,ncwork;
279: MPI_Status status;
282: if (ncolors != PETSC_DECIDE && ncolors > IS_COLORING_MAX) {
283:     if (ncolors > 65535) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds 65535 limit. This number is unrealistic. Perhaps a bug in code?\nCurrent max: %d user requested: %d",IS_COLORING_MAX,ncolors);
284:     else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds limit. Perhaps reconfigure PETSc with --with-is-color-value-type=short?\n Current max: %d user requested: %d",IS_COLORING_MAX,ncolors);
285: }
286: PetscNew(iscoloring);
287: PetscCommDuplicate(comm,&(*iscoloring)->comm,&tag);
288: comm = (*iscoloring)->comm;
290: /* compute the number of the first node on my processor */
291: MPI_Comm_size(comm,&size);
293: /* should use MPI_Scan() */
294: MPI_Comm_rank(comm,&rank);
295: if (!rank) {
296: base = 0;
297: top = n;
298: } else {
299: MPI_Recv(&base,1,MPIU_INT,rank-1,tag,comm,&status);
300: top = base+n;
301: }
302: if (rank < size-1) {
303: MPI_Send(&top,1,MPIU_INT,rank+1,tag,comm);
304: }
306: /* compute the total number of colors */
307: ncwork = 0;
308: for (i=0; i<n; i++) {
309: if (ncwork < colors[i]) ncwork = colors[i];
310: }
311: ncwork++;
312: MPI_Allreduce(&ncwork,&nc,1,MPIU_INT,MPI_MAX,comm);
313:   if (nc > ncolors) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of colors passed in %D is less than the actual number of colors in array %D",ncolors,nc);
314: (*iscoloring)->n = nc;
315: (*iscoloring)->is = 0;
316: (*iscoloring)->colors = (ISColoringValue*)colors;
317: (*iscoloring)->N = n;
318: (*iscoloring)->refct = 1;
319: (*iscoloring)->ctype = IS_COLORING_GLOBAL;
320: ISColoringViewFromOptions(*iscoloring,NULL,"-is_coloring_view");
321: PetscInfo1(0,"Number of colors %D\n",nc);
322: return(0);
323: }
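/*
   Illustrative usage sketch (not part of iscoloring.c): build a trivial two-color
   coloring in which local node i gets color i%2, then view and destroy it.  As the
   manual page above notes, the colors array must come from PetscMalloc() because
   the ISColoring takes ownership of it.  CreateTwoColorExample is a hypothetical
   helper name; 'comm' and 'n' are assumed to be supplied by the caller.
*/
PetscErrorCode CreateTwoColorExample(MPI_Comm comm,PetscInt n)
{
  PetscErrorCode  ierr;
  ISColoringValue *colors;
  ISColoring      coloring;
  PetscInt        i;

  ierr = PetscMalloc1(n,&colors);CHKERRQ(ierr);
  for (i=0; i<n; i++) colors[i] = (ISColoringValue)(i % 2);
  ierr = ISColoringCreate(comm,2,n,colors,&coloring);CHKERRQ(ierr);  /* coloring now owns colors */
  ierr = ISColoringView(coloring,PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
  ierr = ISColoringDestroy(&coloring);CHKERRQ(ierr);                 /* frees colors as well */
  return 0;
}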
327: /*@
328: ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
329: generates an IS that contains a new global node number for each index based
330:     on the partitioning.
332: Collective on IS
334:    Input Parameter:
335: . partitioning - a partitioning as generated by MatPartitioningApply()
337: Output Parameter:
338: . is - on each processor the index set that defines the global numbers
339: (in the new numbering) for all the nodes currently (before the partitioning)
340: on that processor
342: Level: advanced
344: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()
346: @*/
347: PetscErrorCode ISPartitioningToNumbering(IS part,IS *is)
348: {
349: MPI_Comm comm;
350: PetscInt i,np,npt,n,*starts = NULL,*sums = NULL,*lsizes = NULL,*newi = NULL;
351: const PetscInt *indices = NULL;
355: PetscObjectGetComm((PetscObject)part,&comm);
357: /* count the number of partitions, i.e., virtual processors */
358: ISGetLocalSize(part,&n);
359: ISGetIndices(part,&indices);
360: np = 0;
361: for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
362: MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
363: np = npt+1; /* so that it looks like a MPI_Comm_size output */
365: /*
366: lsizes - number of elements of each partition on this particular processor
367: sums - total number of "previous" nodes for any particular partition
368: starts - global number of first element in each partition on this processor
369: */
370: PetscMalloc3(np,&lsizes,np,&starts,np,&sums);
371: PetscMemzero(lsizes,np*sizeof(PetscInt));
372: for (i=0; i<n; i++) lsizes[indices[i]]++;
373: MPI_Allreduce(lsizes,sums,np,MPIU_INT,MPI_SUM,comm);
374: MPI_Scan(lsizes,starts,np,MPIU_INT,MPI_SUM,comm);
375: for (i=0; i<np; i++) starts[i] -= lsizes[i];
376: for (i=1; i<np; i++) {
377: sums[i] += sums[i-1];
378: starts[i] += sums[i-1];
379: }
381: /*
382: For each local index give it the new global number
383: */
384: PetscMalloc1(n,&newi);
385: for (i=0; i<n; i++) newi[i] = starts[indices[i]]++;
386: PetscFree3(lsizes,starts,sums);
388: ISRestoreIndices(part,&indices);
389: ISCreateGeneral(comm,n,newi,PETSC_OWN_POINTER,is);
390: ISSetPermutation(*is);
391: return(0);
392: }
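/*
   Illustrative usage sketch (not part of iscoloring.c): the typical call sequence
   around ISPartitioningToNumbering(), starting from a matrix whose rows are to be
   redistributed.  RenumberByPartition is a hypothetical helper name; the matrix
   'A' (typically the adjacency graph, e.g. a MATMPIADJ matrix) is assumed to be
   assembled already.
*/
PetscErrorCode RenumberByPartition(Mat A,IS *isnumb)
{
  PetscErrorCode  ierr;
  MatPartitioning part;
  IS              ispart;

  ierr = MatPartitioningCreate(PetscObjectComm((PetscObject)A),&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,A);CHKERRQ(ierr);
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part,&ispart);CHKERRQ(ierr);        /* ispart[i] = target process of local row i */
  ierr = ISPartitioningToNumbering(ispart,isnumb);CHKERRQ(ierr);  /* new global number for each local row */
  ierr = ISDestroy(&ispart);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  return 0;
}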
396: /*@
397:     ISPartitioningCount - Takes an ISPartitioning and determines the number of
398: resulting elements on each (partition) process
400: Collective on IS
402: Input Parameters:
403: + partitioning - a partitioning as generated by MatPartitioningApply()
404: -  len - length of the array count; this is the total number of partitions
406: Output Parameter:
407: . count - array of length size, to contain the number of elements assigned
408: to each partition, where size is the number of partitions generated
409: (see notes below).
411: Level: advanced
413: Notes:
414: By default the number of partitions generated (and thus the length
415: of count) is the size of the communicator associated with IS,
416:          but it can be set by MatPartitioningSetNParts(). The resulting array
417:          of lengths can, for instance, serve as input to PCBJacobiSetTotalBlocks().
420: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering(),
421: MatPartitioningSetNParts(), MatPartitioningApply()
423: @*/
424: PetscErrorCode ISPartitioningCount(IS part,PetscInt len,PetscInt count[])
425: {
426: MPI_Comm comm;
427: PetscInt i,n,*lsizes;
428: const PetscInt *indices;
430: PetscMPIInt npp;
433: PetscObjectGetComm((PetscObject)part,&comm);
434: if (len == PETSC_DEFAULT) {
435: PetscMPIInt size;
436: MPI_Comm_size(comm,&size);
437: len = (PetscInt) size;
438: }
440: /* count the number of partitions */
441: ISGetLocalSize(part,&n);
442: ISGetIndices(part,&indices);
443: #if defined(PETSC_USE_DEBUG)
444: {
445: PetscInt np = 0,npt;
446: for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
447: MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
448: np = npt+1; /* so that it looks like a MPI_Comm_size output */
449: if (np > len) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Length of count array %D is less than number of partitions %D",len,np);
450: }
451: #endif
453: /*
454: lsizes - number of elements of each partition on this particular processor
455: sums - total number of "previous" nodes for any particular partition
456: starts - global number of first element in each partition on this processor
457: */
458: PetscCalloc1(len,&lsizes);
459: for (i=0; i<n; i++) lsizes[indices[i]]++;
460: ISRestoreIndices(part,&indices);
461: PetscMPIIntCast(len,&npp);
462: MPI_Allreduce(lsizes,count,npp,MPIU_INT,MPI_SUM,comm);
463: PetscFree(lsizes);
464: return(0);
465: }
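/*
   Illustrative usage sketch (not part of iscoloring.c): count how many elements land
   in each partition, e.g. before calling PCBJacobiSetTotalBlocks() as suggested in the
   notes above.  CountPartitionSizes is a hypothetical helper name; 'part' is a
   partitioning IS from MatPartitioningApply() and 'nparts' its number of partitions.
*/
PetscErrorCode CountPartitionSizes(IS part,PetscInt nparts)
{
  PetscErrorCode ierr;
  PetscInt       *count,i;

  ierr = PetscMalloc1(nparts,&count);CHKERRQ(ierr);
  ierr = ISPartitioningCount(part,nparts,count);CHKERRQ(ierr);
  for (i=0; i<nparts; i++) {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"partition %D: %D elements\n",i,count[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(count);CHKERRQ(ierr);
  return 0;
}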
469: /*@
470: ISAllGather - Given an index set (IS) on each processor, generates a large
471: index set (same on each processor) by concatenating together each
472:     processor's index set.
474: Collective on IS
476: Input Parameter:
477: . is - the distributed index set
479: Output Parameter:
480: . isout - the concatenated index set (same on all processors)
482: Notes:
483: ISAllGather() is clearly not scalable for large index sets.
485: The IS created on each processor must be created with a common
486: communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
487: with PETSC_COMM_SELF, this routine will not work as expected, since
488: each process will generate its own new IS that consists only of
489: itself.
491: The communicator for this new IS is PETSC_COMM_SELF
493: Level: intermediate
495: Concepts: gather^index sets
496: Concepts: index sets^gathering to all processors
497: Concepts: IS^gathering to all processors
499: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock()
500: @*/
501: PetscErrorCode ISAllGather(IS is,IS *isout)
502: {
504: PetscInt *indices,n,i,N,step,first;
505: const PetscInt *lindices;
506: MPI_Comm comm;
507: PetscMPIInt size,*sizes = NULL,*offsets = NULL,nn;
508: PetscBool stride;
514: PetscObjectGetComm((PetscObject)is,&comm);
515: MPI_Comm_size(comm,&size);
516: ISGetLocalSize(is,&n);
517: PetscObjectTypeCompare((PetscObject)is,ISSTRIDE,&stride);
518: if (size == 1 && stride) { /* should handle parallel ISStride also */
519: ISStrideGetInfo(is,&first,&step);
520: ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);
521: } else {
522: PetscMalloc2(size,&sizes,size,&offsets);
524: PetscMPIIntCast(n,&nn);
525: MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
526: offsets[0] = 0;
527: for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
528: N = offsets[size-1] + sizes[size-1];
530: PetscMalloc1(N,&indices);
531: ISGetIndices(is,&lindices);
532: MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
533: ISRestoreIndices(is,&lindices);
534: PetscFree2(sizes,offsets);
536: ISCreateGeneral(PETSC_COMM_SELF,N,indices,PETSC_OWN_POINTER,isout);
537: }
538: return(0);
539: }
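/*
   Illustrative usage sketch (not part of iscoloring.c): gather a distributed IS onto
   every process.  The input IS is assumed to live on a parallel communicator such as
   PETSC_COMM_WORLD; the gathered IS lives on PETSC_COMM_SELF, as noted above.
   GatherEverywhere is a hypothetical helper name.
*/
PetscErrorCode GatherEverywhere(IS is)
{
  PetscErrorCode ierr;
  IS             isall;
  PetscInt       N;

  ierr = ISAllGather(is,&isall);CHKERRQ(ierr);
  ierr = ISGetLocalSize(isall,&N);CHKERRQ(ierr);  /* total number of indices, identical on every rank */
  ierr = PetscPrintf(PETSC_COMM_SELF,"gathered %D indices\n",N);CHKERRQ(ierr);
  ierr = ISDestroy(&isall);CHKERRQ(ierr);
  return 0;
}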
543: /*@C
544:     ISAllGatherColors - Given a set of colors on each processor, generates a large
545:     set (same on each processor) by concatenating together each processor's colors
547: Collective on MPI_Comm
549:     Input Parameters:
550: + comm - communicator to share the indices
551: . n - local size of set
552: - lindices - local colors
554:     Output Parameters:
555: + outN - total number of indices
556: - outindices - all of the colors
558: Notes:
559: ISAllGatherColors() is clearly not scalable for large index sets.
562: Level: intermediate
564: Concepts: gather^index sets
565: Concepts: index sets^gathering to all processors
566: Concepts: IS^gathering to all processors
568: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
569: @*/
570: PetscErrorCode ISAllGatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lindices,PetscInt *outN,ISColoringValue *outindices[])
571: {
572: ISColoringValue *indices;
573: PetscErrorCode ierr;
574: PetscInt i,N;
575: PetscMPIInt size,*offsets = NULL,*sizes = NULL, nn = n;
578: MPI_Comm_size(comm,&size);
579: PetscMalloc2(size,&sizes,size,&offsets);
581: MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
582: offsets[0] = 0;
583: for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
584: N = offsets[size-1] + sizes[size-1];
585:   PetscMalloc1((N+1),&indices);
586:   MPI_Allgatherv(lindices,(PetscMPIInt)n,MPIU_COLORING_VALUE,indices,sizes,offsets,MPIU_COLORING_VALUE,comm);
587:   PetscFree2(sizes,offsets);  /* sizes and offsets are still needed by MPI_Allgatherv(), so free them only afterwards */
590: *outindices = indices;
591: if (outN) *outN = N;
592: return(0);
593: }
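/*
   Illustrative usage sketch (not part of iscoloring.c): gather the local colors of
   every process into one array.  GatherColors is a hypothetical helper name; the
   local array 'lcolors' of length 'n' is assumed to be supplied by the caller, and
   the gathered array must be freed with PetscFree().
*/
PetscErrorCode GatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lcolors)
{
  PetscErrorCode  ierr;
  PetscInt        N;
  ISColoringValue *allcolors;

  ierr = ISAllGatherColors(comm,n,lcolors,&N,&allcolors);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"gathered %D colors in total\n",N);CHKERRQ(ierr);
  ierr = PetscFree(allcolors);CHKERRQ(ierr);
  return 0;
}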
597: /*@
598:     ISComplement - Given an index set (IS), generates the complement index set, that is,
599:     all indices that are NOT in the given set.
601: Collective on IS
603:     Input Parameters:
604: + is - the index set
605: . nmin - the first index desired in the local part of the complement
606: -  nmax - the largest index desired in the local part of the complement (note that all indices in is must be greater than or equal to nmin and less than nmax)
608: Output Parameter:
609: . isout - the complement
611: Notes: The communicator for this new IS is the same as for the input IS
613: For a parallel IS, this will generate the local part of the complement on each process
615: To generate the entire complement (on each process) of a parallel IS, first call ISAllGather() and then
616: call this routine.
618: Level: intermediate
620: Concepts: gather^index sets
621: Concepts: index sets^gathering to all processors
622: Concepts: IS^gathering to all processors
624: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
625: @*/
626: PetscErrorCode ISComplement(IS is,PetscInt nmin,PetscInt nmax,IS *isout)
627: {
629: const PetscInt *indices;
630: PetscInt n,i,j,unique,cnt,*nindices;
631: PetscBool sorted;
636: if (nmin < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be negative",nmin);
637: if (nmin > nmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be greater than nmax %D",nmin,nmax);
638: ISSorted(is,&sorted);
639: if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Index set must be sorted");
641: ISGetLocalSize(is,&n);
642: ISGetIndices(is,&indices);
643: #if defined(PETSC_USE_DEBUG)
644: for (i=0; i<n; i++) {
645: if (indices[i] < nmin) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is smaller than minimum given %D",i,indices[i],nmin);
646: if (indices[i] >= nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is larger than maximum given %D",i,indices[i],nmax);
647: }
648: #endif
649: /* Count number of unique entries */
650: unique = (n>0);
651: for (i=0; i<n-1; i++) {
652: if (indices[i+1] != indices[i]) unique++;
653: }
654: PetscMalloc1((nmax-nmin-unique),&nindices);
655: cnt = 0;
656: for (i=nmin,j=0; i<nmax; i++) {
657: if (j<n && i==indices[j]) do { j++; } while (j<n && i==indices[j]);
658: else nindices[cnt++] = i;
659: }
660: if (cnt != nmax-nmin-unique) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of entries found in complement %D does not match expected %D",cnt,nmax-nmin-unique);
661: ISCreateGeneral(PetscObjectComm((PetscObject)is),cnt,nindices,PETSC_OWN_POINTER,isout);
662: ISRestoreIndices(is,&indices);
663: return(0);
664: }
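/*
   Illustrative usage sketch (not part of iscoloring.c): form the local complement of
   a sorted IS within a half-open range [rstart,rend), for instance the local ownership
   range of a vector or matrix.  LocalComplement is a hypothetical helper name;
   'rstart' and 'rend' are assumed to be supplied by the caller.
*/
PetscErrorCode LocalComplement(IS is,PetscInt rstart,PetscInt rend,IS *isc)
{
  PetscErrorCode ierr;

  ierr = ISSort(is);CHKERRQ(ierr);                      /* ISComplement() requires a sorted input IS */
  ierr = ISComplement(is,rstart,rend,isc);CHKERRQ(ierr);
  return 0;
}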