Actual source code: isltog.c
petsc-dev 2014-02-02
2: #include <petsc-private/isimpl.h> /*I "petscis.h" I*/
3: #include <petscsf.h>
4: #include <petscviewer.h>
6: PetscClassId IS_LTOGM_CLASSID;
11: PetscErrorCode ISG2LMapApply(ISLocalToGlobalMapping mapping,PetscInt n,const PetscInt in[],PetscInt out[])
12: {
14: PetscInt i,start,end;
17: if (!mapping->globals) {
18: ISGlobalToLocalMappingApply(mapping,IS_GTOLM_MASK,0,0,0,0);
19: }
20: start = mapping->globalstart;
21: end = mapping->globalend;
22: for (i=0; i<n; i++) {
23: if (in[i] < 0) out[i] = in[i];
24: else if (in[i] < start) out[i] = -1;
25: else if (in[i] > end) out[i] = -1;
26: else out[i] = mapping->globals[in[i] - start];
27: }
28: return(0);
29: }
34: /*@C
35: ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
37: Not Collective
39: Input Parameter:
40: . ltog - local to global mapping
42: Output Parameter:
43: . n - the number of entries in the local mapping
45: Level: advanced
47: Concepts: mapping^local to global
49: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
50: @*/
51: PetscErrorCode ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
52: {
56: *n = mapping->n;
57: return(0);
58: }
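A minimal usage sketch (all sketches on this page assume petscis.h is included, PETSc is initialized, and error checking with CHKERRQ() is omitted for brevity, matching the listing); ltog is assumed to have been created already:

  PetscInt nlocal;

  ISLocalToGlobalMappingGetSize(ltog,&nlocal);   /* nlocal = mapping->n, the number of local entries */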
62: /*@C
63: ISLocalToGlobalMappingView - View a local to global mapping
65: Not Collective
67: Input Parameters:
68: + ltog - local to global mapping
69: - viewer - viewer
71: Level: advanced
73: Concepts: mapping^local to global
75: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
76: @*/
77: PetscErrorCode ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
78: {
79: PetscInt i;
80: PetscMPIInt rank;
81: PetscBool iascii;
86: if (!viewer) {
87: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mapping),&viewer);
88: }
91: MPI_Comm_rank(PetscObjectComm((PetscObject)mapping),&rank);
92: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
93: if (iascii) {
94: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
95: for (i=0; i<mapping->n; i++) {
96: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %D %D\n",rank,i,mapping->indices[i]);
97: }
98: PetscViewerFlush(viewer);
99: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
100: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
101: return(0);
102: }
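For illustration, a hedged sketch of viewing a mapping (ltog assumed to live on PETSC_COMM_WORLD); ASCII is the only viewer type supported above:

  ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_WORLD);  /* each rank prints "[rank] local global" lines */
  ISLocalToGlobalMappingView(ltog,NULL);                       /* NULL falls back to the communicator's stdout viewer */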
106: /*@
107: ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
108: ordering and a global parallel ordering.
110: Not collective
112: Input Parameter:
113: . is - index set containing the global numbers for each local number
115: Output Parameter:
116: . mapping - new mapping data structure
118: Level: advanced
120: Concepts: mapping^local to global
122: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
123: @*/
124: PetscErrorCode ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
125: {
127: PetscInt n,bs;
128: const PetscInt *indices;
129: MPI_Comm comm;
130: PetscBool isblock;
136: PetscObjectGetComm((PetscObject)is,&comm);
137: ISGetLocalSize(is,&n);
138: ISGetIndices(is,&indices);
139: PetscObjectTypeCompare((PetscObject)is,ISBLOCK,&isblock);
140: ISGetBlockSize(is,&bs);
141: if (!isblock || bs == 1) {
142: ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);
143: ISRestoreIndices(is,&indices);
144: } else {
145: ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_OWN_POINTER,mapping);
146: }
147: return(0);
148: }
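A caller-side sketch with made-up indices: list the global number of each local point in an IS, then derive the mapping from it.

  IS                     is;
  ISLocalToGlobalMapping ltog;
  const PetscInt         globals[] = {10,11,25,30};   /* global numbers of local points 0..3 (illustrative) */

  ISCreateGeneral(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&is);
  ISLocalToGlobalMappingCreateIS(is,&ltog);
  ISDestroy(&is);   /* safe: both the IS and the mapping hold their own copies of the indices */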
152: /*@C
153: ISLocalToGlobalMappingCreateSF - Creates a mapping between a local (0 to n)
154: ordering and a global parallel ordering.
156: Collective
158: Input Parameters:
159: + sf - star forest mapping contiguous local indices to (rank, offset)
160: - start - first global index on this process
162: Output Parameter:
163: . mapping - new mapping data structure
165: Level: advanced
167: Concepts: mapping^local to global
169: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
170: @*/
171: PetscErrorCode ISLocalToGlobalMappingCreateSF(PetscSF sf,PetscInt start,ISLocalToGlobalMapping *mapping)
172: {
174: PetscInt i,maxlocal,nroots,nleaves,*globals,*ltog;
175: const PetscInt *ilocal;
176: MPI_Comm comm;
182: PetscObjectGetComm((PetscObject)sf,&comm);
183: PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,NULL);
184: if (ilocal) {
185: for (i=0,maxlocal=0; i<nleaves; i++) maxlocal = PetscMax(maxlocal,ilocal[i]+1);
186: }
187: else maxlocal = nleaves;
188: PetscMalloc1(nroots,&globals);
189: PetscMalloc1(maxlocal,&ltog);
190: for (i=0; i<nroots; i++) globals[i] = start + i;
191: for (i=0; i<maxlocal; i++) ltog[i] = -1;
192: PetscSFBcastBegin(sf,MPIU_INT,globals,ltog);
193: PetscSFBcastEnd(sf,MPIU_INT,globals,ltog);
194: ISLocalToGlobalMappingCreate(comm,maxlocal,ltog,PETSC_OWN_POINTER,mapping);
195: PetscFree(globals);
196: return(0);
197: }
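A sketch of the intended calling pattern, assuming sf already describes which (rank, offset) owns each leaf; start is computed here as an exclusive prefix sum of the root counts, which is one common way to define the first global index owned by a rank:

  ISLocalToGlobalMapping ltog;
  PetscInt               nroots,nleaves,start;

  PetscSFGetGraph(sf,&nroots,&nleaves,NULL,NULL);
  MPI_Scan(&nroots,&start,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)sf));
  start -= nroots;                                  /* first global index owned by this rank */
  ISLocalToGlobalMappingCreateSF(sf,start,&ltog);   /* leaves not reached by the SF map to -1 */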
201: /*@
202: ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
203: ordering and a global parallel ordering.
205: Not Collective, but communicator may have more than one process
207: Input Parameters:
208: + comm - MPI communicator
209: . n - the number of local elements
210: . indices - the global index for each local element; these do not need to be in increasing order (sorted)
211: - mode - see PetscCopyMode
213: Output Parameter:
214: . mapping - new mapping data structure
216: Level: advanced
218: Concepts: mapping^local to global
220: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
221: @*/
222: PetscErrorCode ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
223: {
225: PetscInt *in;
231: *mapping = NULL;
232: ISInitializePackage();
234: PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,"ISLocalToGlobalMapping","Local to global mapping","IS",
235: cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
236: (*mapping)->n = n;
237: /*
238: Do not create the global to local mapping. This is only created if
239: ISGlobalToLocalMapping() is called
240: */
241: (*mapping)->globals = 0;
242: if (mode == PETSC_COPY_VALUES) {
243: PetscMalloc1(n,&in);
244: PetscMemcpy(in,indices,n*sizeof(PetscInt));
245: PetscLogObjectMemory((PetscObject)*mapping,n*sizeof(PetscInt));
246: (*mapping)->indices = in;
247: } else if (mode == PETSC_OWN_POINTER) (*mapping)->indices = (PetscInt*)indices;
248: else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
249: return(0);
250: }
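A minimal sketch of direct creation and cleanup; the indices are invented and, as noted above, need not be sorted:

  ISLocalToGlobalMapping ltog;
  const PetscInt         globals[] = {3,7,2,9};

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);
  /* ... use the mapping, e.g. with ISLocalToGlobalMappingApply() ... */
  ISLocalToGlobalMappingDestroy(&ltog);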
254: /*@
255: ISLocalToGlobalMappingBlock - Creates a blocked index version of an
256: ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
257: and VecSetLocalToGlobalMappingBlock().
259: Not Collective, but communicator may have more than one process
261: Input Parameters:
262: + inmap - original point-wise mapping
263: - bs - block size
265: Output Parameter:
266: . outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
268: Level: advanced
270: Concepts: mapping^local to global
272: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
273: @*/
274: PetscErrorCode ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
275: {
277: PetscInt *ii,i,n;
282: if (bs > 1) {
283: n = inmap->n/bs;
284: if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
285: PetscMalloc1(n,&ii);
286: for (i=0; i<n; i++) ii[i] = inmap->indices[bs*i]/bs;
287: ISLocalToGlobalMappingCreate(PetscObjectComm((PetscObject)inmap),n,ii,PETSC_OWN_POINTER,outmap);
288: } else {
289: PetscObjectReference((PetscObject)inmap);
290: *outmap = inmap;
291: }
292: return(0);
293: }
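For illustration only, a sketch with block size 2; it assumes ltog exists and that each pair of consecutive local points maps to a pair of consecutive global indices, which is what the division by bs above relies on:

  ISLocalToGlobalMapping bmap;

  ISLocalToGlobalMappingBlock(ltog,2,&bmap);   /* bmap indexes blocks, not individual entries */
  /* ... pass bmap to e.g. MatSetLocalToGlobalMappingBlock() ... */
  ISLocalToGlobalMappingDestroy(&bmap);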
297: /*@
298: ISLocalToGlobalMappingUnBlock - Creates a scalar index version of a blocked
299: ISLocalToGlobalMapping
301: Not Collective, but communicator may have more than one process
303: Input Parameters:
304: + inmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
305: - bs - block size
307: Output Parameter:
308: . outmap - pointwise mapping
310: Level: advanced
312: Concepts: mapping^local to global
314: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingBlock()
315: @*/
316: PetscErrorCode ISLocalToGlobalMappingUnBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
317: {
319: PetscInt *ii,i,n;
324: if (bs > 1) {
325: n = inmap->n*bs;
326: PetscMalloc1(n,&ii);
327: for (i=0; i<n; i++) ii[i] = inmap->indices[i/bs]*bs + (i%bs);
328: ISLocalToGlobalMappingCreate(PetscObjectComm((PetscObject)inmap),n,ii,PETSC_OWN_POINTER,outmap);
329: } else {
330: PetscObjectReference((PetscObject)inmap);
331: *outmap = inmap;
332: }
333: return(0);
334: }
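The inverse direction, sketched under the same assumptions: expand the block mapping from the previous sketch back to a pointwise one.

  ISLocalToGlobalMapping pmap;

  ISLocalToGlobalMappingUnBlock(bmap,2,&pmap);   /* point i maps to indices[i/2]*2 + i%2 */
  ISLocalToGlobalMappingDestroy(&pmap);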
338: /*@
339: ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
340: ordering and a global parallel ordering.
342: Not Collective
344: Input Parameter:
345: . mapping - mapping data structure
347: Level: advanced
349: .seealso: ISLocalToGlobalMappingCreate()
350: @*/
351: PetscErrorCode ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping *mapping)
352: {
356: if (!*mapping) return(0);
358: if (--((PetscObject)(*mapping))->refct > 0) {*mapping = 0;return(0);}
359: PetscFree((*mapping)->indices);
360: PetscFree((*mapping)->globals);
361: PetscHeaderDestroy(mapping);
362: *mapping = 0;
363: return(0);
364: }
368: /*@
369: ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
370: a new index set using the global numbering defined in an ISLocalToGlobalMapping
371: context.
373: Not collective
375: Input Parameters:
376: + mapping - mapping between local and global numbering
377: - is - index set in local numbering
379: Output Parameter:
380: . newis - index set in global numbering
382: Level: advanced
384: Concepts: mapping^local to global
386: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
387: ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
388: @*/
389: PetscErrorCode ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
390: {
392: PetscInt n,i,*idxmap,*idxout,Nmax = mapping->n;
393: const PetscInt *idxin;
400: ISGetLocalSize(is,&n);
401: ISGetIndices(is,&idxin);
402: idxmap = mapping->indices;
404: PetscMalloc1(n,&idxout);
405: for (i=0; i<n; i++) {
406: if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %D too large %D (max) at %D",idxin[i],Nmax-1,i);
407: idxout[i] = idxmap[idxin[i]];
408: }
409: ISRestoreIndices(is,&idxin);
410: ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);
411: return(0);
412: }
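A hedged sketch, assuming ltog has at least five local entries: translate a local stride IS into the global numbering.

  IS locis,globis;

  ISCreateStride(PETSC_COMM_SELF,5,0,1,&locis);        /* local indices 0,1,2,3,4 */
  ISLocalToGlobalMappingApplyIS(ltog,locis,&globis);   /* globis holds the corresponding global numbers */
  ISDestroy(&locis);
  ISDestroy(&globis);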
416: /*@
417: ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
418: and converts them to the global numbering.
420: Not collective
422: Input Parameters:
423: + mapping - the local to global mapping context
424: . N - number of integers
425: - in - input indices in local numbering
427: Output Parameter:
428: . out - indices in global numbering
430: Notes:
431: The in and out array parameters may be identical.
433: Level: advanced
435: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
436: ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
437: AOPetscToApplication(), ISGlobalToLocalMappingApply()
439: Concepts: mapping^local to global
440: @*/
441: PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,const PetscInt in[],PetscInt out[])
442: {
443: PetscInt i,Nmax = mapping->n;
444: const PetscInt *idx = mapping->indices;
447: for (i=0; i<N; i++) {
448: if (in[i] < 0) {
449: out[i] = in[i];
450: continue;
451: }
452: if (in[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %D too large %D (max) at %D",in[i],Nmax,i);
453: out[i] = idx[in[i]];
454: }
455: return(0);
456: }
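A small sketch of in-place translation (the in and out arrays may be identical, per the note above); negative entries pass through unchanged:

  PetscInt idx[3] = {0,2,-1};

  ISLocalToGlobalMappingApply(ltog,3,idx,idx);
  /* idx[0] and idx[1] are now global numbers; idx[2] stays -1 */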
458: /* -----------------------------------------------------------------------------------------*/
462: /*
463: Creates the global fields in the ISLocalToGlobalMapping structure
464: */
465: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
466: {
468: PetscInt i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
471: end = 0;
472: start = PETSC_MAX_INT;
474: for (i=0; i<n; i++) {
475: if (idx[i] < 0) continue;
476: if (idx[i] < start) start = idx[i];
477: if (idx[i] > end) end = idx[i];
478: }
479: if (start > end) {start = 0; end = -1;}
480: mapping->globalstart = start;
481: mapping->globalend = end;
483: PetscMalloc1((end-start+2),&globals);
484: mapping->globals = globals;
485: for (i=0; i<end-start+1; i++) globals[i] = -1;
486: for (i=0; i<n; i++) {
487: if (idx[i] < 0) continue;
488: globals[idx[i] - start] = i;
489: }
491: PetscLogObjectMemory((PetscObject)mapping,(end-start+1)*sizeof(PetscInt));
492: return(0);
493: }
497: /*@
498: ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
499: specified with a global numbering.
501: Not collective
503: Input Parameters:
504: + mapping - mapping between local and global numbering
505: . type - IS_GTOLM_MASK - replaces global indices with no local value with -1
506: IS_GTOLM_DROP - drops the indices with no local value from the output list
507: . n - number of global indices to map
508: - idx - global indices to map
510: Output Parameters:
511: + nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
512: - idxout - local index of each global index; one must pass in an array long enough
513: to hold all the indices. You can call ISGlobalToLocalMappingApply() with
514: idxout == NULL to determine the required length (returned in nout)
515: and then allocate the required space and call ISGlobalToLocalMappingApply()
516: a second time to set the values.
518: Notes:
519: Either nout or idxout may be NULL. idx and idxout may be identical.
521: This is not scalable in memory usage. Each processor requires O(Nglobal) size
522: array to compute these.
524: Level: advanced
526: Developer Note: The manual page states that idx and idxout may be identical but the calling
527: sequence declares idx as const so it cannot be the same as idxout.
529: Concepts: mapping^global to local
531: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
532: ISLocalToGlobalMappingDestroy()
533: @*/
534: PetscErrorCode ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
535: PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
536: {
537: PetscInt i,*globals,nf = 0,tmp,start,end;
542: if (!mapping->globals) {
543: ISGlobalToLocalMappingSetUp_Private(mapping);
544: }
545: globals = mapping->globals;
546: start = mapping->globalstart;
547: end = mapping->globalend;
549: if (type == IS_GTOLM_MASK) {
550: if (idxout) {
551: for (i=0; i<n; i++) {
552: if (idx[i] < 0) idxout[i] = idx[i];
553: else if (idx[i] < start) idxout[i] = -1;
554: else if (idx[i] > end) idxout[i] = -1;
555: else idxout[i] = globals[idx[i] - start];
556: }
557: }
558: if (nout) *nout = n;
559: } else {
560: if (idxout) {
561: for (i=0; i<n; i++) {
562: if (idx[i] < 0) continue;
563: if (idx[i] < start) continue;
564: if (idx[i] > end) continue;
565: tmp = globals[idx[i] - start];
566: if (tmp < 0) continue;
567: idxout[nf++] = tmp;
568: }
569: } else {
570: for (i=0; i<n; i++) {
571: if (idx[i] < 0) continue;
572: if (idx[i] < start) continue;
573: if (idx[i] > end) continue;
574: tmp = globals[idx[i] - start];
575: if (tmp < 0) continue;
576: nf++;
577: }
578: }
579: if (nout) *nout = nf;
580: }
581: return(0);
582: }
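A sketch of the two-pass pattern described above: call once with idxout == NULL to obtain the count, then allocate and call again; gidx is a hypothetical list of global numbers.

  PetscInt nout,*lidx;
  PetscInt gidx[3] = {30,10,99};

  ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,gidx,&nout,NULL);
  PetscMalloc1(nout,&lidx);
  ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,gidx,&nout,lidx);
  /* lidx[0..nout-1] are the local numbers of the gidx entries present in the mapping */
  PetscFree(lidx);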
586: /*@C
587: ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
588: each index shared by more than one processor
590: Collective on ISLocalToGlobalMapping
592: Input Parameter:
593: . mapping - the mapping from local to global indexing
595: Output Parameters:
596: + nproc - number of processors that are connected to this one
597: . proc - neighboring processors
598: . numproc - number of indices for each subdomain (processor)
599: - indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)
601: Level: advanced
603: Concepts: mapping^local to global
605: Fortran Usage:
606: $ ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by
607: $ ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
608: PetscInt indices[nproc][numprocmax],ierr)
609: There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and
610: indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.
613: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
614: ISLocalToGlobalMappingRestoreInfo()
615: @*/
616: PetscErrorCode ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
617: {
619: PetscMPIInt size,rank,tag1,tag2,tag3,*len,*source,imdex;
620: PetscInt i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
621: PetscInt *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
622: PetscInt cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
623: PetscInt node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
624: PetscInt first_procs,first_numprocs,*first_indices;
625: MPI_Request *recv_waits,*send_waits;
626: MPI_Status recv_status,*send_status,*recv_statuses;
627: MPI_Comm comm;
628: PetscBool debug = PETSC_FALSE;
632: PetscObjectGetComm((PetscObject)mapping,&comm);
633: MPI_Comm_size(comm,&size);
634: MPI_Comm_rank(comm,&rank);
635: if (size == 1) {
636: *nproc = 0;
637: *procs = NULL;
638: PetscMalloc(sizeof(PetscInt),numprocs);
639: (*numprocs)[0] = 0;
640: PetscMalloc(sizeof(PetscInt*),indices);
641: (*indices)[0] = NULL;
642: return(0);
643: }
645: PetscOptionsGetBool(NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,NULL);
647: /*
648: Notes on ISLocalToGlobalMappingGetInfo
650: globally owned node - the nodes that have been assigned to this processor in global
651: numbering, just for this routine.
653: nontrivial globally owned node - node assigned to this processor that is on a subdomain
654: boundary (i.e. it has more than one local owner)
656: locally owned node - node that exists on this processor's subdomain
658: nontrivial locally owned node - node that is not in the interior (i.e. has more than one
659: local subdomain)
660: */
661: PetscObjectGetNewTag((PetscObject)mapping,&tag1);
662: PetscObjectGetNewTag((PetscObject)mapping,&tag2);
663: PetscObjectGetNewTag((PetscObject)mapping,&tag3);
665: for (i=0; i<n; i++) {
666: if (lindices[i] > max) max = lindices[i];
667: }
668: MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
669: Ng++;
670: MPI_Comm_size(comm,&size);
671: MPI_Comm_rank(comm,&rank);
672: scale = Ng/size + 1;
673: ng = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
674: rstart = scale*rank;
676: /* determine ownership ranges of global indices */
677: PetscMalloc1(2*size,&nprocs);
678: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
680: /* determine owners of each local node */
681: PetscMalloc1(n,&owner);
682: for (i=0; i<n; i++) {
683: proc = lindices[i]/scale; /* processor that globally owns this index */
684: nprocs[2*proc+1] = 1; /* processor globally owns at least one of ours */
685: owner[i] = proc;
686: nprocs[2*proc]++; /* count of how many that processor globally owns of ours */
687: }
688: nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
689: PetscInfo1(mapping,"Number of global owners for my local data %D\n",nsends);
691: /* inform other processors of number of messages and max length*/
692: PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
693: PetscInfo1(mapping,"Number of local owners for my global data %D\n",nrecvs);
695: /* post receives for owned rows */
696: PetscMalloc1((2*nrecvs+1)*(nmax+1),&recvs);
697: PetscMalloc1((nrecvs+1),&recv_waits);
698: for (i=0; i<nrecvs; i++) {
699: MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
700: }
702: /* pack messages containing lists of local nodes to owners */
703: PetscMalloc1((2*n+1),&sends);
704: PetscMalloc1((size+1),&starts);
705: starts[0] = 0;
706: for (i=1; i<size; i++) starts[i] = starts[i-1] + 2*nprocs[2*i-2];
707: for (i=0; i<n; i++) {
708: sends[starts[owner[i]]++] = lindices[i];
709: sends[starts[owner[i]]++] = i;
710: }
711: PetscFree(owner);
712: starts[0] = 0;
713: for (i=1; i<size; i++) starts[i] = starts[i-1] + 2*nprocs[2*i-2];
715: /* send the messages */
716: PetscMalloc1((nsends+1),&send_waits);
717: PetscMalloc1((nsends+1),&dest);
718: cnt = 0;
719: for (i=0; i<size; i++) {
720: if (nprocs[2*i]) {
721: MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
722: dest[cnt] = i;
723: cnt++;
724: }
725: }
726: PetscFree(starts);
728: /* wait on receives */
729: PetscMalloc1((nrecvs+1),&source);
730: PetscMalloc1((nrecvs+1),&len);
731: cnt = nrecvs;
732: PetscMalloc1((ng+1),&nownedsenders);
733: PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
734: while (cnt) {
735: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
736: /* unpack receives into our local space */
737: MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
738: source[imdex] = recv_status.MPI_SOURCE;
739: len[imdex] = len[imdex]/2;
740: /* count how many local owners for each of my global owned indices */
741: for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
742: cnt--;
743: }
744: PetscFree(recv_waits);
746: /* count the globally owned indices that lie on a subdomain boundary (nowned) and the total number of local owners over all of them (nownedm) */
747: nowned = 0;
748: nownedm = 0;
749: for (i=0; i<ng; i++) {
750: if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
751: }
753: /* create single array to contain rank of all local owners of each globally owned index */
754: PetscMalloc1((nownedm+1),&ownedsenders);
755: PetscMalloc1((ng+1),&starts);
756: starts[0] = 0;
757: for (i=1; i<ng; i++) {
758: if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
759: else starts[i] = starts[i-1];
760: }
762: /* for each nontrivial globally owned node list all arriving processors */
763: for (i=0; i<nrecvs; i++) {
764: for (j=0; j<len[i]; j++) {
765: node = recvs[2*i*nmax+2*j]-rstart;
766: if (nownedsenders[node] > 1) ownedsenders[starts[node]++] = source[i];
767: }
768: }
770: if (debug) { /* ----------------------------------- */
771: starts[0] = 0;
772: for (i=1; i<ng; i++) {
773: if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
774: else starts[i] = starts[i-1];
775: }
776: for (i=0; i<ng; i++) {
777: if (nownedsenders[i] > 1) {
778: PetscSynchronizedPrintf(comm,"[%d] global node %D local owner processors: ",rank,i+rstart);
779: for (j=0; j<nownedsenders[i]; j++) {
780: PetscSynchronizedPrintf(comm,"%D ",ownedsenders[starts[i]+j]);
781: }
782: PetscSynchronizedPrintf(comm,"\n");
783: }
784: }
785: PetscSynchronizedFlush(comm,PETSC_STDOUT);
786: } /* ----------------------------------- */
788: /* wait on original sends */
789: if (nsends) {
790: PetscMalloc1(nsends,&send_status);
791: MPI_Waitall(nsends,send_waits,send_status);
792: PetscFree(send_status);
793: }
794: PetscFree(send_waits);
795: PetscFree(sends);
796: PetscFree(nprocs);
798: /* pack messages to send back to local owners */
799: starts[0] = 0;
800: for (i=1; i<ng; i++) {
801: if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
802: else starts[i] = starts[i-1];
803: }
804: nsends2 = nrecvs;
805: PetscMalloc1((nsends2+1),&nprocs); /* length of each message */
806: for (i=0; i<nrecvs; i++) {
807: nprocs[i] = 1;
808: for (j=0; j<len[i]; j++) {
809: node = recvs[2*i*nmax+2*j]-rstart;
810: if (nownedsenders[node] > 1) nprocs[i] += 2 + nownedsenders[node];
811: }
812: }
813: nt = 0;
814: for (i=0; i<nsends2; i++) nt += nprocs[i];
816: PetscMalloc1((nt+1),&sends2);
817: PetscMalloc1((nsends2+1),&starts2);
819: starts2[0] = 0;
820: for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
821: /*
822: Each message is 1 + nprocs[i] long, and consists of
823: (0) the number of nodes being sent back
824: (1) the local node number,
825: (2) the number of processors sharing it,
826: (3) the processors sharing it
827: */
828: for (i=0; i<nsends2; i++) {
829: cnt = 1;
830: sends2[starts2[i]] = 0;
831: for (j=0; j<len[i]; j++) {
832: node = recvs[2*i*nmax+2*j]-rstart;
833: if (nownedsenders[node] > 1) {
834: sends2[starts2[i]]++;
835: sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
836: sends2[starts2[i]+cnt++] = nownedsenders[node];
837: PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
838: cnt += nownedsenders[node];
839: }
840: }
841: }
843: /* receive the message lengths */
844: nrecvs2 = nsends;
845: PetscMalloc1((nrecvs2+1),&lens2);
846: PetscMalloc1((nrecvs2+1),&starts3);
847: PetscMalloc1((nrecvs2+1),&recv_waits);
848: for (i=0; i<nrecvs2; i++) {
849: MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
850: }
852: /* send the message lengths */
853: for (i=0; i<nsends2; i++) {
854: MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
855: }
857: /* wait on receives of lens */
858: if (nrecvs2) {
859: PetscMalloc1(nrecvs2,&recv_statuses);
860: MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
861: PetscFree(recv_statuses);
862: }
863: PetscFree(recv_waits);
865: starts3[0] = 0;
866: nt = 0;
867: for (i=0; i<nrecvs2-1; i++) {
868: starts3[i+1] = starts3[i] + lens2[i];
869: nt += lens2[i];
870: }
871: if (nrecvs2) nt += lens2[nrecvs2-1];
873: PetscMalloc1((nt+1),&recvs2);
874: PetscMalloc1((nrecvs2+1),&recv_waits);
875: for (i=0; i<nrecvs2; i++) {
876: MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
877: }
879: /* send the messages */
880: PetscMalloc1((nsends2+1),&send_waits);
881: for (i=0; i<nsends2; i++) {
882: MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
883: }
885: /* wait on receives */
886: if (nrecvs2) {
887: PetscMalloc1(nrecvs2,&recv_statuses);
888: MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
889: PetscFree(recv_statuses);
890: }
891: PetscFree(recv_waits);
892: PetscFree(nprocs);
894: if (debug) { /* ----------------------------------- */
895: cnt = 0;
896: for (i=0; i<nrecvs2; i++) {
897: nt = recvs2[cnt++];
898: for (j=0; j<nt; j++) {
899: PetscSynchronizedPrintf(comm,"[%d] local node %D number of subdomains %D: ",rank,recvs2[cnt],recvs2[cnt+1]);
900: for (k=0; k<recvs2[cnt+1]; k++) {
901: PetscSynchronizedPrintf(comm,"%D ",recvs2[cnt+2+k]);
902: }
903: cnt += 2 + recvs2[cnt+1];
904: PetscSynchronizedPrintf(comm,"\n");
905: }
906: }
907: PetscSynchronizedFlush(comm,PETSC_STDOUT);
908: } /* ----------------------------------- */
910: /* count number of subdomains for each local node */
911: PetscMalloc1(size,&nprocs);
912: PetscMemzero(nprocs,size*sizeof(PetscInt));
913: cnt = 0;
914: for (i=0; i<nrecvs2; i++) {
915: nt = recvs2[cnt++];
916: for (j=0; j<nt; j++) {
917: for (k=0; k<recvs2[cnt+1]; k++) nprocs[recvs2[cnt+2+k]]++;
918: cnt += 2 + recvs2[cnt+1];
919: }
920: }
921: nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
922: *nproc = nt;
923: PetscMalloc1((nt+1),procs);
924: PetscMalloc1((nt+1),numprocs);
925: PetscMalloc1((nt+1),indices);
926: for (i=0;i<nt+1;i++) (*indices)[i]=NULL;
927: PetscMalloc1(size,&bprocs);
928: cnt = 0;
929: for (i=0; i<size; i++) {
930: if (nprocs[i] > 0) {
931: bprocs[i] = cnt;
932: (*procs)[cnt] = i;
933: (*numprocs)[cnt] = nprocs[i];
934: PetscMalloc1(nprocs[i],&(*indices)[cnt]);
935: cnt++;
936: }
937: }
939: /* make the list of subdomains for each nontrivial local node */
940: PetscMemzero(*numprocs,nt*sizeof(PetscInt));
941: cnt = 0;
942: for (i=0; i<nrecvs2; i++) {
943: nt = recvs2[cnt++];
944: for (j=0; j<nt; j++) {
945: for (k=0; k<recvs2[cnt+1]; k++) (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
946: cnt += 2 + recvs2[cnt+1];
947: }
948: }
949: PetscFree(bprocs);
950: PetscFree(recvs2);
952: /* sort the node indices by their global numbers */
953: nt = *nproc;
954: for (i=0; i<nt; i++) {
955: PetscMalloc1(((*numprocs)[i]),&tmp);
956: for (j=0; j<(*numprocs)[i]; j++) tmp[j] = lindices[(*indices)[i][j]];
957: PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
958: PetscFree(tmp);
959: }
961: if (debug) { /* ----------------------------------- */
962: nt = *nproc;
963: for (i=0; i<nt; i++) {
964: PetscSynchronizedPrintf(comm,"[%d] subdomain %D number of indices %D: ",rank,(*procs)[i],(*numprocs)[i]);
965: for (j=0; j<(*numprocs)[i]; j++) {
966: PetscSynchronizedPrintf(comm,"%D ",(*indices)[i][j]);
967: }
968: PetscSynchronizedPrintf(comm,"\n");
969: }
970: PetscSynchronizedFlush(comm,PETSC_STDOUT);
971: } /* ----------------------------------- */
973: /* wait on sends */
974: if (nsends2) {
975: PetscMalloc1(nsends2,&send_status);
976: MPI_Waitall(nsends2,send_waits,send_status);
977: PetscFree(send_status);
978: }
980: PetscFree(starts3);
981: PetscFree(dest);
982: PetscFree(send_waits);
984: PetscFree(nownedsenders);
985: PetscFree(ownedsenders);
986: PetscFree(starts);
987: PetscFree(starts2);
988: PetscFree(lens2);
990: PetscFree(source);
991: PetscFree(len);
992: PetscFree(recvs);
993: PetscFree(nprocs);
994: PetscFree(sends2);
996: /* put the information about myself as the first entry in the list */
997: first_procs = (*procs)[0];
998: first_numprocs = (*numprocs)[0];
999: first_indices = (*indices)[0];
1000: for (i=0; i<*nproc; i++) {
1001: if ((*procs)[i] == rank) {
1002: (*procs)[0] = (*procs)[i];
1003: (*numprocs)[0] = (*numprocs)[i];
1004: (*indices)[0] = (*indices)[i];
1005: (*procs)[i] = first_procs;
1006: (*numprocs)[i] = first_numprocs;
1007: (*indices)[i] = first_indices;
1008: break;
1009: }
1010: }
1011: return(0);
1012: }
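A caller-side sketch of the Get/Restore pair (collective on the mapping's communicator): print each neighboring rank and how many indices are shared with it.

  PetscInt nproc,*procs,*numprocs,**indices,i;

  ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);
  for (i=0; i<nproc; i++) {
    PetscPrintf(PETSC_COMM_SELF,"neighbor %D shares %D indices\n",procs[i],numprocs[i]);
  }
  ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);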
1016: /*@C
1017: ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
1019: Collective on ISLocalToGlobalMapping
1021: Input Parameter:
1022: . mapping - the mapping from local to global indexing
1024: Output Parameters:
1025: + nproc - number of processors that are connected to this one
1026: . proc - neighboring processors
1027: . numproc - number of indices for each processor
1028: - indices - indices of local nodes shared with neighbor (sorted by global numbering)
1030: Level: advanced
1032: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
1033: ISLocalToGlobalMappingGetInfo()
1034: @*/
1035: PetscErrorCode ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
1036: {
1038: PetscInt i;
1041: PetscFree(*procs);
1042: PetscFree(*numprocs);
1043: if (*indices) {
1044: PetscFree((*indices)[0]);
1045: for (i=1; i<*nproc; i++) {
1046: PetscFree((*indices)[i]);
1047: }
1048: PetscFree(*indices);
1049: }
1050: return(0);
1051: }
1055: /*@C
1056: ISLocalToGlobalMappingGetIndices - Get global indices for every local point
1058: Not Collective
1060: Input Arguments:
1061: . ltog - local to global mapping
1063: Output Arguments:
1064: . array - array of indices
1066: Level: advanced
1068: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingRestoreIndices()
1069: @*/
1070: PetscErrorCode ISLocalToGlobalMappingGetIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1071: {
1075: *array = ltog->indices;
1076: return(0);
1077: }
1081: /*@C
1082: ISLocalToGlobalMappingRestoreIndices - Restore indices obtained with ISLocalToGlobalMappingGetIndices()
1084: Not Collective
1086: Input Arguments:
1087: + ltog - local to global mapping
1088: - array - array of indices
1090: Level: advanced
1092: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingGetIndices()
1093: @*/
1094: PetscErrorCode ISLocalToGlobalMappingRestoreIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1095: {
1099: if (*array != ltog->indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Trying to return mismatched pointer");
1100: *array = NULL;
1101: return(0);
1102: }
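A sketch of read-only access to the raw table via the Get/Restore pair above:

  const PetscInt *idx;
  PetscInt       n;

  ISLocalToGlobalMappingGetSize(ltog,&n);
  ISLocalToGlobalMappingGetIndices(ltog,&idx);
  /* idx[i] is the global number of local point i, for i = 0..n-1; do not modify or free */
  ISLocalToGlobalMappingRestoreIndices(ltog,&idx);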
1106: /*@C
1107: ISLocalToGlobalMappingConcatenate - Create a new mapping that concatenates a list of mappings
1109: Not Collective
1111: Input Arguments:
1112: + comm - communicator for the new mapping, must contain the communicator of every mapping to concatenate
1113: . n - number of mappings to concatenate
1114: - ltogs - local to global mappings
1116: Output Arguments:
1117: . ltogcat - new mapping
1119: Level: advanced
1121: .seealso: ISLocalToGlobalMappingCreate()
1122: @*/
1123: PetscErrorCode ISLocalToGlobalMappingConcatenate(MPI_Comm comm,PetscInt n,const ISLocalToGlobalMapping ltogs[],ISLocalToGlobalMapping *ltogcat)
1124: {
1125: PetscInt i,cnt,m,*idx;
1129: if (n < 0) SETERRQ1(comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have a non-negative number of mappings, given %D",n);
1133: for (cnt=0,i=0; i<n; i++) {
1134: ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1135: cnt += m;
1136: }
1137: PetscMalloc1(cnt,&idx);
1138: for (cnt=0,i=0; i<n; i++) {
1139: const PetscInt *subidx;
1140: ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1141: ISLocalToGlobalMappingGetIndices(ltogs[i],&subidx);
1142: PetscMemcpy(&idx[cnt],subidx,m*sizeof(PetscInt));
1143: ISLocalToGlobalMappingRestoreIndices(ltogs[i],&subidx);
1144: cnt += m;
1145: }
1146: ISLocalToGlobalMappingCreate(comm,cnt,idx,PETSC_OWN_POINTER,ltogcat);
1147: return(0);
1148: }
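A final sketch, assuming mappings part0 and part1 already exist on PETSC_COMM_WORLD; the concatenated mapping lists part0's indices followed by part1's, as built by the copy loop above.

  ISLocalToGlobalMapping parts[2],cat;

  parts[0] = part0; parts[1] = part1;
  ISLocalToGlobalMappingConcatenate(PETSC_COMM_WORLD,2,parts,&cat);
  ISLocalToGlobalMappingDestroy(&cat);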