Actual source code: isltog.c

  1: /*$Id: isltog.c,v 1.65 2001/05/21 14:16:29 bsmith Exp $*/

 3:  #include "petscsys.h"
 4:  #include "src/vec/is/isimpl.h"

  6: EXTERN int VecInitializePackage(char *);

  8: /*@C
  9:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 11:     Not Collective

 13:     Input Parameter:
 14: .   ltog - local to global mapping

 16:     Output Parameter:
 17: .   n - the number of entries in the local mapping

 19:     Level: advanced

 21:     Concepts: mapping^local to global

 23: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 24: @*/
 25: int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
 26: {
 29:   *n = mapping->n;
 30:   return(0);
 31: }

 33: /*@C
 34:     ISLocalToGlobalMappingView - View a local to global mapping

 36:     Not Collective

 38:     Input Parameters:
 39: +   ltog - local to global mapping
 40: -   viewer - viewer

 42:     Level: advanced

 44:     Concepts: mapping^local to global

 46: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 47: @*/
 48: int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 49: {
 50:   int        i,ierr,rank;
 51:   PetscTruth isascii;

 55:   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);

 58:   MPI_Comm_rank(mapping->comm,&rank);
 59:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
 60:   if (isascii) {
 61:     for (i=0; i<mapping->n; i++) {
 62:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 63:     }
 64:     PetscViewerFlush(viewer);
 65:   } else {
 66:     SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 67:   }

 69:   return(0);
 70: }
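
/*
   Editor's note: the following is an illustrative usage sketch, not part of isltog.c.
   It shows how a mapping might be inspected with ISLocalToGlobalMappingGetSize() and
   ISLocalToGlobalMappingView(); the function name ExampleViewMapping and the index
   values are hypothetical.
*/
static int ExampleViewMapping(MPI_Comm comm)
{
  ISLocalToGlobalMapping ltog;
  int                    ierr,nlocal,indices[4] = {0,3,9,12};

  ierr = ISLocalToGlobalMappingCreate(comm,4,indices,&ltog);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetSize(ltog,&nlocal);CHKERRQ(ierr);        /* nlocal is 4 */
  ierr = ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  return 0;
}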

 72: /*@C
 73:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 74:     ordering and a global parallel ordering.

 76:     Not collective

 78:     Input Parameter:
 79: .   is - index set containing the global numbers for each local index

 81:     Output Parameter:
 82: .   mapping - new mapping data structure

 84:     Level: advanced

 86:     Concepts: mapping^local to global

 88: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 89: @*/
 90: int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
 91: {
 92:   int      n,*indices,ierr;
 93:   MPI_Comm comm;


 98:   PetscObjectGetComm((PetscObject)is,&comm);
 99:   ISGetLocalSize(is,&n);
100:   ISGetIndices(is,&indices);
101:   ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
102:   ISRestoreIndices(is,&indices);

104:   return(0);
105: }
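
/*
   Editor's note: an illustrative sketch, not part of isltog.c, of building a mapping from
   an index set with ISLocalToGlobalMappingCreateIS(). The function name and index values
   are hypothetical; since the mapping copies the indices, the IS can be destroyed right away.
*/
static int ExampleCreateMappingFromIS(MPI_Comm comm)
{
  IS                     is;
  ISLocalToGlobalMapping ltog;
  int                    ierr,globals[4] = {10,11,20,21};  /* global number of each local entry */

  ierr = ISCreateGeneral(comm,4,globals,&is);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr);
  ierr = ISDestroy(is);CHKERRQ(ierr);                      /* the mapping keeps its own copy */
  ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  return 0;
}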

107: /*@C
108:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
109:     ordering and a global parallel ordering.

111:     Not Collective, but communicator may have more than one process

113:     Input Parameters:
114: +   comm - MPI communicator
115: .   n - the number of local elements
116: -   indices - the global index for each local element

118:     Output Parameter:
119: .   mapping - new mapping data structure

121:     Level: advanced

123:     Concepts: mapping^local to global

125: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
126: @*/
127: int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
128: {

134:   *mapping = PETSC_NULL;
135: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
136:   VecInitializePackage(PETSC_NULL);
137: #endif

139:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
140:                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
141:   PetscLogObjectCreate(*mapping);
142:   PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));

144:   (*mapping)->n       = n;
145:   PetscMalloc((n+1)*sizeof(int),&(*mapping)->indices);
146:   PetscMemcpy((*mapping)->indices,indices,n*sizeof(int));

148:   /*
149:       Do not create the global to local mapping. This is only created if 
150:      ISGlobalToLocalMappingApply() is called
151:   */
152:   (*mapping)->globals = 0;
153:   return(0);
154: }
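
/*
   Editor's note: an illustrative sketch, not part of isltog.c. Because
   ISLocalToGlobalMappingCreate() copies the indices with PetscMemcpy(), the caller may
   free its own array immediately after the call. The function name and the even-number
   global numbering are hypothetical.
*/
static int ExampleCreateMapping(MPI_Comm comm,int n)
{
  ISLocalToGlobalMapping ltog;
  int                    ierr,i,*idx;

  ierr = PetscMalloc((n+1)*sizeof(int),&idx);CHKERRQ(ierr);
  for (i=0; i<n; i++) idx[i] = 2*i;                 /* local index i maps to global index 2*i */
  ierr = ISLocalToGlobalMappingCreate(comm,n,idx,&ltog);CHKERRQ(ierr);
  ierr = PetscFree(idx);CHKERRQ(ierr);              /* safe: the mapping holds a copy */
  ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  return 0;
}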

156: /*@C
157:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
158:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
159:        and VecSetLocalToGlobalMappingBlock().

161:     Not Collective, but communicator may have more than one process

163:     Input Parameters:
164: +    inmap - original point-wise mapping
165: -    bs - block size

167:     Output Parameter:
168: .   outmap - block based mapping

170:     Level: advanced

172:     Concepts: mapping^local to global

174: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
175: @*/
176: int ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
177: {
178:   int ierr,*ii,i,n;


182:   if (bs > 1) {
183:     n    = inmap->n/bs;
184:     PetscMalloc(n*sizeof(int),&ii);
185:     for (i=0; i<n; i++) {
186:       ii[i] = inmap->indices[bs*i]/bs;
187:     }
188:     ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);
189:     PetscFree(ii);
190:   } else {
191:     *outmap = inmap;
192:     ierr    = PetscObjectReference((PetscObject)inmap);
193:   }
194:   return(0);
195: }
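
/*
   Editor's note: an illustrative sketch, not part of isltog.c. With block size 2, the
   point-wise indices {6,7,2,3} describe the global blocks 3 and 1, which is what
   ISLocalToGlobalMappingBlock() computes (each block index is a point index divided by bs).
   The function name and index values are hypothetical.
*/
static int ExampleBlockMapping(MPI_Comm comm)
{
  ISLocalToGlobalMapping ptmap,blkmap;
  int                    ierr,indices[4] = {6,7,2,3};

  ierr = ISLocalToGlobalMappingCreate(comm,4,indices,&ptmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingBlock(ptmap,2,&blkmap);CHKERRQ(ierr);   /* block indices {3,1} */
  ierr = ISLocalToGlobalMappingDestroy(ptmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(blkmap);CHKERRQ(ierr);
  return 0;
}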
196: 
197: /*@
198:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
199:    ordering and a global parallel ordering.

201:    Not Collective

203:    Input Parameter:
204: .  mapping - mapping data structure

206:    Level: advanced

208: .seealso: ISLocalToGlobalMappingCreate()
209: @*/
210: int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
211: {
215:   if (--mapping->refct > 0) return(0);
216:   if (mapping->refct < 0) {
217:     SETERRQ(1,"Mapping already destroyed");
218:   }

220:   PetscFree(mapping->indices);
221:   if (mapping->globals) {PetscFree(mapping->globals);}
222:   PetscLogObjectDestroy(mapping);
223:   PetscHeaderDestroy(mapping);
224:   return(0);
225: }
226: 
227: /*@
228:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
229:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
230:     context.

232:     Not collective

234:     Input Parameters:
235: +   mapping - mapping between local and global numbering
236: -   is - index set in local numbering

238:     Output Parameter:
239: .   newis - index set in global numbering

241:     Level: advanced

243:     Concepts: mapping^local to global

245: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
246:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
247: @*/
248: int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
249: {
250:   int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;


257:   ierr   = ISGetLocalSize(is,&n);
258:   ierr   = ISGetIndices(is,&idxin);
259:   idxmap = mapping->indices;
260: 
261:   PetscMalloc((n+1)*sizeof(int),&idxout);
262:   for (i=0; i<n; i++) {
263:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
264:     idxout[i] = idxmap[idxin[i]];
265:   }
266:   ISRestoreIndices(is,&idxin);
267:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);
268:   PetscFree(idxout);
269:   return(0);
270: }
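
/*
   Editor's note: an illustrative sketch, not part of isltog.c, of renumbering an index set
   with ISLocalToGlobalMappingApplyIS(). If the mapping were {10,11,20,21}, the local IS
   {0,2} would become the global IS {10,20}. The function name and values are hypothetical.
*/
static int ExampleApplyIS(ISLocalToGlobalMapping ltog)
{
  IS  islocal,isglobal;
  int ierr,locidx[2] = {0,2};

  ierr = ISCreateGeneral(PETSC_COMM_SELF,2,locidx,&islocal);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);CHKERRQ(ierr);
  ierr = ISView(isglobal,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = ISDestroy(islocal);CHKERRQ(ierr);
  ierr = ISDestroy(isglobal);CHKERRQ(ierr);
  return 0;
}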

272: /*MC
273:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
274:    and converts them to the global numbering.

276:    Not collective

278:    Input Parameters:
279: +  mapping - the local to global mapping context
280: .  N - number of integers
281: -  in - input indices in local numbering

283:    Output Parameter:
284: .  out - indices in global numbering

286:    Synopsis:
287:    int ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

289:    Notes: 
290:    The in and out array parameters may be identical.

292:    Level: advanced

294: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
295:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
296:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

298:     Concepts: mapping^local to global

300: M*/
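
/*
   Editor's note: an illustrative sketch, not part of isltog.c, of converting an array of
   local indices with ISLocalToGlobalMappingApply(), following the synopsis above. As the
   manual page notes, the input and output arrays could also be the same array. The
   function name is hypothetical.
*/
static int ExampleApplyArray(ISLocalToGlobalMapping ltog)
{
  int ierr,local[3] = {0,1,2},global[3];

  ierr = ISLocalToGlobalMappingApply(ltog,3,local,global);CHKERRQ(ierr);
  /* global[i] now holds the global number of local index i */
  return 0;
}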

302: /* -----------------------------------------------------------------------------------------*/

304: /*
305:     Creates the global fields in the ISLocalToGlobalMapping structure
306: */
307: static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
308: {
309:   int ierr,i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

312:   end   = 0;
313:   start = 100000000;

315:   for (i=0; i<n; i++) {
316:     if (idx[i] < 0) continue;
317:     if (idx[i] < start) start = idx[i];
318:     if (idx[i] > end)   end   = idx[i];
319:   }
320:   if (start > end) {start = 0; end = -1;}
321:   mapping->globalstart = start;
322:   mapping->globalend   = end;

324:   ierr             = PetscMalloc((end-start+2)*sizeof(int),&globals);
325:   mapping->globals = globals;
326:   for (i=0; i<end-start+1; i++) {
327:     globals[i] = -1;
328:   }
329:   for (i=0; i<n; i++) {
330:     if (idx[i] < 0) continue;
331:     globals[idx[i] - start] = i;
332:   }

334:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
335:   return(0);
336: }

338: /*@
339:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
340:     specified with a global numbering.

342:     Not collective

344:     Input Parameters:
345: +   mapping - mapping between local and global numbering
346: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
347:            IS_GTOLM_DROP - drops the indices with no local value from the output list
348: .   n - number of global indices to map
349: -   idx - global indices to map

351:     Output Parameters:
352: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
353: -   idxout - local index of each global index, one must pass in an array long enough 
354:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
355:              idxout == PETSC_NULL to determine the required length (returned in nout)
356:              and then allocate the required space and call ISGlobalToLocalMappingApply()
357:              a second time to set the values.

359:     Notes:
360:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

362:     This is not scalable in memory usage. Each processor requires O(Nglobal) size 
363:     array to compute these.

365:     Level: advanced

367:     Concepts: mapping^global to local

369: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
370:           ISLocalToGlobalMappingDestroy()
371: @*/
372: int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
373:                                   int n,const int idx[],int *nout,int idxout[])
374: {
375:   int i,ierr,*globals,nf = 0,tmp,start,end;

378:   if (!mapping->globals) {
379:     ISGlobalToLocalMappingSetUp_Private(mapping);
380:   }
381:   globals = mapping->globals;
382:   start   = mapping->globalstart;
383:   end     = mapping->globalend;

385:   if (type == IS_GTOLM_MASK) {
386:     if (idxout) {
387:       for (i=0; i<n; i++) {
388:         if (idx[i] < 0) idxout[i] = idx[i];
389:         else if (idx[i] < start) idxout[i] = -1;
390:         else if (idx[i] > end)   idxout[i] = -1;
391:         else                     idxout[i] = globals[idx[i] - start];
392:       }
393:     }
394:     if (nout) *nout = n;
395:   } else {
396:     if (idxout) {
397:       for (i=0; i<n; i++) {
398:         if (idx[i] < 0) continue;
399:         if (idx[i] < start) continue;
400:         if (idx[i] > end) continue;
401:         tmp = globals[idx[i] - start];
402:         if (tmp < 0) continue;
403:         idxout[nf++] = tmp;
404:       }
405:     } else {
406:       for (i=0; i<n; i++) {
407:         if (idx[i] < 0) continue;
408:         if (idx[i] < start) continue;
409:         if (idx[i] > end) continue;
410:         tmp = globals[idx[i] - start];
411:         if (tmp < 0) continue;
412:         nf++;
413:       }
414:     }
415:     if (nout) *nout = nf;
416:   }

418:   return(0);
419: }
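
/*
   Editor's note: an illustrative sketch, not part of isltog.c, of the two-pass pattern
   described in the manual page above: call ISGlobalToLocalMappingApply() with
   idxout == PETSC_NULL to obtain the count, allocate, then call again to fill the local
   indices. The function name is hypothetical.
*/
static int ExampleGlobalToLocal(ISLocalToGlobalMapping ltog,int n,const int globals[])
{
  int ierr,nout,*locals;

  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,n,globals,&nout,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscMalloc((nout+1)*sizeof(int),&locals);CHKERRQ(ierr);
  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,n,globals,&nout,locals);CHKERRQ(ierr);
  /* locals[0..nout-1] are the local numbers of those entries of globals[] owned locally */
  ierr = PetscFree(locals);CHKERRQ(ierr);
  return 0;
}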

421: /*@C
422:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
423:      each index shared by more than one processor 

425:     Collective on ISLocalToGlobalMapping

427:     Input Parameter:
428: .   mapping - the mapping from local to global indexing

430:     Output Parameters:
431: +   nproc - number of processors that are connected to this one
432: .   procs - neighboring processors
433: .   numprocs - number of indices for each subdomain (processor)
434: -   indices - indices of local nodes shared with neighbor (sorted by global numbering)

436:     Level: advanced

438:     Concepts: mapping^local to global

440: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
441:           ISLocalToGlobalMappingRestoreInfo()
442: @*/
443: int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
444: {
445:   int         i,n = mapping->n,ierr,Ng,ng,max = 0,*lindices = mapping->indices;
446:   int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,*work,nmax,nrecvs,*recvs,proc;
447:   int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
448:   int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
449:   int         first_procs,first_numprocs,*first_indices;
450:   MPI_Request *recv_waits,*send_waits;
451:   MPI_Status  recv_status,*send_status,*recv_statuses;
452:   MPI_Comm    comm = mapping->comm;
453:   PetscTruth  debug = PETSC_FALSE;

456:   ierr   = MPI_Comm_size(comm,&size);
457:   ierr   = MPI_Comm_rank(comm,&rank);
458:   if (size == 1) {
459:     *nproc         = 0;
460:     *procs         = PETSC_NULL;
461:     ierr           = PetscMalloc(sizeof(int),numprocs);
462:     (*numprocs)[0] = 0;
463:     ierr           = PetscMalloc(sizeof(int*),indices);
464:     (*indices)[0]  = PETSC_NULL;
465:     return(0);
466:   }

468:   PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);

470:   /*
471:     Notes on ISLocalToGlobalMappingGetInfo

473:     globally owned node - a node that has been assigned to this processor in the global
474:            numbering, just for this routine.

476:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
477:            boundary (i.e. it has more than one local owner)

479:     locally owned node - node that exists on this processor's subdomain

481:     nontrivial locally owned node - node that is not in the interior (i.e. belongs to more than one
482:            local subdomain)
483:   */
484:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
485:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
486:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

488:   for (i=0; i<n; i++) {
489:     if (lindices[i] > max) max = lindices[i];
490:   }
491:   ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);
492:   Ng++;
493:   ierr   = MPI_Comm_size(comm,&size);
494:   ierr   = MPI_Comm_rank(comm,&rank);
495:   scale  = Ng/size + 1;
496:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
497:   rstart = scale*rank;

499:   /* determine ownership ranges of global indices */
500:   PetscMalloc((2*size+1)*sizeof(int),&nprocs);
501:   PetscMemzero(nprocs,2*size*sizeof(int));

503:   /* determine owners of each local node  */
504:   PetscMalloc((n+1)*sizeof(int),&owner);
505:   for (i=0; i<n; i++) {
506:     proc              = lindices[i]/scale; /* processor that globally owns this index */
507:     nprocs[size+proc] = 1;                 /* processor globally owns at least one of ours */
508:     owner[i]          = proc;
509:     nprocs[proc]++;                        /* count of how many that processor globally owns of ours */
510:   }
511:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[size + i];
512:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

514:   /* inform other processors of number of messages and max length*/
515:   PetscMalloc(2*size*sizeof(int),&work);
516:   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);
517:   nmax   = work[rank];
518:   nrecvs = work[size+rank];
519:   ierr   = PetscFree(work);
520:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

522:   /* post receives for owned rows */
523:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);
524:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
525:   for (i=0; i<nrecvs; i++) {
526:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
527:   }

529:   /* pack messages containing lists of local nodes to owners */
530:   ierr       = PetscMalloc((2*n+1)*sizeof(int),&sends);
531:   ierr       = PetscMalloc((size+1)*sizeof(int),&starts);
532:   starts[0]  = 0;
533:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
534:   for (i=0; i<n; i++) {
535:     sends[starts[owner[i]]++] = lindices[i];
536:     sends[starts[owner[i]]++] = i;
537:   }
538:   PetscFree(owner);
539:   starts[0]  = 0;
540:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}

542:   /* send the messages */
543:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
544:   PetscMalloc((nsends+1)*sizeof(int),&dest);
545:   cnt = 0;
546:   for (i=0; i<size; i++) {
547:     if (nprocs[i]) {
548:       ierr      = MPI_Isend(sends+starts[i],2*nprocs[i],MPI_INT,i,tag1,comm,send_waits+cnt);
549:       dest[cnt] = i;
550:       cnt++;
551:     }
552:   }
553:   PetscFree(starts);

555:   /* wait on receives */
556:   PetscMalloc((2*nrecvs+1)*sizeof(int),&source);
557:   len  = source + nrecvs;
558:   cnt  = nrecvs;
559:   PetscMalloc((ng+1)*sizeof(int),&nownedsenders);
560:   PetscMemzero(nownedsenders,ng*sizeof(int));
561:   while (cnt) {
562:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
563:     /* unpack receives into our local space */
564:     ierr           = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);
565:     source[imdex]  = recv_status.MPI_SOURCE;
566:     len[imdex]     = len[imdex]/2;
567:     /* count how many local owners for each of my global owned indices */
568:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
569:     cnt--;
570:   }
571:   PetscFree(recv_waits);

573:   /* count the globally owned indices that lie on subdomain boundaries (nowned) and their total weighted by the number of processors sharing them (nownedm) */
574:   nowned  = 0;
575:   nownedm = 0;
576:   for (i=0; i<ng; i++) {
577:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
578:   }

580:   /* create single array to contain rank of all local owners of each globally owned index */
581:   ierr      = PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);
582:   ierr      = PetscMalloc((ng+1)*sizeof(int),&starts);
583:   starts[0] = 0;
584:   for (i=1; i<ng; i++) {
585:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
586:     else starts[i] = starts[i-1];
587:   }

589:   /* for each nontrivial globally owned node list all arriving processors */
590:   for (i=0; i<nrecvs; i++) {
591:     for (j=0; j<len[i]; j++) {
592:       node = recvs[2*i*nmax+2*j]-rstart;
593:       if (nownedsenders[node] > 1) {
594:         ownedsenders[starts[node]++] = source[i];
595:       }
596:     }
597:   }

599:   if (debug) { /* -----------------------------------  */
600:     starts[0]    = 0;
601:     for (i=1; i<ng; i++) {
602:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
603:       else starts[i] = starts[i-1];
604:     }
605:     for (i=0; i<ng; i++) {
606:       if (nownedsenders[i] > 1) {
607:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
608:         for (j=0; j<nownedsenders[i]; j++) {
609:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
610:         }
611:         PetscSynchronizedPrintf(comm,"\n");
612:       }
613:     }
614:     PetscSynchronizedFlush(comm);
615:   }/* -----------------------------------  */

617:   /* wait on original sends */
618:   if (nsends) {
619:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
620:     MPI_Waitall(nsends,send_waits,send_status);
621:     PetscFree(send_status);
622:   }
623:   PetscFree(send_waits);
624:   PetscFree(sends);
625:   PetscFree(nprocs);

627:   /* pack messages to send back to local owners */
628:   starts[0]    = 0;
629:   for (i=1; i<ng; i++) {
630:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
631:     else starts[i] = starts[i-1];
632:   }
633:   nsends2 = nrecvs;
634:   ierr    = PetscMalloc((nsends2+1)*sizeof(int),&nprocs); /* length of each message */
635:   for (i=0; i<nrecvs; i++) {
636:     nprocs[i] = 1;
637:     for (j=0; j<len[i]; j++) {
638:       node = recvs[2*i*nmax+2*j]-rstart;
639:       if (nownedsenders[node] > 1) {
640:         nprocs[i] += 2 + nownedsenders[node];
641:       }
642:     }
643:   }
644:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
645:   PetscMalloc((nt+1)*sizeof(int),&sends2);
646:   PetscMalloc((nsends2+1)*sizeof(int),&starts2);
647:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
648:   /*
649:      Each message is nprocs[i] integers long, and consists of
650:        (0) the number of nodes being sent back, then, for each nontrivial node,
651:        (1) the local node number,
652:        (2) the number of processors sharing it,
653:        (3) the processors sharing it
654:   */
655:   for (i=0; i<nsends2; i++) {
656:     cnt = 1;
657:     sends2[starts2[i]] = 0;
658:     for (j=0; j<len[i]; j++) {
659:       node = recvs[2*i*nmax+2*j]-rstart;
660:       if (nownedsenders[node] > 1) {
661:         sends2[starts2[i]]++;
662:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
663:         sends2[starts2[i]+cnt++] = nownedsenders[node];
664:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));
665:         cnt += nownedsenders[node];
666:       }
667:     }
668:   }

670:   /* send the message lengths */
671:   for (i=0; i<nsends2; i++) {
672:     MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);
673:   }

675:   /* receive the message lengths */
676:   nrecvs2 = nsends;
677:   PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);
678:   PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);
679:   nt      = 0;
680:   for (i=0; i<nrecvs2; i++) {
681:      MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);
682:     nt   += lens2[i];
683:   }
684:   starts3[0] = 0;
685:   for (i=0; i<nrecvs2-1; i++) {
686:     starts3[i+1] = starts3[i] + lens2[i];
687:   }
688:   PetscMalloc((nt+1)*sizeof(int),&recvs2);
689:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
690:   for (i=0; i<nrecvs2; i++) {
691:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);
692:   }
693: 
694:   /* send the messages */
695:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
696:   for (i=0; i<nsends2; i++) {
697:     MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);
698:   }

700:   /* wait on receives */
701:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);
702:   MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
703:   PetscFree(recv_statuses);
704:   PetscFree(recv_waits);
705:   PetscFree(nprocs);

707:   if (debug) { /* -----------------------------------  */
708:     cnt = 0;
709:     for (i=0; i<nrecvs2; i++) {
710:       nt = recvs2[cnt++];
711:       for (j=0; j<nt; j++) {
712:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
713:         for (k=0; k<recvs2[cnt+1]; k++) {
714:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
715:         }
716:         cnt += 2 + recvs2[cnt+1];
717:         PetscSynchronizedPrintf(comm,"\n");
718:       }
719:     }
720:     PetscSynchronizedFlush(comm);
721:   } /* -----------------------------------  */

723:   /* count the number of shared indices for each subdomain (processor) */
724:   PetscMalloc(size*sizeof(int),&nprocs);
725:   PetscMemzero(nprocs,size*sizeof(int));
726:   cnt  = 0;
727:   for (i=0; i<nrecvs2; i++) {
728:     nt = recvs2[cnt++];
729:     for (j=0; j<nt; j++) {
730:       for (k=0; k<recvs2[cnt+1]; k++) {
731:         nprocs[recvs2[cnt+2+k]]++;
732:       }
733:       cnt += 2 + recvs2[cnt+1];
734:     }
735:   }
736:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
737:   *nproc    = nt;
738:   PetscMalloc((nt+1)*sizeof(int),procs);
739:   PetscMalloc((nt+1)*sizeof(int),numprocs);
740:   PetscMalloc((nt+1)*sizeof(int*),indices);
741:   PetscMalloc(size*sizeof(int),&bprocs);
742:   cnt       = 0;
743:   for (i=0; i<size; i++) {
744:     if (nprocs[i] > 0) {
745:       bprocs[i]        = cnt;
746:       (*procs)[cnt]    = i;
747:       (*numprocs)[cnt] = nprocs[i];
748:       ierr             = PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);
749:       cnt++;
750:     }
751:   }

753:   /* make the list of subdomains for each nontrivial local node */
754:   PetscMemzero(*numprocs,nt*sizeof(int));
755:   cnt  = 0;
756:   for (i=0; i<nrecvs2; i++) {
757:     nt = recvs2[cnt++];
758:     for (j=0; j<nt; j++) {
759:       for (k=0; k<recvs2[cnt+1]; k++) {
760:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
761:       }
762:       cnt += 2 + recvs2[cnt+1];
763:     }
764:   }
765:   PetscFree(bprocs);
766:   PetscFree(recvs2);

768:   /* sort the node indices by their global numbers */
769:   nt = *nproc;
770:   for (i=0; i<nt; i++) {
771:     PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);
772:     for (j=0; j<(*numprocs)[i]; j++) {
773:       tmp[j] = lindices[(*indices)[i][j]];
774:     }
775:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
776:     PetscFree(tmp);
777:   }

779:   if (debug) { /* -----------------------------------  */
780:     nt = *nproc;
781:     for (i=0; i<nt; i++) {
782:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
783:       for (j=0; j<(*numprocs)[i]; j++) {
784:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
785:       }
786:       PetscSynchronizedPrintf(comm,"\n");
787:     }
788:     PetscSynchronizedFlush(comm);
789:   } /* -----------------------------------  */

791:   /* wait on sends */
792:   if (nsends2) {
793:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
794:     MPI_Waitall(nsends2,send_waits,send_status);
795:     PetscFree(send_status);
796:   }

798:   PetscFree(starts3);
799:   PetscFree(dest);
800:   PetscFree(send_waits);

802:   PetscFree(nownedsenders);
803:   PetscFree(ownedsenders);
804:   PetscFree(starts);
805:   PetscFree(starts2);
806:   PetscFree(lens2);

808:   PetscFree(source);
809:   PetscFree(recvs);
810:   PetscFree(nprocs);
811:   PetscFree(sends2);

813:   /* put the information about myself as the first entry in the list */
814:   first_procs    = (*procs)[0];
815:   first_numprocs = (*numprocs)[0];
816:   first_indices  = (*indices)[0];
817:   for (i=0; i<*nproc; i++) {
818:     if ((*procs)[i] == rank) {
819:       (*procs)[0]    = (*procs)[i];
820:       (*numprocs)[0] = (*numprocs)[i];
821:       (*indices)[0]  = (*indices)[i];
822:       (*procs)[i]    = first_procs;
823:       (*numprocs)[i] = first_numprocs;
824:       (*indices)[i]  = first_indices;
825:       break;
826:     }
827:   }

829:   return(0);
830: }
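
/*
   Editor's note: an illustrative sketch, not part of isltog.c, of traversing the neighbor
   information produced by ISLocalToGlobalMappingGetInfo() and releasing it with
   ISLocalToGlobalMappingRestoreInfo() (defined below). The function name is hypothetical.
*/
static int ExampleGetInfo(ISLocalToGlobalMapping ltog)
{
  int ierr,i,j,nproc,*procs,*numprocs,**indices;

  ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
  for (i=0; i<nproc; i++) {                   /* entry 0 describes this processor itself */
    for (j=0; j<numprocs[i]; j++) {
      /* indices[i][j] is a local node shared with processor procs[i] */
    }
  }
  ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
  return 0;
}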

832: /*@C
833:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

835:     Collective on ISLocalToGlobalMapping

837:     Input Parameters:
838: .   mapping - the mapping from local to global indexing

840:     Output Parameters:
841: +   nproc - number of processors that are connected to this one
842: .   procs - neighboring processors
843: .   numprocs - number of indices for each processor
844: -   indices - indices of local nodes shared with neighbor (sorted by global numbering)

846:     Level: advanced

848: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
849:           ISLocalToGlobalMappingGetInfo()
850: @*/
851: int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
852: {
853:   int ierr,i;

856:   if (*procs) {PetscFree(*procs);}
857:   if (*numprocs) {PetscFree(*numprocs);}
858:   if (*indices) {
859:     if ((*indices)[0]) {PetscFree((*indices)[0]);}
860:     for (i=1; i<*nproc; i++) {
861:       if ((*indices)[i]) {PetscFree((*indices)[i]);}
862:     }
863:     PetscFree(*indices);
864:   }
865:   return(0);
866: }