Actual source code: da1.c

/*$Id: da1.c,v 1.127 2001/03/28 19:42:42 balay Exp bsmith $*/

/*
   Code for manipulating distributed regular 1d arrays in parallel.
   This file was created by Peter Mell, 6/30/95.
*/

#include "src/dm/da/daimpl.h"

#if defined(PETSC_HAVE_AMS)
EXTERN_C_BEGIN
EXTERN int AMSSetFieldBlock_DA(AMS_Memory,char *,Vec);
EXTERN_C_END
#endif

int DAView_1d(DA da,PetscViewer viewer)
{
  int        rank;
  PetscTruth isascii,isdraw,isbinary;

  MPI_Comm_rank(da->comm,&rank);

  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
  if (isascii) {
    PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %d m %d w %d s %d\n",rank,da->M,
                 da->m,da->w,da->s);
    PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %d %d\n",da->xs,da->xe);
    PetscViewerFlush(viewer);
  } else if (isdraw) {
    PetscDraw  draw;
    double     ymin = -1,ymax = 1,xmin = -1,xmax = da->M,x;
    int        base;
    char       node[10];
    PetscTruth isnull;

    PetscViewerDrawGetDraw(viewer,0,&draw);
    PetscDrawIsNull(draw,&isnull); if (isnull) return(0);

    PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);
    PetscDrawSynchronizedClear(draw);

    /* first processor draws all node lines */
    if (!rank) {
      int xmin_tmp;
      ymin = 0.0; ymax = 0.3;

      /* ADIC doesn't like doubles in a for loop */
      for (xmin_tmp=0; xmin_tmp < (int)da->M; xmin_tmp++) {
        PetscDrawLine(draw,(double)xmin_tmp,ymin,(double)xmin_tmp,ymax,PETSC_DRAW_BLACK);
      }

      xmin = 0.0; xmax = da->M - 1;
      PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);
      PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_BLACK);
    }

    PetscDrawSynchronizedFlush(draw);
    PetscDrawPause(draw);

    /* draw my box */
    ymin = 0; ymax = 0.3; xmin = da->xs/da->w; xmax = (da->xe/da->w) - 1;
    PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);
    PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);
    PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);
    PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);

    /* Put in index numbers */
    base = da->base/da->w;
    for (x=xmin; x<=xmax; x++) {
      sprintf(node,"%d",base++);
      PetscDrawString(draw,x,ymin,PETSC_DRAW_RED,node);
    }

    PetscDrawSynchronizedFlush(draw);
    PetscDrawPause(draw);
  } else if (isbinary) {
    DAView_Binary(da,viewer);
  } else {
    SETERRQ1(1,"Viewer type %s not supported for DA 1d",((PetscObject)viewer)->type_name);
  }
  return(0);
}

EXTERN int DAPublish_Petsc(PetscObject);

/*@C
   DACreate1d - Creates an object that will manage the communication of one-dimensional
   regular array data that is distributed across some processors.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  wrap - type of periodicity the array should have, if any; use
          either DA_NONPERIODIC or DA_XPERIODIC
.  M - global dimension of the array
.  dof - number of degrees of freedom per node
.  lc - array containing the number of nodes in the X direction on each processor,
        or PETSC_NULL; if non-null, it must have one entry per processor
-  s - stencil width

   Output Parameter:
.  inra - the resulting distributed array object

   Options Database Keys:
+  -da_view - calls DAView() at the conclusion of DACreate1d()
.  -da_grid_x <nx> - number of grid points in the x direction
-  -da_noao - do not compute the natural-to-PETSc ordering object

   Level: beginner

   Notes:
   The array data itself is NOT stored in the DA; it is stored in Vec objects.
   The appropriate vector objects can be obtained with calls to DACreateGlobalVector()
   and DACreateLocalVector(), and with calls to VecDuplicate() if more are needed.


.keywords: distributed array, create, one-dimensional

.seealso: DADestroy(), DAView(), DACreate2d(), DACreate3d(), DAGlobalToLocalBegin(),
          DAGlobalToLocalEnd(), DALocalToGlobal(), DALocalToLocalBegin(), DALocalToLocalEnd(),
          DAGetInfo(), DACreateGlobalVector(), DACreateLocalVector(), DACreateNaturalVector(), DALoad()

@*/
int DACreate1d(MPI_Comm comm,DAPeriodicType wrap,int M,int dof,int s,int *lc,DA *inra)
{
  int        rank,size,xs,xe,x,Xs,Xe,start,end,m;
  int        i,*idx,nn,j,left,gdim,refine_x = 2;
  PetscTruth flg1,flg2;
  DA         da;
  Vec        local,global;
  VecScatter ltog,gtol;
  IS         to,from;

  *inra = 0;

  if (dof < 1) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Must have 1 or more degrees of freedom per node: %d",dof);
  if (s < 0)   SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Stencil width cannot be negative: %d",s);

  PetscOptionsBegin(comm,PETSC_NULL,"1d DA Options","DA");
    PetscOptionsInt("-da_grid_x","Number of grid points in x direction","DACreate1d",M,&M,PETSC_NULL);
    PetscOptionsInt("-da_refine_x","Refinement ratio in x direction","DACreate1d",refine_x,&refine_x,PETSC_NULL);
  PetscOptionsEnd();

  PetscHeaderCreate(da,_p_DA,struct _DAOps,DA_COOKIE,0,"DA",comm,DADestroy,DAView);
  PetscLogObjectCreate(da);
  da->bops->publish           = DAPublish_Petsc;
  da->ops->createglobalvector = DACreateGlobalVector;
  da->ops->getinterpolation   = DAGetInterpolation;
  da->ops->getcoloring        = DAGetColoring;
  da->ops->refine             = DARefine;
  PetscLogObjectMemory(da,sizeof(struct _p_DA));
  da->dim      = 1;
  da->gtog1    = 0;
  da->refine_x = refine_x;
  PetscMalloc(dof*sizeof(char*),&da->fieldname);
  PetscMemzero(da->fieldname,dof*sizeof(char*));
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  m = size;

  if (M < m)     SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"More processors than data points! %d %d",m,M);
  if ((M-1) < s) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Array is too small for stencil! %d %d",M-1,s);

  /*
     Determine locally owned region:
     xs is the first local node number, x is the number of local nodes
  */
  if (!lc) {
    PetscOptionsHasName(PETSC_NULL,"-da_partition_blockcomm",&flg1);
    PetscOptionsHasName(PETSC_NULL,"-da_partition_nodes_at_end",&flg2);
    if (flg1) {        /* block comm type distribution */
      xs = rank*M/m;
      x  = (rank + 1)*M/m - xs;
    } else if (flg2) { /* the leftover nodes are evenly distributed across the last processors */
      x = (M + rank)/m;
      if (M/m == x) { xs = rank*x; }
      else          { xs = rank*(x-1) + (M+rank)%(x*m); }
    } else {           /* the leftover nodes are evenly distributed across the first processors */
      /* regular PETSc distribution */
      x = M/m + ((M % m) > rank);
      if (rank >= (M % m)) {xs = rank*(M/m) + M % m;}
      else                 {xs = rank*(M/m) + rank;}
    }
  } else {
    x  = lc[rank];
    xs = 0;
    for (i=0; i<rank; i++) {
      xs += lc[i];
    }
    /* verify that the data the user provided is consistent */
    left = xs;
    for (i=rank; i<size; i++) {
      left += lc[i];
    }
    if (left != M) {
      SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Sum of lc across processors not equal to M %d %d",left,M);
    }
  }
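
  /*
     Worked example of the default (regular PETSc) distribution, added for
     illustration: with M = 10 nodes on m = 3 processors, M/m = 3 and
     M % m = 1, so rank 0 gets x = 4 nodes starting at xs = 0, while ranks
     1 and 2 each get x = 3 nodes starting at xs = 4 and xs = 7.
  */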

  /* From now on x,s,xs,xe,Xs,Xe are the exact locations in the array */
  x  *= dof;
  s  *= dof;  /* NOTE: here we change s to be the absolute stencil distance */
  xs *= dof;
  xe  = xs + x;

  /* determine ghost region */
  if (wrap == DA_XPERIODIC) {
    Xs = xs - s;
    Xe = xe + s;
  } else {
    if ((xs-s) >= 0)     Xs = xs-s; else Xs = 0;
    if ((xe+s) <= M*dof) Xe = xe+s; else Xe = M*dof;
  }
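
  /*
     Example, continuing the sketch above: with dof = 1 and s = 1, rank 1
     owns [xs,xe) = [4,7), so its ghosted region is [Xs,Xe) = [3,8); for a
     nonperiodic array the ghost region is clipped to [0,M*dof) at the ends.
  */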

  /* allocate the base parallel and sequential vectors */
  VecCreateMPI(comm,x,PETSC_DECIDE,&global);
  VecSetBlockSize(global,dof);
  VecCreateSeq(PETSC_COMM_SELF,(Xe-Xs),&local);
  VecSetBlockSize(local,dof);

  /* create the local to global vector scatter context */
  /* local to global inserts the non-ghost point region into the global vector */
  VecGetOwnershipRange(global,&start,&end);
  ISCreateStride(comm,x,start,1,&to);
  ISCreateStride(comm,x,xs-Xs,1,&from);
  VecScatterCreate(local,from,global,to,&ltog);
  PetscLogObjectParent(da,to);
  PetscLogObjectParent(da,from);
  PetscLogObjectParent(da,ltog);
  ISDestroy(from);
  ISDestroy(to);

  /* create the global to local vector scatter context */
  /* global to local must retrieve the ghost points as well */
  ISCreateStride(comm,(Xe-Xs),0,1,&to);

  PetscMalloc((x+2*s)*sizeof(int),&idx);
  PetscLogObjectMemory(da,(x+2*s)*sizeof(int));

  nn = 0;
  if (wrap == DA_XPERIODIC) {    /* handle all cases with wrapping first */

    for (i=0; i<s; i++) {  /* left ghost points */
      if ((xs-s+i) >= 0) { idx[nn++] = xs-s+i; }
      else               { idx[nn++] = M*dof+(xs-s+i); }
    }

    for (i=0; i<x; i++) { idx[nn++] = xs + i; }  /* non-ghost points */

    for (i=0; i<s; i++) { /* right ghost points */
      if ((xe+i) < M*dof) { idx[nn++] = xe+i; }
      else                { idx[nn++] = (xe+i) - M*dof; }
    }
  } else {      /* now do all cases with no wrapping */

    if (s <= xs) { for (i=0; i<s;  i++) { idx[nn++] = xs - s + i; } }
    else         { for (i=0; i<xs; i++) { idx[nn++] = i; } }

    for (i=0; i<x; i++) { idx[nn++] = xs + i; }

    if ((xe+s) <= M*dof) { for (i=0;  i<s;       i++) { idx[nn++] = xe+i; } }
    else                 { for (i=xe; i<(M*dof); i++) { idx[nn++] = i;    } }
  }
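
  /*
     Example (illustration only): for a periodic array with M = 5, dof = 1,
     and s = 1, the processor owning nodes [0,2) builds idx = {4,0,1,2};
     its left ghost point wraps around to global index 4.
  */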

  ISCreateGeneral(comm,nn,idx,&from);
  VecScatterCreate(global,from,local,to,&gtol);
  PetscLogObjectParent(da,to);
  PetscLogObjectParent(da,from);
  PetscLogObjectParent(da,gtol);
  ISDestroy(to);
  ISDestroy(from);

  da->M  = M;  da->N  = 1;  da->m  = m;   da->n  = 1;
  da->xs = xs; da->xe = xe; da->ys = 0;   da->ye = 1; da->zs = 0; da->ze = 1;
  da->Xs = Xs; da->Xe = Xe; da->Ys = 0;   da->Ye = 1; da->Zs = 0; da->Ze = 1;
  da->P  = 1;  da->p  = 1;  da->w  = dof; da->s  = s/dof;

  PetscLogObjectParent(da,global);
  PetscLogObjectParent(da,local);

  da->global       = global;
  da->local        = local;
  da->gtol         = gtol;
  da->ltog         = ltog;
  da->idx          = idx;
  da->Nl           = nn;
  da->base         = xs;
  da->ops->view    = DAView_1d;
  da->wrap         = wrap;
  da->stencil_type = DA_STENCIL_STAR;

  /*
     Set the local to global ordering in the global vector; this allows use
     of VecSetValuesLocal().
  */
  ISLocalToGlobalMappingCreate(comm,nn,idx,&da->ltogmap);
  VecSetLocalToGlobalMapping(da->global,da->ltogmap);
  ISLocalToGlobalMappingBlock(da->ltogmap,da->w,&da->ltogmapb);
  VecSetLocalToGlobalMappingBlock(da->global,da->ltogmapb);
  PetscLogObjectParent(da,da->ltogmap);

  /* construct the local to local scatter context */
  /*
      We simply remap the values in the from part of the
    global to local scatter to read from an array with the ghost values
    rather than from the plain array.
  */
  VecScatterCopy(gtol,&da->ltol);
  PetscLogObjectParent(da,da->ltol);
  left = xs - Xs;
  PetscMalloc((Xe-Xs)*sizeof(int),&idx);
  for (j=0; j<Xe-Xs; j++) {
    idx[j] = left + j;
  }
  VecScatterRemap(da->ltol,idx,PETSC_NULL);
  PetscFree(idx);
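
  /*
     Example (illustration only): with xs = 4 and Xs = 3 as above, left = 1,
     so source entry j of the copied scatter is remapped to position 1 + j;
     the local to local scatter then reads out of a ghosted local array
     instead of an unghosted one.
  */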

  /*
     Build the natural ordering to PETSc ordering mappings.
  */
  PetscOptionsHasName(PETSC_NULL,"-da_noao",&flg1);
  if (!flg1) {
    IS is;

    ISCreateStride(comm,da->xe-da->xs,da->base,1,&is);
    AOCreateBasicIS(is,is,&da->ao);
    PetscLogObjectParent(da,da->ao);
    ISDestroy(is);
  } else {
    da->ao = PETSC_NULL;
  }

  /*
     Note: the following will be removed soon, since its functionality
     is replaced by the mapping above.
  */
  /* Construct the mapping from the current global ordering to the global
     ordering that would be used if only 1 processor were employed.
     This mapping is intended only for internal use by discrete
     function and matrix viewers.

     We don't really need this for 1d distributed arrays, since the
     ordering is the same regardless, but for now we form it anyway;
     maybe we'll change this in the near future.
   */
  VecGetSize(global,&gdim);
  PetscMalloc(gdim*sizeof(int),&da->gtog1);
  PetscLogObjectMemory(da,gdim*sizeof(int));
  for (i=0; i<gdim; i++) da->gtog1[i] = i;

  PetscOptionsHasName(PETSC_NULL,"-da_view",&flg1);
  if (flg1) {DAView(da,PETSC_VIEWER_STDOUT_(da->comm));}
  PetscOptionsHasName(PETSC_NULL,"-da_view_draw",&flg1);
  if (flg1) {DAView(da,PETSC_VIEWER_DRAW_(da->comm));}
  PetscOptionsHasName(PETSC_NULL,"-help",&flg1);
  if (flg1) {DAPrintHelp(da);}
  *inra = da;
  PetscPublishAll(da);
#if defined(PETSC_HAVE_AMS)
  PetscObjectComposeFunctionDynamic((PetscObject)global,"AMSSetFieldBlock_C",
         "AMSSetFieldBlock_DA",AMSSetFieldBlock_DA);
  PetscObjectComposeFunctionDynamic((PetscObject)local,"AMSSetFieldBlock_C",
         "AMSSetFieldBlock_DA",AMSSetFieldBlock_DA);
  if (((PetscObject)global)->amem > -1) {
    AMSSetFieldBlock_DA(((PetscObject)global)->amem,"values",global);
  }
#endif
  VecSetOperation(global,VECOP_VIEW,(void(*)())VecView_MPI_DA);
  VecSetOperation(global,VECOP_LOADINTOVECTOR,(void(*)())VecLoadIntoVector_Binary_DA);
  return(0);
}
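
/*
   A minimal usage sketch, added for illustration and not part of the
   original file (the function name DAExampleUsage_1d and the sizes 64, 1, 1
   are made up): create a periodic 1d DA, obtain its global and local
   vectors, and update the ghost points. Guarded by #if 0 so it is never
   compiled into the library.
*/
#if 0
int DAExampleUsage_1d(MPI_Comm comm)
{
  DA  da;
  Vec g,l;
  int ierr;

  ierr = DACreate1d(comm,DA_XPERIODIC,64,1,1,PETSC_NULL,&da);CHKERRQ(ierr);
  ierr = DACreateGlobalVector(da,&g);CHKERRQ(ierr);
  ierr = DACreateLocalVector(da,&l);CHKERRQ(ierr);
  /* ... set values in g, e.g. with VecSetValuesLocal() ... */
  ierr = DAGlobalToLocalBegin(da,g,INSERT_VALUES,l);CHKERRQ(ierr);
  ierr = DAGlobalToLocalEnd(da,g,INSERT_VALUES,l);CHKERRQ(ierr);
  ierr = VecDestroy(g);CHKERRQ(ierr);
  ierr = VecDestroy(l);CHKERRQ(ierr);
  ierr = DADestroy(da);CHKERRQ(ierr);
  return 0;
}
#endif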