Actual source code: da1.c

  1: /*$Id: da1.c,v 1.127 2001/03/28 19:42:42 balay Exp $*/

  3: /* 
  4:    Code for manipulating distributed regular 1d arrays in parallel.
  5:    This file was created by Peter Mell   6/30/95    
  6: */

  8: #include "src/dm/da/daimpl.h"     /*I  "petscda.h"   I*/

 10: #if defined (PETSC_HAVE_AMS)
 11: EXTERN_C_BEGIN
 12: EXTERN int AMSSetFieldBlock_DA(AMS_Memory,char *,Vec);
 13: EXTERN_C_END
 14: #endif

 16: int DAView_1d(DA da,PetscViewer viewer)
 17: {
 18:   int        rank,ierr;
 19:   PetscTruth isascii,isdraw,isbinary;

 22:   MPI_Comm_rank(da->comm,&rank);

 24:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
 25:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
 26:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
 27:   if (isascii) {
 28:     PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %d m %d w %d s %d\n",rank,da->M,
 29:                  da->m,da->w,da->s);
 30:     PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %d %d\n",da->xs,da->xe);
 31:     PetscViewerFlush(viewer);
 32:   } else if (isdraw) {
 33:     PetscDraw       draw;
 34:     double     ymin = -1,ymax = 1,xmin = -1,xmax = da->M,x;
 35:     int        base;
 36:     char       node[10];
 37:     PetscTruth isnull;

 39:     PetscViewerDrawGetDraw(viewer,0,&draw);
 40:     PetscDrawIsNull(draw,&isnull); if (isnull) return(0);

 42:     PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);
 43:     PetscDrawSynchronizedClear(draw);

 45:     /* first processor draws all node lines */
 46:     if (!rank) {
 47:       int xmin_tmp;
 48:       ymin = 0.0; ymax = 0.3;
 49: 
 50:       /* ADIC doesn't like doubles in a for loop */
 51:       for (xmin_tmp =0; xmin_tmp < (int)da->M; xmin_tmp++) {
 52:          PetscDrawLine(draw,(double)xmin_tmp,ymin,(double)xmin_tmp,ymax,PETSC_DRAW_BLACK);
 53:       }

 55:       xmin = 0.0; xmax = da->M - 1;
 56:       PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);
 57:       PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_BLACK);
 58:     }

 60:     PetscDrawSynchronizedFlush(draw);
 61:     PetscDrawPause(draw);

 63:     /* draw my box */
 64:     ymin = 0; ymax = 0.3; xmin = da->xs / da->w; xmax = (da->xe / da->w)  - 1;
 65:     PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);
 66:     PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);
 67:     PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);
 68:     PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);

 70:     /* Put in index numbers */
 71:     base = da->base / da->w;
 72:     for (x=xmin; x<=xmax; x++) {
 73:       sprintf(node,"%d",base++);
 74:       PetscDrawString(draw,x,ymin,PETSC_DRAW_RED,node);
 75:     }

 77:     PetscDrawSynchronizedFlush(draw);
 78:     PetscDrawPause(draw);
 79:   } else if (isbinary) {
 80:     DAView_Binary(da,viewer);
 81:   } else {
 82:     SETERRQ1(1,"Viewer type %s not supported for DA 1d",((PetscObject)viewer)->type_name);
 83:   }
 84:   return(0);
 85: }

 87: EXTERN int DAPublish_Petsc(PetscObject);

 89: /*@C
 90:    DACreate1d - Creates an object that will manage the communication of one-dimensional
 91:    regular array data that is distributed across some processors.

 93:    Collective on MPI_Comm

 95:    Input Parameters:
 96: +  comm - MPI communicator
 97: .  wrap - type of periodicity the array should have, if any. Use 
 98:           either DA_NONPERIODIC or DA_XPERIODIC
 99: .  M - global dimension of the array
100: .  dof - number of degrees of freedom per node
101: .  lc - array containing number of nodes in the X direction on each processor, 
102:         or PETSC_NULL. If non-null, it must contain one entry per processor.
103: -  s - stencil width  

105:    Output Parameter:
106: .  inra - the resulting distributed array object

108:    Options Database Key:
109: +  -da_view - Calls DAView() at the conclusion of DACreate1d()
110: .  -da_grid_x <nx> - number of grid points in x direction
111: -  -da_noao - do not compute natural to PETSc ordering object

113:    Level: beginner

115:    Notes:
116:    The array data itself is NOT stored in the DA; it is stored in Vec objects.
117:    The appropriate vector objects can be obtained with calls to DACreateGlobalVector()
118:    and DACreateLocalVector(); use VecDuplicate() if additional vectors are needed.


121: .keywords: distributed array, create, one-dimensional

123: .seealso: DADestroy(), DAView(), DACreate2d(), DACreate3d(), DAGlobalToLocalBegin(),
124:           DAGlobalToLocalEnd(), DALocalToGlobal(), DALocalToLocalBegin(), DALocalToLocalEnd(),
125:           DAGetInfo(), DACreateGlobalVector(), DACreateLocalVector(), DACreateNaturalVector(), DALoad(), DAView()

127: @*/
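/*
   Illustrative usage sketch (not part of the library source); the grid size,
   stencil width, and variable names below are arbitrary examples:

       DA  da;
       Vec g,l;
       int ierr;

       ierr = DACreate1d(PETSC_COMM_WORLD,DA_NONPERIODIC,128,1,1,PETSC_NULL,&da);CHKERRQ(ierr);
       ierr = DACreateGlobalVector(da,&g);CHKERRQ(ierr);
       ierr = DACreateLocalVector(da,&l);CHKERRQ(ierr);
       ... fill g, then DAGlobalToLocalBegin()/DAGlobalToLocalEnd() to update l ...
       ierr = VecDestroy(g);CHKERRQ(ierr);
       ierr = VecDestroy(l);CHKERRQ(ierr);
       ierr = DADestroy(da);CHKERRQ(ierr);
*/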
128: int DACreate1d(MPI_Comm comm,DAPeriodicType wrap,int M,int dof,int s,int *lc,DA *inra)
129: {
130:   int        rank,size,xs,xe,x,Xs,Xe,ierr,start,end,m;
131:   int        i,*idx,nn,j,left,gdim;
132:   PetscTruth flg1,flg2;
133:   DA         da;
134:   Vec        local,global;
135:   VecScatter ltog,gtol;
136:   IS         to,from;

139:   *inra = 0;

141:   if (dof < 1) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Must have 1 or more degrees of freedom per node: %d",dof);
142:   if (s < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Stencil width cannot be negative: %d",s);

144:   PetscOptionsBegin(comm,PETSC_NULL,"1d DA Options","DA");
145:     PetscOptionsInt("-da_grid_x","Number of grid points in x direction","DACreate1d",M,&M,PETSC_NULL);
146:   PetscOptionsEnd();

148:   PetscHeaderCreate(da,_p_DA,struct _DAOps,DA_COOKIE,0,"DA",comm,DADestroy,DAView);
149:   PetscLogObjectCreate(da);
150:   da->bops->publish           = DAPublish_Petsc;
151:   da->ops->createglobalvector = DACreateGlobalVector;
152:   da->ops->getinterpolation   = DAGetInterpolation;
153:   da->ops->getcoloring        = DAGetColoring;
154:   da->ops->refine             = DARefine;
155:   PetscLogObjectMemory(da,sizeof(struct _p_DA));
156:   da->dim        = 1;
157:   da->gtog1      = 0;
158:   PetscMalloc(dof*sizeof(char*),&da->fieldname);
159:   PetscMemzero(da->fieldname,dof*sizeof(char*));
160:   MPI_Comm_size(comm,&size);
161:   MPI_Comm_rank(comm,&rank);

163:   m = size;

165:   if (M < m)     SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"More processors than data points! %d %d",m,M);
166:   if ((M-1) < s) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Array is too small for stencil! %d %d",M-1,s);

168:   /* 
169:      Determine locally owned region 
170:      xs is the first local node number, x is the number of local nodes 
171:   */
172:   if (!lc) {
173:     PetscOptionsHasName(PETSC_NULL,"-da_partition_blockcomm",&flg1);
174:     PetscOptionsHasName(PETSC_NULL,"-da_partition_nodes_at_end",&flg2);
175:     if (flg1) {      /* Block Comm type Distribution */
176:       xs = rank*M/m;
177:       x  = (rank + 1)*M/m - xs;
178:     } else if (flg2) { /* The leftover nodes are spread across the last processors */
179:       x = (M + rank)/m;
180:       if (M/m == x) { xs = rank*x; }
181:       else          { xs = rank*(x-1) + (M+rank)%(x*m); }
182:     } else { /* The leftover nodes are spread across the first M % m processors */
183:       /* Regular PETSc Distribution */
184:       x = M/m + ((M % m) > rank);
185:       if (rank >= (M % m)) {xs = (rank * (int)(M/m) + M % m);}
186:       else                 {xs = rank * (int)(M/m) + rank;}
187:     }
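      /*
         Worked example (illustrative): with M = 10 nodes on m = 3 processors the
         default distribution gives x = 4,3,3 and xs = 0,4,7, i.e. the first
         M % m processors each receive one extra node.
      */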
188:   } else {
189:     x  = lc[rank];
190:     xs = 0;
191:     for (i=0; i<rank; i++) {
192:       xs += lc[i];
193:     }
194:     /* verify that data user provided is consistent */
195:     left = xs;
196:     for (i=rank; i<size; i++) {
197:       left += lc[i];
198:     }
199:     if (left != M) {
200:       SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Sum of lc across processors not equal to M %d %d",left,M);
201:     }
202:   }

204:   /* From now on x,s,xs,xe,Xs,Xe are the exact location in the array */
205:   x  *= dof;
206:   s  *= dof;  /* NOTE: here change s to be absolute stencil distance */
207:   xs *= dof;
208:   xe = xs + x;

210:   /* determine ghost region */
211:   if (wrap == DA_XPERIODIC) {
212:     Xs = xs - s;
213:     Xe = xe + s;
214:   } else {
215:     if ((xs-s) >= 0)   Xs = xs-s;  else Xs = 0;
216:     if ((xe+s) <= M*dof) Xe = xe+s;  else Xe = M*dof;
217:   }
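  /*
     Example (illustrative): with dof = 1, s = 2, M = 12, a non-periodic process
     owning xs = 5 .. xe = 10 gets the ghosted range Xs = 3 .. Xe = 12.  In the
     periodic case Xs and Xe may fall outside [0,M*dof); those indices are
     wrapped when the ghost index list is built below.
  */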

219:   /* allocate the base parallel and sequential vectors */
220:   VecCreateMPI(comm,x,PETSC_DECIDE,&global);
221:   VecSetBlockSize(global,dof);
222:   VecCreateSeq(PETSC_COMM_SELF,(Xe-Xs),&local);
223:   VecSetBlockSize(local,dof);
224: 
225:   /* Create Local to Global Vector Scatter Context */
226:   /* local to global inserts non-ghost point region into global */
227:   VecGetOwnershipRange(global,&start,&end);
228:   ISCreateStride(comm,x,start,1,&to);
229:   ISCreateStride(comm,x,xs-Xs,1,&from);
230:   VecScatterCreate(local,from,global,to,&ltog);
231:   PetscLogObjectParent(da,to);
232:   PetscLogObjectParent(da,from);
233:   PetscLogObjectParent(da,ltog);
234:   ISDestroy(from);
235:   ISDestroy(to);

237:   /* Create Global to Local Vector Scatter Context */
238:   /* global to local must retrieve ghost points */
239:   ISCreateStride(comm,(Xe-Xs),0,1,&to);
240: 
241:   PetscMalloc((x+2*s)*sizeof(int),&idx);
242:   PetscLogObjectMemory(da,(x+2*s)*sizeof(int));

244:   nn = 0;
245:   if (wrap == DA_XPERIODIC) {    /* Handle all cases with wrap first */

247:     for (i=0; i<s; i++) {  /* Left ghost points */
248:       if ((xs-s+i)>=0) { idx[nn++] = xs-s+i;}
249:       else             { idx[nn++] = M*dof+(xs-s+i);}
250:     }

252:     for (i=0; i<x; i++) { idx [nn++] = xs + i;}  /* Non-ghost points */
253: 
254:     for (i=0; i<s; i++) { /* Right ghost points */
255:       if ((xe+i)<M*dof) { idx [nn++] =  xe+i; }
256:       else            { idx [nn++] = (xe+i) - M*dof;}
257:     }
258:   } else {      /* Now do all cases with no wrapping */

260:     if (s <= xs) {for (i=0; i<s; i++) {idx[nn++] = xs - s + i;}}
261:     else         {for (i=0; i<xs;  i++) {idx[nn++] = i;}}

263:     for (i=0; i<x; i++) { idx [nn++] = xs + i;}
264: 
265:     if ((xe+s)<=M*dof) {for (i=0;  i<s;     i++) {idx[nn++]=xe+i;}}
266:     else             {for (i=xe; i<(M*dof); i++) {idx[nn++]=i;   }}
267:   }
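  /*
     Example (illustrative): for a periodic array with M = 4, dof = 1, s = 1 split
     over two processors, rank 0 (xs = 0, xe = 2) builds the ghost index list
     idx = {3, 0, 1, 2}: one wrapped left ghost point, the two owned points, and
     one right ghost point.
  */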

269:   ISCreateGeneral(comm,nn,idx,&from);
270:   VecScatterCreate(global,from,local,to,&gtol);
271:   PetscLogObjectParent(da,to);
272:   PetscLogObjectParent(da,from);
273:   PetscLogObjectParent(da,gtol);
274:   ISDestroy(to);
275:   ISDestroy(from);

277:   da->M  = M;  da->N  = 1;  da->m  = m; da->n = 1;
278:   da->xs = xs; da->xe = xe; da->ys = 0; da->ye = 1; da->zs = 0; da->ze = 1;
279:   da->Xs = Xs; da->Xe = Xe; da->Ys = 0; da->Ye = 1; da->Zs = 0; da->Ze = 1;
280:   da->P  = 1;  da->p  = 1;  da->w = dof; da->s = s/dof;

282:   PetscLogObjectParent(da,global);
283:   PetscLogObjectParent(da,local);

285:   da->global       = global;
286:   da->local        = local;
287:   da->gtol         = gtol;
288:   da->ltog         = ltog;
289:   da->idx          = idx;
290:   da->Nl           = nn;
291:   da->base         = xs;
292:   da->ops->view    = DAView_1d;
293:   da->wrap         = wrap;
294:   da->stencil_type = DA_STENCIL_STAR;

296:   /* 
297:      Set the local to global ordering in the global vector, this allows use
298:      of VecSetValuesLocal().
299:   */
300:   ISLocalToGlobalMappingCreate(comm,nn,idx,&da->ltogmap);
301:   VecSetLocalToGlobalMapping(da->global,da->ltogmap);
302:   ISLocalToGlobalMappingBlock(da->ltogmap,da->w,&da->ltogmapb);
303:   VecSetLocalToGlobalMappingBlock(da->global,da->ltogmapb);
304:   PetscLogObjectParent(da,da->ltogmap);
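  /*
     Illustrative sketch of what the mapping enables (values are arbitrary
     examples): an entry can be set using a local, ghosted index,

         PetscScalar v    = 1.0;
         int         lidx = 0;
         ierr = VecSetValuesLocal(global,1,&lidx,&v,INSERT_VALUES);CHKERRQ(ierr);
         ierr = VecAssemblyBegin(global);CHKERRQ(ierr);
         ierr = VecAssemblyEnd(global);CHKERRQ(ierr);
  */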

306:   /* construct the local to local scatter context */
307:   /* 
308:       We simply remap the "from" part of the global-to-local scatter
309:     so that it reads from an array containing the ghost values
310:     rather than from the plain array.
311:   */
312:   VecScatterCopy(gtol,&da->ltol);
313:   PetscLogObjectParent(da,da->ltol);
314:   left = xs - Xs;
315:   PetscMalloc((Xe-Xs)*sizeof(int),&idx);
316:   for (j=0; j<Xe-Xs; j++) {
317:     idx[j] = left + j;
318:   }
319:   VecScatterRemap(da->ltol,idx,PETSC_NULL);
320:   PetscFree(idx);

322:   /* 
323:      Build the natural ordering to PETSc ordering mappings.
324:   */
325:   PetscOptionsHasName(PETSC_NULL,"-da_noao",&flg1);
326:   if (!flg1) {
327:     IS is;
328: 
329:     ISCreateStride(comm,da->xe-da->xs,da->base,1,&is);
330:     AOCreateBasicIS(is,is,&da->ao);
331:     PetscLogObjectParent(da,da->ao);
332:     ISDestroy(is);
333:   } else {
334:     da->ao = PETSC_NULL;
335:   }

337:   /*
338:      Note: the following will be removed soon, since the functionality 
339:     is replaced by the above.
340:   */
341:   /* Construct the mapping from current global ordering to global
342:      ordering that would be used if only 1 processor were employed.
343:      This mapping is intended only for internal use by discrete
344:      function and matrix viewers.

346:      We don't really need this for 1D distributed arrays, since the
347:      ordering is the same regardless.  But for now we form it anyway.
348:      Maybe we'll change this in the near future.
349:    */
350:   VecGetSize(global,&gdim);
351:   PetscMalloc(gdim*sizeof(int),&da->gtog1);
352:   PetscLogObjectMemory(da,gdim*sizeof(int));
353:   for (i=0; i<gdim; i++) da->gtog1[i] = i;

355:   PetscOptionsHasName(PETSC_NULL,"-da_view",&flg1);
356:   if (flg1) {DAView(da,PETSC_VIEWER_STDOUT_(da->comm));}
357:   PetscOptionsHasName(PETSC_NULL,"-da_view_draw",&flg1);
358:   if (flg1) {DAView(da,PETSC_VIEWER_DRAW_(da->comm));}
359:   PetscOptionsHasName(PETSC_NULL,"-help",&flg1);
360:   if (flg1) {DAPrintHelp(da);}
361:   *inra = da;
362:   PetscPublishAll(da);
363: #if defined(PETSC_HAVE_AMS)
364:   PetscObjectComposeFunctionDynamic((PetscObject)global,"AMSSetFieldBlock_C",
365:          "AMSSetFieldBlock_DA",AMSSetFieldBlock_DA);
366:   PetscObjectComposeFunctionDynamic((PetscObject)local,"AMSSetFieldBlock_C",
367:          "AMSSetFieldBlock_DA",AMSSetFieldBlock_DA);
368:   if (((PetscObject)global)->amem > -1) {
369:     AMSSetFieldBlock_DA(((PetscObject)global)->amem,"values",global);
370:   }
371: #endif
372:   VecSetOperation(global,VECOP_VIEW,(void(*)())VecView_MPI_DA);
373:   VecSetOperation(global,VECOP_LOADINTOVECTOR,(void(*)())VecLoadIntoVector_Binary_DA);
374:   return(0);
375: }