Actual source code: vecio.c

  1: /*$Id: vecio.c,v 1.74 2001/08/07 03:02:17 balay Exp $*/

  3: /* 
  4:    This file contains simple binary input routines for vectors.  The
  5:    analogous output routines are within each vector implementation's 
  6:    VecView (with viewer types PETSC_VIEWER_BINARY)
  7:  */

  9:  #include "petsc.h"
 10:  #include "petscsys.h"
 11:  #include "petscvec.h"
 12: #if defined(PETSC_HAVE_PNETCDF)
 13: EXTERN_C_BEGIN
 14: #include "pnetcdf.h"
 15: EXTERN_C_END
 16: #endif
 17: int VecLoad_Binary(PetscViewer, Vec*);
 18: int VecLoad_Netcdf(PetscViewer, Vec*);
 19: int VecLoadIntoVector_Binary(PetscViewer, Vec);
 20: int VecLoadIntoVector_Netcdf(PetscViewer, Vec);

 24: /*@C 
 25:   VecLoad - Loads a vector that has been stored in binary format
 26:   with VecView().

 28:   Collective on PetscViewer 

 30:   Input Parameter:
 31: . viewer - binary file viewer, obtained from PetscViewerBinaryOpen(), or
 32:            NetCDF file viewer, obtained from PetscViewerNetcdfOpen()

 34:   Output Parameter:
 35: . newvec - the newly loaded vector

 37:    Level: intermediate

 39:   Notes:
 40:   The input file must contain the full global vector, as
 41:   written by the routine VecView().

 43:   Notes for advanced users:
 44:   Most users should not need to know the details of the binary storage
 45:   format, since VecLoad() and VecView() completely hide these details.
 46:   But for anyone who's interested, the standard binary vector storage
 47:   format is
 48: .vb
 49:      int    VEC_FILE_COOKIE
 50:      int    number of rows
 51:      PetscScalar *values of all nonzeros
 52: .ve

 54:    Note for Cray users: the ints stored in the binary file are 32-bit
 55: integers, not 64-bit as they are represented in memory, so if you
 56: write your own routines to read/write these binary files from the Cray
 57: you need to adjust the integer sizes that you read in; see
 58: PetscBinaryRead() and PetscBinaryWrite() to see how this may be
 59: done.

 61:    In addition, PETSc automatically does the byte swapping for
 62: machines that store the bytes reversed, e.g.  DEC alpha, freebsd,
 63: linux, nt and the paragon; thus if you write your own binary
 64: read/write routines you have to swap the bytes yourself; see
 65: PetscBinaryRead() and PetscBinaryWrite() to see how this may be done.

 67:   Concepts: vector^loading from file

 69: .seealso: PetscViewerBinaryOpen(), VecView(), MatLoad(), VecLoadIntoVector() 
 70: @*/
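For reference, a minimal calling sequence is sketched below. The file name vec.dat and the wrapper function are illustrative only, and the read-only open flag is assumed to be PETSC_BINARY_RDONLY (the name used in this generation of PETSc; later releases use FILE_MODE_READ), so check petscviewer.h for the exact spelling.

/* Sketch only: load a vector previously written with VecView() into a
   binary viewer.  The file name "vec.dat" is illustrative. */
#include "petscvec.h"

int LoadExample(void)
{
  PetscViewer viewer;
  Vec         x;
  int         ierr;

  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"vec.dat",PETSC_BINARY_RDONLY,&viewer);CHKERRQ(ierr);
  ierr = VecLoad(viewer,&x);CHKERRQ(ierr);          /* creates x and fills it from the file */
  ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
  /* ... use x ... */
  ierr = VecDestroy(x);CHKERRQ(ierr);
  return 0;
}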
 71: int VecLoad(PetscViewer viewer,Vec *newvec)
 72: {
 73:   int         ierr;
 74:   PetscTruth  isbinary,isnetcdf;

 78:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
 79:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_NETCDF,&isnetcdf);
 80:   if ((!isbinary) && (!isnetcdf)) SETERRQ(PETSC_ERR_ARG_WRONG,"Must be binary or NetCDF viewer");

 82: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
 83:   VecInitializePackage(PETSC_NULL);
 84: #endif
 85:   if (isnetcdf) {
 86:     VecLoad_Netcdf(viewer,newvec);
 87:   } else {
 88:     VecLoad_Binary(viewer,newvec);
 89:   }
 90:   return(0);
 91: }
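Because the on-disk layout is just the cookie, the length, and the values (see the Notes above), it can also be consumed outside of PETSc. The sketch below uses plain C I/O; it assumes a real, double-precision build and a reader with the same byte order as the writer (PETSc's own readers do the byte swapping automatically, as noted above). The function name and error convention are illustrative.

/* Sketch of a standalone reader for the layout documented above:
   int cookie, int number of rows, then the values.  Assumes PetscScalar
   is a double and that no byte swapping is needed. */
#include <stdio.h>
#include <stdlib.h>

int ReadPetscVecFile(const char *name,int *rows,double **values)
{
  FILE *fp = fopen(name,"rb");
  int  header[2];

  if (!fp) return 1;
  if (fread(header,sizeof(int),2,fp) != 2) {fclose(fp); return 1;}
  /* header[0] should equal VEC_FILE_COOKIE (see petscvec.h); header[1] is the global length */
  *rows   = header[1];
  *values = (double*)malloc((size_t)(*rows)*sizeof(double));
  if (!*values || fread(*values,sizeof(double),(size_t)(*rows),fp) != (size_t)(*rows)) {
    free(*values); fclose(fp); return 1;
  }
  fclose(fp);
  return 0;
}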

 95: int VecLoad_Netcdf(PetscViewer viewer,Vec *newvec)
 96: {
 97: #if defined(PETSC_HAVE_PNETCDF)
 98:   int         i,N,ierr,n,rank,bs;
 99:   int         ncid,start;
100:   Vec         vec;
101:   PetscScalar *avec;
102:   MPI_Comm    comm;
103:   MPI_Request request;
104:   MPI_Status  status;
105:   PetscMap    map;
106:   PetscTruth  isnetcdf,flag;
107:   char        name[NC_MAX_NAME];

110:   PetscLogEventBegin(VEC_Load,viewer,0,0,0);
111:   PetscObjectGetComm((PetscObject)viewer,&comm);
112:   MPI_Comm_rank(comm,&rank);
113:   PetscViewerNetcdfGetID(viewer,&ncid);
114:   ncmpi_inq_dim(ncid,0,name,(size_t*)&N);  /* N gets the global vector size */
115:   VecCreate(comm,&vec);
116:   VecSetSizes(vec,PETSC_DECIDE,N);
117:   if (!rank) {
118:     PetscOptionsGetInt(PETSC_NULL,"-vecload_block_size",&bs,&flag);
119:     if (flag) {
120:       VecSetBlockSize(vec,bs);
121:     }
122:   }
123:   VecSetFromOptions(vec);
124:   VecGetLocalSize(vec,&n);
125:   VecGetOwnershipRange(vec,&start,PETSC_NULL);
126:   VecGetArray(vec,&avec);
127:   ncmpi_get_vara_double_all(ncid,0,(const size_t*)&start,(const size_t*)&n,(double *)avec);
128:   VecRestoreArray(vec,&avec);
129:   *newvec = vec;
130:   VecAssemblyBegin(vec);
131:   VecAssemblyEnd(vec);
132:   PetscLogEventEnd(VEC_Load,viewer,0,0,0);
133:   return(0);
134: #else
136:   SETERRQ(1,"Build PETSc with NetCDF to use this viewer");
137: #endif
138: }

142: int VecLoad_Binary(PetscViewer viewer,Vec *newvec)
143: {
144:   int         i,rows,ierr,type,fd,rank,size,n,*range,tag,bs,nierr;
145:   Vec         vec;
146:   PetscScalar *avec;
147:   MPI_Comm    comm;
148:   MPI_Request request;
149:   MPI_Status  status;
150:   PetscMap    map;
151:   PetscTruth  flag;

154:   PetscLogEventBegin(VEC_Load,viewer,0,0,0);
155:   PetscViewerBinaryGetDescriptor(viewer,&fd);
156:   PetscObjectGetComm((PetscObject)viewer,&comm);
157:   MPI_Comm_rank(comm,&rank);
158:   MPI_Comm_size(comm,&size);

160:   if (!rank) {
161:     /* Read vector header. */
162:     ierr = PetscBinaryRead(fd,&type,1,PETSC_INT);if (ierr) goto handleerror;
163:     if (type != VEC_FILE_COOKIE) {ierr = PETSC_ERR_ARG_WRONG; goto handleerror;}
164:     ierr = PetscBinaryRead(fd,&rows,1,PETSC_INT);if (ierr) goto handleerror;
165:     MPI_Bcast(&rows,1,MPI_INT,0,comm);
166:     VecCreate(comm,&vec);
167:     VecSetSizes(vec,PETSC_DECIDE,rows);
168:     PetscOptionsGetInt(PETSC_NULL,"-vecload_block_size",&bs,&flag);
169:     if (flag) {
170:       VecSetBlockSize(vec,bs);
171:     }
172:     VecSetFromOptions(vec);
173:     VecGetLocalSize(vec,&n);
174:     VecGetArray(vec,&avec);
175:     PetscBinaryRead(fd,avec,n,PETSC_SCALAR);
176:     VecRestoreArray(vec,&avec);

178:     if (size > 1) {
179:       /* read in other chunks and send to other processors */
180:       /* determine maximum chunk owned by another processor */
181:       VecGetPetscMap(vec,&map);
182:       PetscMapGetGlobalRange(map,&range);
183:       n = 1;
184:       for (i=1; i<size; i++) {
185:         n = PetscMax(n,range[i+1] - range[i]);
186:       }
187:       PetscMalloc(n*sizeof(PetscScalar),&avec);
188:       PetscObjectGetNewTag((PetscObject)viewer,&tag);
189:       for (i=1; i<size; i++) {
190:         n    = range[i+1] - range[i];
191:         PetscBinaryRead(fd,avec,n,PETSC_SCALAR);
192:         MPI_Isend(avec,n,MPIU_SCALAR,i,tag,comm,&request);
193:         MPI_Wait(&request,&status);
194:       }
195:       PetscFree(avec);
196:     }
197:   } else {
198:     MPI_Bcast(&rows,1,MPI_INT,0,comm);
199:     if (rows == -1)  SETERRQ(1,"Error loading vector");
200:     VecCreate(comm,&vec);
201:     VecSetSizes(vec,PETSC_DECIDE,rows);
202:     VecSetFromOptions(vec);
203:     VecGetLocalSize(vec,&n);
204:     PetscObjectGetNewTag((PetscObject)viewer,&tag);
205:     VecGetArray(vec,&avec);
206:     MPI_Recv(avec,n,MPIU_SCALAR,0,tag,comm,&status);
207:     VecRestoreArray(vec,&avec);
208:   }
209:   *newvec = vec;
210:   VecAssemblyBegin(vec);
211:   VecAssemblyEnd(vec);
212:   return(0);
213:   /* tell the other processors we've had an error */
214:   handleerror:
215:     nierr = PetscLogEventEnd(VEC_Load,viewer,0,0,0);CHKERRQ(nierr);
216:     rows = -1; MPI_Bcast(&rows,1,MPI_INT,0,comm); /* -1 tells the other processors the load failed */
217:     SETERRQ(ierr,"Error loading vector");
218: }
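The communication pattern above -- process 0 reads one chunk at a time and sends it, while every other process posts a single receive for its own piece -- can be seen in isolation in the sketch below. It is plain MPI with a fixed, hypothetical chunk size standing in for the ownership ranges, and a blocking MPI_Send standing in for the MPI_Isend/MPI_Wait pair used in the source.

/* Sketch of the root-reads-then-distributes pattern used by VecLoad_Binary.
   Rank 0 fabricates each remote chunk (in the real code this is a
   PetscBinaryRead() from the file) and sends it; the other ranks receive. */
#include <mpi.h>
#include <stdlib.h>

int main(int argc,char **argv)
{
  int        rank,size,i,j,n = 4,tag = 0;   /* n: per-process chunk size (uniform here) */
  double     *chunk;
  MPI_Status status;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  chunk = (double*)malloc(n*sizeof(double));
  if (!rank) {
    for (i=1; i<size; i++) {
      for (j=0; j<n; j++) chunk[j] = (double)(i*n + j);   /* stands in for the file read */
      MPI_Send(chunk,n,MPI_DOUBLE,i,tag,MPI_COMM_WORLD);
    }
  } else {
    MPI_Recv(chunk,n,MPI_DOUBLE,0,tag,MPI_COMM_WORLD,&status);
  }
  free(chunk);
  MPI_Finalize();
  return 0;
}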

222: int VecLoadIntoVector_Default(PetscViewer viewer,Vec vec)
223: {
224:   PetscTruth isbinary,isnetcdf;
225:   int        ierr;


229:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
230:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_NETCDF,&isnetcdf);
231:   if ((!isbinary) && (!isnetcdf)) SETERRQ(PETSC_ERR_ARG_WRONG,"Must be binary or NetCDF viewer");

233:   if (isnetcdf) {
234:     VecLoadIntoVector_Netcdf(viewer,vec);
235:   } else {
236:     VecLoadIntoVector_Binary(viewer,vec);
237:   }
238:   return(0);
239: }
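In contrast to VecLoad(), the VecLoadIntoVector() path loads into a vector the caller has already created, so the caller controls its type and layout; the global size must match the length recorded in the file. A minimal sketch follows, with the file name, size, and wrapper function illustrative and the same caveat about the binary open flag as above.

/* Sketch only: load a file into a caller-created vector.  N must equal the
   length stored in the file; "vec.dat" is illustrative. */
#include "petscvec.h"

int LoadIntoExample(int N)
{
  PetscViewer viewer;
  Vec         x;
  int         ierr;

  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecSetSizes(x,PETSC_DECIDE,N);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"vec.dat",PETSC_BINARY_RDONLY,&viewer);CHKERRQ(ierr);
  ierr = VecLoadIntoVector(viewer,x);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
  ierr = VecDestroy(x);CHKERRQ(ierr);
  return 0;
}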

243: int VecLoadIntoVector_Netcdf(PetscViewer viewer,Vec vec)
244: {
245: #if defined(PETSC_HAVE_PNETCDF)
246:   int         i,N,rows,ierr,n,rank,bs;
247:   int         ncid,start;
248:   PetscScalar *avec;
249:   MPI_Comm    comm;
250:   MPI_Request request;
251:   MPI_Status  status;
252:   PetscMap    map;
253:   PetscTruth  isnetcdf,flag;
254:   char        name[NC_MAX_NAME];

257:   PetscLogEventBegin(VEC_Load,viewer,vec,0,0);
258:   PetscObjectGetComm((PetscObject)viewer,&comm);
259:   MPI_Comm_rank(comm,&rank);
260:   PetscViewerNetcdfGetID(viewer,&ncid);
261:   ncmpi_inq_dim(ncid,0,name,(size_t*)&N);  /* N gets the global vector size */
262:   if (!rank) {
263:     VecGetSize(vec,&rows);
264:     if (N != rows) SETERRQ(1,"Vector in file different length than input vector");
265:     PetscOptionsGetInt(PETSC_NULL,"-vecload_block_size",&bs,&flag);
266:     if (flag) {
267:       VecSetBlockSize(vec,bs);
268:     }
269:   }
270:   VecSetFromOptions(vec);
271:   VecGetLocalSize(vec,&n);
272:   VecGetOwnershipRange(vec,&start,PETSC_NULL);
273:   VecGetArray(vec,&avec);
274:   ncmpi_get_vara_double_all(ncid,0,(const size_t*)&start,(const size_t*)&n,(double *)avec);
275:   VecRestoreArray(vec,&avec);
276:   VecAssemblyBegin(vec);
277:   VecAssemblyEnd(vec);
278:   PetscLogEventEnd(VEC_Load,viewer,vec,0,0);
279:   return(0);
280: #else
282:   SETERRQ(1,"Build PETSc with NetCDF to use this viewer");
283: #endif
284: }
285: int VecLoadIntoVector_Binary(PetscViewer viewer,Vec vec)
286: {
287:   int         i,rows,ierr,type,fd,rank,size,n,*range,tag,bs;
288:   PetscScalar *avec;
289:   MPI_Comm    comm;
290:   MPI_Request request;
291:   MPI_Status  status;
292:   PetscMap    map;
293:   PetscTruth  flag;
294:   char        *prefix;

297:   PetscLogEventBegin(VEC_Load,viewer,vec,0,0);

299:   PetscViewerBinaryGetDescriptor(viewer,&fd);
300:   PetscObjectGetComm((PetscObject)viewer,&comm);
301:   MPI_Comm_rank(comm,&rank);
302:   MPI_Comm_size(comm,&size);

304:   if (!rank) {
305:     /* Read vector header. */
306:     PetscBinaryRead(fd,&type,1,PETSC_INT);
307:     if (type != VEC_FILE_COOKIE) SETERRQ(PETSC_ERR_ARG_WRONG,"Non-vector object");
308:     PetscBinaryRead(fd,&rows,1,PETSC_INT);
309:     VecGetSize(vec,&n);
310:     if (n != rows) SETERRQ(1,"Vector in file different length than input vector");
311:     MPI_Bcast(&rows,1,MPI_INT,0,comm);

313:     PetscObjectGetOptionsPrefix((PetscObject)vec,&prefix);
314:     PetscOptionsGetInt(prefix,"-vecload_block_size",&bs,&flag);
315:     if (flag) {
316:       VecSetBlockSize(vec,bs);
317:     }
318:     VecSetFromOptions(vec);
319:     VecGetLocalSize(vec,&n);
320:     VecGetArray(vec,&avec);
321:     PetscBinaryRead(fd,avec,n,PETSC_SCALAR);
322:     VecRestoreArray(vec,&avec);

324:     if (size > 1) {
325:       /* read in other chunks and send to other processors */
326:       /* determine maximum chunk owned by another processor */
327:       VecGetPetscMap(vec,&map);
328:       PetscMapGetGlobalRange(map,&range);
329:       n = 1;
330:       for (i=1; i<size; i++) {
331:         n = PetscMax(n,range[i+1] - range[i]);
332:       }
333:       PetscMalloc(n*sizeof(PetscScalar),&avec);
334:       PetscObjectGetNewTag((PetscObject)viewer,&tag);
335:       for (i=1; i<size; i++) {
336:         n    = range[i+1] - range[i];
337:         PetscBinaryRead(fd,avec,n,PETSC_SCALAR);
338:         MPI_Isend(avec,n,MPIU_SCALAR,i,tag,comm,&request);
339:         MPI_Wait(&request,&status);
340:       }
341:       PetscFree(avec);
342:     }
343:   } else {
344:     MPI_Bcast(&rows,1,MPI_INT,0,comm);
345:     VecSetFromOptions(vec);
346:     VecGetLocalSize(vec,&n);
347:     PetscObjectGetNewTag((PetscObject)viewer,&tag);
348:     VecGetArray(vec,&avec);
349:     MPI_Recv(avec,n,MPIU_SCALAR,0,tag,comm,&status);
350:     VecRestoreArray(vec,&avec);
351:   }
352:   VecAssemblyBegin(vec);
353:   VecAssemblyEnd(vec);
354:   PetscLogEventEnd(VEC_Load,viewer,vec,0,0);
355:   return(0);
356: }
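Note that the block-size option above is looked up under the vector's options prefix (via PetscObjectGetOptionsPrefix()), so a prefixed vector reads it as -<prefix>vecload_block_size. The sketch below assumes VecSetOptionsPrefix() with the prefix "in_"; the prefix, size, and file name are purely illustrative, and the program would be run with, e.g., -in_vecload_block_size 3.

/* Sketch only: with an options prefix on the vector, the block size for
   loading comes from -in_vecload_block_size instead of -vecload_block_size. */
#include "petscvec.h"

int LoadPrefixedExample(void)
{
  PetscViewer viewer;
  Vec         x;
  int         ierr;

  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecSetOptionsPrefix(x,"in_");CHKERRQ(ierr);
  ierr = VecSetSizes(x,PETSC_DECIDE,12);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"vec.dat",PETSC_BINARY_RDONLY,&viewer);CHKERRQ(ierr);
  ierr = VecLoadIntoVector(viewer,x);CHKERRQ(ierr);    /* honors -in_vecload_block_size */
  ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
  ierr = VecDestroy(x);CHKERRQ(ierr);
  return 0;
}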