Actual source code: vecstash.c
#include "vecimpl.h"

#define DEFAULT_STASH_SIZE 100
/*
  VecStashCreate_Private - Creates a stash, currently used for all the parallel
  matrix implementations. The stash is where elements of a matrix destined
  to be stored on other processors are kept until matrix assembly is done.

  This is a simple-minded stash; it simply adds entries to the end of the stash.

  Input Parameters:
  comm - communicator, required for scatters.
  bs   - stash block size, used when stashing blocks of values

  Output Parameter:
  stash - the newly created stash
*/
PetscErrorCode VecStashCreate_Private(MPI_Comm comm,PetscInt bs,VecStash *stash)
{
  PetscInt   max,*opt,nopt;
  PetscTruth flg;
  /* Require 2 tags, get the second using PetscCommGetNewTag() */
  stash->comm = comm;
  PetscCommGetNewTag(stash->comm,&stash->tag1);
  PetscCommGetNewTag(stash->comm,&stash->tag2);
  MPI_Comm_size(stash->comm,&stash->size);
  MPI_Comm_rank(stash->comm,&stash->rank);

  nopt = stash->size;
  PetscMalloc(nopt*sizeof(PetscInt),&opt);
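  /* -vecstash_initial_size may give a single size used by every process or
     one size per process; ranks beyond the list length fall back to the
     default stash size */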
  PetscOptionsGetIntArray(PETSC_NULL,"-vecstash_initial_size",opt,&nopt,&flg);
  if (flg) {
    if      (nopt == 1)           max = opt[0];
    else if (nopt == stash->size) max = opt[stash->rank];
    else if (stash->rank < nopt)  max = opt[stash->rank];
    else                          max = 0; /* use default */
    stash->umax = max;
  } else {
    stash->umax = 0;
  }
  PetscFree(opt);
  if (bs <= 0) bs = 1;

  stash->bs       = bs;
  stash->nmax     = 0;
  stash->oldnmax  = 0;
  stash->n        = 0;
  stash->reallocs = -1;
  stash->idx      = 0;
  stash->array    = 0;

  stash->send_waits  = 0;
  stash->recv_waits  = 0;
  stash->send_status = 0;
  stash->nsends      = 0;
  stash->nrecvs      = 0;
  stash->svalues     = 0;
  stash->rvalues     = 0;
  stash->rmax        = 0;
  stash->nprocs      = 0;
  stash->nprocessed  = 0;
  stash->donotstash  = PETSC_FALSE;
  return(0);
}
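
/*
   Illustrative sketch (assumed caller, not part of this file): a stash's
   life cycle around one assembly. 'comm', 'owners', and the assembly steps
   are assumed context; the calls match the signatures in this file.

      VecStash stash;

      VecStashCreate_Private(comm,1,&stash);
      ... VecSetValues() stashes off-process entries here ...
      VecStashScatterBegin_Private(&stash,owners);
      ... drain messages with VecStashScatterGetMesg_Private(); see the
          sketch at the end of this file ...
      VecStashScatterEnd_Private(&stash);
      VecStashDestroy_Private(&stash);
*/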

/*
  VecStashDestroy_Private - Destroy the stash
*/
PetscErrorCode VecStashDestroy_Private(VecStash *stash)
{
  if (stash->array) {
    PetscFree(stash->array);
    stash->array = 0;
  }
  if (stash->bowners) {
    PetscFree(stash->bowners);
  }
  return(0);
}

/*
  VecStashScatterEnd_Private - This is called as the final stage of the
  scatter. The final stages of message passing are completed here, and
  all the memory used for message passing is cleaned up. This routine
  also resets the stash and deallocates the memory used for the stash.
  It also keeps track of the current memory usage so that the same value
  can be used the next time through.
*/
PetscErrorCode VecStashScatterEnd_Private(VecStash *stash)
{
  PetscInt   nsends=stash->nsends,oldnmax;
  MPI_Status *send_status;

  /* wait on sends */
  if (nsends) {
    PetscMalloc(2*nsends*sizeof(MPI_Status),&send_status);
    MPI_Waitall(2*nsends,stash->send_waits,send_status);
    PetscFree(send_status);
  }

  /* Now update oldnmax to be about 10% more than the current n, so that
     the wasted space is reduced the next time this stash is used.
     Update oldnmax only if it increases */
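  /* for example, n = 100 with bs = 2 gives oldnmax = (110 + 5)*2 = 230 */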
  if (stash->n) {
    oldnmax = ((PetscInt)(stash->n * 1.1) + 5)*stash->bs;
    if (oldnmax > stash->oldnmax) stash->oldnmax = oldnmax;
  }

  stash->nmax       = 0;
  stash->n          = 0;
  stash->reallocs   = -1;
  stash->rmax       = 0;
  stash->nprocessed = 0;

  if (stash->array) {
    PetscFree(stash->array);
    stash->array = 0;
    stash->idx   = 0;
  }
  if (stash->send_waits) {
    PetscFree(stash->send_waits);
    stash->send_waits = 0;
  }
  if (stash->recv_waits) {
    PetscFree(stash->recv_waits);
    stash->recv_waits = 0;
  }
  if (stash->svalues) {
    PetscFree(stash->svalues);
    stash->svalues = 0;
  }
  if (stash->rvalues) {
    PetscFree(stash->rvalues);
    stash->rvalues = 0;
  }
  if (stash->nprocs) {
    PetscFree(stash->nprocs);
    stash->nprocs = 0;
  }
  return(0);
}

/*
  VecStashGetInfo_Private - Gets the relevant statistics of the stash

  Input Parameter:
  stash - the stash

  Output Parameters:
  nstash   - the size of the stash
  reallocs - the number of additional mallocs incurred
*/
PetscErrorCode VecStashGetInfo_Private(VecStash *stash,PetscInt *nstash,PetscInt *reallocs)
{
  if (nstash) *nstash = stash->n*stash->bs;
  if (reallocs) {
    if (stash->reallocs < 0) *reallocs = 0;
    else                     *reallocs = stash->reallocs;
  }
  return(0);
}

/*
  VecStashSetInitialSize_Private - Sets the initial size of the stash

  Input Parameters:
  stash - the stash
  max   - the value that is used as the max size of the stash.
          This value is used while allocating memory. It specifies
          the number of individual values stored, even for the block-stash
*/
PetscErrorCode VecStashSetInitialSize_Private(VecStash *stash,PetscInt max)
{
  stash->umax = max;
  return(0);
}

/*
  VecStashExpand_Private - Expand the stash. This function is called
  when the space in the stash is not sufficient to add the new values
  being inserted into the stash.

  Input Parameters:
  stash - the stash
  incr  - the minimum increase requested

  Notes:
  This routine doubles the currently used memory.
*/
PetscErrorCode VecStashExpand_Private(VecStash *stash,PetscInt incr)
{
  PetscInt    *n_idx,newnmax,bs=stash->bs;
  PetscScalar *n_array;

  /* allocate a larger stash */
  if (!stash->oldnmax && !stash->nmax) { /* new stash */
    if (stash->umax) newnmax = stash->umax/bs;
    else             newnmax = DEFAULT_STASH_SIZE/bs;
  } else if (!stash->nmax) { /* reusing stash */
    if (stash->umax > stash->oldnmax) newnmax = stash->umax/bs;
    else                              newnmax = stash->oldnmax/bs;
  } else newnmax = stash->nmax*2;

  if (newnmax < (stash->nmax + incr)) newnmax += 2*incr;
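
  /* one allocation holds both buffers: bs*newnmax PetscScalars for the values,
     followed by newnmax PetscInts for the indices, aliased through n_idx */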
  PetscMalloc((newnmax)*(sizeof(PetscInt)+bs*sizeof(PetscScalar)),&n_array);
  n_idx = (PetscInt*)(n_array + bs*newnmax);
  PetscMemcpy(n_array,stash->array,bs*stash->nmax*sizeof(PetscScalar));
  PetscMemcpy(n_idx,stash->idx,stash->nmax*sizeof(PetscInt));
  if (stash->array) {PetscFree(stash->array);}
  stash->array    = n_array;
  stash->idx      = n_idx;
  stash->nmax     = newnmax;
  stash->reallocs++;
  return(0);
}

/*
  VecStashScatterBegin_Private - Initiates the transfer of values to the
  correct owners. This function goes through the stash, checks the owner
  of each stashed value, and sends the values off to the owner processors.

  Input Parameters:
  stash  - the stash
  owners - an array of size 'no-of-procs' which gives the ownership range
           for each node.

  Notes: In the case of the blocked stash the 'owners' array has the ranges
  specified in blocked global indices, while for the regular stash they are
  in the usual global indices.
*/
PetscErrorCode VecStashScatterBegin_Private(VecStash *stash,PetscInt *owners)
{
  PetscMPIInt size = stash->size,tag1=stash->tag1,tag2=stash->tag2;
  PetscInt    *owner,*start,*nprocs,nsends,nreceives;
  PetscInt    nmax,count,*sindices,*rindices,i,j,idx,bs=stash->bs;
  PetscScalar *rvalues,*svalues;
  MPI_Comm    comm = stash->comm;
  MPI_Request *send_waits,*recv_waits;

  /* first count number of contributors to each processor */
  PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
  PetscMemzero(nprocs,2*size*sizeof(PetscInt));
  PetscMalloc((stash->n+1)*sizeof(PetscInt),&owner);
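
  /* nprocs[2*j] counts the entries destined for process j, nprocs[2*j+1] is set
     to 1 if any message goes to j at all, and owner[i] caches the destination
     of stashed entry i for the packing pass below */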
  for (i=0; i<stash->n; i++) {
    idx = stash->idx[i];
    for (j=0; j<size; j++) {
      if (idx >= owners[j] && idx < owners[j+1]) {
        nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; break;
      }
    }
  }
  nsends = 0; for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}

  /* inform other processors of number of messages and max length */
  PetscMaxSum(comm,nprocs,&nmax,&nreceives);

  /* post receives:
     since we don't know how long each individual message is we
     allocate the largest needed buffer for each receive. Potentially
     this is a lot of wasted space.
  */
  PetscMalloc((nreceives+1)*(nmax+1)*(bs*sizeof(PetscScalar)+sizeof(PetscInt)),&rvalues);
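  /* as in VecStashExpand_Private(), one allocation holds both receive buffers:
     the scalar values first, then the integer indices starting at rindices */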
  rindices = (PetscInt*)(rvalues + bs*nreceives*nmax);
  PetscMalloc((nreceives+1)*2*sizeof(MPI_Request),&recv_waits);
  for (i=0,count=0; i<nreceives; i++) {
    MPI_Irecv(rvalues+bs*nmax*i,bs*nmax,MPIU_SCALAR,MPI_ANY_SOURCE,tag1,comm,recv_waits+count++);
    MPI_Irecv(rindices+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag2,comm,recv_waits+count++);
  }

  /* do sends:
     1) start[i] gives the starting index in svalues for stuff going to
     the ith processor
  */
  PetscMalloc((stash->n+1)*(bs*sizeof(PetscScalar)+sizeof(PetscInt)),&svalues);
  sindices = (PetscInt*)(svalues + bs*stash->n);
  PetscMalloc(2*(nsends+1)*sizeof(MPI_Request),&send_waits);
  PetscMalloc(size*sizeof(PetscInt),&start);
  /* use 2 sends: the first with all_v, the next with all_i */
  start[0] = 0;
  for (i=1; i<size; i++) {
    start[i] = start[i-1] + nprocs[2*i-2];
  }
  for (i=0; i<stash->n; i++) {
    j = owner[i];
    if (bs == 1) {
      svalues[start[j]] = stash->array[i];
    } else {
      PetscMemcpy(svalues+bs*start[j],stash->array+bs*i,bs*sizeof(PetscScalar));
    }
    sindices[start[j]] = stash->idx[i];
    start[j]++;
  }
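  /* the packing pass above advanced start[], so rebuild the per-process
     offsets before using them for the sends */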
  start[0] = 0;
  for (i=1; i<size; i++) { start[i] = start[i-1] + nprocs[2*i-2];}
  for (i=0,count=0; i<size; i++) {
    if (nprocs[2*i+1]) {
      MPI_Isend(svalues+bs*start[i],bs*nprocs[2*i],MPIU_SCALAR,i,tag1,comm,send_waits+count++);
      MPI_Isend(sindices+start[i],nprocs[2*i],MPIU_INT,i,tag2,comm,send_waits+count++);
    }
  }
  PetscFree(owner);
  PetscFree(start);
  /* this array is reused by VecStashScatterGetMesg_Private() for a different purpose */
  for (i=0; i<2*size; i++) nprocs[i] = -1;
  stash->nprocs     = nprocs;

  stash->svalues    = svalues;    stash->rvalues    = rvalues;
  stash->nsends     = nsends;     stash->nrecvs     = nreceives;
  stash->send_waits = send_waits; stash->recv_waits = recv_waits;
  stash->rmax       = nmax;
  return(0);
}

/*
  VecStashScatterGetMesg_Private - This function waits on the receives posted
  in the function VecStashScatterBegin_Private() and returns one message at
  a time to the calling function. If no messages are left, it indicates this
  by setting flg = 0, else it sets flg = 1.

  Input Parameter:
  stash - the stash

  Output Parameters:
  nvals - the number of entries in the current message.
  rows  - an array of indices (or blocked indices) corresponding to the values
  vals  - the values
  flg   - 0 indicates no more messages are left, and the current call has no values associated.
          1 indicates that the current call successfully received a message, and the
          other output parameters nvals, rows, vals are set appropriately.
*/
PetscErrorCode VecStashScatterGetMesg_Private(VecStash *stash,PetscMPIInt *nvals,PetscInt **rows,PetscScalar **vals,PetscInt *flg)
{
  PetscMPIInt i;
  PetscInt    *flg_v;
  PetscInt    i1,i2,*rindices,bs=stash->bs;
  MPI_Status  recv_status;
  PetscTruth  match_found = PETSC_FALSE;

  *flg = 0; /* When a message is discovered this is reset to 1 */
  /* Return if no more messages to process */
  if (stash->nprocessed == stash->nrecvs) { return(0); }

  flg_v = stash->nprocs;
  /* If a matching pair of receives is found, process them, and return the data
     to the calling function. Until then keep receiving messages */
  while (!match_found) {
    MPI_Waitany(2*stash->nrecvs,stash->recv_waits,&i,&recv_status);
    /* Now pack the received message into a structure which is usable by others */
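    /* recv_waits holds request pairs: slot 2k is the values receive (tag1) and
       slot 2k+1 the indices receive (tag2), so i%2 tells which half arrived and
       i/2 identifies the buffer slot it landed in */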
    if (i % 2) {
      MPI_Get_count(&recv_status,MPIU_INT,nvals);
      flg_v[2*recv_status.MPI_SOURCE+1] = i/2;
    } else {
      MPI_Get_count(&recv_status,MPIU_SCALAR,nvals);
      flg_v[2*recv_status.MPI_SOURCE] = i/2;
      *nvals = *nvals/bs;
    }

    /* Check if we have both the messages from this proc */
    i1 = flg_v[2*recv_status.MPI_SOURCE];
    i2 = flg_v[2*recv_status.MPI_SOURCE+1];
    if (i1 != -1 && i2 != -1) {
      rindices = (PetscInt*)(stash->rvalues + bs*stash->rmax*stash->nrecvs);
      *rows = rindices + i2*stash->rmax;
      *vals = stash->rvalues + i1*bs*stash->rmax;
      *flg  = 1;
      stash->nprocessed++;
      match_found = PETSC_TRUE;
    }
  }
  return(0);
}
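
/*
   Illustrative sketch (assumed caller, not part of this file): draining the
   stash on the receive side during assembly. The 'owners' array and the
   insertion step are assumed context; the calls match the signatures
   defined above.

      PetscMPIInt nvals;
      PetscInt    *rows,flg,i;
      PetscScalar *vals;

      VecStashScatterBegin_Private(stash,owners);
      VecStashScatterGetMesg_Private(stash,&nvals,&rows,&vals,&flg);
      while (flg) {
        for (i=0; i<nvals; i++) {
          ... insert (rows[i],vals[i]) into the local part of the vector ...
        }
        VecStashScatterGetMesg_Private(stash,&nvals,&rows,&vals,&flg);
      }
      VecStashScatterEnd_Private(stash);
*/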