Actual source code: baijov.c
1: /*$Id: baijov.c,v 1.65 2001/08/06 21:15:42 bsmith Exp $*/
3: /*
4: Routines to compute overlapping regions of a parallel MPI matrix
5: and to find submatrices that are shared across processors.
6: */
7: #include "src/mat/impls/baij/mpi/mpibaij.h"
8: #include "petscbt.h"
10: static int MatIncreaseOverlap_MPIBAIJ_Once(Mat,int,IS *);
11: static int MatIncreaseOverlap_MPIBAIJ_Local(Mat,int,char **,int*,int**);
12: static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat,int,int **,int**,int*);
13: EXTERN int MatGetRow_MPIBAIJ(Mat,int,int*,int**,PetscScalar**);
14: EXTERN int MatRestoreRow_MPIBAIJ(Mat,int,int*,int**,PetscScalar**);
15:
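/*
   Illustrative usage sketch (added for exposition; not referenced by this file, and the
   function name is hypothetical): the routines below are the MPIBAIJ back ends of the
   public MatIncreaseOverlap() and MatGetSubMatrices() interface. A caller such as a
   domain-decomposition preconditioner might proceed roughly as follows, assuming is[]
   holds n index sets of (unblocked) row indices owned by this communicator.
*/
static int SketchOverlapAndExtract(Mat C,int n,IS *is)
{
  int ierr,i;
  Mat *submats;

  ierr = MatIncreaseOverlap(C,n,is,1);    if (ierr) return ierr;  /* grow each IS by one level of overlap */
  for (i=0; i<n; i++) {
    ierr = ISSort(is[i]);                 if (ierr) return ierr;  /* MatGetSubMatrices expects sorted indices */
  }
  ierr = MatGetSubMatrices(C,n,is,is,MAT_INITIAL_MATRIX,&submats); if (ierr) return ierr;
  /* ... use the sequential BAIJ submatrices ... */
  ierr = MatDestroyMatrices(n,&submats);  if (ierr) return ierr;
  return 0;
}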
16: #undef __FUNCT__
17: #define __FUNCT__ "MatCompressIndicesGeneral_MPIBAIJ"
18: static int MatCompressIndicesGeneral_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out)
19: {
20: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)C->data;
21: int ierr,isz,bs = baij->bs,n,i,j,*idx,ival;
22: #if defined (PETSC_USE_CTABLE)
23: PetscTable gid1_lid1;
24: int tt, gid1, *nidx;
25: PetscTablePosition tpos;
26: #else
27: int Nbs,*nidx;
28: PetscBT table;
29: #endif
32: #if defined (PETSC_USE_CTABLE)
33: PetscTableCreate(baij->mbs,&gid1_lid1);
34: #else
35: Nbs = baij->Nbs;
36: PetscMalloc((Nbs+1)*sizeof(int),&nidx);
37: PetscBTCreate(Nbs,table);
38: #endif
39: for (i=0; i<imax; i++) {
40: isz = 0;
41: #if defined (PETSC_USE_CTABLE)
42: PetscTableRemoveAll(gid1_lid1);
43: #else
44: PetscBTMemzero(Nbs,table);
45: #endif
46: ISGetIndices(is_in[i],&idx);
47: ISGetLocalSize(is_in[i],&n);
48: for (j=0; j<n ; j++) {
49: ival = idx[j]/bs; /* convert the indices into block indices */
50: #if defined (PETSC_USE_CTABLE)
51: PetscTableFind(gid1_lid1,ival+1,&tt);
52: if (!tt) {
53: PetscTableAdd(gid1_lid1,ival+1,isz+1);
54: isz++;
55: }
56: #else
57: if (ival>Nbs) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"index greater than mat-dim");
58: if(!PetscBTLookupSet(table,ival)) { nidx[isz++] = ival;}
59: #endif
60: }
61: ISRestoreIndices(is_in[i],&idx);
62: #if defined (PETSC_USE_CTABLE)
63: PetscMalloc((isz+1)*sizeof(int),&nidx);
64: PetscTableGetHeadPosition(gid1_lid1,&tpos);
65: j = 0;
66: while (tpos) {
67: PetscTableGetNext(gid1_lid1,&tpos,&gid1,&tt);
68: if (tt-- > isz) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"index greater than array-dim"); }
69: nidx[tt] = gid1 - 1;
70: j++;
71: }
72: if (j != isz) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"table error: jj != isz"); }
73: ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,(is_out+i));
74: PetscFree(nidx);
75: #else
76: ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,(is_out+i));
77: #endif
78: }
79: #if defined (PETSC_USE_CTABLE)
80: PetscTableDelete(gid1_lid1);
81: #else
82: PetscBTDestroy(table);
83: PetscFree(nidx);
84: #endif
85: return(0);
86: }
88: #undef __FUNCT__
89: #define __FUNCT__ "MatCompressIndicesSorted_MPIBAIJ"
90: static int MatCompressIndicesSorted_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out)
91: {
92: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)C->data;
93: int ierr,bs=baij->bs,i,j,k,val,n,*idx,*nidx,*idx_local;
94: PetscTruth flg;
95: #if defined (PETSC_USE_CTABLE)
96: int maxsz;
97: #else
98: int Nbs=baij->Nbs;
99: #endif
101: for (i=0; i<imax; i++) {
102: ISSorted(is_in[i],&flg);
103: if (!flg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Indices are not sorted");
104: }
105: #if defined (PETSC_USE_CTABLE)
106: /* Now check max size */
107: for (i=0,maxsz=0; i<imax; i++) {
108: ISGetIndices(is_in[i],&idx);
109: ISGetLocalSize(is_in[i],&n);
110: if (n%bs !=0) SETERRQ(1,"Indices are not block ordered");
111: n = n/bs; /* The reduced index size */
112: if (n > maxsz) maxsz = n;
113: }
114: PetscMalloc((maxsz+1)*sizeof(int),&nidx);
115: #else
116: PetscMalloc((Nbs+1)*sizeof(int),&nidx);
117: #endif
118: /* Now check if the indices are in block order */
119: for (i=0; i<imax; i++) {
120: ISGetIndices(is_in[i],&idx);
121: ISGetLocalSize(is_in[i],&n);
122: if (n%bs !=0) SETERRQ(1,"Indices are not block ordered");
124: n = n/bs; /* The reduced index size */
125: idx_local = idx;
126: for (j=0; j<n ; j++) {
127: val = idx_local[0];
128: if (val%bs != 0) SETERRQ(1,"Indices are not block ordered");
129: for (k=0; k<bs; k++) {
130: if (val+k != idx_local[k]) SETERRQ(1,"Indices are not block ordered");
131: }
132: nidx[j] = val/bs;
133: idx_local +=bs;
134: }
135: ISRestoreIndices(is_in[i],&idx);
136: ISCreateGeneral(PETSC_COMM_SELF,n,nidx,(is_out+i));
137: }
138: PetscFree(nidx);
140: return(0);
141: }
143: #undef __FUNCT__
144: #define __FUNCT__ "MatExpandIndices_MPIBAIJ"
145: static int MatExpandIndices_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out)
146: {
147: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)C->data;
148: int ierr,bs = baij->bs,n,i,j,k,*idx,*nidx;
149: #if defined (PETSC_USE_CTABLE)
150: int maxsz;
151: #else
152: int Nbs = baij->Nbs;
153: #endif
155:
156: #if defined (PETSC_USE_CTABLE)
157: /* Now check max size */
158: for (i=0,maxsz=0; i<imax; i++) {
159: ISGetIndices(is_in[i],&idx);
160: ISGetLocalSize(is_in[i],&n);
161: if (n*bs > maxsz) maxsz = n*bs;
162: }
163: PetscMalloc((maxsz+1)*sizeof(int),&nidx);
164: #else
165: PetscMalloc((Nbs*bs+1)*sizeof(int),&nidx);
166: #endif
168: for (i=0; i<imax; i++) {
169: ISGetIndices(is_in[i],&idx);
170: ISGetLocalSize(is_in[i],&n);
171: for (j=0; j<n ; ++j){
172: for (k=0; k<bs; k++)
173: nidx[j*bs+k] = idx[j]*bs+k;
174: }
175: ISRestoreIndices(is_in[i],&idx);
176: ISCreateGeneral(PETSC_COMM_SELF,n*bs,nidx,is_out+i);
177: }
178: PetscFree(nidx);
179: return(0);
180: }
183: #undef __FUNCT__
184: #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ"
185: int MatIncreaseOverlap_MPIBAIJ(Mat C,int imax,IS *is,int ov)
186: {
187: int i,ierr;
188: IS *is_new;
191: PetscMalloc(imax*sizeof(IS),&is_new);
192: /* Convert the indices into block format */
193: MatCompressIndicesGeneral_MPIBAIJ(C,imax,is,is_new);
194: if (ov < 0){ SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");}
195: for (i=0; i<ov; ++i) {
196: MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);
197: }
198: for (i=0; i<imax; i++) {ISDestroy(is[i]);}
199: MatExpandIndices_MPIBAIJ(C,imax,is_new,is);
200: for (i=0; i<imax; i++) {ISDestroy(is_new[i]);}
201: PetscFree(is_new);
202: return(0);
203: }
205: /*
206: Sample message format:
207: If a processor A wants processor B to process some elements corresponding
208: to index sets is[1], is[5]
209: mesg [0] = 2 (no of index sets in the mesg)
210: -----------
211: mesg [1] = 1 => is[1]
212: mesg [2] = sizeof(is[1]);
213: -----------
214: mesg [3] = 5 => is[5]
215: mesg [4] = sizeof(is[5]);
216: -----------
217: mesg [5]
218: mesg [n] data(is[1])
219: -----------
220: mesg[n+1]
221: mesg[m] data(is[5])
222: -----------
223:
224: Notes:
225: nrqs - no of requests sent (or to be sent out)
226: nrqr - no of requests received (which have to be, or which have been, processed)
227: */
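/*
   Illustrative sketch (added for exposition; not referenced by this file): packing one
   outgoing request in the format described above. Here nis is the number of index sets
   destined for the receiving processor, id[] their IS numbers, len[] their lengths, and
   rows[] the corresponding row lists; all of these names are hypothetical. The resulting
   length matches the "1 + 2*(no of IS) + data" header formula used below.
*/
static int SketchPackRequest(int nis,int *id,int *len,int **rows,int *mesg,int *msglen)
{
  int i,j,ct;

  mesg[0] = nis;                       /* header: number of index sets in this message */
  for (i=0; i<nis; i++) {
    mesg[2*i+1] = id[i];               /* which IS the following rows belong to */
    mesg[2*i+2] = len[i];              /* how many rows follow for that IS */
  }
  ct = 2*nis + 1;                      /* the data section starts right after the header */
  for (i=0; i<nis; i++) {
    for (j=0; j<len[i]; j++) mesg[ct++] = rows[i][j];
  }
  *msglen = ct;
  return 0;
}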
228: #undef __FUNCT__
229: #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Once"
230: static int MatIncreaseOverlap_MPIBAIJ_Once(Mat C,int imax,IS *is)
231: {
232: Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
233: int **idx,*n,*w1,*w2,*w3,*w4,*rtable,**data,len,*idx_i;
234: int size,rank,Mbs,i,j,k,ierr,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
235: int *ctr,*pa,*tmp,nrqr,*isz,*isz1,**xdata,**rbuf2;
236: int *onodes1,*olengths1,tag1,tag2,*onodes2,*olengths2;
237: PetscBT *table;
238: MPI_Comm comm;
239: MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2;
240: MPI_Status *s_status,*recv_status;
243: comm = C->comm;
244: size = c->size;
245: rank = c->rank;
246: Mbs = c->Mbs;
248: PetscObjectGetNewTag((PetscObject)C,&tag1);
249: PetscObjectGetNewTag((PetscObject)C,&tag2);
251: len = (imax+1)*sizeof(int*)+ (imax + Mbs)*sizeof(int);
252: ierr = PetscMalloc(len,&idx);
253: n = (int*)(idx + imax);
254: rtable = n + imax;
255:
256: for (i=0; i<imax; i++) {
257: ISGetIndices(is[i],&idx[i]);
258: ISGetLocalSize(is[i],&n[i]);
259: }
260:
261: /* Create hash table for the mapping :row -> proc*/
262: for (i=0,j=0; i<size; i++) {
263: len = c->rowners[i+1];
264: for (; j<len; j++) {
265: rtable[j] = i;
266: }
267: }
269: /* evaluate communication - messages to whom, length of each message, and buffer space
270: required. Based on this, buffers are allocated, and data copied into them */
271: PetscMalloc(size*4*sizeof(int),&w1);/* mesg size */
272: w2 = w1 + size; /* if w2[i] marked, then a message to proc i*/
273: w3 = w2 + size; /* no of IS that need to be sent to proc i */
274: w4 = w3 + size; /* temp work space used in determining w1, w2, w3 */
275: PetscMemzero(w1,size*3*sizeof(int)); /* initialise work vector*/
276: for (i=0; i<imax; i++) {
277: ierr = PetscMemzero(w4,size*sizeof(int)); /* initialise work vector*/
278: idx_i = idx[i];
279: len = n[i];
280: for (j=0; j<len; j++) {
281: row = idx_i[j];
282: if (row < 0) {
283: SETERRQ(1,"Index set cannot have negative entries");
284: }
285: proc = rtable[row];
286: w4[proc]++;
287: }
288: for (j=0; j<size; j++){
289: if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
290: }
291: }
293: nrqs = 0; /* no of outgoing messages */
294: msz = 0; /* total mesg length (for all procs) */
295: w1[rank] = 0; /* no mesg sent to itself */
296: w3[rank] = 0;
297: for (i=0; i<size; i++) {
298: if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
299: }
300: /* pa - is list of processors to communicate with */
301: PetscMalloc((nrqs+1)*sizeof(int),&pa);
302: for (i=0,j=0; i<size; i++) {
303: if (w1[i]) {pa[j] = i; j++;}
304: }
306: /* Each message would have a header = 1 + 2*(no of IS) + data */
307: for (i=0; i<nrqs; i++) {
308: j = pa[i];
309: w1[j] += w2[j] + 2*w3[j];
310: msz += w1[j];
311: }
312:
313: /* Determine the number of messages to expect, their lengths, and their from-ids */
314: PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
315: PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);
317: /* Now post the Irecvs corresponding to these messages */
318: PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);
319:
320: /* Allocate Memory for outgoing messages */
321: len = 2*size*sizeof(int*) + (size+msz)*sizeof(int);
322: PetscMalloc(len,&outdat);
323: ptr = outdat + size; /* Pointers to the data in outgoing buffers */
324: ierr = PetscMemzero(outdat,2*size*sizeof(int*));
325: tmp = (int*)(outdat + 2*size);
326: ctr = tmp + msz;
328: {
329: int *iptr = tmp,ict = 0;
330: for (i=0; i<nrqs; i++) {
331: j = pa[i];
332: iptr += ict;
333: outdat[j] = iptr;
334: ict = w1[j];
335: }
336: }
338: /* Form the outgoing messages */
339: /*plug in the headers*/
340: for (i=0; i<nrqs; i++) {
341: j = pa[i];
342: outdat[j][0] = 0;
343: PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(int));
344: ptr[j] = outdat[j] + 2*w3[j] + 1;
345: }
346:
347: /* Memory for doing local proc's work*/
348: {
349: int *d_p;
350: char *t_p;
352: len = (imax)*(sizeof(PetscBT) + sizeof(int*)+ sizeof(int)) +
353: (Mbs)*imax*sizeof(int) + (Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char) + 1;
354: PetscMalloc(len,&table);
355: PetscMemzero(table,len);
356: data = (int **)(table + imax);
357: isz = (int *)(data + imax);
358: d_p = (int *)(isz + imax);
359: t_p = (char *)(d_p + Mbs*imax);
360: for (i=0; i<imax; i++) {
361: table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
362: data[i] = d_p + (Mbs)*i;
363: }
364: }
366: /* Parse the IS and update local tables and the outgoing buf with the data*/
367: {
368: int n_i,*data_i,isz_i,*outdat_j,ctr_j;
369: PetscBT table_i;
371: for (i=0; i<imax; i++) {
372: ierr = PetscMemzero(ctr,size*sizeof(int));
373: n_i = n[i];
374: table_i = table[i];
375: idx_i = idx[i];
376: data_i = data[i];
377: isz_i = isz[i];
378: for (j=0; j<n_i; j++) { /* parse the indices of each IS */
379: row = idx_i[j];
380: proc = rtable[row];
381: if (proc != rank) { /* copy to the outgoing buffer */
382: ctr[proc]++;
383: *ptr[proc] = row;
384: ptr[proc]++;
385: }
386: else { /* Update the local table */
387: if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
388: }
389: }
390: /* Update the headers for the current IS */
391: for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
392: if ((ctr_j = ctr[j])) {
393: outdat_j = outdat[j];
394: k = ++outdat_j[0];
395: outdat_j[2*k] = ctr_j;
396: outdat_j[2*k-1] = i;
397: }
398: }
399: isz[i] = isz_i;
400: }
401: }
402:
403: /* Now post the sends */
404: PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
405: for (i=0; i<nrqs; ++i) {
406: j = pa[i];
407: MPI_Isend(outdat[j],w1[j],MPI_INT,j,tag1,comm,s_waits1+i);
408: }
409:
410: /* No longer need the original indices*/
411: for (i=0; i<imax; ++i) {
412: ISRestoreIndices(is[i],idx+i);
413: }
414: PetscFree(idx);
416: for (i=0; i<imax; ++i) {
417: ISDestroy(is[i]);
418: }
419:
420: /* Do Local work*/
421: MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);
423: /* Receive messages*/
424: PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);
425: MPI_Waitall(nrqr,r_waits1,recv_status);
426:
427: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);
428: MPI_Waitall(nrqs,s_waits1,s_status);
430: /* Phase 1 sends are complete - deallocate buffers */
431: PetscFree(outdat);
432: PetscFree(w1);
434: PetscMalloc((nrqr+1)*sizeof(int *),&xdata);
435: PetscMalloc((nrqr+1)*sizeof(int),&isz1);
436: MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);
437: PetscFree(rbuf);
439: /* Send the data back*/
440: /* Do a global reduction to know the buffer space req for incoming messages*/
441: {
442: int *rw1;
443:
444: PetscMalloc(size*sizeof(int),&rw1);
445: PetscMemzero(rw1,size*sizeof(int));
447: for (i=0; i<nrqr; ++i) {
448: proc = recv_status[i].MPI_SOURCE;
449: if (proc != onodes1[i]) SETERRQ(1,"MPI_SOURCE mismatch");
450: rw1[proc] = isz1[i];
451: }
452:
453: PetscFree(onodes1);
454: PetscFree(olengths1);
456: /* Determine the number of messages to expect, their lengths, and their from-ids */
457: PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);
458: PetscFree(rw1);
459: }
460: /* Now post the Irecvs corresponding to these messages */
461: PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);
462:
463: /* Now post the sends */
464: PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
465: for (i=0; i<nrqr; ++i) {
466: j = recv_status[i].MPI_SOURCE;
467: MPI_Isend(xdata[i],isz1[i],MPI_INT,j,tag2,comm,s_waits2+i);
468: }
470: /* receive work done on other processors*/
471: {
472: int idex,is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
473: PetscBT table_i;
474: MPI_Status *status2;
475:
476: PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);
478: for (i=0; i<nrqs; ++i) {
479: MPI_Waitany(nrqs,r_waits2,&idex,status2+i);
480: /* Process the message*/
481: rbuf2_i = rbuf2[idex];
482: ct1 = 2*rbuf2_i[0]+1;
483: jmax = rbuf2[idex][0];
484: for (j=1; j<=jmax; j++) {
485: max = rbuf2_i[2*j];
486: is_no = rbuf2_i[2*j-1];
487: isz_i = isz[is_no];
488: data_i = data[is_no];
489: table_i = table[is_no];
490: for (k=0; k<max; k++,ct1++) {
491: row = rbuf2_i[ct1];
492: if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
493: }
494: isz[is_no] = isz_i;
495: }
496: }
497: MPI_Waitall(nrqr,s_waits2,status2);
498: PetscFree(status2);
499: }
500:
501: for (i=0; i<imax; ++i) {
502: ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],is+i);
503: }
504:
505:
506: PetscFree(onodes2);
507: PetscFree(olengths2);
509: PetscFree(pa);
510: PetscFree(rbuf2);
511: PetscFree(s_waits1);
512: PetscFree(r_waits1);
513: PetscFree(s_waits2);
514: PetscFree(r_waits2);
515: PetscFree(table);
516: PetscFree(s_status);
517: PetscFree(recv_status);
518: PetscFree(xdata[0]);
519: PetscFree(xdata);
520: PetscFree(isz1);
521: return(0);
522: }
524: #undef __FUNCT__
525: #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Local"
526: /*
527: MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
528: the work on the local processor.
530: Inputs:
531: C - MAT_MPIBAIJ;
532: imax - total no of index sets processed at a time;
533: table - an array of char - size = Mbs bits.
534:
535: Output:
536: isz - array containing the count of the solution elements corresponding
537: to each index set;
538: data - pointer to the solutions
539: */
540: static int MatIncreaseOverlap_MPIBAIJ_Local(Mat C,int imax,PetscBT *table,int *isz,int **data)
541: {
542: Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
543: Mat A = c->A,B = c->B;
544: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
545: int start,end,val,max,rstart,cstart,*ai,*aj;
546: int *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
547: PetscBT table_i;
550: rstart = c->rstart;
551: cstart = c->cstart;
552: ai = a->i;
553: aj = a->j;
554: bi = b->i;
555: bj = b->j;
556: garray = c->garray;
558:
559: for (i=0; i<imax; i++) {
560: data_i = data[i];
561: table_i = table[i];
562: isz_i = isz[i];
563: for (j=0,max=isz[i]; j<max; j++) {
564: row = data_i[j] - rstart;
565: start = ai[row];
566: end = ai[row+1];
567: for (k=start; k<end; k++) { /* Amat */
568: val = aj[k] + cstart;
569: if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
570: }
571: start = bi[row];
572: end = bi[row+1];
573: for (k=start; k<end; k++) { /* Bmat */
574: val = garray[bj[k]];
575: if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
576: }
577: }
578: isz[i] = isz_i;
579: }
580: return(0);
581: }
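/*
   Illustrative sketch (added for exposition; not referenced by this file): the
   append-if-new idiom used by MatIncreaseOverlap_MPIBAIJ_Local() above, shown in
   isolation. A PetscBT bit table with one bit per block row records which rows are
   already in the output list, so each candidate row is appended at most once. All
   names in this sketch are hypothetical.
*/
static int SketchAppendUnique(int Mbs,int ncand,int *cand,int *list,int *nlist)
{
  PetscBT seen;
  int     ierr,k,len = *nlist;

  ierr = PetscBTCreate(Mbs,seen); if (ierr) return ierr;
  for (k=0; k<len; k++) { PetscBTSet(seen,list[k]); }                /* mark rows already present */
  for (k=0; k<ncand; k++) {
    if (!PetscBTLookupSet(seen,cand[k])) { list[len++] = cand[k]; }  /* test-and-set in one call */
  }
  *nlist = len;
  PetscBTDestroy(seen);
  return 0;
}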
582: #undef __FUNCT__
583: #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Receive"
584: /*
585: MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
586: and return the output
588: Input:
589: C - the matrix
590: nrqr - no of messages being processed.
591: rbuf - an array of pointers to the received requests
592:
593: Output:
594: xdata - array of messages to be sent back
595: isz1 - size of each message
597: For better efficiency perhaps we should malloc each xdata[i] separately;
598: then if a realloc is required we need only copy the data for that one row
599: rather than all previous rows, as it is now where a single large chunk of
600: memory is used.
602: */
603: static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,int nrqr,int **rbuf,int **xdata,int * isz1)
604: {
605: Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
606: Mat A = c->A,B = c->B;
607: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
608: int rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
609: int row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
610: int val,max1,max2,rank,Mbs,no_malloc =0,*tmp,new_estimate,ctr;
611: int *rbuf_i,kmax,rbuf_0,ierr;
612: PetscBT xtable;
615: rank = c->rank;
616: Mbs = c->Mbs;
617: rstart = c->rstart;
618: cstart = c->cstart;
619: ai = a->i;
620: aj = a->j;
621: bi = b->i;
622: bj = b->j;
623: garray = c->garray;
624:
625:
626: for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
627: rbuf_i = rbuf[i];
628: rbuf_0 = rbuf_i[0];
629: ct += rbuf_0;
630: for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
631: }
632:
633: if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs;
634: else max1 = 1;
635: mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
636: ierr = PetscMalloc(mem_estimate*sizeof(int),&xdata[0]);
637: ++no_malloc;
638: ierr = PetscBTCreate(Mbs,xtable);
639: ierr = PetscMemzero(isz1,nrqr*sizeof(int));
640:
641: ct3 = 0;
642: for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
643: rbuf_i = rbuf[i];
644: rbuf_0 = rbuf_i[0];
645: ct1 = 2*rbuf_0+1;
646: ct2 = ct1;
647: ct3 += ct1;
648: for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
649: PetscBTMemzero(Mbs,xtable);
650: oct2 = ct2;
651: kmax = rbuf_i[2*j];
652: for (k=0; k<kmax; k++,ct1++) {
653: row = rbuf_i[ct1];
654: if (!PetscBTLookupSet(xtable,row)) {
655: if (!(ct3 < mem_estimate)) {
656: new_estimate = (int)(1.5*mem_estimate)+1;
657: PetscMalloc(new_estimate * sizeof(int),&tmp);
658: PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));
659: PetscFree(xdata[0]);
660: xdata[0] = tmp;
661: mem_estimate = new_estimate; ++no_malloc;
662: for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
663: }
664: xdata[i][ct2++] = row;
665: ct3++;
666: }
667: }
668: for (k=oct2,max2=ct2; k<max2; k++) {
669: row = xdata[i][k] - rstart;
670: start = ai[row];
671: end = ai[row+1];
672: for (l=start; l<end; l++) {
673: val = aj[l] + cstart;
674: if (!PetscBTLookupSet(xtable,val)) {
675: if (!(ct3 < mem_estimate)) {
676: new_estimate = (int)(1.5*mem_estimate)+1;
677: PetscMalloc(new_estimate * sizeof(int),&tmp);
678: PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));
679: PetscFree(xdata[0]);
680: xdata[0] = tmp;
681: mem_estimate = new_estimate; ++no_malloc;
682: for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
683: }
684: xdata[i][ct2++] = val;
685: ct3++;
686: }
687: }
688: start = bi[row];
689: end = bi[row+1];
690: for (l=start; l<end; l++) {
691: val = garray[bj[l]];
692: if (!PetscBTLookupSet(xtable,val)) {
693: if (!(ct3 < mem_estimate)) {
694: new_estimate = (int)(1.5*mem_estimate)+1;
695: PetscMalloc(new_estimate * sizeof(int),&tmp);
696: PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));
697: PetscFree(xdata[0]);
698: xdata[0] = tmp;
699: mem_estimate = new_estimate; ++no_malloc;
700: for (ctr =1; ctr <=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
701: }
702: xdata[i][ct2++] = val;
703: ct3++;
704: }
705: }
706: }
707: /* Update the header*/
708: xdata[i][2*j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
709: xdata[i][2*j-1] = rbuf_i[2*j-1];
710: }
711: xdata[i][0] = rbuf_0;
712: xdata[i+1] = xdata[i] + ct2;
713: isz1[i] = ct2; /* size of each message */
714: }
715: PetscBTDestroy(xtable);
716: PetscLogInfo(0,"MatIncreaseOverlap_MPIBAIJ:[%d] Allocated %d bytes, required %d, no of mallocs = %d\n",rank,mem_estimate,ct3,no_malloc);
717: return(0);
718: }
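/*
   Illustrative sketch (added for exposition; not referenced by this file): the
   buffer-growth idiom used inside MatIncreaseOverlap_MPIBAIJ_Receive() above, shown
   in isolation. All replies share the single allocation xdata[0]; when the estimate
   is exceeded the buffer is grown by roughly 1.5x, the old contents are copied over,
   and the pointers of the messages assembled so far are re-derived from isz1[]. The
   function name is hypothetical.
*/
static int SketchGrowReplyBuffer(int **xdata,int *isz1,int i,int *mem_estimate)
{
  int ierr,new_estimate,ctr,*tmp;

  new_estimate  = (int)(1.5*(*mem_estimate)) + 1;
  ierr          = PetscMalloc(new_estimate*sizeof(int),&tmp);            if (ierr) return ierr;
  ierr          = PetscMemcpy(tmp,xdata[0],(*mem_estimate)*sizeof(int)); if (ierr) return ierr;
  ierr          = PetscFree(xdata[0]);                                   if (ierr) return ierr;
  xdata[0]      = tmp;
  *mem_estimate = new_estimate;
  for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; } /* re-anchor earlier messages */
  return 0;
}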
720: static int MatGetSubMatrices_MPIBAIJ_local(Mat,int,IS *,IS *,MatReuse,Mat *);
722: #undef __FUNCT__
723: #define __FUNCT__ "MatGetSubMatrices_MPIBAIJ"
724: int MatGetSubMatrices_MPIBAIJ(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat **submat)
725: {
726: IS *isrow_new,*iscol_new;
727: Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
728: int nmax,nstages_local,nstages,i,pos,max_no,ierr;
731: /* The compression and expansion should be avoided. It does not point
732: out errors and it might change the indices, hence it is buggy */
734: PetscMalloc(2*(ismax+1)*sizeof(IS),&isrow_new);
735: iscol_new = isrow_new + ismax;
736: MatCompressIndicesSorted_MPIBAIJ(C,ismax,isrow,isrow_new);
737: MatCompressIndicesSorted_MPIBAIJ(C,ismax,iscol,iscol_new);
739: /* Allocate memory to hold all the submatrices */
740: if (scall != MAT_REUSE_MATRIX) {
741: PetscMalloc((ismax+1)*sizeof(Mat),submat);
742: }
743: /* Determine the number of stages through which submatrices are done */
744: nmax = 20*1000000 / (c->Nbs * sizeof(int));
745: if (!nmax) nmax = 1;
746: nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
747:
748: /* Make sure every processor loops through the nstages */
749: MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);
750: for (i=0,pos=0; i<nstages; i++) {
751: if (pos+nmax <= ismax) max_no = nmax;
752: else if (pos == ismax) max_no = 0;
753: else max_no = ismax-pos;
754: MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,*submat+pos);
755: pos += max_no;
756: }
757:
758: for (i=0; i<ismax; i++) {
759: ISDestroy(isrow_new[i]);
760: ISDestroy(iscol_new[i]);
761: }
762: PetscFree(isrow_new);
763: return(0);
764: }
766: #if defined (PETSC_USE_CTABLE)
767: #undef __FUNCT__
768: #define __FUNCT__ "PetscGetProc"
769: int PetscGetProc(const int gid, const int numprocs, const int proc_gnode[], int *proc)
770: {
771: int nGlobalNd = proc_gnode[numprocs];
772: int fproc = (int) ((float)gid * (float)numprocs / (float)nGlobalNd + 0.5);
773:
775: /* if(fproc < 0) SETERRQ(1,"fproc < 0");*/
776: if (fproc > numprocs) fproc = numprocs;
777: while (gid < proc_gnode[fproc] || gid >= proc_gnode[fproc+1]) {
778: if (gid < proc_gnode[fproc]) fproc--;
779: else fproc++;
780: }
781: /* if(fproc<0 || fproc>=numprocs) { SETERRQ(1,"fproc < 0 || fproc >= numprocs"); }*/
782: *proc = fproc;
783: return(0);
784: }
785: #endif
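#if defined (PETSC_USE_CTABLE)
/*
   Illustrative sketch (added for exposition; not referenced by this file, and the
   function name is hypothetical): PetscGetProc() above replaces the Mbs-long rtable[]
   lookup used in the non-CTABLE branches. Given the block-row ownership ranges in
   c->rowners[0..size], it returns the rank owning a global block row.
*/
static int SketchFindOwner(Mat C,int grow,int *owner)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
  int         ierr;

  ierr = PetscGetProc(grow,c->size,c->rowners,owner); if (ierr) return ierr;
  return 0;
}
#endif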
787: /* -------------------------------------------------------------------------*/
788: #undef __FUNCT__
789: #define __FUNCT__ "MatGetSubMatrices_MPIBAIJ_local"
790: static int MatGetSubMatrices_MPIBAIJ_local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats)
791: {
792: Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
793: Mat A = c->A;
794: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
795: int **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,start,end,size;
796: int **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,ierr,**rbuf1,row,proc;
797: int nrqs,msz,**ptr,idex,*req_size,*ctr,*pa,*tmp,tcol,nrqr;
798: int **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
799: int **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
800: int len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
801: int bs=c->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
802: int cstart = c->cstart,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
803: int *bmap = c->garray,ctmp,rstart=c->rstart,tag0,tag1,tag2,tag3;
804: MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
805: MPI_Request *r_waits4,*s_waits3,*s_waits4;
806: MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
807: MPI_Status *r_status3,*r_status4,*s_status4;
808: MPI_Comm comm;
809: MatScalar **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i,*vworkA,*vworkB;
810: MatScalar *a_a=a->a,*b_a=b->a;
811: PetscTruth flag;
812: int *onodes1,*olengths1;
814: #if defined (PETSC_USE_CTABLE)
815: int tt;
816: PetscTable *rowmaps,*colmaps,lrow1_grow1,lcol1_gcol1;
817: #else
818: int **cmap,*cmap_i,*rtable,*rmap_i,**rmap, Mbs = c->Mbs;
819: #endif
822: comm = C->comm;
823: tag0 = C->tag;
824: size = c->size;
825: rank = c->rank;
826:
827: /* Get some new tags to keep the communication clean */
828: PetscObjectGetNewTag((PetscObject)C,&tag1);
829: PetscObjectGetNewTag((PetscObject)C,&tag2);
830: PetscObjectGetNewTag((PetscObject)C,&tag3);
832: /* Check if the col indices are sorted */
833: for (i=0; i<ismax; i++) {
834: ISSorted(iscol[i],(PetscTruth*)&j);
835: if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IS is not sorted");
836: }
838: len = (2*ismax+1)*(sizeof(int*)+ sizeof(int));
839: #if !defined (PETSC_USE_CTABLE)
840: len += (Mbs+1)*sizeof(int);
841: #endif
842: PetscMalloc(len,&irow);
843: icol = irow + ismax;
844: nrow = (int*)(icol + ismax);
845: ncol = nrow + ismax;
846: #if !defined (PETSC_USE_CTABLE)
847: rtable = ncol + ismax;
848: /* Create hash table for the mapping :row -> proc*/
849: for (i=0,j=0; i<size; i++) {
850: jmax = c->rowners[i+1];
851: for (; j<jmax; j++) {
852: rtable[j] = i;
853: }
854: }
855: #endif
856:
857: for (i=0; i<ismax; i++) {
858: ISGetIndices(isrow[i],&irow[i]);
859: ISGetIndices(iscol[i],&icol[i]);
860: ISGetLocalSize(isrow[i],&nrow[i]);
861: ISGetLocalSize(iscol[i],&ncol[i]);
862: }
864: /* evaluate communication - messages to whom, length of each message, and buffer space
865: required. Based on this, buffers are allocated, and data copied into them */
866: PetscMalloc(size*4*sizeof(int),&w1); /* mesg size */
867: w2 = w1 + size; /* if w2[i] marked, then a message to proc i*/
868: w3 = w2 + size; /* no of IS that need to be sent to proc i */
869: w4 = w3 + size; /* temp work space used in determining w1, w2, w3 */
870: PetscMemzero(w1,size*3*sizeof(int)); /* initialise work vector*/
871: for (i=0; i<ismax; i++) {
872: ierr = PetscMemzero(w4,size*sizeof(int)); /* initialise work vector*/
873: jmax = nrow[i];
874: irow_i = irow[i];
875: for (j=0; j<jmax; j++) {
876: row = irow_i[j];
877: #if defined (PETSC_USE_CTABLE)
878: PetscGetProc(row,size,c->rowners,&proc);
879: #else
880: proc = rtable[row];
881: #endif
882: w4[proc]++;
883: }
884: for (j=0; j<size; j++) {
885: if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
886: }
887: }
889: nrqs = 0; /* no of outgoing messages */
890: msz = 0; /* total mesg length for all procs */
891: w1[rank] = 0; /* no mesg sent to itself */
892: w3[rank] = 0;
893: for (i=0; i<size; i++) {
894: if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
895: }
896: PetscMalloc((nrqs+1)*sizeof(int),&pa); /*(proc -array)*/
897: for (i=0,j=0; i<size; i++) {
898: if (w1[i]) { pa[j] = i; j++; }
899: }
901: /* Each message would have a header = 1 + 2*(no of IS) + data */
902: for (i=0; i<nrqs; i++) {
903: j = pa[i];
904: w1[j] += w2[j] + 2* w3[j];
905: msz += w1[j];
906: }
908: /* Determine the number of messages to expect, their lengths, and their from-ids */
909: PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
910: PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);
912: /* Now post the Irecvs corresponding to these messages */
913: PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);
914:
915: PetscFree(onodes1);
916: PetscFree(olengths1);
918: /* Allocate Memory for outgoing messages */
919: len = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
920: PetscMalloc(len,&sbuf1);
921: ptr = sbuf1 + size; /* Pointers to the data in outgoing buffers */
922: PetscMemzero(sbuf1,2*size*sizeof(int*));
923: /* allocate memory for outgoing data + buf to receive the first reply */
924: tmp = (int*)(ptr + size);
925: ctr = tmp + 2*msz;
927: {
928: int *iptr = tmp,ict = 0;
929: for (i=0; i<nrqs; i++) {
930: j = pa[i];
931: iptr += ict;
932: sbuf1[j] = iptr;
933: ict = w1[j];
934: }
935: }
937: /* Form the outgoing messages */
938: /* Initialise the header space */
939: for (i=0; i<nrqs; i++) {
940: j = pa[i];
941: sbuf1[j][0] = 0;
942: ierr = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));
943: ptr[j] = sbuf1[j] + 2*w3[j] + 1;
944: }
945:
946: /* Parse the isrow and copy data into outbuf */
947: for (i=0; i<ismax; i++) {
948: ierr = PetscMemzero(ctr,size*sizeof(int));
949: irow_i = irow[i];
950: jmax = nrow[i];
951: for (j=0; j<jmax; j++) { /* parse the indices of each IS */
952: row = irow_i[j];
953: #if defined (PETSC_USE_CTABLE)
954: PetscGetProc(row,size,c->rowners,&proc);
955: #else
956: proc = rtable[row];
957: #endif
958: if (proc != rank) { /* copy to the outgoing buf*/
959: ctr[proc]++;
960: *ptr[proc] = row;
961: ptr[proc]++;
962: }
963: }
964: /* Update the headers for the current IS */
965: for (j=0; j<size; j++) { /* Can Optimise this loop too */
966: if ((ctr_j = ctr[j])) {
967: sbuf1_j = sbuf1[j];
968: k = ++sbuf1_j[0];
969: sbuf1_j[2*k] = ctr_j;
970: sbuf1_j[2*k-1] = i;
971: }
972: }
973: }
975: /* Now post the sends */
976: PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
977: for (i=0; i<nrqs; ++i) {
978: j = pa[i];
979: MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);
980: }
982: /* Post Receives to capture the buffer size */
983: ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
984: ierr = PetscMalloc((nrqs+1)*sizeof(int *),&rbuf2);
985: rbuf2[0] = tmp + msz;
986: for (i=1; i<nrqs; ++i) {
987: j = pa[i];
988: rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
989: }
990: for (i=0; i<nrqs; ++i) {
991: j = pa[i];
992: MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);
993: }
995: /* Send to other procs the buf size they should allocate */
997: /* Receive messages*/
998: ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
999: ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
1000: len = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
1001: ierr = PetscMalloc(len,&sbuf2);
1002: req_size = (int*)(sbuf2 + nrqr);
1003: req_source = req_size + nrqr;
1004:
1005: {
1006: Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
1007: int *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;
1009: for (i=0; i<nrqr; ++i) {
1010: MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
1011: req_size[idex] = 0;
1012: rbuf1_i = rbuf1[idex];
1013: start = 2*rbuf1_i[0] + 1;
1014: ierr = MPI_Get_count(r_status1+i,MPI_INT,&end);
1015: ierr = PetscMalloc(end*sizeof(int),&sbuf2[idex]);
1016: sbuf2_i = sbuf2[idex];
1017: for (j=start; j<end; j++) {
1018: id = rbuf1_i[j] - rstart;
1019: ncols = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
1020: sbuf2_i[j] = ncols;
1021: req_size[idex] += ncols;
1022: }
1023: req_source[idex] = r_status1[i].MPI_SOURCE;
1024: /* form the header */
1025: sbuf2_i[0] = req_size[idex];
1026: for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
1027: MPI_Isend(sbuf2_i,end,MPI_INT,req_source[idex],tag1,comm,s_waits2+i);
1028: }
1029: }
1030: PetscFree(r_status1);
1031: PetscFree(r_waits1);
1033: /* recv buffer sizes */
1034: /* Receive messages*/
1036: PetscMalloc((nrqs+1)*sizeof(int*),&rbuf3);
1037: PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);
1038: PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);
1039: PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);
1040: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);
1042: for (i=0; i<nrqs; ++i) {
1043: MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);
1044: PetscMalloc(rbuf2[idex][0]*sizeof(int),&rbuf3[idex]);
1045: PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);
1046: MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPI_INT,
1047: r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);
1048: MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,
1049: r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);
1050: }
1051: PetscFree(r_status2);
1052: PetscFree(r_waits2);
1053:
1054: /* Wait on sends1 and sends2 */
1055: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
1056: PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);
1058: MPI_Waitall(nrqs,s_waits1,s_status1);
1059: MPI_Waitall(nrqr,s_waits2,s_status2);
1060: PetscFree(s_status1);
1061: PetscFree(s_status2);
1062: PetscFree(s_waits1);
1063: PetscFree(s_waits2);
1065: /* Now allocate buffers for a->j, and send them off */
1066: PetscMalloc((nrqr+1)*sizeof(int *),&sbuf_aj);
1067: for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1068: PetscMalloc((j+1)*sizeof(int),&sbuf_aj[0]);
1069: for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
1070:
1071: PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);
1072: {
1073: for (i=0; i<nrqr; i++) {
1074: rbuf1_i = rbuf1[i];
1075: sbuf_aj_i = sbuf_aj[i];
1076: ct1 = 2*rbuf1_i[0] + 1;
1077: ct2 = 0;
1078: for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1079: kmax = rbuf1[i][2*j];
1080: for (k=0; k<kmax; k++,ct1++) {
1081: row = rbuf1_i[ct1] - rstart;
1082: nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
1083: ncols = nzA + nzB;
1084: cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
1086: /* load the column indices for this row into cols*/
1087: cols = sbuf_aj_i + ct2;
1088: for (l=0; l<nzB; l++) {
1089: if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
1090: else break;
1091: }
1092: imark = l;
1093: for (l=0; l<nzA; l++) cols[imark+l] = cstart + cworkA[l];
1094: for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
1095: ct2 += ncols;
1096: }
1097: }
1098: MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);
1099: }
1100: }
1101: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);
1102: PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);
1104: /* Allocate buffers for a->a, and send them off */
1105: PetscMalloc((nrqr+1)*sizeof(MatScalar *),&sbuf_aa);
1106: for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1107: PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);
1108: for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;
1109:
1110: PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);
1111: {
1112: for (i=0; i<nrqr; i++) {
1113: rbuf1_i = rbuf1[i];
1114: sbuf_aa_i = sbuf_aa[i];
1115: ct1 = 2*rbuf1_i[0]+1;
1116: ct2 = 0;
1117: for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1118: kmax = rbuf1_i[2*j];
1119: for (k=0; k<kmax; k++,ct1++) {
1120: row = rbuf1_i[ct1] - rstart;
1121: nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
1122: ncols = nzA + nzB;
1123: cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
1124: vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;
1126: /* load the column values for this row into vals*/
1127: vals = sbuf_aa_i+ct2*bs2;
1128: for (l=0; l<nzB; l++) {
1129: if ((bmap[cworkB[l]]) < cstart) {
1130: PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));
1131: }
1132: else break;
1133: }
1134: imark = l;
1135: for (l=0; l<nzA; l++) {
1136: PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));
1137: }
1138: for (l=imark; l<nzB; l++) {
1139: PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));
1140: }
1141: ct2 += ncols;
1142: }
1143: }
1144: MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);
1145: }
1146: }
1147: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);
1148: PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);
1149: PetscFree(rbuf1);
1151: /* Form the matrix */
1152: /* create col map */
1153: {
1154: int *icol_i;
1155: #if defined (PETSC_USE_CTABLE)
1156: /* Create col map*/
1157: PetscMalloc((1+ismax)*sizeof(PetscTable),&colmaps);
1158: for (i=0; i<ismax+1; i++) {
1159: PetscTableCreate(((i<ismax) ? ncol[i] : ncol[i-1])+1,&colmaps[i]);
1160: }
1161: #else
1162: len = (1+ismax)*sizeof(int*)+ ismax*c->Nbs*sizeof(int);
1163: ierr = PetscMalloc(len,&cmap);
1164: cmap[0] = (int *)(cmap + ismax);
1165: ierr = PetscMemzero(cmap[0],(1+ismax*c->Nbs)*sizeof(int));
1166: for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + c->Nbs; }
1167: #endif
1168: for (i=0; i<ismax; i++) {
1169: jmax = ncol[i];
1170: icol_i = icol[i];
1171: #if defined (PETSC_USE_CTABLE)
1172: lcol1_gcol1 = colmaps[i];
1173: for (j=0; j<jmax; j++) {
1174: PetscTableAdd(lcol1_gcol1,icol_i[j]+1,j+1);
1175: }
1176: #else
1177: cmap_i = cmap[i];
1178: for (j=0; j<jmax; j++) {
1179: cmap_i[icol_i[j]] = j+1;
1180: }
1181: #endif
1182: }
1183: }
1185: /* Create lens which is required for MatCreate... */
1186: for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
1187: len = (1+ismax)*sizeof(int*)+ j*sizeof(int);
1188: ierr = PetscMalloc(len,&lens);
1189: lens[0] = (int *)(lens + ismax);
1190: ierr = PetscMemzero(lens[0],j*sizeof(int));
1191: for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }
1192:
1193: /* Update lens from local data */
1194: for (i=0; i<ismax; i++) {
1195: jmax = nrow[i];
1196: #if defined (PETSC_USE_CTABLE)
1197: lcol1_gcol1 = colmaps[i];
1198: #else
1199: cmap_i = cmap[i];
1200: #endif
1201: irow_i = irow[i];
1202: lens_i = lens[i];
1203: for (j=0; j<jmax; j++) {
1204: row = irow_i[j];
1205: #if defined (PETSC_USE_CTABLE)
1206: PetscGetProc(row,size,c->rowners,&proc);
1207: #else
1208: proc = rtable[row];
1209: #endif
1210: if (proc == rank) {
1211: /* Get indices from matA and then from matB */
1212: row = row - rstart;
1213: nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
1214: cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
1215: #if defined (PETSC_USE_CTABLE)
1216: for (k=0; k<nzA; k++) {
1217: PetscTableFind(lcol1_gcol1,cstart+cworkA[k]+1,&tt);
1218: if (tt) { lens_i[j]++; }
1219: }
1220: for (k=0; k<nzB; k++) {
1221: PetscTableFind(lcol1_gcol1,bmap[cworkB[k]]+1,&tt);
1222: if (tt) { lens_i[j]++; }
1223: }
1224: #else
1225: for (k=0; k<nzA; k++) {
1226: if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
1227: }
1228: for (k=0; k<nzB; k++) {
1229: if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
1230: }
1231: #endif
1232: }
1233: }
1234: }
1235: #if defined (PETSC_USE_CTABLE)
1236: /* Create row map*/
1237: PetscMalloc((1+ismax)*sizeof(PetscTable),&rowmaps);
1238: for (i=0; i<ismax+1; i++){
1239: PetscTableCreate((i<ismax) ? nrow[i] : nrow[i-1],&rowmaps[i]);
1240: }
1241: #else
1242: /* Create row map*/
1243: len = (1+ismax)*sizeof(int*)+ ismax*Mbs*sizeof(int);
1244: ierr = PetscMalloc(len,&rmap);
1245: rmap[0] = (int *)(rmap + ismax);
1246: ierr = PetscMemzero(rmap[0],ismax*Mbs*sizeof(int));
1247: for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs;}
1248: #endif
1249: for (i=0; i<ismax; i++) {
1250: irow_i = irow[i];
1251: jmax = nrow[i];
1252: #if defined (PETSC_USE_CTABLE)
1253: lrow1_grow1 = rowmaps[i];
1254: for (j=0; j<jmax; j++) {
1255: PetscTableAdd(lrow1_grow1,irow_i[j]+1,j+1);
1256: }
1257: #else
1258: rmap_i = rmap[i];
1259: for (j=0; j<jmax; j++) {
1260: rmap_i[irow_i[j]] = j;
1261: }
1262: #endif
1263: }
1265: /* Update lens from offproc data */
1266: {
1267: int *rbuf2_i,*rbuf3_i,*sbuf1_i;
1269: for (tmp2=0; tmp2<nrqs; tmp2++) {
1270: ierr = MPI_Waitany(nrqs,r_waits3,&i,r_status3+tmp2);
1271: idex = pa[i];
1272: sbuf1_i = sbuf1[idex];
1273: jmax = sbuf1_i[0];
1274: ct1 = 2*jmax+1;
1275: ct2 = 0;
1276: rbuf2_i = rbuf2[i];
1277: rbuf3_i = rbuf3[i];
1278: for (j=1; j<=jmax; j++) {
1279: is_no = sbuf1_i[2*j-1];
1280: max1 = sbuf1_i[2*j];
1281: lens_i = lens[is_no];
1282: #if defined (PETSC_USE_CTABLE)
1283: lcol1_gcol1 = colmaps[is_no];
1284: lrow1_grow1 = rowmaps[is_no];
1285: #else
1286: cmap_i = cmap[is_no];
1287: rmap_i = rmap[is_no];
1288: #endif
1289: for (k=0; k<max1; k++,ct1++) {
1290: #if defined (PETSC_USE_CTABLE)
1291: PetscTableFind(lrow1_grow1,sbuf1_i[ct1]+1,&row);
1292: row--;
1293: if(row < 0) { SETERRQ(1,"row not found in table"); }
1294: #else
1295: row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
1296: #endif
1297: max2 = rbuf2_i[ct1];
1298: for (l=0; l<max2; l++,ct2++) {
1299: #if defined (PETSC_USE_CTABLE)
1300: PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tt);
1301: if (tt) {
1302: lens_i[row]++;
1303: }
1304: #else
1305: if (cmap_i[rbuf3_i[ct2]]) {
1306: lens_i[row]++;
1307: }
1308: #endif
1309: }
1310: }
1311: }
1312: }
1313: }
1314: PetscFree(r_status3);
1315: PetscFree(r_waits3);
1316: MPI_Waitall(nrqr,s_waits3,s_status3);
1317: PetscFree(s_status3);
1318: PetscFree(s_waits3);
1320: /* Create the submatrices */
1321: if (scall == MAT_REUSE_MATRIX) {
1322: /*
1323: Assumes new rows are same length as the old rows, hence bug!
1324: */
1325: for (i=0; i<ismax; i++) {
1326: mat = (Mat_SeqBAIJ *)(submats[i]->data);
1327: if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || mat->bs != bs)) {
1328: SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
1329: }
1330: PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(int),&flag);
1331: if (flag == PETSC_FALSE) {
1332: SETERRQ(PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros");
1333: }
1334: /* Initialize the matrix as if empty */
1335: PetscMemzero(mat->ilen,mat->mbs*sizeof(int));
1336: submats[i]->factor = C->factor;
1337: }
1338: } else {
1339: for (i=0; i<ismax; i++) {
1340: MatCreateSeqBAIJ(PETSC_COMM_SELF,a->bs,nrow[i]*bs,ncol[i]*bs,0,lens[i],submats+i);
1341: }
1342: }
1344: /* Assemble the matrices */
1345: /* First assemble the local rows */
1346: {
1347: int ilen_row,*imat_ilen,*imat_j,*imat_i;
1348: MatScalar *imat_a;
1349:
1350: for (i=0; i<ismax; i++) {
1351: mat = (Mat_SeqBAIJ*)submats[i]->data;
1352: imat_ilen = mat->ilen;
1353: imat_j = mat->j;
1354: imat_i = mat->i;
1355: imat_a = mat->a;
1357: #if defined (PETSC_USE_CTABLE)
1358: lcol1_gcol1 = colmaps[i];
1359: lrow1_grow1 = rowmaps[i];
1360: #else
1361: cmap_i = cmap[i];
1362: rmap_i = rmap[i];
1363: #endif
1364: irow_i = irow[i];
1365: jmax = nrow[i];
1366: for (j=0; j<jmax; j++) {
1367: row = irow_i[j];
1368: #if defined (PETSC_USE_CTABLE)
1369: PetscGetProc(row,size,c->rowners,&proc);
1370: #else
1371: proc = rtable[row];
1372: #endif
1373: if (proc == rank) {
1374: row = row - rstart;
1375: nzA = a_i[row+1] - a_i[row];
1376: nzB = b_i[row+1] - b_i[row];
1377: cworkA = a_j + a_i[row];
1378: cworkB = b_j + b_i[row];
1379: vworkA = a_a + a_i[row]*bs2;
1380: vworkB = b_a + b_i[row]*bs2;
1381: #if defined (PETSC_USE_CTABLE)
1382: PetscTableFind(lrow1_grow1,row+rstart+1,&row);
1383: row--;
1384: if (row < 0) { SETERRQ(1,"row not found in table"); }
1385: #else
1386: row = rmap_i[row + rstart];
1387: #endif
1388: mat_i = imat_i[row];
1389: mat_a = imat_a + mat_i*bs2;
1390: mat_j = imat_j + mat_i;
1391: ilen_row = imat_ilen[row];
1393: /* load the column indices for this row into cols*/
1394: for (l=0; l<nzB; l++) {
1395: if ((ctmp = bmap[cworkB[l]]) < cstart) {
1396: #if defined (PETSC_USE_CTABLE)
1397: PetscTableFind(lcol1_gcol1,ctmp+1,&tcol);
1398: if (tcol) {
1399: #else
1400: if ((tcol = cmap_i[ctmp])) {
1401: #endif
1402: *mat_j++ = tcol - 1;
1403: ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));
1404: mat_a += bs2;
1405: ilen_row++;
1406: }
1407: } else break;
1408: }
1409: imark = l;
1410: for (l=0; l<nzA; l++) {
1411: #if defined (PETSC_USE_CTABLE)
1412: PetscTableFind(lcol1_gcol1,cstart+cworkA[l]+1,&tcol);
1413: if (tcol) {
1414: #else
1415: if ((tcol = cmap_i[cstart + cworkA[l]])) {
1416: #endif
1417: *mat_j++ = tcol - 1;
1418: ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));
1419: mat_a += bs2;
1420: ilen_row++;
1421: }
1422: }
1423: for (l=imark; l<nzB; l++) {
1424: #if defined (PETSC_USE_CTABLE)
1425: PetscTableFind(lcol1_gcol1,bmap[cworkB[l]]+1,&tcol);
1426: if (tcol) {
1427: #else
1428: if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1429: #endif
1430: *mat_j++ = tcol - 1;
1431: ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));
1432: mat_a += bs2;
1433: ilen_row++;
1434: }
1435: }
1436: imat_ilen[row] = ilen_row;
1437: }
1438: }
1439:
1440: }
1441: }
1443: /* Now assemble the off proc rows*/
1444: {
1445: int *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
1446: int *imat_j,*imat_i;
1447: MatScalar *imat_a,*rbuf4_i;
1449: for (tmp2=0; tmp2<nrqs; tmp2++) {
1450: ierr = MPI_Waitany(nrqs,r_waits4,&i,r_status4+tmp2);
1451: idex = pa[i];
1452: sbuf1_i = sbuf1[idex];
1453: jmax = sbuf1_i[0];
1454: ct1 = 2*jmax + 1;
1455: ct2 = 0;
1456: rbuf2_i = rbuf2[i];
1457: rbuf3_i = rbuf3[i];
1458: rbuf4_i = rbuf4[i];
1459: for (j=1; j<=jmax; j++) {
1460: is_no = sbuf1_i[2*j-1];
1461: #if defined (PETSC_USE_CTABLE)
1462: lrow1_grow1 = rowmaps[is_no];
1463: lcol1_gcol1 = colmaps[is_no];
1464: #else
1465: rmap_i = rmap[is_no];
1466: cmap_i = cmap[is_no];
1467: #endif
1468: mat = (Mat_SeqBAIJ*)submats[is_no]->data;
1469: imat_ilen = mat->ilen;
1470: imat_j = mat->j;
1471: imat_i = mat->i;
1472: imat_a = mat->a;
1473: max1 = sbuf1_i[2*j];
1474: for (k=0; k<max1; k++,ct1++) {
1475: row = sbuf1_i[ct1];
1476: #if defined (PETSC_USE_CTABLE)
1477: PetscTableFind(lrow1_grow1,row+1,&row);
1478: row--;
1479: if(row < 0) { SETERRQ(1,"row not found in table"); }
1480: #else
1481: row = rmap_i[row];
1482: #endif
1483: ilen = imat_ilen[row];
1484: mat_i = imat_i[row];
1485: mat_a = imat_a + mat_i*bs2;
1486: mat_j = imat_j + mat_i;
1487: max2 = rbuf2_i[ct1];
1488: for (l=0; l<max2; l++,ct2++) {
1489: #if defined (PETSC_USE_CTABLE)
1490: PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tcol);
1491: if (tcol) {
1492: #else
1493: if ((tcol = cmap_i[rbuf3_i[ct2]])) {
1494: #endif
1495: *mat_j++ = tcol - 1;
1496: /* *mat_a++= rbuf4_i[ct2]; */
1497: ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));
1498: mat_a += bs2;
1499: ilen++;
1500: }
1501: }
1502: imat_ilen[row] = ilen;
1503: }
1504: }
1505: }
1506: }
1507: PetscFree(r_status4);
1508: PetscFree(r_waits4);
1509: MPI_Waitall(nrqr,s_waits4,s_status4);
1510: PetscFree(s_waits4);
1511: PetscFree(s_status4);
1513: /* Restore the indices */
1514: for (i=0; i<ismax; i++) {
1515: ISRestoreIndices(isrow[i],irow+i);
1516: ISRestoreIndices(iscol[i],icol+i);
1517: }
1519: /* Destroy allocated memory */
1520: PetscFree(irow);
1521: PetscFree(w1);
1522: PetscFree(pa);
1524: PetscFree(sbuf1);
1525: PetscFree(rbuf2);
1526: for (i=0; i<nrqr; ++i) {
1527: PetscFree(sbuf2[i]);
1528: }
1529: for (i=0; i<nrqs; ++i) {
1530: PetscFree(rbuf3[i]);
1531: PetscFree(rbuf4[i]);
1532: }
1534: PetscFree(sbuf2);
1535: PetscFree(rbuf3);
1536: PetscFree(rbuf4);
1537: PetscFree(sbuf_aj[0]);
1538: PetscFree(sbuf_aj);
1539: PetscFree(sbuf_aa[0]);
1540: PetscFree(sbuf_aa);
1542: #if defined (PETSC_USE_CTABLE)
1543: for (i=0; i<ismax+1; i++){
1544: PetscTableDelete(rowmaps[i]);
1545: PetscTableDelete(colmaps[i]);
1546: }
1547: PetscFree(colmaps);
1548: PetscFree(rowmaps);
1549: /* Mark Adams */
1550: #else
1551: PetscFree(rmap);
1552: PetscFree(cmap);
1553: #endif
1554: PetscFree(lens);
1556: for (i=0; i<ismax; i++) {
1557: MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
1558: MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
1559: }
1561: return(0);
1562: }