Actual source code: mpirowbs.c

  1: /* $Id: mpirowbs.c,v 2.9 2001/08/10 03:30:53 bsmith Exp $*/

 3:  #include "src/mat/impls/rowbs/mpi/mpirowbs.h"
 4:  #include "src/vec/vecimpl.h"

  6: #define CHUNCKSIZE_LOCAL   10

 10: static int MatFreeRowbs_Private(Mat A,int n,int *i,PetscScalar *v)
 11: {

 15:   if (v) {
 16: #if defined(PETSC_USE_LOG)
 17:     int len = -n*(sizeof(int)+sizeof(PetscScalar));
 18: #endif
 19:     PetscFree(v);
 20:     PetscLogObjectMemory(A,len);
 21:   }
 22:   return(0);
 23: }

 27: static int MatMallocRowbs_Private(Mat A,int n,int **i,PetscScalar **v)
 28: {
 29:   int len,ierr;

 32:   if (!n) {
 33:     *i = 0; *v = 0;
 34:   } else {
 35:     len = n*(sizeof(int) + sizeof(PetscScalar));
 36:     PetscMalloc(len,v);
 37:     PetscLogObjectMemory(A,len);
 38:     *i = (int *)(*v + n);
 39:   }
 40:   return(0);
 41: }
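/* Note on the layout used by MatMallocRowbs_Private(): one PetscMalloc() call
   provides storage for both the n values and the n column indices of a row.
   The PetscScalar values occupy the front of the buffer and the int column
   indices are aliased immediately behind them (*i = (int*)(*v + n)), which is
   why MatFreeRowbs_Private() above releases the pair with a single PetscFree()
   on the value pointer. */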

 45: int MatScale_MPIRowbs(const PetscScalar *alphain,Mat inA)
 46: {
 47:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)inA->data;
 48:   BSspmat      *A = a->A;
 49:   BSsprow      *vs;
 50:   PetscScalar  *ap,alpha = *alphain;
 51:   int          i,m = inA->m,nrow,j;

 54:   for (i=0; i<m; i++) {
 55:     vs   = A->rows[i];
 56:     nrow = vs->length;
 57:     ap   = vs->nz;
 58:     for (j=0; j<nrow; j++) {
 59:       ap[j] *= alpha;
 60:     }
 61:   }
 62:   PetscLogFlops(a->nz);
 63:   return(0);
 64: }

 66: /* ----------------------------------------------------------------- */
 69: static int MatCreateMPIRowbs_local(Mat A,int nz,const int nnz[])
 70: {
 71:   Mat_MPIRowbs *bsif = (Mat_MPIRowbs*)A->data;
 72:   int          ierr,i,len,m = A->m,*tnnz;
 73:   BSspmat      *bsmat;
 74:   BSsprow      *vs;

 77:   PetscMalloc((m+1)*sizeof(int),&tnnz);
 78:   if (!nnz) {
 79:     if (nz == PETSC_DEFAULT || nz == PETSC_DECIDE) nz = 5;
 80:     if (nz <= 0)             nz = 1;
 81:     for (i=0; i<m; i++) tnnz[i] = nz;
 82:     nz      = nz*m;
 83:   } else {
 84:     nz = 0;
 85:     for (i=0; i<m; i++) {
 86:       if (nnz[i] <= 0) tnnz[i] = 1;
 87:       else             tnnz[i] = nnz[i];
 88:       nz += tnnz[i];
 89:     }
 90:   }

 92:   /* Allocate BlockSolve matrix context */
 93:   PetscNew(BSspmat,&bsif->A);
 94:   bsmat = bsif->A;
 95:   BSset_mat_icc_storage(bsmat,PETSC_FALSE);
 96:   BSset_mat_symmetric(bsmat,PETSC_FALSE);
 97:   len                    = m*(sizeof(BSsprow*)+ sizeof(BSsprow)) + 1;
 98:   PetscMalloc(len,&bsmat->rows);
 99:   bsmat->num_rows        = m;
100:   bsmat->global_num_rows = A->M;
101:   bsmat->map             = bsif->bsmap;
102:   vs                     = (BSsprow*)(bsmat->rows + m);
103:   for (i=0; i<m; i++) {
104:     bsmat->rows[i]  = vs;
105:     bsif->imax[i]   = tnnz[i];
106:     vs->diag_ind    = -1;
107:     MatMallocRowbs_Private(A,tnnz[i],&(vs->col),&(vs->nz));
108:     /* put zero on diagonal */
109:     /*vs->length            = 1;
110:     vs->col[0]      = i + bsif->rstart;
111:     vs->nz[0]       = 0.0;*/
112:     vs->length = 0;
113:     vs++;
114:   }
115:   PetscLogObjectMemory(A,sizeof(BSspmat) + len);
116:   bsif->nz               = 0;
117:   bsif->maxnz            = nz;
118:   bsif->sorted           = 0;
119:   bsif->roworiented      = PETSC_TRUE;
120:   bsif->nonew            = 0;
121:   bsif->bs_color_single  = 0;

123:   PetscFree(tnnz);
124:   return(0);
125: }

129: static int MatSetValues_MPIRowbs_local(Mat AA,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
130: {
131:   Mat_MPIRowbs *mat = (Mat_MPIRowbs*)AA->data;
132:   BSspmat      *A = mat->A;
133:   BSsprow      *vs;
134:   int          *rp,k,a,b,t,ii,row,nrow,i,col,l,rmax,ierr;
135:   int          *imax = mat->imax,nonew = mat->nonew,sorted = mat->sorted;
136:   PetscScalar  *ap,value;

139:   for (k=0; k<m; k++) { /* loop over added rows */
140:     row = im[k];
141:     if (row < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %d",row);
142:     if (row >= AA->m) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",row,AA->m-1);
143:     vs   = A->rows[row];
144:     ap   = vs->nz; rp = vs->col;
145:     rmax = imax[row]; nrow = vs->length;
146:     a    = 0;
147:     for (l=0; l<n; l++) { /* loop over added columns */
148:       if (in[l] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative col: %d",in[l]);
149:       if (in[l] >= AA->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",in[l],AA->N-1);
150:       col = in[l]; value = *v++;
 151:       if (!sorted) a = 0; b = nrow;  /* b is reset every pass; for sorted input, a carries over from the previous column */
152:       while (b-a > 5) {
153:         t = (b+a)/2;
154:         if (rp[t] > col) b = t;
155:         else             a = t;
156:       }
157:       for (i=a; i<b; i++) {
158:         if (rp[i] > col) break;
159:         if (rp[i] == col) {
160:           if (addv == ADD_VALUES) ap[i] += value;
161:           else                    ap[i] = value;
162:           goto noinsert;
163:         }
164:       }
165:       if (nonew) goto noinsert;
166:       if (nrow >= rmax) {
167:         /* there is no extra room in row, therefore enlarge */
168:         int    *itemp,*iout,*iin = vs->col;
169:         PetscScalar *vout,*vin = vs->nz,*vtemp;

171:         /* malloc new storage space */
172:         imax[row] += CHUNCKSIZE_LOCAL;
173:         MatMallocRowbs_Private(AA,imax[row],&itemp,&vtemp);
174:         vout = vtemp; iout = itemp;
175:         for (ii=0; ii<i; ii++) {
176:           vout[ii] = vin[ii];
177:           iout[ii] = iin[ii];
178:         }
179:         vout[i] = value;
180:         iout[i] = col;
181:         for (ii=i+1; ii<=nrow; ii++) {
182:           vout[ii] = vin[ii-1];
183:           iout[ii] = iin[ii-1];
184:         }
185:         /* free old row storage */
186:         if (rmax > 0) {
187:           MatFreeRowbs_Private(AA,rmax,vs->col,vs->nz);
188:         }
189:         vs->col           =  iout; vs->nz = vout;
190:         rmax              =  imax[row];
191:         mat->maxnz        += CHUNCKSIZE_LOCAL;
192:         mat->reallocs++;
193:       } else {
 194:         /* shift higher columns over to make room for the new entry */
195:         for (ii=nrow-1; ii>=i; ii--) {
196:           rp[ii+1] = rp[ii];
197:           ap[ii+1] = ap[ii];
198:         }
199:         rp[i] = col;
200:         ap[i] = value;
201:       }
202:       nrow++;
203:       mat->nz++;
204:       AA->same_nonzero = PETSC_FALSE;
205:       noinsert:;
206:       a = i + 1;
207:     }
208:     vs->length = nrow;
209:   }
210:   return(0);
211: }
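/* The routine above keeps each row sorted by column index: a short binary
   search narrows the insertion window, a linear scan then finds the exact
   slot, and when the row is full its storage is regrown by CHUNCKSIZE_LOCAL
   entries.  A minimal stand-alone sketch of that insert-into-sorted-row step
   (illustrative only; insert_sorted() is not a PETSc routine and assumes the
   caller has already guaranteed enough capacity):

     static void insert_sorted(int *col,PetscScalar *val,int *len,int c,PetscScalar x)
     {
       int i = 0,j;
       while (i < *len && col[i] < c) i++;
       if (i < *len && col[i] == c) {val[i] += x; return;}
       for (j=*len; j>i; j--) {col[j] = col[j-1]; val[j] = val[j-1];}
       col[i] = c; val[i] = x; (*len)++;
     }
*/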


216: static int MatAssemblyBegin_MPIRowbs_local(Mat A,MatAssemblyType mode)
217: {
219:   return(0);
220: }

224: static int MatAssemblyEnd_MPIRowbs_local(Mat AA,MatAssemblyType mode)
225: {
226:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)AA->data;
227:   BSspmat      *A = a->A;
228:   BSsprow      *vs;
229:   int          i,j,rstart = a->rstart;

232:   if (mode == MAT_FLUSH_ASSEMBLY) return(0);

234:   /* Mark location of diagonal */
235:   for (i=0; i<AA->m; i++) {
236:     vs = A->rows[i];
237:     for (j=0; j<vs->length; j++) {
238:       if (vs->col[j] == i + rstart) {
239:         vs->diag_ind = j;
240:         break;
241:       }
242:     }
243:     if (vs->diag_ind == -1) {
244:       SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"no diagonal entry");
245:     }
246:   }
247:   return(0);
248: }

252: static int MatZeroRows_MPIRowbs_local(Mat A,IS is,const PetscScalar *diag)
253: {
254:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)A->data;
255:   BSspmat      *l = a->A;
256:   int          i,ierr,N,*rz,m = A->m - 1,col,base=a->rowners[a->rank];

259:   ISGetLocalSize(is,&N);
260:   ISGetIndices(is,&rz);
261:   if (a->keepzeroedrows) {
262:     for (i=0; i<N; i++) {
263:       if (rz[i] < 0 || rz[i] > m) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"row out of range");
264:       PetscMemzero(l->rows[rz[i]]->nz,l->rows[rz[i]]->length*sizeof(PetscScalar));
265:       if (diag) {
266:         col=rz[i]+base;
267:         MatSetValues_MPIRowbs_local(A,1,&rz[i],1,&col,diag,INSERT_VALUES);
268:       }
269:     }
270:   } else {
271:     if (diag) {
272:       for (i=0; i<N; i++) {
273:         if (rz[i] < 0 || rz[i] > m) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Out of range");
274:         if (l->rows[rz[i]]->length > 0) { /* in case row was completely empty */
275:           l->rows[rz[i]]->length = 1;
276:           l->rows[rz[i]]->nz[0]  = *diag;
277:           l->rows[rz[i]]->col[0] = a->rstart + rz[i];
278:         } else {
279:           col=rz[i]+base;
280:           MatSetValues_MPIRowbs_local(A,1,&rz[i],1,&col,diag,INSERT_VALUES);
281:         }
282:       }
283:     } else {
284:       for (i=0; i<N; i++) {
285:         if (rz[i] < 0 || rz[i] > m) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Out of range");
286:         l->rows[rz[i]]->length = 0;
287:       }
288:     }
289:     A->same_nonzero = PETSC_FALSE;
290:   }
291:   ISRestoreIndices(is,&rz);
292:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
293:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
294:   return(0);
295: }

299: static int MatNorm_MPIRowbs_local(Mat A,NormType type,PetscReal *norm)
300: {
301:   Mat_MPIRowbs *mat = (Mat_MPIRowbs*)A->data;
302:   BSsprow      *vs,**rs;
303:   PetscScalar  *xv;
304:   PetscReal    sum = 0.0;
305:   int          *xi,nz,i,j,ierr;

308:   rs = mat->A->rows;
309:   if (type == NORM_FROBENIUS) {
310:     for (i=0; i<A->m; i++) {
311:       vs = *rs++;
312:       nz = vs->length;
313:       xv = vs->nz;
314:       while (nz--) {
315: #if defined(PETSC_USE_COMPLEX)
316:         sum += PetscRealPart(PetscConj(*xv)*(*xv)); xv++;
317: #else
318:         sum += (*xv)*(*xv); xv++;
319: #endif
320:       }
321:     }
322:     *norm = sqrt(sum);
323:   } else if (type == NORM_1) { /* max column norm */
324:     PetscReal *tmp;
325:     PetscMalloc(A->n*sizeof(PetscReal),&tmp);
326:     PetscMemzero(tmp,A->n*sizeof(PetscReal));
327:     *norm = 0.0;
328:     for (i=0; i<A->m; i++) {
329:       vs = *rs++;
330:       nz = vs->length;
331:       xi = vs->col;
332:       xv = vs->nz;
333:       while (nz--) {
334:         tmp[*xi] += PetscAbsScalar(*xv);
335:         xi++; xv++;
336:       }
337:     }
338:     for (j=0; j<A->n; j++) {
339:       if (tmp[j] > *norm) *norm = tmp[j];
340:     }
341:     PetscFree(tmp);
342:   } else if (type == NORM_INFINITY) { /* max row norm */
343:     *norm = 0.0;
344:     for (i=0; i<A->m; i++) {
345:       vs = *rs++;
346:       nz = vs->length;
347:       xv = vs->nz;
348:       sum = 0.0;
349:       while (nz--) {
350:         sum += PetscAbsScalar(*xv); xv++;
351:       }
352:       if (sum > *norm) *norm = sum;
353:     }
354:   } else {
355:     SETERRQ(PETSC_ERR_SUP,"No support for the two norm");
356:   }
357:   return(0);
358: }

360: /* ----------------------------------------------------------------- */

364: int MatSetValues_MPIRowbs(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode av)
365: {
366:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
367:   int          ierr,i,j,row,col,rstart = a->rstart,rend = a->rend;
368:   PetscTruth   roworiented = a->roworiented;

371:   /* Note:  There's no need to "unscale" the matrix, since scaling is
372:      confined to a->pA, and we're working with a->A here */
373:   for (i=0; i<m; i++) {
374:     if (im[i] < 0) continue;
375:     if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",im[i],mat->M-1);
376:     if (im[i] >= rstart && im[i] < rend) {
377:       row = im[i] - rstart;
378:       for (j=0; j<n; j++) {
379:         if (in[j] < 0) continue;
380:         if (in[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",in[j],mat->N-1);
381:         if (in[j] >= 0 && in[j] < mat->N){
382:           col = in[j];
383:           if (roworiented) {
384:             MatSetValues_MPIRowbs_local(mat,1,&row,1,&col,v+i*n+j,av);
385:           } else {
386:             MatSetValues_MPIRowbs_local(mat,1,&row,1,&col,v+i+j*m,av);
387:           }
388:         } else {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid column");}
389:       }
390:     } else {
391:       if (!a->donotstash) {
392:         if (roworiented) {
393:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);
394:         } else {
395:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);
396:         }
397:       }
398:     }
399:   }
400:   return(0);
401: }

405: int MatAssemblyBegin_MPIRowbs(Mat mat,MatAssemblyType mode)
406: {
407:   Mat_MPIRowbs  *a = (Mat_MPIRowbs*)mat->data;
408:   MPI_Comm      comm = mat->comm;
409:   int           ierr,nstash,reallocs;
410:   InsertMode    addv;

413:   /* Note:  There's no need to "unscale" the matrix, since scaling is
414:             confined to a->pA, and we're working with a->A here */

416:   /* make sure all processors are either in INSERTMODE or ADDMODE */
417:   MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,comm);
418:   if (addv == (ADD_VALUES|INSERT_VALUES)) {
419:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some procs inserted; others added");
420:   }
421:   mat->insertmode = addv; /* in case this processor had no cache */

423:   MatStashScatterBegin_Private(&mat->stash,a->rowners);
424:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
425:   PetscLogInfo(0,"MatAssemblyBegin_MPIRowbs:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
426:   return(0);
427: }
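/* Off-processor entries follow the usual PETSc stash path: MatSetValues_MPIRowbs()
   above places values for rows it does not own into mat->stash,
   MatAssemblyBegin_MPIRowbs() agrees on a single InsertMode across processes and
   starts scattering the stash to the owning processes, and MatAssemblyEnd_MPIRowbs()
   below drains the incoming messages with MatStashScatterGetMesg_Private() and
   applies them through MatSetValues_MPIRowbs(). */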

 429:  #include "petscviewer.h"

433: static int MatView_MPIRowbs_ASCII(Mat mat,PetscViewer viewer)
434: {
435:   Mat_MPIRowbs      *a = (Mat_MPIRowbs*)mat->data;
436:   int               ierr,i,j;
437:   PetscTruth        isascii;
438:   BSspmat           *A = a->A;
439:   BSsprow           **rs = A->rows;
440:   PetscViewerFormat format;

443:   PetscViewerGetFormat(viewer,&format);
444:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);

446:   if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
447:     int ind_l,ind_g,clq_l,clq_g,color;
448:     ind_l = BSlocal_num_inodes(a->pA);CHKERRBS(0);
449:     ind_g = BSglobal_num_inodes(a->pA);CHKERRBS(0);
450:     clq_l = BSlocal_num_cliques(a->pA);CHKERRBS(0);
451:     clq_g = BSglobal_num_cliques(a->pA);CHKERRBS(0);
452:     color = BSnum_colors(a->pA);CHKERRBS(0);
453:     PetscViewerASCIIPrintf(viewer,"  %d global inode(s), %d global clique(s), %d color(s)\n",ind_g,clq_g,color);
454:     PetscViewerASCIISynchronizedPrintf(viewer,"    [%d] %d local inode(s), %d local clique(s)\n",a->rank,ind_l,clq_l);
455:   } else  if (format == PETSC_VIEWER_ASCII_COMMON) {
456:     for (i=0; i<A->num_rows; i++) {
457:       PetscViewerASCIISynchronizedPrintf(viewer,"row %d:",i+a->rstart);
458:       for (j=0; j<rs[i]->length; j++) {
459:         if (rs[i]->nz[j]) {PetscViewerASCIISynchronizedPrintf(viewer," %d %g ",rs[i]->col[j],rs[i]->nz[j]);}
460:       }
461:       PetscViewerASCIISynchronizedPrintf(viewer,"\n");
462:     }
463:   } else if (format == PETSC_VIEWER_ASCII_MATLAB) {
464:     SETERRQ(PETSC_ERR_SUP,"Matlab format not supported");
465:   } else {
466:     PetscViewerASCIIUseTabs(viewer,PETSC_NO);
467:     for (i=0; i<A->num_rows; i++) {
468:       PetscViewerASCIISynchronizedPrintf(viewer,"row %d:",i+a->rstart);
469:       for (j=0; j<rs[i]->length; j++) {
470:         PetscViewerASCIISynchronizedPrintf(viewer," %d %g ",rs[i]->col[j],rs[i]->nz[j]);
471:       }
472:       PetscViewerASCIISynchronizedPrintf(viewer,"\n");
473:     }
474:     PetscViewerASCIIUseTabs(viewer,PETSC_YES);
475:   }
476:   PetscViewerFlush(viewer);
477:   return(0);
478: }

482: static int MatView_MPIRowbs_Binary(Mat mat,PetscViewer viewer)
483: {
484:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
485:   int          ierr,i,M,m,rank,size,*sbuff,*rowlengths;
486:   int          *recvcts,*recvdisp,fd,*cols,maxnz,nz,j;
487:   BSspmat      *A = a->A;
488:   BSsprow      **rs = A->rows;
489:   MPI_Comm     comm = mat->comm;
490:   MPI_Status   status;
491:   PetscScalar  *vals;
492:   MatInfo      info;

495:   MPI_Comm_size(comm,&size);
496:   MPI_Comm_rank(comm,&rank);

498:   M = mat->M; m = mat->m;
499:   /* First gather together on the first processor the lengths of 
500:      each row, and write them out to the file */
501:   PetscMalloc(m*sizeof(int),&sbuff);
502:   for (i=0; i<A->num_rows; i++) {
503:     sbuff[i] = rs[i]->length;
504:   }
505:   MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
506:   if (!rank) {
507:     PetscViewerBinaryGetDescriptor(viewer,&fd);
508:     PetscMalloc((4+M)*sizeof(int),&rowlengths);
509:     PetscMalloc(size*sizeof(int),&recvcts);
510:     recvdisp = a->rowners;
511:     for (i=0; i<size; i++) {
512:       recvcts[i] = recvdisp[i+1] - recvdisp[i];
513:     }
514:     /* first four elements of rowlengths are the header */
515:     rowlengths[0] = mat->cookie;
516:     rowlengths[1] = mat->M;
517:     rowlengths[2] = mat->N;
518:     rowlengths[3] = (int)info.nz_used;
519:     MPI_Gatherv(sbuff,m,MPI_INT,rowlengths+4,recvcts,recvdisp,MPI_INT,0,comm);
520:     PetscFree(sbuff);
521:     PetscBinaryWrite(fd,rowlengths,4+M,PETSC_INT,0);
522:     /* count the number of nonzeros on each processor */
523:     PetscMemzero(recvcts,size*sizeof(int));
524:     for (i=0; i<size; i++) {
525:       for (j=recvdisp[i]; j<recvdisp[i+1]; j++) {
526:         recvcts[i] += rowlengths[j+4];   /* row lengths start after the 4-int header */
527:       }
528:     }
529:     /* allocate buffer long enough to hold largest one */
530:     maxnz = 0;
531:     for (i=0; i<size; i++) {
532:       maxnz = PetscMax(maxnz,recvcts[i]);
533:     }
534:     PetscFree(rowlengths);
535:     PetscFree(recvcts);
536:     PetscMalloc(maxnz*sizeof(int),&cols);

538:     /* binary store column indices for 0th processor */
539:     nz = 0;
540:     for (i=0; i<A->num_rows; i++) {
541:       for (j=0; j<rs[i]->length; j++) {
542:         cols[nz++] = rs[i]->col[j];
543:       }
544:     }
545:     PetscBinaryWrite(fd,cols,nz,PETSC_INT,0);

547:     /* receive and store column indices for all other processors */
548:     for (i=1; i<size; i++) {
549:       /* should tell processor that I am now ready and to begin the send */
550:       MPI_Recv(cols,maxnz,MPI_INT,i,mat->tag,comm,&status);
551:       MPI_Get_count(&status,MPI_INT,&nz);
552:       PetscBinaryWrite(fd,cols,nz,PETSC_INT,0);
553:     }
554:     PetscFree(cols);
555:     PetscMalloc(maxnz*sizeof(PetscScalar),&vals);

557:     /* binary store values for 0th processor */
558:     nz = 0;
559:     for (i=0; i<A->num_rows; i++) {
560:       for (j=0; j<rs[i]->length; j++) {
561:         vals[nz++] = rs[i]->nz[j];
562:       }
563:     }
564:     PetscBinaryWrite(fd,vals,nz,PETSC_SCALAR,0);

566:     /* receive and store nonzeros for all other processors */
567:     for (i=1; i<size; i++) {
568:       /* should tell processor that I am now ready and to begin the send */
569:       MPI_Recv(vals,maxnz,MPIU_SCALAR,i,mat->tag,comm,&status);
570:       MPI_Get_count(&status,MPIU_SCALAR,&nz);
571:       PetscBinaryWrite(fd,vals,nz,PETSC_SCALAR,0);
572:     }
573:     PetscFree(vals);
574:   } else {
575:     MPI_Gatherv(sbuff,m,MPI_INT,0,0,0,MPI_INT,0,comm);
576:     PetscFree(sbuff);

578:     /* count local nonzeros */
579:     nz = 0;
580:     for (i=0; i<A->num_rows; i++) {
581:       for (j=0; j<rs[i]->length; j++) {
582:         nz++;
583:       }
584:     }
585:     /* copy into buffer column indices */
586:     PetscMalloc(nz*sizeof(int),&cols);
587:     nz = 0;
588:     for (i=0; i<A->num_rows; i++) {
589:       for (j=0; j<rs[i]->length; j++) {
590:         cols[nz++] = rs[i]->col[j];
591:       }
592:     }
593:     /* send */  /* should wait until processor zero tells me to go */
594:     MPI_Send(cols,nz,MPI_INT,0,mat->tag,comm);
595:     PetscFree(cols);

597:     /* copy into buffer column values */
598:     PetscMalloc(nz*sizeof(PetscScalar),&vals);
599:     nz   = 0;
600:     for (i=0; i<A->num_rows; i++) {
601:       for (j=0; j<rs[i]->length; j++) {
602:         vals[nz++] = rs[i]->nz[j];
603:       }
604:     }
605:     /* send */  /* should wait until processor zero tells me to go */
606:     MPI_Send(vals,nz,MPIU_SCALAR,0,mat->tag,comm);
607:     PetscFree(vals);
608:   }

610:   return(0);
611: }
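/* The binary layout written above (and read back by MatLoad_MPIRowbs() below) is
     int         header[4]  = { cookie, M, N, total nonzeros }
     int         rowlen[M]    length of every global row
     int         cols[nz]     column indices, row by row, process 0 first
     PetscScalar vals[nz]     numerical values in the same order
   Process 0 gathers the row lengths with MPI_Gatherv() and then receives the
   column and value blocks from the other processes one at a time. */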

615: int MatView_MPIRowbs(Mat mat,PetscViewer viewer)
616: {
617:   Mat_MPIRowbs *bsif = (Mat_MPIRowbs*)mat->data;
618:   int          ierr;
619:   PetscTruth   isascii,isbinary;

622:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
623:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
624:   if (!bsif->blocksolveassembly) {
625:     MatAssemblyEnd_MPIRowbs_ForBlockSolve(mat);
626:   }
627:   if (isascii) {
628:     MatView_MPIRowbs_ASCII(mat,viewer);
629:   } else if (isbinary) {
630:     MatView_MPIRowbs_Binary(mat,viewer);
631:   } else {
632:     SETERRQ1(1,"Viewer type %s not supported by MPIRowbs matrices",((PetscObject)viewer)->type_name);
633:   }
634:   return(0);
635: }
636: 
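/*
   MatAssemblyEnd_MPIRowbs_MakeSymmetric - BlockSolve95 requires a structurally
   symmetric matrix.  For every stored entry (i,j) whose transpose location (j,i)
   belongs to another process, this routine sends that process the information it
   needs to add a zero at (j,i) with ADD_VALUES; transpose locations owned locally
   are filled in directly.  Each message consists of a small header (the number of
   rows, then (row,count) pairs) followed by the column indices found in those rows.
*/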
639: static int MatAssemblyEnd_MPIRowbs_MakeSymmetric(Mat mat)
640: {
641:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
642:   BSspmat      *A = a->A;
643:   BSsprow      *vs;
644:   int          size,rank,M,rstart,tag,i,j,*rtable,*w1,*w3,*w4,len,proc,nrqs;
645:   int          msz,*pa,bsz,nrqr,**rbuf1,**sbuf1,**ptr,*tmp,*ctr,col,idx,row;
646:   int          ctr_j,*sbuf1_j,k,ierr;
647:   PetscScalar  val=0.0;
648:   MPI_Comm     comm;
649:   MPI_Request  *s_waits1,*r_waits1;
650:   MPI_Status   *s_status,*r_status;

653:   comm   = mat->comm;
654:   tag    = mat->tag;
655:   size   = a->size;
656:   rank   = a->rank;
657:   M      = mat->M;
658:   rstart = a->rstart;

660:   PetscMalloc(M*sizeof(int),&rtable);
661:   /* Create hash table for the mapping :row -> proc */
662:   for (i=0,j=0; i<size; i++) {
663:     len = a->rowners[i+1];
664:     for (; j<len; j++) {
665:       rtable[j] = i;
666:     }
667:   }

669:   /* Evaluate communication - mesg to whom, length of mesg, and buffer space
670:      required. Based on this, buffers are allocated, and data copied into them. */
671:   PetscMalloc(size*4*sizeof(int),&w1);/*  mesg size */
 672:   w3   = w1 + 2*size;       /* no of rows that need to be sent to proc i */
673:   w4   = w3 + size;       /* temp work space used in determining w1,  w3 */
674:   PetscMemzero(w1,size*3*sizeof(int)); /* initialize work vector */

676:   for (i=0;  i<mat->m; i++) {
677:     PetscMemzero(w4,size*sizeof(int)); /* initialize work vector */
678:     vs = A->rows[i];
679:     for (j=0; j<vs->length; j++) {
680:       proc = rtable[vs->col[j]];
681:       w4[proc]++;
682:     }
683:     for (j=0; j<size; j++) {
684:       if (w4[j]) { w1[2*j] += w4[j]; w3[j]++;}
685:     }
686:   }
687: 
688:   nrqs       = 0;              /* number of outgoing messages */
 689:   msz        = 0;              /* total mesg length (for all procs) */
690:   w1[2*rank] = 0;              /* no mesg sent to itself */
691:   w3[rank]   = 0;
692:   for (i=0; i<size; i++) {
693:     if (w1[2*i])  {w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
694:   }
695:   /* pa - is list of processors to communicate with */
696:   PetscMalloc((nrqs+1)*sizeof(int),&pa);
697:   for (i=0,j=0; i<size; i++) {
698:     if (w1[2*i]) {pa[j] = i; j++;}
699:   }

701:   /* Each message would have a header = 1 + 2*(no of ROWS) + data */
702:   for (i=0; i<nrqs; i++) {
703:     j       = pa[i];
704:     w1[2*j] += w1[2*j+1] + 2*w3[j];
705:     msz     += w1[2*j];
706:   }
707: 
708:   /* Do a global reduction to determine how many messages to expect */
709:   PetscMaxSum(comm,w1,&bsz,&nrqr);

 711:   /* Allocate memory for recv buffers (none are needed if nrqr is 0) */
712:   len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
713:   PetscMalloc(len,&rbuf1);
714:   rbuf1[0] = (int*)(rbuf1 + nrqr);
715:   for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

717:   /* Post the receives */
718:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);
719:   for (i=0; i<nrqr; ++i){
720:     MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits1+i);
721:   }
722: 
723:   /* Allocate Memory for outgoing messages */
724:   len   = 2*size*sizeof(int*) + (size+msz)*sizeof(int);
725:   PetscMalloc(len,&sbuf1);
726:   ptr   = sbuf1 + size;     /* Pointers to the data in outgoing buffers */
727:   PetscMemzero(sbuf1,2*size*sizeof(int*));
728:   tmp   = (int*)(sbuf1 + 2*size);
729:   ctr   = tmp + msz;

731:   {
732:     int *iptr = tmp,ict  = 0;
733:     for (i=0; i<nrqs; i++) {
734:       j        = pa[i];
735:       iptr    += ict;
736:       sbuf1[j] = iptr;
737:       ict      = w1[2*j];
738:     }
739:   }

741:   /* Form the outgoing messages */
742:   /* Clean up the header space */
743:   for (i=0; i<nrqs; i++) {
744:     j           = pa[i];
745:     sbuf1[j][0] = 0;
746:     PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));
747:     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
748:   }

750:   /* Parse the matrix and copy the data into sbuf1 */
751:   for (i=0; i<mat->m; i++) {
752:     PetscMemzero(ctr,size*sizeof(int));
753:     vs = A->rows[i];
754:     for (j=0; j<vs->length; j++) {
755:       col  = vs->col[j];
756:       proc = rtable[col];
757:       if (proc != rank) { /* copy to the outgoing buffer */
758:         ctr[proc]++;
759:           *ptr[proc] = col;
760:           ptr[proc]++;
761:       } else {
762:         row = col - rstart;
763:         col = i + rstart;
764:         MatSetValues_MPIRowbs_local(mat,1,&row,1,&col,&val,ADD_VALUES);
765:       }
766:     }
767:     /* Update the headers for the current row */
768:     for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
769:       if ((ctr_j = ctr[j])) {
770:         sbuf1_j        = sbuf1[j];
771:         k               = ++sbuf1_j[0];
772:         sbuf1_j[2*k]   = ctr_j;
773:         sbuf1_j[2*k-1] = i + rstart;
774:       }
775:     }
776:   }
777:    /* Check Validity of the outgoing messages */
778:   {
779:     int sum;
780:     for (i=0 ; i<nrqs ; i++) {
781:       j = pa[i];
782:       if (w3[j] != sbuf1[j][0]) {SETERRQ(PETSC_ERR_PLIB,"Blew it! Header[1] mismatch!\n"); }
783:     }

785:     for (i=0 ; i<nrqs ; i++) {
786:       j = pa[i];
787:       sum = 1;
788:       for (k = 1; k <= w3[j]; k++) sum += sbuf1[j][2*k]+2;
789:       if (sum != w1[2*j]) { SETERRQ(PETSC_ERR_PLIB,"Blew it! Header[2-n] mismatch!\n"); }
790:     }
791:   }
792: 
793:   /* Now post the sends */
794:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
795:   for (i=0; i<nrqs; ++i) {
796:     j    = pa[i];
797:     MPI_Isend(sbuf1[j],w1[2*j],MPI_INT,j,tag,comm,s_waits1+i);
798:   }
799: 
800:   /* Receive messages*/
801:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status);
802:   for (i=0; i<nrqr; ++i) {
803:     MPI_Waitany(nrqr,r_waits1,&idx,r_status+i);
804:     /* Process the Message */
805:     {
806:       int    *rbuf1_i,n_row,ct1;

808:       rbuf1_i = rbuf1[idx];
809:       n_row   = rbuf1_i[0];
810:       ct1     = 2*n_row+1;
811:       val     = 0.0;
812:       /* Optimise this later */
813:       for (j=1; j<=n_row; j++) {
814:         col = rbuf1_i[2*j-1];
815:         for (k=0; k<rbuf1_i[2*j]; k++,ct1++) {
816:           row = rbuf1_i[ct1] - rstart;
817:           MatSetValues_MPIRowbs_local(mat,1,&row,1,&col,&val,ADD_VALUES);
818:         }
819:       }
820:     }
821:   }

823:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);
824:   MPI_Waitall(nrqs,s_waits1,s_status);

826:   PetscFree(rtable);
827:   PetscFree(w1);
828:   PetscFree(pa);
829:   PetscFree(rbuf1);
830:   PetscFree(sbuf1);
831:   PetscFree(r_waits1);
832:   PetscFree(s_waits1);
833:   PetscFree(r_status);
834:   PetscFree(s_status);
835:   return(0);
836: }

838: /*
839:      This does the BlockSolve portion of the matrix assembly.
 840:    It is provided in a separate routine so that users can
 841:    operate on the matrix (using MatScale(), MatShift() etc.) after
 842:    the matrix has been assembled but before BlockSolve has sucked it
 843:    in and devoured it.  A usage sketch follows this routine.
844: */
847: int MatAssemblyEnd_MPIRowbs_ForBlockSolve(Mat mat)
848: {
849:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
850:   int          ierr,ldim,low,high,i;
851:   PetscScalar  *diag;

854:   if ((mat->was_assembled) && (!mat->same_nonzero)) {  /* Free the old info */
855:     if (a->pA)       {BSfree_par_mat(a->pA);CHKERRBS(0);}
856:     if (a->comm_pA)  {BSfree_comm(a->comm_pA);CHKERRBS(0);}
857:   }

859:   if ((!mat->same_nonzero) || (!mat->was_assembled)) {
860:     /* Indicates bypassing cliques in coloring */
861:     if (a->bs_color_single) {
862:       BSctx_set_si(a->procinfo,100);
863:     }
864:     /* Form permuted matrix for efficient parallel execution */
865:     a->pA = BSmain_perm(a->procinfo,a->A);CHKERRBS(0);
866:     /* Set up the communication */
867:     a->comm_pA = BSsetup_forward(a->pA,a->procinfo);CHKERRBS(0);
868:   } else {
869:     /* Repermute the matrix */
870:     BSmain_reperm(a->procinfo,a->A,a->pA);CHKERRBS(0);
871:   }

873:   /* Symmetrically scale the matrix by the diagonal */
874:   BSscale_diag(a->pA,a->pA->diag,a->procinfo);CHKERRBS(0);

876:   /* Store inverse of square root of permuted diagonal scaling matrix */
877:   VecGetLocalSize(a->diag,&ldim);
878:   VecGetOwnershipRange(a->diag,&low,&high);
879:   VecGetArray(a->diag,&diag);
880:   for (i=0; i<ldim; i++) {
881:     if (a->pA->scale_diag[i] != 0.0) {
882:       diag[i] = 1.0/sqrt(PetscAbsScalar(a->pA->scale_diag[i]));
883:     } else {
884:       diag[i] = 1.0;
885:     }
886:   }
887:   VecRestoreArray(a->diag,&diag);
888:   a->blocksolveassembly = 1;
889:   mat->was_assembled    = PETSC_TRUE;
890:   mat->same_nonzero     = PETSC_TRUE;
891:   PetscLogInfo(mat,"MatAssemblyEnd_MPIRowbs_ForBlockSolve:Completed BlockSolve95 matrix assembly\n");
892:   return(0);
893: }
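/* A usage sketch of the pattern described above (illustrative only; the scalar
   arguments are passed by pointer, following the same convention as
   MatScale_MPIRowbs() earlier in this file):

     PetscScalar two = 2.0,one = 1.0;
     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatScale(&two,A);CHKERRQ(ierr);
     ierr = MatShift(&one,A);CHKERRQ(ierr);

   The first MatMult() or factorization afterwards triggers
   MatAssemblyEnd_MPIRowbs_ForBlockSolve() implicitly, as MatMult_MPIRowbs()
   below shows. */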

897: int MatAssemblyEnd_MPIRowbs(Mat mat,MatAssemblyType mode)
898: {
899:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
900:   int          i,n,row,col,*rows,*cols,ierr,rstart,nzcount,flg,j,ncols;
901:   PetscScalar  *vals,val;
902:   InsertMode   addv = mat->insertmode;

905:   while (1) {
906:     MatStashScatterGetMesg_Private(&mat->stash,&n,&rows,&cols,&vals,&flg);
907:     if (!flg) break;
908: 
909:     for (i=0; i<n;) {
910:       /* Now identify the consecutive vals belonging to the same row */
911:       for (j=i,rstart=rows[j]; j<n; j++) { if (rows[j] != rstart) break; }
912:       if (j < n) ncols = j-i;
913:       else       ncols = n-i;
914:       /* Now assemble all these values with a single function call */
915:       MatSetValues_MPIRowbs(mat,1,rows+i,ncols,cols+i,vals+i,addv);
916:       i = j;
917:     }
918:   }
919:   MatStashScatterEnd_Private(&mat->stash);

921:   rstart = a->rstart;
922:   nzcount = a->nz; /* This is the number of nonzeros entered by the user */
923:   /* BlockSolve requires that the matrix is structurally symmetric */
924:   if (mode == MAT_FINAL_ASSEMBLY && !mat->structurally_symmetric) {
925:     MatAssemblyEnd_MPIRowbs_MakeSymmetric(mat);
926:   }
927: 
928:   /* BlockSolve requires that all the diagonal elements are set */
929:   val  = 0.0;
930:   for (i=0; i<mat->m; i++) {
931:     row = i; col = i + rstart;
932:     MatSetValues_MPIRowbs_local(mat,1,&row,1,&col,&val,ADD_VALUES);
933:   }
934: 
935:   MatAssemblyBegin_MPIRowbs_local(mat,mode);
936:   MatAssemblyEnd_MPIRowbs_local(mat,mode);
937: 
938:   a->blocksolveassembly = 0;
939:   PetscLogInfo(mat,"MatAssemblyEnd_MPIRowbs:Matrix size: %d X %d; storage space: %d unneeded,%d used\n",mat->m,mat->n,a->maxnz-a->nz,a->nz);
940:   PetscLogInfo(mat,"MatAssemblyEnd_MPIRowbs: User entered %d nonzeros, PETSc added %d\n",nzcount,a->nz-nzcount);
941:   PetscLogInfo(mat,"MatAssemblyEnd_MPIRowbs:Number of mallocs during MatSetValues is %d\n",a->reallocs);
942:   return(0);
943: }

947: int MatZeroEntries_MPIRowbs(Mat mat)
948: {
949:   Mat_MPIRowbs *l = (Mat_MPIRowbs*)mat->data;
950:   BSspmat      *A = l->A;
951:   BSsprow      *vs;
952:   int          i,j;

955:   for (i=0; i <mat->m; i++) {
956:     vs = A->rows[i];
957:     for (j=0; j< vs->length; j++) vs->nz[j] = 0.0;
958:   }
959:   return(0);
960: }

962: /* the code does not do the diagonal entries correctly unless the 
 963:    matrix is square and the column and row ownerships are identical.
964:    This is a BUG.
965: */

969: int MatZeroRows_MPIRowbs(Mat A,IS is,const PetscScalar *diag)
970: {
971:   Mat_MPIRowbs   *l = (Mat_MPIRowbs*)A->data;
972:   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
973:   int            *nprocs,j,idx,nsends;
974:   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
975:   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
976:   int            *lens,imdex,*lrows,*values;
977:   MPI_Comm       comm = A->comm;
978:   MPI_Request    *send_waits,*recv_waits;
979:   MPI_Status     recv_status,*send_status;
980:   IS             istmp;
981:   PetscTruth     found;
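  /* Overview: each process first determines, for every row index it was given,
     which process owns that row (using l->rowners), sends the indices to their
     owners with non-blocking MPI, converts the indices it receives into local
     numbering, and finally calls MatZeroRows_MPIRowbs_local() on the resulting
     local index set. */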

984:   ISGetLocalSize(is,&N);
985:   ISGetIndices(is,&rows);

987:   /*  first count number of contributors to each processor */
988:   PetscMalloc(2*size*sizeof(int),&nprocs);
989:   PetscMemzero(nprocs,2*size*sizeof(int));
990:   PetscMalloc((N+1)*sizeof(int),&owner); /* see note*/
991:   for (i=0; i<N; i++) {
992:     idx   = rows[i];
993:     found = PETSC_FALSE;
994:     for (j=0; j<size; j++) {
995:       if (idx >= owners[j] && idx < owners[j+1]) {
996:         nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
997:       }
998:     }
999:     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row out of range");
1000:   }
1001:   nsends = 0;  for (i=0; i<size; i++) {nsends += nprocs[2*i+1];}

1003:   /* inform other processors of number of messages and max length*/
1004:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);

1006:   /* post receives:   */
1007:   PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int),&rvalues);
1008:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
1009:   for (i=0; i<nrecvs; i++) {
1010:     MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
1011:   }

1013:   /* do sends:
1014:       1) starts[i] gives the starting index in svalues for stuff going to 
1015:          the ith processor
1016:   */
1017:   PetscMalloc((N+1)*sizeof(int),&svalues);
1018:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
1019:   PetscMalloc((size+1)*sizeof(int),&starts);
1020:   starts[0] = 0;
1021:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1022:   for (i=0; i<N; i++) {
1023:     svalues[starts[owner[i]]++] = rows[i];
1024:   }
1025:   ISRestoreIndices(is,&rows);

1027:   starts[0] = 0;
1028:   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1029:   count = 0;
1030:   for (i=0; i<size; i++) {
1031:     if (nprocs[2*i+1]) {
1032:       MPI_Isend(svalues+starts[i],nprocs[2*i],MPI_INT,i,tag,comm,send_waits+count++);
1033:     }
1034:   }
1035:   PetscFree(starts);

1037:   base = owners[rank];

1039:   /*  wait on receives */
1040:   PetscMalloc(2*(nrecvs+1)*sizeof(int),&lens);
1041:   source = lens + nrecvs;
1042:   count = nrecvs; slen = 0;
1043:   while (count) {
1044:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
1045:     /* unpack receives into our local space */
1046:     MPI_Get_count(&recv_status,MPI_INT,&n);
1047:     source[imdex]  = recv_status.MPI_SOURCE;
1048:     lens[imdex]    = n;
1049:     slen           += n;
1050:     count--;
1051:   }
1052:   PetscFree(recv_waits);
1053: 
1054:   /* move the data into the send scatter */
1055:   PetscMalloc((slen+1)*sizeof(int),&lrows);
1056:   count = 0;
1057:   for (i=0; i<nrecvs; i++) {
1058:     values = rvalues + i*nmax;
1059:     for (j=0; j<lens[i]; j++) {
1060:       lrows[count++] = values[j] - base;
1061:     }
1062:   }
1063:   PetscFree(rvalues);
1064:   PetscFree(lens);
1065:   PetscFree(owner);
1066:   PetscFree(nprocs);
1067: 
1068:   /* actually zap the local rows */
1069:   ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);
1070:   PetscLogObjectParent(A,istmp);
1071:   PetscFree(lrows);
1072:   MatZeroRows_MPIRowbs_local(A,istmp,diag);
1073:   ISDestroy(istmp);

1075:   /* wait on sends */
1076:   if (nsends) {
1077:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
1078:     MPI_Waitall(nsends,send_waits,send_status);
1079:     PetscFree(send_status);
1080:   }
1081:   PetscFree(send_waits);
1082:   PetscFree(svalues);

1084:   return(0);
1085: }

1089: int MatNorm_MPIRowbs(Mat mat,NormType type,PetscReal *norm)
1090: {
1091:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
1092:   BSsprow      *vs,**rs;
1093:   PetscScalar  *xv;
1094:   PetscReal    sum = 0.0;
1095:   int          *xi,nz,i,j,ierr;

1098:   if (a->size == 1) {
1099:     MatNorm_MPIRowbs_local(mat,type,norm);
1100:   } else {
1101:     rs = a->A->rows;
1102:     if (type == NORM_FROBENIUS) {
1103:       for (i=0; i<mat->m; i++) {
1104:         vs = *rs++;
1105:         nz = vs->length;
1106:         xv = vs->nz;
1107:         while (nz--) {
1108: #if defined(PETSC_USE_COMPLEX)
1109:           sum += PetscRealPart(PetscConj(*xv)*(*xv)); xv++;
1110: #else
1111:           sum += (*xv)*(*xv); xv++;
1112: #endif
1113:         }
1114:       }
1115:       MPI_Allreduce(&sum,norm,1,MPIU_REAL,MPI_SUM,mat->comm);
1116:       *norm = sqrt(*norm);
1117:     } else if (type == NORM_1) { /* max column norm */
1118:       PetscReal *tmp,*tmp2;
1119:       PetscMalloc(mat->n*sizeof(PetscReal),&tmp);
1120:       PetscMalloc(mat->n*sizeof(PetscReal),&tmp2);
1121:       PetscMemzero(tmp,mat->n*sizeof(PetscReal));
1122:       *norm = 0.0;
1123:       for (i=0; i<mat->m; i++) {
1124:         vs = *rs++;
1125:         nz = vs->length;
1126:         xi = vs->col;
1127:         xv = vs->nz;
1128:         while (nz--) {
1129:           tmp[*xi] += PetscAbsScalar(*xv);
1130:           xi++; xv++;
1131:         }
1132:       }
1133:       MPI_Allreduce(tmp,tmp2,mat->N,MPIU_REAL,MPI_SUM,mat->comm);
1134:       for (j=0; j<mat->n; j++) {
1135:         if (tmp2[j] > *norm) *norm = tmp2[j];
1136:       }
1137:       PetscFree(tmp);
1138:       PetscFree(tmp2);
1139:     } else if (type == NORM_INFINITY) { /* max row norm */
1140:       PetscReal ntemp = 0.0;
1141:       for (i=0; i<mat->m; i++) {
1142:         vs = *rs++;
1143:         nz = vs->length;
1144:         xv = vs->nz;
1145:         sum = 0.0;
1146:         while (nz--) {
1147:           sum += PetscAbsScalar(*xv); xv++;
1148:         }
1149:         if (sum > ntemp) ntemp = sum;
1150:       }
1151:       MPI_Allreduce(&ntemp,norm,1,MPIU_REAL,MPI_MAX,mat->comm);
1152:     } else {
1153:       SETERRQ(PETSC_ERR_SUP,"No support for two norm");
1154:     }
1155:   }
1156:   return(0);
1157: }

1161: int MatMult_MPIRowbs(Mat mat,Vec xx,Vec yy)
1162: {
1163:   Mat_MPIRowbs *bsif = (Mat_MPIRowbs*)mat->data;
1164:   BSprocinfo   *bspinfo = bsif->procinfo;
1165:   PetscScalar  *xxa,*xworka,*yya;
1166:   int          ierr;

1169:   if (!bsif->blocksolveassembly) {
1170:     MatAssemblyEnd_MPIRowbs_ForBlockSolve(mat);
1171:   }

1173:   /* Permute and apply diagonal scaling:  [ xwork = D^{1/2} * x ] */
1174:   if (!bsif->vecs_permscale) {
1175:     VecGetArray(bsif->xwork,&xworka);
1176:     VecGetArray(xx,&xxa);
1177:     BSperm_dvec(xxa,xworka,bsif->pA->perm);CHKERRBS(0);
1178:     VecRestoreArray(bsif->xwork,&xworka);
1179:     VecRestoreArray(xx,&xxa);
1180:     VecPointwiseDivide(bsif->xwork,bsif->diag,xx);
1181:   }

1183:   VecGetArray(xx,&xxa);
1184:   VecGetArray(yy,&yya);
1185:   /* Do lower triangular multiplication:  [ y = L * xwork ] */
1186:   if (bspinfo->single) {
1187:     BSforward1(bsif->pA,xxa,yya,bsif->comm_pA,bspinfo);CHKERRBS(0);
1188:   }  else {
1189:     BSforward(bsif->pA,xxa,yya,bsif->comm_pA,bspinfo);CHKERRBS(0);
1190:   }
1191: 
1192:   /* Do upper triangular multiplication:  [ y = y + L^{T} * xwork ] */
1193:   if (mat->symmetric) {
1194:     if (bspinfo->single){
1195:       BSbackward1(bsif->pA,xxa,yya,bsif->comm_pA,bspinfo);CHKERRBS(0);
1196:     } else {
1197:       BSbackward(bsif->pA,xxa,yya,bsif->comm_pA,bspinfo);CHKERRBS(0);
1198:     }
1199:   }
1200:   /* not needed for ILU version since forward does it all */
1201:   VecRestoreArray(xx,&xxa);
1202:   VecRestoreArray(yy,&yya);

1204:   /* Apply diagonal scaling to vector:  [  y = D^{1/2} * y ] */
1205:   if (!bsif->vecs_permscale) {
1206:     VecGetArray(bsif->xwork,&xworka);
1207:     VecGetArray(xx,&xxa);
1208:     BSiperm_dvec(xworka,xxa,bsif->pA->perm);CHKERRBS(0);
1209:     VecRestoreArray(bsif->xwork,&xworka);
1210:     VecRestoreArray(xx,&xxa);
1211:     VecPointwiseDivide(yy,bsif->diag,bsif->xwork);
1212:     VecGetArray(bsif->xwork,&xworka);
1213:     VecGetArray(yy,&yya);
1214:     BSiperm_dvec(xworka,yya,bsif->pA->perm);CHKERRBS(0);
1215:     VecRestoreArray(bsif->xwork,&xworka);
1216:     VecRestoreArray(yy,&yya);
1217:   }
1218:   PetscLogFlops(2*bsif->nz - mat->m);

1220:   return(0);
1221: }

1225: int MatMultAdd_MPIRowbs(Mat mat,Vec xx,Vec yy,Vec zz)
1226: {
1227:   int          ierr;
1228:   PetscScalar  one = 1.0;

1231:   (*mat->ops->mult)(mat,xx,zz);
1232:   VecAXPY(&one,yy,zz);
1233:   return(0);
1234: }

1238: int MatGetInfo_MPIRowbs(Mat A,MatInfoType flag,MatInfo *info)
1239: {
1240:   Mat_MPIRowbs *mat = (Mat_MPIRowbs*)A->data;
1241:   PetscReal    isend[5],irecv[5];
1242:   int          ierr;

1245:   info->rows_global    = (double)A->M;
1246:   info->columns_global = (double)A->N;
1247:   info->rows_local     = (double)A->m;
1248:   info->columns_local  = (double)A->N;
1249:   info->block_size     = 1.0;
1250:   info->mallocs        = (double)mat->reallocs;
1251:   isend[0] = mat->nz; isend[1] = mat->maxnz; isend[2] =  mat->maxnz -  mat->nz;
1252:   isend[3] = A->mem;  isend[4] = info->mallocs;

1254:   if (flag == MAT_LOCAL) {
1255:     info->nz_used      = isend[0];
1256:     info->nz_allocated = isend[1];
1257:     info->nz_unneeded  = isend[2];
1258:     info->memory       = isend[3];
1259:     info->mallocs      = isend[4];
1260:   } else if (flag == MAT_GLOBAL_MAX) {
1261:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,A->comm);
1262:     info->nz_used      = irecv[0];
1263:     info->nz_allocated = irecv[1];
1264:     info->nz_unneeded  = irecv[2];
1265:     info->memory       = irecv[3];
1266:     info->mallocs      = irecv[4];
1267:   } else if (flag == MAT_GLOBAL_SUM) {
1268:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,A->comm);
1269:     info->nz_used      = irecv[0];
1270:     info->nz_allocated = irecv[1];
1271:     info->nz_unneeded  = irecv[2];
1272:     info->memory       = irecv[3];
1273:     info->mallocs      = irecv[4];
1274:   }
1275:   return(0);
1276: }

1280: int MatGetDiagonal_MPIRowbs(Mat mat,Vec v)
1281: {
1282:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
1283:   BSsprow      **rs = a->A->rows;
1284:   int          i,n,ierr;
1285:   PetscScalar  *x,zero = 0.0;

1288:   if (mat->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1289:   if (!a->blocksolveassembly) {
1290:     MatAssemblyEnd_MPIRowbs_ForBlockSolve(mat);
1291:   }

1293:   VecSet(&zero,v);
1294:   VecGetLocalSize(v,&n);
1295:   if (n != mat->m) SETERRQ(PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
1296:   VecGetArray(v,&x);
1297:   for (i=0; i<mat->m; i++) {
1298:     x[i] = rs[i]->nz[rs[i]->diag_ind];
1299:   }
1300:   VecRestoreArray(v,&x);
1301:   return(0);
1302: }

1306: int MatDestroy_MPIRowbs(Mat mat)
1307: {
1308:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
1309:   BSspmat      *A = a->A;
1310:   BSsprow      *vs;
1311:   int          i,ierr;

1314: #if defined(PETSC_USE_LOG)
1315:   PetscLogObjectState((PetscObject)mat,"Rows=%d, Cols=%d",mat->M,mat->N);
1316: #endif
1317:   PetscFree(a->rowners);
1318:   MatStashDestroy_Private(&mat->stash);
1319:   if (a->bsmap) {
1320:     if (a->bsmap->vlocal2global) {PetscFree(a->bsmap->vlocal2global);}
1321:     if (a->bsmap->vglobal2local) {PetscFree(a->bsmap->vglobal2local);}
1322:     if (a->bsmap->vglobal2proc)  (*a->bsmap->free_g2p)(a->bsmap->vglobal2proc);
1323:     PetscFree(a->bsmap);
1324:   }

1326:   if (A) {
1327:     for (i=0; i<mat->m; i++) {
1328:       vs = A->rows[i];
1329:       MatFreeRowbs_Private(mat,vs->length,vs->col,vs->nz);
1330:     }
1331:     /* Note: A->map = a->bsmap is freed above */
1332:     PetscFree(A->rows);
1333:     PetscFree(A);
1334:   }
1335:   if (a->procinfo) {BSfree_ctx(a->procinfo);CHKERRBS(0);}
1336:   if (a->diag)     {VecDestroy(a->diag);}
1337:   if (a->xwork)    {VecDestroy(a->xwork);}
1338:   if (a->pA)       {BSfree_par_mat(a->pA);CHKERRBS(0);}
1339:   if (a->fpA)      {BSfree_copy_par_mat(a->fpA);CHKERRBS(0);}
1340:   if (a->comm_pA)  {BSfree_comm(a->comm_pA);CHKERRBS(0);}
1341:   if (a->comm_fpA) {BSfree_comm(a->comm_fpA);CHKERRBS(0);}
1342:   if (a->imax)     {PetscFree(a->imax);}
1343:   MPI_Comm_free(&(a->comm_mpirowbs));
1344:   PetscFree(a);
1345:   return(0);
1346: }

1350: int MatSetOption_MPIRowbs(Mat A,MatOption op)
1351: {
1352:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)A->data;

1355:   switch (op) {
1356:   case MAT_ROW_ORIENTED:
1357:     a->roworiented = PETSC_TRUE;
1358:     break;
1359:   case MAT_COLUMN_ORIENTED:
1360:     a->roworiented = PETSC_FALSE;
1361:     break;
1362:   case MAT_COLUMNS_SORTED:
1363:     a->sorted      = 1;
1364:     break;
1365:   case MAT_COLUMNS_UNSORTED:
1366:     a->sorted      = 0;
1367:     break;
1368:   case MAT_NO_NEW_NONZERO_LOCATIONS:
1369:     a->nonew       = 1;
1370:     break;
1371:   case MAT_YES_NEW_NONZERO_LOCATIONS:
1372:     a->nonew       = 0;
1373:     break;
1374:   case MAT_DO_NOT_USE_INODES:
1375:     a->bs_color_single = 1;
1376:     break;
1377:   case MAT_YES_NEW_DIAGONALS:
1378:   case MAT_ROWS_SORTED:
1379:   case MAT_NEW_NONZERO_LOCATION_ERR:
1380:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1381:   case MAT_ROWS_UNSORTED:
1382:   case MAT_USE_HASH_TABLE:
1383:     PetscLogInfo(A,"MatSetOption_MPIRowbs:Option ignored\n");
1384:     break;
1385:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1386:     a->donotstash = PETSC_TRUE;
1387:     break;
1388:   case MAT_NO_NEW_DIAGONALS:
1389:     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1390:     break;
1391:   case MAT_KEEP_ZEROED_ROWS:
1392:     a->keepzeroedrows    = PETSC_TRUE;
1393:     break;
1394:   default:
1395:     SETERRQ(PETSC_ERR_SUP,"unknown option");
1396:     break;
1397:   }
1398:   return(0);
1399: }

1403: int MatGetRow_MPIRowbs(Mat AA,int row,int *nz,int **idx,PetscScalar **v)
1404: {
1405:   Mat_MPIRowbs *mat = (Mat_MPIRowbs*)AA->data;
1406:   BSspmat      *A = mat->A;
1407:   BSsprow      *rs;
1408: 
1410:   if (row < mat->rstart || row >= mat->rend) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");

1412:   rs  = A->rows[row - mat->rstart];
1413:   *nz = rs->length;
1414:   if (v)   *v   = rs->nz;
1415:   if (idx) *idx = rs->col;
1416:   return(0);
1417: }

1421: int MatRestoreRow_MPIRowbs(Mat A,int row,int *nz,int **idx,PetscScalar **v)
1422: {
1424:   return(0);
1425: }
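/* MatGetRow_MPIRowbs() hands back pointers directly into the BlockSolve row
   storage (rs->col and rs->nz), so MatRestoreRow_MPIRowbs() above has nothing
   to free and is intentionally a no-op. */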

1427: /* ------------------------------------------------------------------ */

1431: int MatPrintHelp_MPIRowbs(Mat A)
1432: {
1433:   static PetscTruth called = PETSC_FALSE;
1434:   MPI_Comm          comm = A->comm;
1435:   int               ierr;

1438:   if (called) {return(0);} else called = PETSC_TRUE;
1439:   (*PetscHelpPrintf)(comm," Options for MATMPIROWBS matrix format (needed for BlockSolve):\n");
1440:   (*PetscHelpPrintf)(comm,"  -mat_rowbs_no_inode  - Do not use inodes\n");
1441:   return(0);
1442: }

1446: int MatSetUpPreallocation_MPIRowbs(Mat A)
1447: {
1448:   int        ierr;

1451:    MatMPIRowbsSetPreallocation(A,PETSC_DEFAULT,0);
1452:   return(0);
1453: }

1455: /* -------------------------------------------------------------------*/
1456: static struct _MatOps MatOps_Values = {MatSetValues_MPIRowbs,
1457:        MatGetRow_MPIRowbs,
1458:        MatRestoreRow_MPIRowbs,
1459:        MatMult_MPIRowbs,
1460: /* 4*/ MatMultAdd_MPIRowbs,
1461:        MatMult_MPIRowbs,
1462:        MatMultAdd_MPIRowbs,
1463:        MatSolve_MPIRowbs,
1464:        0,
1465:        0,
1466: /*10*/ 0,
1467:        0,
1468:        0,
1469:        0,
1470:        0,
1471: /*15*/ MatGetInfo_MPIRowbs,
1472:        0,
1473:        MatGetDiagonal_MPIRowbs,
1474:        0,
1475:        MatNorm_MPIRowbs,
1476: /*20*/ MatAssemblyBegin_MPIRowbs,
1477:        MatAssemblyEnd_MPIRowbs,
1478:        0,
1479:        MatSetOption_MPIRowbs,
1480:        MatZeroEntries_MPIRowbs,
1481: /*25*/ MatZeroRows_MPIRowbs,
1482:        0,
1483:        MatLUFactorNumeric_MPIRowbs,
1484:        0,
1485:        MatCholeskyFactorNumeric_MPIRowbs,
1486: /*30*/ MatSetUpPreallocation_MPIRowbs,
1487:        MatILUFactorSymbolic_MPIRowbs,
1488:        MatIncompleteCholeskyFactorSymbolic_MPIRowbs,
1489:        0,
1490:        0,
1491: /*35*/ 0,
1492:        MatForwardSolve_MPIRowbs,
1493:        MatBackwardSolve_MPIRowbs,
1494:        0,
1495:        0,
1496: /*40*/ 0,
1497:        MatGetSubMatrices_MPIRowbs,
1498:        0,
1499:        0,
1500:        0,
1501: /*45*/ MatPrintHelp_MPIRowbs,
1502:        MatScale_MPIRowbs,
1503:        0,
1504:        0,
1505:        0,
1506: /*50*/ 0,
1507:        0,
1508:        0,
1509:        0,
1510:        0,
1511: /*55*/ 0,
1512:        0,
1513:        0,
1514:        0,
1515:        0,
1516: /*60*/ MatGetSubMatrix_MPIRowbs,
1517:        MatDestroy_MPIRowbs,
1518:        MatView_MPIRowbs,
1519:        MatGetPetscMaps_Petsc,
1520:        MatUseScaledForm_MPIRowbs,
1521: /*65*/ MatScaleSystem_MPIRowbs,
1522:        MatUnScaleSystem_MPIRowbs,
1523:        0,
1524:        0,
1525:        0,
1526: /*70*/ 0,
1527:        0,
1528:        0,
1529:        0,
1530:        0,
1531: /*75*/ 0,
1532:        0,
1533:        0,
1534:        0,
1535:        0,
1536: /*80*/ 0,
1537:        0,
1538:        0,
1539:        0,
1540:        0,
1541: /*85*/ MatLoad_MPIRowbs
1542: };

1544: /* ------------------------------------------------------------------- */

1546: EXTERN_C_BEGIN
1549: int MatMPIRowbsSetPreallocation_MPIRowbs(Mat mat,int nz,const int nnz[])
1550: {
1551:   int        ierr;

1554:   mat->preallocated = PETSC_TRUE;
1555:   MatCreateMPIRowbs_local(mat,nz,nnz);
1556:   return(0);
1557: }
1558: EXTERN_C_END

1560: /* MC
1561:    MATMPIROWBS - MATMPIROWBS = "mpirowbs" - A matrix type providing ILU and ICC for distributed sparse matrices for use
1562:    with the external package BlockSolve95.  If BlockSolve95 is installed (see the manual for instructions
1563:    on how to declare the existence of external packages), a matrix type can be constructed which invokes
1564:    BlockSolve95 preconditioners and solvers. 

1566:    Options Database Keys:
1567: . -mat_type mpirowbs - sets the matrix type to "mpirowbs" during a call to MatSetFromOptions()

1569:   Level: beginner

1571: .seealso: MatCreateMPIRowbs
1572: M*/
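/* A typical way to obtain a matrix of this type (an illustrative sketch; the
   MatCreateMPIRowbs() calling sequence matches its use in MatLoad_MPIRowbs()
   below, and the sizes and preallocation shown are hypothetical).  Values would
   be set with MatSetValues() before the assembly calls:

     Mat A;
     ierr = MatCreateMPIRowbs(PETSC_COMM_WORLD,PETSC_DECIDE,100,5,PETSC_NULL,&A);CHKERRQ(ierr);
     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   Alternatively, create a generic Mat and select the type at run time with
   -mat_type mpirowbs through MatSetFromOptions(). */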

1574: EXTERN_C_BEGIN
1577: int MatCreate_MPIRowbs(Mat A)
1578: {
1579:   Mat_MPIRowbs *a;
1580:   BSmapping    *bsmap;
1581:   BSoff_map    *bsoff;
1582:   int          i,ierr,*offset,m,M;
1583:   PetscTruth   flg1,flg2,flg3;
1584:   BSprocinfo   *bspinfo;
1585:   MPI_Comm     comm;
1586: 
1588:   comm = A->comm;
1589:   m    = A->m;
1590:   M    = A->M;

1592:   PetscNew(Mat_MPIRowbs,&a);
1593:   A->data               = (void*)a;
1594:   PetscMemcpy(A->ops,&MatOps_Values,sizeof(struct _MatOps));
1595:   A->factor             = 0;
1596:   A->mapping            = 0;
1597:   a->vecs_permscale     = PETSC_FALSE;
1598:   A->insertmode         = NOT_SET_VALUES;
1599:   a->blocksolveassembly = 0;
1600:   a->keepzeroedrows     = PETSC_FALSE;

1602:   MPI_Comm_rank(comm,&a->rank);
1603:   MPI_Comm_size(comm,&a->size);

1605:   PetscSplitOwnership(comm,&m,&M);

1607:   A->N = M;
1608:   A->M = M;
1609:   A->m = m;
1610:   A->n = A->N;  /* each row stores all columns */
1611:   PetscMalloc((A->m+1)*sizeof(int),&a->imax);
1612:   a->reallocs                      = 0;

1614:   /* the information in the maps duplicates the information computed below, eventually 
1615:      we should remove the duplicate information that is not contained in the maps */
1616:   PetscMapCreateMPI(comm,m,M,&A->rmap);
1617:   PetscMapCreateMPI(comm,m,M,&A->cmap);

1619:   /* build local table of row ownerships */
1620:   PetscMalloc((a->size+2)*sizeof(int),&a->rowners);
1621:   MPI_Allgather(&m,1,MPI_INT,a->rowners+1,1,MPI_INT,comm);
1622:   a->rowners[0] = 0;
1623:   for (i=2; i<=a->size; i++) {
1624:     a->rowners[i] += a->rowners[i-1];
1625:   }
1626:   a->rstart = a->rowners[a->rank];
1627:   a->rend   = a->rowners[a->rank+1];
1628:   PetscLogObjectMemory(A,(A->m+a->size+3)*sizeof(int));

1630:   /* build cache for off array entries formed */
1631:   MatStashCreate_Private(A->comm,1,&A->stash);
1632:   a->donotstash = PETSC_FALSE;

1634:   /* Initialize BlockSolve information */
1635:   a->A        = 0;
1636:   a->pA       = 0;
1637:   a->comm_pA  = 0;
1638:   a->fpA      = 0;
1639:   a->comm_fpA = 0;
1640:   a->alpha    = 1.0;
1641:   a->ierr     = 0;
1642:   a->failures = 0;
1643:   MPI_Comm_dup(A->comm,&(a->comm_mpirowbs));
1644:   VecCreateMPI(A->comm,A->m,A->M,&(a->diag));
1645:   VecDuplicate(a->diag,&(a->xwork));
1646:   PetscLogObjectParent(A,a->diag);  PetscLogObjectParent(A,a->xwork);
1647:   PetscLogObjectMemory(A,(A->m+1)*sizeof(PetscScalar));
1648:   bspinfo = BScreate_ctx();CHKERRBS(0);
1649:   a->procinfo = bspinfo;
1650:   BSctx_set_id(bspinfo,a->rank);CHKERRBS(0);
1651:   BSctx_set_np(bspinfo,a->size);CHKERRBS(0);
1652:   BSctx_set_ps(bspinfo,a->comm_mpirowbs);CHKERRBS(0);
1653:   BSctx_set_cs(bspinfo,INT_MAX);CHKERRBS(0);
1654:   BSctx_set_is(bspinfo,INT_MAX);CHKERRBS(0);
1655:   BSctx_set_ct(bspinfo,IDO);CHKERRBS(0);
1656: #if defined(PETSC_USE_DEBUG)
1657:   BSctx_set_err(bspinfo,1);CHKERRBS(0);  /* BS error checking */
1658: #endif
1659:   BSctx_set_rt(bspinfo,1);CHKERRBS(0);
1660:   PetscOptionsHasName(PETSC_NULL,"-log_info",&flg1);
1661:   if (flg1) {
1662:     BSctx_set_pr(bspinfo,1);CHKERRBS(0);
1663:   }
1664:   PetscOptionsHasName(PETSC_NULL,"-pc_ilu_factorpointwise",&flg1);
1665:   PetscOptionsHasName(PETSC_NULL,"-pc_icc_factorpointwise",&flg2);
1666:   PetscOptionsHasName(PETSC_NULL,"-mat_rowbs_no_inode",&flg3);
1667:   if (flg1 || flg2 || flg3) {
1668:     BSctx_set_si(bspinfo,1);CHKERRBS(0);
1669:   } else {
1670:     BSctx_set_si(bspinfo,0);CHKERRBS(0);
1671:   }
1672: #if defined(PETSC_USE_LOG)
1673:   MLOG_INIT();  /* Initialize logging */
1674: #endif

1676:   /* Compute global offsets */
1677:   offset = &a->rstart;

1679:   PetscNew(BSmapping,&a->bsmap);
1680:   PetscLogObjectMemory(A,sizeof(BSmapping));
1681:   bsmap = a->bsmap;
1682:   PetscMalloc(sizeof(int),&bsmap->vlocal2global);
1683:   *((int *)bsmap->vlocal2global) = (*offset);
1684:   bsmap->flocal2global           = BSloc2glob;
1685:   bsmap->free_l2g                = 0;
1686:   PetscMalloc(sizeof(int),&bsmap->vglobal2local);
1687:   *((int *)bsmap->vglobal2local) = (*offset);
1688:   bsmap->fglobal2local           = BSglob2loc;
1689:   bsmap->free_g2l                = 0;
1690:   bsoff                          = BSmake_off_map(*offset,bspinfo,A->M);
1691:   bsmap->vglobal2proc            = (void *)bsoff;
1692:   bsmap->fglobal2proc            = BSglob2proc;
1693:   bsmap->free_g2p                = (void(*)(void*)) BSfree_off_map;
1694:   PetscObjectComposeFunctionDynamic((PetscObject)A,"MatMPIRowbsSetPreallocation_C",
1695:                                     "MatMPIRowbsSetPreallocation_MPIRowbs",
1696:                                      MatMPIRowbsSetPreallocation_MPIRowbs);
1697:   return(0);
1698: }
1699: EXTERN_C_END

1703: /* @
1704:   MatMPIRowbsSetPreallocation - Sets the number of expected nonzeros 
1705:   per row in the matrix.

1707:   Input Parameter:
1708: +  mat - matrix
1709: .  nz - maximum expected for any row
1710: -  nnz - number expected in each row

1712:   Note:
1713:   This routine is valid only for matrices stored in the MATMPIROWBS
1714:   format.
1715: @ */
1716: int MatMPIRowbsSetPreallocation(Mat mat,int nz,const int nnz[])
1717: {
1718:   int ierr,(*f)(Mat,int,const int[]);

1721:   PetscObjectQueryFunction((PetscObject)mat,"MatMPIRowbsSetPreallocation_C",(void (**)(void))&f);
1722:   if (f) {
1723:     (*f)(mat,nz,nnz);
1724:   }
1725:   return(0);
1726: }
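/* An illustrative preallocation sketch (the row counts are hypothetical): either
   give a single estimate nz that applies to every row, or an nnz[] array with one
   entry per local row; when nnz is supplied it takes precedence over nz, exactly
   as MatCreateMPIRowbs_local() above implements it.

     ierr = MatMPIRowbsSetPreallocation(A,10,PETSC_NULL);CHKERRQ(ierr);

   or, per-row:

     int nnz[3] = {3,5,2};
     ierr = MatMPIRowbsSetPreallocation(A,0,nnz);CHKERRQ(ierr);
*/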

1728: /* --------------- extra BlockSolve-specific routines -------------- */
1731: /* @
1732:   MatGetBSProcinfo - Gets the BlockSolve BSprocinfo context, which the
1733:   user can then manipulate to alter the default parameters.

1735:   Input Parameter:
1736:   mat - matrix

1738:   Output Parameter:
1739:   procinfo - processor information context

1741:   Note:
1742:   This routine is valid only for matrices stored in the MATMPIROWBS
1743:   format.
1744: @ */
1745: int MatGetBSProcinfo(Mat mat,BSprocinfo *procinfo)
1746: {
1747:   Mat_MPIRowbs *a = (Mat_MPIRowbs*)mat->data;
1748:   PetscTruth   ismpirowbs;
1749:   int          ierr;

1752:   PetscTypeCompare((PetscObject)mat,MATMPIROWBS,&ismpirowbs);
1753:   if (!ismpirowbs) SETERRQ(PETSC_ERR_ARG_WRONG,"For MATMPIROWBS matrix type");
1754:   *procinfo = *(a->procinfo);  /* copy out the current BlockSolve context */
1755:   return(0);
1756: }

1760: int MatLoad_MPIRowbs(PetscViewer viewer,MatType type,Mat *newmat)
1761: {
1762:   Mat_MPIRowbs *a;
1763:   BSspmat      *A;
1764:   BSsprow      **rs;
1765:   Mat          mat;
1766:   int          i,nz,ierr,j,rstart,rend,fd,*ourlens,*sndcounts = 0,*procsnz;
1767:   int          header[4],rank,size,*rowlengths = 0,M,m,*rowners,maxnz,*cols;
1768:   PetscScalar  *vals;
1769:   MPI_Comm     comm = ((PetscObject)viewer)->comm;
1770:   MPI_Status   status;

1773:   MPI_Comm_size(comm,&size);
1774:   MPI_Comm_rank(comm,&rank);
1775:   if (!rank) {
1776:     PetscViewerBinaryGetDescriptor(viewer,&fd);
1777:     PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
1778:     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Not matrix object");
1779:     if (header[3] < 0) {
1780:       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPIRowbs");
1781:     }
1782:   }

1784:   MPI_Bcast(header+1,3,MPI_INT,0,comm);
1785:   M = header[1];
1786:   /* determine ownership of all rows */
1787:   m          = M/size + ((M % size) > rank);
1788:   PetscMalloc((size+2)*sizeof(int),&rowners);
1789:   MPI_Allgather(&m,1,MPI_INT,rowners+1,1,MPI_INT,comm);
1790:   rowners[0] = 0;
1791:   for (i=2; i<=size; i++) {
1792:     rowners[i] += rowners[i-1];
1793:   }
1794:   rstart = rowners[rank];
1795:   rend   = rowners[rank+1];
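  /* Illustrative note (added): with M = 10 rows on size = 3 processes the local
     sizes m are 4,3,3 on ranks 0,1,2, and after the prefix sum above
     rowners = {0,4,7,10}, so for example rank 1 owns rows 4 through 6. */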

1797:   /* distribute row lengths to all processors */
1798:   PetscMalloc((rend-rstart)*sizeof(int),&ourlens);
1799:   if (!rank) {
1800:     PetscMalloc(M*sizeof(int),&rowlengths);
1801:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
1802:     PetscMalloc(size*sizeof(int),&sndcounts);
1803:     for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
1804:     MPI_Scatterv(rowlengths,sndcounts,rowners,MPI_INT,ourlens,rend-rstart,MPI_INT,0,comm);
1805:     PetscFree(sndcounts);
1806:   } else {
1807:     MPI_Scatterv(0,0,0,MPI_INT,ourlens,rend-rstart,MPI_INT,0,comm);
1808:   }

1810:   /* create our matrix */
1811:   MatCreateMPIRowbs(comm,m,M,0,ourlens,newmat);
1812:   mat = *newmat;
1813:   PetscFree(ourlens);

1815:   a = (Mat_MPIRowbs*)mat->data;
1816:   A = a->A;
1817:   rs = A->rows;

1819:   if (!rank) {
1820:     /* calculate the number of nonzeros on each processor */
1821:     PetscMalloc(size*sizeof(int),&procsnz);
1822:     PetscMemzero(procsnz,size*sizeof(int));
1823:     for (i=0; i<size; i++) {
1824:       for (j=rowners[i]; j< rowners[i+1]; j++) {
1825:         procsnz[i] += rowlengths[j];
1826:       }
1827:     }
1828:     PetscFree(rowlengths);

1830:     /* determine max buffer needed and allocate it */
1831:     maxnz = 0;
1832:     for (i=0; i<size; i++) {
1833:       maxnz = PetscMax(maxnz,procsnz[i]);
1834:     }
1835:     PetscMalloc(maxnz*sizeof(int),&cols);

1837:     /* read in my part of the matrix column indices  */
1838:     nz = procsnz[0];
1839:     PetscBinaryRead(fd,cols,nz,PETSC_INT);
1840: 
1841:     /* insert it into my part of matrix */
1842:     nz = 0;
1843:     for (i=0; i<A->num_rows; i++) {
1844:       for (j=0; j<a->imax[i]; j++) {
1845:         rs[i]->col[j] = cols[nz++];
1846:       }
1847:       rs[i]->length = a->imax[i];
1848:     }
1849:     /* read in parts for all other processors */
1850:     for (i=1; i<size; i++) {
1851:       nz   = procsnz[i];
1852:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
1853:       MPI_Send(cols,nz,MPI_INT,i,mat->tag,comm);
1854:     }
1855:     PetscFree(cols);
1856:     PetscMalloc(maxnz*sizeof(PetscScalar),&vals);

1858:     /* read in my part of the matrix numerical values  */
1859:     nz   = procsnz[0];
1860:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1861: 
1862:     /* insert it into my part of matrix */
1863:     nz = 0;
1864:     for (i=0; i<A->num_rows; i++) {
1865:       for (j=0; j<a->imax[i]; j++) {
1866:         rs[i]->nz[j] = vals[nz++];
1867:       }
1868:     }
1869:     /* read in parts for all other processors */
1870:     for (i=1; i<size; i++) {
1871:       nz   = procsnz[i];
1872:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1873:       MPI_Send(vals,nz,MPIU_SCALAR,i,mat->tag,comm);
1874:     }
1875:     PetscFree(vals);
1876:     PetscFree(procsnz);
1877:   } else {
1878:     /* determine buffer space needed for message */
1879:     nz = 0;
1880:     for (i=0; i<A->num_rows; i++) {
1881:       nz += a->imax[i];
1882:     }
1883:     PetscMalloc(nz*sizeof(int),&cols);

1885:     /* receive message of column indices*/
1886:     MPI_Recv(cols,nz,MPI_INT,0,mat->tag,comm,&status);
1887:     MPI_Get_count(&status,MPI_INT,&maxnz);
1888:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Received wrong number of column indices");

1890:     /* insert it into my part of matrix */
1891:     nz = 0;
1892:     for (i=0; i<A->num_rows; i++) {
1893:       for (j=0; j<a->imax[i]; j++) {
1894:         rs[i]->col[j] = cols[nz++];
1895:       }
1896:       rs[i]->length = a->imax[i];
1897:     }
1898:     PetscFree(cols);
1899:     PetscMalloc(nz*sizeof(PetscScalar),&vals);

1901:     /* receive message of values*/
1902:     MPI_Recv(vals,nz,MPIU_SCALAR,0,mat->tag,comm,&status);
1903:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
1904:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Received wrong number of matrix values");

1906:     /* insert it into my part of matrix */
1907:     nz = 0;
1908:     for (i=0; i<A->num_rows; i++) {
1909:       for (j=0; j<a->imax[i]; j++) {
1910:         rs[i]->nz[j] = vals[nz++];
1911:       }
1912:       rs[i]->length = a->imax[i];
1913:     }
1914:     PetscFree(vals);
1915:   }
1916:   PetscFree(rowners);
1917:   a->nz = a->maxnz;
1918:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
1919:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
1920:   return(0);
1921: }
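/*
   Sketch of how this loader is typically reached from user code (added
   illustration; the file name is hypothetical and the PETSC_BINARY_RDONLY
   open mode is written as in the PETSc 2.1-era viewer API):

     PetscViewer viewer;
     Mat         A;

     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",PETSC_BINARY_RDONLY,&viewer);
     MatLoad(viewer,MATMPIROWBS,&A);
     PetscViewerDestroy(viewer);
*/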

1923: /* 
1924:     Special destroy and view routines for factored matrices 
1925: */
1928: static int MatDestroy_MPIRowbs_Factored(Mat mat)
1929: {
1931: #if defined(PETSC_USE_LOG)
1932:   PetscLogObjectState((PetscObject)mat,"Rows=%d, Cols=%d",mat->M,mat->N);
1933: #endif
1934:   return(0);
1935: }

1939: static int MatView_MPIRowbs_Factored(Mat mat,PetscViewer viewer)
1940: {

1944:   MatView((Mat) mat->data,viewer);
1945:   return(0);
1946: }

1950: int MatIncompleteCholeskyFactorSymbolic_MPIRowbs(Mat mat,IS isrow,MatFactorInfo *info,Mat *newfact)
1951: {
1952:   /* Note:  the fill parameter (info->fill) is not currently used by BlockSolve */
1953:   Mat          newmat;
1954:   Mat_MPIRowbs *mbs = (Mat_MPIRowbs*)mat->data;
1955:   int          ierr;
1956:   PetscTruth   idn;

1959:   if (isrow) {
1960:     ISIdentity(isrow,&idn);
1961:     if (!idn) SETERRQ(PETSC_ERR_SUP,"Only identity row permutation supported");
1962:   }

1964:   if (!mat->symmetric) {
1965:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"To use incomplete Cholesky \n\
1966:         preconditioning with a MATMPIROWBS matrix you must declare it to be \n\
1967:         symmetric using the option MatSetOption(A,MAT_SYMMETRIC)");
1968:   }

1970:   if (!mbs->blocksolveassembly) {
1971:     BSset_mat_icc_storage(mbs->A,PETSC_TRUE);CHKERRBS(0);
1972:     BSset_mat_symmetric(mbs->A,PETSC_TRUE);CHKERRBS(0);
1973:     MatAssemblyEnd_MPIRowbs_ForBlockSolve(mat);
1974:   }

1976:   /* Copy permuted matrix */
1977:   if (mbs->fpA) {BSfree_copy_par_mat(mbs->fpA);CHKERRBS(0);}
1978:   mbs->fpA = BScopy_par_mat(mbs->pA);CHKERRBS(0);

1980:   /* Set up the communication for factorization */
1981:   if (mbs->comm_fpA) {BSfree_comm(mbs->comm_fpA);CHKERRBS(0);}
1982:   mbs->comm_fpA = BSsetup_factor(mbs->fpA,mbs->procinfo);CHKERRBS(0);

1984:   /* 
1985:       Create a new Mat structure to hold the "factored" matrix, 
1986:     note that this merely contains a pointer to the original matrix, since
1987:     the original matrix contains the factor information.
1988:   */
1989:   PetscHeaderCreate(newmat,_p_Mat,struct _MatOps,MAT_COOKIE,-1,"Mat",mat->comm,MatDestroy,MatView);
1990:   PetscLogObjectCreate(newmat);
1991:   PetscLogObjectMemory(newmat,sizeof(struct _p_Mat));

1993:   newmat->data         = (void*)mat;
1994:   PetscMemcpy(newmat->ops,&MatOps_Values,sizeof(struct _MatOps));
1995:   newmat->ops->destroy = MatDestroy_MPIRowbs_Factored;
1996:   newmat->ops->view    = MatView_MPIRowbs_Factored;
1997:   newmat->factor       = 1;
1998:   newmat->preallocated = PETSC_TRUE;
1999:   newmat->M            = mat->M;
2000:   newmat->N            = mat->N;
2001:   newmat->m            = mat->m;
2002:   newmat->n            = mat->n;
2003:   PetscStrallocpy(MATMPIROWBS,&newmat->type_name);

2005:   *newfact = newmat;
2006:   return(0);
2007: }
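/*
   Illustrative calling sequence for reaching the incomplete Cholesky path
   (added sketch): the matrix must be flagged symmetric BEFORE assembly so
   that the BlockSolve ICC storage is set up; an ICC preconditioner (for
   example the runtime option -pc_type icc) then ends up in this routine.

     MatSetOption(A,MAT_SYMMETRIC);
     MatSetValues(A,...);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/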

2011: int MatILUFactorSymbolic_MPIRowbs(Mat mat,IS isrow,IS iscol,MatFactorInfo* info,Mat *newfact)
2012: {
2013:   Mat          newmat;
2014:   Mat_MPIRowbs *mbs = (Mat_MPIRowbs*)mat->data;
2015:   int          ierr;
2016:   PetscTruth   idn;

2019:   if (info->levels != 0) SETERRQ(1,"BlockSolve ILU supports only 0 levels of fill");
2020:   if (isrow) {
2021:     ISIdentity(isrow,&idn);
2022:     if (!idn) SETERRQ(PETSC_ERR_SUP,"Only identity row permutation supported");
2023:   }
2024:   if (iscol) {
2025:     ISIdentity(iscol,&idn);
2026:     if (!idn) SETERRQ(PETSC_ERR_SUP,"Only identity column permutation supported");
2027:   }

2029:   if (!mbs->blocksolveassembly) {
2030:     MatAssemblyEnd_MPIRowbs_ForBlockSolve(mat);
2031:   }
2032: 
2033:   if (mat->symmetric) {
2034:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"To use ILU preconditioner with \n\
2035:         MatCreateMPIRowbs() matrix you CANNOT declare it to be a symmetric matrix\n\
2036:         using the option MatSetOption(A,MAT_SYMMETRIC)");
2037:   }

2039:   /* Copy permuted matrix */
2040:   if (mbs->fpA) {BSfree_copy_par_mat(mbs->fpA);CHKERRBS(0);}
2041:   mbs->fpA = BScopy_par_mat(mbs->pA);CHKERRBS(0);

2043:   /* Set up the communication for factorization */
2044:   if (mbs->comm_fpA) {BSfree_comm(mbs->comm_fpA);CHKERRBS(0);}
2045:   mbs->comm_fpA = BSsetup_factor(mbs->fpA,mbs->procinfo);CHKERRBS(0);

2047:   /* 
2048:       Create a new Mat structure to hold the "factored" matrix,
2049:     note that this merely contains a pointer to the original matrix, since
2050:     the original matrix contains the factor information.
2051:   */
2052:   PetscHeaderCreate(newmat,_p_Mat,struct _MatOps,MAT_COOKIE,-1,"Mat",mat->comm,MatDestroy,MatView);
2053:   PetscLogObjectCreate(newmat);
2054:   PetscLogObjectMemory(newmat,sizeof(struct _p_Mat));

2056:   newmat->data         = (void*)mat;
2057:   PetscMemcpy(newmat->ops,&MatOps_Values,sizeof(struct _MatOps));
2058:   newmat->ops->destroy = MatDestroy_MPIRowbs_Factored;
2059:   newmat->ops->view    = MatView_MPIRowbs_Factored;
2060:   newmat->factor       = 1;
2061:   newmat->preallocated = PETSC_TRUE;
2062:   newmat->M            = mat->M;
2063:   newmat->N            = mat->N;
2064:   newmat->m            = mat->m;
2065:   newmat->n            = mat->n;
2066:   PetscStrallocpy(MATMPIROWBS,&newmat->type_name);

2068:   *newfact = newmat;
2069:   return(0);
2070: }
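/*
   Illustrative note (added): the BlockSolve ILU path supports only zero
   levels of fill and, in contrast to the ICC path above, requires that the
   matrix has NOT been marked with MatSetOption(A,MAT_SYMMETRIC); it is
   typically reached through an ILU preconditioner such as -pc_type ilu.
*/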

2074: int MatMPIRowbsGetColor(Mat mat,ISColoring *coloring)
2075: {
2076:   int          ierr;

2080:   if (!mat->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2081:   ISColoringCreate(mat->comm,mat->m,0,coloring);

2083:   return(0);
2084: }

2088: /*@C
2089:    MatCreateMPIRowbs - Creates a sparse parallel matrix in the MATMPIROWBS
2090:    format.  This format is intended primarily as an interface for BlockSolve95.

2092:    Collective on MPI_Comm

2094:    Input Parameters:
2095: +  comm - MPI communicator
2096: .  m - number of local rows (or PETSC_DECIDE to have it calculated)
2097: .  M - number of global rows (or PETSC_DECIDE to have it calculated)
2098: .  nz - number of nonzeros per row (same for all local rows)
2099: -  nnz - number of nonzeros per row (possibly different for each row).

2101:    Output Parameter:
2102: .  newA - the matrix 

2104:    Notes:
2105:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2106:    then it must be used on all processors that share the object for that argument.

2108:    The user MUST specify either the local or global matrix dimensions
2109:    (possibly both).

2111:    Specify the preallocated storage with either nz or nnz (not both).  Set 
2112:    nz=PETSC_DEFAULT and nnz=PETSC_NULL for PETSc to control dynamic memory 
2113:    allocation.

2115:    Notes:
2116:    By default, the matrix is assumed to be nonsymmetric; the user can
2117:    take advantage of special optimizations for symmetric matrices by calling
2118: $     MatSetOption(mat,MAT_SYMMETRIC)
2119:    BEFORE calling the routine MatAssemblyBegin().

2121:    Internally, the MATMPIROWBS format inserts zero elements to the
2122:    matrix if necessary, so that nonsymmetric matrices are considered
2123:    to be symmetric in terms of their sparsity structure; this format
2124:    is required for use of the parallel communication routines within
2125:    BlockSolve95. In particular, if the matrix element A[i,j] exists,
2126:    then PETSc will internally allocate a 0 value for the element
2127:    A[j,i] during MatAssemblyEnd() if the user has not already set
2128:    a value for the matrix element A[j,i].

2130:    Options Database Keys:
2131: .  -mat_rowbs_no_inode - Do not use inodes.

2133:    Level: intermediate
2134:   
2135: .keywords: matrix, row, symmetric, sparse, parallel, BlockSolve

2137: .seealso: MatCreate(), MatSetValues()
2138: @*/
2139: int MatCreateMPIRowbs(MPI_Comm comm,int m,int M,int nz,const int nnz[],Mat *newA)
2140: {
2142: 
2144:   MatCreate(comm,m,m,M,M,newA);
2145:   MatSetType(*newA,MATMPIROWBS);
2146:   MatMPIRowbsSetPreallocation(*newA,nz,nnz);
2147:   return(0);
2148: }
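/*
   A small usage sketch (added illustration with made-up sizes): create a
   100x100 matrix with about 5 nonzeros expected per row, set the diagonal,
   and assemble.

     Mat         A;
     int         i,rstart,rend;
     PetscScalar one = 1.0;

     MatCreateMPIRowbs(PETSC_COMM_WORLD,PETSC_DECIDE,100,5,PETSC_NULL,&A);
     MatGetOwnershipRange(A,&rstart,&rend);
     for (i=rstart; i<rend; i++) {
       MatSetValues(A,1,&i,1,&i,&one,INSERT_VALUES);
     }
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/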


2151: /* -------------------------------------------------------------------------*/

2153:  #include src/mat/impls/aij/seq/aij.h
2154:  #include src/mat/impls/aij/mpi/mpiaij.h

2158: int MatGetSubMatrices_MPIRowbs(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
2159: {
2160:   int         nmax,nstages_local,nstages,i,pos,max_no,ierr;


2164:   /* Allocate memory to hold all the submatrices */
2165:   if (scall != MAT_REUSE_MATRIX) {
2166:     PetscMalloc((ismax+1)*sizeof(Mat),submat);
2167:   }
2168: 
2169:   /* Determine the number of stages through which submatrices are done */
2170:   nmax          = 20*1000000 / (C->N * sizeof(int));
2171:   if (!nmax) nmax = 1;
2172:   nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

2174:   /* Make sure every processor loops through the nstages */
2175:   MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);

2177:   for (i=0,pos=0; i<nstages; i++) {
2178:     if (pos+nmax <= ismax) max_no = nmax;
2179:     else if (pos == ismax) max_no = 0;
2180:     else                   max_no = ismax-pos;
2181:     MatGetSubMatrices_MPIRowbs_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
2182:     pos += max_no;
2183:   }
2184:   return(0);
2185: }
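/*
   Illustrative sketch of a call that ends up in the routine above (added;
   the matrix A is assumed to be an assembled MATMPIROWBS matrix and the
   index sets are made-up): each process extracts one sequential submatrix
   defined by sorted sets of global rows and columns.

     IS  rows,cols;
     Mat *submats;

     ISCreateStride(PETSC_COMM_SELF,10,0,1,&rows);
     ISCreateStride(PETSC_COMM_SELF,10,0,1,&cols);
     MatGetSubMatrices(A,1,&rows,&cols,MAT_INITIAL_MATRIX,&submats);
*/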
2186: /* -------------------------------------------------------------------------*/
2187: /* For now MatGetSubMatrices_MPIRowbs_Local gets SeqAIJ submatrices of the input
2188:    matrix and preserves the zeroes that arise from structural symmetry
2189:  */
2192: int MatGetSubMatrices_MPIRowbs_Local(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
2193: {
2194:   Mat_MPIRowbs  *c = (Mat_MPIRowbs *)(C->data);
2195:   BSspmat       *A = c->A;
2196:   Mat_SeqAIJ    *mat;
2197:   int         **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end,size;
2198:   int         **sbuf1,**sbuf2,rank,m,i,j,k,l,ct1,ct2,ierr,**rbuf1,row,proc;
2199:   int         nrqs,msz,**ptr,idx,*req_size,*ctr,*pa,*tmp,tcol,nrqr;
2200:   int         **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2,**rmap;
2201:   int         **cmap,**lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
2202:   int         len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*cmap_i,*lens_i;
2203:   int         *rmap_i,tag0,tag1,tag2,tag3;
2204:   MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
2205:   MPI_Request *r_waits4,*s_waits3,*s_waits4;
2206:   MPI_Status  *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
2207:   MPI_Status  *r_status3,*r_status4,*s_status4;
2208:   MPI_Comm    comm;
2209:   FLOAT       **rbuf4,**sbuf_aa,*vals,*sbuf_aa_i;
2210:   PetscScalar *mat_a;
2211:   PetscTruth  sorted;
2212:   int         *onodes1,*olengths1;

2215:   comm   = C->comm;
2216:   tag0   = C->tag;
2217:   size   = c->size;
2218:   rank   = c->rank;
2219:   m      = C->M;
2220: 
2221:   /* Get some new tags to keep the communication clean */
2222:   PetscObjectGetNewTag((PetscObject)C,&tag1);
2223:   PetscObjectGetNewTag((PetscObject)C,&tag2);
2224:   PetscObjectGetNewTag((PetscObject)C,&tag3);

2226:   /* Check if the row indices are sorted (the column-index check is currently disabled) */
2227:   for (i=0; i<ismax; i++) {
2228:     ISSorted(isrow[i],&sorted);
2229:     if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
2230:     ISSorted(iscol[i],&sorted);
2231:     /*    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted"); */
2232:   }

2234:   len    = (2*ismax+1)*(sizeof(int*)+ sizeof(int)) + (m+1)*sizeof(int);
2235:   PetscMalloc(len,&irow);
2236:   icol   = irow + ismax;
2237:   nrow   = (int*)(icol + ismax);
2238:   ncol   = nrow + ismax;
2239:   rtable = ncol + ismax;

2241:   for (i=0; i<ismax; i++) {
2242:     ISGetIndices(isrow[i],&irow[i]);
2243:     ISGetIndices(iscol[i],&icol[i]);
2244:     ISGetLocalSize(isrow[i],&nrow[i]);
2245:     ISGetLocalSize(iscol[i],&ncol[i]);
2246:   }

2248:   /* Create hash table for the mapping :row -> proc*/
2249:   for (i=0,j=0; i<size; i++) {
2250:     jmax = c->rowners[i+1];
2251:     for (; j<jmax; j++) {
2252:       rtable[j] = i;
2253:     }
2254:   }

2256:   /* evaluate communication - mesg to who, length of mesg, and buffer space
2257:      required. Based on this, buffers are allocated, and data copied into them*/
2258:   PetscMalloc(size*4*sizeof(int),&w1); /* mesg size */
2259:   w2     = w1 + size;      /* if w2[i] marked, then a message to proc i*/
2260:   w3     = w2 + size;      /* no of IS that needs to be sent to proc i */
2261:   w4     = w3 + size;      /* temp work space used in determining w1, w2, w3 */
2262:   PetscMemzero(w1,size*3*sizeof(int)); /* initialize work vector*/
2263:   for (i=0; i<ismax; i++) {
2264:     PetscMemzero(w4,size*sizeof(int)); /* initialize work vector*/
2265:     jmax   = nrow[i];
2266:     irow_i = irow[i];
2267:     for (j=0; j<jmax; j++) {
2268:       row  = irow_i[j];
2269:       proc = rtable[row];
2270:       w4[proc]++;
2271:     }
2272:     for (j=0; j<size; j++) {
2273:       if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
2274:     }
2275:   }
2276: 
2277:   nrqs     = 0;              /* no of outgoing messages */
2278:   msz      = 0;              /* total mesg length (for all procs) */
2279:   w1[rank] = 0;              /* no mesg sent to self */
2280:   w3[rank] = 0;
2281:   for (i=0; i<size; i++) {
2282:     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
2283:   }
2284:   PetscMalloc((nrqs+1)*sizeof(int),&pa); /*(proc -array)*/
2285:   for (i=0,j=0; i<size; i++) {
2286:     if (w1[i]) { pa[j] = i; j++; }
2287:   }

2289:   /* Each message would have a header = 1 + 2*(no of IS) + data */
2290:   for (i=0; i<nrqs; i++) {
2291:     j     = pa[i];
2292:     w1[j] += w2[j] + 2* w3[j];
2293:     msz   += w1[j];
2294:   }
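  /* Illustrative layout (added note): a message that requests rows for two
     index sets, say 3 rows of IS 0 and 2 rows of IS 4, is packed as
         { 2,  0,3,  4,2,  r0,r1,r2,  s0,s1 }
     i.e. the number of index sets, then (IS number, row count) pairs, then
     the requested global row numbers for each index set in turn. */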

2296:   /* Determine the number of messages to expect, their lengths, and their from-ids */
2297:   PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
2298:   PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);

2300:   /* Now post the Irecvs corresponding to these messages */
2301:   PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);
2302: 
2303:   PetscFree(onodes1);
2304:   PetscFree(olengths1);
2305: 
2306:   /* Allocate Memory for outgoing messages */
2307:   len      = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
2308:   PetscMalloc(len,&sbuf1);
2309:   ptr      = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
2310:   PetscMemzero(sbuf1,2*size*sizeof(int*));
2311:   /* allocate memory for outgoing data + buf to receive the first reply */
2312:   tmp      = (int*)(ptr + size);
2313:   ctr      = tmp + 2*msz;

2315:   {
2316:     int *iptr = tmp,ict = 0;
2317:     for (i=0; i<nrqs; i++) {
2318:       j         = pa[i];
2319:       iptr     += ict;
2320:       sbuf1[j]  = iptr;
2321:       ict       = w1[j];
2322:     }
2323:   }

2325:   /* Form the outgoing messages */
2326:   /* Initialize the header space */
2327:   for (i=0; i<nrqs; i++) {
2328:     j           = pa[i];
2329:     sbuf1[j][0] = 0;
2330:     PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));
2331:     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
2332:   }
2333: 
2334:   /* Parse the isrow and copy data into outbuf */
2335:   for (i=0; i<ismax; i++) {
2336:     PetscMemzero(ctr,size*sizeof(int));
2337:     irow_i = irow[i];
2338:     jmax   = nrow[i];
2339:     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
2340:       row  = irow_i[j];
2341:       proc = rtable[row];
2342:       if (proc != rank) { /* copy to the outgoing buf*/
2343:         ctr[proc]++;
2344:         *ptr[proc] = row;
2345:         ptr[proc]++;
2346:       }
2347:     }
2348:     /* Update the headers for the current IS */
2349:     for (j=0; j<size; j++) { /* Can Optimise this loop too */
2350:       if ((ctr_j = ctr[j])) {
2351:         sbuf1_j        = sbuf1[j];
2352:         k              = ++sbuf1_j[0];
2353:         sbuf1_j[2*k]   = ctr_j;
2354:         sbuf1_j[2*k-1] = i;
2355:       }
2356:     }
2357:   }

2359:   /*  Now  post the sends */
2360:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
2361:   for (i=0; i<nrqs; ++i) {
2362:     j    = pa[i];
2363:     MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);
2364:   }

2366:   /* Post Receives to capture the buffer size */
2367:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
2368:   PetscMalloc((nrqs+1)*sizeof(int *),&rbuf2);
2369:   rbuf2[0] = tmp + msz;
2370:   for (i=1; i<nrqs; ++i) {
2371:     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
2372:   }
2373:   for (i=0; i<nrqs; ++i) {
2374:     j    = pa[i];
2375:     MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);
2376:   }

2378:   /* Send to other procs the buf size they should allocate */
2379: 

2381:   /* Receive messages*/
2382:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
2383:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
2384:   len         = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
2385:   PetscMalloc(len,&sbuf2);
2386:   req_size    = (int*)(sbuf2 + nrqr);
2387:   req_source  = req_size + nrqr;
2388: 
2389:   {
2390:     BSsprow    **sAi = A->rows;
2391:     int        id,rstart = c->rstart;
2392:     int        *sbuf2_i;

2394:     for (i=0; i<nrqr; ++i) {
2395:       MPI_Waitany(nrqr,r_waits1,&idx,r_status1+i);
2396:       req_size[idx]   = 0;
2397:       rbuf1_i         = rbuf1[idx];
2398:       start           = 2*rbuf1_i[0] + 1;
2399:       MPI_Get_count(r_status1+i,MPI_INT,&end);
2400:       PetscMalloc((end+1)*sizeof(int),&sbuf2[idx]);
2401:       sbuf2_i         = sbuf2[idx];
2402:       for (j=start; j<end; j++) {
2403:         id               = rbuf1_i[j] - rstart;
2404:         ncols            = (sAi[id])->length;
2405:         sbuf2_i[j]       = ncols;
2406:         req_size[idx]   += ncols;
2407:       }
2408:       req_source[idx] = r_status1[i].MPI_SOURCE;
2409:       /* form the header */
2410:       sbuf2_i[0]   = req_size[idx];
2411:       for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
2412:       MPI_Isend(sbuf2_i,end,MPI_INT,req_source[idx],tag1,comm,s_waits2+i);
2413:     }
2414:   }
2415:   PetscFree(r_status1);
2416:   PetscFree(r_waits1);

2418:   /*  recv buffer sizes */
2419:   /* Receive messages*/
2420: 
2421:   PetscMalloc((nrqs+1)*sizeof(int*),&rbuf3);
2422:   PetscMalloc((nrqs+1)*sizeof(FLOAT *),&rbuf4);
2423:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);
2424:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);
2425:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);

2427:   for (i=0; i<nrqs; ++i) {
2428:     MPI_Waitany(nrqs,r_waits2,&idx,r_status2+i);
2429:     PetscMalloc((rbuf2[idx][0]+1)*sizeof(int),&rbuf3[idx]);
2430:     PetscMalloc((rbuf2[idx][0]+1)*sizeof(FLOAT),&rbuf4[idx]);
2431:     MPI_Irecv(rbuf3[idx],rbuf2[idx][0],MPI_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idx);
2432: #if FLOAT == double      
2433:     MPI_Irecv(rbuf4[idx],rbuf2[idx][0],MPI_DOUBLE,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idx);
2434: #elif FLOAT == float      
2435:     MPI_Irecv(rbuf4[idx],rbuf2[idx][0],MPI_FLOAT,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idx);
2436: #endif
2437:   }
2438:   PetscFree(r_status2);
2439:   PetscFree(r_waits2);
2440: 
2441:   /* Wait on sends1 and sends2 */
2442:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
2443:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);

2445:   MPI_Waitall(nrqs,s_waits1,s_status1);
2446:   MPI_Waitall(nrqr,s_waits2,s_status2);
2447:   PetscFree(s_status1);
2448:   PetscFree(s_status2);
2449:   PetscFree(s_waits1);
2450:   PetscFree(s_waits2);

2452:   /* Now allocate buffers for a->j, and send them off */
2453:   PetscMalloc((nrqr+1)*sizeof(int*),&sbuf_aj);
2454:   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
2455:   PetscMalloc((j+1)*sizeof(int),&sbuf_aj[0]);
2456:   for (i=1; i<nrqr; i++)  sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
2457: 
2458:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);
2459:   {
2460:     BSsprow *brow;
2461:     int *Acol;
2462:     int rstart = c->rstart;

2464:     for (i=0; i<nrqr; i++) {
2465:       rbuf1_i   = rbuf1[i];
2466:       sbuf_aj_i = sbuf_aj[i];
2467:       ct1       = 2*rbuf1_i[0] + 1;
2468:       ct2       = 0;
2469:       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
2470:         kmax = rbuf1[i][2*j];
2471:         for (k=0; k<kmax; k++,ct1++) {
2472:           brow   = A->rows[rbuf1_i[ct1] - rstart];
2473:           ncols  = brow->length;
2474:           Acol   = brow->col;
2475:           /* load the column indices for this row into cols*/
2476:           cols  = sbuf_aj_i + ct2;
2477:           PetscMemcpy(cols,Acol,ncols*sizeof(int));
2478:           /*for (l=0; l<ncols;l++) cols[l]=Acol[l]; */ /* How is it with
2479:                                                           mappings?? */
2480:           ct2 += ncols;
2481:         }
2482:       }
2483:       MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);
2484:     }
2485:   }
2486:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);
2487:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);

2489:   /* Allocate buffers for a->a, and send them off */
2490:   PetscMalloc((nrqr+1)*sizeof(FLOAT*),&sbuf_aa);
2491:   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
2492:   PetscMalloc((j+1)*sizeof(FLOAT),&sbuf_aa[0]);
2493:   for (i=1; i<nrqr; i++)  sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1];
2494: 
2495:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);
2496:   {
2497:     BSsprow *brow;
2498:     FLOAT *Aval;
2499:     int rstart = c->rstart;
2500: 
2501:     for (i=0; i<nrqr; i++) {
2502:       rbuf1_i   = rbuf1[i];
2503:       sbuf_aa_i = sbuf_aa[i];
2504:       ct1       = 2*rbuf1_i[0]+1;
2505:       ct2       = 0;
2506:       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
2507:         kmax = rbuf1_i[2*j];
2508:         for (k=0; k<kmax; k++,ct1++) {
2509:           brow  = A->rows[rbuf1_i[ct1] - rstart];
2510:           ncols = brow->length;
2511:           Aval  = brow->nz;
2512:           /* load the column values for this row into vals*/
2513:           vals  = sbuf_aa_i+ct2;
2514:           PetscMemcpy(vals,Aval,ncols*sizeof(FLOAT));
2515:           ct2 += ncols;
2516:         }
2517:       }
2518: #if FLOAT == double      
2519:       MPI_Isend(sbuf_aa_i,req_size[i],MPI_DOUBLE,req_source[i],tag3,comm,s_waits4+i);
2520: #elif FLOAT == float      
2521:       MPI_Isend(sbuf_aa_i,req_size[i],MPI_FLOAT,req_source[i],tag3,comm,s_waits4+i);
2522: #endif
2523:     }
2524:   }
2525:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);
2526:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);
2527:   PetscFree(rbuf1);

2529:   /* Form the matrix */
2530:   /* create col map */
2531:   {
2532:     int *icol_i;
2533: 
2534:     len     = (1+ismax)*sizeof(int*)+ ismax*C->N*sizeof(int);
2535:     PetscMalloc(len,&cmap);
2536:     cmap[0] = (int *)(cmap + ismax);
2537:     PetscMemzero(cmap[0],(1+ismax*C->N)*sizeof(int));
2538:     for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + C->N; }
2539:     for (i=0; i<ismax; i++) {
2540:       jmax   = ncol[i];
2541:       icol_i = icol[i];
2542:       cmap_i = cmap[i];
2543:       for (j=0; j<jmax; j++) {
2544:         cmap_i[icol_i[j]] = j+1;
2545:       }
2546:     }
2547:   }

2549:   /* Create lens which is required for MatCreate... */
2550:   for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
2551:   len     = (1+ismax)*sizeof(int*)+ j*sizeof(int);
2552:   PetscMalloc(len,&lens);
2553:   lens[0] = (int *)(lens + ismax);
2554:   PetscMemzero(lens[0],j*sizeof(int));
2555:   for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }
2556: 
2557:   /* Update lens from local data */
2558:   { BSsprow *Arow;
2559:     for (i=0; i<ismax; i++) {
2560:       jmax   = nrow[i];
2561:       cmap_i = cmap[i];
2562:       irow_i = irow[i];
2563:       lens_i = lens[i];
2564:       for (j=0; j<jmax; j++) {
2565:         row  = irow_i[j];
2566:         proc = rtable[row];
2567:         if (proc == rank) {
2568:           Arow=A->rows[row-c->rstart];
2569:           ncols=Arow->length;
2570:           cols=Arow->col;
2571:           for (k=0; k<ncols; k++) {
2572:             if (cmap_i[cols[k]]) { lens_i[j]++;}
2573:           }
2574:         }
2575:       }
2576:     }
2577:   }
2578: 
2579:   /* Create row map*/
2580:   len     = (1+ismax)*sizeof(int*)+ ismax*C->M*sizeof(int);
2581:   PetscMalloc(len,&rmap);
2582:   rmap[0] = (int *)(rmap + ismax);
2583:   PetscMemzero(rmap[0],ismax*C->M*sizeof(int));
2584:   for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->M;}
2585:   for (i=0; i<ismax; i++) {
2586:     rmap_i = rmap[i];
2587:     irow_i = irow[i];
2588:     jmax   = nrow[i];
2589:     for (j=0; j<jmax; j++) {
2590:       rmap_i[irow_i[j]] = j;
2591:     }
2592:   }
2593: 
2594:   /* Update lens from offproc data */
2595:   {
2596:     int *rbuf2_i,*rbuf3_i,*sbuf1_i;

2598:     for (tmp2=0; tmp2<nrqs; tmp2++) {
2599:       MPI_Waitany(nrqs,r_waits3,&i,r_status3+tmp2);
2600:       idx     = pa[i];
2601:       sbuf1_i = sbuf1[idx];
2602:       jmax    = sbuf1_i[0];
2603:       ct1     = 2*jmax+1;
2604:       ct2     = 0;
2605:       rbuf2_i = rbuf2[i];
2606:       rbuf3_i = rbuf3[i];
2607:       for (j=1; j<=jmax; j++) {
2608:         is_no   = sbuf1_i[2*j-1];
2609:         max1    = sbuf1_i[2*j];
2610:         lens_i  = lens[is_no];
2611:         cmap_i  = cmap[is_no];
2612:         rmap_i  = rmap[is_no];
2613:         for (k=0; k<max1; k++,ct1++) {
2614:           row  = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
2615:           max2 = rbuf2_i[ct1];
2616:           for (l=0; l<max2; l++,ct2++) {
2617:             if (cmap_i[rbuf3_i[ct2]]) {
2618:               lens_i[row]++;
2619:             }
2620:           }
2621:         }
2622:       }
2623:     }
2624:   }
2625:   PetscFree(r_status3);
2626:   PetscFree(r_waits3);
2627:   MPI_Waitall(nrqr,s_waits3,s_status3);
2628:   PetscFree(s_status3);
2629:   PetscFree(s_waits3);

2631:   /* Create the submatrices */
2632:   if (scall == MAT_REUSE_MATRIX) {
2633:     PetscTruth same;
2634: 
2635:     /*
2636:         Assumes the new rows are the same length as the old rows, hence a potential bug!
2637:     */
2638:     for (i=0; i<ismax; i++) {
2639:       PetscTypeCompare((PetscObject)(submats[i]),MATSEQAIJ,&same);
2640:       if (same == PETSC_FALSE) {
2641:         SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong type");
2642:       }
2643:       mat = (Mat_SeqAIJ*)(submats[i]->data);
2644:       if ((submats[i]->m != nrow[i]) || (submats[i]->n != ncol[i])) {
2645:         SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
2646:       }
2647:       PetscMemcmp(mat->ilen,lens[i],submats[i]->m*sizeof(int),&same);
2648:       if (same == PETSC_FALSE) {
2649:         SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong no of nonzeros");
2650:       }
2651:       /* Initial matrix as if empty */
2652:       PetscMemzero(mat->ilen,submats[i]->m*sizeof(int));
2653:       submats[i]->factor = C->factor;
2654:     }
2655:   } else {
2656:     for (i=0; i<ismax; i++) {
2657:       MatCreateSeqAIJ(PETSC_COMM_SELF,nrow[i],ncol[i],0,lens[i],submats+i);
2658:     }
2659:   }

2661:   /* Assemble the matrices */
2662:   /* First assemble the local rows */
2663:   {
2664:     int    ilen_row,*imat_ilen,*imat_j,*imat_i,old_row;
2665:     PetscScalar *imat_a;
2666:     BSsprow *Arow;
2667: 
2668:     for (i=0; i<ismax; i++) {
2669:       mat       = (Mat_SeqAIJ*)submats[i]->data;
2670:       imat_ilen = mat->ilen;
2671:       imat_j    = mat->j;
2672:       imat_i    = mat->i;
2673:       imat_a    = mat->a;
2674:       cmap_i    = cmap[i];
2675:       rmap_i    = rmap[i];
2676:       irow_i    = irow[i];
2677:       jmax      = nrow[i];
2678:       for (j=0; j<jmax; j++) {
2679:         row      = irow_i[j];
2680:         proc     = rtable[row];
2681:         if (proc == rank) {
2682:           old_row  = row;
2683:           row      = rmap_i[row];
2684:           ilen_row = imat_ilen[row];
2685: 
2686:           Arow=A->rows[old_row-c->rstart];
2687:           ncols=Arow->length;
2688:           cols=Arow->col;
2689:           vals=Arow->nz;
2690: 
2691:           mat_i    = imat_i[row];
2692:           mat_a    = imat_a + mat_i;
2693:           mat_j    = imat_j + mat_i;
2694:           for (k=0; k<ncols; k++) {
2695:             if ((tcol = cmap_i[cols[k]])) {
2696:               *mat_j++ = tcol - 1;
2697:               *mat_a++ = (PetscScalar)vals[k];
2698:               ilen_row++;
2699:             }
2700:           }
2701:           imat_ilen[row] = ilen_row;
2702:         }
2703:       }
2704:     }
2705:   }

2707:   /*   Now assemble the off proc rows*/
2708:   {
2709:     int    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
2710:     int    *imat_j,*imat_i;
2711:     PetscScalar *imat_a;
2712:     FLOAT *rbuf4_i;
2713: 
2714:     for (tmp2=0; tmp2<nrqs; tmp2++) {
2715:       MPI_Waitany(nrqs,r_waits4,&i,r_status4+tmp2);
2716:       idx     = pa[i];
2717:       sbuf1_i = sbuf1[idx];
2718:       jmax    = sbuf1_i[0];
2719:       ct1     = 2*jmax + 1;
2720:       ct2     = 0;
2721:       rbuf2_i = rbuf2[i];
2722:       rbuf3_i = rbuf3[i];
2723:       rbuf4_i = rbuf4[i];
2724:       for (j=1; j<=jmax; j++) {
2725:         is_no     = sbuf1_i[2*j-1];
2726:         rmap_i    = rmap[is_no];
2727:         cmap_i    = cmap[is_no];
2728:         mat       = (Mat_SeqAIJ*)submats[is_no]->data;
2729:         imat_ilen = mat->ilen;
2730:         imat_j    = mat->j;
2731:         imat_i    = mat->i;
2732:         imat_a    = mat->a;
2733:         max1      = sbuf1_i[2*j];
2734:         for (k=0; k<max1; k++,ct1++) {
2735:           row   = sbuf1_i[ct1];
2736:           row   = rmap_i[row];
2737:           ilen  = imat_ilen[row];
2738:           mat_i = imat_i[row];
2739:           mat_a = imat_a + mat_i;
2740:           mat_j = imat_j + mat_i;
2741:           max2 = rbuf2_i[ct1];
2742:           for (l=0; l<max2; l++,ct2++) {
2743:             if ((tcol = cmap_i[rbuf3_i[ct2]])) {
2744:               *mat_j++ = tcol - 1;
2745:               *mat_a++ = (PetscScalar)rbuf4_i[ct2];
2746:               ilen++;
2747:             }
2748:           }
2749:           imat_ilen[row] = ilen;
2750:         }
2751:       }
2752:     }
2753:   }
2754:   PetscFree(r_status4);
2755:   PetscFree(r_waits4);
2756:   MPI_Waitall(nrqr,s_waits4,s_status4);
2757:   PetscFree(s_waits4);
2758:   PetscFree(s_status4);

2760:   /* Restore the indices */
2761:   for (i=0; i<ismax; i++) {
2762:     ISRestoreIndices(isrow[i],irow+i);
2763:     ISRestoreIndices(iscol[i],icol+i);
2764:   }

2766:   /* Destroy allocated memory */
2767:   PetscFree(irow);
2768:   PetscFree(w1);
2769:   PetscFree(pa);

2771:   PetscFree(sbuf1);
2772:   PetscFree(rbuf2);
2773:   for (i=0; i<nrqr; ++i) {
2774:     PetscFree(sbuf2[i]);
2775:   }
2776:   for (i=0; i<nrqs; ++i) {
2777:     PetscFree(rbuf3[i]);
2778:     PetscFree(rbuf4[i]);
2779:   }

2781:   PetscFree(sbuf2);
2782:   PetscFree(rbuf3);
2783:   PetscFree(rbuf4);
2784:   PetscFree(sbuf_aj[0]);
2785:   PetscFree(sbuf_aj);
2786:   PetscFree(sbuf_aa[0]);
2787:   PetscFree(sbuf_aa);
2788: 
2789:   PetscFree(cmap);
2790:   PetscFree(rmap);
2791:   PetscFree(lens);

2793:   for (i=0; i<ismax; i++) {
2794:     MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
2795:     MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
2796:   }
2797:   return(0);
2798: }

2800: /*
2801:   can be optimized by sending only the nonzeros selected by the iscol IS -
2802:   i.e. prebuild the submatrix on the sending side, including the A,B partitioning
2803:   */
2806:  #include src/vec/is/impls/general/general.h
2807: int MatGetSubMatrix_MPIRowbs(Mat C,IS isrow,IS iscol,int csize,MatReuse scall,Mat *submat)
2808: {
2809:   Mat_MPIRowbs  *c = (Mat_MPIRowbs*)C->data;
2810:   BSspmat       *A = c->A;
2811:   BSsprow *Arow;
2812:   Mat_SeqAIJ    *matA,*matB; /* on-proc and off-proc parts of submat */
2813:   Mat_MPIAIJ    *mat;  /* submat->data */
2814:   int    *irow,*icol,nrow,ncol,*rtable,size,rank,tag0,tag1,tag2,tag3;
2815:   int    *w1,*w2,*pa,nrqs,nrqr,msz,row_t;
2816:   int    i,j,k,l,ierr,len,jmax,proc,idx;
2817:   int    **sbuf1,**sbuf2,**rbuf1,**rbuf2,*req_size,**sbuf3,**rbuf3;
2818:   FLOAT  **rbuf4,**sbuf4; /* FLOAT is from Block Solve 95 library */

2820:   int    *cmap,*rmap,nlocal,*o_nz,*d_nz,cstart,cend;
2821:   int    *req_source;
2822:   int    ncols_t;
2823: 
2824: 
2825:   MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
2826:   MPI_Request *r_waits4,*s_waits3,*s_waits4;
2827: 
2828:   MPI_Status  *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
2829:   MPI_Status  *r_status3,*r_status4,*s_status4;
2830:   MPI_Comm    comm;


2834:   comm   = C->comm;
2835:   tag0   = C->tag;
2836:   size   = c->size;
2837:   rank   = c->rank;

2839:   if (size==1) {
2840:     if (scall == MAT_REUSE_MATRIX) {
2841:       ierr=MatGetSubMatrices(C,1,&isrow,&iscol,MAT_REUSE_MATRIX,&submat);
2842:       return(0);
2843:     } else {
2844:       Mat *newsubmat;
2845: 
2846:       ierr=MatGetSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&newsubmat);
2847:       *submat=*newsubmat;
2848:       ierr=PetscFree(newsubmat);
2849:       return(0);
2850:     }
2851:   }
2852: 
2853:   /* Get some new tags to keep the communication clean */
2854:   PetscObjectGetNewTag((PetscObject)C,&tag1);
2855:   PetscObjectGetNewTag((PetscObject)C,&tag2);
2856:   PetscObjectGetNewTag((PetscObject)C,&tag3);

2858:   /* Check that the index sets exist and that their indices are sorted */
2859:   if (!isrow) SETERRQ(PETSC_ERR_ARG_SIZ,"Empty ISrow");
2860:   if (!iscol) SETERRQ(PETSC_ERR_ARG_SIZ,"Empty IScol");
2861:   {PetscTruth sorted;
2862:   ISSorted(isrow,&sorted);
2863:   if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
2864:   ISSorted(iscol,&sorted);
2865:   if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
2866:   }
2867: 
2868:   ISGetIndices(isrow,&irow);
2869:   ISGetIndices(iscol,&icol);
2870:   ISGetLocalSize(isrow,&nrow);
2871:   ISGetLocalSize(iscol,&ncol);
2872: 
2873: 
2874: 
2875:   len    = (C->M+1)*sizeof(int);
2876:   PetscMalloc(len,&rtable);
2877:   /* Create hash table for the mapping :row -> proc*/
2878:   for (i=0,j=0; i<size; i++) {
2879:     jmax = c->rowners[i+1];
2880:     for (; j<jmax; j++) {
2881:       rtable[j] = i;
2882:     }
2883:   }

2885:   /* evaluate communication - mesg to who, length of mesg, and buffer space
2886:      required. Based on this, buffers are allocated, and data copied into them*/
2887:   PetscMalloc(size*2*sizeof(int),&w1); /* mesg size */
2888:   w2     = w1 + size;      /* if w2[i] marked, then a message to proc i*/
2889:   PetscMemzero(w1,size*2*sizeof(int)); /* initialize work vector*/
2890:   for (j=0; j<nrow; j++) {
2891:     row_t  = irow[j];
2892:     proc   = rtable[row_t];
2893:     w1[proc]++;
2894:   }
2895:   nrqs     = 0;              /* no of outgoing messages */
2896:   msz      = 0;              /* total mesg length (for all procs) */
2897:   w1[rank] = 0;              /* no mesg sent to self */
2898:   for (i=0; i<size; i++) {
2899:     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
2900:   }
2901: 
2902:   PetscMalloc((nrqs+1)*sizeof(int),&pa); /*(proc -array)*/
2903:   for (i=0,j=0; i<size; i++) {
2904:     if (w1[i]) {
2905:       pa[j++] = i;
2906:       w1[i]++;  /* header for return data */
2907:       msz+=w1[i];
2908:     }
2909:   }
2910: 
2911:   {int  *onodes1,*olengths1;
2912:   /* Determine the number of messages to expect, their lengths, and their from-ids */
2913:   PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
2914:   PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);
2915:   /* Now post the Irecvs corresponding to these messages */
2916:   PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);
2917:   PetscFree(onodes1);
2918:   PetscFree(olengths1);
2919:   }
2920: 
2921: { int **ptr,*iptr,*tmp;
2922:   /* Allocate Memory for outgoing messages */
2923:   len      = 2*size*sizeof(int*) + msz*sizeof(int);
2924:   PetscMalloc(len,&sbuf1);
2925:   ptr      = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
2926:   PetscMemzero(sbuf1,2*size*sizeof(int*));
2927:   /* allocate memory for outgoing data + buf to receive the first reply */
2928:   tmp      = (int*)(ptr + size);

2930:   for (i=0,iptr=tmp; i<nrqs; i++) {
2931:     j         = pa[i];
2932:     sbuf1[j]  = iptr;
2933:     iptr     += w1[j];
2934:   }

2936:   /* Form the outgoing messages */
2937:   for (i=0; i<nrqs; i++) {
2938:     j           = pa[i];
2939:     sbuf1[j][0] = 0;   /*header */
2940:     ptr[j]      = sbuf1[j] + 1;
2941:   }
2942: 
2943:   /* Parse the isrow and copy data into outbuf */
2944:   for (j=0; j<nrow; j++) {
2945:     row_t  = irow[j];
2946:     proc = rtable[row_t];
2947:     if (proc != rank) { /* copy to the outgoing buf*/
2948:       sbuf1[proc][0]++;
2949:       *ptr[proc] = row_t;
2950:       ptr[proc]++;
2951:     }
2952:   }
2953: } /* block */

2955:   /*  Now  post the sends */
2956: 
2957:   /* structure of sbuf1[i]/rbuf1[i] : 1 (number of requested rows) followed by
2958:    * the global numbers of the requested rows */

2960:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
2961:   for (i=0; i<nrqs; ++i) {
2962:     j    = pa[i];
2963:     MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);
2964:   }

2966:   /* Post Receives to capture the buffer size */
2967:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
2968:   PetscMalloc((nrqs+1)*sizeof(int *),&rbuf2);
2969:   PetscMalloc(msz*sizeof(int)+1,&(rbuf2[0]));
2970:   for (i=1; i<nrqs; ++i) {
2971:     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
2972:   }
2973:   for (i=0; i<nrqs; ++i) {
2974:     j    = pa[i];
2975:     MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);
2976:   }

2978:   /* Send to other procs the buf size they should allocate */
2979:   /* structure of sbuf2[i]/rbuf2[i]: 1 (total size to allocate) followed by
2980:    * the individual requested-row sizes */

2982:   /* Receive messages*/
2983:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
2984:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
2985:   len         = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
2986:   PetscMalloc(len,&sbuf2);
2987:   req_size    = (int*)(sbuf2 + nrqr);
2988:   req_source  = req_size + nrqr;
2989: 
2990:   {
2991:     BSsprow    **sAi = A->rows;
2992:     int        id,rstart = c->rstart;
2993:     int        *sbuf2_i,*rbuf1_i,end;

2995:     for (i=0; i<nrqr; ++i) {
2996:       MPI_Waitany(nrqr,r_waits1,&idx,r_status1+i);
2997:       req_size[idx]   = 0;
2998:       rbuf1_i         = rbuf1[idx];
2999:       MPI_Get_count(r_status1+i,MPI_INT,&end);
3000:       PetscMalloc((end+1)*sizeof(int),&sbuf2[idx]);
3001:       sbuf2_i         = sbuf2[idx];
3002:       for (j=1; j<end; j++) {
3003:         id               = rbuf1_i[j] - rstart;
3004:         ncols_t          = (sAi[id])->length;
3005:         sbuf2_i[j]       = ncols_t;
3006:         req_size[idx]   += ncols_t;
3007:       }
3008:       req_source[idx] = r_status1[i].MPI_SOURCE;
3009:       /* form the header */
3010:       sbuf2_i[0]   = req_size[idx];
3011:       MPI_Isend(sbuf2_i,end,MPI_INT,req_source[idx],tag1,comm,s_waits2+i);
3012:     }
3013:   }
3014:   PetscFree(r_status1);
3015:   PetscFree(r_waits1);

3017:   /*  recv buffer sizes */
3018:   /* Receive messages*/
3019: 
3020:   PetscMalloc((nrqs+1)*sizeof(int*),&rbuf3);
3021:   PetscMalloc((nrqs+1)*sizeof(FLOAT*),&rbuf4);
3022:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);
3023:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);
3024:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);

3026:   for (i=0; i<nrqs; ++i) {
3027:     MPI_Waitany(nrqs,r_waits2,&idx,r_status2+i);
3028:     PetscMalloc((rbuf2[idx][0]+1)*sizeof(int),&rbuf3[idx]);
3029:     PetscMalloc((rbuf2[idx][0]+1)*sizeof(FLOAT),&rbuf4[idx]);
3030:     MPI_Irecv(rbuf3[idx],rbuf2[idx][0],MPI_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idx);
3031: #if FLOAT == double      
3032:     MPI_Irecv(rbuf4[idx],rbuf2[idx][0],MPI_DOUBLE,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idx);
3033: #elif FLOAT == float      
3034:     MPI_Irecv(rbuf4[idx],rbuf2[idx][0],MPI_FLOAT,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idx);
3035: #endif
3036:   }
3037:   PetscFree(r_status2);
3038:   PetscFree(r_waits2);
3039: 
3040:   /* Wait on sends1 and sends2 */
3041:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
3042:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);

3044:   MPI_Waitall(nrqs,s_waits1,s_status1);
3045:   MPI_Waitall(nrqr,s_waits2,s_status2);
3046:   PetscFree(s_status1);
3047:   PetscFree(s_status2);
3048:   PetscFree(s_waits1);
3049:   PetscFree(s_waits2);

3051:   /* Now allocate buffers for a->j, and send them off */
3052:   /* structure of sbuf3[i]/rbuf3[i] and sbuf4[i]/rbuf4[i]: req_size[i] entries (the cols resp.
3053:    * vals of all requested rows; the row sizes were sent in rbuf2); vals are of FLOAT type */
3054: 
3055:   PetscMalloc((nrqr+1)*sizeof(int*),&sbuf3);
3056:   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
3057:   PetscMalloc((j+1)*sizeof(int),&sbuf3[0]);
3058:   for (i=1; i<nrqr; i++)  sbuf3[i] = sbuf3[i-1] + req_size[i-1];
3059: 
3060:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);
3061:   {
3062:     int *Acol,*rbuf1_i,*sbuf3_i,rqrow,noutcols,kmax,*cols,ncols;
3063:     int rstart = c->rstart;

3065:     for (i=0; i<nrqr; i++) {
3066:       rbuf1_i   = rbuf1[i];
3067:       sbuf3_i   = sbuf3[i];
3068:       noutcols  = 0;
3069:       kmax = rbuf1_i[0];  /* num. of req. rows */
3070:       for (k=0,rqrow=1; k<kmax; k++,rqrow++) {
3071:         Arow    = A->rows[rbuf1_i[rqrow] - rstart];
3072:         ncols  = Arow->length;
3073:         Acol   = Arow->col;
3074:         /* load the column indices for this row into cols*/
3075:         cols  = sbuf3_i + noutcols;
3076:         PetscMemcpy(cols,Acol,ncols*sizeof(int));
3077:         /*for (l=0; l<ncols;l++) cols[l]=Acol[l]; */ /* How is it with mappings?? */
3078:         noutcols += ncols;
3079:       }
3080:       MPI_Isend(sbuf3_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);
3081:     }
3082:   }
3083:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);
3084:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);

3086:   /* Allocate buffers for a->a, and send them off */
3087:   /* can be optimized by connecting with the previous block */
3088:   PetscMalloc((nrqr+1)*sizeof(FLOAT*),&sbuf4);
3089:   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
3090:   PetscMalloc((j+1)*sizeof(FLOAT),&sbuf4[0]);
3091:   for (i=1; i<nrqr; i++)  sbuf4[i] = sbuf4[i-1] + req_size[i-1];
3092: 
3093:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);
3094:   {
3095:     FLOAT *Aval,*vals,*sbuf4_i;
3096:     int rstart = c->rstart,*rbuf1_i,rqrow,noutvals,kmax,ncols;
3097: 
3098: 
3099:     for (i=0; i<nrqr; i++) {
3100:       rbuf1_i   = rbuf1[i];
3101:       sbuf4_i   = sbuf4[i];
3102:       rqrow     = 1;
3103:       noutvals  = 0;
3104:       kmax      = rbuf1_i[0];  /* num of req. rows */
3105:       for (k=0; k<kmax; k++,rqrow++) {
3106:         Arow    = A->rows[rbuf1_i[rqrow] - rstart];
3107:         ncols  = Arow->length;
3108:         Aval = Arow->nz;
3109:         /* load the column values for this row into vals*/
3110:         vals  = sbuf4_i+noutvals;
3111:         PetscMemcpy(vals,Aval,ncols*sizeof(FLOAT));
3112:         noutvals += ncols;
3113:       }
3114: #if FLOAT == double      
3115:       MPI_Isend(sbuf4_i,req_size[i],MPI_DOUBLE,req_source[i],tag3,comm,s_waits4+i);
3116: #elif FLOAT == float      
3117:       MPI_Isend(sbuf4_i,req_size[i],MPI_FLOAT,req_source[i],tag3,comm,s_waits4+i);
3118: #endif
3119:     }
3120:   }
3121:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);
3122:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);
3123:   PetscFree(rbuf1);

3125:   /* Form the matrix */

3127:   /* create col map */
3128:   len     = C->N*sizeof(int)+1;
3129:   PetscMalloc(len,&cmap);
3130:   PetscMemzero(cmap,C->N*sizeof(int));
3131:   for (j=0; j<ncol; j++) {
3132:       cmap[icol[j]] = j+1;
3133:   }
3134: 
3135:   /* Create row map / maybe I will need global rowmap but here is local rowmap*/
3136:   len     = C->M*sizeof(int)+1;
3137:   PetscMalloc(len,&rmap);
3138:   PetscMemzero(rmap,C->M*sizeof(int));
3139:   for (j=0; j<nrow; j++) {
3140:     rmap[irow[j]] = j;
3141:   }

3143:   /*
3144:      Determine the number of non-zeros in the diagonal and off-diagonal 
3145:      portions of the matrix in order to do correct preallocation
3146:    */

3148:   /* first get start and end of "diagonal" columns */
3149:   if (csize == PETSC_DECIDE) {
3150:     nlocal = ncol/size + ((ncol % size) > rank);
3151:   } else {
3152:     nlocal = csize;
3153:   }
3154:   {
3155:     int ncols,*cols,olen,dlen,thecol;
3156:     int *rbuf2_i,*rbuf3_i,*sbuf1_i,row,kmax,cidx;
3157: 
3158:     MPI_Scan(&nlocal,&cend,1,MPI_INT,MPI_SUM,comm);
3159:     cstart = cend - nlocal;
3160:     if (rank == size - 1 && cend != ncol) {
3161:       SETERRQ(1,"Local column sizes do not add up to total number of columns");
3162:     }

3164:     PetscMalloc((2*nrow+1)*sizeof(int),&d_nz);
3165:     o_nz = d_nz + nrow;
3166: 
3167:     /* Update lens from local data */
3168:     for (j=0; j<nrow; j++) {
3169:       row  = irow[j];
3170:       proc = rtable[row];
3171:       if (proc == rank) {
3172:         Arow=A->rows[row-c->rstart];
3173:         ncols=Arow->length;
3174:         cols=Arow->col;
3175:         olen=dlen=0;
3176:         for (k=0; k<ncols; k++) {
3177:           if ((thecol=cmap[cols[k]])) {
3178:             if (cstart<thecol && thecol<=cend) dlen++; /* thecol is from 1 */
3179:             else olen++;
3180:           }
3181:         }
3182:         o_nz[j]=olen;
3183:         d_nz[j]=dlen;
3184:       } else d_nz[j]=o_nz[j]=0;
3185:     }
3186:     /* Update lens from offproc data and done waits */
3187:     /* this will be much simpler after sending only the appropriate columns */
3188:     for (j=0; j<nrqs;j++) {
3189:       MPI_Waitany(nrqs,r_waits3,&i,r_status3+j);
3190:       proc   = pa[i];
3191:       sbuf1_i = sbuf1[proc];
3192:       cidx    = 0;
3193:       rbuf2_i = rbuf2[i];
3194:       rbuf3_i = rbuf3[i];
3195:       kmax    = sbuf1_i[0]; /*num of rq. rows*/
3196:       for (k=1; k<=kmax; k++) {
3197:         row  = rmap[sbuf1_i[k]]; /* the val in the new matrix to be */
3198:         for (l=0; l<rbuf2_i[k]; l++,cidx++) {
3199:           if ((thecol=cmap[rbuf3_i[cidx]])) {
3200: 
3201:             if (cstart<thecol && thecol<=cend) d_nz[row]++; /* thecol is from 1 */
3202:             else o_nz[row]++;
3203:           }
3204:         }
3205:       }
3206:     }
3207:   }
3208:   PetscFree(r_status3);
3209:   PetscFree(r_waits3);
3210:   MPI_Waitall(nrqr,s_waits3,s_status3);
3211:   PetscFree(s_status3);
3212:   PetscFree(s_waits3);

3214:   if (scall ==  MAT_INITIAL_MATRIX) {
3215:     MatCreateMPIAIJ(comm,nrow,nlocal,PETSC_DECIDE,ncol,0,d_nz,0,o_nz,submat);
3216:     mat=(Mat_MPIAIJ *)((*submat)->data);
3217:     matA=(Mat_SeqAIJ *)(mat->A->data);
3218:     matB=(Mat_SeqAIJ *)(mat->B->data);
3219: 
3220:   } else {
3221:     PetscTruth same;
3222:     /* The following checks could optionally be dropped for a debugged user
3223:      * program, but there is no PETSc option to switch off such safety
3224:      * tests - in the same way the counting of o_nz,d_nz could be dropped for the
3225:      * REUSE case */
3226: 
3227:     PetscTypeCompare((PetscObject)(*submat),MATMPIAIJ,&same);
3228:     if (same == PETSC_FALSE) {
3229:       SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong type");
3230:     }
3231:     if (((*submat)->m != nrow) || ((*submat)->N != ncol)) {
3232:         SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
3233:     }
3234:     mat=(Mat_MPIAIJ *)((*submat)->data);
3235:     matA=(Mat_SeqAIJ *)(mat->A->data);
3236:     matB=(Mat_SeqAIJ *)(mat->B->data);
3237:     PetscMemcmp(matA->ilen,d_nz,nrow*sizeof(int),&same);
3238:     if (same == PETSC_FALSE) {
3239:       SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong no of nonzeros");
3240:     }
3241:     PetscMemcmp(matB->ilen,o_nz,nrow*sizeof(int),&same);
3242:     if (same == PETSC_FALSE) {
3243:       SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong no of nonzeros");
3244:     }
3245:   /* Initial matrix as if empty */
3246:     PetscMemzero(matA->ilen,nrow*sizeof(int));
3247:     PetscMemzero(matB->ilen,nrow*sizeof(int));
3248:     /* Perhaps MatZeroEntries would be better - check exactly what it does - all
3249:      * possibly stale information must be deleted */
3250:     /*submats[i]->factor = C->factor; ??? if the factor is to be kept then some
3251:      * factor information must be copied as well - where is it stored? */
3252:     (*submat)->was_assembled=PETSC_FALSE;
3253:     (*submat)->assembled=PETSC_FALSE;
3254: 
3255:   }
3256:   PetscFree(d_nz);

3258:   /* Assemble the matrix */
3259:   /* First assemble from local rows */
3260:   {
3261:     int    i_row,oldrow,row,ncols,*cols,*matA_j,*matB_j,ilenA,ilenB,tcol;
3262:     FLOAT  *vals;
3263:     PetscScalar *matA_a,*matB_a;
3264: 
3265:     for (j=0; j<nrow; j++) {
3266:       oldrow = irow[j];
3267:       proc   = rtable[oldrow];
3268:       if (proc == rank) {
3269:         row  = rmap[oldrow];
3270: 
3271:         Arow  = A->rows[oldrow-c->rstart];
3272:         ncols = Arow->length;
3273:         cols  = Arow->col;
3274:         vals  = Arow->nz;
3275: 
3276:         i_row   = matA->i[row];
3277:         matA_a = matA->a + i_row;
3278:         matA_j = matA->j + i_row;
3279:         i_row   = matB->i[row];
3280:         matB_a = matB->a + i_row;
3281:         matB_j = matB->j + i_row;
3282:         for (k=0,ilenA=0,ilenB=0; k<ncols; k++) {
3283:           if ((tcol = cmap[cols[k]])) {
3284:             if (tcol<=cstart) {
3285:               *matB_j++ = tcol-1;
3286:               *matB_a++ = vals[k];
3287:               ilenB++;
3288:             } else if (tcol<=cend) {
3289:               *matA_j++ = (tcol-1)-cstart;
3290:               *matA_a++ = (PetscScalar)(vals[k]);
3291:               ilenA++;
3292:             } else {
3293:               *matB_j++ = tcol-1;
3294:               *matB_a++ = vals[k];
3295:               ilenB++;
3296:             }
3297:           }
3298:         }
3299:         matA->ilen[row]=ilenA;
3300:         matB->ilen[row]=ilenB;
3301: 
3302:       }
3303:     }
3304:   }

3306:   /*   Now assemble the off proc rows*/
3307:   {
3308:     int  *sbuf1_i,*rbuf2_i,*rbuf3_i,cidx,kmax,row,i_row;
3309:     int  *matA_j,*matB_j,lmax,tcol,ilenA,ilenB;
3310:     PetscScalar *matA_a,*matB_a;
3311:     FLOAT *rbuf4_i;

3313:     for (j=0; j<nrqs; j++) {
3314:       MPI_Waitany(nrqs,r_waits4,&i,r_status4+j);
3315:       proc   = pa[i];
3316:       sbuf1_i = sbuf1[proc];
3317: 
3318:       cidx    = 0;
3319:       rbuf2_i = rbuf2[i];
3320:       rbuf3_i = rbuf3[i];
3321:       rbuf4_i = rbuf4[i];
3322:       kmax    = sbuf1_i[0];
3323:       for (k=1; k<=kmax; k++) {
3324:         row = rmap[sbuf1_i[k]];
3325: 
3326:         i_row  = matA->i[row];
3327:         matA_a = matA->a + i_row;
3328:         matA_j = matA->j + i_row;
3329:         i_row  = matB->i[row];
3330:         matB_a = matB->a + i_row;
3331:         matB_j = matB->j + i_row;
3332: 
3333:         lmax = rbuf2_i[k];
3334:         for (l=0,ilenA=0,ilenB=0; l<lmax; l++,cidx++) {
3335:           if ((tcol = cmap[rbuf3_i[cidx]])) {
3336:             if (tcol<=cstart) {
3337:               *matB_j++ = tcol-1;
3338:               *matB_a++ = (PetscScalar)(rbuf4_i[cidx]);
3339:               ilenB++;
3340:             } else if (tcol<=cend) {
3341:               *matA_j++ = (tcol-1)-cstart;
3342:               *matA_a++ = (PetscScalar)(rbuf4_i[cidx]);
3343:               ilenA++;
3344:             } else {
3345:               *matB_j++ = tcol-1;
3346:               *matB_a++ = (PetscScalar)(rbuf4_i[cidx]);
3347:               ilenB++;
3348:             }
3349:           }
3350:         }
3351:         matA->ilen[row]=ilenA;
3352:         matB->ilen[row]=ilenB;
3353:       }
3354:     }
3355:   }

3357:   PetscFree(r_status4);
3358:   PetscFree(r_waits4);
3359:   MPI_Waitall(nrqr,s_waits4,s_status4);
3360:   PetscFree(s_waits4);
3361:   PetscFree(s_status4);

3363:   /* Restore the indices */
3364:   ISRestoreIndices(isrow,&irow);
3365:   ISRestoreIndices(iscol,&icol);

3367:   /* Destroy allocated memory */
3368:   PetscFree(rtable);
3369:   PetscFree(w1);
3370:   PetscFree(pa);

3372:   PetscFree(sbuf1);
3373:   PetscFree(rbuf2[0]);
3374:   PetscFree(rbuf2);
3375:   for (i=0; i<nrqr; ++i) {
3376:     PetscFree(sbuf2[i]);
3377:   }
3378:   for (i=0; i<nrqs; ++i) {
3379:     PetscFree(rbuf3[i]);
3380:     PetscFree(rbuf4[i]);
3381:   }

3383:   PetscFree(sbuf2);
3384:   PetscFree(rbuf3);
3385:   PetscFree(rbuf4);
3386:   PetscFree(sbuf3[0]);
3387:   PetscFree(sbuf3);
3388:   PetscFree(sbuf4[0]);
3389:   PetscFree(sbuf4);
3390: 
3391:   PetscFree(cmap);
3392:   PetscFree(rmap);


3395:   MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3396:   MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);


3399:   return(0);
3400: }
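/*
   Illustrative sketch of a call that reaches the routine above (added; the
   matrix A is assumed to be an assembled MATMPIROWBS matrix and local_rows,
   first_row, local_cols, first_col are hypothetical placeholders): every
   process supplies its part of the wanted rows and columns and a parallel
   MPIAIJ submatrix is returned.

     IS  rows,cols;
     Mat sub;

     ISCreateStride(PETSC_COMM_WORLD,local_rows,first_row,1,&rows);
     ISCreateStride(PETSC_COMM_WORLD,local_cols,first_col,1,&cols);
     MatGetSubMatrix(A,rows,cols,PETSC_DECIDE,MAT_INITIAL_MATRIX,&sub);
*/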