Actual source code: mpisbaij.c

  1: /*$Id: mpisbaij.c,v 1.61 2001/08/10 03:31:37 bsmith Exp $*/

 3:  #include "src/mat/impls/baij/mpi/mpibaij.h"
 4:  #include "src/vec/vecimpl.h"
 5:  #include "mpisbaij.h"
 6:  #include "src/mat/impls/sbaij/seq/sbaij.h"

  8: extern int MatSetUpMultiply_MPISBAIJ(Mat);
  9: extern int MatSetUpMultiply_MPISBAIJ_2comm(Mat);
 10: extern int DisAssemble_MPISBAIJ(Mat);
 11: extern int MatGetValues_SeqSBAIJ(Mat,int,const int[],int,const int[],PetscScalar []);
 12: extern int MatGetValues_SeqBAIJ(Mat,int,const int[],int,const int[],PetscScalar []);
 13: extern int MatSetValues_SeqSBAIJ(Mat,int,const int [],int,const int [],const PetscScalar [],InsertMode);
 14: extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,const int[],int,const int[],const PetscScalar[],InsertMode);
 15: extern int MatSetValuesBlocked_SeqBAIJ(Mat,int,const int[],int,const int[],const PetscScalar[],InsertMode);
 16: extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
 17: extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
 18: extern int MatPrintHelp_SeqSBAIJ(Mat);
 19: extern int MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
 20: extern int MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
 21: extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
 22: extern int MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,int,int,Vec);

 24: /*  UGLY, ugly, ugly
 25:    When MatScalar == PetscScalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does 
 26:    not exist. Otherwise ..._MatScalar() takes matrix elements in single precision and 
 27:    inserts them into the single precision data structures. The function MatSetValuesBlocked_MPISBAIJ()
 28:    converts the entries into single precision and then calls ..._MatScalar() to put them
 29:    into the single precision data structures.
 30: */
 31: #if defined(PETSC_USE_MAT_SINGLE)
 32: extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
 33: extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
 34: extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
 35: extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
 36: extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
 37: #else
 38: #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
 39: #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
 40: #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
 41: #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
 42: #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
 43: #endif

 45: EXTERN_C_BEGIN
 48: int MatStoreValues_MPISBAIJ(Mat mat)
 49: {
 50:   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
 51:   int          ierr;

 54:   MatStoreValues(aij->A);
 55:   MatStoreValues(aij->B);
 56:   return(0);
 57: }
 58: EXTERN_C_END

 60: EXTERN_C_BEGIN
 63: int MatRetrieveValues_MPISBAIJ(Mat mat)
 64: {
 65:   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
 66:   int          ierr;

 69:   MatRetrieveValues(aij->A);
 70:   MatRetrieveValues(aij->B);
 71:   return(0);
 72: }
 73: EXTERN_C_END

 75: /* 
 76:      Local utility routine that creates a mapping from the global column 
 77:    number to the local number in the off-diagonal part of the local 
 78:    storage of the matrix.  This is done in a non-scalable way since the 
 79:    length of colmap equals the global matrix length. 
 80: */
 83: static int CreateColmap_MPISBAIJ_Private(Mat mat)
 84: {
 86:   SETERRQ(1,"Function not yet written for SBAIJ format");
 87:   /* return(0); */
 88: }
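/* Illustrative sketch of what this routine could do, modeled on the BAIJ
   analogue CreateColmap_MPIBAIJ_Private(); member names are assumed to
   carry over.  colmap[] stores local_block*bs+1 (0 meaning "absent"),
   matching the lookups in MatSetValues_MPISBAIJ_MatScalar() below.

     Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
     Mat_SeqBAIJ  *B    = (Mat_SeqBAIJ*)baij->B->data;
     int          i,nbs = B->nbs,bs = B->bs;
   #if defined (PETSC_USE_CTABLE)
     PetscTableCreate(baij->nbs,&baij->colmap);
     for (i=0; i<nbs; i++) {
       PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1);
     }
   #else
     PetscMalloc(baij->Nbs*sizeof(int),&baij->colmap);
     PetscMemzero(baij->colmap,baij->Nbs*sizeof(int));
     for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1;
   #endif
*/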

 90: #define CHUNKSIZE  10

 92: #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
 93: { \
 94:  \
 95:     brow = row/bs;  \
 96:     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
 97:     rmax = aimax[brow]; nrow = ailen[brow]; \
 98:       bcol = col/bs; \
 99:       ridx = row % bs; cidx = col % bs; \
100:       low = 0; high = nrow; \
101:       while (high-low > 3) { \
102:         t = (low+high)/2; \
103:         if (rp[t] > bcol) high = t; \
104:         else              low  = t; \
105:       } \
106:       for (_i=low; _i<high; _i++) { \
107:         if (rp[_i] > bcol) break; \
108:         if (rp[_i] == bcol) { \
109:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
110:           if (addv == ADD_VALUES) *bap += value;  \
111:           else                    *bap  = value;  \
112:           goto a_noinsert; \
113:         } \
114:       } \
115:       if (a->nonew == 1) goto a_noinsert; \
116:       else if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) into matrix", row, col); \
117:       if (nrow >= rmax) { \
118:         /* there is no extra room in row, therefore enlarge */ \
119:         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
120:         MatScalar *new_a; \
121:  \
122:         if (a->nonew == -2) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) in the matrix", row, col); \
123:  \
124:         /* malloc new storage space */ \
125:         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
126:         PetscMalloc(len,&new_a); \
127:         new_j = (int*)(new_a + bs2*new_nz); \
128:         new_i = new_j + new_nz; \
129:  \
130:         /* copy over old data into new slots */ \
131:         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
132:         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
133:         PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int)); \
134:         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
135:         PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int)); \
136:         PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar)); \
137:         PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar)); \
138:         PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
139:                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));  \
140:         /* free up old matrix storage */ \
141:         PetscFree(a->a);  \
142:         if (!a->singlemalloc) { \
143:           PetscFree(a->i); \
144:           PetscFree(a->j);\
145:         } \
146:         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
147:         a->singlemalloc = PETSC_TRUE; \
148:  \
149:         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
150:         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
151:         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
152:         a->s_maxnz += bs2*CHUNKSIZE; \
153:         a->reallocs++; \
154:         a->s_nz++; \
155:       } \
156:       N = nrow++ - 1;  \
157:       /* shift up all the later entries in this row */ \
158:       for (ii=N; ii>=_i; ii--) { \
159:         rp[ii+1] = rp[ii]; \
160:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
161:       } \
162:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); }  \
163:       rp[_i]                      = bcol;  \
164:       ap[bs2*_i + bs*cidx + ridx] = value;  \
165:       a_noinsert:; \
166:     ailen[brow] = nrow; \
167: } 
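/* Note on the layout used by both insertion macros: within block _i the
   bs x bs entries are stored column-major, so element (ridx,cidx) lives
   at ap[bs2*_i + bs*cidx + ridx]; e.g. for bs=2 the block [a b; c d] is
   stored as a,c,b,d. */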
168: #ifndef MatSetValues_SeqSBAIJ_B_Private
169: #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
170: { \
171:     brow = row/bs;  \
172:     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
173:     rmax = bimax[brow]; nrow = bilen[brow]; \
174:       bcol = col/bs; \
175:       ridx = row % bs; cidx = col % bs; \
176:       low = 0; high = nrow; \
177:       while (high-low > 3) { \
178:         t = (low+high)/2; \
179:         if (rp[t] > bcol) high = t; \
180:         else              low  = t; \
181:       } \
182:       for (_i=low; _i<high; _i++) { \
183:         if (rp[_i] > bcol) break; \
184:         if (rp[_i] == bcol) { \
185:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
186:           if (addv == ADD_VALUES) *bap += value;  \
187:           else                    *bap  = value;  \
188:           goto b_noinsert; \
189:         } \
190:       } \
191:       if (b->nonew == 1) goto b_noinsert; \
192:       else if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) into matrix", row, col); \
193:       if (nrow >= rmax) { \
194:         /* there is no extra room in row, therefore enlarge */ \
195:         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
196:         MatScalar *new_a; \
197:  \
198:         if (b->nonew == -2) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) in the matrix", row, col); \
199:  \
200:         /* malloc new storage space */ \
201:         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
202:         PetscMalloc(len,&new_a); \
203:         new_j = (int*)(new_a + bs2*new_nz); \
204:         new_i = new_j + new_nz; \
205:  \
206:         /* copy over old data into new slots */ \
207:         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
208:         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
209:         PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int)); \
210:         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
211:         PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int)); \
212:         PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar)); \
213:         PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar)); \
214:         PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
215:                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));  \
216:         /* free up old matrix storage */ \
217:         PetscFree(b->a);  \
218:         if (!b->singlemalloc) { \
219:           PetscFree(b->i); \
220:           PetscFree(b->j); \
221:         } \
222:         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
223:         b->singlemalloc = PETSC_TRUE; \
224:  \
225:         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
226:         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
227:         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
228:         b->maxnz += bs2*CHUNKSIZE; \
229:         b->reallocs++; \
230:         b->nz++; \
231:       } \
232:       N = nrow++ - 1;  \
233:       /* shift up all the later entries in this row */ \
234:       for (ii=N; ii>=_i; ii--) { \
235:         rp[ii+1] = rp[ii]; \
236:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
237:       } \
238:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));}  \
239:       rp[_i]                      = bcol;  \
240:       ap[bs2*_i + bs*cidx + ridx] = value;  \
241:       b_noinsert:; \
242:     bilen[brow] = nrow; \
243: } 
244: #endif

246: #if defined(PETSC_USE_MAT_SINGLE)
249: int MatSetValues_MPISBAIJ(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
250: {
251:   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
252:   int          ierr,i,N = m*n;
253:   MatScalar    *vsingle;

256:   if (N > b->setvalueslen) {
257:     if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
258:     PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
259:     b->setvalueslen  = N;
260:   }
261:   vsingle = b->setvaluescopy;

263:   for (i=0; i<N; i++) {
264:     vsingle[i] = v[i];
265:   }
266:   MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
267:   return(0);
268: }

272: int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
273: {
274:   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
275:   int         ierr,i,N = m*n*b->bs2;
276:   MatScalar   *vsingle;

279:   if (N > b->setvalueslen) {
280:     if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
281:     PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
282:     b->setvalueslen  = N;
283:   }
284:   vsingle = b->setvaluescopy;
285:   for (i=0; i<N; i++) {
286:     vsingle[i] = v[i];
287:   }
288:   MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
289:   return(0);
290: }

294: int MatSetValues_MPISBAIJ_HT(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
295: {
296:   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
297:   int         ierr,i,N = m*n;
298:   MatScalar   *vsingle;

301:   SETERRQ(1,"Function not yet written for SBAIJ format");
302:   /* return(0); */
303: }

307: int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
308: {
309:   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
310:   int         ierr,i,N = m*n*b->bs2;
311:   MatScalar   *vsingle;

314:   SETERRQ(1,"Function not yet written for SBAIJ format");
315:   /* return(0); */
316: }
317: #endif

319: /* Only add/insert a(i,j) with i<=j (blocks). 
 320:    Any a(i,j) with i>j input by user is ignored. 
321: */
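/* Illustrative consequence (bs=1 for simplicity): a call
     MatSetValues(mat,1,&i,1,&j,&v,ADD_VALUES);
   updates a(i,j) when i<=j and is silently dropped when i>j, so callers
   may pass a full symmetric stencil without filtering it themselves. */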
324: int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
325: {
326:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
327:   MatScalar    value;
328:   PetscTruth   roworiented = baij->roworiented;
329:   int          ierr,i,j,row,col;
330:   int          rstart_orig=baij->rstart_bs;
331:   int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
332:   int          cend_orig=baij->cend_bs,bs=baij->bs;

334:   /* Some Variables required in the macro */
335:   Mat          A = baij->A;
336:   Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
337:   int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
338:   MatScalar    *aa=a->a;

340:   Mat          B = baij->B;
341:   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
342:   int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
343:   MatScalar    *ba=b->a;

345:   int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
346:   int          low,high,t,ridx,cidx,bs2=a->bs2;
347:   MatScalar    *ap,*bap;

349:   /* for stash */
350:   int          n_loc, *in_loc=0;
351:   MatScalar    *v_loc=0;


355:   if(!baij->donotstash){
356:     PetscMalloc(n*sizeof(int),&in_loc);
357:     PetscMalloc(n*sizeof(MatScalar),&v_loc);
358:   }

360:   for (i=0; i<m; i++) {
361:     if (im[i] < 0) continue;
362: #if defined(PETSC_USE_BOPT_g)
363:     if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",im[i],mat->M-1);
364: #endif
365:     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
366:       row = im[i] - rstart_orig;              /* local row index */
367:       for (j=0; j<n; j++) {
368:         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
369:         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
370:           col = in[j] - cstart_orig;          /* local col index */
371:           brow = row/bs; bcol = col/bs;
372:           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
373:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
374:           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
375:           /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
376:         } else if (in[j] < 0) continue;
377: #if defined(PETSC_USE_BOPT_g)
378:         else if (in[j] >= mat->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",in[j],mat->N-1);}
379: #endif
380:         else {  /* off-diag entry (B) */
381:           if (mat->was_assembled) {
382:             if (!baij->colmap) {
383:               CreateColmap_MPISBAIJ_Private(mat);
384:             }
385: #if defined (PETSC_USE_CTABLE)
386:             PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
387:             col  = col - 1;
388: #else
389:             col = baij->colmap[in[j]/bs] - 1;
390: #endif
391:             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
392:               DisAssemble_MPISBAIJ(mat);
393:               col =  in[j];
 394:               /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
395:               B = baij->B;
396:               b = (Mat_SeqBAIJ*)(B)->data;
397:               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
398:               ba=b->a;
399:             } else col += in[j]%bs;
400:           } else col = in[j];
401:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
402:           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
403:           /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
404:         }
405:       }
406:     } else {  /* off processor entry */
407:       if (!baij->donotstash) {
408:         n_loc = 0;
409:         for (j=0; j<n; j++){
410:           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
411:           in_loc[n_loc] = in[j];
412:           if (roworiented) {
413:             v_loc[n_loc] = v[i*n+j];
414:           } else {
415:             v_loc[n_loc] = v[j*m+i];
416:           }
417:           n_loc++;
418:         }
419:         MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);
420:       }
421:     }
422:   }

424:   if(!baij->donotstash){
425:     PetscFree(in_loc);
426:     PetscFree(v_loc);
427:   }
428:   return(0);
429: }

433: int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
434: {
435:   Mat_MPISBAIJ    *baij = (Mat_MPISBAIJ*)mat->data;
436:   const MatScalar *value;
437:   MatScalar       *barray=baij->barray;
438:   PetscTruth      roworiented = baij->roworiented;
439:   int             ierr,i,j,ii,jj,row,col,rstart=baij->rstart;
440:   int             rend=baij->rend,cstart=baij->cstart,stepval;
441:   int             cend=baij->cend,bs=baij->bs,bs2=baij->bs2;

444:   if(!barray) {
445:     PetscMalloc(bs2*sizeof(MatScalar),&barray);
446:     baij->barray = barray;
447:   }

449:   if (roworiented) {
450:     stepval = (n-1)*bs;
451:   } else {
452:     stepval = (m-1)*bs;
453:   }
454:   for (i=0; i<m; i++) {
455:     if (im[i] < 0) continue;
456: #if defined(PETSC_USE_BOPT_g)
457:     if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %d max %d",im[i],baij->Mbs-1);
458: #endif
459:     if (im[i] >= rstart && im[i] < rend) {
460:       row = im[i] - rstart;
461:       for (j=0; j<n; j++) {
 462:         /* If there is only one block column (row-oriented) or one block row (column-oriented), a copy is not required */
463:         if ((roworiented) && (n == 1)) {
464:           barray = (MatScalar*) v + i*bs2;
465:         } else if((!roworiented) && (m == 1)) {
466:           barray = (MatScalar*) v + j*bs2;
467:         } else { /* Here a copy is required */
468:           if (roworiented) {
469:             value = v + i*(stepval+bs)*bs + j*bs;
470:           } else {
471:             value = v + j*(stepval+bs)*bs + i*bs;
472:           }
473:           for (ii=0; ii<bs; ii++,value+=stepval) {
474:             for (jj=0; jj<bs; jj++) {
475:               *barray++  = *value++;
476:             }
477:           }
478:           barray -=bs2;
479:         }
480: 
481:         if (in[j] >= cstart && in[j] < cend){
482:           col  = in[j] - cstart;
483:           MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
484:         }
485:         else if (in[j] < 0) continue;
486: #if defined(PETSC_USE_BOPT_g)
487:         else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %d max %d",in[j],baij->Nbs-1);}
488: #endif
489:         else {
490:           if (mat->was_assembled) {
491:             if (!baij->colmap) {
492:               CreateColmap_MPISBAIJ_Private(mat);
493:             }

495: #if defined(PETSC_USE_BOPT_g)
496: #if defined (PETSC_USE_CTABLE)
497:             { int data;
498:               PetscTableFind(baij->colmap,in[j]+1,&data);
499:               if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
500:             }
501: #else
502:             if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
503: #endif
504: #endif
505: #if defined (PETSC_USE_CTABLE)
506:             PetscTableFind(baij->colmap,in[j]+1,&col);
507:             col  = (col - 1)/bs;
508: #else
509:             col = (baij->colmap[in[j]] - 1)/bs;
510: #endif
 511:             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
512:               DisAssemble_MPISBAIJ(mat);
513:               col =  in[j];
514:             }
515:           }
516:           else col = in[j];
517:           MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
518:         }
519:       }
520:     } else {
521:       if (!baij->donotstash) {
522:         if (roworiented) {
523:           MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
524:         } else {
525:           MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
526:         }
527:       }
528:     }
529:   }
530:   return(0);
531: }

533: #define HASH_KEY 0.6180339887
534: #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
535: /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
536: /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
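/* Worked example of the multiplicative hash: with size=100 and key=7,
   tmp = 7*0.6180339887 = 4.3262..., its fractional part is 0.3262...,
   so HASH(100,7,tmp) = (int)(100*0.3262...) = 32. */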
539: int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
540: {
542:   SETERRQ(1,"Function not yet written for SBAIJ format");
543:   /* return(0); */
544: }

548: int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
549: {
551:   SETERRQ(1,"Function not yet written for SBAIJ format");
552:   /* return(0); */
553: }

557: int MatGetValues_MPISBAIJ(Mat mat,int m,const int idxm[],int n,const int idxn[],PetscScalar v[])
558: {
559:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
560:   int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
561:   int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;

564:   for (i=0; i<m; i++) {
565:     if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %d",idxm[i]);
566:     if (idxm[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",idxm[i],mat->M-1);
567:     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
568:       row = idxm[i] - bsrstart;
569:       for (j=0; j<n; j++) {
570:         if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column %d",idxn[j]);
571:         if (idxn[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",idxn[j],mat->N-1);
572:         if (idxn[j] >= bscstart && idxn[j] < bscend){
573:           col = idxn[j] - bscstart;
574:           MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
575:         } else {
576:           if (!baij->colmap) {
577:             CreateColmap_MPISBAIJ_Private(mat);
578:           }
579: #if defined (PETSC_USE_CTABLE)
580:           PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
581:           data --;
582: #else
583:           data = baij->colmap[idxn[j]/bs]-1;
584: #endif
585:           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
586:           else {
587:             col  = data + idxn[j]%bs;
588:             MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
589:           }
590:         }
591:       }
592:     } else {
593:       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
594:     }
595:   }
596:  return(0);
597: }

601: int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
602: {
603:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
604:   /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
605:   /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
606:   int        ierr;
607:   PetscReal  sum[2],*lnorm2;

610:   if (baij->size == 1) {
611:      MatNorm(baij->A,type,norm);
612:   } else {
613:     if (type == NORM_FROBENIUS) {
614:       PetscMalloc(2*sizeof(PetscReal),&lnorm2);
615:        MatNorm(baij->A,type,lnorm2);
 616:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
617:        MatNorm(baij->B,type,lnorm2);
 618:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
619:       /*
620:       MPI_Comm_rank(mat->comm,&rank);
621:       PetscSynchronizedPrintf(mat->comm,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
622:       */
 623:       MPI_Allreduce(lnorm2,sum,2,MPIU_REAL,MPI_SUM,mat->comm);
624:       /*
625:       PetscSynchronizedPrintf(mat->comm,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
626:       PetscSynchronizedFlush(mat->comm); */
627: 
628:       *norm = sqrt(sum[0] + 2*sum[1]);
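      /* B stores only the upper off-diagonal blocks; by symmetry their
         mirror images below the diagonal contribute equally, hence the
         factor 2 on sum[1] (norm(A) is assumed to already account for
         the symmetric storage of the diagonal block). */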
629:       PetscFree(lnorm2);
630:     } else {
631:       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
632:     }
633:   }
634:   return(0);
635: }

637: /*
 638:   Creates the hash table, and sets its entries.
 639:   This table is created only once. 
 640:   If new entries need to be added to the matrix
641:   then the hash table has to be destroyed and
642:   recreated.
643: */
646: int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
647: {
649:   SETERRQ(1,"Function not yet written for SBAIJ format");
650:   /* return(0); */
651: }

655: int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
656: {
657:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
658:   int         ierr,nstash,reallocs;
659:   InsertMode  addv;

662:   if (baij->donotstash) {
663:     return(0);
664:   }

666:   /* make sure all processors are either in INSERTMODE or ADDMODE */
667:   MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);
668:   if (addv == (ADD_VALUES|INSERT_VALUES)) {
669:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
670:   }
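  /* The reduction works because InsertMode values are small integers
     (NOT_SET_VALUES==0, INSERT_VALUES==1, ADD_VALUES==2): ranks that set
     nothing contribute 0 to the bitwise OR, and the OR equals
     ADD_VALUES|INSERT_VALUES (==3) exactly when ranks disagree. */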
671:   mat->insertmode = addv; /* in case this processor had no cache */

673:   MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);
674:   MatStashScatterBegin_Private(&mat->bstash,baij->rowners);
675:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
 676:   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
 677:   MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
 678:   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
679:   return(0);
680: }

684: int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
685: {
686:   Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
687:   Mat_SeqSBAIJ  *a=(Mat_SeqSBAIJ*)baij->A->data;
688:   Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
689:   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
690:   int         *row,*col,other_disassembled;
691:   PetscTruth  r1,r2,r3;
692:   MatScalar   *val;
693:   InsertMode  addv = mat->insertmode;


697:   if (!baij->donotstash) {
698:     while (1) {
699:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
700:       /*
701:       PetscSynchronizedPrintf(mat->comm,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
702:       PetscSynchronizedFlush(mat->comm); 
703:       */
704:       if (!flg) break;

706:       for (i=0; i<n;) {
707:         /* Now identify the consecutive vals belonging to the same row */
708:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
709:         if (j < n) ncols = j-i;
710:         else       ncols = n-i;
711:         /* Now assemble all these values with a single function call */
712:         MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);
713:         i = j;
714:       }
715:     }
716:     MatStashScatterEnd_Private(&mat->stash);
717:     /* Now process the block-stash. Since the values are stashed column-oriented,
718:        set the roworiented flag to column oriented, and after MatSetValues() 
719:        restore the original flags */
720:     r1 = baij->roworiented;
721:     r2 = a->roworiented;
722:     r3 = b->roworiented;
723:     baij->roworiented = PETSC_FALSE;
724:     a->roworiented    = PETSC_FALSE;
725:     b->roworiented    = PETSC_FALSE;
726:     while (1) {
727:       MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
728:       if (!flg) break;
729: 
730:       for (i=0; i<n;) {
731:         /* Now identify the consecutive vals belonging to the same row */
732:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
733:         if (j < n) ncols = j-i;
734:         else       ncols = n-i;
735:         MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
736:         i = j;
737:       }
738:     }
739:     MatStashScatterEnd_Private(&mat->bstash);
740:     baij->roworiented = r1;
741:     a->roworiented    = r2;
742:     b->roworiented    = r3;
743:   }

745:   MatAssemblyBegin(baij->A,mode);
746:   MatAssemblyEnd(baij->A,mode);

748:   /* determine if any processor has disassembled, if so we must 
 749:      also disassemble ourselves, in order that we may reassemble. */
750:   /*
751:      if nonzero structure of submatrix B cannot change then we know that
752:      no processor disassembled thus we can skip this stuff
753:   */
754:   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
755:     MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);
756:     if (mat->was_assembled && !other_disassembled) {
757:       DisAssemble_MPISBAIJ(mat);
758:     }
759:   }

761:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
762:     MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
763:   }
764:   MatAssemblyBegin(baij->B,mode);
765:   MatAssemblyEnd(baij->B,mode);
766: 
767: #if defined(PETSC_USE_BOPT_g)
768:   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
769:     PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
770:     baij->ht_total_ct  = 0;
771:     baij->ht_insert_ct = 0;
772:   }
773: #endif
774:   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
775:     MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);
776:     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
777:     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
778:   }

780:   if (baij->rowvalues) {
781:     PetscFree(baij->rowvalues);
782:     baij->rowvalues = 0;
783:   }

785:   return(0);
786: }

790: static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
791: {
792:   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
793:   int               ierr,bs = baij->bs,size = baij->size,rank = baij->rank;
794:   PetscTruth        isascii,isdraw;
795:   PetscViewer       sviewer;
796:   PetscViewerFormat format;

799:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
800:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
801:   if (isascii) {
802:     PetscViewerGetFormat(viewer,&format);
803:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
804:       MatInfo info;
805:       MPI_Comm_rank(mat->comm,&rank);
806:       MatGetInfo(mat,MAT_LOCAL,&info);
807:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
808:               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
809:               baij->bs,(int)info.memory);
810:       MatGetInfo(baij->A,MAT_LOCAL,&info);
811:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);
812:       MatGetInfo(baij->B,MAT_LOCAL,&info);
813:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);
814:       PetscViewerFlush(viewer);
815:       VecScatterView(baij->Mvctx,viewer);
816:       return(0);
817:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
818:       PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);
819:       return(0);
820:     }
821:   }

823:   if (isdraw) {
824:     PetscDraw       draw;
825:     PetscTruth isnull;
826:     PetscViewerDrawGetDraw(viewer,0,&draw);
827:     PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
828:   }

830:   if (size == 1) {
831:     PetscObjectSetName((PetscObject)baij->A,mat->name);
832:     MatView(baij->A,viewer);
833:   } else {
834:     /* assemble the entire matrix onto first processor. */
835:     Mat         A;
836:     Mat_SeqSBAIJ *Aloc;
837:     Mat_SeqBAIJ *Bloc;
838:     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
839:     MatScalar   *a;

841:     if (!rank) {
842:       MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);
843:     } else {
844:       MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);
845:     }
846:     PetscLogObjectParent(mat,A);

848:     /* copy over the A part */
849:     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
850:     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
851:     PetscMalloc(bs*sizeof(int),&rvals);

853:     for (i=0; i<mbs; i++) {
854:       rvals[0] = bs*(baij->rstart + i);
855:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
856:       for (j=ai[i]; j<ai[i+1]; j++) {
857:         col = (baij->cstart+aj[j])*bs;
858:         for (k=0; k<bs; k++) {
859:           MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
860:           col++; a += bs;
861:         }
862:       }
863:     }
864:     /* copy over the B part */
865:     Bloc = (Mat_SeqBAIJ*)baij->B->data;
866:     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
867:     for (i=0; i<mbs; i++) {
868:       rvals[0] = bs*(baij->rstart + i);
869:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
870:       for (j=ai[i]; j<ai[i+1]; j++) {
871:         col = baij->garray[aj[j]]*bs;
872:         for (k=0; k<bs; k++) {
873:           MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
874:           col++; a += bs;
875:         }
876:       }
877:     }
878:     PetscFree(rvals);
879:     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
880:     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
881:     /* 
882:        Everyone has to call to draw the matrix since the graphics waits are
883:        synchronized across all processors that share the PetscDraw object
884:     */
885:     PetscViewerGetSingleton(viewer,&sviewer);
886:     if (!rank) {
887:       PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);
888:       MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
889:     }
890:     PetscViewerRestoreSingleton(viewer,&sviewer);
891:     MatDestroy(A);
892:   }
893:   return(0);
894: }

898: int MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
899: {
900:   int        ierr;
901:   PetscTruth isascii,isdraw,issocket,isbinary;

904:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
905:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
906:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
907:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
908:   if (isascii || isdraw || issocket || isbinary) {
909:     MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
910:   } else {
911:     SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
912:   }
913:   return(0);
914: }

918: int MatDestroy_MPISBAIJ(Mat mat)
919: {
920:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
921:   int         ierr;

924: #if defined(PETSC_USE_LOG)
925:   PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
926: #endif
927:   MatStashDestroy_Private(&mat->stash);
928:   MatStashDestroy_Private(&mat->bstash);
929:   PetscFree(baij->rowners);
930:   MatDestroy(baij->A);
931:   MatDestroy(baij->B);
932: #if defined (PETSC_USE_CTABLE)
933:   if (baij->colmap) {PetscTableDelete(baij->colmap);}
934: #else
935:   if (baij->colmap) {PetscFree(baij->colmap);}
936: #endif
937:   if (baij->garray) {PetscFree(baij->garray);}
938:   if (baij->lvec)   {VecDestroy(baij->lvec);}
939:   if (baij->Mvctx)  {VecScatterDestroy(baij->Mvctx);}
940:   if (baij->slvec0) {
941:     VecDestroy(baij->slvec0);
942:     VecDestroy(baij->slvec0b);
943:   }
944:   if (baij->slvec1) {
945:     VecDestroy(baij->slvec1);
946:     VecDestroy(baij->slvec1a);
947:     VecDestroy(baij->slvec1b);
948:   }
949:   if (baij->sMvctx)  {VecScatterDestroy(baij->sMvctx);}
950:   if (baij->rowvalues) {PetscFree(baij->rowvalues);}
951:   if (baij->barray) {PetscFree(baij->barray);}
952:   if (baij->hd) {PetscFree(baij->hd);}
953: #if defined(PETSC_USE_MAT_SINGLE)
954:   if (baij->setvaluescopy) {PetscFree(baij->setvaluescopy);}
955: #endif
956:   PetscFree(baij);
957:   return(0);
958: }

962: int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
963: {
964:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
965:   int         ierr,nt,mbs=a->mbs,bs=a->bs;
966:   PetscScalar *x,*from,zero=0.0;
967: 
969:   /*
970:   PetscSynchronizedPrintf(A->comm," _1comm is called ...\n");
971:   PetscSynchronizedFlush(A->comm);
972:   */
973:   VecGetLocalSize(xx,&nt);
974:   if (nt != A->n) {
975:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
976:   }
977:   VecGetLocalSize(yy,&nt);
978:   if (nt != A->m) {
 979:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
980:   }
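  /* Scheme: with only the upper triangle stored,
       y = A*x + B*xg + (B^T*x contributions scattered back),
     where xg are the ghost values of x.  slvec0 packs [x ; B^T*x] and a
     single scatter-add builds slvec1 = [partial sums ; xg], so one
     communication round replaces the forward plus reverse scatters used
     by MatMult_MPISBAIJ_2comm() below. */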

982:   /* diagonal part */
983:   (*a->A->ops->mult)(a->A,xx,a->slvec1a);
984:   VecSet(&zero,a->slvec1b);

986:   /* subdiagonal part */
987:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

989:   /* copy x into the vec slvec0 */
990:   VecGetArray(a->slvec0,&from);
991:   VecGetArray(xx,&x);
 992:   PetscMemcpy(from,x,bs*mbs*sizeof(PetscScalar));
993:   VecRestoreArray(a->slvec0,&from);
994: 
995:   VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
996:   VecRestoreArray(xx,&x);
997:   VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
998: 
 999:   /* superdiagonal part */
1000:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
1001: 
1002:   return(0);
1003: }

1007: int MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
1008: {
1009:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1010:   int         ierr,nt;

1013:   VecGetLocalSize(xx,&nt);
1014:   if (nt != A->n) {
1015:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
1016:   }
1017:   VecGetLocalSize(yy,&nt);
1018:   if (nt != A->m) {
 1019:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
1020:   }

1022:   VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1023:   /* do diagonal part */
1024:   (*a->A->ops->mult)(a->A,xx,yy);
 1025:   /* do superdiagonal part */
1026:   VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1027:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1028:   /* do subdiagonal part */
1029:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1030:   VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1031:   VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);

1033:   return(0);
1034: }

1038: int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1039: {
1040:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1041:   int          ierr,mbs=a->mbs,bs=a->bs;
1042:   PetscScalar  *x,*from,zero=0.0;
1043: 
1045:   /*
1046:   PetscSynchronizedPrintf(A->comm," MatMultAdd is called ...\n");
1047:   PetscSynchronizedFlush(A->comm);
1048:   */
1049:   /* diagonal part */
1050:   (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
1051:   VecSet(&zero,a->slvec1b);

1053:   /* subdiagonal part */
1054:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

1056:   /* copy x into the vec slvec0 */
1057:   VecGetArray(a->slvec0,&from);
1058:   VecGetArray(xx,&x);
 1059:   PetscMemcpy(from,x,bs*mbs*sizeof(PetscScalar));
1060:   VecRestoreArray(a->slvec0,&from);
1061: 
1062:   VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
1063:   VecRestoreArray(xx,&x);
1064:   VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
1065: 
 1066:   /* superdiagonal part */
1067:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
1068: 
1069:   return(0);
1070: }

1074: int MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
1075: {
1076:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1077:   int        ierr;

1080:   VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1081:   /* do diagonal part */
1082:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1083:   /* do supperdiagonal part */
1084:   VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1085:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);

1087:   /* do subdiagonal part */
1088:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1089:   VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1090:   VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);

1092:   return(0);
1093: }

1097: int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
1098: {
1100:   SETERRQ(1,"Matrix is symmetric. Call MatMult().");
1101:   /* return(0); */
1102: }

1106: int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1107: {
1109:   SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
1110:   /* return(0); */
1111: }

1113: /*
1114:   This only works correctly for square matrices where the subblock A->A is the 
1115:    diagonal block
1116: */
1119: int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
1120: {
1121:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1122:   int         ierr;

1125:   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
1126:   MatGetDiagonal(a->A,v);
1127:   return(0);
1128: }

1132: int MatScale_MPISBAIJ(const PetscScalar *aa,Mat A)
1133: {
1134:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1135:   int         ierr;

1138:   MatScale(aa,a->A);
1139:   MatScale(aa,a->B);
1140:   return(0);
1141: }

1145: int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,PetscScalar **v)
1146: {
1147:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
1148:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1149:   int            bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
1150:   int            nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
1151:   int            *cmap,*idx_p,cstart = mat->cstart;

1154:   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1155:   mat->getrowactive = PETSC_TRUE;

1157:   if (!mat->rowvalues && (idx || v)) {
1158:     /*
1159:         allocate enough space to hold information from the longest row.
1160:     */
1161:     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
1162:     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
1163:     int     max = 1,mbs = mat->mbs,tmp;
1164:     for (i=0; i<mbs; i++) {
1165:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
1166:       if (max < tmp) { max = tmp; }
1167:     }
1168:     PetscMalloc(max*bs2*(sizeof(int)+sizeof(PetscScalar)),&mat->rowvalues);
1169:     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
1170:   }
1171: 
 1172:   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
1173:   lrow = row - brstart;  /* local row index */

1175:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1176:   if (!v)   {pvA = 0; pvB = 0;}
1177:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1178:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1179:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1180:   nztot = nzA + nzB;

1182:   cmap  = mat->garray;
1183:   if (v  || idx) {
1184:     if (nztot) {
1185:       /* Sort by increasing column numbers, assuming A and B already sorted */
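        /* cmap[] maps B's compact block columns to global block numbers:
           B's columns with cmap < cstart lie left of the diagonal block,
           A's columns (offset by cstart*bs) in the middle, and B's
           remaining columns to the right, so the three runs concatenate
           in increasing global column order. */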
1186:       int imark = -1;
1187:       if (v) {
1188:         *v = v_p = mat->rowvalues;
1189:         for (i=0; i<nzB; i++) {
1190:           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1191:           else break;
1192:         }
1193:         imark = i;
1194:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1195:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1196:       }
1197:       if (idx) {
1198:         *idx = idx_p = mat->rowindices;
1199:         if (imark > -1) {
1200:           for (i=0; i<imark; i++) {
1201:             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1202:           }
1203:         } else {
1204:           for (i=0; i<nzB; i++) {
1205:             if (cmap[cworkB[i]/bs] < cstart)
1206:               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1207:             else break;
1208:           }
1209:           imark = i;
1210:         }
1211:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1212:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1213:       }
1214:     } else {
1215:       if (idx) *idx = 0;
1216:       if (v)   *v   = 0;
1217:     }
1218:   }
1219:   *nz = nztot;
1220:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1221:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1222:   return(0);
1223: }

1227: int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,PetscScalar **v)
1228: {
1229:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

1232:   if (baij->getrowactive == PETSC_FALSE) {
1233:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1234:   }
1235:   baij->getrowactive = PETSC_FALSE;
1236:   return(0);
1237: }

1241: int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
1242: {
1243:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

1246:   *bs = baij->bs;
1247:   return(0);
1248: }

1252: int MatZeroEntries_MPISBAIJ(Mat A)
1253: {
1254:   Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1255:   int         ierr;

1258:   MatZeroEntries(l->A);
1259:   MatZeroEntries(l->B);
1260:   return(0);
1261: }

1265: int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1266: {
1267:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1268:   Mat         A = a->A,B = a->B;
1269:   int         ierr;
1270:   PetscReal   isend[5],irecv[5];

1273:   info->block_size     = (PetscReal)a->bs;
1274:   MatGetInfo(A,MAT_LOCAL,info);
1275:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1276:   isend[3] = info->memory;  isend[4] = info->mallocs;
1277:   MatGetInfo(B,MAT_LOCAL,info);
1278:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1279:   isend[3] += info->memory;  isend[4] += info->mallocs;
1280:   if (flag == MAT_LOCAL) {
1281:     info->nz_used      = isend[0];
1282:     info->nz_allocated = isend[1];
1283:     info->nz_unneeded  = isend[2];
1284:     info->memory       = isend[3];
1285:     info->mallocs      = isend[4];
1286:   } else if (flag == MAT_GLOBAL_MAX) {
1287:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);
1288:     info->nz_used      = irecv[0];
1289:     info->nz_allocated = irecv[1];
1290:     info->nz_unneeded  = irecv[2];
1291:     info->memory       = irecv[3];
1292:     info->mallocs      = irecv[4];
1293:   } else if (flag == MAT_GLOBAL_SUM) {
1294:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);
1295:     info->nz_used      = irecv[0];
1296:     info->nz_allocated = irecv[1];
1297:     info->nz_unneeded  = irecv[2];
1298:     info->memory       = irecv[3];
1299:     info->mallocs      = irecv[4];
1300:   } else {
1301:     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1302:   }
 1303:   info->rows_global       = (PetscReal)matin->M;
 1304:   info->columns_global    = (PetscReal)matin->N;
 1305:   info->rows_local        = (PetscReal)matin->m;
 1306:   info->columns_local     = (PetscReal)matin->n;
1307:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1308:   info->fill_ratio_needed = 0;
1309:   info->factor_mallocs    = 0;
1310:   return(0);
1311: }

1315: int MatSetOption_MPISBAIJ(Mat A,MatOption op)
1316: {
1317:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1318:   int         ierr;

1321:   switch (op) {
1322:   case MAT_NO_NEW_NONZERO_LOCATIONS:
1323:   case MAT_YES_NEW_NONZERO_LOCATIONS:
1324:   case MAT_COLUMNS_UNSORTED:
1325:   case MAT_COLUMNS_SORTED:
1326:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1327:   case MAT_KEEP_ZEROED_ROWS:
1328:   case MAT_NEW_NONZERO_LOCATION_ERR:
1329:     MatSetOption(a->A,op);
1330:     MatSetOption(a->B,op);
1331:     break;
1332:   case MAT_ROW_ORIENTED:
1333:     a->roworiented = PETSC_TRUE;
1334:     MatSetOption(a->A,op);
1335:     MatSetOption(a->B,op);
1336:     break;
1337:   case MAT_ROWS_SORTED:
1338:   case MAT_ROWS_UNSORTED:
1339:   case MAT_YES_NEW_DIAGONALS:
 1340:     PetscLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
1341:     break;
1342:   case MAT_COLUMN_ORIENTED:
1343:     a->roworiented = PETSC_FALSE;
1344:     MatSetOption(a->A,op);
1345:     MatSetOption(a->B,op);
1346:     break;
1347:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1348:     a->donotstash = PETSC_TRUE;
1349:     break;
1350:   case MAT_NO_NEW_DIAGONALS:
1351:     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1352:   case MAT_USE_HASH_TABLE:
1353:     a->ht_flag = PETSC_TRUE;
1354:     break;
1355:   default:
1356:     SETERRQ(PETSC_ERR_SUP,"unknown option");
1357:   }
1358:   return(0);
1359: }

1363: int MatTranspose_MPISBAIJ(Mat A,Mat *B)
1364: {
1367:   MatDuplicate(A,MAT_COPY_VALUES,B);
1368:   return(0);
1369: }

1373: int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1374: {
1375:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1376:   Mat         a = baij->A,b = baij->B;
1377:   int         ierr,s1,s2,s3;

1380:   if (ll != rr) {
 1381:     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same\n");
1382:   }
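  /* Only a symmetric scaling D*A*D preserves symmetry; a general L*A*R
     would produce an unsymmetric result that the SBAIJ storage (upper
     triangle only) cannot represent. */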
1383:   MatGetLocalSize(mat,&s2,&s3);
1384:   if (rr) {
1385:     VecGetLocalSize(rr,&s1);
1386:     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1387:     /* Overlap communication with computation. */
1388:     VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1389:     /*} if (ll) { */
1390:     VecGetLocalSize(ll,&s1);
1391:     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1392:     (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1393:     /* } */
1394:   /* scale  the diagonal block */
1395:   (*a->ops->diagonalscale)(a,ll,rr);

1397:   /* if (rr) { */
1398:     /* Do a scatter end and then right scale the off-diagonal block */
1399:     VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1400:     (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1401:   }
1402: 
1403:   return(0);
1404: }

1408: int MatZeroRows_MPISBAIJ(Mat A,IS is,const PetscScalar *diag)
1409: {
1411:   SETERRQ(PETSC_ERR_SUP,"No support for this function yet");
1412: }

1416: int MatPrintHelp_MPISBAIJ(Mat A)
1417: {
1418:   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1419:   MPI_Comm    comm = A->comm;
1420:   static int  called = 0;
1421:   int         ierr;

1424:   if (!a->rank) {
1425:     MatPrintHelp_SeqSBAIJ(a->A);
1426:   }
1427:   if (called) {return(0);} else called = 1;
1428:   (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");
1429:   (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");
1430:   return(0);
1431: }

1435: int MatSetUnfactored_MPISBAIJ(Mat A)
1436: {
1437:   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1438:   int         ierr;

1441:   MatSetUnfactored(a->A);
1442:   return(0);
1443: }

1445: static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);

1449: int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1450: {
1451:   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1452:   Mat         a,b,c,d;
1453:   PetscTruth  flg;
1454:   int         ierr;

1457:   a = matA->A; b = matA->B;
1458:   c = matB->A; d = matB->B;

1460:   MatEqual(a,c,&flg);
1461:   if (flg == PETSC_TRUE) {
1462:     MatEqual(b,d,&flg);
1463:   }
1464:   MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);
1465:   return(0);
1466: }

1470: int MatSetUpPreallocation_MPISBAIJ(Mat A)
1471: {
1472:   int        ierr;

1475:   MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1476:   return(0);
1477: }
1478: /* -------------------------------------------------------------------*/
1479: static struct _MatOps MatOps_Values = {
1480:        MatSetValues_MPISBAIJ,
1481:        MatGetRow_MPISBAIJ,
1482:        MatRestoreRow_MPISBAIJ,
1483:        MatMult_MPISBAIJ,
1484: /* 4*/ MatMultAdd_MPISBAIJ,
1485:        MatMultTranspose_MPISBAIJ,
1486:        MatMultTransposeAdd_MPISBAIJ,
1487:        0,
1488:        0,
1489:        0,
1490: /*10*/ 0,
1491:        0,
1492:        0,
1493:        MatRelax_MPISBAIJ,
1494:        MatTranspose_MPISBAIJ,
1495: /*15*/ MatGetInfo_MPISBAIJ,
1496:        MatEqual_MPISBAIJ,
1497:        MatGetDiagonal_MPISBAIJ,
1498:        MatDiagonalScale_MPISBAIJ,
1499:        MatNorm_MPISBAIJ,
1500: /*20*/ MatAssemblyBegin_MPISBAIJ,
1501:        MatAssemblyEnd_MPISBAIJ,
1502:        0,
1503:        MatSetOption_MPISBAIJ,
1504:        MatZeroEntries_MPISBAIJ,
1505: /*25*/ MatZeroRows_MPISBAIJ,
1506:        0,
1507:        0,
1508:        0,
1509:        0,
1510: /*30*/ MatSetUpPreallocation_MPISBAIJ,
1511:        0,
1512:        0,
1513:        0,
1514:        0,
1515: /*35*/ MatDuplicate_MPISBAIJ,
1516:        0,
1517:        0,
1518:        0,
1519:        0,
1520: /*40*/ 0,
1521:        0,
1522:        0,
1523:        MatGetValues_MPISBAIJ,
1524:        0,
1525: /*45*/ MatPrintHelp_MPISBAIJ,
1526:        MatScale_MPISBAIJ,
1527:        0,
1528:        0,
1529:        0,
1530: /*50*/ MatGetBlockSize_MPISBAIJ,
1531:        0,
1532:        0,
1533:        0,
1534:        0,
1535: /*55*/ 0,
1536:        0,
1537:        MatSetUnfactored_MPISBAIJ,
1538:        0,
1539:        MatSetValuesBlocked_MPISBAIJ,
1540: /*60*/ 0,
1541:        0,
1542:        0,
1543:        MatGetPetscMaps_Petsc,
1544:        0,
1545: /*65*/ 0,
1546:        0,
1547:        0,
1548:        0,
1549:        0,
1550: /*70*/ MatGetRowMax_MPISBAIJ,
1551:        0,
1552:        0,
1553:        0,
1554:        0,
1555: /*75*/ 0,
1556:        0,
1557:        0,
1558:        0,
1559:        0,
1560: /*80*/ 0,
1561:        0,
1562:        0,
1563:        0,
1564:        0,
1565: /*85*/ MatLoad_MPISBAIJ
1566: };


1569: EXTERN_C_BEGIN
1572: int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1573: {
1575:   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1576:   *iscopy = PETSC_FALSE;
1577:   return(0);
1578: }
1579: EXTERN_C_END

1581: EXTERN_C_BEGIN
1584: int MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1585: {
1586:   Mat_MPISBAIJ *b;
1587:   int          ierr,i,mbs,Mbs;

1590:   PetscOptionsGetInt(B->prefix,"-mat_block_size",&bs,PETSC_NULL);

1592:   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1593:   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1594:   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1595:   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %d",d_nz);
1596:   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %d",o_nz);
1597:   if (d_nnz) {
1598:     for (i=0; i<B->m/bs; i++) {
 1599:       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %d value %d",i,d_nnz[i]);
1600:     }
1601:   }
1602:   if (o_nnz) {
1603:     for (i=0; i<B->m/bs; i++) {
 1604:       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %d value %d",i,o_nnz[i]);
1605:     }
1606:   }
1607:   B->preallocated = PETSC_TRUE;
1608:   PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);
1609:   PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);
1610:   PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);
1611:   PetscMapCreateMPI(B->comm,B->m,B->M,&B->cmap);

1613:   b   = (Mat_MPISBAIJ*)B->data;
1614:   mbs = B->m/bs;
1615:   Mbs = B->M/bs;
1616:   if (mbs*bs != B->m) {
 1617:     SETERRQ2(PETSC_ERR_ARG_SIZ,"Number of local rows %d must be divisible by blocksize %d",B->m,bs);
1618:   }

1620:   b->bs  = bs;
1621:   b->bs2 = bs*bs;
1622:   b->mbs = mbs;
1623:   b->nbs = mbs;
1624:   b->Mbs = Mbs;
1625:   b->Nbs = Mbs;

1627:   MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);
1628:   b->rowners[0]    = 0;
1629:   for (i=2; i<=b->size; i++) {
1630:     b->rowners[i] += b->rowners[i-1];
1631:   }
1632:   b->rstart    = b->rowners[b->rank];
1633:   b->rend      = b->rowners[b->rank+1];
1634:   b->cstart    = b->rstart;
1635:   b->cend      = b->rend;
1636:   for (i=0; i<=b->size; i++) {
1637:     b->rowners_bs[i] = b->rowners[i]*bs;
1638:   }
1639:   b->rstart_bs = b->rstart*bs;
1640:   b->rend_bs   = b->rend*bs;
1641: 
1642:   b->cstart_bs = b->cstart*bs;
1643:   b->cend_bs   = b->cend*bs;
1644: 

1646:   MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);
1647:   PetscLogObjectParent(B,b->A);
1648:   MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);
1649:   PetscLogObjectParent(B,b->B);

1651:   /* build stash for off-processor entries generated during assembly */
1652:   MatStashCreate_Private(B->comm,bs,&B->bstash);

1654:   return(0);
1655: }
1656: EXTERN_C_END
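
/*
   Editor's sketch, not part of the original source: the ownership setup above
   (source lines 1627-1638) in isolation.  Each process contributes its local
   block-row count; a prefix sum then makes owners[p]..owners[p+1] the global
   block-row range of process p.  The helper name is hypothetical.
*/
static int SBAIJExample_BlockRowOwners(MPI_Comm comm,int mbs,int **owners)
{
  int ierr,size,i,*r;

  MPI_Comm_size(comm,&size);
  ierr = PetscMalloc((size+1)*sizeof(int),&r);CHKERRQ(ierr);
  MPI_Allgather(&mbs,1,MPI_INT,r+1,1,MPI_INT,comm);  /* r[p+1] = block-row count of rank p */
  r[0] = 0;
  for (i=2; i<=size; i++) r[i] += r[i-1];            /* prefix sum; r[1] is already correct */
  *owners = r;
  return 0;
}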

1658: /*MC
1659:    MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices, 
1660:    based on block compressed sparse row format.  Only the upper triangular portion of the matrix is stored.

1662:    Options Database Keys:
1663: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()

1665:   Level: beginner

1667: .seealso: MatCreateMPISBAIJ
1668: M*/
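
/*
   Editor's sketch, not part of the original source: selecting this type from
   the options database, assuming the MatCreate(comm,m,n,M,N,&A) calling
   sequence used throughout this file.  The function name is hypothetical.
*/
static int SBAIJExample_TypeFromOptions(void)
{
  Mat A;
  int ierr;

  ierr = MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,&A);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);  /* honors -mat_type mpisbaij */
  ierr = MatDestroy(A);CHKERRQ(ierr);
  return 0;
}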

1670: EXTERN_C_BEGIN
1673: int MatCreate_MPISBAIJ(Mat B)
1674: {
1675:   Mat_MPISBAIJ *b;
1676:   int          ierr;
1677:   PetscTruth   flg;


1681:   PetscNew(Mat_MPISBAIJ,&b);
1682:   B->data = (void*)b;
1683:   PetscMemzero(b,sizeof(Mat_MPISBAIJ));
1684:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));

1686:   B->ops->destroy    = MatDestroy_MPISBAIJ;
1687:   B->ops->view       = MatView_MPISBAIJ;
1688:   B->mapping    = 0;
1689:   B->factor     = 0;
1690:   B->assembled  = PETSC_FALSE;

1692:   B->insertmode = NOT_SET_VALUES;
1693:   MPI_Comm_rank(B->comm,&b->rank);
1694:   MPI_Comm_size(B->comm,&b->size);

1696:   /* build local table of row and column ownerships */
1697:   PetscMalloc(3*(b->size+2)*sizeof(int),&b->rowners);
1698:   b->cowners    = b->rowners + b->size + 2;
1699:   b->rowners_bs = b->cowners + b->size + 2;
1700:   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));

1702:   /* build stash for off-processor entries generated during assembly */
1703:   MatStashCreate_Private(B->comm,1,&B->stash);
1704:   b->donotstash  = PETSC_FALSE;
1705:   b->colmap      = PETSC_NULL;
1706:   b->garray      = PETSC_NULL;
1707:   b->roworiented = PETSC_TRUE;

1709: #if defined(PETSC_USE_MAT_SINGLE)
1710:   /* stuff for MatSetValues_XXX in single precision */
1711:   b->setvalueslen     = 0;
1712:   b->setvaluescopy    = PETSC_NULL;
1713: #endif

1715:   /* stuff used in block assembly */
1716:   b->barray       = 0;

1718:   /* stuff used for matrix vector multiply */
1719:   b->lvec         = 0;
1720:   b->Mvctx        = 0;
1721:   b->slvec0       = 0;
1722:   b->slvec0b      = 0;
1723:   b->slvec1       = 0;
1724:   b->slvec1a      = 0;
1725:   b->slvec1b      = 0;
1726:   b->sMvctx       = 0;

1728:   /* stuff for MatGetRow() */
1729:   b->rowindices   = 0;
1730:   b->rowvalues    = 0;
1731:   b->getrowactive = PETSC_FALSE;

1733:   /* hash table stuff */
1734:   b->ht           = 0;
1735:   b->hd           = 0;
1736:   b->ht_size      = 0;
1737:   b->ht_flag      = PETSC_FALSE;
1738:   b->ht_fact      = 0;
1739:   b->ht_total_ct  = 0;
1740:   b->ht_insert_ct = 0;

1742:   PetscOptionsHasName(B->prefix,"-mat_use_hash_table",&flg);
1743:   if (flg) {
1744:     PetscReal fact = 1.39;
1745:     MatSetOption(B,MAT_USE_HASH_TABLE);
1746:     PetscOptionsGetReal(B->prefix,"-mat_use_hash_table",&fact,PETSC_NULL);
1747:     if (fact <= 1.0) fact = 1.39;
1748:     MatMPIBAIJSetHashTableFactor(B,fact);
1749:     PetscLogInfo(0,"MatCreate_MPISBAIJ:Hash table factor used %5.2f\n",fact);
1750:   }
1751:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1752:                                      "MatStoreValues_MPISBAIJ",
1753:                                      MatStoreValues_MPISBAIJ);
1754:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1755:                                      "MatRetrieveValues_MPISBAIJ",
1756:                                      MatRetrieveValues_MPISBAIJ);
1757:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1758:                                      "MatGetDiagonalBlock_MPISBAIJ",
1759:                                      MatGetDiagonalBlock_MPISBAIJ);
1760:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1761:                                      "MatMPISBAIJSetPreallocation_MPISBAIJ",
1762:                                      MatMPISBAIJSetPreallocation_MPISBAIJ);
1763:   return(0);
1764: }
1765: EXTERN_C_END

1767: /*MC
1768:    MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.

1770:    This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1771:    and MATMPISBAIJ otherwise.

1773:    Options Database Keys:
1774: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()

1776:   Level: beginner

1778: .seealso: MatCreateMPISBAIJ,MATSEQSBAIJ,MATMPISBAIJ
1779: M*/

1781: EXTERN_C_BEGIN
1784: int MatCreate_SBAIJ(Mat A) {
1785:   int ierr,size;

1788:   PetscObjectChangeTypeName((PetscObject)A,MATSBAIJ);
1789:   MPI_Comm_size(A->comm,&size);
1790:   if (size == 1) {
1791:     MatSetType(A,MATSEQSBAIJ);
1792:   } else {
1793:     MatSetType(A,MATMPISBAIJ);
1794:   }
1795:   return(0);
1796: }
1797: EXTERN_C_END
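
/*
   Editor's sketch, not part of the original source: with the dispatch above a
   single MatSetType() call works on any communicator size; it yields
   MATSEQSBAIJ on one process and MATMPISBAIJ otherwise.  The function name is
   hypothetical.
*/
static int SBAIJExample_GenericType(MPI_Comm comm)
{
  Mat A;
  int ierr;

  ierr = MatCreate(comm,PETSC_DECIDE,PETSC_DECIDE,100,100,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATSBAIJ);CHKERRQ(ierr);
  ierr = MatDestroy(A);CHKERRQ(ierr);
  return 0;
}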

1801: /*@C
1802:    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1803:    the user should preallocate the matrix storage by setting the parameters 
1804:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1805:    performance can be increased by more than a factor of 50.

1807:    Collective on Mat

1809:    Input Parameters:
1810: +  A - the matrix 
1811: .  bs   - size of block
1812: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1813:            submatrix  (same for all local rows)
1814: .  d_nnz - array containing the number of block nonzeros in the various block rows 
1815:            in the upper triangular and diagonal part of the diagonal portion of the local
1816:            submatrix (possibly different for each block row) or PETSC_NULL.  You must leave room 
1817:            for the diagonal entry even if it is zero.
1818: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1819:            submatrix (same for all local rows).
1820: -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1821:            off-diagonal portion of the local submatrix (possibly different for
1822:            each block row) or PETSC_NULL.


1825:    Options Database Keys:
1826: .   -mat_no_unroll - uses code that does not unroll the loops in the 
1827:                      block calculations (much slower)
1828: .   -mat_block_size - size of the blocks to use

1830:    Notes:

1832:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1833:    then it must be used on all processors that share the object for that argument.

1835:    Storage Information:
1836:    For a square global matrix we define each processor's diagonal portion 
1837:    to be its local rows and the corresponding columns (a square submatrix);  
1838:    each processor's off-diagonal portion encompasses the remainder of the
1839:    local matrix (a rectangular submatrix). 

1841:    The user can specify preallocated storage for the diagonal part of
1842:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1843:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1844:    memory allocation.  Likewise, specify preallocated storage for the
1845:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1847:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1848:    the figure below we depict these three local rows and all columns (0-11).

1850: .vb
1851:            0 1 2 3 4 5 6 7 8 9 10 11
1852:           -------------------
1853:    row 3  |  o o o d d d o o o o o o
1854:    row 4  |  o o o d d d o o o o o o
1855:    row 5  |  o o o d d d o o o o o o
1856:           -------------------
1857: .ve
1858:   
1859:    Thus, any entries in the d locations are stored in the d (diagonal) 
1860:    submatrix, and any entries in the o locations are stored in the
1861:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1862:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.

1864:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1865:    plus the diagonal part of the d matrix,
1866:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1867:    In general, for PDE problems in which most nonzeros are near the diagonal,
1868:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1869:    or you will get TERRIBLE performance; see the users' manual chapter on
1870:    matrices.

1872:    Level: intermediate

1874: .keywords: matrix, block, aij, compressed row, sparse, parallel

1876: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1877: @*/
1878: int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,const int d_nnz[],int o_nz,const int o_nnz[])
1879: {
1880:   int ierr,(*f)(Mat,int,int,const int[],int,const int[]);

1883:   PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",(void (**)(void))&f);
1884:   if (f) {
1885:     (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
1886:   }
1887:   return(0);
1888: }
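
/*
   Editor's sketch, not part of the original source: typical use of
   MatMPISBAIJSetPreallocation().  With bs=2 the 8 local rows form 4 block
   rows; d_nnz counts block nonzeros in the upper triangle of the diagonal
   portion (diagonal block included), and the off-diagonal part gets a uniform
   estimate.  The counts are illustrative and the function name hypothetical.
*/
static int SBAIJExample_Preallocate(Mat *newmat)
{
  Mat A;
  int ierr,d_nnz[4] = {3,3,2,1};

  ierr = MatCreate(PETSC_COMM_WORLD,8,8,PETSC_DETERMINE,PETSC_DETERMINE,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(A,2,0,d_nnz,1,PETSC_NULL);CHKERRQ(ierr);
  *newmat = A;
  return 0;
}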

1892: /*@C
1893:    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1894:    (block compressed row).  For good matrix assembly performance
1895:    the user should preallocate the matrix storage by setting the parameters 
1896:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1897:    performance can be increased by more than a factor of 50.

1899:    Collective on MPI_Comm

1901:    Input Parameters:
1902: +  comm - MPI communicator
1903: .  bs   - size of block
1904: .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1905:            This value should be the same as the local size used in creating the 
1906:            y vector for the matrix-vector product y = Ax.
1907: .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1908:            This value should be the same as the local size used in creating the 
1909:            x vector for the matrix-vector product y = Ax.
1910: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1911: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1912: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1913:            submatrix  (same for all local rows)
1914: .  d_nnz - array containing the number of block nonzeros in the various block rows 
1915:            in the upper triangular portion of the diagonal portion of the local 
1916:            submatrix (possibly different for each block row) or PETSC_NULL.  
1917:            You must leave room for the diagonal entry even if it is zero.
1918: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1919:            submatrix (same for all local rows).
1920: -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1921:            off-diagonal portion of the local submatrix (possibly different for
1922:            each block row) or PETSC_NULL.

1924:    Output Parameter:
1925: .  A - the matrix 

1927:    Options Database Keys:
1928: .   -mat_no_unroll - uses code that does not unroll the loops in the 
1929:                      block calculations (much slower)
1930: .   -mat_block_size - size of the blocks to use
1931: .   -mat_mpi - use the parallel matrix data structures even on one processor 
1932:                (defaults to using SeqSBAIJ format on one processor)

1934:    Notes:
1935:    The user MUST specify either the local or global matrix dimensions
1936:    (possibly both).

1938:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1939:    then it must be used on all processors that share the object for that argument.

1941:    Storage Information:
1942:    For a square global matrix we define each processor's diagonal portion 
1943:    to be its local rows and the corresponding columns (a square submatrix);  
1944:    each processor's off-diagonal portion encompasses the remainder of the
1945:    local matrix (a rectangular submatrix). 

1947:    The user can specify preallocated storage for the diagonal part of
1948:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1949:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1950:    memory allocation.  Likewise, specify preallocated storage for the
1951:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1953:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1954:    the figure below we depict these three local rows and all columns (0-11).

1956: .vb
1957:            0 1 2 3 4 5 6 7 8 9 10 11
1958:           -------------------
1959:    row 3  |  o o o d d d o o o o o o
1960:    row 4  |  o o o d d d o o o o o o
1961:    row 5  |  o o o d d d o o o o o o
1962:           -------------------
1963: .ve
1964:   
1965:    Thus, any entries in the d locations are stored in the d (diagonal) 
1966:    submatrix, and any entries in the o locations are stored in the
1967:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1968:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.

1970:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1971:    plus the diagonal part of the d matrix,
1972:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1973:    In general, for PDE problems in which most nonzeros are near the diagonal,
1974:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1975:    or you will get TERRIBLE performance; see the users' manual chapter on
1976:    matrices.

1978:    Level: intermediate

1980: .keywords: matrix, block, aij, compressed row, sparse, parallel

1982: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1983: @*/

1985: int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,const int d_nnz[],int o_nz,const int o_nnz[],Mat *A)
1986: {
1987:   int ierr,size;

1990:   MatCreate(comm,m,n,M,N,A);
1991:   MPI_Comm_size(comm,&size);
1992:   if (size > 1) {
1993:     MatSetType(*A,MATMPISBAIJ);
1994:     MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
1995:   } else {
1996:     MatSetType(*A,MATSEQSBAIJ);
1997:     MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
1998:   }
1999:   return(0);
2000: }
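
/*
   Editor's sketch, not part of the original source: creating a matrix with a
   block-tridiagonal coupling pattern.  In the upper triangle each block row
   holds at most its diagonal block plus one block to the right, so d_nz=2 and
   o_nz=1 are adequate uniform estimates; mloc must be divisible by bs.  The
   function name is hypothetical.
*/
static int SBAIJExample_Tridiagonal(Mat *newmat)
{
  Mat A;
  int ierr,bs = 3,mloc = 30;

  ierr = MatCreateMPISBAIJ(PETSC_COMM_WORLD,bs,mloc,mloc,PETSC_DETERMINE,PETSC_DETERMINE,
                           2,PETSC_NULL,1,PETSC_NULL,&A);CHKERRQ(ierr);
  /* ... insert values in the upper triangle with MatSetValuesBlocked() ... */
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;
  return 0;
}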


2005: static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2006: {
2007:   Mat          mat;
2008:   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
2009:   int          ierr,len=0;

2012:   *newmat       = 0;
2013:   MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);
2014:   MatSetType(mat,MATMPISBAIJ);
2015:   mat->preallocated = PETSC_TRUE;
2016:   a = (Mat_MPISBAIJ*)mat->data;
2017:   a->bs  = oldmat->bs;
2018:   a->bs2 = oldmat->bs2;
2019:   a->mbs = oldmat->mbs;
2020:   a->nbs = oldmat->nbs;
2021:   a->Mbs = oldmat->Mbs;
2022:   a->Nbs = oldmat->Nbs;
2023: 
2024:   a->rstart       = oldmat->rstart;
2025:   a->rend         = oldmat->rend;
2026:   a->cstart       = oldmat->cstart;
2027:   a->cend         = oldmat->cend;
2028:   a->size         = oldmat->size;
2029:   a->rank         = oldmat->rank;
2030:   a->donotstash   = oldmat->donotstash;
2031:   a->roworiented  = oldmat->roworiented;
2032:   a->rowindices   = 0;
2033:   a->rowvalues    = 0;
2034:   a->getrowactive = PETSC_FALSE;
2035:   a->barray       = 0;
2036:   a->rstart_bs    = oldmat->rstart_bs;
2037:   a->rend_bs      = oldmat->rend_bs;
2038:   a->cstart_bs    = oldmat->cstart_bs;
2039:   a->cend_bs      = oldmat->cend_bs;

2041:   /* hash table stuff */
2042:   a->ht           = 0;
2043:   a->hd           = 0;
2044:   a->ht_size      = 0;
2045:   a->ht_flag      = oldmat->ht_flag;
2046:   a->ht_fact      = oldmat->ht_fact;
2047:   a->ht_total_ct  = 0;
2048:   a->ht_insert_ct = 0;

2050:   PetscMalloc(3*(a->size+2)*sizeof(int),&a->rowners);
2051:   PetscLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
2052:   a->cowners    = a->rowners + a->size + 2;
2053:   a->rowners_bs = a->cowners + a->size + 2;
2054:   PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));
2055:   MatStashCreate_Private(matin->comm,1,&mat->stash);
2056:   MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);
2057:   if (oldmat->colmap) {
2058: #if defined (PETSC_USE_CTABLE)
2059:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2060: #else
2061:     PetscMalloc((a->Nbs)*sizeof(int),&a->colmap);
2062:     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
2063:     PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));
2064: #endif
2065:   } else a->colmap = 0;
2066:   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2067:     PetscMalloc(len*sizeof(int),&a->garray);
2068:     PetscLogObjectMemory(mat,len*sizeof(int));
2069:     PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));
2070:   } else a->garray = 0;
2071: 
2072:   VecDuplicate(oldmat->lvec,&a->lvec);
2073:   PetscLogObjectParent(mat,a->lvec);
2074:   VecScatterCopy(oldmat->Mvctx,&a->Mvctx);

2076:   PetscLogObjectParent(mat,a->Mvctx);
2077:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2078:   PetscLogObjectParent(mat,a->A);
2079:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2080:   PetscLogObjectParent(mat,a->B);
2081:   PetscFListDuplicate(matin->qlist,&mat->qlist);
2082:   *newmat = mat;
2083:   return(0);
2084: }
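
/*
   Editor's sketch, not part of the original source: the duplication above is
   reached through the usual interface; B inherits A's layout, communicator,
   and block size, and with MAT_COPY_VALUES its numerical values as well.  The
   function name is hypothetical.
*/
static int SBAIJExample_Duplicate(Mat A)
{
  Mat B;
  int ierr;

  ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  ierr = MatDestroy(B);CHKERRQ(ierr);
  return 0;
}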

2086:  #include petscsys.h

2090: int MatLoad_MPISBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
2091: {
2092:   Mat          A;
2093:   int          i,nz,ierr,j,rstart,rend,fd;
2094:   PetscScalar  *vals,*buf;
2095:   MPI_Comm     comm = ((PetscObject)viewer)->comm;
2096:   MPI_Status   status;
2097:   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
2098:   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
2099:   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
2100:   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2101:   int          dcount,kmax,k,nzcount,tmp;
2102: 
2104:   PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);

2106:   MPI_Comm_size(comm,&size);
2107:   MPI_Comm_rank(comm,&rank);
2108:   if (!rank) {
2109:     PetscViewerBinaryGetDescriptor(viewer,&fd);
2110:     PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2111:     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not a matrix object in file");
2112:     if (header[3] < 0) {
2113:       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2114:     }
2115:   }

2117:   MPI_Bcast(header+1,3,MPI_INT,0,comm);
2118:   M = header[1]; N = header[2];

2120:   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");

2122:   /* 
2123:      This code adds extra rows to make sure the number of rows is 
2124:      divisible by the blocksize
2125:   */
2126:   Mbs        = M/bs;
2127:   extra_rows = bs - M + bs*(Mbs);
2128:   if (extra_rows == bs) extra_rows = 0;
2129:   else                  Mbs++;
2130:   if (extra_rows && !rank) {
2131:     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2132:   }

2134:   /* determine ownership of all rows */
2135:   mbs        = Mbs/size + ((Mbs % size) > rank);
2136:   m          = mbs*bs;
2137:   PetscMalloc(2*(size+2)*sizeof(int),&rowners);
2138:   browners   = rowners + size + 1;
2139:   MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2140:   rowners[0] = 0;
2141:   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2142:   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2143:   rstart = rowners[rank];
2144:   rend   = rowners[rank+1];
2145: 
2146:   /* distribute row lengths to all processors */
2147:   PetscMalloc((rend-rstart)*bs*sizeof(int),&locrowlens);
2148:   if (!rank) {
2149:     PetscMalloc((M+extra_rows)*sizeof(int),&rowlengths);
2150:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2151:     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2152:     PetscMalloc(size*sizeof(int),&sndcounts);
2153:     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2154:     MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);
2155:     PetscFree(sndcounts);
2156:   } else {
2157:     MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);
2158:   }
2159: 
2160:   if (!rank) {   /* procs[0] */
2161:     /* calculate the number of nonzeros on each processor */
2162:     PetscMalloc(size*sizeof(int),&procsnz);
2163:     PetscMemzero(procsnz,size*sizeof(int));
2164:     for (i=0; i<size; i++) {
2165:       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2166:         procsnz[i] += rowlengths[j];
2167:       }
2168:     }
2169:     PetscFree(rowlengths);
2170: 
2171:     /* determine max buffer needed and allocate it */
2172:     maxnz = 0;
2173:     for (i=0; i<size; i++) {
2174:       maxnz = PetscMax(maxnz,procsnz[i]);
2175:     }
2176:     PetscMalloc(maxnz*sizeof(int),&cols);

2178:     /* read in my part of the matrix column indices  */
2179:     nz     = procsnz[0];
2180:     PetscMalloc(nz*sizeof(int),&ibuf);
2181:     mycols = ibuf;
2182:     if (size == 1)  nz -= extra_rows;
2183:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2184:     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }

2186:     /* read in the column indices of every other processor (except the last) and ship them off */
2187:     for (i=1; i<size-1; i++) {
2188:       nz   = procsnz[i];
2189:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2190:       MPI_Send(cols,nz,MPI_INT,i,tag,comm);
2191:     }
2192:     /* read in the stuff for the last proc */
2193:     if (size != 1) {
2194:       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2195:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2196:       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2197:       MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);
2198:     }
2199:     PetscFree(cols);
2200:   } else {  /* procs[i], i>0 */
2201:     /* determine buffer space needed for message */
2202:     nz = 0;
2203:     for (i=0; i<m; i++) {
2204:       nz += locrowlens[i];
2205:     }
2206:     PetscMalloc(nz*sizeof(int),&ibuf);
2207:     mycols = ibuf;
2208:     /* receive message of column indices*/
2209:     MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);
2210:     MPI_Get_count(&status,MPI_INT,&maxnz);
2211:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"received message length does not match expected number of nonzeros");
2212:   }

2214:   /* loop over local rows, determining number of off diagonal entries */
2215:   PetscMalloc(2*(rend-rstart+1)*sizeof(int),&dlens);
2216:   odlens   = dlens + (rend-rstart);
2217:   PetscMalloc(3*Mbs*sizeof(int),&mask);
2218:   PetscMemzero(mask,3*Mbs*sizeof(int));
2219:   masked1  = mask    + Mbs;
2220:   masked2  = masked1 + Mbs;
2221:   rowcount = 0; nzcount = 0;
2222:   for (i=0; i<mbs; i++) {
2223:     dcount  = 0;
2224:     odcount = 0;
2225:     for (j=0; j<bs; j++) {
2226:       kmax = locrowlens[rowcount];
2227:       for (k=0; k<kmax; k++) {
2228:         tmp = mycols[nzcount++]/bs; /* block col. index */
2229:         if (!mask[tmp]) {
2230:           mask[tmp] = 1;
2231:           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2232:           else masked1[dcount++] = tmp; /* entry in diag portion */
2233:         }
2234:       }
2235:       rowcount++;
2236:     }
2237: 
2238:     dlens[i]  = dcount;  /* d_nnz[i] */
2239:     odlens[i] = odcount; /* o_nnz[i] */

2241:     /* zero out the mask elements we set */
2242:     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2243:     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2244:   }
2245: 
2246:   /* create our matrix */
2247:   MatCreate(comm,m,m,PETSC_DETERMINE,PETSC_DETERMINE,&A);
2248:   MatSetType(A,type);
2249:   MatMPISBAIJSetPreallocation(A,bs,0,dlens,0,odlens);
2250:   MatSetOption(A,MAT_COLUMNS_SORTED);
2251: 
2252:   if (!rank) {
2253:     PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2254:     /* read in my part of the matrix numerical values  */
2255:     nz = procsnz[0];
2256:     vals = buf;
2257:     mycols = ibuf;
2258:     if (size == 1)  nz -= extra_rows;
2259:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2260:     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }

2262:     /* insert into matrix */
2263:     jj      = rstart*bs;
2264:     for (i=0; i<m; i++) {
2265:       MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2266:       mycols += locrowlens[i];
2267:       vals   += locrowlens[i];
2268:       jj++;
2269:     }

2271:     /* read in other processors (except the last one) and ship out */
2272:     for (i=1; i<size-1; i++) {
2273:       nz   = procsnz[i];
2274:       vals = buf;
2275:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2276:       MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
2277:     }
2278:     /* the last proc */
2279:     if (size != 1){
2280:       nz   = procsnz[size-1] - extra_rows;  /* i equals size-1 after the loop; be explicit */
2281:       vals = buf;
2282:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2283:       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2284:       MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);
2285:     }
2286:     PetscFree(procsnz);

2288:   } else {
2289:     /* receive numeric values */
2290:     PetscMalloc(nz*sizeof(PetscScalar),&buf);

2292:     /* receive message of values*/
2293:     vals   = buf;
2294:     mycols = ibuf;
2295:     MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
2296:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2297:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"received message length does not match expected number of nonzeros");

2299:     /* insert into matrix */
2300:     jj      = rstart*bs;
2301:     for (i=0; i<m; i++) {
2302:       MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2303:       mycols += locrowlens[i];
2304:       vals   += locrowlens[i];
2305:       jj++;
2306:     }
2307:   }

2309:   PetscFree(locrowlens);
2310:   PetscFree(buf);
2311:   PetscFree(ibuf);
2312:   PetscFree(rowners);
2313:   PetscFree(dlens);
2314:   PetscFree(mask);
2315:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2316:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2317:   *newmat = A;
2318:   return(0);
2319: }
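
/*
   Editor's sketch, not part of the original source: loading through the
   routine above, assuming this file's MatLoad(viewer,type,&mat) calling
   sequence and the era's read-only binary viewer flag.  The file name and
   function name are placeholders; -matload_block_size sets the block size
   used while reading.
*/
static int SBAIJExample_Load(Mat *newmat)
{
  PetscViewer viewer;
  int         ierr;

  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",PETSC_BINARY_RDONLY,&viewer);CHKERRQ(ierr);
  ierr = MatLoad(viewer,MATMPISBAIJ,newmat);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
  return 0;
}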

2323: /*@
2324:    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.

2326:    Input Parameters:
2327: .  mat  - the matrix
2328: .  fact - factor

2330:    Collective on Mat

2332:    Level: advanced

2334:   Notes:
2335:    This can also be set by the command line option: -mat_use_hash_table fact

2337: .keywords: matrix, hashtable, factor, HT

2339: .seealso: MatSetOption()
2340: @*/
2341: int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2342: {
2344:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
2345:   /* return(0); */
2346: }

2350: int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2351: {
2352:   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2353:   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2354:   PetscReal    atmp;
2355:   PetscReal    *work,*svalues,*rvalues;
2356:   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2357:   int          rank,size,*rowners_bs,dest,count,source;
2358:   PetscScalar  *va;
2359:   MatScalar    *ba;
2360:   MPI_Status   stat;

2363:   MatGetRowMax(a->A,v);
2364:   VecGetArray(v,&va);

2366:   MPI_Comm_size(A->comm,&size);
2367:   MPI_Comm_rank(A->comm,&rank);

2369:   bs   = a->bs;
2370:   mbs  = a->mbs;
2371:   Mbs  = a->Mbs;
2372:   ba   = b->a;
2373:   bi   = b->i;
2374:   bj   = b->j;
2375:   /*
2376:   PetscSynchronizedPrintf(A->comm,"[%d] M: %d, bs: %d, mbs: %d \n",rank,bs*Mbs,bs,mbs); 
2377:   PetscSynchronizedFlush(A->comm);
2378:   */

2380:   /* find ownerships */
2381:   rowners_bs = a->rowners_bs;
2382:   /*
2383:   if (!rank){
2384:     for (i=0; i<size+1; i++) PetscPrintf(PETSC_COMM_SELF," rowners_bs[%d]: %d\n",i,rowners_bs[i]); 
2385:   }
2386:   */

2388:   /* each proc creates an array to be distributed */
2389:   PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2390:   PetscMemzero(work,bs*Mbs*sizeof(PetscReal));

2392:   /* row_max for B */
2393:   if (rank != size-1){
2394:     for (i=0; i<mbs; i++) {
2395:       ncols = bi[1] - bi[0]; bi++;
2396:       brow  = bs*i;
2397:       for (j=0; j<ncols; j++){
2398:         bcol = bs*(*bj);
2399:         for (kcol=0; kcol<bs; kcol++){
2400:           col = bcol + kcol;                 /* local col index */
2401:           col += rowners_bs[rank+1];      /* global col index */
2402:           /* PetscPrintf(PETSC_COMM_SELF,"[%d], col: %d\n",rank,col); */
2403:           for (krow=0; krow<bs; krow++){
2404:             atmp = PetscAbsScalar(*ba); ba++;
2405:             row = brow + krow;    /* local row index */
2406:             /* printf("val[%d,%d]: %g\n",row,col,atmp); */
2407:             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2408:             if (work[col] < atmp) work[col] = atmp;
2409:           }
2410:         }
2411:         bj++;
2412:       }
2413:     }
2414:     /*
2415:       PetscPrintf(PETSC_COMM_SELF,"[%d], work: ",rank);
2416:       for (i=0; i<bs*Mbs; i++) PetscPrintf(PETSC_COMM_SELF,"%g ",work[i]);
2417:       PetscPrintf(PETSC_COMM_SELF,"[%d]: \n");
2418:       */

2420:     /* send values to its owners */
2421:     for (dest=rank+1; dest<size; dest++){
2422:       svalues = work + rowners_bs[dest];
2423:       count   = rowners_bs[dest+1]-rowners_bs[dest];
2424:       MPI_Send(svalues,count,MPIU_REAL,dest,rank,A->comm);
2425:       /*
2426:       PetscSynchronizedPrintf(A->comm,"[%d] sends %d values to [%d]: %g, %g, %g, %g\n",rank,count,dest,svalues[0],svalues[1],svalues[2],svalues[3]); 
2427:       PetscSynchronizedFlush(A->comm);
2428:       */
2429:     }
2430:   }
2431: 
2432:   /* receive values */
2433:   if (rank){
2434:     rvalues = work;
2435:     count   = rowners_bs[rank+1]-rowners_bs[rank];
2436:     for (source=0; source<rank; source++){
2437:       MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,A->comm,&stat);
2438:       /* process values */
2439:       for (i=0; i<count; i++){
2440:         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2441:       }
2442:       /*
2443:       PetscSynchronizedPrintf(A->comm,"[%d] received %d values from [%d]: %g, %g, %g, %g \n",rank,count,stat.MPI_SOURCE,rvalues[0],rvalues[1],rvalues[2],rvalues[3]);  
2444:       PetscSynchronizedFlush(A->comm);
2445:       */
2446:     }
2447:   }

2449:   VecRestoreArray(v,&va);
2450:   PetscFree(work);
2451:   return(0);
2452: }
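
/*
   Editor's sketch, not part of the original source: calling the routine above.
   The vector must share the matrix's row layout; each entry receives the
   largest-magnitude value of the corresponding (implicitly full) row.  Assumes
   A lives on PETSC_COMM_WORLD; the function name is hypothetical.
*/
static int SBAIJExample_RowMax(Mat A)
{
  Vec v;
  int ierr,m,n;

  ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);
  ierr = VecCreateMPI(PETSC_COMM_WORLD,m,PETSC_DETERMINE,&v);CHKERRQ(ierr);
  ierr = MatGetRowMax(A,v);CHKERRQ(ierr);
  ierr = VecDestroy(v);CHKERRQ(ierr);
  return 0;
}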

2456: int MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,int its,int lits,Vec xx)
2457: {
2458:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2459:   int            ierr,mbs=mat->mbs,bs=mat->bs;
2460:   PetscScalar    mone=-1.0,*x,*b,*ptr,zero=0.0;
2461:   Vec            bb1;
2462: 
2464:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %d and local its %d both positive",its,lits);
2465:   if (bs > 1)
2466:     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2468:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2469:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2470:       (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2471:       its--;
2472:     }

2474:     VecDuplicate(bb,&bb1);
2475:     while (its--){
2476: 
2477:       /* lower triangular part: slvec0b = - B^T*xx */
2478:       (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2479: 
2480:       /* copy xx into slvec0a */
2481:       VecGetArray(mat->slvec0,&ptr);
2482:       VecGetArray(xx,&x);
2483:       PetscMemcpy(ptr,x,bs*mbs*sizeof(PetscScalar));
2484:       VecRestoreArray(mat->slvec0,&ptr);

2486:       VecScale(&mone,mat->slvec0);

2488:       /* copy bb into slvec1a */
2489:       VecGetArray(mat->slvec1,&ptr);
2490:       VecGetArray(bb,&b);
2491:       PetscMemcpy(ptr,b,bs*mbs*sizeof(PetscScalar));
2492:       VecRestoreArray(mat->slvec1,&ptr);

2494:       /* set slvec1b = 0 */
2495:       VecSet(&zero,mat->slvec1b);

2497:       VecScatterBegin(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);
2498:       VecRestoreArray(xx,&x);
2499:       VecRestoreArray(bb,&b);
2500:       VecScatterEnd(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);

2502:       /* upper triangular part: bb1 = bb1 - B*x */
2503:       (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2504: 
2505:       /* local diagonal sweep */
2506:       (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2507:     }
2508:     VecDestroy(bb1);
2509:   } else {
2510:     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2511:   }
2512:   return(0);
2513: }
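
/*
   Editor's sketch, not part of the original source: driving the sweep above.
   Assumes the public MatRelax() passes both global and local iteration
   counts, matching the calling sequence of the relax routines in this file,
   and uses the era's pointer-argument VecSet().  The function name is
   hypothetical.
*/
static int SBAIJExample_SSOR(Mat A,Vec b,Vec x)
{
  PetscScalar zero = 0.0;
  int         ierr;

  ierr = VecSet(&zero,x);CHKERRQ(ierr);
  /* five symmetric local sweeps, omega = 1.0, no diagonal shift */
  ierr = MatRelax(A,b,1.0,SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS,0.0,5,1,x);CHKERRQ(ierr);
  return 0;
}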

2517: int MatRelax_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,int its,int lits,Vec xx)
2518: {
2519:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2520:   int            ierr;
2521:   PetscScalar    mone=-1.0;
2522:   Vec            lvec1,bb1;
2523: 
2525:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %d and local its %d both positive",its,lits);
2526:   if (mat->bs > 1)
2527:     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2529:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2530:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2531:       (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2532:       its--;
2533:     }

2535:     VecDuplicate(mat->lvec,&lvec1);
2536:     VecDuplicate(bb,&bb1);
2537:     while (its--){
2538:       VecScatterBegin(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);
2539: 
2540:       /* lower triangular part: bb1 = bb - B^T*xx */
2541:       (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2542:       VecScale(&mone,lvec1);

2544:       VecScatterEnd(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);
2545:       VecCopy(bb,bb1);
2546:       VecScatterBegin(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);

2548:       /* upper triangular part: bb1 = bb1 - B*x */
2549:       VecScale(&mone,mat->lvec);
2550:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);

2552:       VecScatterEnd(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);
2553: 
2554:       /* diagonal sweep */
2555:       (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2556:     }
2557:     VecDestroy(lvec1);
2558:     VecDestroy(bb1);
2559:   } else {
2560:     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2561:   }
2562:   return(0);
2563: }