Actual source code: mpisbaij.c

 2:  #include "src/mat/impls/baij/mpi/mpibaij.h"
 3:  #include "mpisbaij.h"
 4:  #include "src/mat/impls/sbaij/seq/sbaij.h"

  6: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ(Mat);
  7: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ_2comm(Mat);
  8: EXTERN PetscErrorCode DisAssemble_MPISBAIJ(Mat);
  9: EXTERN PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat,PetscInt,IS[],PetscInt);
 10: EXTERN PetscErrorCode MatGetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
 11: EXTERN PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
 12: EXTERN PetscErrorCode MatSetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt [],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
 13: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
 14: EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
 15: EXTERN PetscErrorCode MatGetRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 16: EXTERN PetscErrorCode MatRestoreRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 17: EXTERN PetscErrorCode MatPrintHelp_SeqSBAIJ(Mat);
 18: EXTERN PetscErrorCode MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
 19: EXTERN PetscErrorCode MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
 20: EXTERN PetscErrorCode MatGetRowMax_MPISBAIJ(Mat,Vec);
 21: EXTERN PetscErrorCode MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,PetscInt,PetscInt,Vec);

 23: /*  UGLY, ugly, ugly
 24:    When MatScalar == PetscScalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
 25:    not exist as a separate routine; it is #defined to MatSetValuesBlocked_MPISBAIJ() below.
 26:    Otherwise ..._MatScalar() takes matrix elements already in single precision and inserts
 27:    them into the single-precision data structure, while MatSetValuesBlocked_MPISBAIJ()
 28:    converts the entries to single precision and then calls ..._MatScalar() to store them.
 29: */
 30: #if defined(PETSC_USE_MAT_SINGLE)
 31: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 32: EXTERN PetscErrorCode MatSetValues_MPISBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 33: EXTERN PetscErrorCode MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 34: EXTERN PetscErrorCode MatSetValues_MPISBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 35: EXTERN PetscErrorCode MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 36: #else
 37: #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
 38: #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
 39: #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
 40: #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
 41: #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
 42: #endif
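/* Illustration: in a default build (MatScalar == PetscScalar) a call such as
   MatSetValues_MPISBAIJ_MatScalar(mat,1,&i,1,&j,&v,addv) is, through the
   defines above, literally a call to MatSetValues_MPISBAIJ(mat,1,&i,1,&j,&v,addv). */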

 47: PetscErrorCode MatStoreValues_MPISBAIJ(Mat mat)
 48: {
 49:   Mat_MPISBAIJ   *aij = (Mat_MPISBAIJ *)mat->data;

 53:   MatStoreValues(aij->A);
 54:   MatStoreValues(aij->B);
 55:   return(0);
 56: }

 62: PetscErrorCode MatRetrieveValues_MPISBAIJ(Mat mat)
 63: {
 64:   Mat_MPISBAIJ   *aij = (Mat_MPISBAIJ *)mat->data;

 68:   MatRetrieveValues(aij->A);
 69:   MatRetrieveValues(aij->B);
 70:   return(0);
 71: }


 75: #define CHUNKSIZE  10

 77: #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
 78: { \
 79:  \
 80:     brow = row/bs;  \
 81:     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
 82:     rmax = aimax[brow]; nrow = ailen[brow]; \
 83:       bcol = col/bs; \
 84:       ridx = row % bs; cidx = col % bs; \
 85:       low = 0; high = nrow; \
 86:       while (high-low > 3) { \
 87:         t = (low+high)/2; \
 88:         if (rp[t] > bcol) high = t; \
 89:         else              low  = t; \
 90:       } \
 91:       for (_i=low; _i<high; _i++) { \
 92:         if (rp[_i] > bcol) break; \
 93:         if (rp[_i] == bcol) { \
 94:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
 95:           if (addv == ADD_VALUES) *bap += value;  \
 96:           else                    *bap  = value;  \
 97:           goto a_noinsert; \
 98:         } \
 99:       } \
100:       if (a->nonew == 1) goto a_noinsert; \
101:       else if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
102:       if (nrow >= rmax) { \
103:         /* there is no extra room in row, therefore enlarge */ \
104:         PetscInt       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
105:         MatScalar *new_a; \
106:  \
107:         if (a->nonew == -2) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) in the matrix", row, col); \
108:  \
109:         /* malloc new storage space */ \
110:         len   = new_nz*(sizeof(PetscInt)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(PetscInt); \
111:         PetscMalloc(len,&new_a); \
112:         new_j = (PetscInt*)(new_a + bs2*new_nz); \
113:         new_i = new_j + new_nz; \
114:  \
115:         /* copy over old data into new slots */ \
116:         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
117:         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
118:         PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(PetscInt)); \
119:         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
120:         PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(PetscInt)); \
121:         PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar)); \
122:         PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar)); \
123:         PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
124:                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));  \
125:         /* free up old matrix storage */ \
126:         PetscFree(a->a);  \
127:         if (!a->singlemalloc) { \
128:           PetscFree(a->i); \
129:           PetscFree(a->j);\
130:         } \
131:         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
132:         a->singlemalloc = PETSC_TRUE; \
133:  \
134:         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
135:         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
136:         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(PetscInt) + bs2*sizeof(MatScalar))); \
137:         a->maxnz += bs2*CHUNKSIZE; \
138:         a->reallocs++; \
139:         a->nz++; \
140:       } \
141:       N = nrow++ - 1;  \
142:       /* shift up all the later entries in this row */ \
143:       for (ii=N; ii>=_i; ii--) { \
144:         rp[ii+1] = rp[ii]; \
145:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
146:       } \
147:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); }  \
148:       rp[_i]                      = bcol;  \
149:       ap[bs2*_i + bs*cidx + ridx] = value;  \
150:       a_noinsert:; \
151:     ailen[brow] = nrow; \
152: } 
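
/* Addressing within a block: values are stored column-major inside each bs x bs
   block, so the scalar for (row,col) in block slot _i lives at
   ap[bs2*_i + bs*cidx + ridx] with ridx = row%bs, cidx = col%bs.
   Worked example with bs=2: (row,col) = (3,4) gives brow=1, bcol=2, ridx=1,
   cidx=0, i.e. offset 4*_i + 2*0 + 1 inside the block found by the search. */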
153: #ifndef MatSetValues_SeqSBAIJ_B_Private
154: #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
155: { \
156:     brow = row/bs;  \
157:     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
158:     rmax = bimax[brow]; nrow = bilen[brow]; \
159:       bcol = col/bs; \
160:       ridx = row % bs; cidx = col % bs; \
161:       low = 0; high = nrow; \
162:       while (high-low > 3) { \
163:         t = (low+high)/2; \
164:         if (rp[t] > bcol) high = t; \
165:         else              low  = t; \
166:       } \
167:       for (_i=low; _i<high; _i++) { \
168:         if (rp[_i] > bcol) break; \
169:         if (rp[_i] == bcol) { \
170:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
171:           if (addv == ADD_VALUES) *bap += value;  \
172:           else                    *bap  = value;  \
173:           goto b_noinsert; \
174:         } \
175:       } \
176:       if (b->nonew == 1) goto b_noinsert; \
177:       else if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
178:       if (nrow >= rmax) { \
179:         /* there is no extra room in row, therefore enlarge */ \
180:         PetscInt  new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
181:         MatScalar *new_a; \
182:  \
183:         if (b->nonew == -2) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) in the matrix", row, col); \
184:  \
185:         /* malloc new storage space */ \
186:         len   = new_nz*(sizeof(PetscInt)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(PetscInt); \
187:         PetscMalloc(len,&new_a); \
188:         new_j = (PetscInt*)(new_a + bs2*new_nz); \
189:         new_i = new_j + new_nz; \
190:  \
191:         /* copy over old data into new slots */ \
192:         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
193:         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
194:         PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(PetscInt)); \
195:         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
196:         PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(PetscInt)); \
197:         PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar)); \
198:         PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar)); \
199:         PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
200:                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));  \
201:         /* free up old matrix storage */ \
202:         PetscFree(b->a);  \
203:         if (!b->singlemalloc) { \
204:           PetscFree(b->i); \
205:           PetscFree(b->j); \
206:         } \
207:         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
208:         b->singlemalloc = PETSC_TRUE; \
209:  \
210:         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
211:         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
212:         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(PetscInt) + bs2*sizeof(MatScalar))); \
213:         b->maxnz += bs2*CHUNKSIZE; \
214:         b->reallocs++; \
215:         b->nz++; \
216:       } \
217:       N = nrow++ - 1;  \
218:       /* shift up all the later entries in this row */ \
219:       for (ii=N; ii>=_i; ii--) { \
220:         rp[ii+1] = rp[ii]; \
221:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
222:       } \
223:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));}  \
224:       rp[_i]                      = bcol;  \
225:       ap[bs2*_i + bs*cidx + ridx] = value;  \
226:       b_noinsert:; \
227:     bilen[brow] = nrow; \
228: } 
229: #endif

231: #if defined(PETSC_USE_MAT_SINGLE)
234: PetscErrorCode MatSetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
235: {
236:   Mat_MPISBAIJ   *b = (Mat_MPISBAIJ*)mat->data;
238:   PetscInt       i,N = m*n;
239:   MatScalar      *vsingle;

242:   if (N > b->setvalueslen) {
243:     if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
244:     PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
245:     b->setvalueslen  = N;
246:   }
247:   vsingle = b->setvaluescopy;

249:   for (i=0; i<N; i++) {
250:     vsingle[i] = v[i];
251:   }
252:   MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
253:   return(0);
254: }

258: PetscErrorCode MatSetValuesBlocked_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
259: {
260:   Mat_MPIBAIJ    *b = (Mat_MPIBAIJ*)mat->data;
262:   PetscInt       i,N = m*n*b->bs2;
263:   MatScalar      *vsingle;

266:   if (N > b->setvalueslen) {
267:     if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
268:     PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
269:     b->setvalueslen  = N;
270:   }
271:   vsingle = b->setvaluescopy;
272:   for (i=0; i<N; i++) {
273:     vsingle[i] = v[i];
274:   }
275:   MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
276:   return(0);
277: }

281: PetscErrorCode MatSetValues_MPISBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
282: {
283:   Mat_MPIBAIJ    *b = (Mat_MPIBAIJ*)mat->data;
285:   PetscInt       i,N = m*n;
286:   MatScalar      *vsingle;

289:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
290:   /* return(0); */
291: }

295: PetscErrorCode MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
296: {
297:   Mat_MPIBAIJ    *b = (Mat_MPIBAIJ*)mat->data;
299:   PetscInt       i,N = m*n*b->bs2;
300:   MatScalar      *vsingle;

303:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
304:   /* return(0); */
305: }
306: #endif

308: /* Only add/insert a(i,j) with i<=j (blocks). 
309:    Any a(i,j) with i>j input by the user is ignored.
310: */
313: PetscErrorCode MatSetValues_MPISBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
314: {
315:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
316:   MatScalar      value;
317:   PetscTruth     roworiented = baij->roworiented;
319:   PetscInt       i,j,row,col;
320:   PetscInt       rstart_orig=baij->rstart_bs;
321:   PetscInt       rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
322:   PetscInt       cend_orig=baij->cend_bs,bs=mat->bs;

324:   /* Some variables required by the macros below */
325:   Mat            A = baij->A;
326:   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(A)->data;
327:   PetscInt       *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
328:   MatScalar      *aa=a->a;

330:   Mat            B = baij->B;
331:   Mat_SeqBAIJ   *b = (Mat_SeqBAIJ*)(B)->data;
332:   PetscInt      *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
333:   MatScalar     *ba=b->a;

335:   PetscInt      *rp,ii,nrow,_i,rmax,N,brow,bcol;
336:   PetscInt      low,high,t,ridx,cidx,bs2=a->bs2;
337:   MatScalar     *ap,*bap;

339:   /* for stash */
340:   PetscInt      n_loc, *in_loc=0;
341:   MatScalar     *v_loc=0;


345:   if(!baij->donotstash){
346:     PetscMalloc(n*sizeof(PetscInt),&in_loc);
347:     PetscMalloc(n*sizeof(MatScalar),&v_loc);
348:   }

350:   for (i=0; i<m; i++) {
351:     if (im[i] < 0) continue;
352: #if defined(PETSC_USE_BOPT_g)
353:     if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->M-1);
354: #endif
355:     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
356:       row = im[i] - rstart_orig;              /* local row index */
357:       for (j=0; j<n; j++) {
358:         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
359:         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
360:           col = in[j] - cstart_orig;          /* local col index */
361:           brow = row/bs; bcol = col/bs;
362:           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
363:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
364:           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
365:           /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
366:         } else if (in[j] < 0) continue;
367: #if defined(PETSC_USE_BOPT_g)
368:         else if (in[j] >= mat->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->N-1);}
369: #endif
370:         else {  /* off-diag entry (B) */
371:           if (mat->was_assembled) {
372:             if (!baij->colmap) {
373:               CreateColmap_MPIBAIJ_Private(mat);
374:             }
375: #if defined (PETSC_USE_CTABLE)
376:             PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
377:             col  = col - 1;
378: #else
379:             col = baij->colmap[in[j]/bs] - 1;
380: #endif
381:             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
382:               DisAssemble_MPISBAIJ(mat);
383:               col =  in[j];
384:               /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
385:               B = baij->B;
386:               b = (Mat_SeqBAIJ*)(B)->data;
387:               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
388:               ba=b->a;
389:             } else col += in[j]%bs;
390:           } else col = in[j];
391:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
392:           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
393:           /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
394:         }
395:       }
396:     } else {  /* off processor entry */
397:       if (!baij->donotstash) {
398:         n_loc = 0;
399:         for (j=0; j<n; j++){
400:           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
401:           in_loc[n_loc] = in[j];
402:           if (roworiented) {
403:             v_loc[n_loc] = v[i*n+j];
404:           } else {
405:             v_loc[n_loc] = v[j*m+i];
406:           }
407:           n_loc++;
408:         }
409:         MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);
410:       }
411:     }
412:   }

414:   if(!baij->donotstash){
415:     PetscFree(in_loc);
416:     PetscFree(v_loc);
417:   }
418:   return(0);
419: }

423: PetscErrorCode MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
424: {
425:   Mat_MPISBAIJ    *baij = (Mat_MPISBAIJ*)mat->data;
426:   const MatScalar *value;
427:   MatScalar       *barray=baij->barray;
428:   PetscTruth      roworiented = baij->roworiented;
429:   PetscErrorCode  ierr;
430:   PetscInt        i,j,ii,jj,row,col,rstart=baij->rstart;
431:   PetscInt        rend=baij->rend,cstart=baij->cstart,stepval;
432:   PetscInt        cend=baij->cend,bs=mat->bs,bs2=baij->bs2;

435:   if(!barray) {
436:     PetscMalloc(bs2*sizeof(MatScalar),&barray);
437:     baij->barray = barray;
438:   }

440:   if (roworiented) {
441:     stepval = (n-1)*bs;
442:   } else {
443:     stepval = (m-1)*bs;
444:   }
445:   for (i=0; i<m; i++) {
446:     if (im[i] < 0) continue;
447: #if defined(PETSC_USE_BOPT_g)
448:     if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
449: #endif
450:     if (im[i] >= rstart && im[i] < rend) {
451:       row = im[i] - rstart;
452:       for (j=0; j<n; j++) {
453:         /* If there is a single block column (row-oriented) or a single block row (column-oriented), no copy is required */
454:         if ((roworiented) && (n == 1)) {
455:           barray = (MatScalar*) v + i*bs2;
456:         } else if((!roworiented) && (m == 1)) {
457:           barray = (MatScalar*) v + j*bs2;
458:         } else { /* Here a copy is required */
459:           if (roworiented) {
460:             value = v + i*(stepval+bs)*bs + j*bs;
461:           } else {
462:             value = v + j*(stepval+bs)*bs + i*bs;
463:           }
464:           for (ii=0; ii<bs; ii++,value+=stepval) {
465:             for (jj=0; jj<bs; jj++) {
466:               *barray++  = *value++;
467:             }
468:           }
469:           barray -=bs2;
470:         }
471: 
472:         if (in[j] >= cstart && in[j] < cend){
473:           col  = in[j] - cstart;
474:           MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
475:         }
476:         else if (in[j] < 0) continue;
477: #if defined(PETSC_USE_BOPT_g)
478:         else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
479: #endif
480:         else {
481:           if (mat->was_assembled) {
482:             if (!baij->colmap) {
483:               CreateColmap_MPIBAIJ_Private(mat);
484:             }

486: #if defined(PETSC_USE_BOPT_g)
487: #if defined (PETSC_USE_CTABLE)
488:             { PetscInt data;
489:               PetscTableFind(baij->colmap,in[j]+1,&data);
490:               if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
491:             }
492: #else
493:             if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
494: #endif
495: #endif
496: #if defined (PETSC_USE_CTABLE)
497:             PetscTableFind(baij->colmap,in[j]+1,&col);
498:             col  = (col - 1)/bs;
499: #else
500:             col = (baij->colmap[in[j]] - 1)/bs;
501: #endif
502:             if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
503:               DisAssemble_MPISBAIJ(mat);
504:               col =  in[j];
505:             }
506:           }
507:           else col = in[j];
508:           MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
509:         }
510:       }
511:     } else {
512:       if (!baij->donotstash) {
513:         if (roworiented) {
514:           MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
515:         } else {
516:           MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
517:         }
518:       }
519:     }
520:   }
521:   return(0);
522: }

524: #define HASH_KEY 0.6180339887
525: #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(PetscInt)((size)*(tmp-(PetscInt)tmp)))
526: /* #define HASH(size,key) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
527: /* #define HASH(size,key,tmp) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
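
/* The hash is the classic multiplicative scheme: take the fractional part of
   key*0.6180339887 (the golden-ratio constant) and scale it by the table size.
   Worked example: HASH(10,7,tmp) sets tmp = 7*0.6180339887 = 4.3262...,
   whose fractional part is 0.3262..., so the slot is (PetscInt)(10*0.3262...) = 3. */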
530: PetscErrorCode MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
531: {
533:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
534:   /* return(0); */
535: }

539: PetscErrorCode MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
540: {
542:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
543:   /* return(0); */
544: }

548: PetscErrorCode MatGetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
549: {
550:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
552:   PetscInt       bs=mat->bs,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
553:   PetscInt       bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;

556:   for (i=0; i<m; i++) {
557:     if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);
558:     if (idxm[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->M-1);
559:     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
560:       row = idxm[i] - bsrstart;
561:       for (j=0; j<n; j++) {
562:         if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column %D",idxn[j]);
563:         if (idxn[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->N-1);
564:         if (idxn[j] >= bscstart && idxn[j] < bscend){
565:           col = idxn[j] - bscstart;
566:           MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
567:         } else {
568:           if (!baij->colmap) {
569:             CreateColmap_MPIBAIJ_Private(mat);
570:           }
571: #if defined (PETSC_USE_CTABLE)
572:           PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
573:           data --;
574: #else
575:           data = baij->colmap[idxn[j]/bs]-1;
576: #endif
577:           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
578:           else {
579:             col  = data + idxn[j]%bs;
580:             MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
581:           }
582:         }
583:       }
584:     } else {
585:       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
586:     }
587:   }
588:   return(0);
589: }

593: PetscErrorCode MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
594: {
595:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
597:   PetscReal      sum[2],*lnorm2;

600:   if (baij->size == 1) {
601:      MatNorm(baij->A,type,norm);
602:   } else {
603:     if (type == NORM_FROBENIUS) {
604:       PetscMalloc(2*sizeof(PetscReal),&lnorm2);
605:        MatNorm(baij->A,type,lnorm2);
606:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
607:        MatNorm(baij->B,type,lnorm2);
608:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
609:       MPI_Allreduce(lnorm2,&sum,2,MPIU_REAL,MPI_SUM,mat->comm);
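      /* Only the upper triangle is stored: every off-diagonal entry held in B
         represents both a(i,j) and a(j,i), hence the factor of 2 below. */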
610:       *norm = sqrt(sum[0] + 2*sum[1]);
611:       PetscFree(lnorm2);
612:     } else {
613:       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
614:     }
615:   }
616:   return(0);
617: }

619: /*
620:   Creates and populates the hash table.
621:   This table is created only once.
622:   If new entries need to be added to the matrix
623:   then the hash table has to be destroyed and
624:   recreated.
625: */
628: PetscErrorCode MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
629: {
631:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
632:   /* return(0); */
633: }

637: PetscErrorCode MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
638: {
639:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
641:   PetscInt       nstash,reallocs;
642:   InsertMode     addv;

645:   if (baij->donotstash) {
646:     return(0);
647:   }

649:   /* make sure all processors are in either INSERT_VALUES or ADD_VALUES mode */
650:   MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);
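  /* Why a bitwise OR detects a mix (assuming the usual enum values
     NOT_SET_VALUES=0, INSERT_VALUES=1, ADD_VALUES=2): ranks with no cached
     values contribute 0, so the reduction yields 1 (all inserted), 2 (all
     added), or 3 == (ADD_VALUES|INSERT_VALUES) exactly when modes were mixed. */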
651:   if (addv == (ADD_VALUES|INSERT_VALUES)) {
652:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
653:   }
654:   mat->insertmode = addv; /* in case this processor had no cache */

656:   MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);
657:   MatStashScatterBegin_Private(&mat->bstash,baij->rowners);
658:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
659:   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
660:   MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
661:   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
662:   return(0);
663: }

667: PetscErrorCode MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
668: {
669:   Mat_MPISBAIJ   *baij=(Mat_MPISBAIJ*)mat->data;
670:   Mat_SeqSBAIJ   *a=(Mat_SeqSBAIJ*)baij->A->data;
671:   Mat_SeqBAIJ    *b=(Mat_SeqBAIJ*)baij->B->data;
673:   PetscInt       i,j,rstart,ncols,flg,bs2=baij->bs2;
674:   PetscInt       *row,*col,other_disassembled;
675:   PetscMPIInt    n;
676:   PetscTruth     r1,r2,r3;
677:   MatScalar      *val;
678:   InsertMode     addv = mat->insertmode;


682:   if (!baij->donotstash) {
683:     while (1) {
684:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
685:       if (!flg) break;

687:       for (i=0; i<n;) {
688:         /* Now identify the consecutive vals belonging to the same row */
689:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
690:         if (j < n) ncols = j-i;
691:         else       ncols = n-i;
692:         /* Now assemble all these values with a single function call */
693:         MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);
694:         i = j;
695:       }
696:     }
697:     MatStashScatterEnd_Private(&mat->stash);
698:     /* Now process the block-stash. Since the values are stashed column-oriented,
699:        clear the roworiented flags (switch to column-oriented), and after MatSetValues()
700:        restore the original flags */
701:     r1 = baij->roworiented;
702:     r2 = a->roworiented;
703:     r3 = b->roworiented;
704:     baij->roworiented = PETSC_FALSE;
705:     a->roworiented    = PETSC_FALSE;
706:     b->roworiented    = PETSC_FALSE;
707:     while (1) {
708:       MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
709:       if (!flg) break;
710: 
711:       for (i=0; i<n;) {
712:         /* Now identify the consecutive vals belonging to the same row */
713:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
714:         if (j < n) ncols = j-i;
715:         else       ncols = n-i;
716:         MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
717:         i = j;
718:       }
719:     }
720:     MatStashScatterEnd_Private(&mat->bstash);
721:     baij->roworiented = r1;
722:     a->roworiented    = r2;
723:     b->roworiented    = r3;
724:   }

726:   MatAssemblyBegin(baij->A,mode);
727:   MatAssemblyEnd(baij->A,mode);

729:   /* determine if any processor has disassembled, if so we must 
730:      also disassemble ourselves, in order that we may reassemble. */
731:   /*
732:      if nonzero structure of submatrix B cannot change then we know that
733:      no processor disassembled thus we can skip this stuff
734:   */
735:   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
736:     MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);
737:     if (mat->was_assembled && !other_disassembled) {
738:       DisAssemble_MPISBAIJ(mat);
739:     }
740:   }

742:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
743:     MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
744:   }
745:   MatAssemblyBegin(baij->B,mode);
746:   MatAssemblyEnd(baij->B,mode);
747: 
748: #if defined(PETSC_USE_BOPT_g)
749:   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
750:     PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
751:     baij->ht_total_ct  = 0;
752:     baij->ht_insert_ct = 0;
753:   }
754: #endif
755:   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
756:     MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);
757:     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
758:     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
759:   }

761:   if (baij->rowvalues) {
762:     PetscFree(baij->rowvalues);
763:     baij->rowvalues = 0;
764:   }

766:   return(0);
767: }

771: static PetscErrorCode MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
772: {
773:   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
774:   PetscErrorCode    ierr;
775:   PetscInt          bs = mat->bs;
776:   PetscMPIInt       size = baij->size,rank = baij->rank;
777:   PetscTruth        iascii,isdraw;
778:   PetscViewer       sviewer;
779:   PetscViewerFormat format;

782:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
783:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
784:   if (iascii) {
785:     PetscViewerGetFormat(viewer,&format);
786:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
787:       MatInfo info;
788:       MPI_Comm_rank(mat->comm,&rank);
789:       MatGetInfo(mat,MAT_LOCAL,&info);
790:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
791:               rank,mat->m,(PetscInt)info.nz_used*bs,(PetscInt)info.nz_allocated*bs,
792:               mat->bs,(PetscInt)info.memory);
793:       MatGetInfo(baij->A,MAT_LOCAL,&info);
794:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
795:       MatGetInfo(baij->B,MAT_LOCAL,&info);
796:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
797:       PetscViewerFlush(viewer);
798:       VecScatterView(baij->Mvctx,viewer);
799:       return(0);
800:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
801:       PetscViewerASCIIPrintf(viewer,"  block size is %D\n",bs);
802:       return(0);
803:     }
804:   }

806:   if (isdraw) {
807:     PetscDraw       draw;
808:     PetscTruth isnull;
809:     PetscViewerDrawGetDraw(viewer,0,&draw);
810:     PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
811:   }

813:   if (size == 1) {
814:     PetscObjectSetName((PetscObject)baij->A,mat->name);
815:     MatView(baij->A,viewer);
816:   } else {
817:     /* assemble the entire matrix onto the first processor. */
818:     Mat         A;
819:     Mat_SeqSBAIJ *Aloc;
820:     Mat_SeqBAIJ *Bloc;
821:     PetscInt         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
822:     MatScalar   *a;

824:     /* Should this be the same type as mat? */
825:     if (!rank) {
826:       MatCreate(mat->comm,M,N,M,N,&A);
827:     } else {
828:       MatCreate(mat->comm,0,0,M,N,&A);
829:     }
830:     MatSetType(A,MATMPISBAIJ);
831:     MatMPISBAIJSetPreallocation(A,mat->bs,0,PETSC_NULL,0,PETSC_NULL);
832:     PetscLogObjectParent(mat,A);

834:     /* copy over the A part */
835:     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
836:     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
837:     PetscMalloc(bs*sizeof(PetscInt),&rvals);

839:     for (i=0; i<mbs; i++) {
840:       rvals[0] = bs*(baij->rstart + i);
841:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
842:       for (j=ai[i]; j<ai[i+1]; j++) {
843:         col = (baij->cstart+aj[j])*bs;
844:         for (k=0; k<bs; k++) {
845:           MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
846:           col++; a += bs;
847:         }
848:       }
849:     }
850:     /* copy over the B part */
851:     Bloc = (Mat_SeqBAIJ*)baij->B->data;
852:     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
853:     for (i=0; i<mbs; i++) {
854:       rvals[0] = bs*(baij->rstart + i);
855:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
856:       for (j=ai[i]; j<ai[i+1]; j++) {
857:         col = baij->garray[aj[j]]*bs;
858:         for (k=0; k<bs; k++) {
859:           MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
860:           col++; a += bs;
861:         }
862:       }
863:     }
864:     PetscFree(rvals);
865:     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
866:     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
867:     /* 
868:        Everyone has to call to draw the matrix since the graphics waits are
869:        synchronized across all processors that share the PetscDraw object
870:     */
871:     PetscViewerGetSingleton(viewer,&sviewer);
872:     if (!rank) {
873:       PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);
874:       MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
875:     }
876:     PetscViewerRestoreSingleton(viewer,&sviewer);
877:     MatDestroy(A);
878:   }
879:   return(0);
880: }

884: PetscErrorCode MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
885: {
887:   PetscTruth     iascii,isdraw,issocket,isbinary;

890:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
891:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
892:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
893:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
894:   if (iascii || isdraw || issocket || isbinary) {
895:     MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
896:   } else {
897:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
898:   }
899:   return(0);
900: }

904: PetscErrorCode MatDestroy_MPISBAIJ(Mat mat)
905: {
906:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;

910: #if defined(PETSC_USE_LOG)
911:   PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->M,mat->N);
912: #endif
913:   MatStashDestroy_Private(&mat->stash);
914:   MatStashDestroy_Private(&mat->bstash);
915:   PetscFree(baij->rowners);
916:   MatDestroy(baij->A);
917:   MatDestroy(baij->B);
918: #if defined (PETSC_USE_CTABLE)
919:   if (baij->colmap) {PetscTableDelete(baij->colmap);}
920: #else
921:   if (baij->colmap) {PetscFree(baij->colmap);}
922: #endif
923:   if (baij->garray) {PetscFree(baij->garray);}
924:   if (baij->lvec)   {VecDestroy(baij->lvec);}
925:   if (baij->Mvctx)  {VecScatterDestroy(baij->Mvctx);}
926:   if (baij->slvec0) {
927:     VecDestroy(baij->slvec0);
928:     VecDestroy(baij->slvec0b);
929:   }
930:   if (baij->slvec1) {
931:     VecDestroy(baij->slvec1);
932:     VecDestroy(baij->slvec1a);
933:     VecDestroy(baij->slvec1b);
934:   }
935:   if (baij->sMvctx)  {VecScatterDestroy(baij->sMvctx);}
936:   if (baij->rowvalues) {PetscFree(baij->rowvalues);}
937:   if (baij->barray) {PetscFree(baij->barray);}
938:   if (baij->hd) {PetscFree(baij->hd);}
939: #if defined(PETSC_USE_MAT_SINGLE)
940:   if (baij->setvaluescopy) {PetscFree(baij->setvaluescopy);}
941: #endif
942:   PetscFree(baij);

944:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
945:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
946:   PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
947:   PetscObjectComposeFunction((PetscObject)mat,"MatMPISBAIJSetPreallocation_C","",PETSC_NULL);
948:   return(0);
949: }

953: PetscErrorCode MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
954: {
955:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
957:   PetscInt       nt,mbs=a->mbs,bs=A->bs;
958:   PetscScalar    *x,*from,zero=0.0;
959: 
961:   VecGetLocalSize(xx,&nt);
962:   if (nt != A->n) {
963:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
964:   }
965:   VecGetLocalSize(yy,&nt);
966:   if (nt != A->m) {
967:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
968:   }
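
  /* A sketch of the data flow below (only the upper triangle is stored, so
     y = A*x + B*x_ghost + the scattered-back B^T*x contributions):
     slvec1a = A*x, slvec1b = 0, slvec0 = [x ; B^T*x]; the forward ADD_VALUES
     scatter on sMvctx simultaneously ships the B^T*x pieces to their owning
     ranks (added into slvec1a) and gathers the ghost values of x into
     slvec1b; a final multadd with B then completes y. */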

970:   /* diagonal part */
971:   (*a->A->ops->mult)(a->A,xx,a->slvec1a);
972:   VecSet(&zero,a->slvec1b);

974:   /* subdiagonal part */
975:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

977:   /* copy x into the vec slvec0 */
978:   VecGetArray(a->slvec0,&from);
979:   VecGetArray(xx,&x);
980:   PetscMemcpy(from,x,bs*mbs*sizeof(PetscScalar));
981:   VecRestoreArray(a->slvec0,&from);
982: 
983:   VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
984:   VecRestoreArray(xx,&x);
985:   VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
986: 
987:   /* superdiagonal part */
988:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
989: 
990:   return(0);
991: }

995: PetscErrorCode MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
996: {
997:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
999:   PetscInt       nt;

1002:   VecGetLocalSize(xx,&nt);
1003:   if (nt != A->n) {
1004:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
1005:   }
1006:   VecGetLocalSize(yy,&nt);
1007:   if (nt != A->m) {
1008:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
1009:   }

1011:   VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1012:   /* do diagonal part */
1013:   (*a->A->ops->mult)(a->A,xx,yy);
1014:   /* do superdiagonal part */
1015:   VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1016:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1017:   /* do subdiagonal part */
1018:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1019:   VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1020:   VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);

1022:   return(0);
1023: }

1027: PetscErrorCode MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1028: {
1029:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1031:   PetscInt       mbs=a->mbs,bs=A->bs;
1032:   PetscScalar    *x,*from,zero=0.0;
1033: 
1035:   /*
1036:   PetscSynchronizedPrintf(A->comm," MatMultAdd is called ...\n");
1037:   PetscSynchronizedFlush(A->comm);
1038:   */
1039:   /* diagonal part */
1040:   (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
1041:   VecSet(&zero,a->slvec1b);

1043:   /* subdiagonal part */
1044:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

1046:   /* copy x into the vec slvec0 */
1047:   VecGetArray(a->slvec0,&from);
1048:   VecGetArray(xx,&x);
1049:   PetscMemcpy(from,x,bs*mbs*sizeof(PetscScalar));
1050:   VecRestoreArray(a->slvec0,&from);
1051: 
1052:   VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
1053:   VecRestoreArray(xx,&x);
1054:   VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
1055: 
1056:   /* superdiagonal part */
1057:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
1058: 
1059:   return(0);
1060: }

1064: PetscErrorCode MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
1065: {
1066:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1070:   VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1071:   /* do diagonal part */
1072:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1073:   /* do superdiagonal part */
1074:   VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1075:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);

1077:   /* do subdiagonal part */
1078:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1079:   VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1080:   VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);

1082:   return(0);
1083: }

1087: PetscErrorCode MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
1088: {

1092:   MatMult(A,xx,yy);
1093:   return(0);
1094: }

1098: PetscErrorCode MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1099: {

1103:   MatMultAdd(A,xx,yy,zz);
1104:   return(0);
1105: }

1107: /*
1108:   This only works correctly for square matrices where the subblock A->A is the 
1109:    diagonal block
1110: */
1113: PetscErrorCode MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
1114: {
1115:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1119:   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
1120:   MatGetDiagonal(a->A,v);
1121:   return(0);
1122: }

1126: PetscErrorCode MatScale_MPISBAIJ(const PetscScalar *aa,Mat A)
1127: {
1128:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1132:   MatScale(aa,a->A);
1133:   MatScale(aa,a->B);
1134:   return(0);
1135: }

1139: PetscErrorCode MatGetRow_MPISBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1140: {
1141:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
1142:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1144:   PetscInt       bs = matin->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
1145:   PetscInt       nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
1146:   PetscInt       *cmap,*idx_p,cstart = mat->cstart;

1149:   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1150:   mat->getrowactive = PETSC_TRUE;

1152:   if (!mat->rowvalues && (idx || v)) {
1153:     /*
1154:         allocate enough space to hold information from the longest row.
1155:     */
1156:     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
1157:     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
1158:     PetscInt     max = 1,mbs = mat->mbs,tmp;
1159:     for (i=0; i<mbs; i++) {
1160:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
1161:       if (max < tmp) { max = tmp; }
1162:     }
1163:     PetscMalloc(max*bs2*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);
1164:     mat->rowindices = (PetscInt*)(mat->rowvalues + max*bs2);
1165:   }
1166: 
1167:   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
1168:   lrow = row - brstart;  /* local row index */

1170:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1171:   if (!v)   {pvA = 0; pvB = 0;}
1172:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1173:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1174:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1175:   nztot = nzA + nzB;

1177:   cmap  = mat->garray;
1178:   if (v  || idx) {
1179:     if (nztot) {
1180:       /* Sort by increasing column numbers, assuming A and B already sorted */
1181:       PetscInt imark = -1;
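      /* imark records where B's columns cross cstart: B entries to the left of
         the diagonal block come first, then all of A's entries, then the
         remaining B entries to the right. */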
1182:       if (v) {
1183:         *v = v_p = mat->rowvalues;
1184:         for (i=0; i<nzB; i++) {
1185:           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1186:           else break;
1187:         }
1188:         imark = i;
1189:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1190:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1191:       }
1192:       if (idx) {
1193:         *idx = idx_p = mat->rowindices;
1194:         if (imark > -1) {
1195:           for (i=0; i<imark; i++) {
1196:             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1197:           }
1198:         } else {
1199:           for (i=0; i<nzB; i++) {
1200:             if (cmap[cworkB[i]/bs] < cstart)
1201:               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1202:             else break;
1203:           }
1204:           imark = i;
1205:         }
1206:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1207:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1208:       }
1209:     } else {
1210:       if (idx) *idx = 0;
1211:       if (v)   *v   = 0;
1212:     }
1213:   }
1214:   *nz = nztot;
1215:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1216:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1217:   return(0);
1218: }

1222: PetscErrorCode MatRestoreRow_MPISBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1223: {
1224:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

1227:   if (baij->getrowactive == PETSC_FALSE) {
1228:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1229:   }
1230:   baij->getrowactive = PETSC_FALSE;
1231:   return(0);
1232: }

1236: PetscErrorCode MatZeroEntries_MPISBAIJ(Mat A)
1237: {
1238:   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;

1242:   MatZeroEntries(l->A);
1243:   MatZeroEntries(l->B);
1244:   return(0);
1245: }

1249: PetscErrorCode MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1250: {
1251:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)matin->data;
1252:   Mat            A = a->A,B = a->B;
1254:   PetscReal      isend[5],irecv[5];

1257:   info->block_size     = (PetscReal)matin->bs;
1258:   MatGetInfo(A,MAT_LOCAL,info);
1259:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1260:   isend[3] = info->memory;  isend[4] = info->mallocs;
1261:   MatGetInfo(B,MAT_LOCAL,info);
1262:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1263:   isend[3] += info->memory;  isend[4] += info->mallocs;
1264:   if (flag == MAT_LOCAL) {
1265:     info->nz_used      = isend[0];
1266:     info->nz_allocated = isend[1];
1267:     info->nz_unneeded  = isend[2];
1268:     info->memory       = isend[3];
1269:     info->mallocs      = isend[4];
1270:   } else if (flag == MAT_GLOBAL_MAX) {
1271:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);
1272:     info->nz_used      = irecv[0];
1273:     info->nz_allocated = irecv[1];
1274:     info->nz_unneeded  = irecv[2];
1275:     info->memory       = irecv[3];
1276:     info->mallocs      = irecv[4];
1277:   } else if (flag == MAT_GLOBAL_SUM) {
1278:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);
1279:     info->nz_used      = irecv[0];
1280:     info->nz_allocated = irecv[1];
1281:     info->nz_unneeded  = irecv[2];
1282:     info->memory       = irecv[3];
1283:     info->mallocs      = irecv[4];
1284:   } else {
1285:     SETERRQ1(PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1286:   }
1287:   info->rows_global       = (PetscReal)matin->M;
1288:   info->columns_global    = (PetscReal)matin->N;
1289:   info->rows_local        = (PetscReal)matin->m;
1290:   info->columns_local     = (PetscReal)matin->n;
1291:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1292:   info->fill_ratio_needed = 0;
1293:   info->factor_mallocs    = 0;
1294:   return(0);
1295: }

1299: PetscErrorCode MatSetOption_MPISBAIJ(Mat A,MatOption op)
1300: {
1301:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1305:   switch (op) {
1306:   case MAT_NO_NEW_NONZERO_LOCATIONS:
1307:   case MAT_YES_NEW_NONZERO_LOCATIONS:
1308:   case MAT_COLUMNS_UNSORTED:
1309:   case MAT_COLUMNS_SORTED:
1310:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1311:   case MAT_KEEP_ZEROED_ROWS:
1312:   case MAT_NEW_NONZERO_LOCATION_ERR:
1313:     MatSetOption(a->A,op);
1314:     MatSetOption(a->B,op);
1315:     break;
1316:   case MAT_ROW_ORIENTED:
1317:     a->roworiented = PETSC_TRUE;
1318:     MatSetOption(a->A,op);
1319:     MatSetOption(a->B,op);
1320:     break;
1321:   case MAT_ROWS_SORTED:
1322:   case MAT_ROWS_UNSORTED:
1323:   case MAT_YES_NEW_DIAGONALS:
1324:     PetscLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
1325:     break;
1326:   case MAT_COLUMN_ORIENTED:
1327:     a->roworiented = PETSC_FALSE;
1328:     MatSetOption(a->A,op);
1329:     MatSetOption(a->B,op);
1330:     break;
1331:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1332:     a->donotstash = PETSC_TRUE;
1333:     break;
1334:   case MAT_NO_NEW_DIAGONALS:
1335:     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1336:   case MAT_USE_HASH_TABLE:
1337:     a->ht_flag = PETSC_TRUE;
1338:     break;
1339:   case MAT_NOT_SYMMETRIC:
1340:   case MAT_NOT_STRUCTURALLY_SYMMETRIC:
1341:   case MAT_HERMITIAN:
1342:     SETERRQ(PETSC_ERR_SUP,"Matrix must be symmetric");
1343:   case MAT_SYMMETRIC:
1344:   case MAT_STRUCTURALLY_SYMMETRIC:
1345:   case MAT_NOT_HERMITIAN:
1346:   case MAT_SYMMETRY_ETERNAL:
1347:   case MAT_NOT_SYMMETRY_ETERNAL:
1348:     break;
1349:   default:
1350:     SETERRQ(PETSC_ERR_SUP,"unknown option");
1351:   }
1352:   return(0);
1353: }

1357: PetscErrorCode MatTranspose_MPISBAIJ(Mat A,Mat *B)
1358: {
1361:   MatDuplicate(A,MAT_COPY_VALUES,B);
1362:   return(0);
1363: }

1367: PetscErrorCode MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1368: {
1369:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
1370:   Mat            a = baij->A,b = baij->B;
1372:   PetscInt       s1,s2,s3;

1375:   if (ll != rr) {
1376:     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same");
1377:   }
1378:   MatGetLocalSize(mat,&s2,&s3);
1379:   if (rr) {
1380:     VecGetLocalSize(rr,&s1);
1381:     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1382:     /* Overlap communication with computation. */
1383:     VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1384:     /*} if (ll) { */
1385:     VecGetLocalSize(ll,&s1);
1386:     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1387:     (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1388:     /* } */
1389:     /* scale the diagonal block */
1390:     (*a->ops->diagonalscale)(a,ll,rr);

1392:   /* if (rr) { */
1393:     /* Do a scatter end and then right scale the off-diagonal block */
1394:     VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1395:     (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1396:   }
1397: 
1398:   return(0);
1399: }

1403: PetscErrorCode MatZeroRows_MPISBAIJ(Mat A,IS is,const PetscScalar *diag)
1404: {
1406:   SETERRQ(PETSC_ERR_SUP,"No support for this function yet");
1407: }

1411: PetscErrorCode MatPrintHelp_MPISBAIJ(Mat A)
1412: {
1413:   Mat_MPISBAIJ      *a = (Mat_MPISBAIJ*)A->data;
1414:   MPI_Comm          comm = A->comm;
1415:   static PetscTruth called = PETSC_FALSE;
1416:   PetscErrorCode    ierr;

1419:   if (!a->rank) {
1420:     MatPrintHelp_SeqSBAIJ(a->A);
1421:   }
1422:   if (called) {return(0);} else called = PETSC_TRUE;
1423:   (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");
1424:   (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use a hash table for efficient matrix assembly\n");
1425:   return(0);
1426: }

1430: PetscErrorCode MatSetUnfactored_MPISBAIJ(Mat A)
1431: {
1432:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1436:   MatSetUnfactored(a->A);
1437:   return(0);
1438: }

1440: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);

1444: PetscErrorCode MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1445: {
1446:   Mat_MPISBAIJ   *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1447:   Mat            a,b,c,d;
1448:   PetscTruth     flg;

1452:   a = matA->A; b = matA->B;
1453:   c = matB->A; d = matB->B;

1455:   MatEqual(a,c,&flg);
1456:   if (flg == PETSC_TRUE) {
1457:     MatEqual(b,d,&flg);
1458:   }
1459:   MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);
1460:   return(0);
1461: }

1465: PetscErrorCode MatSetUpPreallocation_MPISBAIJ(Mat A)
1466: {

1470:   MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1471:   return(0);
1472: }

1476: PetscErrorCode MatGetSubMatrices_MPISBAIJ(Mat A,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *B[])
1477: {
1479:   PetscInt       i;
1480:   PetscTruth     flg;

1483:   for (i=0; i<n; i++) {
1484:     ISEqual(irow[i],icol[i],&flg);
1485:     if (!flg) {
1486:       SETERRQ(PETSC_ERR_SUP,"Can only get symmetric submatrix for MPISBAIJ matrices");
1487:     }
1488:   }
1489:   MatGetSubMatrices_MPIBAIJ(A,n,irow,icol,scall,B);
1490:   return(0);
1491: }
1492: 

1494: /* -------------------------------------------------------------------*/
1495: static struct _MatOps MatOps_Values = {
1496:        MatSetValues_MPISBAIJ,
1497:        MatGetRow_MPISBAIJ,
1498:        MatRestoreRow_MPISBAIJ,
1499:        MatMult_MPISBAIJ,
1500: /* 4*/ MatMultAdd_MPISBAIJ,
1501:        MatMultTranspose_MPISBAIJ,
1502:        MatMultTransposeAdd_MPISBAIJ,
1503:        0,
1504:        0,
1505:        0,
1506: /*10*/ 0,
1507:        0,
1508:        0,
1509:        MatRelax_MPISBAIJ,
1510:        MatTranspose_MPISBAIJ,
1511: /*15*/ MatGetInfo_MPISBAIJ,
1512:        MatEqual_MPISBAIJ,
1513:        MatGetDiagonal_MPISBAIJ,
1514:        MatDiagonalScale_MPISBAIJ,
1515:        MatNorm_MPISBAIJ,
1516: /*20*/ MatAssemblyBegin_MPISBAIJ,
1517:        MatAssemblyEnd_MPISBAIJ,
1518:        0,
1519:        MatSetOption_MPISBAIJ,
1520:        MatZeroEntries_MPISBAIJ,
1521: /*25*/ MatZeroRows_MPISBAIJ,
1522:        0,
1523:        0,
1524:        0,
1525:        0,
1526: /*30*/ MatSetUpPreallocation_MPISBAIJ,
1527:        0,
1528:        0,
1529:        0,
1530:        0,
1531: /*35*/ MatDuplicate_MPISBAIJ,
1532:        0,
1533:        0,
1534:        0,
1535:        0,
1536: /*40*/ 0,
1537:        MatGetSubMatrices_MPISBAIJ,
1538:        MatIncreaseOverlap_MPISBAIJ,
1539:        MatGetValues_MPISBAIJ,
1540:        0,
1541: /*45*/ MatPrintHelp_MPISBAIJ,
1542:        MatScale_MPISBAIJ,
1543:        0,
1544:        0,
1545:        0,
1546: /*50*/ 0,
1547:        0,
1548:        0,
1549:        0,
1550:        0,
1551: /*55*/ 0,
1552:        0,
1553:        MatSetUnfactored_MPISBAIJ,
1554:        0,
1555:        MatSetValuesBlocked_MPISBAIJ,
1556: /*60*/ 0,
1557:        0,
1558:        0,
1559:        MatGetPetscMaps_Petsc,
1560:        0,
1561: /*65*/ 0,
1562:        0,
1563:        0,
1564:        0,
1565:        0,
1566: /*70*/ MatGetRowMax_MPISBAIJ,
1567:        0,
1568:        0,
1569:        0,
1570:        0,
1571: /*75*/ 0,
1572:        0,
1573:        0,
1574:        0,
1575:        0,
1576: /*80*/ 0,
1577:        0,
1578:        0,
1579:        0,
1580:        MatLoad_MPISBAIJ,
1581: /*85*/ 0,
1582:        0,
1583:        0,
1584:        0,
1585:        0,
1586: /*90*/ 0,
1587:        0,
1588:        0,
1589:        0,
1590:        0,
1591: /*95*/ 0,
1592:        0,
1593:        0,
1594:        0};


1600: PetscErrorCode MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1601: {
1603:   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1604:   *iscopy = PETSC_FALSE;
1605:   return(0);
1606: }

1612: PetscErrorCode MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
1613: {
1614:   Mat_MPISBAIJ   *b;
1616:   PetscInt       i,mbs,Mbs;

1619:   PetscOptionsGetInt(B->prefix,"-mat_block_size",&bs,PETSC_NULL);

1621:   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1622:   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1623:   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1624:   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
1625:   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
1626:   if (d_nnz) {
1627:     for (i=0; i<B->m/bs; i++) {
1628:       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
1629:     }
1630:   }
1631:   if (o_nnz) {
1632:     for (i=0; i<B->m/bs; i++) {
1633:       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
1634:     }
1635:   }
1636:   B->preallocated = PETSC_TRUE;
1637:   PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);
1638:   PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);
1639:   PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);
1640:   PetscMapCreateMPI(B->comm,B->m,B->M,&B->cmap);

1642:   b   = (Mat_MPISBAIJ*)B->data;
1643:   mbs = B->m/bs;
1644:   Mbs = B->M/bs;
1645:   if (mbs*bs != B->m) {
1646:     SETERRQ2(PETSC_ERR_ARG_SIZ,"Number of local rows %D must be divisible by blocksize %D",B->m,bs);
1647:   }

1649:   B->bs  = bs;
1650:   b->bs2 = bs*bs;
1651:   b->mbs = mbs;
1652:   b->nbs = mbs;
1653:   b->Mbs = Mbs;
1654:   b->Nbs = Mbs;

1656:   MPI_Allgather(&b->mbs,1,MPIU_INT,b->rowners+1,1,MPIU_INT,B->comm);
1657:   b->rowners[0]    = 0;
1658:   for (i=2; i<=b->size; i++) {
1659:     b->rowners[i] += b->rowners[i-1];
1660:   }
1661:   b->rstart    = b->rowners[b->rank];
1662:   b->rend      = b->rowners[b->rank+1];
1663:   b->cstart    = b->rstart;
1664:   b->cend      = b->rend;
1665:   for (i=0; i<=b->size; i++) {
1666:     b->rowners_bs[i] = b->rowners[i]*bs;
1667:   }
1668:   b->rstart_bs = b->rstart*bs;
1669:   b->rend_bs   = b->rend*bs;
1670: 
1671:   b->cstart_bs = b->cstart*bs;
1672:   b->cend_bs   = b->cend*bs;
1673: 
1674:   MatCreate(PETSC_COMM_SELF,B->m,B->m,B->m,B->m,&b->A);
1675:   MatSetType(b->A,MATSEQSBAIJ);
1676:   MatSeqSBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
1677:   PetscLogObjectParent(B,b->A);

1679:   MatCreate(PETSC_COMM_SELF,B->m,B->M,B->m,B->M,&b->B);
1680:   MatSetType(b->B,MATSEQBAIJ);
1681:   MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
1682:   PetscLogObjectParent(B,b->B);

1684:   /* build cache for off-processor entries formed during assembly */
1685:   MatStashCreate_Private(B->comm,bs,&B->bstash);

1687:   return(0);
1688: }

1691: /*MC
1692:    MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices, 
1693:    based on block compressed sparse row format.  Only the upper triangular portion of the matrix is stored.

1695:    Options Database Keys:
1696: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()

1698:   Level: beginner

1700: .seealso: MatCreateMPISBAIJ()
1701: M*/
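
A minimal usage sketch (hypothetical example, not part of mpisbaij.c) of selecting this type from the options database; it assumes the five-argument MatCreate() calling sequence used throughout this file:

   #include "petscmat.h"

   int main(int argc,char **argv)
   {
     Mat A;
     PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);
     /* run with -mat_type mpisbaij to select this implementation */
     MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,128,128,&A);
     MatSetFromOptions(A);
     /* harmless if another type was chosen: the call below is routed
        through PetscObjectQueryFunction() and skipped when absent */
     MatMPISBAIJSetPreallocation(A,2,3,PETSC_NULL,1,PETSC_NULL);
     MatDestroy(A);
     PetscFinalize();
     return 0;
   }
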

1706: PetscErrorCode MatCreate_MPISBAIJ(Mat B)
1707: {
1708:   Mat_MPISBAIJ   *b;
1710:   PetscTruth     flg;


1714:   PetscNew(Mat_MPISBAIJ,&b);
1715:   B->data = (void*)b;
1716:   PetscMemzero(b,sizeof(Mat_MPISBAIJ));
1717:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));

1719:   B->ops->destroy    = MatDestroy_MPISBAIJ;
1720:   B->ops->view       = MatView_MPISBAIJ;
1721:   B->mapping    = 0;
1722:   B->factor     = 0;
1723:   B->assembled  = PETSC_FALSE;

1725:   B->insertmode = NOT_SET_VALUES;
1726:   MPI_Comm_rank(B->comm,&b->rank);
1727:   MPI_Comm_size(B->comm,&b->size);

1729:   /* build local table of row and column ownerships */
1730:   PetscMalloc(3*(b->size+2)*sizeof(PetscInt),&b->rowners);
1731:   b->cowners    = b->rowners + b->size + 2;
1732:   b->rowners_bs = b->cowners + b->size + 2;
1733:   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(PetscInt)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));

1735:   /* build cache for off-processor entries formed during assembly */
1736:   MatStashCreate_Private(B->comm,1,&B->stash);
1737:   b->donotstash  = PETSC_FALSE;
1738:   b->colmap      = PETSC_NULL;
1739:   b->garray      = PETSC_NULL;
1740:   b->roworiented = PETSC_TRUE;

1742: #if defined(PETSC_USE_MAT_SINGLE)
1743:   /* stuff for MatSetValues_XXX in single precision */
1744:   b->setvalueslen     = 0;
1745:   b->setvaluescopy    = PETSC_NULL;
1746: #endif

1748:   /* stuff used in block assembly */
1749:   b->barray       = 0;

1751:   /* stuff used for matrix vector multiply */
1752:   b->lvec         = 0;
1753:   b->Mvctx        = 0;
1754:   b->slvec0       = 0;
1755:   b->slvec0b      = 0;
1756:   b->slvec1       = 0;
1757:   b->slvec1a      = 0;
1758:   b->slvec1b      = 0;
1759:   b->sMvctx       = 0;

1761:   /* stuff for MatGetRow() */
1762:   b->rowindices   = 0;
1763:   b->rowvalues    = 0;
1764:   b->getrowactive = PETSC_FALSE;

1766:   /* hash table stuff */
1767:   b->ht           = 0;
1768:   b->hd           = 0;
1769:   b->ht_size      = 0;
1770:   b->ht_flag      = PETSC_FALSE;
1771:   b->ht_fact      = 0;
1772:   b->ht_total_ct  = 0;
1773:   b->ht_insert_ct = 0;

1775:   PetscOptionsHasName(B->prefix,"-mat_use_hash_table",&flg);
1776:   if (flg) {
1777:     PetscReal fact = 1.39;
1778:     MatSetOption(B,MAT_USE_HASH_TABLE);
1779:     PetscOptionsGetReal(B->prefix,"-mat_use_hash_table",&fact,PETSC_NULL);
1780:     if (fact <= 1.0) fact = 1.39;
1781:     MatMPIBAIJSetHashTableFactor(B,fact);
1782:     PetscLogInfo(0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact);
1783:   }
1784:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1785:                                      "MatStoreValues_MPISBAIJ",
1786:                                      MatStoreValues_MPISBAIJ);
1787:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1788:                                      "MatRetrieveValues_MPISBAIJ",
1789:                                      MatRetrieveValues_MPISBAIJ);
1790:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1791:                                      "MatGetDiagonalBlock_MPISBAIJ",
1792:                                      MatGetDiagonalBlock_MPISBAIJ);
1793:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1794:                                      "MatMPISBAIJSetPreallocation_MPISBAIJ",
1795:                                      MatMPISBAIJSetPreallocation_MPISBAIJ);
1796:   B->symmetric                  = PETSC_TRUE;
1797:   B->structurally_symmetric     = PETSC_TRUE;
1798:   B->symmetric_set              = PETSC_TRUE;
1799:   B->structurally_symmetric_set = PETSC_TRUE;
1800:   return(0);
1801: }

1804: /*MC
1805:    MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.

1807:    This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1808:    and MATMPISBAIJ otherwise.

1810:    Options Database Keys:
1811: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()

1813:   Level: beginner

1815: .seealso: MatCreateMPISBAIJ(), MATSEQSBAIJ, MATMPISBAIJ
1816: M*/

1821: PetscErrorCode MatCreate_SBAIJ(Mat A)
1822: {
1824:   PetscMPIInt    size;

1827:   PetscObjectChangeTypeName((PetscObject)A,MATSBAIJ);
1828:   MPI_Comm_size(A->comm,&size);
1829:   if (size == 1) {
1830:     MatSetType(A,MATSEQSBAIJ);
1831:   } else {
1832:     MatSetType(A,MATMPISBAIJ);
1833:   }
1834:   return(0);
1835: }

1840: /*@C
1841:    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1842:    the user should preallocate the matrix storage by setting the parameters 
1843:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1844:    performance can be increased by more than a factor of 50.

1846:    Collective on Mat

1848:    Input Parameters:
1849: +  B - the matrix 
1850: .  bs   - size of block
1851: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1852:            submatrix  (same for all local rows)
1853: .  d_nnz - array containing the number of block nonzeros in the various block rows 
1854:            in the upper triangular and diagonal part of the diagonal portion of the local
1855:            submatrix (possibly different for each block row) or PETSC_NULL.  You must leave 
1856:            room for the diagonal entry even if it is zero.
1857: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1858:            submatrix (same for all local rows).
1859: -  o_nnz - array containing the number of nonzeros in the various block rows of the
1860:            off-diagonal portion of the local submatrix (possibly different for
1861:            each block row) or PETSC_NULL.


1864:    Options Database Keys:
1865: +   -mat_no_unroll - uses code that does not unroll the loops in the 
1866:                      block calculations (much slower)
1867: -   -mat_block_size - size of the blocks to use

1869:    Notes:

1871:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1872:    then it must be used on all processors that share the object for that argument.

1874:    If the *_nnz parameter is given then the *_nz parameter is ignored

1876:    Storage Information:
1877:    For a square global matrix we define each processor's diagonal portion 
1878:    to be its local rows and the corresponding columns (a square submatrix);  
1879:    each processor's off-diagonal portion encompasses the remainder of the
1880:    local matrix (a rectangular submatrix). 

1882:    The user can specify preallocated storage for the diagonal part of
1883:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1884:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1885:    memory allocation.  Likewise, specify preallocated storage for the
1886:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1888:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1889:    the figure below we depict these three local rows and all columns (0-11).

1891: .vb
1892:            0 1 2 3 4 5 6 7 8 9 10 11
1893:           -------------------
1894:    row 3  |  o o o d d d o o o o o o
1895:    row 4  |  o o o d d d o o o o o o
1896:    row 5  |  o o o d d d o o o o o o
1897:           -------------------
1898: .ve
1899:   
1900:    Thus, any entries in the d locations are stored in the d (diagonal) 
1901:    submatrix, and any entries in the o locations are stored in the
1902:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1903:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.

1905:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1906:    plus the diagonal part of the d matrix,
1907:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1908:    In general, for PDE problems in which most nonzeros are near the diagonal,
1909:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1910:    or you will get TERRIBLE performance; see the users' manual chapter on
1911:    matrices.

1913:    Level: intermediate

1915: .keywords: matrix, block, aij, compressed row, sparse, parallel

1917: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1918: @*/
1919: PetscErrorCode MatMPISBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
1920: {
1921:   PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);

1924:   PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",(void (**)(void))&f);
1925:   if (f) {
1926:     (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
1927:   }
1928:   return(0);
1929: }
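
As a concrete illustration of the d_nnz/o_nnz convention described above, here is a hedged sketch (hypothetical helper, not part of mpisbaij.c) that preallocates a block-tridiagonal SBAIJ matrix; only the diagonal block and the upper neighbour are counted, since the lower triangle is not stored. The values mbs, rstart and Mbs are assumed to come from the caller:

   PetscErrorCode PreallocBlockTridiag(Mat B,PetscInt bs,PetscInt mbs,
                                       PetscInt rstart,PetscInt Mbs)
   {
     PetscInt i,*d_nnz,*o_nnz;

     PetscMalloc(mbs*sizeof(PetscInt),&d_nnz);
     PetscMalloc(mbs*sizeof(PetscInt),&o_nnz);
     for (i=0; i<mbs; i++) {
       PetscInt grow = rstart + i;   /* global block-row index */
       d_nnz[i] = 1;                 /* always count the diagonal block */
       o_nnz[i] = 0;
       if (grow+1 < Mbs) {           /* upper-neighbour block (grow,grow+1) */
         if (i+1 < mbs) d_nnz[i]++;  /* column still in the diagonal portion */
         else           o_nnz[i]++;  /* column owned by the next process */
       }
     }
     MatMPISBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
     PetscFree(d_nnz);
     PetscFree(o_nnz);
     return 0;
   }
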

1933: /*@C
1934:    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1935:    (block compressed row).  For good matrix assembly performance
1936:    the user should preallocate the matrix storage by setting the parameters 
1937:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1938:    performance can be increased by more than a factor of 50.

1940:    Collective on MPI_Comm

1942:    Input Parameters:
1943: +  comm - MPI communicator
1944: .  bs   - size of block
1945: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1946:            This value should be the same as the local size used in creating the 
1947:            y vector for the matrix-vector product y = Ax.
1948: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1949:            This value should be the same as the local size used in creating the 
1950:            x vector for the matrix-vector product y = Ax.
1951: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
1952: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
1953: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1954:            submatrix  (same for all local rows)
1955: .  d_nnz - array containing the number of block nonzeros in the various block rows 
1956:            in the upper triangular portion of the diagonal portion of the local 
1957:            submatrix (possibly different for each block row) or PETSC_NULL.  
1958:            You must leave room for the diagonal entry even if it is zero.
1959: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1960:            submatrix (same for all local rows).
1961: -  o_nnz - array containing the number of nonzeros in the various block rows of the
1962:            off-diagonal portion of the local submatrix (possibly different for
1963:            each block row) or PETSC_NULL.

1965:    Output Parameter:
1966: .  A - the matrix 

1968:    Options Database Keys:
1969: .   -mat_no_unroll - uses code that does not unroll the loops in the 
1970:                      block calculations (much slower)
1971: .   -mat_block_size - size of the blocks to use
1972: .   -mat_mpi - use the parallel matrix data structures even on one processor 
1973:                (defaults to using SeqBAIJ format on one processor)

1975:    Notes:
1976:    The user MUST specify either the local or global matrix dimensions
1977:    (possibly both).

1979:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1980:    then it must be used on all processors that share the object for that argument.

1982:    If the *_nnz parameter is given then the *_nz parameter is ignored

1984:    Storage Information:
1985:    For a square global matrix we define each processor's diagonal portion 
1986:    to be its local rows and the corresponding columns (a square submatrix);  
1987:    each processor's off-diagonal portion encompasses the remainder of the
1988:    local matrix (a rectangular submatrix). 

1990:    The user can specify preallocated storage for the diagonal part of
1991:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1992:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1993:    memory allocation.  Likewise, specify preallocated storage for the
1994:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1996:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1997:    the figure below we depict these three local rows and all columns (0-11).

1999: .vb
2000:            0 1 2 3 4 5 6 7 8 9 10 11
2001:           -------------------
2002:    row 3  |  o o o d d d o o o o o o
2003:    row 4  |  o o o d d d o o o o o o
2004:    row 5  |  o o o d d d o o o o o o
2005:           -------------------
2006: .ve
2007:   
2008:    Thus, any entries in the d locations are stored in the d (diagonal) 
2009:    submatrix, and any entries in the o locations are stored in the
2010:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
2011:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.

2013:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
2014:    plus the diagonal part of the d matrix,
2015:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
2016:    In general, for PDE problems in which most nonzeros are near the diagonal,
2017:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
2018:    or you will get TERRIBLE performance; see the users' manual chapter on
2019:    matrices.

2021:    Level: intermediate

2023: .keywords: matrix, block, aij, compressed row, sparse, parallel

2025: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
2026: @*/

2028: PetscErrorCode MatCreateMPISBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
2029: {
2031:   PetscMPIInt    size;

2034:   MatCreate(comm,m,n,M,N,A);
2035:   MPI_Comm_size(comm,&size);
2036:   if (size > 1) {
2037:     MatSetType(*A,MATMPISBAIJ);
2038:     MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
2039:   } else {
2040:     MatSetType(*A,MATSEQSBAIJ);
2041:     MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
2042:   }
2043:   return(0);
2044: }
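
A hedged end-to-end sketch of this convenience path (hypothetical sizes; recall that for SBAIJ only upper-triangular blocks should be set):

   #include "petscmat.h"

   int main(int argc,char **argv)
   {
     Mat A;
     PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);
     /* 2x2 blocks, local sizes chosen by PETSc from the global size 100;
        estimate 3 diagonal-portion and 1 off-diagonal-portion block
        nonzeros per block row */
     MatCreateMPISBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,100,100,
                       3,PETSC_NULL,1,PETSC_NULL,&A);
     /* ... MatSetValues() on entries with column >= row only ... */
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDestroy(A);
     PetscFinalize();
     return 0;
   }
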


2049: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2050: {
2051:   Mat            mat;
2052:   Mat_MPISBAIJ   *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
2054:   PetscInt       len=0,nt,bs=matin->bs,mbs=oldmat->mbs;
2055:   PetscScalar    *array;

2058:   *newmat       = 0;
2059:   MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);
2060:   MatSetType(mat,matin->type_name);
2061:   PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2062: 
2063:   mat->factor       = matin->factor;
2064:   mat->preallocated = PETSC_TRUE;
2065:   mat->assembled    = PETSC_TRUE;
2066:   mat->insertmode   = NOT_SET_VALUES;

2068:   a = (Mat_MPISBAIJ*)mat->data;
2069:   mat->bs  = matin->bs;
2070:   a->bs2   = oldmat->bs2;
2071:   a->mbs   = oldmat->mbs;
2072:   a->nbs   = oldmat->nbs;
2073:   a->Mbs   = oldmat->Mbs;
2074:   a->Nbs   = oldmat->Nbs;
2075: 
2076:   a->rstart       = oldmat->rstart;
2077:   a->rend         = oldmat->rend;
2078:   a->cstart       = oldmat->cstart;
2079:   a->cend         = oldmat->cend;
2080:   a->size         = oldmat->size;
2081:   a->rank         = oldmat->rank;
2082:   a->donotstash   = oldmat->donotstash;
2083:   a->roworiented  = oldmat->roworiented;
2084:   a->rowindices   = 0;
2085:   a->rowvalues    = 0;
2086:   a->getrowactive = PETSC_FALSE;
2087:   a->barray       = 0;
2088:   a->rstart_bs    = oldmat->rstart_bs;
2089:   a->rend_bs      = oldmat->rend_bs;
2090:   a->cstart_bs    = oldmat->cstart_bs;
2091:   a->cend_bs      = oldmat->cend_bs;

2093:   /* hash table stuff */
2094:   a->ht           = 0;
2095:   a->hd           = 0;
2096:   a->ht_size      = 0;
2097:   a->ht_flag      = oldmat->ht_flag;
2098:   a->ht_fact      = oldmat->ht_fact;
2099:   a->ht_total_ct  = 0;
2100:   a->ht_insert_ct = 0;
2101: 
2102:   PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(PetscInt));
2103:   MatStashCreate_Private(matin->comm,1,&mat->stash);
2104:   MatStashCreate_Private(matin->comm,matin->bs,&mat->bstash);
2105:   if (oldmat->colmap) {
2106: #if defined (PETSC_USE_CTABLE)
2107:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2108: #else
2109:     PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
2110:     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
2111:     PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
2112: #endif
2113:   } else a->colmap = 0;

2115:   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2116:     PetscMalloc(len*sizeof(PetscInt),&a->garray);
2117:     PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2118:     PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
2119:   } else a->garray = 0;
2120: 
2121:   VecDuplicate(oldmat->lvec,&a->lvec);
2122:   PetscLogObjectParent(mat,a->lvec);
2123:   VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2124:   PetscLogObjectParent(mat,a->Mvctx);

2126:   VecDuplicate(oldmat->slvec0,&a->slvec0);
2127:   PetscLogObjectParent(mat,a->slvec0);
2128:   VecDuplicate(oldmat->slvec1,&a->slvec1);
2129:   PetscLogObjectParent(mat,a->slvec1);

2131:   VecGetLocalSize(a->slvec1,&nt);
2132:   VecGetArray(a->slvec1,&array);
2133:   VecCreateSeqWithArray(PETSC_COMM_SELF,bs*mbs,array,&a->slvec1a);
2134:   VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec1b);
2135:   VecRestoreArray(a->slvec1,&array);
2136:   VecGetArray(a->slvec0,&array);
2137:   VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec0b);
2138:   VecRestoreArray(a->slvec0,&array);
2139:   PetscLogObjectParent(mat,a->slvec0);
2140:   PetscLogObjectParent(mat,a->slvec1);
2141:   PetscLogObjectParent(mat,a->slvec0b);
2142:   PetscLogObjectParent(mat,a->slvec1a);
2143:   PetscLogObjectParent(mat,a->slvec1b);

2145:   /*  VecScatterCopy(oldmat->sMvctx,&a->sMvctx); - not written yet, replaced by the lazy trick: */
2146:   PetscObjectReference((PetscObject)oldmat->sMvctx);
2147:   a->sMvctx = oldmat->sMvctx;
2148:   PetscLogObjectParent(mat,a->sMvctx);

2150:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2151:   PetscLogObjectParent(mat,a->A);
2152:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2153:   PetscLogObjectParent(mat,a->B);
2154:   PetscFListDuplicate(matin->qlist,&mat->qlist);
2155:   *newmat = mat;
2156:   return(0);
2157: }
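
Usage goes through the generic MatDuplicate() interface; a short hedged fragment (A is assumed to be an assembled MPISBAIJ matrix):

   Mat B;
   /* deep copy, values included; use MAT_DO_NOT_COPY_VALUES to copy
      only the nonzero structure and communication setup */
   MatDuplicate(A,MAT_COPY_VALUES,&B);
   /* ... use B ... */
   MatDestroy(B);
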

2159:  #include petscsys.h

2163: PetscErrorCode MatLoad_MPISBAIJ(PetscViewer viewer,const MatType type,Mat *newmat)
2164: {
2165:   Mat            A;
2167:   PetscInt       i,nz,j,rstart,rend;
2168:   PetscScalar    *vals,*buf;
2169:   MPI_Comm       comm = ((PetscObject)viewer)->comm;
2170:   MPI_Status     status;
2171:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*sndcounts = 0,*browners,maxnz,*rowners;
2172:   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2173:   PetscInt       *locrowlens,*procsnz = 0,jj,*mycols,*ibuf;
2174:   PetscInt       bs=1,Mbs,mbs,extra_rows;
2175:   PetscInt       *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2176:   PetscInt       dcount,kmax,k,nzcount,tmp;
2177:   int            fd;
2178: 
2180:   PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);

2182:   MPI_Comm_size(comm,&size);
2183:   MPI_Comm_rank(comm,&rank);
2184:   if (!rank) {
2185:     PetscViewerBinaryGetDescriptor(viewer,&fd);
2186:     PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2187:     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2188:     if (header[3] < 0) {
2189:       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2190:     }
2191:   }

2193:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2194:   M = header[1]; N = header[2];

2196:   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");

2198:   /* 
2199:      This code adds extra rows to make sure the number of rows is 
2200:      divisible by the blocksize
2201:   */
2202:   Mbs        = M/bs;
2203:   extra_rows = bs - M + bs*(Mbs);
2204:   if (extra_rows == bs) extra_rows = 0;
2205:   else                  Mbs++;
2206:   if (extra_rows && !rank) {
2207:     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2208:   }

2210:   /* determine ownership of all rows */
2211:   mbs        = Mbs/size + ((Mbs % size) > rank);
2212:   m          = mbs*bs;
2213:   PetscMalloc(2*(size+2)*sizeof(PetscMPIInt),&rowners);
2214:   browners   = rowners + size + 1;
2215:   MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2216:   rowners[0] = 0;
2217:   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2218:   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2219:   rstart = rowners[rank];
2220:   rend   = rowners[rank+1];
2221: 
2222:   /* distribute row lengths to all processors */
2223:   PetscMalloc((rend-rstart)*bs*sizeof(PetscInt),&locrowlens);
2224:   if (!rank) {
2225:     PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
2226:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2227:     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2228:     PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2229:     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2230:     MPI_Scatterv(rowlengths,sndcounts,browners,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2231:     PetscFree(sndcounts);
2232:   } else {
2233:     MPI_Scatterv(0,0,0,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2234:   }
2235: 
2236:   if (!rank) {   /* procs[0] */
2237:     /* calculate the number of nonzeros on each processor */
2238:     PetscMalloc(size*sizeof(PetscInt),&procsnz);
2239:     PetscMemzero(procsnz,size*sizeof(PetscInt));
2240:     for (i=0; i<size; i++) {
2241:       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2242:         procsnz[i] += rowlengths[j];
2243:       }
2244:     }
2245:     PetscFree(rowlengths);
2246: 
2247:     /* determine max buffer needed and allocate it */
2248:     maxnz = 0;
2249:     for (i=0; i<size; i++) {
2250:       maxnz = PetscMax(maxnz,procsnz[i]);
2251:     }
2252:     PetscMalloc(maxnz*sizeof(PetscInt),&cols);

2254:     /* read in my part of the matrix column indices  */
2255:     nz     = procsnz[0];
2256:     PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2257:     mycols = ibuf;
2258:     if (size == 1)  nz -= extra_rows;
2259:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2260:     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }

2262:     /* read in the parts for all other processors (except the last) and ship them off */
2263:     for (i=1; i<size-1; i++) {
2264:       nz   = procsnz[i];
2265:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2266:       MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2267:     }
2268:     /* read in the stuff for the last proc */
2269:     if (size != 1) {
2270:       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2271:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2272:       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2273:       MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2274:     }
2275:     PetscFree(cols);
2276:   } else {  /* procs[i], i>0 */
2277:     /* determine buffer space needed for message */
2278:     nz = 0;
2279:     for (i=0; i<m; i++) {
2280:       nz += locrowlens[i];
2281:     }
2282:     PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2283:     mycols = ibuf;
2284:     /* receive message of column indices */
2285:     MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2286:     MPI_Get_count(&status,MPIU_INT,&maxnz);
2287:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2288:   }

2290:   /* loop over local rows, determining number of off diagonal entries */
2291:   PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&dlens);
2292:   odlens   = dlens + (rend-rstart);
2293:   PetscMalloc(3*Mbs*sizeof(PetscInt),&mask);
2294:   PetscMemzero(mask,3*Mbs*sizeof(PetscInt));
2295:   masked1  = mask    + Mbs;
2296:   masked2  = masked1 + Mbs;
2297:   rowcount = 0; nzcount = 0;
2298:   for (i=0; i<mbs; i++) {
2299:     dcount  = 0;
2300:     odcount = 0;
2301:     for (j=0; j<bs; j++) {
2302:       kmax = locrowlens[rowcount];
2303:       for (k=0; k<kmax; k++) {
2304:         tmp = mycols[nzcount++]/bs; /* block col. index */
2305:         if (!mask[tmp]) {
2306:           mask[tmp] = 1;
2307:           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2308:           else masked1[dcount++] = tmp; /* entry in diag portion */
2309:         }
2310:       }
2311:       rowcount++;
2312:     }
2313: 
2314:     dlens[i]  = dcount;  /* d_nnz[i] */
2315:     odlens[i] = odcount; /* o_nnz[i] */

2317:     /* zero out the mask elements we set */
2318:     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2319:     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2320:   }
2321: 
2322:   /* create our matrix */
2323:   MatCreate(comm,m,m,PETSC_DETERMINE,PETSC_DETERMINE,&A);
2324:   MatSetType(A,type);
2325:   MatMPISBAIJSetPreallocation(A,bs,0,dlens,0,odlens);
2326:   MatSetOption(A,MAT_COLUMNS_SORTED);
2327: 
2328:   if (!rank) {
2329:     PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2330:     /* read in my part of the matrix numerical values  */
2331:     nz = procsnz[0];
2332:     vals = buf;
2333:     mycols = ibuf;
2334:     if (size == 1)  nz -= extra_rows;
2335:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2336:     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }

2338:     /* insert into matrix */
2339:     jj      = rstart*bs;
2340:     for (i=0; i<m; i++) {
2341:       MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2342:       mycols += locrowlens[i];
2343:       vals   += locrowlens[i];
2344:       jj++;
2345:     }

2347:     /* read in other processors (except the last one) and ship out */
2348:     for (i=1; i<size-1; i++) {
2349:       nz   = procsnz[i];
2350:       vals = buf;
2351:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2352:       MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
2353:     }
2354:     /* the last proc */
2355:     if (size != 1){
2356:       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2357:       vals = buf;
2358:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2359:       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2360:       MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);
2361:     }
2362:     PetscFree(procsnz);

2364:   } else {
2365:     /* receive numeric values */
2366:     PetscMalloc(nz*sizeof(PetscScalar),&buf);

2368:     /* receive message of values */
2369:     vals   = buf;
2370:     mycols = ibuf;
2371:     MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
2372:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2373:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");

2375:     /* insert into matrix */
2376:     jj      = rstart*bs;
2377:     for (i=0; i<m; i++) {
2378:       MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2379:       mycols += locrowlens[i];
2380:       vals   += locrowlens[i];
2381:       jj++;
2382:     }
2383:   }

2385:   PetscFree(locrowlens);
2386:   PetscFree(buf);
2387:   PetscFree(ibuf);
2388:   PetscFree(rowners);
2389:   PetscFree(dlens);
2390:   PetscFree(mask);
2391:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2392:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2393:   *newmat = A;
2394:   return(0);
2395: }
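
A hedged loading sketch (hypothetical file name; the binary-viewer mode constant has been renamed across PETSc releases, so check the local headers):

   PetscViewer viewer;
   Mat         A;
   /* open a file previously written with MatView() on a binary viewer;
      -matload_block_size sets bs, as read by PetscOptionsGetInt() above */
   PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",PETSC_FILE_RDONLY,&viewer);
   MatLoad(viewer,MATMPISBAIJ,&A);
   PetscViewerDestroy(viewer);
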

2399: /*@
2400:    MatMPISBAIJSetHashTableFactor - Sets the factor used to compute the size of the hash table.

2402:    Input Parameters:
2403: .  mat  - the matrix
2404: .  fact - factor

2406:    Collective on Mat

2408:    Level: advanced

2410:   Notes:
2411:    This can also be set by the command line option: -mat_use_hash_table fact

2413: .keywords: matrix, hashtable, factor, HT

2415: .seealso: MatSetOption()
2416: @*/
2417: PetscErrorCode MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2418: {
2420:   SETERRQ(PETSC_ERR_SUP,"Function not yet written for SBAIJ format");
2421:   /* return(0); */
2422: }

2426: PetscErrorCode MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2427: {
2428:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
2429:   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(a->B)->data;
2430:   PetscReal      atmp;
2431:   PetscReal      *work,*svalues,*rvalues;
2433:   PetscInt       i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2434:   PetscMPIInt    rank,size;
2435:   PetscInt       *rowners_bs,dest,count,source;
2436:   PetscScalar    *va;
2437:   MatScalar      *ba;
2438:   MPI_Status     stat;

2441:   MatGetRowMax(a->A,v);
2442:   VecGetArray(v,&va);

2444:   MPI_Comm_size(A->comm,&size);
2445:   MPI_Comm_rank(A->comm,&rank);

2447:   bs   = A->bs;
2448:   mbs  = a->mbs;
2449:   Mbs  = a->Mbs;
2450:   ba   = b->a;
2451:   bi   = b->i;
2452:   bj   = b->j;

2454:   /* find ownerships */
2455:   rowners_bs = a->rowners_bs;

2457:   /* each proc creates an array to be distributed */
2458:   PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2459:   PetscMemzero(work,bs*Mbs*sizeof(PetscReal));

2461:   /* row_max for B */
2462:   if (rank != size-1){
2463:     for (i=0; i<mbs; i++) {
2464:       ncols = bi[1] - bi[0]; bi++;
2465:       brow  = bs*i;
2466:       for (j=0; j<ncols; j++){
2467:         bcol = bs*(*bj);
2468:         for (kcol=0; kcol<bs; kcol++){
2469:           col = bcol + kcol;                 /* local col index */
2470:           col += rowners_bs[rank+1];      /* global col index */
2471:           for (krow=0; krow<bs; krow++){
2472:             atmp = PetscAbsScalar(*ba); ba++;
2473:             row = brow + krow;    /* local row index */
2474:             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2475:             if (work[col] < atmp) work[col] = atmp;
2476:           }
2477:         }
2478:         bj++;
2479:       }
2480:     }

2482:     /* send values to its owners */
2483:     for (dest=rank+1; dest<size; dest++){
2484:       svalues = work + rowners_bs[dest];
2485:       count   = rowners_bs[dest+1]-rowners_bs[dest];
2486:       MPI_Send(svalues,count,MPIU_REAL,dest,rank,A->comm);
2487:     }
2488:   }
2489: 
2490:   /* receive values */
2491:   if (rank){
2492:     rvalues = work;
2493:     count   = rowners_bs[rank+1]-rowners_bs[rank];
2494:     for (source=0; source<rank; source++){
2495:       MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,A->comm,&stat);
2496:       /* process values */
2497:       for (i=0; i<count; i++){
2498:         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2499:       }
2500:     }
2501:   }

2503:   VecRestoreArray(v,&va);
2504:   PetscFree(work);
2505:   return(0);
2506: }
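
The routine combines MatGetRowMax() on the diagonal block with magnitudes of the off-diagonal block exchanged between processes, so v ends up holding the largest-magnitude entry in each full (symmetric) row. A hedged calling fragment (m and M are assumed to be the local and global row counts of A):

   Vec v;
   VecCreateMPI(PETSC_COMM_WORLD,m,M,&v);  /* same row layout as A */
   MatGetRowMax(A,v);  /* v[i] ~ largest-magnitude entry of row i, per the code above */
   /* ... */
   VecDestroy(v);
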

2510: PetscErrorCode MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2511: {
2512:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2514:   PetscInt       mbs=mat->mbs,bs=matin->bs;
2515:   PetscScalar    mone=-1.0,*x,*b,*ptr,zero=0.0;
2516:   Vec            bb1;
2517: 
2519:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2520:   if (bs > 1)
2521:     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2523:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2524:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2525:       (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2526:       its--;
2527:     }

2529:     VecDuplicate(bb,&bb1);
2530:     while (its--){
2531: 
2532:       /* lower triangular part: slvec0b = - B^T*xx */
2533:       (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2534: 
2535:       /* copy xx into slvec0a */
2536:       VecGetArray(mat->slvec0,&ptr);
2537:       VecGetArray(xx,&x);
2538:       PetscMemcpy(ptr,x,bs*mbs*sizeof(PetscScalar));
2539:       VecRestoreArray(mat->slvec0,&ptr);

2541:       VecScale(&mone,mat->slvec0);

2543:       /* copy bb into slvec1a */
2544:       VecGetArray(mat->slvec1,&ptr);
2545:       VecGetArray(bb,&b);
2546:       PetscMemcpy(ptr,b,bs*mbs*sizeof(PetscScalar));
2547:       VecRestoreArray(mat->slvec1,&ptr);

2549:       /* set slvec1b = 0 */
2550:       VecSet(&zero,mat->slvec1b);

2552:       VecScatterBegin(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);
2553:       VecRestoreArray(xx,&x);
2554:       VecRestoreArray(bb,&b);
2555:       VecScatterEnd(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);

2557:       /* upper triangular part: bb1 = bb1 - B*x */
2558:       (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2559: 
2560:       /* local diagonal sweep */
2561:       (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2562:     }
2563:     VecDestroy(bb1);
2564:   } else {
2565:     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2566:   }
2567:   return(0);
2568: }
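
The sweep above forms bb1 = bb - B^T*x - B*x through the slvec0/slvec1 scatter and then applies the sequential symmetric relaxation on the diagonal block. A hedged direct-call fragment, assuming the public MatRelax() wrapper mirrors the internal signature declared at the top of this file:

   /* one local symmetric sweep, omega = 1.0, starting from a zero guess */
   MatRelax(A,b,1.0,SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS,
            0.0,1,1,x);
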

2572: PetscErrorCode MatRelax_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2573: {
2574:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2576:   PetscScalar    mone=-1.0;
2577:   Vec            lvec1,bb1;
2578: 
2580:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2581:   if (matin->bs > 1)
2582:     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2584:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2585:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2586:       (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2587:       its--;
2588:     }

2590:     VecDuplicate(mat->lvec,&lvec1);
2591:     VecDuplicate(bb,&bb1);
2592:     while (its--){
2593:       VecScatterBegin(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);
2594: 
2595:       /* lower diagonal part: bb1 = bb - B^T*xx */
2596:       (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2597:       VecScale(&mone,lvec1);

2599:       VecScatterEnd(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);
2600:       VecCopy(bb,bb1);
2601:       VecScatterBegin(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);

2603:       /* upper diagonal part: bb1 = bb1 - B*x */
2604:       VecScale(&mone,mat->lvec);
2605:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);

2607:       VecScatterEnd(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);
2608: 
2609:       /* diagonal sweep */
2610:       (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2611:     }
2612:     VecDestroy(lvec1);
2613:     VecDestroy(bb1);
2614:   } else {
2615:     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2616:   }
2617:   return(0);
2618: }