Actual source code: mpiaij.c
petsc-3.6.0 2015-06-09
2: #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
3: #include <petsc/private/vecimpl.h>
4: #include <petsc/private/isimpl.h>
5: #include <petscblaslapack.h>
6: #include <petscsf.h>
8: /*MC
9: MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.
11: This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
12: and MATMPIAIJ otherwise. As a result, for single process communicators,
13: MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported
14: for communicators controlling multiple processes. It is recommended that you call both of
15: the above preallocation routines for simplicity.
17: Options Database Keys:
18: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()
20: Developer Notes: Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, and MATAIJCRL; the type also automatically switches over to use inodes when
21: enough of them exist.
23: Level: beginner
25: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
26: M*/
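/*
   Illustrative usage sketch (assumed typical calling sequence; comm, M and N are placeholders,
   not part of this source file). Setting the type to MATAIJ and calling both preallocation
   routines lets the same code run on one process or many; the call that does not apply is ignored.

      Mat A;
      MatCreate(comm,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATAIJ);
      MatSetFromOptions(A);
      MatSeqAIJSetPreallocation(A,5,NULL);           used when the communicator has one process
      MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);    used when the communicator has several processes
*/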
28: /*MC
29: MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.
31: This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
32: and MATMPIAIJCRL otherwise. As a result, for single process communicators,
33: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
34: for communicators controlling multiple processes. It is recommended that you call both of
35: the above preallocation routines for simplicity.
37: Options Database Keys:
38: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()
40: Level: beginner
42: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
43: M*/
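/*
   Minimal sketch (assumed usage): the CRL variant is normally requested at run time with
   "-mat_type aijcrl" on a matrix that calls MatSetFromOptions(), or set explicitly with

      MatSetType(A,MATAIJCRL);

   after which the usual AIJ preallocation and assembly calls apply unchanged.
*/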
47: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
48: {
49: PetscErrorCode ierr;
50: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
51: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data;
52: Mat_SeqAIJ *b = (Mat_SeqAIJ*)mat->B->data;
53: const PetscInt *ia,*ib;
54: const MatScalar *aa,*bb;
55: PetscInt na,nb,i,j,*rows,cnt=0,n0rows;
56: PetscInt m = M->rmap->n,rstart = M->rmap->rstart;
59: *keptrows = 0;
60: ia = a->i;
61: ib = b->i;
62: for (i=0; i<m; i++) {
63: na = ia[i+1] - ia[i];
64: nb = ib[i+1] - ib[i];
65: if (!na && !nb) {
66: cnt++;
67: goto ok1;
68: }
69: aa = a->a + ia[i];
70: for (j=0; j<na; j++) {
71: if (aa[j] != 0.0) goto ok1;
72: }
73: bb = b->a + ib[i];
74: for (j=0; j <nb; j++) {
75: if (bb[j] != 0.0) goto ok1;
76: }
77: cnt++;
78: ok1:;
79: }
80: MPI_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
81: if (!n0rows) return(0);
82: PetscMalloc1(M->rmap->n-cnt,&rows);
83: cnt = 0;
84: for (i=0; i<m; i++) {
85: na = ia[i+1] - ia[i];
86: nb = ib[i+1] - ib[i];
87: if (!na && !nb) continue;
88: aa = a->a + ia[i];
89: for (j=0; j<na;j++) {
90: if (aa[j] != 0.0) {
91: rows[cnt++] = rstart + i;
92: goto ok2;
93: }
94: }
95: bb = b->a + ib[i];
96: for (j=0; j<nb; j++) {
97: if (bb[j] != 0.0) {
98: rows[cnt++] = rstart + i;
99: goto ok2;
100: }
101: }
102: ok2:;
103: }
104: ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
105: return(0);
106: }
110: PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
111: {
112: PetscErrorCode ierr;
113: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) Y->data;
116: if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
117: MatDiagonalSet(aij->A,D,is);
118: } else {
119: MatDiagonalSet_Default(Y,D,is);
120: }
121: return(0);
122: }
127: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
128: {
129: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)M->data;
131: PetscInt i,rstart,nrows,*rows;
134: *zrows = NULL;
135: MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
136: MatGetOwnershipRange(M,&rstart,NULL);
137: for (i=0; i<nrows; i++) rows[i] += rstart;
138: ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
139: return(0);
140: }
144: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
145: {
147: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
148: PetscInt i,n,*garray = aij->garray;
149: Mat_SeqAIJ *a_aij = (Mat_SeqAIJ*) aij->A->data;
150: Mat_SeqAIJ *b_aij = (Mat_SeqAIJ*) aij->B->data;
151: PetscReal *work;
154: MatGetSize(A,NULL,&n);
155: PetscCalloc1(n,&work);
156: if (type == NORM_2) {
157: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
158: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
159: }
160: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
161: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
162: }
163: } else if (type == NORM_1) {
164: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
165: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
166: }
167: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
168: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
169: }
170: } else if (type == NORM_INFINITY) {
171: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
172: work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
173: }
174: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
175: work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
176: }
178: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
179: if (type == NORM_INFINITY) {
180: MPI_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
181: } else {
182: MPI_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
183: }
184: PetscFree(work);
185: if (type == NORM_2) {
186: for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
187: }
188: return(0);
189: }
193: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
194: {
195: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
196: IS sis,gis;
197: PetscErrorCode ierr;
198: const PetscInt *isis,*igis;
199: PetscInt n,*iis,nsis,ngis,rstart,i;
202: MatFindOffBlockDiagonalEntries(a->A,&sis);
203: MatFindNonzeroRows(a->B,&gis);
204: ISGetSize(gis,&ngis);
205: ISGetSize(sis,&nsis);
206: ISGetIndices(sis,&isis);
207: ISGetIndices(gis,&igis);
209: PetscMalloc1(ngis+nsis,&iis);
210: PetscMemcpy(iis,igis,ngis*sizeof(PetscInt));
211: PetscMemcpy(iis+ngis,isis,nsis*sizeof(PetscInt));
212: n = ngis + nsis;
213: PetscSortRemoveDupsInt(&n,iis);
214: MatGetOwnershipRange(A,&rstart,NULL);
215: for (i=0; i<n; i++) iis[i] += rstart;
216: ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);
218: ISRestoreIndices(sis,&isis);
219: ISRestoreIndices(gis,&igis);
220: ISDestroy(&sis);
221: ISDestroy(&gis);
222: return(0);
223: }
227: /*
228: Distributes a SeqAIJ matrix across a set of processes. Code stolen from
229: MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.
231: Only for square matrices
233: Used by a preconditioner, hence PETSC_EXTERN
234: */
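/*
   Hypothetical call sketch (assumptions: gseq is a square MATSEQAIJ matrix whose entries are
   significant on process 0, and mlocal is the number of rows this process should own):

      Mat dist;
      MatDistribute_MPIAIJ(comm,gseq,mlocal,MAT_INITIAL_MATRIX,&dist);
      ...
      MatDistribute_MPIAIJ(comm,gseq,mlocal,MAT_REUSE_MATRIX,&dist);    reuses the layout, refreshes the values
*/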
235: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
236: {
237: PetscMPIInt rank,size;
238: PetscInt *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
240: Mat mat;
241: Mat_SeqAIJ *gmata;
242: PetscMPIInt tag;
243: MPI_Status status;
244: PetscBool aij;
245: MatScalar *gmataa,*ao,*ad,*gmataarestore=0;
248: MPI_Comm_rank(comm,&rank);
249: MPI_Comm_size(comm,&size);
250: if (!rank) {
251: PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
252: if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
253: }
254: if (reuse == MAT_INITIAL_MATRIX) {
255: MatCreate(comm,&mat);
256: MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
257: MatGetBlockSizes(gmat,&bses[0],&bses[1]);
258: MPI_Bcast(bses,2,MPIU_INT,0,comm);
259: MatSetBlockSizes(mat,bses[0],bses[1]);
260: MatSetType(mat,MATAIJ);
261: PetscMalloc1(size+1,&rowners);
262: PetscMalloc2(m,&dlens,m,&olens);
263: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
265: rowners[0] = 0;
266: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
267: rstart = rowners[rank];
268: rend = rowners[rank+1];
269: PetscObjectGetNewTag((PetscObject)mat,&tag);
270: if (!rank) {
271: gmata = (Mat_SeqAIJ*) gmat->data;
272: /* send row lengths to all processors */
273: for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
274: for (i=1; i<size; i++) {
275: MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
276: }
277: /* determine the number of diagonal and off-diagonal entries in each row */
278: PetscMemzero(olens,m*sizeof(PetscInt));
279: PetscCalloc1(m,&ld);
280: jj = 0;
281: for (i=0; i<m; i++) {
282: for (j=0; j<dlens[i]; j++) {
283: if (gmata->j[jj] < rstart) ld[i]++;
284: if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
285: jj++;
286: }
287: }
288: /* send column indices to other processes */
289: for (i=1; i<size; i++) {
290: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
291: MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
292: MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
293: }
295: /* send numerical values to other processes */
296: for (i=1; i<size; i++) {
297: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
298: MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
299: }
300: gmataa = gmata->a;
301: gmataj = gmata->j;
303: } else {
304: /* receive row lengths */
305: MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
306: /* receive column indices */
307: MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
308: PetscMalloc2(nz,&gmataa,nz,&gmataj);
309: MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
310: /* determine the number of diagonal and off-diagonal entries in each row */
311: PetscMemzero(olens,m*sizeof(PetscInt));
312: PetscCalloc1(m,&ld);
313: jj = 0;
314: for (i=0; i<m; i++) {
315: for (j=0; j<dlens[i]; j++) {
316: if (gmataj[jj] < rstart) ld[i]++;
317: if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
318: jj++;
319: }
320: }
321: /* receive numerical values */
322: PetscMemzero(gmataa,nz*sizeof(PetscScalar));
323: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
324: }
325: /* set preallocation */
326: for (i=0; i<m; i++) {
327: dlens[i] -= olens[i];
328: }
329: MatSeqAIJSetPreallocation(mat,0,dlens);
330: MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);
332: for (i=0; i<m; i++) {
333: dlens[i] += olens[i];
334: }
335: cnt = 0;
336: for (i=0; i<m; i++) {
337: row = rstart + i;
338: MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
339: cnt += dlens[i];
340: }
341: if (rank) {
342: PetscFree2(gmataa,gmataj);
343: }
344: PetscFree2(dlens,olens);
345: PetscFree(rowners);
347: ((Mat_MPIAIJ*)(mat->data))->ld = ld;
349: *inmat = mat;
350: } else { /* column indices are already set; only need to move over numerical values from process 0 */
351: Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
352: Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
353: mat = *inmat;
354: PetscObjectGetNewTag((PetscObject)mat,&tag);
355: if (!rank) {
356: /* send numerical values to other processes */
357: gmata = (Mat_SeqAIJ*) gmat->data;
358: MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
359: gmataa = gmata->a;
360: for (i=1; i<size; i++) {
361: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
362: MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
363: }
364: nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
365: } else {
366: /* receive numerical values from process 0*/
367: nz = Ad->nz + Ao->nz;
368: PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
369: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
370: }
371: /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
372: ld = ((Mat_MPIAIJ*)(mat->data))->ld;
373: ad = Ad->a;
374: ao = Ao->a;
375: if (mat->rmap->n) {
376: i = 0;
377: nz = ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
378: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
379: }
380: for (i=1; i<mat->rmap->n; i++) {
381: nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
382: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
383: }
384: i--;
385: if (mat->rmap->n) {
386: nz = Ao->i[i+1] - Ao->i[i] - ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));
387: }
388: if (rank) {
389: PetscFree(gmataarestore);
390: }
391: }
392: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
393: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
394: return(0);
395: }
397: /*
398: Local utility routine that creates a mapping from the global column
399: number to the local number in the off-diagonal part of the local
400: storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
401: a slightly higher hash table cost; without it, it is not scalable (each processor
402: has an order N integer array) but is fast to access. A lookup sketch follows this routine.
403: */
406: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
407: {
408: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
410: PetscInt n = aij->B->cmap->n,i;
413: if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
414: #if defined(PETSC_USE_CTABLE)
415: PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
416: for (i=0; i<n; i++) {
417: PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
418: }
419: #else
420: PetscCalloc1(mat->cmap->N+1,&aij->colmap);
421: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
422: for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
423: #endif
424: return(0);
425: }
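/*
   Lookup sketch mirroring how colmap is consumed in MatSetValues_MPIAIJ() below ("gcol" is an
   assumed global column index). The map stores local index + 1 so that 0 can mean "not present":

   #if defined(PETSC_USE_CTABLE)
      PetscTableFind(aij->colmap,gcol+1,&col);
      col--;                                       now the local index, or -1 if gcol is absent
   #else
      col = aij->colmap[gcol] - 1;
   #endif
*/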
427: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol) \
428: { \
429: if (col <= lastcol1) low1 = 0; \
430: else high1 = nrow1; \
431: lastcol1 = col;\
432: while (high1-low1 > 5) { \
433: t = (low1+high1)/2; \
434: if (rp1[t] > col) high1 = t; \
435: else low1 = t; \
436: } \
437: for (_i=low1; _i<high1; _i++) { \
438: if (rp1[_i] > col) break; \
439: if (rp1[_i] == col) { \
440: if (addv == ADD_VALUES) ap1[_i] += value; \
441: else ap1[_i] = value; \
442: goto a_noinsert; \
443: } \
444: } \
445: if (value == 0.0 && ignorezeroentries) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
446: if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
447: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
448: MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
449: N = nrow1++ - 1; a->nz++; high1++; \
450: /* shift up all the later entries in this row */ \
451: for (ii=N; ii>=_i; ii--) { \
452: rp1[ii+1] = rp1[ii]; \
453: ap1[ii+1] = ap1[ii]; \
454: } \
455: rp1[_i] = col; \
456: ap1[_i] = value; \
457: A->nonzerostate++;\
458: a_noinsert: ; \
459: ailen[row] = nrow1; \
460: }
463: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
464: { \
465: if (col <= lastcol2) low2 = 0; \
466: else high2 = nrow2; \
467: lastcol2 = col; \
468: while (high2-low2 > 5) { \
469: t = (low2+high2)/2; \
470: if (rp2[t] > col) high2 = t; \
471: else low2 = t; \
472: } \
473: for (_i=low2; _i<high2; _i++) { \
474: if (rp2[_i] > col) break; \
475: if (rp2[_i] == col) { \
476: if (addv == ADD_VALUES) ap2[_i] += value; \
477: else ap2[_i] = value; \
478: goto b_noinsert; \
479: } \
480: } \
481: if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
482: if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
483: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
484: MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
485: N = nrow2++ - 1; b->nz++; high2++; \
486: /* shift up all the later entries in this row */ \
487: for (ii=N; ii>=_i; ii--) { \
488: rp2[ii+1] = rp2[ii]; \
489: ap2[ii+1] = ap2[ii]; \
490: } \
491: rp2[_i] = col; \
492: ap2[_i] = value; \
493: B->nonzerostate++; \
494: b_noinsert: ; \
495: bilen[row] = nrow2; \
496: }
500: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
501: {
502: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
503: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
505: PetscInt l,*garray = mat->garray,diag;
508: /* code only works for square matrices A */
510: /* find size of row to the left of the diagonal part */
511: MatGetOwnershipRange(A,&diag,0);
512: row = row - diag;
513: for (l=0; l<b->i[row+1]-b->i[row]; l++) {
514: if (garray[b->j[b->i[row]+l]] > diag) break;
515: }
516: PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));
518: /* diagonal part */
519: PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));
521: /* right of diagonal part */
522: PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));
523: return(0);
524: }
528: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
529: {
530: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
531: PetscScalar value;
533: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
534: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
535: PetscBool roworiented = aij->roworiented;
537: /* Some Variables required in the macro */
538: Mat A = aij->A;
539: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
540: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
541: MatScalar *aa = a->a;
542: PetscBool ignorezeroentries = a->ignorezeroentries;
543: Mat B = aij->B;
544: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
545: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
546: MatScalar *ba = b->a;
548: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
549: PetscInt nonew;
550: MatScalar *ap1,*ap2;
553: for (i=0; i<m; i++) {
554: if (im[i] < 0) continue;
555: #if defined(PETSC_USE_DEBUG)
556: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
557: #endif
558: if (im[i] >= rstart && im[i] < rend) {
559: row = im[i] - rstart;
560: lastcol1 = -1;
561: rp1 = aj + ai[row];
562: ap1 = aa + ai[row];
563: rmax1 = aimax[row];
564: nrow1 = ailen[row];
565: low1 = 0;
566: high1 = nrow1;
567: lastcol2 = -1;
568: rp2 = bj + bi[row];
569: ap2 = ba + bi[row];
570: rmax2 = bimax[row];
571: nrow2 = bilen[row];
572: low2 = 0;
573: high2 = nrow2;
575: for (j=0; j<n; j++) {
576: if (roworiented) value = v[i*n+j];
577: else value = v[i+j*m];
578: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
579: if (in[j] >= cstart && in[j] < cend) {
580: col = in[j] - cstart;
581: nonew = a->nonew;
582: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
583: } else if (in[j] < 0) continue;
584: #if defined(PETSC_USE_DEBUG)
585: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
586: #endif
587: else {
588: if (mat->was_assembled) {
589: if (!aij->colmap) {
590: MatCreateColmap_MPIAIJ_Private(mat);
591: }
592: #if defined(PETSC_USE_CTABLE)
593: PetscTableFind(aij->colmap,in[j]+1,&col);
594: col--;
595: #else
596: col = aij->colmap[in[j]] - 1;
597: #endif
598: if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
599: MatDisAssemble_MPIAIJ(mat);
600: col = in[j];
601: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
602: B = aij->B;
603: b = (Mat_SeqAIJ*)B->data;
604: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
605: rp2 = bj + bi[row];
606: ap2 = ba + bi[row];
607: rmax2 = bimax[row];
608: nrow2 = bilen[row];
609: low2 = 0;
610: high2 = nrow2;
611: bm = aij->B->rmap->n;
612: ba = b->a;
613: } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
614: } else col = in[j];
615: nonew = b->nonew;
616: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
617: }
618: }
619: } else {
620: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
621: if (!aij->donotstash) {
622: mat->assembled = PETSC_FALSE;
623: if (roworiented) {
624: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
625: } else {
626: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
627: }
628: }
629: }
630: }
631: return(0);
632: }
636: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
637: {
638: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
640: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
641: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
644: for (i=0; i<m; i++) {
645: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
646: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
647: if (idxm[i] >= rstart && idxm[i] < rend) {
648: row = idxm[i] - rstart;
649: for (j=0; j<n; j++) {
650: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
651: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
652: if (idxn[j] >= cstart && idxn[j] < cend) {
653: col = idxn[j] - cstart;
654: MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
655: } else {
656: if (!aij->colmap) {
657: MatCreateColmap_MPIAIJ_Private(mat);
658: }
659: #if defined(PETSC_USE_CTABLE)
660: PetscTableFind(aij->colmap,idxn[j]+1,&col);
661: col--;
662: #else
663: col = aij->colmap[idxn[j]] - 1;
664: #endif
665: if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
666: else {
667: MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
668: }
669: }
670: }
671: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
672: }
673: return(0);
674: }
676: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);
680: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
681: {
682: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
684: PetscInt nstash,reallocs;
685: InsertMode addv;
688: if (aij->donotstash || mat->nooffprocentries) return(0);
690: /* make sure all processors are in either INSERT_VALUES or ADD_VALUES mode */
691: MPI_Allreduce((PetscEnum*)&mat->insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,PetscObjectComm((PetscObject)mat));
692: if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted, others added");
693: mat->insertmode = addv; /* in case this processor had no cache */
695: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
696: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
697: PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
698: return(0);
699: }
703: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
704: {
705: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
706: Mat_SeqAIJ *a = (Mat_SeqAIJ*)aij->A->data;
708: PetscMPIInt n;
709: PetscInt i,j,rstart,ncols,flg;
710: PetscInt *row,*col;
711: PetscBool other_disassembled;
712: PetscScalar *val;
713: InsertMode addv = mat->insertmode;
715: /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */
718: if (!aij->donotstash && !mat->nooffprocentries) {
719: while (1) {
720: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
721: if (!flg) break;
723: for (i=0; i<n; ) {
724: /* Now identify the consecutive vals belonging to the same row */
725: for (j=i,rstart=row[j]; j<n; j++) {
726: if (row[j] != rstart) break;
727: }
728: if (j < n) ncols = j-i;
729: else ncols = n-i;
730: /* Now assemble all these values with a single function call */
731: MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,addv);
733: i = j;
734: }
735: }
736: MatStashScatterEnd_Private(&mat->stash);
737: }
738: MatAssemblyBegin(aij->A,mode);
739: MatAssemblyEnd(aij->A,mode);
741: /* determine if any processor has disassembled; if so we must
742: also disassemble ourselves, in order that we may reassemble. */
743: /*
744: if nonzero structure of submatrix B cannot change then we know that
745: no processor disassembled thus we can skip this stuff
746: */
747: if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
748: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
749: if (mat->was_assembled && !other_disassembled) {
750: MatDisAssemble_MPIAIJ(mat);
751: }
752: }
753: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
754: MatSetUpMultiply_MPIAIJ(mat);
755: }
756: MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
757: MatAssemblyBegin(aij->B,mode);
758: MatAssemblyEnd(aij->B,mode);
760: PetscFree2(aij->rowvalues,aij->rowindices);
762: aij->rowvalues = 0;
764: VecDestroy(&aij->diag);
765: if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;
767: /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
768: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
769: PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
770: MPI_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
771: }
772: return(0);
773: }
777: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
778: {
779: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
783: MatZeroEntries(l->A);
784: MatZeroEntries(l->B);
785: return(0);
786: }
790: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
791: {
792: Mat_MPIAIJ *mat = (Mat_MPIAIJ *) A->data;
793: PetscInt *owners = A->rmap->range;
794: PetscInt n = A->rmap->n;
795: PetscSF sf;
796: PetscInt *lrows;
797: PetscSFNode *rrows;
798: PetscInt r, p = 0, len = 0;
802: /* Create SF where leaves are input rows and roots are owned rows */
803: PetscMalloc1(n, &lrows);
804: for (r = 0; r < n; ++r) lrows[r] = -1;
805: if (!A->nooffproczerorows) {PetscMalloc1(N, &rrows);}
806: for (r = 0; r < N; ++r) {
807: const PetscInt idx = rows[r];
808: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
809: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
810: PetscLayoutFindOwner(A->rmap,idx,&p);
811: }
812: if (A->nooffproczerorows) {
813: if (p != mat->rank) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"MAT_NO_OFF_PROC_ZERO_ROWS set, but row %D is not owned by rank %d",idx,mat->rank);
814: lrows[len++] = idx - owners[p];
815: } else {
816: rrows[r].rank = p;
817: rrows[r].index = rows[r] - owners[p];
818: }
819: }
820: if (!A->nooffproczerorows) {
821: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
822: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
823: /* Collect flags for rows to be zeroed */
824: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt*)rows, lrows, MPI_LOR);
825: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt*)rows, lrows, MPI_LOR);
826: PetscSFDestroy(&sf);
827: /* Compress and put in row numbers */
828: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
829: }
830: /* fix right hand side if needed */
831: if (x && b) {
832: const PetscScalar *xx;
833: PetscScalar *bb;
835: VecGetArrayRead(x, &xx);
836: VecGetArray(b, &bb);
837: for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
838: VecRestoreArrayRead(x, &xx);
839: VecRestoreArray(b, &bb);
840: }
841: /* Must zero mat->B before mat->A because the (diag != 0.0) case below may put values into mat->B */
842: MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
843: if ((diag != 0.0) && (mat->A->rmap->N == mat->A->cmap->N)) {
844: MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
845: } else if (diag != 0.0) {
846: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
847: if (((Mat_SeqAIJ *) mat->A->data)->nonew) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatZeroRows() on rectangular matrices cannot be used with the Mat options\nMAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
848: for (r = 0; r < len; ++r) {
849: const PetscInt row = lrows[r] + A->rmap->rstart;
850: MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
851: }
852: MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
853: MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
854: } else {
855: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
856: }
857: PetscFree(lrows);
859: /* only change matrix nonzero state if pattern was allowed to be changed */
860: if (!((Mat_SeqAIJ*)(mat->A->data))->keepnonzeropattern) {
861: PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
862: MPI_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
863: }
864: return(0);
865: }
869: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
870: {
871: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
872: PetscErrorCode ierr;
873: PetscMPIInt n = A->rmap->n;
874: PetscInt i,j,r,m,p = 0,len = 0;
875: PetscInt *lrows,*owners = A->rmap->range;
876: PetscSFNode *rrows;
877: PetscSF sf;
878: const PetscScalar *xx;
879: PetscScalar *bb,*mask;
880: Vec xmask,lmask;
881: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)l->B->data;
882: const PetscInt *aj, *ii,*ridx;
883: PetscScalar *aa;
886: /* Create SF where leaves are input rows and roots are owned rows */
887: PetscMalloc1(n, &lrows);
888: for (r = 0; r < n; ++r) lrows[r] = -1;
889: PetscMalloc1(N, &rrows);
890: for (r = 0; r < N; ++r) {
891: const PetscInt idx = rows[r];
892: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
893: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
894: PetscLayoutFindOwner(A->rmap,idx,&p);
895: }
896: rrows[r].rank = p;
897: rrows[r].index = rows[r] - owners[p];
898: }
899: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
900: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
901: /* Collect flags for rows to be zeroed */
902: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
903: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
904: PetscSFDestroy(&sf);
905: /* Compress and put in row numbers */
906: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
907: /* zero diagonal part of matrix */
908: MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
909: /* handle off diagonal part of matrix */
910: MatCreateVecs(A,&xmask,NULL);
911: VecDuplicate(l->lvec,&lmask);
912: VecGetArray(xmask,&bb);
913: for (i=0; i<len; i++) bb[lrows[i]] = 1;
914: VecRestoreArray(xmask,&bb);
915: VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
916: VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
917: VecDestroy(&xmask);
918: if (x) {
919: VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
920: VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
921: VecGetArrayRead(l->lvec,&xx);
922: VecGetArray(b,&bb);
923: }
924: VecGetArray(lmask,&mask);
925: /* remove zeroed rows of off diagonal matrix */
926: ii = aij->i;
927: for (i=0; i<len; i++) {
928: PetscMemzero(aij->a + ii[lrows[i]],(ii[lrows[i]+1] - ii[lrows[i]])*sizeof(PetscScalar));
929: }
930: /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
931: if (aij->compressedrow.use) {
932: m = aij->compressedrow.nrows;
933: ii = aij->compressedrow.i;
934: ridx = aij->compressedrow.rindex;
935: for (i=0; i<m; i++) {
936: n = ii[i+1] - ii[i];
937: aj = aij->j + ii[i];
938: aa = aij->a + ii[i];
940: for (j=0; j<n; j++) {
941: if (PetscAbsScalar(mask[*aj])) {
942: if (b) bb[*ridx] -= *aa*xx[*aj];
943: *aa = 0.0;
944: }
945: aa++;
946: aj++;
947: }
948: ridx++;
949: }
950: } else { /* do not use compressed row format */
951: m = l->B->rmap->n;
952: for (i=0; i<m; i++) {
953: n = ii[i+1] - ii[i];
954: aj = aij->j + ii[i];
955: aa = aij->a + ii[i];
956: for (j=0; j<n; j++) {
957: if (PetscAbsScalar(mask[*aj])) {
958: if (b) bb[i] -= *aa*xx[*aj];
959: *aa = 0.0;
960: }
961: aa++;
962: aj++;
963: }
964: }
965: }
966: if (x) {
967: VecRestoreArray(b,&bb);
968: VecRestoreArrayRead(l->lvec,&xx);
969: }
970: VecRestoreArray(lmask,&mask);
971: VecDestroy(&lmask);
972: PetscFree(lrows);
974: /* only change matrix nonzero state if pattern was allowed to be changed */
975: if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
976: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
977: MPI_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
978: }
979: return(0);
980: }
984: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
985: {
986: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
988: PetscInt nt;
991: VecGetLocalSize(xx,&nt);
992: if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
993: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
994: (*a->A->ops->mult)(a->A,xx,yy);
995: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
996: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
997: return(0);
998: }
1002: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1003: {
1004: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1008: MatMultDiagonalBlock(a->A,bb,xx);
1009: return(0);
1010: }
1014: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1015: {
1016: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1020: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1021: (*a->A->ops->multadd)(a->A,xx,yy,zz);
1022: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1023: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1024: return(0);
1025: }
1029: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1030: {
1031: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1033: PetscBool merged;
1036: VecScatterGetMerged(a->Mvctx,&merged);
1037: /* do nondiagonal part */
1038: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1039: if (!merged) {
1040: /* send it on its way */
1041: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1042: /* do local part */
1043: (*a->A->ops->multtranspose)(a->A,xx,yy);
1044: /* receive remote parts: note this assumes the values are not actually */
1045: /* added in yy until the next line */
1046: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1047: } else {
1048: /* do local part */
1049: (*a->A->ops->multtranspose)(a->A,xx,yy);
1050: /* send it on its way */
1051: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1052: /* values actually were received in the Begin() but we need to call this nop */
1053: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1054: }
1055: return(0);
1056: }
1060: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f)
1061: {
1062: MPI_Comm comm;
1063: Mat_MPIAIJ *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1064: Mat Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1065: IS Me,Notme;
1067: PetscInt M,N,first,last,*notme,i;
1068: PetscMPIInt size;
1071: /* Easy test: symmetric diagonal block */
1072: Bij = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1073: MatIsTranspose(Adia,Bdia,tol,f);
1074: if (!*f) return(0);
1075: PetscObjectGetComm((PetscObject)Amat,&comm);
1076: MPI_Comm_size(comm,&size);
1077: if (size == 1) return(0);
1079: /* Hard test: off-diagonal block. This takes a MatGetSubMatrix. */
1080: MatGetSize(Amat,&M,&N);
1081: MatGetOwnershipRange(Amat,&first,&last);
1082: PetscMalloc1(N-last+first,&notme);
1083: for (i=0; i<first; i++) notme[i] = i;
1084: for (i=last; i<M; i++) notme[i-last+first] = i;
1085: ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1086: ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1087: MatGetSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1088: Aoff = Aoffs[0];
1089: MatGetSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1090: Boff = Boffs[0];
1091: MatIsTranspose(Aoff,Boff,tol,f);
1092: MatDestroyMatrices(1,&Aoffs);
1093: MatDestroyMatrices(1,&Boffs);
1094: ISDestroy(&Me);
1095: ISDestroy(&Notme);
1096: PetscFree(notme);
1097: return(0);
1098: }
1102: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1103: {
1104: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1108: /* do nondiagonal part */
1109: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1110: /* send it on its way */
1111: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1112: /* do local part */
1113: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1114: /* receive remote parts */
1115: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1116: return(0);
1117: }
1119: /*
1120: This only works correctly for square matrices where the subblock A->A is the
1121: diagonal block
1122: */
1125: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1126: {
1128: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1131: if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1132: if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1133: MatGetDiagonal(a->A,v);
1134: return(0);
1135: }
1139: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1140: {
1141: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1145: MatScale(a->A,aa);
1146: MatScale(a->B,aa);
1147: return(0);
1148: }
1152: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1153: {
1154: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1158: #if defined(PETSC_USE_LOG)
1159: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1160: #endif
1161: MatStashDestroy_Private(&mat->stash);
1162: VecDestroy(&aij->diag);
1163: MatDestroy(&aij->A);
1164: MatDestroy(&aij->B);
1165: #if defined(PETSC_USE_CTABLE)
1166: PetscTableDestroy(&aij->colmap);
1167: #else
1168: PetscFree(aij->colmap);
1169: #endif
1170: PetscFree(aij->garray);
1171: VecDestroy(&aij->lvec);
1172: VecScatterDestroy(&aij->Mvctx);
1173: PetscFree2(aij->rowvalues,aij->rowindices);
1174: PetscFree(aij->ld);
1175: PetscFree(mat->data);
1177: PetscObjectChangeTypeName((PetscObject)mat,0);
1178: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1179: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1180: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C",NULL);
1181: PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1182: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1183: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1184: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1185: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1186: #if defined(PETSC_HAVE_ELEMENTAL)
1187: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1188: #endif
1189: return(0);
1190: }
1194: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1195: {
1196: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1197: Mat_SeqAIJ *A = (Mat_SeqAIJ*)aij->A->data;
1198: Mat_SeqAIJ *B = (Mat_SeqAIJ*)aij->B->data;
1200: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
1201: int fd;
1202: PetscInt nz,header[4],*row_lengths,*range=0,rlen,i;
1203: PetscInt nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
1204: PetscScalar *column_values;
1205: PetscInt message_count,flowcontrolcount;
1206: FILE *file;
1209: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1210: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1211: nz = A->nz + B->nz;
1212: PetscViewerBinaryGetDescriptor(viewer,&fd);
1213: if (!rank) {
1214: header[0] = MAT_FILE_CLASSID;
1215: header[1] = mat->rmap->N;
1216: header[2] = mat->cmap->N;
1218: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1219: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1220: /* get largest number of rows any processor has */
1221: rlen = mat->rmap->n;
1222: range = mat->rmap->range;
1223: for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
1224: } else {
1225: MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1226: rlen = mat->rmap->n;
1227: }
1229: /* load up the local row counts */
1230: PetscMalloc1(rlen+1,&row_lengths);
1231: for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1233: /* store the row lengths to the file */
1234: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1235: if (!rank) {
1236: PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
1237: for (i=1; i<size; i++) {
1238: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1239: rlen = range[i+1] - range[i];
1240: MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1241: PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
1242: }
1243: PetscViewerFlowControlEndMaster(viewer,&message_count);
1244: } else {
1245: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1246: MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1247: PetscViewerFlowControlEndWorker(viewer,&message_count);
1248: }
1249: PetscFree(row_lengths);
1251: /* load up the local column indices */
1252: nzmax = nz; /* process 0 needs as much space as the largest amount any process needs */
1253: MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1254: PetscMalloc1(nzmax+1,&column_indices);
1255: cnt = 0;
1256: for (i=0; i<mat->rmap->n; i++) {
1257: for (j=B->i[i]; j<B->i[i+1]; j++) {
1258: if ((col = garray[B->j[j]]) > cstart) break;
1259: column_indices[cnt++] = col;
1260: }
1261: for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1262: for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1263: }
1264: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1266: /* store the column indices to the file */
1267: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1268: if (!rank) {
1269: MPI_Status status;
1270: PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1271: for (i=1; i<size; i++) {
1272: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1273: MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1274: if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1275: MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1276: PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
1277: }
1278: PetscViewerFlowControlEndMaster(viewer,&message_count);
1279: } else {
1280: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1281: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1282: MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1283: PetscViewerFlowControlEndWorker(viewer,&message_count);
1284: }
1285: PetscFree(column_indices);
1287: /* load up the local column values */
1288: PetscMalloc1(nzmax+1,&column_values);
1289: cnt = 0;
1290: for (i=0; i<mat->rmap->n; i++) {
1291: for (j=B->i[i]; j<B->i[i+1]; j++) {
1292: if (garray[B->j[j]] > cstart) break;
1293: column_values[cnt++] = B->a[j];
1294: }
1295: for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1296: for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1297: }
1298: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1300: /* store the column values to the file */
1301: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1302: if (!rank) {
1303: MPI_Status status;
1304: PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1305: for (i=1; i<size; i++) {
1306: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1307: MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1308: if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1309: MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));
1310: PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1311: }
1312: PetscViewerFlowControlEndMaster(viewer,&message_count);
1313: } else {
1314: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1315: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1316: MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1317: PetscViewerFlowControlEndWorker(viewer,&message_count);
1318: }
1319: PetscFree(column_values);
1321: PetscViewerBinaryGetInfoPointer(viewer,&file);
1322: if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1323: return(0);
1324: }
1326: #include <petscdraw.h>
1329: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1330: {
1331: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1332: PetscErrorCode ierr;
1333: PetscMPIInt rank = aij->rank,size = aij->size;
1334: PetscBool isdraw,iascii,isbinary;
1335: PetscViewer sviewer;
1336: PetscViewerFormat format;
1339: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1340: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1341: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1342: if (iascii) {
1343: PetscViewerGetFormat(viewer,&format);
1344: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1345: MatInfo info;
1346: PetscBool inodes;
1348: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1349: MatGetInfo(mat,MAT_LOCAL,&info);
1350: MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1351: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
1352: if (!inodes) {
1353: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
1354: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
1355: } else {
1356: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
1357: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
1358: }
1359: MatGetInfo(aij->A,MAT_LOCAL,&info);
1360: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1361: MatGetInfo(aij->B,MAT_LOCAL,&info);
1362: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1363: PetscViewerFlush(viewer);
1364: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
1365: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1366: VecScatterView(aij->Mvctx,viewer);
1367: return(0);
1368: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1369: PetscInt inodecount,inodelimit,*inodes;
1370: MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1371: if (inodes) {
1372: PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1373: } else {
1374: PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1375: }
1376: return(0);
1377: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1378: return(0);
1379: }
1380: } else if (isbinary) {
1381: if (size == 1) {
1382: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1383: MatView(aij->A,viewer);
1384: } else {
1385: MatView_MPIAIJ_Binary(mat,viewer);
1386: }
1387: return(0);
1388: } else if (isdraw) {
1389: PetscDraw draw;
1390: PetscBool isnull;
1391: PetscViewerDrawGetDraw(viewer,0,&draw);
1392: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
1393: }
1395: {
1396: /* assemble the entire matrix onto first processor. */
1397: Mat A;
1398: Mat_SeqAIJ *Aloc;
1399: PetscInt M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1400: MatScalar *a;
1402: MatCreate(PetscObjectComm((PetscObject)mat),&A);
1403: if (!rank) {
1404: MatSetSizes(A,M,N,M,N);
1405: } else {
1406: MatSetSizes(A,0,0,M,N);
1407: }
1408: /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1409: MatSetType(A,MATMPIAIJ);
1410: MatMPIAIJSetPreallocation(A,0,NULL,0,NULL);
1411: MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
1412: PetscLogObjectParent((PetscObject)mat,(PetscObject)A);
1414: /* copy over the A part */
1415: Aloc = (Mat_SeqAIJ*)aij->A->data;
1416: m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1417: row = mat->rmap->rstart;
1418: for (i=0; i<ai[m]; i++) aj[i] += mat->cmap->rstart;
1419: for (i=0; i<m; i++) {
1420: MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);
1421: row++;
1422: a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1423: }
1424: aj = Aloc->j;
1425: for (i=0; i<ai[m]; i++) aj[i] -= mat->cmap->rstart;
1427: /* copy over the B part */
1428: Aloc = (Mat_SeqAIJ*)aij->B->data;
1429: m = aij->B->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1430: row = mat->rmap->rstart;
1431: PetscMalloc1(ai[m]+1,&cols);
1432: ct = cols;
1433: for (i=0; i<ai[m]; i++) cols[i] = aij->garray[aj[i]];
1434: for (i=0; i<m; i++) {
1435: MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);
1436: row++;
1437: a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1438: }
1439: PetscFree(ct);
1440: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1441: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1442: /*
1443: Everyone has to call to draw the matrix since the graphics waits are
1444: synchronized across all processors that share the PetscDraw object
1445: */
1446: PetscViewerGetSingleton(viewer,&sviewer);
1447: if (!rank) {
1448: PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);
1449: MatView_SeqAIJ(((Mat_MPIAIJ*)(A->data))->A,sviewer);
1450: }
1451: PetscViewerRestoreSingleton(viewer,&sviewer);
1452: MatDestroy(&A);
1453: }
1454: return(0);
1455: }
1459: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1460: {
1462: PetscBool iascii,isdraw,issocket,isbinary;
1465: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1466: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1467: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1468: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1469: if (iascii || isdraw || isbinary || issocket) {
1470: MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1471: }
1472: return(0);
1473: }
1477: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1478: {
1479: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1481: Vec bb1 = 0;
1482: PetscBool hasop;
1485: if (flag == SOR_APPLY_UPPER) {
1486: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1487: return(0);
1488: }
1490: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1491: VecDuplicate(bb,&bb1);
1492: }
1494: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1495: if (flag & SOR_ZERO_INITIAL_GUESS) {
1496: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1497: its--;
1498: }
1500: while (its--) {
1501: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1502: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1504: /* update rhs: bb1 = bb - B*x */
1505: VecScale(mat->lvec,-1.0);
1506: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1508: /* local sweep */
1509: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1510: }
1511: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1512: if (flag & SOR_ZERO_INITIAL_GUESS) {
1513: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1514: its--;
1515: }
1516: while (its--) {
1517: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1518: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1520: /* update rhs: bb1 = bb - B*x */
1521: VecScale(mat->lvec,-1.0);
1522: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1524: /* local sweep */
1525: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1526: }
1527: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1528: if (flag & SOR_ZERO_INITIAL_GUESS) {
1529: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1530: its--;
1531: }
1532: while (its--) {
1533: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1534: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1536: /* update rhs: bb1 = bb - B*x */
1537: VecScale(mat->lvec,-1.0);
1538: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1540: /* local sweep */
1541: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1542: }
1543: } else if (flag & SOR_EISENSTAT) {
1544: Vec xx1;
1546: VecDuplicate(bb,&xx1);
1547: (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);
1549: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1550: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1551: if (!mat->diag) {
1552: MatCreateVecs(matin,&mat->diag,NULL);
1553: MatGetDiagonal(matin,mat->diag);
1554: }
1555: MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1556: if (hasop) {
1557: MatMultDiagonalBlock(matin,xx,bb1);
1558: } else {
1559: VecPointwiseMult(bb1,mat->diag,xx);
1560: }
1561: VecAYPX(bb1,(omega-2.0)/omega,bb);
1563: MatMultAdd(mat->B,mat->lvec,bb1,bb1);
1565: /* local sweep */
1566: (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1567: VecAXPY(xx,1.0,xx1);
1568: VecDestroy(&xx1);
1569: } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");
1571: VecDestroy(&bb1);
1572: return(0);
1573: }
1577: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1578: {
1579: Mat aA,aB,Aperm;
1580: const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1581: PetscScalar *aa,*ba;
1582: PetscInt i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1583: PetscSF rowsf,sf;
1584: IS parcolp = NULL;
1585: PetscBool done;
1589: MatGetLocalSize(A,&m,&n);
1590: ISGetIndices(rowp,&rwant);
1591: ISGetIndices(colp,&cwant);
1592: PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);
1594: /* Invert row permutation to find out where my rows should go */
1595: PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1596: PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1597: PetscSFSetFromOptions(rowsf);
1598: for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1599: PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1600: PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1602: /* Invert column permutation to find out where my columns should go */
1603: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1604: PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1605: PetscSFSetFromOptions(sf);
1606: for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1607: PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1608: PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1609: PetscSFDestroy(&sf);
1611: ISRestoreIndices(rowp,&rwant);
1612: ISRestoreIndices(colp,&cwant);
1613: MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);
1615: /* Find out where my gcols should go */
1616: MatGetSize(aB,NULL,&ng);
1617: PetscMalloc1(ng,&gcdest);
1618: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1619: PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1620: PetscSFSetFromOptions(sf);
1621: PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1622: PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1623: PetscSFDestroy(&sf);
1625: PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1626: MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1627: MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1628: for (i=0; i<m; i++) {
1629: PetscInt row = rdest[i],rowner;
1630: PetscLayoutFindOwner(A->rmap,row,&rowner);
1631: for (j=ai[i]; j<ai[i+1]; j++) {
1632: PetscInt cowner,col = cdest[aj[j]];
1633: PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1634: if (rowner == cowner) dnnz[i]++;
1635: else onnz[i]++;
1636: }
1637: for (j=bi[i]; j<bi[i+1]; j++) {
1638: PetscInt cowner,col = gcdest[bj[j]];
1639: PetscLayoutFindOwner(A->cmap,col,&cowner);
1640: if (rowner == cowner) dnnz[i]++;
1641: else onnz[i]++;
1642: }
1643: }
1644: PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1645: PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1646: PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1647: PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1648: PetscSFDestroy(&rowsf);
1650: MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1651: MatSeqAIJGetArray(aA,&aa);
1652: MatSeqAIJGetArray(aB,&ba);
1653: for (i=0; i<m; i++) {
1654: PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1655: PetscInt j0,rowlen;
1656: rowlen = ai[i+1] - ai[i];
1657: for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than number of rows m, so sum in batches */
1658: for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1659: MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1660: }
1661: rowlen = bi[i+1] - bi[i];
1662: for (j0=j=0; j<rowlen; j0=j) {
1663: for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1664: MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1665: }
1666: }
1667: MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1668: MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1669: MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1670: MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1671: MatSeqAIJRestoreArray(aA,&aa);
1672: MatSeqAIJRestoreArray(aB,&ba);
1673: PetscFree4(dnnz,onnz,tdnnz,tonnz);
1674: PetscFree3(work,rdest,cdest);
1675: PetscFree(gcdest);
1676: if (parcolp) {ISDestroy(&colp);}
1677: *B = Aperm;
1678: return(0);
1679: }
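/*
    Usage sketch (illustrative, not part of the original source): MatPermute_MPIAIJ is reached
    through MatPermute(), where rowp and colp are IS objects giving each process's share of the
    permuted row and column numbering (for example obtained from MatGetOrdering()):

      Mat Aperm;
      MatPermute(A,rowp,colp,&Aperm);
      ...
      MatDestroy(&Aperm);
*/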
1683: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1684: {
1685: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1686: Mat A = mat->A,B = mat->B;
1688: PetscReal isend[5],irecv[5];
1691: info->block_size = 1.0;
1692: MatGetInfo(A,MAT_LOCAL,info);
1694: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1695: isend[3] = info->memory; isend[4] = info->mallocs;
1697: MatGetInfo(B,MAT_LOCAL,info);
1699: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1700: isend[3] += info->memory; isend[4] += info->mallocs;
1701: if (flag == MAT_LOCAL) {
1702: info->nz_used = isend[0];
1703: info->nz_allocated = isend[1];
1704: info->nz_unneeded = isend[2];
1705: info->memory = isend[3];
1706: info->mallocs = isend[4];
1707: } else if (flag == MAT_GLOBAL_MAX) {
1708: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));
1710: info->nz_used = irecv[0];
1711: info->nz_allocated = irecv[1];
1712: info->nz_unneeded = irecv[2];
1713: info->memory = irecv[3];
1714: info->mallocs = irecv[4];
1715: } else if (flag == MAT_GLOBAL_SUM) {
1716: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));
1718: info->nz_used = irecv[0];
1719: info->nz_allocated = irecv[1];
1720: info->nz_unneeded = irecv[2];
1721: info->memory = irecv[3];
1722: info->mallocs = irecv[4];
1723: }
1724: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1725: info->fill_ratio_needed = 0;
1726: info->factor_mallocs = 0;
1727: return(0);
1728: }
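/*
    Usage sketch (illustrative, not part of the original source): the routine above fills a
    MatInfo struct with either the local counts or a max/sum reduction over the communicator.
    Assuming A is an assembled MATMPIAIJ matrix, the quality of the preallocation can be
    inspected with:

      MatInfo info;
      MatGetInfo(A,MAT_GLOBAL_SUM,&info);
      PetscPrintf(PETSC_COMM_WORLD,"nz_used %g nz_allocated %g mallocs %g\n",info.nz_used,info.nz_allocated,info.mallocs);
*/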
1732: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1733: {
1734: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1738: switch (op) {
1739: case MAT_NEW_NONZERO_LOCATIONS:
1740: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1741: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1742: case MAT_KEEP_NONZERO_PATTERN:
1743: case MAT_NEW_NONZERO_LOCATION_ERR:
1744: case MAT_USE_INODES:
1745: case MAT_IGNORE_ZERO_ENTRIES:
1746: MatCheckPreallocated(A,1);
1747: MatSetOption(a->A,op,flg);
1748: MatSetOption(a->B,op,flg);
1749: break;
1750: case MAT_ROW_ORIENTED:
1751: a->roworiented = flg;
1753: MatSetOption(a->A,op,flg);
1754: MatSetOption(a->B,op,flg);
1755: break;
1756: case MAT_NEW_DIAGONALS:
1757: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1758: break;
1759: case MAT_IGNORE_OFF_PROC_ENTRIES:
1760: a->donotstash = flg;
1761: break;
1762: case MAT_SPD:
1763: A->spd_set = PETSC_TRUE;
1764: A->spd = flg;
1765: if (flg) {
1766: A->symmetric = PETSC_TRUE;
1767: A->structurally_symmetric = PETSC_TRUE;
1768: A->symmetric_set = PETSC_TRUE;
1769: A->structurally_symmetric_set = PETSC_TRUE;
1770: }
1771: break;
1772: case MAT_SYMMETRIC:
1773: MatSetOption(a->A,op,flg);
1774: break;
1775: case MAT_STRUCTURALLY_SYMMETRIC:
1776: MatSetOption(a->A,op,flg);
1777: break;
1778: case MAT_HERMITIAN:
1779: MatSetOption(a->A,op,flg);
1780: break;
1781: case MAT_SYMMETRY_ETERNAL:
1782: MatSetOption(a->A,op,flg);
1783: break;
1784: default:
1785: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1786: }
1787: return(0);
1788: }
1792: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1793: {
1794: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1795: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1797: PetscInt i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1798: PetscInt nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1799: PetscInt *cmap,*idx_p;
1802: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1803: mat->getrowactive = PETSC_TRUE;
1805: if (!mat->rowvalues && (idx || v)) {
1806: /*
1807: allocate enough space to hold information from the longest row.
1808: */
1809: Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1810: PetscInt max = 1,tmp;
1811: for (i=0; i<matin->rmap->n; i++) {
1812: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1813: if (max < tmp) max = tmp;
1814: }
1815: PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1816: }
1818: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1819: lrow = row - rstart;
1821: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1822: if (!v) {pvA = 0; pvB = 0;}
1823: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1824: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1825: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1826: nztot = nzA + nzB;
1828: cmap = mat->garray;
1829: if (v || idx) {
1830: if (nztot) {
1831: /* Sort by increasing column numbers, assuming A and B already sorted */
1832: PetscInt imark = -1;
1833: if (v) {
1834: *v = v_p = mat->rowvalues;
1835: for (i=0; i<nzB; i++) {
1836: if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1837: else break;
1838: }
1839: imark = i;
1840: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1841: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1842: }
1843: if (idx) {
1844: *idx = idx_p = mat->rowindices;
1845: if (imark > -1) {
1846: for (i=0; i<imark; i++) {
1847: idx_p[i] = cmap[cworkB[i]];
1848: }
1849: } else {
1850: for (i=0; i<nzB; i++) {
1851: if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1852: else break;
1853: }
1854: imark = i;
1855: }
1856: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart + cworkA[i];
1857: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]];
1858: }
1859: } else {
1860: if (idx) *idx = 0;
1861: if (v) *v = 0;
1862: }
1863: }
1864: *nz = nztot;
1865: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1866: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1867: return(0);
1868: }
1872: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1873: {
1874: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1877: if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1878: aij->getrowactive = PETSC_FALSE;
1879: return(0);
1880: }
1884: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1885: {
1886: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1887: Mat_SeqAIJ *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1889: PetscInt i,j,cstart = mat->cmap->rstart;
1890: PetscReal sum = 0.0;
1891: MatScalar *v;
1894: if (aij->size == 1) {
1895: MatNorm(aij->A,type,norm);
1896: } else {
1897: if (type == NORM_FROBENIUS) {
1898: v = amat->a;
1899: for (i=0; i<amat->nz; i++) {
1900: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1901: }
1902: v = bmat->a;
1903: for (i=0; i<bmat->nz; i++) {
1904: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1905: }
1906: MPI_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1907: *norm = PetscSqrtReal(*norm);
1908: } else if (type == NORM_1) { /* max column norm */
1909: PetscReal *tmp,*tmp2;
1910: PetscInt *jj,*garray = aij->garray;
1911: PetscCalloc1(mat->cmap->N+1,&tmp);
1912: PetscMalloc1(mat->cmap->N+1,&tmp2);
1913: *norm = 0.0;
1914: v = amat->a; jj = amat->j;
1915: for (j=0; j<amat->nz; j++) {
1916: tmp[cstart + *jj++] += PetscAbsScalar(*v); v++;
1917: }
1918: v = bmat->a; jj = bmat->j;
1919: for (j=0; j<bmat->nz; j++) {
1920: tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1921: }
1922: MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1923: for (j=0; j<mat->cmap->N; j++) {
1924: if (tmp2[j] > *norm) *norm = tmp2[j];
1925: }
1926: PetscFree(tmp);
1927: PetscFree(tmp2);
1928: } else if (type == NORM_INFINITY) { /* max row norm */
1929: PetscReal ntemp = 0.0;
1930: for (j=0; j<aij->A->rmap->n; j++) {
1931: v = amat->a + amat->i[j];
1932: sum = 0.0;
1933: for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1934: sum += PetscAbsScalar(*v); v++;
1935: }
1936: v = bmat->a + bmat->i[j];
1937: for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1938: sum += PetscAbsScalar(*v); v++;
1939: }
1940: if (sum > ntemp) ntemp = sum;
1941: }
1942: MPI_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1943: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1944: }
1945: return(0);
1946: }
1950: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1951: {
1952: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1953: Mat_SeqAIJ *Aloc=(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data;
1955: PetscInt M = A->rmap->N,N = A->cmap->N,ma,na,mb,nb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i;
1956: PetscInt cstart = A->cmap->rstart,ncol;
1957: Mat B;
1958: MatScalar *array;
1961: if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1963: ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1964: ai = Aloc->i; aj = Aloc->j;
1965: bi = Bloc->i; bj = Bloc->j;
1966: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1967: PetscInt *d_nnz,*g_nnz,*o_nnz;
1968: PetscSFNode *oloc;
1969: PETSC_UNUSED PetscSF sf;
1971: PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
1972: /* compute d_nnz for preallocation */
1973: PetscMemzero(d_nnz,na*sizeof(PetscInt));
1974: for (i=0; i<ai[ma]; i++) {
1975: d_nnz[aj[i]]++;
1976: aj[i] += cstart; /* global col index to be used by MatSetValues() */
1977: }
1978: /* compute local off-diagonal contributions */
1979: PetscMemzero(g_nnz,nb*sizeof(PetscInt));
1980: for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1981: /* map those to global */
1982: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1983: PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
1984: PetscSFSetFromOptions(sf);
1985: PetscMemzero(o_nnz,na*sizeof(PetscInt));
1986: PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1987: PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1988: PetscSFDestroy(&sf);
1990: MatCreate(PetscObjectComm((PetscObject)A),&B);
1991: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1992: MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
1993: MatSetType(B,((PetscObject)A)->type_name);
1994: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
1995: PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
1996: } else {
1997: B = *matout;
1998: MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
1999: for (i=0; i<ai[ma]; i++) aj[i] += cstart; /* global col index to be used by MatSetValues() */
2000: }
2002: /* copy over the A part */
2003: array = Aloc->a;
2004: row = A->rmap->rstart;
2005: for (i=0; i<ma; i++) {
2006: ncol = ai[i+1]-ai[i];
2007: MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);
2008: row++;
2009: array += ncol; aj += ncol;
2010: }
2011: aj = Aloc->j;
2012: for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore local col index */
2014: /* copy over the B part */
2015: PetscCalloc1(bi[mb],&cols);
2016: array = Bloc->a;
2017: row = A->rmap->rstart;
2018: for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2019: cols_tmp = cols;
2020: for (i=0; i<mb; i++) {
2021: ncol = bi[i+1]-bi[i];
2022: MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2023: row++;
2024: array += ncol; cols_tmp += ncol;
2025: }
2026: PetscFree(cols);
2028: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2029: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2030: if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
2031: *matout = B;
2032: } else {
2033: MatHeaderMerge(A,B);
2034: }
2035: return(0);
2036: }
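/*
    Usage sketch (illustrative, not part of the original source): MatTranspose_MPIAIJ is reached
    through MatTranspose(). Assuming A is an assembled MATMPIAIJ matrix,

      Mat At;
      MatTranspose(A,MAT_INITIAL_MATRIX,&At);

    creates a new transposed matrix, while MatTranspose(A,MAT_REUSE_MATRIX,&A) transposes A
    in-place; the in-place form corresponds to the *matout == A branch above and is restricted
    to square matrices by the SETERRQ size check.
*/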
2040: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2041: {
2042: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2043: Mat a = aij->A,b = aij->B;
2045: PetscInt s1,s2,s3;
2048: MatGetLocalSize(mat,&s2,&s3);
2049: if (rr) {
2050: VecGetLocalSize(rr,&s1);
2051: if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2052: /* Overlap communication with computation. */
2053: VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2054: }
2055: if (ll) {
2056: VecGetLocalSize(ll,&s1);
2057: if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2058: (*b->ops->diagonalscale)(b,ll,0);
2059: }
2060: /* scale the diagonal block */
2061: (*a->ops->diagonalscale)(a,ll,rr);
2063: if (rr) {
2064: /* Do a scatter end and then right scale the off-diagonal block */
2065: VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2066: (*b->ops->diagonalscale)(b,0,aij->lvec);
2067: }
2068: return(0);
2069: }
2073: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2074: {
2075: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2079: MatSetUnfactored(a->A);
2080: return(0);
2081: }
2085: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool *flag)
2086: {
2087: Mat_MPIAIJ *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2088: Mat a,b,c,d;
2089: PetscBool flg;
2093: a = matA->A; b = matA->B;
2094: c = matB->A; d = matB->B;
2096: MatEqual(a,c,&flg);
2097: if (flg) {
2098: MatEqual(b,d,&flg);
2099: }
2100: MPI_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2101: return(0);
2102: }
2106: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2107: {
2109: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2110: Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
2113: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2114: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2115: /* because of the column compression in the off-processor part of the matrix a->B,
2116: the number of columns in a->B and b->B may be different, hence we cannot call
2117: the MatCopy() directly on the two parts. If need be, we can provide a more
2118: efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices
2119: then copying the submatrices */
2120: MatCopy_Basic(A,B,str);
2121: } else {
2122: MatCopy(a->A,b->A,str);
2123: MatCopy(a->B,b->B,str);
2124: }
2125: return(0);
2126: }
2130: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2131: {
2135: MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2136: return(0);
2137: }
2139: /*
2140: Computes the number of nonzeros per row needed for preallocation when X and Y
2141: have different nonzero structure.
2142: */
2145: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2146: {
2147: PetscInt i,j,k,nzx,nzy;
2150: /* Set the number of nonzeros in the new matrix */
2151: for (i=0; i<m; i++) {
2152: const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2153: nzx = xi[i+1] - xi[i];
2154: nzy = yi[i+1] - yi[i];
2155: nnz[i] = 0;
2156: for (j=0,k=0; j<nzx; j++) { /* Point in X */
2157: for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2158: if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++; /* Skip duplicate */
2159: nnz[i]++;
2160: }
2161: for (; k<nzy; k++) nnz[i]++;
2162: }
2163: return(0);
2164: }
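/*
    Worked example (illustrative, not part of the original source): for a single row, if the
    global column indices of X are xltog[xjj] = {0,3,7} and those of Y are yltog[yjj] = {3,5},
    the merge loop above counts the union {0,3,5,7}, so nnz[i] = 4; the shared column 3 is
    counted only once.
*/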
2166: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2169: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2170: {
2172: PetscInt m = Y->rmap->N;
2173: Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data;
2174: Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data;
2177: MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2178: return(0);
2179: }
2183: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2184: {
2186: Mat_MPIAIJ *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2187: PetscBLASInt bnz,one=1;
2188: Mat_SeqAIJ *x,*y;
2191: if (str == SAME_NONZERO_PATTERN) {
2192: PetscScalar alpha = a;
2193: x = (Mat_SeqAIJ*)xx->A->data;
2194: PetscBLASIntCast(x->nz,&bnz);
2195: y = (Mat_SeqAIJ*)yy->A->data;
2196: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2197: x = (Mat_SeqAIJ*)xx->B->data;
2198: y = (Mat_SeqAIJ*)yy->B->data;
2199: PetscBLASIntCast(x->nz,&bnz);
2200: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2201: PetscObjectStateIncrease((PetscObject)Y);
2202: } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2203: MatAXPY_Basic(Y,a,X,str);
2204: } else {
2205: Mat B;
2206: PetscInt *nnz_d,*nnz_o;
2207: PetscMalloc1(yy->A->rmap->N,&nnz_d);
2208: PetscMalloc1(yy->B->rmap->N,&nnz_o);
2209: MatCreate(PetscObjectComm((PetscObject)Y),&B);
2210: PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2211: MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2212: MatSetBlockSizesFromMats(B,Y,Y);
2213: MatSetType(B,MATMPIAIJ);
2214: MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2215: MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2216: MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2217: MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2218: MatHeaderReplace(Y,B);
2219: PetscFree(nnz_d);
2220: PetscFree(nnz_o);
2221: }
2222: return(0);
2223: }
2225: extern PetscErrorCode MatConjugate_SeqAIJ(Mat);
2229: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2230: {
2231: #if defined(PETSC_USE_COMPLEX)
2233: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2236: MatConjugate_SeqAIJ(aij->A);
2237: MatConjugate_SeqAIJ(aij->B);
2238: #else
2240: #endif
2241: return(0);
2242: }
2246: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2247: {
2248: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2252: MatRealPart(a->A);
2253: MatRealPart(a->B);
2254: return(0);
2255: }
2259: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2260: {
2261: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2265: MatImaginaryPart(a->A);
2266: MatImaginaryPart(a->B);
2267: return(0);
2268: }
2270: #if defined(PETSC_HAVE_PBGL)
2272: #include <boost/parallel/mpi/bsp_process_group.hpp>
2273: #include <boost/graph/distributed/ilu_default_graph.hpp>
2274: #include <boost/graph/distributed/ilu_0_block.hpp>
2275: #include <boost/graph/distributed/ilu_preconditioner.hpp>
2276: #include <boost/graph/distributed/petsc/interface.hpp>
2277: #include <boost/multi_array.hpp>
2278: #include <boost/parallel/distributed_property_map->hpp>
2282: /*
2283: This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
2284: */
2285: PetscErrorCode MatILUFactorSymbolic_MPIAIJ(Mat fact,Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
2286: {
2287: namespace petsc = boost::distributed::petsc;
2289: namespace graph_dist = boost::graph::distributed;
2290: using boost::graph::distributed::ilu_default::process_group_type;
2291: using boost::graph::ilu_permuted;
2293: PetscBool row_identity, col_identity;
2294: PetscContainer c;
2295: PetscInt m, n, M, N;
2299: if (info->levels != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only levels = 0 supported for parallel ilu");
2300: ISIdentity(isrow, &row_identity);
2301: ISIdentity(iscol, &col_identity);
2302: if (!row_identity || !col_identity) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row and column permutations must be identity for parallel ILU");
2304: process_group_type pg;
2305: typedef graph_dist::ilu_default::ilu_level_graph_type lgraph_type;
2306: lgraph_type *lgraph_p = new lgraph_type(petsc::num_global_vertices(A), pg, petsc::matrix_distribution(A, pg));
2307: lgraph_type& level_graph = *lgraph_p;
2308: graph_dist::ilu_default::graph_type& graph(level_graph.graph);
2310: petsc::read_matrix(A, graph, get(boost::edge_weight, graph));
2311: ilu_permuted(level_graph);
2313: /* put together the new matrix */
2314: MatCreate(PetscObjectComm((PetscObject)A), fact);
2315: MatGetLocalSize(A, &m, &n);
2316: MatGetSize(A, &M, &N);
2317: MatSetSizes(fact, m, n, M, N);
2318: MatSetBlockSizesFromMats(fact,A,A);
2319: MatSetType(fact, ((PetscObject)A)->type_name);
2320: MatAssemblyBegin(fact, MAT_FINAL_ASSEMBLY);
2321: MatAssemblyEnd(fact, MAT_FINAL_ASSEMBLY);
2323: PetscContainerCreate(PetscObjectComm((PetscObject)A), &c);
2324: PetscContainerSetPointer(c, lgraph_p);
2325: PetscObjectCompose((PetscObject) (fact), "graph", (PetscObject) c);
2326: PetscContainerDestroy(&c);
2327: return(0);
2328: }
2332: PetscErrorCode MatLUFactorNumeric_MPIAIJ(Mat B,Mat A, const MatFactorInfo *info)
2333: {
2335: return(0);
2336: }
2340: /*
2341: This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
2342: */
2343: PetscErrorCode MatSolve_MPIAIJ(Mat A, Vec b, Vec x)
2344: {
2345: namespace graph_dist = boost::graph::distributed;
2347: typedef graph_dist::ilu_default::ilu_level_graph_type lgraph_type;
2348: lgraph_type *lgraph_p;
2349: PetscContainer c;
2353: PetscObjectQuery((PetscObject) A, "graph", (PetscObject*) &c);
2354: PetscContainerGetPointer(c, (void**) &lgraph_p);
2355: VecCopy(b, x);
2357: PetscScalar *array_x;
2358: VecGetArray(x, &array_x);
2359: PetscInt sx;
2360: VecGetSize(x, &sx);
2362: PetscScalar *array_b;
2363: VecGetArray(b, &array_b);
2364: PetscInt sb;
2365: VecGetSize(b, &sb);
2367: lgraph_type& level_graph = *lgraph_p;
2368: graph_dist::ilu_default::graph_type& graph(level_graph.graph);
2370: typedef boost::multi_array_ref<PetscScalar, 1> array_ref_type;
2371: array_ref_type ref_b(array_b, boost::extents[num_vertices(graph)]);
2372: array_ref_type ref_x(array_x, boost::extents[num_vertices(graph)]);
2374: typedef boost::iterator_property_map<array_ref_type::iterator,
2375: boost::property_map<graph_dist::ilu_default::graph_type, boost::vertex_index_t>::type> gvector_type;
2376: gvector_type vector_b(ref_b.begin(), get(boost::vertex_index, graph));
2377: gvector_type vector_x(ref_x.begin(), get(boost::vertex_index, graph));
2379: ilu_set_solve(*lgraph_p, vector_b, vector_x);
2380: return(0);
2381: }
2382: #endif
2386: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2387: {
2388: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2390: PetscInt i,*idxb = 0;
2391: PetscScalar *va,*vb;
2392: Vec vtmp;
2395: MatGetRowMaxAbs(a->A,v,idx);
2396: VecGetArray(v,&va);
2397: if (idx) {
2398: for (i=0; i<A->rmap->n; i++) {
2399: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2400: }
2401: }
2403: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2404: if (idx) {
2405: PetscMalloc1(A->rmap->n,&idxb);
2406: }
2407: MatGetRowMaxAbs(a->B,vtmp,idxb);
2408: VecGetArray(vtmp,&vb);
2410: for (i=0; i<A->rmap->n; i++) {
2411: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2412: va[i] = vb[i];
2413: if (idx) idx[i] = a->garray[idxb[i]];
2414: }
2415: }
2417: VecRestoreArray(v,&va);
2418: VecRestoreArray(vtmp,&vb);
2419: PetscFree(idxb);
2420: VecDestroy(&vtmp);
2421: return(0);
2422: }
2426: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2427: {
2428: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2430: PetscInt i,*idxb = 0;
2431: PetscScalar *va,*vb;
2432: Vec vtmp;
2435: MatGetRowMinAbs(a->A,v,idx);
2436: VecGetArray(v,&va);
2437: if (idx) {
2438: for (i=0; i<A->rmap->n; i++) {
2439: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2440: }
2441: }
2443: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2444: if (idx) {
2445: PetscMalloc1(A->rmap->n,&idxb);
2446: }
2447: MatGetRowMinAbs(a->B,vtmp,idxb);
2448: VecGetArray(vtmp,&vb);
2450: for (i=0; i<A->rmap->n; i++) {
2451: if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2452: va[i] = vb[i];
2453: if (idx) idx[i] = a->garray[idxb[i]];
2454: }
2455: }
2457: VecRestoreArray(v,&va);
2458: VecRestoreArray(vtmp,&vb);
2459: PetscFree(idxb);
2460: VecDestroy(&vtmp);
2461: return(0);
2462: }
2466: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2467: {
2468: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2469: PetscInt n = A->rmap->n;
2470: PetscInt cstart = A->cmap->rstart;
2471: PetscInt *cmap = mat->garray;
2472: PetscInt *diagIdx, *offdiagIdx;
2473: Vec diagV, offdiagV;
2474: PetscScalar *a, *diagA, *offdiagA;
2475: PetscInt r;
2479: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2480: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2481: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2482: MatGetRowMin(mat->A, diagV, diagIdx);
2483: MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2484: VecGetArray(v, &a);
2485: VecGetArray(diagV, &diagA);
2486: VecGetArray(offdiagV, &offdiagA);
2487: for (r = 0; r < n; ++r) {
2488: if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2489: a[r] = diagA[r];
2490: idx[r] = cstart + diagIdx[r];
2491: } else {
2492: a[r] = offdiagA[r];
2493: idx[r] = cmap[offdiagIdx[r]];
2494: }
2495: }
2496: VecRestoreArray(v, &a);
2497: VecRestoreArray(diagV, &diagA);
2498: VecRestoreArray(offdiagV, &offdiagA);
2499: VecDestroy(&diagV);
2500: VecDestroy(&offdiagV);
2501: PetscFree2(diagIdx, offdiagIdx);
2502: return(0);
2503: }
2507: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2508: {
2509: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2510: PetscInt n = A->rmap->n;
2511: PetscInt cstart = A->cmap->rstart;
2512: PetscInt *cmap = mat->garray;
2513: PetscInt *diagIdx, *offdiagIdx;
2514: Vec diagV, offdiagV;
2515: PetscScalar *a, *diagA, *offdiagA;
2516: PetscInt r;
2520: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2521: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2522: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2523: MatGetRowMax(mat->A, diagV, diagIdx);
2524: MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2525: VecGetArray(v, &a);
2526: VecGetArray(diagV, &diagA);
2527: VecGetArray(offdiagV, &offdiagA);
2528: for (r = 0; r < n; ++r) {
2529: if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2530: a[r] = diagA[r];
2531: idx[r] = cstart + diagIdx[r];
2532: } else {
2533: a[r] = offdiagA[r];
2534: idx[r] = cmap[offdiagIdx[r]];
2535: }
2536: }
2537: VecRestoreArray(v, &a);
2538: VecRestoreArray(diagV, &diagA);
2539: VecRestoreArray(offdiagV, &offdiagA);
2540: VecDestroy(&diagV);
2541: VecDestroy(&offdiagV);
2542: PetscFree2(diagIdx, offdiagIdx);
2543: return(0);
2544: }
2548: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2549: {
2551: Mat *dummy;
2554: MatGetSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2555: *newmat = *dummy;
2556: PetscFree(dummy);
2557: return(0);
2558: }
2562: PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2563: {
2564: Mat_MPIAIJ *a = (Mat_MPIAIJ*) A->data;
2568: MatInvertBlockDiagonal(a->A,values);
2569: return(0);
2570: }
2574: static PetscErrorCode MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2575: {
2577: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)x->data;
2580: MatSetRandom(aij->A,rctx);
2581: MatSetRandom(aij->B,rctx);
2582: MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2583: MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2584: return(0);
2585: }
2589: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2590: {
2592: Mat_MPIAIJ *maij = (Mat_MPIAIJ*)Y->data;
2593: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)maij->A->data,*bij = (Mat_SeqAIJ*)maij->B->data;
2596: if (!aij->nz && !bij->nz) {
2597: MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2598: }
2599: MatShift_Basic(Y,a);
2600: return(0);
2601: }
2603: /* -------------------------------------------------------------------*/
2604: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2605: MatGetRow_MPIAIJ,
2606: MatRestoreRow_MPIAIJ,
2607: MatMult_MPIAIJ,
2608: /* 4*/ MatMultAdd_MPIAIJ,
2609: MatMultTranspose_MPIAIJ,
2610: MatMultTransposeAdd_MPIAIJ,
2611: #if defined(PETSC_HAVE_PBGL)
2612: MatSolve_MPIAIJ,
2613: #else
2614: 0,
2615: #endif
2616: 0,
2617: 0,
2618: /*10*/ 0,
2619: 0,
2620: 0,
2621: MatSOR_MPIAIJ,
2622: MatTranspose_MPIAIJ,
2623: /*15*/ MatGetInfo_MPIAIJ,
2624: MatEqual_MPIAIJ,
2625: MatGetDiagonal_MPIAIJ,
2626: MatDiagonalScale_MPIAIJ,
2627: MatNorm_MPIAIJ,
2628: /*20*/ MatAssemblyBegin_MPIAIJ,
2629: MatAssemblyEnd_MPIAIJ,
2630: MatSetOption_MPIAIJ,
2631: MatZeroEntries_MPIAIJ,
2632: /*24*/ MatZeroRows_MPIAIJ,
2633: 0,
2634: #if defined(PETSC_HAVE_PBGL)
2635: 0,
2636: #else
2637: 0,
2638: #endif
2639: 0,
2640: 0,
2641: /*29*/ MatSetUp_MPIAIJ,
2642: #if defined(PETSC_HAVE_PBGL)
2643: 0,
2644: #else
2645: 0,
2646: #endif
2647: 0,
2648: 0,
2649: 0,
2650: /*34*/ MatDuplicate_MPIAIJ,
2651: 0,
2652: 0,
2653: 0,
2654: 0,
2655: /*39*/ MatAXPY_MPIAIJ,
2656: MatGetSubMatrices_MPIAIJ,
2657: MatIncreaseOverlap_MPIAIJ,
2658: MatGetValues_MPIAIJ,
2659: MatCopy_MPIAIJ,
2660: /*44*/ MatGetRowMax_MPIAIJ,
2661: MatScale_MPIAIJ,
2662: MatShift_MPIAIJ,
2663: MatDiagonalSet_MPIAIJ,
2664: MatZeroRowsColumns_MPIAIJ,
2665: /*49*/ MatSetRandom_MPIAIJ,
2666: 0,
2667: 0,
2668: 0,
2669: 0,
2670: /*54*/ MatFDColoringCreate_MPIXAIJ,
2671: 0,
2672: MatSetUnfactored_MPIAIJ,
2673: MatPermute_MPIAIJ,
2674: 0,
2675: /*59*/ MatGetSubMatrix_MPIAIJ,
2676: MatDestroy_MPIAIJ,
2677: MatView_MPIAIJ,
2678: 0,
2679: MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2680: /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2681: MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2682: 0,
2683: 0,
2684: 0,
2685: /*69*/ MatGetRowMaxAbs_MPIAIJ,
2686: MatGetRowMinAbs_MPIAIJ,
2687: 0,
2688: MatSetColoring_MPIAIJ,
2689: 0,
2690: MatSetValuesAdifor_MPIAIJ,
2691: /*75*/ MatFDColoringApply_AIJ,
2692: 0,
2693: 0,
2694: 0,
2695: MatFindZeroDiagonals_MPIAIJ,
2696: /*80*/ 0,
2697: 0,
2698: 0,
2699: /*83*/ MatLoad_MPIAIJ,
2700: 0,
2701: 0,
2702: 0,
2703: 0,
2704: 0,
2705: /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2706: MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2707: MatMatMultNumeric_MPIAIJ_MPIAIJ,
2708: MatPtAP_MPIAIJ_MPIAIJ,
2709: MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2710: /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2711: 0,
2712: 0,
2713: 0,
2714: 0,
2715: /*99*/ 0,
2716: 0,
2717: 0,
2718: MatConjugate_MPIAIJ,
2719: 0,
2720: /*104*/MatSetValuesRow_MPIAIJ,
2721: MatRealPart_MPIAIJ,
2722: MatImaginaryPart_MPIAIJ,
2723: 0,
2724: 0,
2725: /*109*/0,
2726: 0,
2727: MatGetRowMin_MPIAIJ,
2728: 0,
2729: 0,
2730: /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2731: 0,
2732: 0,
2733: 0,
2734: 0,
2735: /*119*/0,
2736: 0,
2737: 0,
2738: 0,
2739: MatGetMultiProcBlock_MPIAIJ,
2740: /*124*/MatFindNonzeroRows_MPIAIJ,
2741: MatGetColumnNorms_MPIAIJ,
2742: MatInvertBlockDiagonal_MPIAIJ,
2743: 0,
2744: MatGetSubMatricesMPI_MPIAIJ,
2745: /*129*/0,
2746: MatTransposeMatMult_MPIAIJ_MPIAIJ,
2747: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2748: MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2749: 0,
2750: /*134*/0,
2751: 0,
2752: 0,
2753: 0,
2754: 0,
2755: /*139*/0,
2756: 0,
2757: 0,
2758: MatFDColoringSetUp_MPIXAIJ,
2759: MatFindOffBlockDiagonalEntries_MPIAIJ,
2760: /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2761: };
2763: /* ----------------------------------------------------------------------------------------*/
2767: PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2768: {
2769: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2773: MatStoreValues(aij->A);
2774: MatStoreValues(aij->B);
2775: return(0);
2776: }
2780: PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2781: {
2782: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2786: MatRetrieveValues(aij->A);
2787: MatRetrieveValues(aij->B);
2788: return(0);
2789: }
2793: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2794: {
2795: Mat_MPIAIJ *b;
2799: PetscLayoutSetUp(B->rmap);
2800: PetscLayoutSetUp(B->cmap);
2801: b = (Mat_MPIAIJ*)B->data;
2803: if (!B->preallocated) {
2804: /* Explicitly create 2 MATSEQAIJ matrices. */
2805: MatCreate(PETSC_COMM_SELF,&b->A);
2806: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2807: MatSetBlockSizesFromMats(b->A,B,B);
2808: MatSetType(b->A,MATSEQAIJ);
2809: PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2810: MatCreate(PETSC_COMM_SELF,&b->B);
2811: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
2812: MatSetBlockSizesFromMats(b->B,B,B);
2813: MatSetType(b->B,MATSEQAIJ);
2814: PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
2815: }
2817: MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2818: MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2819: B->preallocated = PETSC_TRUE;
2820: return(0);
2821: }
2825: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2826: {
2827: Mat mat;
2828: Mat_MPIAIJ *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2832: *newmat = 0;
2833: MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2834: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2835: MatSetBlockSizesFromMats(mat,matin,matin);
2836: MatSetType(mat,((PetscObject)matin)->type_name);
2837: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2838: a = (Mat_MPIAIJ*)mat->data;
2840: mat->factortype = matin->factortype;
2841: mat->assembled = PETSC_TRUE;
2842: mat->insertmode = NOT_SET_VALUES;
2843: mat->preallocated = PETSC_TRUE;
2845: a->size = oldmat->size;
2846: a->rank = oldmat->rank;
2847: a->donotstash = oldmat->donotstash;
2848: a->roworiented = oldmat->roworiented;
2849: a->rowindices = 0;
2850: a->rowvalues = 0;
2851: a->getrowactive = PETSC_FALSE;
2853: PetscLayoutReference(matin->rmap,&mat->rmap);
2854: PetscLayoutReference(matin->cmap,&mat->cmap);
2856: if (oldmat->colmap) {
2857: #if defined(PETSC_USE_CTABLE)
2858: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2859: #else
2860: PetscMalloc1(mat->cmap->N,&a->colmap);
2861: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2862: PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));
2863: #endif
2864: } else a->colmap = 0;
2865: if (oldmat->garray) {
2866: PetscInt len;
2867: len = oldmat->B->cmap->n;
2868: PetscMalloc1(len+1,&a->garray);
2869: PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2870: if (len) { PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt)); }
2871: } else a->garray = 0;
2873: VecDuplicate(oldmat->lvec,&a->lvec);
2874: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2875: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2876: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2877: MatDuplicate(oldmat->A,cpvalues,&a->A);
2878: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2879: MatDuplicate(oldmat->B,cpvalues,&a->B);
2880: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2881: PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2882: *newmat = mat;
2883: return(0);
2884: }
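/*
    Usage sketch (illustrative, not part of the original source): assuming A is an assembled
    MATMPIAIJ matrix,

      Mat B;
      MatDuplicate(A,MAT_COPY_VALUES,&B);

    copies the layout and the values; MAT_DO_NOT_COPY_VALUES duplicates only the nonzero
    structure, and MAT_SHARE_NONZERO_PATTERN shares the nonzero pattern between the two matrices.
*/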
2890: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2891: {
2892: PetscScalar *vals,*svals;
2893: MPI_Comm comm;
2895: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
2896: PetscInt i,nz,j,rstart,rend,mmax,maxnz = 0;
2897: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
2898: PetscInt *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2899: PetscInt cend,cstart,n,*rowners;
2900: int fd;
2901: PetscInt bs = newMat->rmap->bs;
2904: /* force binary viewer to load .info file if it has not yet done so */
2905: PetscViewerSetUp(viewer);
2906: PetscObjectGetComm((PetscObject)viewer,&comm);
2907: MPI_Comm_size(comm,&size);
2908: MPI_Comm_rank(comm,&rank);
2909: PetscViewerBinaryGetDescriptor(viewer,&fd);
2910: if (!rank) {
2911: PetscBinaryRead(fd,(char*)header,4,PETSC_INT);
2912: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2913: }
2915: PetscOptionsBegin(comm,NULL,"Options for loading MPIAIJ matrix","Mat");
2916: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
2917: PetscOptionsEnd();
2918: if (bs < 0) bs = 1;
2920: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2921: M = header[1]; N = header[2];
2923: /* If global sizes are set, check if they are consistent with that given in the file */
2924: if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows:Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
2925: if (newMat->cmap->N >=0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols:Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);
2927: /* determine ownership of all (block) rows */
2928: if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%D) and block size (%D)",M,bs);
2929: if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank)); /* PETSC_DECIDE */
2930: else m = newMat->rmap->n; /* Set by user */
2932: PetscMalloc1(size+1,&rowners);
2933: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
2935: /* First process needs enough room for process with most rows */
2936: if (!rank) {
2937: mmax = rowners[1];
2938: for (i=2; i<=size; i++) {
2939: mmax = PetscMax(mmax, rowners[i]);
2940: }
2941: } else mmax = -1; /* unused, but compilers complain */
2943: rowners[0] = 0;
2944: for (i=2; i<=size; i++) {
2945: rowners[i] += rowners[i-1];
2946: }
2947: rstart = rowners[rank];
2948: rend = rowners[rank+1];
2950: /* distribute row lengths to all processors */
2951: PetscMalloc2(m,&ourlens,m,&offlens);
2952: if (!rank) {
2953: PetscBinaryRead(fd,ourlens,m,PETSC_INT);
2954: PetscMalloc1(mmax,&rowlengths);
2955: PetscCalloc1(size,&procsnz);
2956: for (j=0; j<m; j++) {
2957: procsnz[0] += ourlens[j];
2958: }
2959: for (i=1; i<size; i++) {
2960: PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);
2961: /* calculate the number of nonzeros on each processor */
2962: for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2963: procsnz[i] += rowlengths[j];
2964: }
2965: MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
2966: }
2967: PetscFree(rowlengths);
2968: } else {
2969: MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);
2970: }
2972: if (!rank) {
2973: /* determine max buffer needed and allocate it */
2974: maxnz = 0;
2975: for (i=0; i<size; i++) {
2976: maxnz = PetscMax(maxnz,procsnz[i]);
2977: }
2978: PetscMalloc1(maxnz,&cols);
2980: /* read in my part of the matrix column indices */
2981: nz = procsnz[0];
2982: PetscMalloc1(nz,&mycols);
2983: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2985: /* read in everyone else's column indices and ship them off */
2986: for (i=1; i<size; i++) {
2987: nz = procsnz[i];
2988: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2989: MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);
2990: }
2991: PetscFree(cols);
2992: } else {
2993: /* determine buffer space needed for message */
2994: nz = 0;
2995: for (i=0; i<m; i++) {
2996: nz += ourlens[i];
2997: }
2998: PetscMalloc1(nz,&mycols);
3000: /* receive message of column indices*/
3001: MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);
3002: }
3004: /* determine column ownership if matrix is not square */
3005: if (N != M) {
3006: if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
3007: else n = newMat->cmap->n;
3008: MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
3009: cstart = cend - n;
3010: } else {
3011: cstart = rstart;
3012: cend = rend;
3013: n = cend - cstart;
3014: }
3016: /* loop over local rows, determining number of off diagonal entries */
3017: PetscMemzero(offlens,m*sizeof(PetscInt));
3018: jj = 0;
3019: for (i=0; i<m; i++) {
3020: for (j=0; j<ourlens[i]; j++) {
3021: if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
3022: jj++;
3023: }
3024: }
3026: for (i=0; i<m; i++) {
3027: ourlens[i] -= offlens[i];
3028: }
3029: MatSetSizes(newMat,m,n,M,N);
3031: if (bs > 1) {MatSetBlockSize(newMat,bs);}
3033: MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);
3035: for (i=0; i<m; i++) {
3036: ourlens[i] += offlens[i];
3037: }
3039: if (!rank) {
3040: PetscMalloc1(maxnz+1,&vals);
3042: /* read in my part of the matrix numerical values */
3043: nz = procsnz[0];
3044: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3046: /* insert into matrix */
3047: jj = rstart;
3048: smycols = mycols;
3049: svals = vals;
3050: for (i=0; i<m; i++) {
3051: MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3052: smycols += ourlens[i];
3053: svals += ourlens[i];
3054: jj++;
3055: }
3057: /* read in other processors and ship out */
3058: for (i=1; i<size; i++) {
3059: nz = procsnz[i];
3060: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3061: MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);
3062: }
3063: PetscFree(procsnz);
3064: } else {
3065: /* receive numeric values */
3066: PetscMalloc1(nz+1,&vals);
3068: /* receive message of values*/
3069: MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);
3071: /* insert into matrix */
3072: jj = rstart;
3073: smycols = mycols;
3074: svals = vals;
3075: for (i=0; i<m; i++) {
3076: MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3077: smycols += ourlens[i];
3078: svals += ourlens[i];
3079: jj++;
3080: }
3081: }
3082: PetscFree2(ourlens,offlens);
3083: PetscFree(vals);
3084: PetscFree(mycols);
3085: PetscFree(rowners);
3086: MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);
3087: MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);
3088: return(0);
3089: }
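/*
    Usage sketch (illustrative, not part of the original source; the file name is made up):
    MatLoad_MPIAIJ is invoked through MatLoad() on a binary viewer, e.g. for a matrix written
    earlier with MatView():

      Mat         A;
      PetscViewer viewer;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetType(A,MATMPIAIJ);
      MatLoad(A,viewer);
      PetscViewerDestroy(&viewer);

    The -matload_block_size option read above controls the block size used when distributing
    the rows.
*/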
3093: /* TODO: Not scalable because of ISAllGather(). */
3094: PetscErrorCode MatGetSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3095: {
3097: IS iscol_local;
3098: PetscInt csize;
3101: ISGetLocalSize(iscol,&csize);
3102: if (call == MAT_REUSE_MATRIX) {
3103: PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3104: if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3105: } else {
3106: PetscInt cbs;
3107: ISGetBlockSize(iscol,&cbs);
3108: ISAllGather(iscol,&iscol_local);
3109: ISSetBlockSize(iscol_local,cbs);
3110: }
3111: MatGetSubMatrix_MPIAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);
3112: if (call == MAT_INITIAL_MATRIX) {
3113: PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3114: ISDestroy(&iscol_local);
3115: }
3116: return(0);
3117: }
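/*
    Usage sketch (illustrative, not part of the original source): this routine is reached
    through MatGetSubMatrix(), where each process lists the global rows and columns it should
    own in the result. Assuming isrow and iscol are such parallel IS objects:

      Mat sub;
      MatGetSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&sub);
      MatGetSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&sub);

    The second call reuses the gathered column index set composed on the submatrix as
    "ISAllGather" by the first call, instead of gathering iscol again.
*/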
3119: extern PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,Mat*);
3122: /*
3123: Not great since it makes two copies of the submatrix: first a SeqAIJ
3124: on each process, and then the end result by concatenating the local matrices.
3125: Writing it directly would be much like MatGetSubMatrices_MPIAIJ()
3127: Note: This requires a sequential iscol with all indices.
3128: */
3129: PetscErrorCode MatGetSubMatrix_MPIAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3130: {
3132: PetscMPIInt rank,size;
3133: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3134: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal,ncol;
3135: PetscBool allcolumns, colflag;
3136: Mat M,Mreuse;
3137: MatScalar *vwork,*aa;
3138: MPI_Comm comm;
3139: Mat_SeqAIJ *aij;
3142: PetscObjectGetComm((PetscObject)mat,&comm);
3143: MPI_Comm_rank(comm,&rank);
3144: MPI_Comm_size(comm,&size);
3146: ISIdentity(iscol,&colflag);
3147: ISGetLocalSize(iscol,&ncol);
3148: if (colflag && ncol == mat->cmap->N) {
3149: allcolumns = PETSC_TRUE;
3150: } else {
3151: allcolumns = PETSC_FALSE;
3152: }
3153: if (call == MAT_REUSE_MATRIX) {
3154: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3155: if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3156: MatGetSubMatrices_MPIAIJ_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&allcolumns,&Mreuse);
3157: } else {
3158: MatGetSubMatrices_MPIAIJ_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&allcolumns,&Mreuse);
3159: }
3161: /*
3162: m - number of local rows
3163: n - number of columns (same on all processors)
3164: rstart - first row in new global matrix generated
3165: */
3166: MatGetSize(Mreuse,&m,&n);
3167: MatGetBlockSizes(Mreuse,&bs,&cbs);
3168: if (call == MAT_INITIAL_MATRIX) {
3169: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3170: ii = aij->i;
3171: jj = aij->j;
3173: /*
3174: Determine the number of non-zeros in the diagonal and off-diagonal
3175: portions of the matrix in order to do correct preallocation
3176: */
3178: /* first get start and end of "diagonal" columns */
3179: if (csize == PETSC_DECIDE) {
3180: ISGetSize(isrow,&mglobal);
3181: if (mglobal == n) { /* square matrix */
3182: nlocal = m;
3183: } else {
3184: nlocal = n/size + ((n % size) > rank);
3185: }
3186: } else {
3187: nlocal = csize;
3188: }
3189: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3190: rstart = rend - nlocal;
3191: if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3193: /* next, compute all the lengths */
3194: PetscMalloc1(2*m+1,&dlens);
3195: olens = dlens + m;
3196: for (i=0; i<m; i++) {
3197: jend = ii[i+1] - ii[i];
3198: olen = 0;
3199: dlen = 0;
3200: for (j=0; j<jend; j++) {
3201: if (*jj < rstart || *jj >= rend) olen++;
3202: else dlen++;
3203: jj++;
3204: }
3205: olens[i] = olen;
3206: dlens[i] = dlen;
3207: }
3208: MatCreate(comm,&M);
3209: MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3210: MatSetBlockSizes(M,bs,cbs);
3211: MatSetType(M,((PetscObject)mat)->type_name);
3212: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3213: PetscFree(dlens);
3214: } else {
3215: PetscInt ml,nl;
3217: M = *newmat;
3218: MatGetLocalSize(M,&ml,&nl);
3219: if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3220: MatZeroEntries(M);
3221: /*
3222: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3223: rather than the slower MatSetValues().
3224: */
3225: M->was_assembled = PETSC_TRUE;
3226: M->assembled = PETSC_FALSE;
3227: }
3228: MatGetOwnershipRange(M,&rstart,&rend);
3229: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3230: ii = aij->i;
3231: jj = aij->j;
3232: aa = aij->a;
3233: for (i=0; i<m; i++) {
3234: row = rstart + i;
3235: nz = ii[i+1] - ii[i];
3236: cwork = jj; jj += nz;
3237: vwork = aa; aa += nz;
3238: MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3239: }
3241: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3242: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3243: *newmat = M;
3245: /* save submatrix used in processor for next request */
3246: if (call == MAT_INITIAL_MATRIX) {
3247: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3248: MatDestroy(&Mreuse);
3249: }
3250: return(0);
3251: }
3255: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3256: {
3257: PetscInt m,cstart, cend,j,nnz,i,d;
3258: PetscInt *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3259: const PetscInt *JJ;
3260: PetscScalar *values;
3264: if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0 it is %D",Ii[0]);
3266: PetscLayoutSetUp(B->rmap);
3267: PetscLayoutSetUp(B->cmap);
3268: m = B->rmap->n;
3269: cstart = B->cmap->rstart;
3270: cend = B->cmap->rend;
3271: rstart = B->rmap->rstart;
3273: PetscMalloc2(m,&d_nnz,m,&o_nnz);
3275: #if defined(PETSC_USE_DEBUG)
3276: for (i=0; i<m; i++) {
3277: nnz = Ii[i+1]- Ii[i];
3278: JJ = J + Ii[i];
3279: if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3280: if (nnz && (JJ[0] < 0)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index",i);
3281: if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3282: }
3283: #endif
3285: for (i=0; i<m; i++) {
3286: nnz = Ii[i+1]- Ii[i];
3287: JJ = J + Ii[i];
3288: nnz_max = PetscMax(nnz_max,nnz);
3289: d = 0;
3290: for (j=0; j<nnz; j++) {
3291: if (cstart <= JJ[j] && JJ[j] < cend) d++;
3292: }
3293: d_nnz[i] = d;
3294: o_nnz[i] = nnz - d;
3295: }
3296: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3297: PetscFree2(d_nnz,o_nnz);
3299: if (v) values = (PetscScalar*)v;
3300: else {
3301: PetscCalloc1(nnz_max+1,&values);
3302: }
3304: for (i=0; i<m; i++) {
3305: ii = i + rstart;
3306: nnz = Ii[i+1]- Ii[i];
3307: MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);
3308: }
3309: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3310: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3312: if (!v) {
3313: PetscFree(values);
3314: }
3315: MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3316: return(0);
3317: }
3321: /*@
3322: MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3323: (the default parallel PETSc format).
3325: Collective on MPI_Comm
3327: Input Parameters:
3328: + B - the matrix
3329: . i - the indices into j for the start of each local row (starts with zero)
3330: . j - the column indices for each local row (starts with zero)
3331: - v - optional values in the matrix
3333: Level: developer
3335: Notes:
3336: The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3337: thus you CANNOT change the matrix entries by changing the values of v[] after you have
3338: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3340: The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3342: The format used for the sparse matrix input is equivalent to a
3343: row-major ordering, i.e. for the following matrix, the input data expected is
3344: as shown:
3346: 1 0 0
3347: 2 0 3 P0
3348: -------
3349: 4 5 6 P1
3351: Process0 [P0]: rows_owned=[0,1]
3352: i = {0,1,3} [size = nrow+1 = 2+1]
3353: j = {0,0,2} [size = nz = 3]
3354: v = {1,2,3} [size = nz = 3]
3356: Process1 [P1]: rows_owned=[2]
3357: i = {0,3} [size = nrow+1 = 1+1]
3358: j = {0,1,2} [size = nz = 3]
3359: v = {4,5,6} [size = nz = 3]
3361: .keywords: matrix, aij, compressed row, sparse, parallel
3363: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MPIAIJ,
3364: MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3365: @*/
3366: PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3367: {
3371: PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3372: return(0);
3373: }
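/*
    Usage sketch (illustrative, not part of the original source): using the 3x3 example from the
    manual page above, process 0 (which owns rows 0 and 1) could create its part of the matrix as

      Mat         B;
      PetscInt    ii[] = {0,1,3},jj[] = {0,0,2};
      PetscScalar vv[] = {1,2,3};
      MatCreate(PETSC_COMM_WORLD,&B);
      MatSetSizes(B,2,PETSC_DECIDE,3,3);
      MatSetType(B,MATMPIAIJ);
      MatMPIAIJSetPreallocationCSR(B,ii,jj,vv);

    Process 1 passes its own local arrays ({0,3}, {0,1,2}, {4,5,6}) and 1 local row. The routine
    both preallocates and inserts the values, so the matrix is already assembled afterwards.
*/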
3377: /*@C
3378: MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3379: (the default parallel PETSc format). For good matrix assembly performance
3380: the user should preallocate the matrix storage by setting the parameters
3381: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3382: performance can be increased by more than a factor of 50.
3384: Collective on MPI_Comm
3386: Input Parameters:
3387: + B - the matrix
3388: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3389: (same value is used for all local rows)
3390: . d_nnz - array containing the number of nonzeros in the various rows of the
3391: DIAGONAL portion of the local submatrix (possibly different for each row)
3392: or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3393: The size of this array is equal to the number of local rows, i.e 'm'.
3394: For matrices that will be factored, you must leave room for (and set)
3395: the diagonal entry even if it is zero.
3396: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3397: submatrix (same value is used for all local rows).
3398: - o_nnz - array containing the number of nonzeros in the various rows of the
3399: OFF-DIAGONAL portion of the local submatrix (possibly different for
3400: each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3401: structure. The size of this array is equal to the number
3402: of local rows, i.e 'm'.
3404: If the *_nnz parameter is given then the *_nz parameter is ignored
3406: The AIJ format (also called the Yale sparse matrix format or
3407: compressed row storage (CSR)), is fully compatible with standard Fortran 77
3408: storage. The stored row and column indices begin with zero.
3409: See Users-Manual: ch_mat for details.
3411: The parallel matrix is partitioned such that the first m0 rows belong to
3412: process 0, the next m1 rows belong to process 1, the next m2 rows belong
3413: to process 2 etc.. where m0,m1,m2... are the input parameter 'm'.
3415: The DIAGONAL portion of the local submatrix of a processor can be defined
3416: as the submatrix which is obtained by extracting the part corresponding to
3417: the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3418: first row that belongs to the processor, r2 is the last row belonging to
3419: this processor, and c1-c2 is the range of indices of the local part of a
3420: vector suitable for applying the matrix to. This is an mxn matrix. In the
3421: common case of a square matrix, the row and column ranges are the same and
3422: the DIAGONAL part is also square. The remaining portion of the local
3423: submatrix (mxN) constitutes the OFF-DIAGONAL portion.
3425: If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.
3427: You can call MatGetInfo() to get information on how effective the preallocation was;
3428: for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3429: You can also run with the option -info and look for messages with the string
3430: malloc in them to see if additional memory allocation was needed.
3432: Example usage:
3434: Consider the following 8x8 matrix with 34 non-zero values, that is
3435: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3436: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3437: as follows:
3439: .vb
3440: 1 2 0 | 0 3 0 | 0 4
3441: Proc0 0 5 6 | 7 0 0 | 8 0
3442: 9 0 10 | 11 0 0 | 12 0
3443: -------------------------------------
3444: 13 0 14 | 15 16 17 | 0 0
3445: Proc1 0 18 0 | 19 20 21 | 0 0
3446: 0 0 0 | 22 23 0 | 24 0
3447: -------------------------------------
3448: Proc2 25 26 27 | 0 0 28 | 29 0
3449: 30 0 0 | 31 32 33 | 0 34
3450: .ve
3452: This can be represented as a collection of submatrices as:
3454: .vb
3455: A B C
3456: D E F
3457: G H I
3458: .ve
3460: Where the submatrices A,B,C are owned by proc0, D,E,F are
3461: owned by proc1, G,H,I are owned by proc2.
3463: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3464: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3465: The 'M','N' parameters are 8,8, and have the same values on all procs.
3467: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3468: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3469: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3470: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3471: part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3472: matrix, and [DF] as another SeqAIJ matrix.
3474: When d_nz, o_nz parameters are specified, d_nz storage elements are
3475: allocated for every row of the local diagonal submatrix, and o_nz
3476: storage locations are allocated for every row of the OFF-DIAGONAL submat.
3477: One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
3478: local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3479: In this case, the values of d_nz,o_nz are:
3480: .vb
3481: proc0 : d_nz = 2, o_nz = 2
3482: proc1 : d_nz = 3, o_nz = 2
3483: proc2 : d_nz = 1, o_nz = 4
3484: .ve
3485: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3486: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3487: for proc2, i.e. we are using 12+15+10=37 storage locations to store
3488: 34 values.
3490: When d_nnz, o_nnz parameters are specified, the storage is specified
3491: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3492: In the above case the values for d_nnz,o_nnz are:
3493: .vb
3494: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3495: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3496: proc2: d_nnz = [1,1] and o_nnz = [4,4]
3497: .ve
3498: Here the space allocated is the sum of all the above values, i.e. 34, and
3499: hence pre-allocation is perfect.
3501: Level: intermediate
3503: .keywords: matrix, aij, compressed row, sparse, parallel
3505: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
3506: MPIAIJ, MatGetInfo(), PetscSplitOwnership()
3507: @*/
3508: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3509: {
3515: PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
3516: return(0);
3517: }
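As a concrete illustration of the preallocation numbers worked out in the manual page above, a caller on the 8x8 example could pass the per-rank d_nnz/o_nnz arrays directly. This is a minimal sketch; the helper name and calling context are hypothetical, and the matrix B is assumed to have been created with MatCreate()/MatSetType(MATMPIAIJ)/MatSetSizes() already.

    #include <petscmat.h>

    /* Illustrative sketch: preallocate the 8x8 example with the per-rank
       d_nnz/o_nnz values worked out above.  The helper name is hypothetical. */
    static PetscErrorCode PreallocateExample(Mat B,PetscMPIInt rank)
    {
      PetscErrorCode ierr;
      const PetscInt d0[] = {2,2,2}, o0[] = {2,2,2};   /* rank 0 */
      const PetscInt d1[] = {3,3,2}, o1[] = {2,1,1};   /* rank 1 */
      const PetscInt d2[] = {1,1},   o2[] = {4,4};     /* rank 2 */

      if (rank == 0)      {ierr = MatMPIAIJSetPreallocation(B,0,d0,0,o0);CHKERRQ(ierr);}
      else if (rank == 1) {ierr = MatMPIAIJSetPreallocation(B,0,d1,0,o1);CHKERRQ(ierr);}
      else                {ierr = MatMPIAIJSetPreallocation(B,0,d2,0,o2);CHKERRQ(ierr);}
      return 0;
    }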
3521: /*@
3522: MatCreateMPIAIJWithArrays - creates a MPI AIJ matrix using arrays that contain in standard
3523: CSR format the local rows.
3525: Collective on MPI_Comm
3527: Input Parameters:
3528: + comm - MPI communicator
3529: . m - number of local rows (Cannot be PETSC_DECIDE)
3530: . n - This value should be the same as the local size used in creating the
3531: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3532: calculated if N is given) For square matrices n is almost always m.
3533: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3534: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3535: . i - row indices
3536: . j - column indices
3537: - a - matrix values
3539: Output Parameter:
3540: . mat - the matrix
3542: Level: intermediate
3544: Notes:
3545: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3546: thus you CANNOT change the matrix entries by changing the values of a[] after you have
3547: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3549: The i and j indices are 0 based, and the i indices are offsets into the local j array.
3551: The format which is used for the sparse matrix input, is equivalent to a
3552: row-major ordering, i.e. for the following matrix, the input data expected is
3553: as shown:
3555: 1 0 0
3556: 2 0 3 P0
3557: -------
3558: 4 5 6 P1
3560: Process0 [P0]: rows_owned=[0,1]
3561: i = {0,1,3} [size = nrow+1 = 2+1]
3562: j = {0,0,2} [size = nz = 3]
3563: v = {1,2,3} [size = nz = 3]
3565: Process1 [P1]: rows_owned=[2]
3566: i = {0,3} [size = nrow+1 = 1+1]
3567: j = {0,1,2} [size = nz = 3]
3568: v = {4,5,6} [size = nz = 3]
3570: .keywords: matrix, aij, compressed row, sparse, parallel
3572: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3573: MPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
3574: @*/
3575: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
3576: {
3580: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
3581: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
3582: MatCreate(comm,mat);
3583: MatSetSizes(*mat,m,n,M,N);
3584: /* MatSetBlockSizes(M,bs,cbs); */
3585: MatSetType(*mat,MATMPIAIJ);
3586: MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
3587: return(0);
3588: }
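For the two-process 3x3 example given in the notes above, the local CSR triples translate into calls roughly as follows. This is a minimal sketch; the helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: build the 3x3 example above on 2 ranks from the
       local CSR arrays listed for P0 and P1.  The helper name is hypothetical. */
    static PetscErrorCode CreateFromCSRExample(MPI_Comm comm,PetscMPIInt rank,Mat *A)
    {
      PetscErrorCode    ierr;
      const PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};   /* rank 0: rows 0-1 */
      const PetscScalar v0[] = {1,2,3};
      const PetscInt    i1[] = {0,3},   j1[] = {0,1,2};   /* rank 1: row 2    */
      const PetscScalar v1[] = {4,5,6};

      if (!rank) {ierr = MatCreateMPIAIJWithArrays(comm,2,PETSC_DECIDE,3,3,i0,j0,v0,A);CHKERRQ(ierr);}
      else       {ierr = MatCreateMPIAIJWithArrays(comm,1,PETSC_DECIDE,3,3,i1,j1,v1,A);CHKERRQ(ierr);}
      return 0;
    }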
3592: /*@C
3593: MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
3594: (the default parallel PETSc format). For good matrix assembly performance
3595: the user should preallocate the matrix storage by setting the parameters
3596: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3597: performance can be increased by more than a factor of 50.
3599: Collective on MPI_Comm
3601: Input Parameters:
3602: + comm - MPI communicator
3603: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3604: This value should be the same as the local size used in creating the
3605: y vector for the matrix-vector product y = Ax.
3606: . n - This value should be the same as the local size used in creating the
3607: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3608: calculated if N is given) For square matrices n is almost always m.
3609: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3610: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3611: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3612: (same value is used for all local rows)
3613: . d_nnz - array containing the number of nonzeros in the various rows of the
3614: DIAGONAL portion of the local submatrix (possibly different for each row)
3615: or NULL, if d_nz is used to specify the nonzero structure.
3616: The size of this array is equal to the number of local rows, i.e 'm'.
3617: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3618: submatrix (same value is used for all local rows).
3619: - o_nnz - array containing the number of nonzeros in the various rows of the
3620: OFF-DIAGONAL portion of the local submatrix (possibly different for
3621: each row) or NULL, if o_nz is used to specify the nonzero
3622: structure. The size of this array is equal to the number
3623: of local rows, i.e 'm'.
3625: Output Parameter:
3626: . A - the matrix
3628: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3629: MatXXXXSetPreallocation() paradigm instead of this routine directly.
3630: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
3632: Notes:
3633: If the *_nnz parameter is given then the *_nz parameter is ignored
3635: m,n,M,N parameters specify the size of the matrix, and its partitioning across
3636: processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
3637: storage requirements for this matrix.
3639: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
3640: processor then it must be used on all processors that share the object for
3641: that argument.
3643: The user MUST specify either the local or global matrix dimensions
3644: (possibly both).
3646: The parallel matrix is partitioned across processors such that the
3647: first m0 rows belong to process 0, the next m1 rows belong to
3648: process 1, the next m2 rows belong to process 2, etc., where
3649: m0,m1,m2,... are the input parameter 'm', i.e. each processor stores
3650: values corresponding to [m x N] submatrix.
3652: The columns are logically partitioned with the n0 columns belonging
3653: to 0th partition, the next n1 columns belonging to the next
3654: partition, etc., where n0,n1,n2,... are the input parameter 'n'.
3656: The DIAGONAL portion of the local submatrix on any given processor
3657: is the submatrix corresponding to the rows and columns m,n
3658: corresponding to the given processor, i.e. the diagonal matrix on
3659: process 0 is [m0 x n0], diagonal matrix on process 1 is [m1 x n1]
3660: etc. The remaining portion of the local submatrix [m x (N-n)]
3661: constitutes the OFF-DIAGONAL portion. The example below better
3662: illustrates this concept.
3664: For a square global matrix we define each processor's diagonal portion
3665: to be its local rows and the corresponding columns (a square submatrix);
3666: each processor's off-diagonal portion encompasses the remainder of the
3667: local matrix (a rectangular submatrix).
3669: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3671: When calling this routine with a single process communicator, a matrix of
3672: type SEQAIJ is returned. If a matrix of type MPIAIJ is desired for this
3673: type of communicator, use the construction mechanism:
3674: MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
3676: By default, this format uses inodes (identical nodes) when possible.
3677: We search for consecutive rows with the same nonzero structure, thereby
3678: reusing matrix information to achieve increased efficiency.
3680: Options Database Keys:
3681: + -mat_no_inode - Do not use inodes
3682: . -mat_inode_limit <limit> - Sets inode limit (max limit=5)
3683: - -mat_aij_oneindex - Internally use indexing starting at 1
3684: rather than 0. Note that when calling MatSetValues(),
3685: the user still MUST index entries starting at 0!
3688: Example usage:
3690: Consider the following 8x8 matrix with 34 non-zero values, that is
3691: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3692: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3693: as follows:
3695: .vb
3696: 1 2 0 | 0 3 0 | 0 4
3697: Proc0 0 5 6 | 7 0 0 | 8 0
3698: 9 0 10 | 11 0 0 | 12 0
3699: -------------------------------------
3700: 13 0 14 | 15 16 17 | 0 0
3701: Proc1 0 18 0 | 19 20 21 | 0 0
3702: 0 0 0 | 22 23 0 | 24 0
3703: -------------------------------------
3704: Proc2 25 26 27 | 0 0 28 | 29 0
3705: 30 0 0 | 31 32 33 | 0 34
3706: .ve
3708: This can be represented as a collection of submatrices as:
3710: .vb
3711: A B C
3712: D E F
3713: G H I
3714: .ve
3716: Where the submatrices A,B,C are owned by proc0, D,E,F are
3717: owned by proc1, G,H,I are owned by proc2.
3719: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3720: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3721: The 'M','N' parameters are 8,8, and have the same values on all procs.
3723: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3724: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3725: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3726: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3727: part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3728: matrix, and [DF] as another SeqAIJ matrix.
3730: When d_nz, o_nz parameters are specified, d_nz storage elements are
3731: allocated for every row of the local diagonal submatrix, and o_nz
3732: storage locations are allocated for every row of the OFF-DIAGONAL submat.
3733: One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
3734: local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3735: In this case, the values of d_nz,o_nz are:
3736: .vb
3737: proc0 : d_nz = 2, o_nz = 2
3738: proc1 : d_nz = 3, o_nz = 2
3739: proc2 : d_nz = 1, o_nz = 4
3740: .ve
3741: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3742: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3743: for proc2, i.e. we are using 12+15+10=37 storage locations to store
3744: 34 values.
3746: When d_nnz, o_nnz parameters are specified, the storage is specified
3747: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3748: In the above case the values for d_nnz,o_nnz are:
3749: .vb
3750: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3751: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3752: proc2: d_nnz = [1,1] and o_nnz = [4,4]
3753: .ve
3754: Here the space allocated is the sum of all the above values, i.e. 34, and
3755: hence pre-allocation is perfect.
3757: Level: intermediate
3759: .keywords: matrix, aij, compressed row, sparse, parallel
3761: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3762: MPIAIJ, MatCreateMPIAIJWithArrays()
3763: @*/
3764: PetscErrorCode MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
3765: {
3767: PetscMPIInt size;
3770: MatCreate(comm,A);
3771: MatSetSizes(*A,m,n,M,N);
3772: MPI_Comm_size(comm,&size);
3773: if (size > 1) {
3774: MatSetType(*A,MATMPIAIJ);
3775: MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
3776: } else {
3777: MatSetType(*A,MATSEQAIJ);
3778: MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
3779: }
3780: return(0);
3781: }
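The MatCreate()/MatSetType()/MatXXXXSetPreallocation() paradigm recommended in the manual page above looks roughly as follows. This is a minimal sketch; the helper name is hypothetical, and m, n, d_nnz, o_nnz are assumed to be supplied by the caller.

    #include <petscmat.h>

    /* Illustrative sketch of the recommended creation paradigm; the helper
       name is hypothetical and the sizes/preallocation arrays are assumed
       to be computed by the caller. */
    static PetscErrorCode CreateAIJExample(MPI_Comm comm,PetscInt m,PetscInt n,
                                           const PetscInt d_nnz[],const PetscInt o_nnz[],Mat *A)
    {
      PetscErrorCode ierr;

      ierr = MatCreate(comm,A);CHKERRQ(ierr);
      ierr = MatSetSizes(*A,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = MatSetType(*A,MATAIJ);CHKERRQ(ierr);
      ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
      /* calling both preallocation routines is safe; only the one matching
         the actual type takes effect */
      ierr = MatSeqAIJSetPreallocation(*A,0,d_nnz);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*A,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
      return 0;
    }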
3785: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
3786: {
3787: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
3790: if (Ad) *Ad = a->A;
3791: if (Ao) *Ao = a->B;
3792: if (colmap) *colmap = a->garray;
3793: return(0);
3794: }
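A minimal sketch of how the stored blocks can be inspected with MatMPIAIJGetSeqAIJ(); it assumes an assembled MATMPIAIJ matrix A, and the helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: view the diagonal and off-diagonal SeqAIJ blocks of
       an assembled MPIAIJ matrix A; garray maps local off-diagonal columns back
       to global column indices. */
    static PetscErrorCode ViewLocalBlocks(Mat A)
    {
      PetscErrorCode ierr;
      Mat            Ad,Ao;
      const PetscInt *garray;

      ierr = MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&garray);CHKERRQ(ierr);
      ierr = MatView(Ad,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);  /* local diagonal block */
      ierr = MatView(Ao,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);  /* local off-diagonal block */
      return 0;
    }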
3798: PetscErrorCode MatSetColoring_MPIAIJ(Mat A,ISColoring coloring)
3799: {
3801: PetscInt i;
3802: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
3805: if (coloring->ctype == IS_COLORING_GLOBAL) {
3806: ISColoringValue *allcolors,*colors;
3807: ISColoring ocoloring;
3809: /* set coloring for diagonal portion */
3810: MatSetColoring_SeqAIJ(a->A,coloring);
3812: /* set coloring for off-diagonal portion */
3813: ISAllGatherColors(PetscObjectComm((PetscObject)A),coloring->n,coloring->colors,NULL,&allcolors);
3814: PetscMalloc1(a->B->cmap->n+1,&colors);
3815: for (i=0; i<a->B->cmap->n; i++) {
3816: colors[i] = allcolors[a->garray[i]];
3817: }
3818: PetscFree(allcolors);
3819: ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,PETSC_OWN_POINTER,&ocoloring);
3820: MatSetColoring_SeqAIJ(a->B,ocoloring);
3821: ISColoringDestroy(&ocoloring);
3822: } else if (coloring->ctype == IS_COLORING_GHOSTED) {
3823: ISColoringValue *colors;
3824: PetscInt *larray;
3825: ISColoring ocoloring;
3827: /* set coloring for diagonal portion */
3828: PetscMalloc1(a->A->cmap->n+1,&larray);
3829: for (i=0; i<a->A->cmap->n; i++) {
3830: larray[i] = i + A->cmap->rstart;
3831: }
3832: ISGlobalToLocalMappingApply(A->cmap->mapping,IS_GTOLM_MASK,a->A->cmap->n,larray,NULL,larray);
3833: PetscMalloc1(a->A->cmap->n+1,&colors);
3834: for (i=0; i<a->A->cmap->n; i++) {
3835: colors[i] = coloring->colors[larray[i]];
3836: }
3837: PetscFree(larray);
3838: ISColoringCreate(PETSC_COMM_SELF,coloring->n,a->A->cmap->n,colors,PETSC_OWN_POINTER,&ocoloring);
3839: MatSetColoring_SeqAIJ(a->A,ocoloring);
3840: ISColoringDestroy(&ocoloring);
3842: /* set coloring for off-diagonal portion */
3843: PetscMalloc1(a->B->cmap->n+1,&larray);
3844: ISGlobalToLocalMappingApply(A->cmap->mapping,IS_GTOLM_MASK,a->B->cmap->n,a->garray,NULL,larray);
3845: PetscMalloc1(a->B->cmap->n+1,&colors);
3846: for (i=0; i<a->B->cmap->n; i++) {
3847: colors[i] = coloring->colors[larray[i]];
3848: }
3849: PetscFree(larray);
3850: ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,PETSC_OWN_POINTER,&ocoloring);
3851: MatSetColoring_SeqAIJ(a->B,ocoloring);
3852: ISColoringDestroy(&ocoloring);
3853: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support ISColoringType %d",(int)coloring->ctype);
3854: return(0);
3855: }
3859: PetscErrorCode MatSetValuesAdifor_MPIAIJ(Mat A,PetscInt nl,void *advalues)
3860: {
3861: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
3865: MatSetValuesAdifor_SeqAIJ(a->A,nl,advalues);
3866: MatSetValuesAdifor_SeqAIJ(a->B,nl,advalues);
3867: return(0);
3868: }
3872: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
3873: {
3875: PetscInt m,N,i,rstart,nnz,Ii;
3876: PetscInt *indx;
3877: PetscScalar *values;
3880: MatGetSize(inmat,&m,&N);
3881: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
3882: PetscInt *dnz,*onz,sum,bs,cbs;
3884: if (n == PETSC_DECIDE) {
3885: PetscSplitOwnership(comm,&n,&N);
3886: }
3887: /* Check sum(n) = N */
3888: MPI_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
3889: if (sum != N) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns != global columns %d",N);
3891: MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
3892: rstart -= m;
3894: MatPreallocateInitialize(comm,m,n,dnz,onz);
3895: for (i=0; i<m; i++) {
3896: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
3897: MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
3898: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
3899: }
3901: MatCreate(comm,outmat);
3902: MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
3903: MatGetBlockSizes(inmat,&bs,&cbs);
3904: MatSetBlockSizes(*outmat,bs,cbs);
3905: MatSetType(*outmat,MATMPIAIJ);
3906: MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
3907: MatPreallocateFinalize(dnz,onz);
3908: }
3910: /* numeric phase */
3911: MatGetOwnershipRange(*outmat,&rstart,NULL);
3912: for (i=0; i<m; i++) {
3913: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
3914: Ii = i + rstart;
3915: MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
3916: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
3917: }
3918: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
3919: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
3920: return(0);
3921: }
3925: PetscErrorCode MatFileSplit(Mat A,char *outfile)
3926: {
3927: PetscErrorCode ierr;
3928: PetscMPIInt rank;
3929: PetscInt m,N,i,rstart,nnz;
3930: size_t len;
3931: const PetscInt *indx;
3932: PetscViewer out;
3933: char *name;
3934: Mat B;
3935: const PetscScalar *values;
3938: MatGetLocalSize(A,&m,0);
3939: MatGetSize(A,0,&N);
3940: /* Should this be the type of the diagonal block of A? */
3941: MatCreate(PETSC_COMM_SELF,&B);
3942: MatSetSizes(B,m,N,m,N);
3943: MatSetBlockSizesFromMats(B,A,A);
3944: MatSetType(B,MATSEQAIJ);
3945: MatSeqAIJSetPreallocation(B,0,NULL);
3946: MatGetOwnershipRange(A,&rstart,0);
3947: for (i=0; i<m; i++) {
3948: MatGetRow(A,i+rstart,&nnz,&indx,&values);
3949: MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
3950: MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
3951: }
3952: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3953: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3955: MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
3956: PetscStrlen(outfile,&len);
3957: PetscMalloc1(len+5,&name);
3958: sprintf(name,"%s.%d",outfile,rank);
3959: PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
3960: PetscFree(name);
3961: MatView(B,out);
3962: PetscViewerDestroy(&out);
3963: MatDestroy(&B);
3964: return(0);
3965: }
3967: extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
3970: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
3971: {
3972: PetscErrorCode ierr;
3973: Mat_Merge_SeqsToMPI *merge;
3974: PetscContainer container;
3977: PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
3978: if (container) {
3979: PetscContainerGetPointer(container,(void**)&merge);
3980: PetscFree(merge->id_r);
3981: PetscFree(merge->len_s);
3982: PetscFree(merge->len_r);
3983: PetscFree(merge->bi);
3984: PetscFree(merge->bj);
3985: PetscFree(merge->buf_ri[0]);
3986: PetscFree(merge->buf_ri);
3987: PetscFree(merge->buf_rj[0]);
3988: PetscFree(merge->buf_rj);
3989: PetscFree(merge->coi);
3990: PetscFree(merge->coj);
3991: PetscFree(merge->owners_co);
3992: PetscLayoutDestroy(&merge->rowmap);
3993: PetscFree(merge);
3994: PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
3995: }
3996: MatDestroy_MPIAIJ(A);
3997: return(0);
3998: }
4000: #include <../src/mat/utils/freespace.h>
4001: #include <petscbt.h>
4005: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4006: {
4007: PetscErrorCode ierr;
4008: MPI_Comm comm;
4009: Mat_SeqAIJ *a =(Mat_SeqAIJ*)seqmat->data;
4010: PetscMPIInt size,rank,taga,*len_s;
4011: PetscInt N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4012: PetscInt proc,m;
4013: PetscInt **buf_ri,**buf_rj;
4014: PetscInt k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4015: PetscInt nrows,**buf_ri_k,**nextrow,**nextai;
4016: MPI_Request *s_waits,*r_waits;
4017: MPI_Status *status;
4018: MatScalar *aa=a->a;
4019: MatScalar **abuf_r,*ba_i;
4020: Mat_Merge_SeqsToMPI *merge;
4021: PetscContainer container;
4024: PetscObjectGetComm((PetscObject)mpimat,&comm);
4025: PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);
4027: MPI_Comm_size(comm,&size);
4028: MPI_Comm_rank(comm,&rank);
4030: PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4031: PetscContainerGetPointer(container,(void**)&merge);
4033: bi = merge->bi;
4034: bj = merge->bj;
4035: buf_ri = merge->buf_ri;
4036: buf_rj = merge->buf_rj;
4038: PetscMalloc1(size,&status);
4039: owners = merge->rowmap->range;
4040: len_s = merge->len_s;
4042: /* send and recv matrix values */
4043: /*-----------------------------*/
4044: PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4045: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
4047: PetscMalloc1(merge->nsend+1,&s_waits);
4048: for (proc=0,k=0; proc<size; proc++) {
4049: if (!len_s[proc]) continue;
4050: i = owners[proc];
4051: MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4052: k++;
4053: }
4055: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4056: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4057: PetscFree(status);
4059: PetscFree(s_waits);
4060: PetscFree(r_waits);
4062: /* insert mat values of mpimat */
4063: /*----------------------------*/
4064: PetscMalloc1(N,&ba_i);
4065: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4067: for (k=0; k<merge->nrecv; k++) {
4068: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4069: nrows = *(buf_ri_k[k]);
4070: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */
4071: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4072: }
4074: /* set values of ba */
4075: m = merge->rowmap->n;
4076: for (i=0; i<m; i++) {
4077: arow = owners[rank] + i;
4078: bj_i = bj+bi[i]; /* col indices of the i-th row of mpimat */
4079: bnzi = bi[i+1] - bi[i];
4080: PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));
4082: /* add local non-zero vals of this proc's seqmat into ba */
4083: anzi = ai[arow+1] - ai[arow];
4084: aj = a->j + ai[arow];
4085: aa = a->a + ai[arow];
4086: nextaj = 0;
4087: for (j=0; nextaj<anzi; j++) {
4088: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4089: ba_i[j] += aa[nextaj++];
4090: }
4091: }
4093: /* add received vals into ba */
4094: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4095: /* i-th row */
4096: if (i == *nextrow[k]) {
4097: anzi = *(nextai[k]+1) - *nextai[k];
4098: aj = buf_rj[k] + *(nextai[k]);
4099: aa = abuf_r[k] + *(nextai[k]);
4100: nextaj = 0;
4101: for (j=0; nextaj<anzi; j++) {
4102: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4103: ba_i[j] += aa[nextaj++];
4104: }
4105: }
4106: nextrow[k]++; nextai[k]++;
4107: }
4108: }
4109: MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4110: }
4111: MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4112: MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);
4114: PetscFree(abuf_r[0]);
4115: PetscFree(abuf_r);
4116: PetscFree(ba_i);
4117: PetscFree3(buf_ri_k,nextrow,nextai);
4118: PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4119: return(0);
4120: }
4122: extern PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat);
4126: PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4127: {
4128: PetscErrorCode ierr;
4129: Mat B_mpi;
4130: Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data;
4131: PetscMPIInt size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4132: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
4133: PetscInt M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4134: PetscInt len,proc,*dnz,*onz,bs,cbs;
4135: PetscInt k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4136: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4137: MPI_Request *si_waits,*sj_waits,*ri_waits,*rj_waits;
4138: MPI_Status *status;
4139: PetscFreeSpaceList free_space=NULL,current_space=NULL;
4140: PetscBT lnkbt;
4141: Mat_Merge_SeqsToMPI *merge;
4142: PetscContainer container;
4145: PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);
4147: /* make sure it is a PETSc comm */
4148: PetscCommDuplicate(comm,&comm,NULL);
4149: MPI_Comm_size(comm,&size);
4150: MPI_Comm_rank(comm,&rank);
4152: PetscNew(&merge);
4153: PetscMalloc1(size,&status);
4155: /* determine row ownership */
4156: /*---------------------------------------------------------*/
4157: PetscLayoutCreate(comm,&merge->rowmap);
4158: PetscLayoutSetLocalSize(merge->rowmap,m);
4159: PetscLayoutSetSize(merge->rowmap,M);
4160: PetscLayoutSetBlockSize(merge->rowmap,1);
4161: PetscLayoutSetUp(merge->rowmap);
4162: PetscMalloc1(size,&len_si);
4163: PetscMalloc1(size,&merge->len_s);
4165: m = merge->rowmap->n;
4166: owners = merge->rowmap->range;
4168: /* determine the number of messages to send, their lengths */
4169: /*---------------------------------------------------------*/
4170: len_s = merge->len_s;
4172: len = 0; /* length of buf_si[] */
4173: merge->nsend = 0;
4174: for (proc=0; proc<size; proc++) {
4175: len_si[proc] = 0;
4176: if (proc == rank) {
4177: len_s[proc] = 0;
4178: } else {
4179: len_si[proc] = owners[proc+1] - owners[proc] + 1;
4180: len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4181: }
4182: if (len_s[proc]) {
4183: merge->nsend++;
4184: nrows = 0;
4185: for (i=owners[proc]; i<owners[proc+1]; i++) {
4186: if (ai[i+1] > ai[i]) nrows++;
4187: }
4188: len_si[proc] = 2*(nrows+1);
4189: len += len_si[proc];
4190: }
4191: }
4193: /* determine the number and length of messages to receive for ij-structure */
4194: /*-------------------------------------------------------------------------*/
4195: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4196: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
4198: /* post the Irecv of j-structure */
4199: /*-------------------------------*/
4200: PetscCommGetNewTag(comm,&tagj);
4201: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);
4203: /* post the Isend of j-structure */
4204: /*--------------------------------*/
4205: PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);
4207: for (proc=0, k=0; proc<size; proc++) {
4208: if (!len_s[proc]) continue;
4209: i = owners[proc];
4210: MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4211: k++;
4212: }
4214: /* receives and sends of j-structure are complete */
4215: /*------------------------------------------------*/
4216: if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4217: if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}
4219: /* send and recv i-structure */
4220: /*---------------------------*/
4221: PetscCommGetNewTag(comm,&tagi);
4222: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);
4224: PetscMalloc1(len+1,&buf_s);
4225: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4226: for (proc=0,k=0; proc<size; proc++) {
4227: if (!len_s[proc]) continue;
4228: /* form outgoing message for i-structure:
4229: buf_si[0]: nrows to be sent
4230: [1:nrows]: row index (global)
4231: [nrows+1:2*nrows+1]: i-structure index
4232: */
4233: /*-------------------------------------------*/
4234: nrows = len_si[proc]/2 - 1;
4235: buf_si_i = buf_si + nrows+1;
4236: buf_si[0] = nrows;
4237: buf_si_i[0] = 0;
4238: nrows = 0;
4239: for (i=owners[proc]; i<owners[proc+1]; i++) {
4240: anzi = ai[i+1] - ai[i];
4241: if (anzi) {
4242: buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4243: buf_si[nrows+1] = i-owners[proc]; /* local row index */
4244: nrows++;
4245: }
4246: }
4247: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4248: k++;
4249: buf_si += len_si[proc];
4250: }
4252: if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4253: if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}
4255: PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4256: for (i=0; i<merge->nrecv; i++) {
4257: PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4258: }
4260: PetscFree(len_si);
4261: PetscFree(len_ri);
4262: PetscFree(rj_waits);
4263: PetscFree2(si_waits,sj_waits);
4264: PetscFree(ri_waits);
4265: PetscFree(buf_s);
4266: PetscFree(status);
4268: /* compute a local seq matrix in each processor */
4269: /*----------------------------------------------*/
4270: /* allocate bi array and free space for accumulating nonzero column info */
4271: PetscMalloc1(m+1,&bi);
4272: bi[0] = 0;
4274: /* create and initialize a linked list */
4275: nlnk = N+1;
4276: PetscLLCreate(N,N,nlnk,lnk,lnkbt);
4278: /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4279: len = ai[owners[rank+1]] - ai[owners[rank]];
4280: PetscFreeSpaceGet((PetscInt)(2*len+1),&free_space);
4282: current_space = free_space;
4284: /* determine symbolic info for each local row */
4285: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4287: for (k=0; k<merge->nrecv; k++) {
4288: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4289: nrows = *buf_ri_k[k];
4290: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
4291: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4292: }
4294: MatPreallocateInitialize(comm,m,n,dnz,onz);
4295: len = 0;
4296: for (i=0; i<m; i++) {
4297: bnzi = 0;
4298: /* add local non-zero cols of this proc's seqmat into lnk */
4299: arow = owners[rank] + i;
4300: anzi = ai[arow+1] - ai[arow];
4301: aj = a->j + ai[arow];
4302: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4303: bnzi += nlnk;
4304: /* add received col data into lnk */
4305: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4306: if (i == *nextrow[k]) { /* i-th row */
4307: anzi = *(nextai[k]+1) - *nextai[k];
4308: aj = buf_rj[k] + *nextai[k];
4309: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4310: bnzi += nlnk;
4311: nextrow[k]++; nextai[k]++;
4312: }
4313: }
4314: if (len < bnzi) len = bnzi; /* =max(bnzi) */
4316: /* if free space is not available, make more free space */
4317: if (current_space->local_remaining<bnzi) {
4318: PetscFreeSpaceGet(bnzi+current_space->total_array_size,&current_space);
4319: nspacedouble++;
4320: }
4321: /* copy data into free space, then initialize lnk */
4322: PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4323: MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);
4325: current_space->array += bnzi;
4326: current_space->local_used += bnzi;
4327: current_space->local_remaining -= bnzi;
4329: bi[i+1] = bi[i] + bnzi;
4330: }
4332: PetscFree3(buf_ri_k,nextrow,nextai);
4334: PetscMalloc1(bi[m]+1,&bj);
4335: PetscFreeSpaceContiguous(&free_space,bj);
4336: PetscLLDestroy(lnk,lnkbt);
4338: /* create symbolic parallel matrix B_mpi */
4339: /*---------------------------------------*/
4340: MatGetBlockSizes(seqmat,&bs,&cbs);
4341: MatCreate(comm,&B_mpi);
4342: if (n==PETSC_DECIDE) {
4343: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4344: } else {
4345: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4346: }
4347: MatSetBlockSizes(B_mpi,bs,cbs);
4348: MatSetType(B_mpi,MATMPIAIJ);
4349: MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4350: MatPreallocateFinalize(dnz,onz);
4351: MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
4353: /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4354: B_mpi->assembled = PETSC_FALSE;
4355: B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4356: merge->bi = bi;
4357: merge->bj = bj;
4358: merge->buf_ri = buf_ri;
4359: merge->buf_rj = buf_rj;
4360: merge->coi = NULL;
4361: merge->coj = NULL;
4362: merge->owners_co = NULL;
4364: PetscCommDestroy(&comm);
4366: /* attach the supporting struct to B_mpi for reuse */
4367: PetscContainerCreate(PETSC_COMM_SELF,&container);
4368: PetscContainerSetPointer(container,merge);
4369: PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4370: PetscContainerDestroy(&container);
4371: *mpimat = B_mpi;
4373: PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4374: return(0);
4375: }
4379: /*@C
4380: MatCreateMPIAIJSumSeqAIJ - Creates a MPIAIJ matrix by adding sequential
4381: matrices from each processor
4383: Collective on MPI_Comm
4385: Input Parameters:
4386: + comm - the communicator the parallel matrix will live on
4387: . seqmat - the input sequential matrix on each processor
4388: . m - number of local rows (or PETSC_DECIDE)
4389: . n - number of local columns (or PETSC_DECIDE)
4390: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4392: Output Parameter:
4393: . mpimat - the parallel matrix generated
4395: Level: advanced
4397: Notes:
4398: The dimensions of the sequential matrix in each processor MUST be the same.
4399: The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4400: destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
4401: @*/
4402: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4403: {
4405: PetscMPIInt size;
4408: MPI_Comm_size(comm,&size);
4409: if (size == 1) {
4410: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4411: if (scall == MAT_INITIAL_MATRIX) {
4412: MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4413: } else {
4414: MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4415: }
4416: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4417: return(0);
4418: }
4419: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4420: if (scall == MAT_INITIAL_MATRIX) {
4421: MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4422: }
4423: MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4424: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4425: return(0);
4426: }
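A minimal usage sketch, assuming comm and a per-rank sequential matrix seqmat of the same global size already exist; the sum is assembled once with MAT_INITIAL_MATRIX and re-filled with MAT_REUSE_MATRIX after only the values of seqmat change. The helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: sum per-rank sequential matrices into one MPIAIJ
       matrix, then refresh it after the values of seqmat change (same pattern). */
    static PetscErrorCode SumSeqExample(MPI_Comm comm,Mat seqmat,Mat *C)
    {
      PetscErrorCode ierr;

      ierr = MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);
      /* ... update the numerical values of seqmat here, keeping the pattern ... */
      ierr = MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,C);CHKERRQ(ierr);
      return 0;
    }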
4430: /*@
4431: MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from an MPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4432: mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
4433: with MatGetSize()
4435: Not Collective
4437: Input Parameters:
4438: + A - the matrix
4439: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4441: Output Parameter:
4442: . A_loc - the local sequential matrix generated
4444: Level: developer
4446: .seealso: MatGetOwnerShipRange(), MatMPIAIJGetLocalMatCondensed()
4448: @*/
4449: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4450: {
4452: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
4453: Mat_SeqAIJ *mat,*a,*b;
4454: PetscInt *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4455: MatScalar *aa,*ba,*cam;
4456: PetscScalar *ca;
4457: PetscInt am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4458: PetscInt *ci,*cj,col,ncols_d,ncols_o,jo;
4459: PetscBool match;
4460: MPI_Comm comm;
4461: PetscMPIInt size;
4464: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
4465: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MPIAIJ matrix as input");
4466: PetscObjectGetComm((PetscObject)A,&comm);
4467: MPI_Comm_size(comm,&size);
4468: if (size == 1 && scall == MAT_REUSE_MATRIX) return(0);
4470: PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
4471: a = (Mat_SeqAIJ*)(mpimat->A)->data;
4472: b = (Mat_SeqAIJ*)(mpimat->B)->data;
4473: ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4474: aa = a->a; ba = b->a;
4475: if (scall == MAT_INITIAL_MATRIX) {
4476: if (size == 1) {
4477: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);
4478: return(0);
4479: }
4481: PetscMalloc1(1+am,&ci);
4482: ci[0] = 0;
4483: for (i=0; i<am; i++) {
4484: ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4485: }
4486: PetscMalloc1(1+ci[am],&cj);
4487: PetscMalloc1(1+ci[am],&ca);
4488: k = 0;
4489: for (i=0; i<am; i++) {
4490: ncols_o = bi[i+1] - bi[i];
4491: ncols_d = ai[i+1] - ai[i];
4492: /* off-diagonal portion of A */
4493: for (jo=0; jo<ncols_o; jo++) {
4494: col = cmap[*bj];
4495: if (col >= cstart) break;
4496: cj[k] = col; bj++;
4497: ca[k++] = *ba++;
4498: }
4499: /* diagonal portion of A */
4500: for (j=0; j<ncols_d; j++) {
4501: cj[k] = cstart + *aj++;
4502: ca[k++] = *aa++;
4503: }
4504: /* off-diagonal portion of A */
4505: for (j=jo; j<ncols_o; j++) {
4506: cj[k] = cmap[*bj++];
4507: ca[k++] = *ba++;
4508: }
4509: }
4510: /* put together the new matrix */
4511: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
4512: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4513: /* Since these are PETSc arrays, change flags to free them as necessary. */
4514: mat = (Mat_SeqAIJ*)(*A_loc)->data;
4515: mat->free_a = PETSC_TRUE;
4516: mat->free_ij = PETSC_TRUE;
4517: mat->nonew = 0;
4518: } else if (scall == MAT_REUSE_MATRIX) {
4519: mat=(Mat_SeqAIJ*)(*A_loc)->data;
4520: ci = mat->i; cj = mat->j; cam = mat->a;
4521: for (i=0; i<am; i++) {
4522: /* off-diagonal portion of A */
4523: ncols_o = bi[i+1] - bi[i];
4524: for (jo=0; jo<ncols_o; jo++) {
4525: col = cmap[*bj];
4526: if (col >= cstart) break;
4527: *cam++ = *ba++; bj++;
4528: }
4529: /* diagonal portion of A */
4530: ncols_d = ai[i+1] - ai[i];
4531: for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4532: /* off-diagonal portion of A */
4533: for (j=jo; j<ncols_o; j++) {
4534: *cam++ = *ba++; bj++;
4535: }
4536: }
4537: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4538: PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
4539: return(0);
4540: }
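A minimal usage sketch of MatMPIAIJGetLocalMat(), assuming an assembled MPIAIJ matrix A; the helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: gather this rank's rows (all N global columns) into
       a SeqAIJ matrix, refresh it after A's values change, then free it. */
    static PetscErrorCode LocalMatExample(Mat A)
    {
      PetscErrorCode ierr;
      Mat            A_loc;

      ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
      /* ... use A_loc; later, after A's values change (same pattern) ... */
      ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
      ierr = MatDestroy(&A_loc);CHKERRQ(ierr);
      return 0;
    }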
4544: /*@C
4545: MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from an MPIAIJ matrix by taking all its local rows and NON-ZERO columns
4547: Not Collective
4549: Input Parameters:
4550: + A - the matrix
4551: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4552: - row, col - index sets of rows and columns to extract (or NULL)
4554: Output Parameter:
4555: . A_loc - the local sequential matrix generated
4557: Level: developer
4559: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()
4561: @*/
4562: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4563: {
4564: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4566: PetscInt i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4567: IS isrowa,iscola;
4568: Mat *aloc;
4569: PetscBool match;
4572: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
4573: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MPIAIJ matrix as input");
4574: PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
4575: if (!row) {
4576: start = A->rmap->rstart; end = A->rmap->rend;
4577: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
4578: } else {
4579: isrowa = *row;
4580: }
4581: if (!col) {
4582: start = A->cmap->rstart;
4583: cmap = a->garray;
4584: nzA = a->A->cmap->n;
4585: nzB = a->B->cmap->n;
4586: PetscMalloc1(nzA+nzB, &idx);
4587: ncols = 0;
4588: for (i=0; i<nzB; i++) {
4589: if (cmap[i] < start) idx[ncols++] = cmap[i];
4590: else break;
4591: }
4592: imark = i;
4593: for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4594: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4595: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
4596: } else {
4597: iscola = *col;
4598: }
4599: if (scall != MAT_INITIAL_MATRIX) {
4600: PetscMalloc1(1,&aloc);
4601: aloc[0] = *A_loc;
4602: }
4603: MatGetSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
4604: *A_loc = aloc[0];
4605: PetscFree(aloc);
4606: if (!row) {
4607: ISDestroy(&isrowa);
4608: }
4609: if (!col) {
4610: ISDestroy(&iscola);
4611: }
4612: PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
4613: return(0);
4614: }
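A minimal usage sketch of MatMPIAIJGetLocalMatCondensed(), assuming an assembled MPIAIJ matrix A; passing NULL for row and col lets the routine build the default index sets. The helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: extract the local rows of A restricted to its
       nonzero columns. */
    static PetscErrorCode LocalMatCondensedExample(Mat A)
    {
      PetscErrorCode ierr;
      Mat            A_cond;

      ierr = MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_cond);CHKERRQ(ierr);
      /* ... use A_cond ... */
      ierr = MatDestroy(&A_cond);CHKERRQ(ierr);
      return 0;
    }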
4618: /*@C
4619: MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local part of A
4621: Collective on Mat
4623: Input Parameters:
4624: + A,B - the matrices in mpiaij format
4625: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4626: - rowb, colb - index sets of rows and columns of B to extract (or NULL)
4628: Output Parameter:
4629: + rowb, colb - index sets of rows and columns of B to extract
4630: - B_seq - the sequential matrix generated
4632: Level: developer
4634: @*/
4635: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
4636: {
4637: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4639: PetscInt *idx,i,start,ncols,nzA,nzB,*cmap,imark;
4640: IS isrowb,iscolb;
4641: Mat *bseq=NULL;
4644: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
4645: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4646: }
4647: PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);
4649: if (scall == MAT_INITIAL_MATRIX) {
4650: start = A->cmap->rstart;
4651: cmap = a->garray;
4652: nzA = a->A->cmap->n;
4653: nzB = a->B->cmap->n;
4654: PetscMalloc1(nzA+nzB, &idx);
4655: ncols = 0;
4656: for (i=0; i<nzB; i++) { /* row < local row index */
4657: if (cmap[i] < start) idx[ncols++] = cmap[i];
4658: else break;
4659: }
4660: imark = i;
4661: for (i=0; i<nzA; i++) idx[ncols++] = start + i; /* local rows */
4662: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
4663: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
4664: ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
4665: } else {
4666: if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
4667: isrowb = *rowb; iscolb = *colb;
4668: PetscMalloc1(1,&bseq);
4669: bseq[0] = *B_seq;
4670: }
4671: MatGetSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
4672: *B_seq = bseq[0];
4673: PetscFree(bseq);
4674: if (!rowb) {
4675: ISDestroy(&isrowb);
4676: } else {
4677: *rowb = isrowb;
4678: }
4679: if (!colb) {
4680: ISDestroy(&iscolb);
4681: } else {
4682: *colb = iscolb;
4683: }
4684: PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
4685: return(0);
4686: }
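A minimal usage sketch of MatGetBrowsOfAcols(), assuming assembled MPIAIJ matrices A and B with compatible layouts; rowb/colb are kept so the communication pattern can be reused when only the values of B change. The helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: fetch the rows of B matching the nonzero columns of
       the local part of A, reuse the pattern, then clean up. */
    static PetscErrorCode BrowsOfAcolsExample(Mat A,Mat B)
    {
      PetscErrorCode ierr;
      IS             rowb = NULL,colb = NULL;
      Mat            B_seq;

      ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);CHKERRQ(ierr);
      /* ... later, after B's values change (same pattern) ... */
      ierr = MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);CHKERRQ(ierr);
      ierr = ISDestroy(&rowb);CHKERRQ(ierr);
      ierr = ISDestroy(&colb);CHKERRQ(ierr);
      ierr = MatDestroy(&B_seq);CHKERRQ(ierr);
      return 0;
    }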
4690: /*
4691: MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
4692: of the OFF-DIAGONAL portion of local A
4694: Collective on Mat
4696: Input Parameters:
4697: + A,B - the matrices in mpiaij format
4698: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4700: Output Parameter:
4701: + startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
4702: . startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
4703: . bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
4704: - B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
4706: Level: developer
4708: */
4709: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
4710: {
4711: VecScatter_MPI_General *gen_to,*gen_from;
4712: PetscErrorCode ierr;
4713: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4714: Mat_SeqAIJ *b_oth;
4715: VecScatter ctx =a->Mvctx;
4716: MPI_Comm comm;
4717: PetscMPIInt *rprocs,*sprocs,tag=((PetscObject)ctx)->tag,rank;
4718: PetscInt *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj;
4719: PetscScalar *rvalues,*svalues;
4720: MatScalar *b_otha,*bufa,*bufA;
4721: PetscInt i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = 0,*sstarts,*sstartsj,len;
4722: MPI_Request *rwaits = NULL,*swaits = NULL;
4723: MPI_Status *sstatus,rstatus;
4724: PetscMPIInt jj,size;
4725: PetscInt *cols,sbs,rbs;
4726: PetscScalar *vals;
4729: PetscObjectGetComm((PetscObject)A,&comm);
4730: MPI_Comm_size(comm,&size);
4732: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
4733: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4734: }
4735: PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
4736: MPI_Comm_rank(comm,&rank);
4738: gen_to = (VecScatter_MPI_General*)ctx->todata;
4739: gen_from = (VecScatter_MPI_General*)ctx->fromdata;
4740: rvalues = gen_from->values; /* holds the length of receiving row */
4741: svalues = gen_to->values; /* holds the length of sending row */
4742: nrecvs = gen_from->n;
4743: nsends = gen_to->n;
4745: PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);
4746: srow = gen_to->indices; /* local row index to be sent */
4747: sstarts = gen_to->starts;
4748: sprocs = gen_to->procs;
4749: sstatus = gen_to->sstatus;
4750: sbs = gen_to->bs;
4751: rstarts = gen_from->starts;
4752: rprocs = gen_from->procs;
4753: rbs = gen_from->bs;
4755: if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
4756: if (scall == MAT_INITIAL_MATRIX) {
4757: /* i-array */
4758: /*---------*/
4759: /* post receives */
4760: for (i=0; i<nrecvs; i++) {
4761: rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4762: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
4763: MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
4764: }
4766: /* pack the outgoing message */
4767: PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);
4769: sstartsj[0] = 0;
4770: rstartsj[0] = 0;
4771: len = 0; /* total length of j or a array to be sent */
4772: k = 0;
4773: for (i=0; i<nsends; i++) {
4774: rowlen = (PetscInt*)svalues + sstarts[i]*sbs;
4775: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4776: for (j=0; j<nrows; j++) {
4777: row = srow[k] + B->rmap->range[rank]; /* global row idx */
4778: for (l=0; l<sbs; l++) {
4779: MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */
4781: rowlen[j*sbs+l] = ncols;
4783: len += ncols;
4784: MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
4785: }
4786: k++;
4787: }
4788: MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);
4790: sstartsj[i+1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
4791: }
4792: /* recvs and sends of i-array are completed */
4793: i = nrecvs;
4794: while (i--) {
4795: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
4796: }
4797: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
4799: /* allocate buffers for sending j and a arrays */
4800: PetscMalloc1(len+1,&bufj);
4801: PetscMalloc1(len+1,&bufa);
4803: /* create i-array of B_oth */
4804: PetscMalloc1(aBn+2,&b_othi);
4806: b_othi[0] = 0;
4807: len = 0; /* total length of j or a array to be received */
4808: k = 0;
4809: for (i=0; i<nrecvs; i++) {
4810: rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4811: nrows = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
4812: for (j=0; j<nrows; j++) {
4813: b_othi[k+1] = b_othi[k] + rowlen[j];
4814: len += rowlen[j]; k++;
4815: }
4816: rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
4817: }
4819: /* allocate space for j and a arrays of B_oth */
4820: PetscMalloc1(b_othi[aBn]+1,&b_othj);
4821: PetscMalloc1(b_othi[aBn]+1,&b_otha);
4823: /* j-array */
4824: /*---------*/
4825: /* post receives of j-array */
4826: for (i=0; i<nrecvs; i++) {
4827: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4828: MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
4829: }
4831: /* pack the outgoing message j-array */
4832: k = 0;
4833: for (i=0; i<nsends; i++) {
4834: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4835: bufJ = bufj+sstartsj[i];
4836: for (j=0; j<nrows; j++) {
4837: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
4838: for (ll=0; ll<sbs; ll++) {
4839: MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
4840: for (l=0; l<ncols; l++) {
4841: *bufJ++ = cols[l];
4842: }
4843: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
4844: }
4845: }
4846: MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
4847: }
4849: /* recvs and sends of j-array are completed */
4850: i = nrecvs;
4851: while (i--) {
4852: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
4853: }
4854: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
4855: } else if (scall == MAT_REUSE_MATRIX) {
4856: sstartsj = *startsj_s;
4857: rstartsj = *startsj_r;
4858: bufa = *bufa_ptr;
4859: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
4860: b_otha = b_oth->a;
4861: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
4863: /* a-array */
4864: /*---------*/
4865: /* post receives of a-array */
4866: for (i=0; i<nrecvs; i++) {
4867: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4868: MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
4869: }
4871: /* pack the outgoing message a-array */
4872: k = 0;
4873: for (i=0; i<nsends; i++) {
4874: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4875: bufA = bufa+sstartsj[i];
4876: for (j=0; j<nrows; j++) {
4877: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
4878: for (ll=0; ll<sbs; ll++) {
4879: MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
4880: for (l=0; l<ncols; l++) {
4881: *bufA++ = vals[l];
4882: }
4883: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
4884: }
4885: }
4886: MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
4887: }
4888: /* recvs and sends of a-array are completed */
4889: i = nrecvs;
4890: while (i--) {
4891: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
4892: }
4893: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
4894: PetscFree2(rwaits,swaits);
4896: if (scall == MAT_INITIAL_MATRIX) {
4897: /* put together the new matrix */
4898: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);
4900: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4901: /* Since these are PETSc arrays, change flags to free them as necessary. */
4902: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
4903: b_oth->free_a = PETSC_TRUE;
4904: b_oth->free_ij = PETSC_TRUE;
4905: b_oth->nonew = 0;
4907: PetscFree(bufj);
4908: if (!startsj_s || !bufa_ptr) {
4909: PetscFree2(sstartsj,rstartsj);
4910: PetscFree(bufa_ptr);
4911: } else {
4912: *startsj_s = sstartsj;
4913: *startsj_r = rstartsj;
4914: *bufa_ptr = bufa;
4915: }
4916: }
4917: PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
4918: return(0);
4919: }
4923: /*@C
4924: MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
4926: Not Collective
4928: Input Parameters:
4929: . A - The matrix in mpiaij format
4931: Output Parameter:
4932: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
4933: . colmap - A map from global column index to local index into lvec
4934: - multScatter - A scatter from the argument of a matrix-vector product to lvec
4936: Level: developer
4938: @*/
4939: #if defined(PETSC_USE_CTABLE)
4940: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
4941: #else
4942: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
4943: #endif
4944: {
4945: Mat_MPIAIJ *a;
4952: a = (Mat_MPIAIJ*) A->data;
4953: if (lvec) *lvec = a->lvec;
4954: if (colmap) *colmap = a->colmap;
4955: if (multScatter) *multScatter = a->Mvctx;
4956: return(0);
4957: }
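A minimal usage sketch of MatGetCommunicationStructs(), assuming an assembled MPIAIJ matrix A; colmap is skipped (NULL) because its C type depends on whether PETSc was configured with PETSC_USE_CTABLE. The helper name is hypothetical.

    #include <petscmat.h>

    /* Illustrative sketch: fetch the ghost vector and scatter used by MatMult(). */
    static PetscErrorCode CommStructsExample(Mat A)
    {
      PetscErrorCode ierr;
      Vec            lvec;
      VecScatter     Mvctx;

      ierr = MatGetCommunicationStructs(A,&lvec,NULL,&Mvctx);CHKERRQ(ierr);
      return 0;
    }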
4959: PETSC_EXTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
4960: PETSC_EXTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
4961: PETSC_EXTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
4962: #if defined(PETSC_HAVE_ELEMENTAL)
4963: PETSC_EXTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
4964: #endif
4968: /*
4969: Computes (B'*A')' since computing A*B directly is untenable
4971: n p p
4972: ( ) ( ) ( )
4973: m ( A ) * n ( B ) = m ( C )
4974: ( ) ( ) ( )
4976: */
4977: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
4978: {
4980: Mat At,Bt,Ct;
4983: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
4984: MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
4985: MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
4986: MatDestroy(&At);
4987: MatDestroy(&Bt);
4988: MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
4989: MatDestroy(&Ct);
4990: return(0);
4991: }
4995: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
4996: {
4998: PetscInt m=A->rmap->n,n=B->cmap->n;
4999: Mat Cmat;
5002: if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
5003: MatCreate(PetscObjectComm((PetscObject)A),&Cmat);
5004: MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5005: MatSetBlockSizesFromMats(Cmat,A,B);
5006: MatSetType(Cmat,MATMPIDENSE);
5007: MatMPIDenseSetPreallocation(Cmat,NULL);
5008: MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
5009: MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);
5011: Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5013: *C = Cmat;
5014: return(0);
5015: }
5017: /* ----------------------------------------------------------------*/
5020: PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
5021: {
5025: if (scall == MAT_INITIAL_MATRIX) {
5026: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
5027: MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);
5028: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
5029: }
5030: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
5031: MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);
5032: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
5033: return(0);
5034: }
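From user code, the dense-times-AIJ product above is reached through MatMatMult(); a minimal sketch, assuming an assembled MPIDENSE matrix A and MPIAIJ matrix B of compatible sizes, with a hypothetical helper name.

    #include <petscmat.h>

    /* Illustrative sketch: C = A*B with A dense and B AIJ, computed internally
       via the transpose identity C = (B'*A')'. */
    static PetscErrorCode DenseTimesAIJExample(Mat A,Mat B,Mat *C)
    {
      PetscErrorCode ierr;

      ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,C);CHKERRQ(ierr);
      return 0;
    }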
5036: /*MC
5037: MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
5039: Options Database Keys:
5040: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
5042: Level: beginner
5044: .seealso: MatCreateAIJ()
5045: M*/
5049: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5050: {
5051: Mat_MPIAIJ *b;
5053: PetscMPIInt size;
5056: MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
5058: PetscNewLog(B,&b);
5059: B->data = (void*)b;
5060: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
5061: B->assembled = PETSC_FALSE;
5062: B->insertmode = NOT_SET_VALUES;
5063: b->size = size;
5065: MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);
5067: /* build cache for off array entries formed */
5068: MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);
5070: b->donotstash = PETSC_FALSE;
5071: b->colmap = 0;
5072: b->garray = 0;
5073: b->roworiented = PETSC_TRUE;
5075: /* stuff used for matrix vector multiply */
5076: b->lvec = NULL;
5077: b->Mvctx = NULL;
5079: /* stuff for MatGetRow() */
5080: b->rowindices = 0;
5081: b->rowvalues = 0;
5082: b->getrowactive = PETSC_FALSE;
5084: /* flexible pointer used in CUSP/CUSPARSE classes */
5085: b->spptr = NULL;
5087: PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
5088: PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
5089: PetscObjectComposeFunction((PetscObject)B,"MatGetDiagonalBlock_C",MatGetDiagonalBlock_MPIAIJ);
5090: PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
5091: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
5092: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
5093: PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
5094: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
5095: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
5096: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
5097: #if defined(PETSC_HAVE_ELEMENTAL)
5098: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
5099: #endif
5100: PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);
5101: PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);
5102: PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);
5103: PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
5104: return(0);
5105: }
5109: /*@C
5110: MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
5111: and "off-diagonal" part of the matrix in CSR format.
5113: Collective on MPI_Comm
5115: Input Parameters:
5116: + comm - MPI communicator
5117: . m - number of local rows (Cannot be PETSC_DECIDE)
5118: . n - This value should be the same as the local size used in creating the
5119: x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
5120: calculated if N is given). For square matrices n is almost always m.
5121: . M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
5122: . N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
5123: . i - row indices for "diagonal" portion of matrix
5124: . j - column indices for "diagonal" portion of matrix
5125: . a - matrix values for "diagonal" portion of matrix
5126: . oi - row indices for "off-diagonal" portion of matrix
5127: . oj - column indices for "off-diagonal" portion of matrix
5128: - oa - matrix values for "off-diagonal" portion of matrix
5130: Output Parameter:
5131: . mat - the matrix
5133: Level: advanced
5135: Notes:
5136: The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
5137: must free the arrays once the matrix has been destroyed and not before.
5139: The i, j, oi, and oj indices are 0-based
5141: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5143: This sets local rows and cannot be used to set off-processor values.
5145: Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
5146: legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
5147: not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
5148: the resulting assembly is easier to implement, works with any matrix format, and the user does not have to
5149: keep track of the underlying arrays; a sketch of this recommended assembly pattern follows the function body below.
5150: Use MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all communication if it is known that only local entries will be set.
5152: .keywords: matrix, aij, compressed row, sparse, parallel
5154: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5155: MPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
5156: @*/
5157: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5158: {
5160: Mat_MPIAIJ *maij;
5163: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE or negative");
5164: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5165: if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5166: MatCreate(comm,mat);
5167: MatSetSizes(*mat,m,n,M,N);
5168: MatSetType(*mat,MATMPIAIJ);
5169: maij = (Mat_MPIAIJ*) (*mat)->data;
5171: (*mat)->preallocated = PETSC_TRUE;
5173: PetscLayoutSetUp((*mat)->rmap);
5174: PetscLayoutSetUp((*mat)->cmap);
5176: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
5177: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);
5179: MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
5180: MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
5181: MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
5182: MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);
5184: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
5185: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
5186: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
5187: return(0);
5188: }
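/*
   A minimal sketch (not part of the library source) of the MatSetValues()-based assembly
   recommended in the notes above; creation and preallocation of A are as for MATMPIAIJ,
   and rstart, rend, ncols, cols, and vals are placeholders for the caller's row data.

     MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);   only if every entry set is locally owned
     for (i=rstart; i<rend; i++) {
       MatSetValues(A,1,&i,ncols,cols,vals,INSERT_VALUES);
     }
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/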
5190: /*
5191: Special version of MatSetValues() for MPIAIJ matrices, intended for direct calls from Fortran (it returns void, so errors must abort rather than propagate)
5192: */
5193: #include <petsc/private/fortranimpl.h>
5195: #if defined(PETSC_HAVE_FORTRAN_CAPS)
5196: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5197: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5198: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5199: #endif
5201: /* Change these macros so that they can be used in a void function */
5202: #undef CHKERRQ
5203: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
5204: #undef SETERRQ2
5205: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
5206: #undef SETERRQ3
5207: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
5208: #undef SETERRQ
5209: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
5213: PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5214: {
5215: Mat mat = *mmat;
5216: PetscInt m = *mm, n = *mn;
5217: InsertMode addv = *maddv;
5218: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
5219: PetscScalar value;
5222: MatCheckPreallocated(mat,1);
5223: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
5225: #if defined(PETSC_USE_DEBUG)
5226: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5227: #endif
5228: {
5229: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
5230: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5231: PetscBool roworiented = aij->roworiented;
5233: /* Some variables required by the MatSetValues_SeqAIJ_A_Private() and MatSetValues_SeqAIJ_B_Private() macros */
5234: Mat A = aij->A;
5235: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
5236: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5237: MatScalar *aa = a->a;
5238: PetscBool ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
5239: Mat B = aij->B;
5240: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
5241: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5242: MatScalar *ba = b->a;
5244: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5245: PetscInt nonew = a->nonew;
5246: MatScalar *ap1,*ap2;
5249: for (i=0; i<m; i++) {
5250: if (im[i] < 0) continue;
5251: #if defined(PETSC_USE_DEBUG)
5252: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5253: #endif
5254: if (im[i] >= rstart && im[i] < rend) {
5255: row = im[i] - rstart;
5256: lastcol1 = -1;
5257: rp1 = aj + ai[row];
5258: ap1 = aa + ai[row];
5259: rmax1 = aimax[row];
5260: nrow1 = ailen[row];
5261: low1 = 0;
5262: high1 = nrow1;
5263: lastcol2 = -1;
5264: rp2 = bj + bi[row];
5265: ap2 = ba + bi[row];
5266: rmax2 = bimax[row];
5267: nrow2 = bilen[row];
5268: low2 = 0;
5269: high2 = nrow2;
5271: for (j=0; j<n; j++) {
5272: if (roworiented) value = v[i*n+j];
5273: else value = v[i+j*m];
5274: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
5275: if (in[j] >= cstart && in[j] < cend) {
5276: col = in[j] - cstart;
5277: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
5278: } else if (in[j] < 0) continue;
5279: #if defined(PETSC_USE_DEBUG)
5280: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
5281: #endif
5282: else {
5283: if (mat->was_assembled) {
5284: if (!aij->colmap) {
5285: MatCreateColmap_MPIAIJ_Private(mat);
5286: }
5287: #if defined(PETSC_USE_CTABLE)
5288: PetscTableFind(aij->colmap,in[j]+1,&col);
5289: col--;
5290: #else
5291: col = aij->colmap[in[j]] - 1;
5292: #endif
5293: if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5294: MatDisAssemble_MPIAIJ(mat);
5295: col = in[j];
5296: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5297: B = aij->B;
5298: b = (Mat_SeqAIJ*)B->data;
5299: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5300: rp2 = bj + bi[row];
5301: ap2 = ba + bi[row];
5302: rmax2 = bimax[row];
5303: nrow2 = bilen[row];
5304: low2 = 0;
5305: high2 = nrow2;
5306: bm = aij->B->rmap->n;
5307: ba = b->a;
5308: }
5309: } else col = in[j];
5310: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
5311: }
5312: }
5313: } else if (!aij->donotstash) {
5314: if (roworiented) {
5315: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
5316: } else {
5317: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
5318: }
5319: }
5320: }
5321: }
5322: PetscFunctionReturnVoid();
5323: }