Actual source code: mpiptap.c
petsc-3.7.0 2016-04-25
2: /*
3: Defines projective product routines where A is an MPIAIJ matrix
4: C = P^T * A * P
5: */
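/*
   A minimal usage sketch (assuming A and P are assembled MATMPIAIJ matrices with
   compatible layouts; not taken from this file):

      Mat C;
      MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);    create C = P^T*A*P (symbolic + numeric)
      MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);      recompute numeric values, reusing the
                                                 symbolic data attached to C
      MatDestroy(&C);

   MatPtAP() dispatches to the MPIAIJ-specific symbolic and numeric routines below.
*/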
7: #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
8: #include <../src/mat/utils/freespace.h>
9: #include <../src/mat/impls/aij/mpi/mpiaij.h>
10: #include <petscbt.h>
11: #include <petsctime.h>
13: /* #define PTAP_PROFILE */
15: extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
18: PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
19: {
21: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
22: Mat_PtAPMPI *ptap=a->ptap;
25: if (ptap) {
26: Mat_Merge_SeqsToMPI *merge=ptap->merge;
27: PetscFree2(ptap->startsj_s,ptap->startsj_r);
28: PetscFree(ptap->bufa);
29: MatDestroy(&ptap->P_loc);
30: MatDestroy(&ptap->P_oth);
31: MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
32: MatDestroy(&ptap->Rd);
33: MatDestroy(&ptap->Ro);
34: if (ptap->AP_loc) { /* used by alg_rap */
35: Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
36: PetscFree(ap->i);
37: PetscFree2(ap->j,ap->a);
38: MatDestroy(&ptap->AP_loc);
39: } else { /* used by alg_ptap */
40: PetscFree(ptap->api);
41: PetscFree(ptap->apj);
42: }
43: MatDestroy(&ptap->C_loc);
44: MatDestroy(&ptap->C_oth);
45: if (ptap->apa) {PetscFree(ptap->apa);}
46: if (merge) { /* used by alg_ptap */
47: PetscFree(merge->id_r);
48: PetscFree(merge->len_s);
49: PetscFree(merge->len_r);
50: PetscFree(merge->bi);
51: PetscFree(merge->bj);
52: PetscFree(merge->buf_ri[0]);
53: PetscFree(merge->buf_ri);
54: PetscFree(merge->buf_rj[0]);
55: PetscFree(merge->buf_rj);
56: PetscFree(merge->coi);
57: PetscFree(merge->coj);
58: PetscFree(merge->owners_co);
59: PetscLayoutDestroy(&merge->rowmap);
60: merge->destroy(A);
61: PetscFree(ptap->merge);
62: } else {
63: MatDestroy_MPIAIJ(A);
64: }
65: PetscFree(ptap);
66: }
67: return(0);
68: }
72: PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
73: {
75: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
76: Mat_PtAPMPI *ptap = a->ptap;
79: (*ptap->duplicate)(A,op,M);
80: (*M)->ops->destroy = ptap->destroy;
81: (*M)->ops->duplicate = ptap->duplicate;
82: return(0);
83: }
87: PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
88: {
90: PetscBool rap=PETSC_TRUE; /* do R=P^T locally, then C=R*A*P */
91: MPI_Comm comm;
94: /* check if matrix local sizes are compatible */
95: PetscObjectGetComm((PetscObject)A,&comm);
96: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
97: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
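/* That is, every process must own the same range of rows of A and of P, and the local column
   range of A must coincide with the local row range of P, so that A*P can be formed from
   Ad*P_loc + Ao*P_oth with purely local row blocks */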
99: PetscOptionsGetBool(NULL,NULL,"-matrap",&rap,NULL);
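/* The -matrap option read above selects the algorithm: true (the default) transposes P
   locally and forms C = R*(A*P) with R = P^T; "-matrap 0" selects the alg_ptap routines
   further below, which form C = P^T*(A*P) directly. */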
100: if (scall == MAT_INITIAL_MATRIX) {
101: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
102: if (rap) { /* do R=P^T locally, then C=R*A*P */
103: MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);
104: } else { /* do P^T*A*P */
105: MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap(A,P,fill,C);
106: }
107: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
108: }
109: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
110: (*(*C)->ops->ptapnumeric)(A,P,*C);
111: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
112: return(0);
113: }
117: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
118: {
119: PetscErrorCode ierr;
120: Mat_PtAPMPI *ptap;
121: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
122: MPI_Comm comm;
123: PetscMPIInt size,rank;
124: Mat Cmpi;
125: PetscFreeSpaceList free_space=NULL,current_space=NULL;
126: PetscInt am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
127: PetscInt *lnk,i,k,pnz,row,nsend;
128: PetscBT lnkbt;
129: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
130: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
131: PetscInt len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
132: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
133: MPI_Request *swaits,*rwaits;
134: MPI_Status *sstatus,rstatus;
135: PetscLayout rowmap;
136: PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
137: PetscMPIInt *len_r,*id_r; /* array of length of comm->size, store send/recv matrix values */
138: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
139: Mat_SeqAIJ *p_loc,*p_oth,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*c_loc,*c_oth;
140: PetscScalar *apv;
141: PetscTable ta;
142: const char *algTypes[2] = {"scalable","nonscalable"};
143: PetscInt alg=1; /* set default algorithm */
144: #if defined(PETSC_USE_INFO)
145: PetscReal apfill;
146: #endif
147: #if defined(PTAP_PROFILE)
148: PetscLogDouble t0,t1,t11,t12,t2,t3,t4;
149: #endif
152: PetscObjectGetComm((PetscObject)A,&comm);
153: MPI_Comm_size(comm,&size);
154: MPI_Comm_rank(comm,&rank);
155: #if defined(PTAP_PROFILE)
156: PetscTime(&t0);
157: #endif
159: /* create struct Mat_PtAPMPI and attach it to C later */
160: PetscNew(&ptap);
161: ptap->reuse = MAT_INITIAL_MATRIX;
163: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
164: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
165: /* get P_loc by taking all local rows of P */
166: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
168: /* (0) compute Rd = Pd^T, Ro = Po^T */
169: /* --------------------------------- */
170: MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
171: MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
172: #if defined(PTAP_PROFILE)
173: PetscTime(&t11);
174: #endif
176: /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
177: /* ----------------------------------------------------------------- */
178: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
179: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
181: /* create and initialize a linked list */
182: Crmax = 5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)); /* expected Crmax */
183: if (Crmax > pN) Crmax=pN;
184: PetscTableCreate(Crmax,pN,&ta); /* for compute AP_loc and Cmpi */
185: MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
186: MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
187: PetscTableGetCount(ta,&Crmax); /* Crmax = nnz(sum of Prows) */
188: /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */
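/* Crmax now bounds the number of distinct column indices that can appear in any row of
   AP_loc (every AP row is a union of rows of P_loc and P_oth), so it is a safe size for
   the condensed linked list created below */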
190: PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);
192: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
193: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);
194: current_space = free_space;
195: nspacedouble = 0;
197: PetscMalloc1(am+1,&api);
198: api[0] = 0;
199: for (i=0; i<am; i++) {
200: /* diagonal portion: Ad[i,:]*P */
201: ai = ad->i; pi = p_loc->i;
202: nzi = ai[i+1] - ai[i];
203: aj = ad->j + ai[i];
204: for (j=0; j<nzi; j++) {
205: row = aj[j];
206: pnz = pi[row+1] - pi[row];
207: Jptr = p_loc->j + pi[row];
208: /* add non-zero cols of P into the sorted linked list lnk */
209: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
210: }
211: /* off-diagonal portion: Ao[i,:]*P */
212: ai = ao->i; pi = p_oth->i;
213: nzi = ai[i+1] - ai[i];
214: aj = ao->j + ai[i];
215: for (j=0; j<nzi; j++) {
216: row = aj[j];
217: pnz = pi[row+1] - pi[row];
218: Jptr = p_oth->j + pi[row];
219: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
220: }
221: apnz = lnk[0];
222: api[i+1] = api[i] + apnz;
223: if (ap_rmax < apnz) ap_rmax = apnz;
225: /* if free space is not available, double the total space in the list */
226: if (current_space->local_remaining<apnz) {
227: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
228: nspacedouble++;
229: }
231: /* Copy data into free space, then initialize lnk */
232: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
234: current_space->array += apnz;
235: current_space->local_used += apnz;
236: current_space->local_remaining -= apnz;
237: }
238: /* Allocate space for apj and apv, initialize apj, and */
239: /* destroy list of free space and other temporary array(s) */
240: PetscMalloc2(api[am],&apj,api[am],&apv);
241: PetscFreeSpaceContiguous(&free_space,apj);
242: PetscLLDestroy(lnk,lnkbt);
244: /* Create AP_loc for reuse */
245: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);
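/* Note: MatCreateSeqAIJWithArrays() does not copy api/apj/apv; AP_loc only wraps them,
   which is why MatDestroy_MPIAIJ_PtAP() above frees ap->i and ap->j/ap->a explicitly */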
247: #if defined(PETSC_USE_INFO)
248: apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
249: ptap->AP_loc->info.mallocs = nspacedouble;
250: ptap->AP_loc->info.fill_ratio_given = fill;
251: ptap->AP_loc->info.fill_ratio_needed = apfill;
253: if (api[am]) {
254: PetscInfo3(ptap->AP_loc,"AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);
255: PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);
256: } else {
257: PetscInfo(ptap->AP_loc,"AP_loc is empty \n");
258: }
259: #endif
261: #if defined(PTAP_PROFILE)
262: PetscTime(&t12);
263: #endif
265: /* (2-1) compute symbolic Co = Ro*AP_loc */
266: /* ------------------------------------ */
267: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);
268: #if defined(PTAP_PROFILE)
269: PetscTime(&t1);
270: #endif
271:
272: /* (3) send coj of C_oth to other processors */
273: /* ------------------------------------------ */
274: /* determine row ownership */
275: PetscLayoutCreate(comm,&rowmap);
276: rowmap->n = pn;
277: rowmap->bs = 1;
278: PetscLayoutSetUp(rowmap);
279: owners = rowmap->range;
281: /* determine the number of messages to send, their lengths */
282: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
283: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
284: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
286: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
287: coi = c_oth->i; coj = c_oth->j;
288: con = ptap->C_oth->rmap->n;
289: proc = 0;
290: for (i=0; i<con; i++) {
291: while (prmap[i] >= owners[proc+1]) proc++;
292: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
293: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
294: }
296: len = 0; /* max length of buf_si[], see (4) */
297: owners_co[0] = 0;
298: nsend = 0;
299: for (proc=0; proc<size; proc++) {
300: owners_co[proc+1] = owners_co[proc] + len_si[proc];
301: if (len_s[proc]) {
302: nsend++;
303: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
304: len += len_si[proc];
305: }
306: }
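/* each i-structure message to [proc] carries 1 + nrows + (nrows+1) = 2*(nrows+1) integers:
   the row count, the local row indices, and the CSR-style row offsets (see step (4)) */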
308: /* determine the number and length of messages to receive for coi and coj */
309: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
310: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
312: /* post the Irecv and Isend of coj */
313: PetscCommGetNewTag(comm,&tagj);
314: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
315: PetscMalloc1(nsend+1,&swaits);
316: for (proc=0, k=0; proc<size; proc++) {
317: if (!len_s[proc]) continue;
318: i = owners_co[proc];
319: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
320: k++;
321: }
323: /* (2-2) compute symbolic C_loc = Rd*AP_loc */
324: /* ---------------------------------------- */
325: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);
326: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
328: /* receives of coj are complete */
329: for (i=0; i<nrecv; i++) {
330: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
331: }
332: PetscFree(rwaits);
333: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
335: /* add received column indices into ta to update Crmax */
336: for (k=0; k<nrecv; k++) {/* k-th received message */
337: Jptr = buf_rj[k];
338: for (j=0; j<len_r[k]; j++) {
339: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
340: }
341: }
342: PetscTableGetCount(ta,&Crmax);
343: PetscTableDestroy(&ta);
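/* Crmax is now an upper bound on the nonzeros in any locally owned row of Cmpi: local
   contributions come from the columns tabulated above, and the received coj indices cover
   the off-process contributions; it sizes the free space and linked list in step (5) */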
345: /* (4) send and recv coi */
346: /*-----------------------*/
347: PetscCommGetNewTag(comm,&tagi);
348: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
349: PetscMalloc1(len+1,&buf_s);
350: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
351: for (proc=0,k=0; proc<size; proc++) {
352: if (!len_s[proc]) continue;
353: /* form outgoing message for i-structure:
354: buf_si[0]: nrows to be sent
355: [1:nrows]: row index (global)
356: [nrows+1:2*nrows+1]: i-structure index
357: */
358: /*-------------------------------------------*/
359: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
360: buf_si_i = buf_si + nrows+1;
361: buf_si[0] = nrows;
362: buf_si_i[0] = 0;
363: nrows = 0;
364: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
365: nzi = coi[i+1] - coi[i];
366: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
367: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
368: nrows++;
369: }
370: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
371: k++;
372: buf_si += len_si[proc];
373: }
374: for (i=0; i<nrecv; i++) {
375: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
376: }
377: PetscFree(rwaits);
378: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
380: PetscFree4(len_s,len_si,sstatus,owners_co);
381: PetscFree(len_ri);
382: PetscFree(swaits);
383: PetscFree(buf_s);
384: #if defined(PTAP_PROFILE)
385: PetscTime(&t2);
386: #endif
387: /* (5) compute the local portion of Cmpi */
388: /* ------------------------------------------ */
389: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
390: PetscFreeSpaceGet(Crmax,&free_space);
391: current_space = free_space;
393: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
394: for (k=0; k<nrecv; k++) {
395: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
396: nrows = *buf_ri_k[k];
397: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
398: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
399: }
401: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
402: PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);
403: for (i=0; i<pn; i++) {
404: /* add C_loc into Cmpi */
405: nzi = c_loc->i[i+1] - c_loc->i[i];
406: Jptr = c_loc->j + c_loc->i[i];
407: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
409: /* add received col data into lnk */
410: for (k=0; k<nrecv; k++) { /* k-th received message */
411: if (i == *nextrow[k]) { /* i-th row */
412: nzi = *(nextci[k]+1) - *nextci[k];
413: Jptr = buf_rj[k] + *nextci[k];
414: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
415: nextrow[k]++; nextci[k]++;
416: }
417: }
418: nzi = lnk[0];
420: /* copy data into free space, then initialize lnk */
421: PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);
422: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
423: }
424: PetscFree3(buf_ri_k,nextrow,nextci);
425: PetscLLDestroy(lnk,lnkbt);
426: PetscFreeSpaceDestroy(free_space);
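/* dnz[] and onz[] now hold, for each locally owned row of Cmpi, the number of nonzeros in
   the diagonal and off-diagonal blocks; they drive MatMPIAIJSetPreallocation() in step (6) */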
427: #if defined(PTAP_PROFILE)
428: PetscTime(&t3);
429: #endif
431: /* (6) create symbolic parallel matrix Cmpi */
432: /*------------------------------------------*/
433: MatCreate(comm,&Cmpi);
434: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
435: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));
436: MatSetType(Cmpi,MATMPIAIJ);
437: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
438: MatPreallocateFinalize(dnz,onz);
440: /* members in merge */
441: PetscFree(id_r);
442: PetscFree(len_r);
443: PetscFree(buf_ri[0]);
444: PetscFree(buf_ri);
445: PetscFree(buf_rj[0]);
446: PetscFree(buf_rj);
447: PetscLayoutDestroy(&rowmap);
449: /* attach the supporting struct to Cmpi for reuse */
450: c = (Mat_MPIAIJ*)Cmpi->data;
451: c->ptap = ptap;
452: ptap->duplicate = Cmpi->ops->duplicate;
453: ptap->destroy = Cmpi->ops->destroy;
455: PetscObjectOptionsBegin((PetscObject)A);
456: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,2,algTypes[1],&alg,NULL);
457: PetscOptionsEnd();
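/* "-matptap_via nonscalable" (the default, alg==1) uses the dense accumulator apa of length
   pN in MatPtAPNumeric_MPIAIJ_MPIAIJ(); the "scalable" choice is not implemented here and
   triggers the error below */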
459: if (alg == 1) {
460: /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
461: PetscCalloc1(pN,&ptap->apa);
462: Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
463: } else {
464: Cmpi->ops->ptapnumeric = 0; /* not done yet */
465: SETERRQ(comm,PETSC_ERR_ARG_SIZ,"Not done yet");
466: }
469: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
470: Cmpi->assembled = PETSC_FALSE;
471: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
472: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
473: *C = Cmpi;
475: #if defined(PTAP_PROFILE)
476: PetscTime(&t4);
477: if (rank == 1) {
478: printf("PtAPSym: %g + %g + %g + %g + %g + %g = %g\n",t11-t0,t12-t11,t1-t12,t2-t1,t3-t2,t4-t3,t4-t0);
479: }
480: #endif
481: return(0);
482: }
486: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
487: {
488: PetscErrorCode ierr;
489: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
490: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
491: Mat_SeqAIJ *ap,*p_loc,*p_oth,*c_seq;
492: Mat_PtAPMPI *ptap = c->ptap;
493: Mat AP_loc,C_loc,C_oth;
494: PetscInt i,rstart,rend,cm,ncols,row;
495: PetscInt *api,*apj,am = A->rmap->n,j,col,apnz;
496: PetscScalar *apa;
497: const PetscInt *cols;
498: const PetscScalar *vals;
499: #if defined(PTAP_PROFILE)
500: PetscMPIInt rank;
501: MPI_Comm comm;
502: PetscLogDouble t0,t1,t2,t3,t4,eR,eAP,eCseq,eCmpi;
503: #endif
506: MatZeroEntries(C);
508: /* 1) get R = Pd^T,Ro = Po^T */
509: #if defined(PTAP_PROFILE)
510: PetscTime(&t0);
511: #endif
512: if (ptap->reuse == MAT_REUSE_MATRIX) {
513: MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
514: MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
515: }
516: #if defined(PTAP_PROFILE)
517: PetscTime(&t1);
518: eR = t1 - t0;
519: #endif
521: /* 2) get AP_loc */
522: AP_loc = ptap->AP_loc;
523: ap = (Mat_SeqAIJ*)AP_loc->data;
525: /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
526: /*-----------------------------------------------------*/
527: if (ptap->reuse == MAT_REUSE_MATRIX) {
528: /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
529: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
530: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
531: }
532:
533: /* 2-2) compute numeric A_loc*P - dominating part */
534: /* ---------------------------------------------- */
535: /* get data from symbolic products */
536: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
537: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
538: apa = ptap->apa;
539: api = ap->i;
540: apj = ap->j;
541: for (i=0; i<am; i++) {
542: /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
543: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
544: apnz = api[i+1] - api[i];
545: for (j=0; j<apnz; j++) {
546: col = apj[j+api[i]];
547: ap->a[j+ap->i[i]] = apa[col];
548: apa[col] = 0.0;
549: }
550: PetscLogFlops(2.0*apnz);
551: }
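/* ap->a now holds the numeric values of AP_loc = A_loc*P in the sparsity pattern fixed by
   the symbolic phase; apa has been zeroed entry by entry so the dense accumulator is clean
   for the next row (and the next numeric call) */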
552: #if defined(PTAP_PROFILE)
553: PetscTime(&t2);
554: eAP = t2 - t1;
555: #endif
556:
557: /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
558: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);
559: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);
560: C_loc = ptap->C_loc;
561: C_oth = ptap->C_oth;
562: #if defined(PTAP_PROFILE)
563: PetscTime(&t3);
564: eCseq = t3 - t2;
565: #endif
567: /* add C_loc and C_oth to C */
568: MatGetOwnershipRange(C,&rstart,&rend);
569:
570: /* C_loc -> C */
571: cm = C_loc->rmap->N;
572: c_seq = (Mat_SeqAIJ*)C_loc->data;
573: cols = c_seq->j;
574: vals = c_seq->a;
575: for (i=0; i<cm; i++) {
576: ncols = c_seq->i[i+1] - c_seq->i[i];
577: row = rstart + i;
578: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
579: cols += ncols; vals += ncols;
580: }
581:
582: /* Co -> C, off-processor part */
583: cm = C_oth->rmap->N;
584: c_seq = (Mat_SeqAIJ*)C_oth->data;
585: cols = c_seq->j;
586: vals = c_seq->a;
587: for (i=0; i<cm; i++) {
588: ncols = c_seq->i[i+1] - c_seq->i[i];
589: row = p->garray[i];
590: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
591: cols += ncols; vals += ncols;
592: }
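/* rows of C_oth correspond to global rows p->garray[i] owned by other processes; the
   ADD_VALUES insertions above are completed by the MatAssemblyBegin/End communication below */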
593: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
594: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
595: #if defined(PTAP_PROFILE)
596: PetscTime(&t4);
597: eCmpi = t4 - t3;
599: PetscObjectGetComm((PetscObject)C,&comm);
600: MPI_Comm_rank(comm,&rank);
601: if (rank==1) {
602: PetscPrintf(MPI_COMM_SELF," R %g, AP %g, Cseq %g, Cmpi %g = %g\n", eR,eAP,eCseq,eCmpi,eR+eAP+eCseq+eCmpi);
603: }
604: #endif
605: ptap->reuse = MAT_REUSE_MATRIX;
606: return(0);
607: }
611: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap(Mat A,Mat P,PetscReal fill,Mat *C)
612: {
613: PetscErrorCode ierr;
614: Mat Cmpi;
615: Mat_PtAPMPI *ptap;
616: PetscFreeSpaceList free_space=NULL,current_space=NULL;
617: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
618: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
619: Mat_SeqAIJ *p_loc,*p_oth;
620: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
621: PetscInt *adi=ad->i,*aj,*aoi=ao->i,nnz;
622: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
623: PetscInt am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
624: PetscBT lnkbt;
625: MPI_Comm comm;
626: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
627: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
628: PetscInt len,proc,*dnz,*onz,*owners;
629: PetscInt nzi,*pti,*ptj;
630: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
631: MPI_Request *swaits,*rwaits;
632: MPI_Status *sstatus,rstatus;
633: Mat_Merge_SeqsToMPI *merge;
634: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
635: PetscReal afill=1.0,afill_tmp;
638: PetscObjectGetComm((PetscObject)A,&comm);
639: MPI_Comm_size(comm,&size);
640: MPI_Comm_rank(comm,&rank);
642: /* create struct Mat_PtAPMPI and attach it to C later */
643: PetscNew(&ptap);
644: PetscNew(&merge);
645: ptap->merge = merge;
646: ptap->reuse = MAT_INITIAL_MATRIX;
648: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
649: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
651: /* get P_loc by taking all local rows of P */
652: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
654: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
655: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
656: pi_loc = p_loc->i; pj_loc = p_loc->j;
657: pi_oth = p_oth->i; pj_oth = p_oth->j;
659: /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
660: /*--------------------------------------------------------------------------*/
661: PetscMalloc1(am+1,&api);
662: api[0] = 0;
664: /* create and initialize a linked list */
665: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
667: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) -OOM for ex56, np=8k on Intrepid! */
668: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
669: current_space = free_space;
671: for (i=0; i<am; i++) {
672: /* diagonal portion of A */
673: nzi = adi[i+1] - adi[i];
674: aj = ad->j + adi[i];
675: for (j=0; j<nzi; j++) {
676: row = aj[j];
677: pnz = pi_loc[row+1] - pi_loc[row];
678: Jptr = pj_loc + pi_loc[row];
679: /* add non-zero cols of P into the sorted linked list lnk */
680: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
681: }
682: /* off-diagonal portion of A */
683: nzi = aoi[i+1] - aoi[i];
684: aj = ao->j + aoi[i];
685: for (j=0; j<nzi; j++) {
686: row = aj[j];
687: pnz = pi_oth[row+1] - pi_oth[row];
688: Jptr = pj_oth + pi_oth[row];
689: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
690: }
691: apnz = lnk[0];
692: api[i+1] = api[i] + apnz;
693: if (ap_rmax < apnz) ap_rmax = apnz;
695: /* if free space is not available, double the total space in the list */
696: if (current_space->local_remaining<apnz) {
697: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
698: nspacedouble++;
699: }
701: /* Copy data into free space, then initialize lnk */
702: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
704: current_space->array += apnz;
705: current_space->local_used += apnz;
706: current_space->local_remaining -= apnz;
707: }
709: /* Allocate space for apj, initialize apj, and */
710: /* destroy list of free space and other temporary array(s) */
711: PetscMalloc1(api[am]+1,&apj);
712: PetscFreeSpaceContiguous(&free_space,apj);
713: afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
714: if (afill_tmp > afill) afill = afill_tmp;
716: /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
717: /*-----------------------------------------------------------------*/
718: MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
720: /* then, compute symbolic Co = (p->B)^T*AP */
721: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
722: >= (num of nonzero rows of C_seq) - pn */
723: PetscMalloc1(pon+1,&coi);
724: coi[0] = 0;
726: /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
727: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],api[am]));
728: PetscFreeSpaceGet(nnz,&free_space);
729: current_space = free_space;
731: for (i=0; i<pon; i++) {
732: pnz = poti[i+1] - poti[i];
733: ptJ = potj + poti[i];
734: for (j=0; j<pnz; j++) {
735: row = ptJ[j]; /* row of AP == col of Pot */
736: apnz = api[row+1] - api[row];
737: Jptr = apj + api[row];
738: /* add non-zero cols of AP into the sorted linked list lnk */
739: PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
740: }
741: nnz = lnk[0];
743: /* If free space is not available, double the total space in the list */
744: if (current_space->local_remaining<nnz) {
745: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
746: nspacedouble++;
747: }
749: /* Copy data into free space, and zero out dense rows */
750: PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);
752: current_space->array += nnz;
753: current_space->local_used += nnz;
754: current_space->local_remaining -= nnz;
756: coi[i+1] = coi[i] + nnz;
757: }
758:
759: PetscMalloc1(coi[pon],&coj);
760: PetscFreeSpaceContiguous(&free_space,coj);
761: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
762: if (afill_tmp > afill) afill = afill_tmp;
763: MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
765: /* (3) send j-array (coj) of Co to other processors */
766: /*--------------------------------------------------*/
767: PetscCalloc1(size,&merge->len_s);
768: len_s = merge->len_s;
769: merge->nsend = 0;
772: /* determine row ownership */
773: PetscLayoutCreate(comm,&merge->rowmap);
774: merge->rowmap->n = pn;
775: merge->rowmap->bs = 1;
777: PetscLayoutSetUp(merge->rowmap);
778: owners = merge->rowmap->range;
780: /* determine the number of messages to send, their lengths */
781: PetscMalloc2(size,&len_si,size,&sstatus);
782: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
783: PetscMalloc1(size+2,&owners_co);
785: proc = 0;
786: for (i=0; i<pon; i++) {
787: while (prmap[i] >= owners[proc+1]) proc++;
788: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
789: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
790: }
792: len = 0; /* max length of buf_si[], see (4) */
793: owners_co[0] = 0;
794: for (proc=0; proc<size; proc++) {
795: owners_co[proc+1] = owners_co[proc] + len_si[proc];
796: if (len_s[proc]) {
797: merge->nsend++;
798: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
799: len += len_si[proc];
800: }
801: }
803: /* determine the number and length of messages to receive for coi and coj */
804: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
805: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
807: /* post the Irecv and Isend of coj */
808: PetscCommGetNewTag(comm,&tagj);
809: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
810: PetscMalloc1(merge->nsend+1,&swaits);
811: for (proc=0, k=0; proc<size; proc++) {
812: if (!len_s[proc]) continue;
813: i = owners_co[proc];
814: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
815: k++;
816: }
818: /* receives and sends of coj are complete */
819: for (i=0; i<merge->nrecv; i++) {
820: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
821: }
822: PetscFree(rwaits);
823: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
825: /* (4) send and recv coi */
826: /*-----------------------*/
827: PetscCommGetNewTag(comm,&tagi);
828: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
829: PetscMalloc1(len+1,&buf_s);
830: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
831: for (proc=0,k=0; proc<size; proc++) {
832: if (!len_s[proc]) continue;
833: /* form outgoing message for i-structure:
834: buf_si[0]: nrows to be sent
835: [1:nrows]: row index (global)
836: [nrows+1:2*nrows+1]: i-structure index
837: */
838: /*-------------------------------------------*/
839: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
840: buf_si_i = buf_si + nrows+1;
841: buf_si[0] = nrows;
842: buf_si_i[0] = 0;
843: nrows = 0;
844: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
845: nzi = coi[i+1] - coi[i];
846: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
847: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
848: nrows++;
849: }
850: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
851: k++;
852: buf_si += len_si[proc];
853: }
854: i = merge->nrecv;
855: while (i--) {
856: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
857: }
858: PetscFree(rwaits);
859: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
861: PetscFree2(len_si,sstatus);
862: PetscFree(len_ri);
863: PetscFree(swaits);
864: PetscFree(buf_s);
866: /* (5) compute the local portion of C (mpi mat) */
867: /*----------------------------------------------*/
868: MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
870: /* allocate pti array and free space for accumulating nonzero column info */
871: PetscMalloc1(pn+1,&pti);
872: pti[0] = 0;
874: /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
875: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pi_loc[pm],api[am]));
876: PetscFreeSpaceGet(nnz,&free_space);
877: current_space = free_space;
879: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
880: for (k=0; k<merge->nrecv; k++) {
881: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
882: nrows = *buf_ri_k[k];
883: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
884: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
885: }
886: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
887:
888: for (i=0; i<pn; i++) {
889: /* add pdt[i,:]*AP into lnk */
890: pnz = pdti[i+1] - pdti[i];
891: ptJ = pdtj + pdti[i];
892: for (j=0; j<pnz; j++) {
893: row = ptJ[j]; /* row of AP == col of Pt */
894: apnz = api[row+1] - api[row];
895: Jptr = apj + api[row];
896: /* add non-zero cols of AP into the sorted linked list lnk */
897: PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
898: }
900: /* add received col data into lnk */
901: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
902: if (i == *nextrow[k]) { /* i-th row */
903: nzi = *(nextci[k]+1) - *nextci[k];
904: Jptr = buf_rj[k] + *nextci[k];
905: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
906: nextrow[k]++; nextci[k]++;
907: }
908: }
909: nnz = lnk[0];
911: /* if free space is not available, make more free space */
912: if (current_space->local_remaining<nnz) {
913: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
914: nspacedouble++;
915: }
916: /* copy data into free space, then initialize lnk */
917: PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);
918: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
920: current_space->array += nnz;
921: current_space->local_used += nnz;
922: current_space->local_remaining -= nnz;
924: pti[i+1] = pti[i] + nnz;
925: }
926: MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
927: PetscFree3(buf_ri_k,nextrow,nextci);
929: PetscMalloc1(pti[pn]+1,&ptj);
930: PetscFreeSpaceContiguous(&free_space,ptj);
931: afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
932: if (afill_tmp > afill) afill = afill_tmp;
933: PetscLLDestroy(lnk,lnkbt);
935: /* (6) create symbolic parallel matrix Cmpi */
936: /*------------------------------------------*/
937: MatCreate(comm,&Cmpi);
938: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
939: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));
940: MatSetType(Cmpi,MATMPIAIJ);
941: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
942: MatPreallocateFinalize(dnz,onz);
944: merge->bi = pti; /* Cseq->i */
945: merge->bj = ptj; /* Cseq->j */
946: merge->coi = coi; /* Co->i */
947: merge->coj = coj; /* Co->j */
948: merge->buf_ri = buf_ri;
949: merge->buf_rj = buf_rj;
950: merge->owners_co = owners_co;
951: merge->destroy = Cmpi->ops->destroy;
953: /* attach the supporting struct to Cmpi for reuse */
954: c = (Mat_MPIAIJ*)Cmpi->data;
955: c->ptap = ptap;
956: ptap->api = api;
957: ptap->apj = apj;
958: ptap->duplicate = Cmpi->ops->duplicate;
959: ptap->destroy = Cmpi->ops->destroy;
961: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
962: Cmpi->assembled = PETSC_FALSE;
963: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
964: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
965: Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap;
966: *C = Cmpi;
968: /* flag 'scalable' determines which implementation is used:
969:    0: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
970:    1: do sparse axpy in MatPtAPNumeric() - might be slower, uses a sparse array apa */
971: /* set default scalable */
972: ptap->scalable = PETSC_FALSE; /* PETSC_TRUE; */
974: PetscOptionsGetBool(((PetscObject)Cmpi)->options,((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);
975: if (!ptap->scalable) { /* Do dense axpy */
976: PetscCalloc1(pN,&ptap->apa);
977: } else {
978: PetscCalloc1(ap_rmax+1,&ptap->apa);
979: }
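/* apa accumulates one row of A*P in the numeric phase: a dense array of length pN for the
   default kernel, or a sparse array of length ap_rmax+1 when -matptap_scalable is set */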
981: #if defined(PETSC_USE_INFO)
982: if (pti[pn] != 0) {
983: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
984: PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);
985: } else {
986: PetscInfo(Cmpi,"Empty matrix product\n");
987: }
988: #endif
989: return(0);
990: }
994: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap(Mat A,Mat P,Mat C)
995: {
996: PetscErrorCode ierr;
997: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
998: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
999: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1000: Mat_SeqAIJ *p_loc,*p_oth;
1001: Mat_PtAPMPI *ptap;
1002: Mat_Merge_SeqsToMPI *merge;
1003: PetscInt *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
1004: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
1005: PetscInt i,j,k,anz,pnz,apnz,nextap,row,*cj;
1006: MatScalar *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
1007: PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1008: MPI_Comm comm;
1009: PetscMPIInt size,rank,taga,*len_s;
1010: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1011: PetscInt **buf_ri,**buf_rj;
1012: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1013: MPI_Request *s_waits,*r_waits;
1014: MPI_Status *status;
1015: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1016: PetscInt *api,*apj,*coi,*coj;
1017: PetscInt *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
1018: PetscBool scalable;
1019: #if defined(PTAP_PROFILE)
1020: PetscLogDouble t0,t1,t2,eP,t3,t4,et2_AP=0.0,ePtAP=0.0,t2_0,t2_1,t2_2;
1021: #endif
1024: PetscObjectGetComm((PetscObject)C,&comm);
1025: #if defined(PTAP_PROFILE)
1026: PetscTime(&t0);
1027: #endif
1028: MPI_Comm_size(comm,&size);
1029: MPI_Comm_rank(comm,&rank);
1031: ptap = c->ptap;
1032: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
1033: merge = ptap->merge;
1034: apa = ptap->apa;
1035: scalable = ptap->scalable;
1037: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
1038: /*-----------------------------------------------------*/
1039: if (ptap->reuse == MAT_INITIAL_MATRIX) {
1040: /* P_oth and P_loc are obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
1041: ptap->reuse = MAT_REUSE_MATRIX;
1042: } else { /* update numerical values of P_oth and P_loc */
1043: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1044: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
1045: }
1046: #if defined(PTAP_PROFILE)
1047: PetscTime(&t1);
1048: eP = t1-t0;
1049: #endif
1050: /*
1051: printf("[%d] Ad: %d, %d; Ao: %d, %d; P_loc: %d, %d; P_oth %d, %d;\n",rank,
1052: a->A->rmap->N,a->A->cmap->N,a->B->rmap->N,a->B->cmap->N,
1053: ptap->P_loc->rmap->N,ptap->P_loc->cmap->N,
1054: ptap->P_oth->rmap->N,ptap->P_oth->cmap->N);
1055: */
1057: /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
1058: /*--------------------------------------------------------------*/
1059: /* get data from symbolic products */
1060: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1061: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1062: pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
1063: pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
1065: coi = merge->coi; coj = merge->coj;
1066: PetscCalloc1(coi[pon]+1,&coa);
1068: bi = merge->bi; bj = merge->bj;
1069: owners = merge->rowmap->range;
1070: PetscCalloc1(bi[cm]+1,&ba); /* ba: Cseq->a */
1072: api = ptap->api; apj = ptap->apj;
1074: if (!scalable) { /* Do dense axpy on apa (length pN, stores A[i,:]*P) - nonscalable, but faster (can take about 1/3 of the time of the scalable version) */
1075: PetscInfo(C,"Using non-scalable dense axpy\n");
1076: #if defined(PTAP_PROFILE)
1077: PetscTime(&t1);
1078: #endif
1079: for (i=0; i<am; i++) {
1080: #if defined(PTAP_PROFILE)
1081: PetscTime(&t2_0);
1082: #endif
1083: /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
1084: /*------------------------------------------------------------*/
1085: apJ = apj + api[i];
1087: /* diagonal portion of A */
1088: anz = adi[i+1] - adi[i];
1089: adj = ad->j + adi[i];
1090: ada = ad->a + adi[i];
1091: for (j=0; j<anz; j++) {
1092: row = adj[j];
1093: pnz = pi_loc[row+1] - pi_loc[row];
1094: pj = pj_loc + pi_loc[row];
1095: pa = pa_loc + pi_loc[row];
1097: /* perform dense axpy */
1098: valtmp = ada[j];
1099: for (k=0; k<pnz; k++) {
1100: apa[pj[k]] += valtmp*pa[k];
1101: }
1102: PetscLogFlops(2.0*pnz);
1103: }
1105: /* off-diagonal portion of A */
1106: anz = aoi[i+1] - aoi[i];
1107: aoj = ao->j + aoi[i];
1108: aoa = ao->a + aoi[i];
1109: for (j=0; j<anz; j++) {
1110: row = aoj[j];
1111: pnz = pi_oth[row+1] - pi_oth[row];
1112: pj = pj_oth + pi_oth[row];
1113: pa = pa_oth + pi_oth[row];
1115: /* perform dense axpy */
1116: valtmp = aoa[j];
1117: for (k=0; k<pnz; k++) {
1118: apa[pj[k]] += valtmp*pa[k];
1119: }
1120: PetscLogFlops(2.0*pnz);
1121: }
1122: #if defined(PTAP_PROFILE)
1123: PetscTime(&t2_1);
1124: et2_AP += t2_1 - t2_0;
1125: #endif
1127: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1128: /*--------------------------------------------------------------*/
1129: apnz = api[i+1] - api[i];
1130: /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
1131: pnz = po->i[i+1] - po->i[i];
1132: poJ = po->j + po->i[i];
1133: pA = po->a + po->i[i];
1134: for (j=0; j<pnz; j++) {
1135: row = poJ[j];
1136: cnz = coi[row+1] - coi[row];
1137: cj = coj + coi[row];
1138: ca = coa + coi[row];
1139: /* perform dense axpy */
1140: valtmp = pA[j];
1141: for (k=0; k<cnz; k++) {
1142: ca[k] += valtmp*apa[cj[k]];
1143: }
1144: PetscLogFlops(2.0*cnz);
1145: }
1146: /* put the value into Cd (diagonal part) */
1147: pnz = pd->i[i+1] - pd->i[i];
1148: pdJ = pd->j + pd->i[i];
1149: pA = pd->a + pd->i[i];
1150: for (j=0; j<pnz; j++) {
1151: row = pdJ[j];
1152: cnz = bi[row+1] - bi[row];
1153: cj = bj + bi[row];
1154: ca = ba + bi[row];
1155: /* perform dense axpy */
1156: valtmp = pA[j];
1157: for (k=0; k<cnz; k++) {
1158: ca[k] += valtmp*apa[cj[k]];
1159: }
1160: PetscLogFlops(2.0*cnz);
1161: }
1162:
1163: /* zero the current row of A*P */
1164: for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
1165: #if defined(PTAP_PROFILE)
1166: PetscTime(&t2_2);
1167: ePtAP += t2_2 - t2_1;
1168: #endif
1169: }
1170: } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
1171: PetscInfo(C,"Using scalable sparse axpy\n");
1172: /*-----------------------------------------------------------------------------------------*/
1173: pA=pa_loc;
1174: for (i=0; i<am; i++) {
1175: #if defined(PTAP_PROFILE)
1176: PetscTime(&t2_0);
1177: #endif
1178: /* form i-th sparse row of A*P */
1179: apnz = api[i+1] - api[i];
1180: apJ = apj + api[i];
1181: /* diagonal portion of A */
1182: anz = adi[i+1] - adi[i];
1183: adj = ad->j + adi[i];
1184: ada = ad->a + adi[i];
1185: for (j=0; j<anz; j++) {
1186: row = adj[j];
1187: pnz = pi_loc[row+1] - pi_loc[row];
1188: pj = pj_loc + pi_loc[row];
1189: pa = pa_loc + pi_loc[row];
1190: valtmp = ada[j];
1191: nextp = 0;
1192: for (k=0; nextp<pnz; k++) {
1193: if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1194: apa[k] += valtmp*pa[nextp++];
1195: }
1196: }
1197: PetscLogFlops(2.0*pnz);
1198: }
1199: /* off-diagonal portion of A */
1200: anz = aoi[i+1] - aoi[i];
1201: aoj = ao->j + aoi[i];
1202: aoa = ao->a + aoi[i];
1203: for (j=0; j<anz; j++) {
1204: row = aoj[j];
1205: pnz = pi_oth[row+1] - pi_oth[row];
1206: pj = pj_oth + pi_oth[row];
1207: pa = pa_oth + pi_oth[row];
1208: valtmp = aoa[j];
1209: nextp = 0;
1210: for (k=0; nextp<pnz; k++) {
1211: if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1212: apa[k] += valtmp*pa[nextp++];
1213: }
1214: }
1215: PetscLogFlops(2.0*pnz);
1216: }
1217: #if defined(PTAP_PROFILE)
1218: PetscTime(&t2_1);
1219: et2_AP += t2_1 - t2_0;
1220: #endif
1222: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1223: /*--------------------------------------------------------------*/
1224: pnz = pi_loc[i+1] - pi_loc[i];
1225: pJ = pj_loc + pi_loc[i];
1226: for (j=0; j<pnz; j++) {
1227: nextap = 0;
1228: row = pJ[j]; /* global index */
1229: if (row < pcstart || row >=pcend) { /* put the value into Co */
1230: row = *poJ;
1231: cj = coj + coi[row];
1232: ca = coa + coi[row]; poJ++;
1233: } else { /* put the value into Cd */
1234: row = *pdJ;
1235: cj = bj + bi[row];
1236: ca = ba + bi[row]; pdJ++;
1237: }
1238: valtmp = pA[j];
1239: for (k=0; nextap<apnz; k++) {
1240: if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
1241: }
1242: PetscLogFlops(2.0*apnz);
1243: }
1244: pA += pnz;
1245: /* zero the current row info for A*P */
1246: PetscMemzero(apa,apnz*sizeof(MatScalar));
1247: #if defined(PTAP_PROFILE)
1248: PetscTime(&t2_2);
1249: ePtAP += t2_2 - t2_1;
1250: #endif
1251: }
1252: }
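/* at this point ba holds the contributions to locally owned rows of C (via Pd^T) and coa
   holds the rows of Co destined for other processes (via Po^T); coa is communicated in
   step 3) and merged into the locally owned rows in step 4) before insertion into C */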
1253: #if defined(PTAP_PROFILE)
1254: PetscTime(&t2);
1255: #endif
1257: /* 3) send and recv matrix values coa */
1258: /*------------------------------------*/
1259: buf_ri = merge->buf_ri;
1260: buf_rj = merge->buf_rj;
1261: len_s = merge->len_s;
1262: PetscCommGetNewTag(comm,&taga);
1263: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1265: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1266: for (proc=0,k=0; proc<size; proc++) {
1267: if (!len_s[proc]) continue;
1268: i = merge->owners_co[proc];
1269: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1270: k++;
1271: }
1272: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1273: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1275: PetscFree2(s_waits,status);
1276: PetscFree(r_waits);
1277: PetscFree(coa);
1278: #if defined(PTAP_PROFILE)
1279: PetscTime(&t3);
1280: #endif
1282: /* 4) insert local Cseq and received values into Cmpi */
1283: /*------------------------------------------------------*/
1284: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1285: for (k=0; k<merge->nrecv; k++) {
1286: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1287: nrows = *(buf_ri_k[k]);
1288: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */
1289: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1290: }
1292: for (i=0; i<cm; i++) {
1293: row = owners[rank] + i; /* global row index of C_seq */
1294: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1295: ba_i = ba + bi[i];
1296: bnz = bi[i+1] - bi[i];
1297: /* add received vals into ba */
1298: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1299: /* i-th row */
1300: if (i == *nextrow[k]) {
1301: cnz = *(nextci[k]+1) - *nextci[k];
1302: cj = buf_rj[k] + *(nextci[k]);
1303: ca = abuf_r[k] + *(nextci[k]);
1304: nextcj = 0;
1305: for (j=0; nextcj<cnz; j++) {
1306: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1307: ba_i[j] += ca[nextcj++];
1308: }
1309: }
1310: nextrow[k]++; nextci[k]++;
1311: PetscLogFlops(2.0*cnz);
1312: }
1313: }
1314: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1315: }
1316: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1317: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1319: PetscFree(ba);
1320: PetscFree(abuf_r[0]);
1321: PetscFree(abuf_r);
1322: PetscFree3(buf_ri_k,nextrow,nextci);
1323: #if defined(PTAP_PROFILE)
1324: PetscTime(&t4);
1325: if (rank==1) {
1326: PetscPrintf(MPI_COMM_SELF," [%d] PtAPNum %g/P + %g/PtAP( %g/A*P + %g/Pt*AP ) + %g/comm + %g/Cloc = %g\n\n",rank,eP,t2-t1,et2_AP,ePtAP,t3-t2,t4-t3,t4-t0);
1327: }
1328: #endif
1329: return(0);
1330: }