Actual source code: adda.c
1: /*
3: Contributed by Arvid Bessen, Columbia University, June 2007
5: Extension of DA object to any number of dimensions.
7: */
8: #include "src/dm/adda/addaimpl.h"
10: PetscCookie ADDA_COOKIE = 0;
14: /*@C
15: ADDACreate - Creates an ADDA object that translates between coordinates
16: in a geometric grid of arbitrary dimension and data in a PETSc vector
17: distributed over several processors.
19: Collective on MPI_Comm
21: Input Parameters:
22: + comm - MPI communicator
23: . dim - the dimension of the grid
24: . nodes - array with dim entries giving the number of nodes in each dimension
25: . procs - array with dim entries giving the number of processors in each dimension
26: (or PETSC_NULL if it is to be determined automatically)
27: . dof - number of degrees of freedom per node
28: - periodic - array with dim entries; the i-th entry is PETSC_TRUE iff dimension i is periodic
30: Output Parameter:
31: . adda_p - pointer to the ADDA data structure that is created
33: Level: intermediate
35: @*/
36: PetscErrorCode ADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,
37: PetscInt dof, PetscTruth *periodic,ADDA *adda_p)
38: {
40: ADDA adda;
41: PetscInt s=1; /* stencil width, fixed to 1 at the moment */
42: PetscMPIInt rank,size;
43: PetscInt i;
44: PetscInt nodes_total;
45: PetscInt nodesleft;
46: PetscInt procsleft;
47: PetscInt procsdimi;
48: PetscInt ranki;
49: PetscInt rpq;
54: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
55: DMInitializePackage(PETSC_NULL);
56: #endif
58: PetscHeaderCreate(*adda_p,_p_ADDA,struct _ADDAOps,ADDA_COOKIE,0,"ADDA",comm,ADDADestroy,0);
59: adda = *adda_p;
60: adda->ops->view = ADDAView;
61: adda->ops->createglobalvector = ADDACreateGlobalVector;
62: adda->ops->getcoloring = ADDAGetColoring;
63: adda->ops->getmatrix = ADDAGetMatrix;
64: adda->ops->getinterpolation = ADDAGetInterpolation;
65: adda->ops->refine = ADDARefine;
66: adda->ops->coarsen = ADDACoarsen;
67: adda->ops->getinjection = ADDAGetInjection;
68: adda->ops->getaggregates = ADDAGetAggregates;
69:
70: MPI_Comm_size(comm,&size);
71: MPI_Comm_rank(comm,&rank);
72:
73: adda->dim = dim;
74: adda->dof = dof;
76: /* nodes */
77: PetscMalloc(dim*sizeof(PetscInt), &(adda->nodes));
78: PetscMemcpy(adda->nodes, nodes, dim*sizeof(PetscInt));
79: /* total number of nodes */
80: nodes_total = 1;
81: for(i=0; i<dim; i++) nodes_total *= nodes[i];
83: /* procs */
84: PetscMalloc(dim*sizeof(PetscInt), &(adda->procs));
85: /* create distribution of nodes to processors */
86: if(procs == PETSC_NULL) {
87: procs = adda->procs;
88: nodesleft = nodes_total;
89: procsleft = size;
90: /* figure out a good way to split the array to several processors */
91: for(i=0; i<dim; i++) {
92: if(i==dim-1) {
93: procs[i] = procsleft;
94: } else {
95: /* calculate best partition */
96: procs[i] = (PetscInt)(((double) nodes[i])*pow(((double) procsleft)/((double) nodesleft),1./((double)(dim-i)))+0.5);
97: if(procs[i]<1) procs[i]=1;
98: while( procs[i] > 0 ) {
99: if( procsleft % procs[i] )
100: procs[i]--;
101: else
102: break;
103: }
104: nodesleft /= nodes[i];
105: procsleft /= procs[i];
106: }
107: }
108: } else {
109: /* user provided the number of processors */
110: PetscMemcpy(adda->procs, procs, dim*sizeof(PetscInt));
111: }
112: /* check for validity */
113: procsleft = 1;
114: for(i=0; i<dim; i++) {
115: if (nodes[i] < procs[i]) {
116: SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Partition in direction %d is too fine! %D nodes, %D processors", i, nodes[i], procs[i]);
117: }
118: procsleft *= procs[i];
119: }
120: if(procsleft != size) {
121: SETERRQ(1, "Created or was provided with inconsistent distribution of processors");
122: }
124: /* periodicity */
125: adda->periodic = periodic; /* note: the pointer is stored, not copied; the caller's array must stay valid */
126:
127: /* find out local region */
128: PetscMalloc(dim*sizeof(PetscInt), &(adda->lcs));
129: PetscMalloc(dim*sizeof(PetscInt), &(adda->lce));
130: procsdimi=size;
131: ranki=rank;
132: for(i=0; i<dim; i++) {
133: /* What is the number of processor for dimensions i+1, ..., dim-1? */
134: procsdimi /= procs[i];
135: /* these are all nodes that come before our region */
136: rpq = ranki / procsdimi;
137: adda->lcs[i] = rpq * (nodes[i]/procs[i]);
138: if( rpq + 1 < procs[i] ) {
139: adda->lce[i] = (rpq + 1) * (nodes[i]/procs[i]);
140: } else {
141: /* last one gets all the rest */
142: adda->lce[i] = nodes[i];
143: }
144: ranki = ranki - rpq*procsdimi;
145: }
146:
147: /* compute local size */
148: adda->lsize=1;
149: for(i=0; i<dim; i++) {
150: adda->lsize *= (adda->lce[i]-adda->lcs[i]);
151: }
152: adda->lsize *= dof;
154: /* find out ghost points */
155: PetscMalloc(dim*sizeof(PetscInt), &(adda->lgs));
156: PetscMalloc(dim*sizeof(PetscInt), &(adda->lge));
157: for(i=0; i<dim; i++) {
158: if( periodic[i] ) {
159: adda->lgs[i] = adda->lcs[i] - s;
160: adda->lge[i] = adda->lce[i] + s;
161: } else {
162: adda->lgs[i] = PetscMax(adda->lcs[i] - s, 0);
163: adda->lge[i] = PetscMin(adda->lce[i] + s, nodes[i]);
164: }
165: }
166:
167: /* compute local size with ghost points */
168: adda->lgsize=1;
169: for(i=0; i<dim; i++) {
170: adda->lgsize *= (adda->lge[i]-adda->lgs[i]);
171: }
172: adda->lgsize *= dof;
174: /* create global and local prototype vector */
175: VecCreateMPIWithArray(comm,adda->lsize,PETSC_DECIDE,0,&(adda->global));
176: VecSetBlockSize(adda->global,adda->dof);
177: /* local includes ghost points */
178: VecCreateSeqWithArray(PETSC_COMM_SELF,adda->lgsize,0,&(adda->local));
179: VecSetBlockSize(adda->local,dof);
181: PetscMalloc(dim*sizeof(PetscInt), &(adda->refine));
182: for(i=0; i<dim; i++) adda->refine[i] = 3;
183: adda->dofrefine = 1;
185: return(0);
186: }
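Editorial note: a minimal usage sketch (not part of adda.c), assuming PETSc has been initialized and the code runs inside a routine that returns PetscErrorCode so that CHKERRQ() can be used. It creates a periodic 2D ADDA and lets ADDACreate() pick the processor layout; nodes is copied, but the periodic array is kept by pointer and must stay valid.

  ADDA           adda;
  PetscErrorCode ierr;
  PetscInt       nodes[2]    = {40, 40};                  /* 40x40 grid */
  PetscTruth     periodic[2] = {PETSC_TRUE, PETSC_TRUE};  /* periodic in both directions */

  ierr = ADDACreate(PETSC_COMM_WORLD, 2, nodes, PETSC_NULL, 1, periodic, &adda);CHKERRQ(ierr);
  /* ... use the ADDA ... */
  ierr = ADDADestroy(adda);CHKERRQ(ierr);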
190: /*@
191: ADDADestroy - Destroys a distributed array.
193: Collective on ADDA
195: Input Parameter:
196: . adda - the distributed array to destroy
198: Level: beginner
200: .keywords: distributed array, destroy
202: .seealso: ADDACreate()
203: @*/
204: PetscErrorCode ADDADestroy(ADDA adda)
205: {
210: /* check reference count */
211: if(--((PetscObject)adda)->refct > 0) return(0);
213: /* destroy the allocated data */
214: PetscFree(adda->nodes);
215: PetscFree(adda->procs);
216: PetscFree(adda->lcs);
217: PetscFree(adda->lce);
218: PetscFree(adda->lgs);
219: PetscFree(adda->lge);
221: PetscHeaderDestroy(adda);
222: return(0);
223: }
227: /*@
228: ADDAView - Views a distributed array.
230: Collective on ADDA
232: Input Parameters:
233: + adda - the ADDA object to view
234: - v - the viewer
236: Level: developer
238: .keywords: distributed array, view
240: .seealso: DMView()
241: @*/
242: PetscErrorCode ADDAView(ADDA adda, PetscViewer v) {
244: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
245: return(0);
246: }
250: /*@
251: ADDACreateGlobalVector - Creates global vector for distributed array.
253: Collective on ADDA
255: Input Parameter:
256: . adda - the distributed array for which we create a global vector
258: Output Parameter:
259: . vec - the global vector
261: Level: beginner
263: .keywords: distributed array, vector
265: .seealso: DMCreateGlobalVector()
266: @*/
267: PetscErrorCode ADDACreateGlobalVector(ADDA adda, Vec *vec) {
272: VecDuplicate(adda->global, vec);
273: return(0);
274: }
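Sketch of typical use (hypothetical names, adda and ierr as in the sketch after ADDACreate()): the returned vector is a duplicate of the ADDA's global prototype and is owned by the caller.

  Vec g;
  ierr = ADDACreateGlobalVector(adda, &g);CHKERRQ(ierr);
  ierr = VecSet(g, 0.0);CHKERRQ(ierr);   /* fill with data */
  /* ... compute with g ... */
  ierr = VecDestroy(g);CHKERRQ(ierr);    /* the caller destroys the vector */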
278: /*@
279: ADDAGetColoring - Creates coloring for distributed array.
281: Collective on ADDA
283: Input Parameters:
284: + adda - the distributed array for which we create the coloring
285: - ctype - IS_COLORING_GHOSTED or IS_COLORING_LOCAL
287: Output Parameter:
288: . coloring - the coloring
290: Level: developer
292: .keywords: distributed array, coloring
294: .seealso: DMGetColoring()
295: @*/
296: PetscErrorCode ADDAGetColoring(ADDA adda, ISColoringType ctype,ISColoring *coloring) {
298: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
299: return(0);
300: }
304: /*@
305: ADDAGetMatrix - Creates matrix compatible with distributed array.
307: Collective on ADDA
309: Input Parameters:
310: + adda - the distributed array for which we create the matrix
311: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
312: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
314: Output Parameter:
315: . mat - the empty Jacobian
317: Level: beginner
319: .keywords: distributed array, matrix
321: .seealso: DMGetMatrix()
322: @*/
323: PetscErrorCode ADDAGetMatrix(ADDA adda, MatType mtype, Mat *mat) {
327: MatCreate(((PetscObject)adda)->comm, mat);
328: MatSetSizes(*mat, adda->lsize, adda->lsize, PETSC_DECIDE, PETSC_DECIDE);
329: MatSetType(*mat, mtype);
330: return(0);
331: }
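Sketch of how this routine might be used (hypothetical names). As implemented above it only sets the type and sizes, with no preallocation, so the caller inserts values (e.g. with ADDAMatSetValues()) and assembles.

  Mat A;
  ierr = ADDAGetMatrix(adda, MATMPIAIJ, &A);CHKERRQ(ierr);
  /* ... insert entries ... */
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatDestroy(A);CHKERRQ(ierr);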
335: /*@
336: ADDAGetMatrixNS - Creates a matrix compatible with two distributed arrays
338: Collective on ADDA
340: Input Parameters:
341: + addar - the distributed array whose grid indexes the rows of the matrix
342: . addac - the distributed array whose grid indexes the columns of the matrix
343: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
344: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
346: Output Parameter:
347: . mat - the empty Jacobian
349: Level: beginner
351: .keywords: distributed array, matrix
353: .seealso: DMGetMatrix()
354: @*/
355: PetscErrorCode ADDAGetMatrixNS(ADDA addar, ADDA addac, MatType mtype, Mat *mat) {
361: MatCreate(((PetscObject)addar)->comm, mat);
362: MatSetSizes(*mat, addar->lsize, addac->lsize, PETSC_DECIDE, PETSC_DECIDE);
363: MatSetType(*mat, mtype);
364: return(0);
365: }
369: /*@
370: ADDAGetInterpolation - Gets interpolation matrix between two ADDA objects
372: Collective on ADDA
374: Input Parameters:
375: + adda1 - the fine ADDA object
376: - adda2 - the second, coarser ADDA object
378: Output Parameters:
379: + mat - the interpolation matrix
380: - vec - the scaling (optional)
382: Level: developer
384: .keywords: distributed array, interpolation
386: .seealso: DMGetInterpolation()
387: @*/
388: PetscErrorCode ADDAGetInterpolation(ADDA adda1,ADDA adda2,Mat *mat,Vec *vec) {
390: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
391: return(0);
392: }
396: /*@
397: ADDARefine - Refines a distributed array.
399: Collective on ADDA
401: Input Parameters:
402: + adda - the distributed array to refine
403: - comm - the communicator to contain the new ADDA object (or PETSC_NULL)
405: Output Parameter:
406: . addaf - the refined ADDA
408: Level: developer
410: .keywords: distributed array, refine
412: .seealso: DMRefine()
413: @*/
414: PetscErrorCode ADDARefine(ADDA adda, MPI_Comm comm, ADDA *addaf) {
416: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
417: return(0);
418: }
422: /*@
423: ADDACoarsen - Coarsens a distributed array.
425: Collective on ADDA
427: Input Parameters:
428: + adda - the distributed array to coarsen
429: - comm - the communicator to contain the new ADDA object (or PETSC_NULL)
431: Output Parameter:
432: . addac - the coarsened ADDA
434: Level: developer
436: .keywords: distributed array, coarsen
438: .seealso: DMCoarsen()
439: @*/
440: PetscErrorCode ADDACoarsen(ADDA adda, MPI_Comm comm,ADDA *addac) {
442: PetscInt *nodesc;
443: PetscInt *procsc;
444: PetscInt dofc;
445: PetscInt i;
449: PetscMalloc(adda->dim*sizeof(PetscInt), &nodesc);
450: for(i=0; i<adda->dim; i++) {
451: nodesc[i] = (adda->nodes[i] % adda->refine[i]) ? adda->nodes[i] / adda->refine[i] + 1 : adda->nodes[i] / adda->refine[i];
452: }
453: dofc = (adda->dof % adda->dofrefine) ? adda->dof / adda->dofrefine + 1 : adda->dof / adda->dofrefine;
454: PetscMalloc(adda->dim*sizeof(PetscInt), &procsc);
455: PetscMemcpy(procsc, adda->procs, adda->dim*sizeof(PetscInt));
456: ADDACreate(((PetscObject)adda)->comm, adda->dim, nodesc, procsc, dofc, adda->periodic, addac);
457: /* copy refinement factors */
458: ADDASetRefinement(*addac, adda->refine, adda->dofrefine);
459: return(0);
460: }
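The coarse grid sizes are the rounded-up quotients nodes[i]/refine[i] (and dof/dofrefine); with the default refinement factor 3, a direction with 40 nodes coarsens to ceil(40/3) = 14 nodes. A usage sketch (hypothetical names), relying on the documented option of passing PETSC_NULL for the communicator:

  ADDA addac;
  ierr = ADDACoarsen(adda, PETSC_NULL, &addac);CHKERRQ(ierr);  /* inherits refinement factors from adda */
  /* ... build coarse-grid operators ... */
  ierr = ADDADestroy(addac);CHKERRQ(ierr);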
464: /*@
465: ADDAGetInjection - Gets injection between distributed arrays.
467: Collective on ADDA
469: Input Parameters:
470: + adda1 - the fine ADDA object
471: - adda2 - the second, coarser ADDA object
473: Output Parameter:
474: . ctx - the injection
476: Level: developer
478: .keywords: distributed array, injection
480: .seealso: DMGetInjection()
481: @*/
482: PetscErrorCode ADDAGetInjection(ADDA adda1, ADDA adda2, VecScatter *ctx) {
484: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
485: return(0);
486: }
488: /*@C
489: ADDAHCiterStartup - performs the first check for an iteration through a hypercube
490: lc, uc, idx all have to be valid arrays of size dim
491: This function sets idx to lc and then checks whether the lower corner (lc) does not
492: exceed the upper corner (uc). If lc "<=" uc in all coordinates, it returns PETSC_TRUE,
493: and PETSC_FALSE otherwise.
494:
495: Input Parameters:
496: + dim - the number of dimensions
497: . lc - the "lower" corner
498: - uc - the "upper" corner
500: Output Parameter:
501: . idx - the index that this function initializes to lc
503: Level: developer
504: @*/
505: PetscTruth ADDAHCiterStartup(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
507: PetscInt i;
508: PetscErrorCode ierr;
509: ierr = PetscMemcpy(idx, lc, sizeof(PetscInt)*dim);
510: if(ierr) {
511: PetscError(__LINE__,__FUNCT__,__FILE__,__SDIR__,ierr,0," ");
512: return PETSC_FALSE;
513: }
514: for(i=0; i<dim; i++) {
515: if( lc[i] > uc[i] ) {
516: return PETSC_FALSE;
517: }
518: }
519: return PETSC_TRUE;
520: }
522: /*@C
523: ADDAHCiter - iterates through a hypercube
524: lc, uc, idx all have to be valid arrays of size dim
525: This function returns PETSC_FALSE if idx exceeds uc, and PETSC_TRUE otherwise.
526: There are no guarantees on what happens if idx is not in the hypercube
527: spanned by lc and uc; this should be checked with ADDAHCiterStartup().
528:
529: Use this code as follows:
530: if( ADDAHCiterStartup(dim, lc, uc, idx) ) {
531: do {
532: ...
533: } while( ADDAHCiter(dim, lc, uc, idx) );
534: }
535:
536: Input Parameters:
537: + dim - the number of dimensions
538: . lc - the "lower" corner
539: - uc - the "upper" corner
541: Output Parameter:
542: . idx - the index that this function increases
544: Level: developer
545: @*/
546: PetscTruth ADDAHCiter(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
547: PetscInt i;
548: for(i=dim-1; i>=0; i--) {
549: idx[i] += 1;
550: if( uc[i] > idx[i] ) {
551: return PETSC_TRUE;
552: } else {
553: idx[i] -= uc[i] - lc[i];
554: }
555: }
556: return PETSC_FALSE;
557: }
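A self-contained sketch of the iteration pattern described above, for a fixed 3x2 box in two dimensions. The last index varies fastest, so the nodes are visited in the order (0,0), (0,1), (1,0), (1,1), (2,0), (2,1).

  PetscInt lc[2] = {0, 0};
  PetscInt uc[2] = {3, 2};   /* exclusive upper corner */
  PetscInt idx[2];

  if( ADDAHCiterStartup(2, lc, uc, idx) ) {
    do {
      /* idx[0], idx[1] are the coordinates of the current node */
    } while( ADDAHCiter(2, lc, uc, idx) );
  }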
561: /*@C
562: ADDAGetAggregates - Gets the aggregates that map between
563: grids associated with two ADDAs.
565: Collective on ADDA
567: Input Parameters:
568: + addac - the coarse grid ADDA
569: - addaf - the fine grid ADDA
571: Output Parameter:
572: . rest - the restriction matrix (transpose of the projection matrix)
574: Level: intermediate
576: .keywords: interpolation, restriction, multigrid
578: .seealso: ADDARefine(), ADDAGetInjection(), ADDAGetInterpolation()
579: @*/
580: PetscErrorCode ADDAGetAggregates(ADDA addac,ADDA addaf,Mat *rest)
581: {
582: PetscErrorCode ierr=0;
583: PetscInt i;
584: PetscInt dim;
585: PetscInt dofc, doff;
586: PetscInt *lcs_c, *lce_c;
587: PetscInt *lcs_f, *lce_f;
588: PetscInt *fgs, *fge;
589: PetscInt fgdofs, fgdofe;
590: ADDAIdx iter_c, iter_f;
591: PetscInt max_agg_size;
592: PetscMPIInt comm_size;
593: ADDAIdx *fine_nodes;
594: PetscInt fn_idx;
595: PetscScalar *one_vec;
601: if (addac->dim != addaf->dim) SETERRQ2(PETSC_ERR_ARG_INCOMP,"Dimensions of ADDA do not match %D %D", addac->dim, addaf->dim);
602: /* if (addac->dof != addaf->dof) SETERRQ2(PETSC_ERR_ARG_INCOMP,"DOF of ADDA do not match %D %D", addac->dof, addaf->dof); */
603: dim = addac->dim;
604: dofc = addac->dof;
605: doff = addaf->dof;
607: ADDAGetCorners(addac, &lcs_c, &lce_c);
608: ADDAGetCorners(addaf, &lcs_f, &lce_f);
609:
610: /* compute maximum size of aggregate */
611: max_agg_size = 1;
612: for(i=0; i<dim; i++) {
613: max_agg_size *= addaf->nodes[i] / addac->nodes[i] + 1;
614: }
615: max_agg_size *= doff / dofc + 1;
617: /* create the matrix that will contain the restriction operator */
618: MPI_Comm_size(PETSC_COMM_WORLD,&comm_size);
620: /* construct matrix */
621: if( comm_size == 1 ) {
622: ADDAGetMatrixNS(addac, addaf, MATSEQAIJ, rest);
623: MatSeqAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL);
624: } else {
625: ADDAGetMatrixNS(addac, addaf, MATMPIAIJ, rest);
626: MatMPIAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL, max_agg_size, PETSC_NULL);
627: }
628: /* store nodes in the fine grid here */
629: PetscMalloc(sizeof(ADDAIdx)*max_agg_size, &fine_nodes);
630: /* these are the values to set to, a collection of 1's */
631: PetscMalloc(sizeof(PetscScalar)*max_agg_size, &one_vec);
632: /* initialize */
633: for(i=0; i<max_agg_size; i++) {
634: PetscMalloc(sizeof(PetscInt)*dim, &(fine_nodes[i].x));
635: one_vec[i] = 1.0;
636: }
638: /* get iterators */
639: PetscMalloc(sizeof(PetscInt)*dim, &(iter_c.x));
640: PetscMalloc(sizeof(PetscInt)*dim, &(iter_f.x));
642: /* the fine grid node corner for each coarse grid node */
643: PetscMalloc(sizeof(PetscInt)*dim, &fgs);
644: PetscMalloc(sizeof(PetscInt)*dim, &fge);
646: /* loop over all coarse nodes */
647: PetscMemcpy(iter_c.x, lcs_c, sizeof(PetscInt)*dim);
648: if( ADDAHCiterStartup(dim, lcs_c, lce_c, iter_c.x) ) {
649: do {
650: /* find corresponding fine grid nodes */
651: for(i=0; i<dim; i++) {
652: fgs[i] = iter_c.x[i]*addaf->nodes[i]/addac->nodes[i];
653: fge[i] = PetscMin((iter_c.x[i]+1)*addaf->nodes[i]/addac->nodes[i], addaf->nodes[i]);
654: }
655: /* treat all dof of the coarse grid */
656: for(iter_c.d=0; iter_c.d<dofc; iter_c.d++) {
657: /* find corresponding fine grid dof's */
658: fgdofs = iter_c.d*doff/dofc;
659: fgdofe = PetscMin((iter_c.d+1)*doff/dofc, doff);
660: /* we now know the "box" of all the fine grid nodes that are mapped to one coarse grid node */
661: fn_idx = 0;
662: /* loop over those corresponding fine grid nodes */
663: if( ADDAHCiterStartup(dim, fgs, fge, iter_f.x) ) {
664: do {
665: /* loop over all corresponding fine grid dof */
666: for(iter_f.d=fgdofs; iter_f.d<fgdofe; iter_f.d++) {
667: PetscMemcpy(fine_nodes[fn_idx].x, iter_f.x, sizeof(PetscInt)*dim);
668: fine_nodes[fn_idx].d = iter_f.d;
669: fn_idx++;
670: }
671: } while( ADDAHCiter(dim, fgs, fge, iter_f.x) );
672: }
673: /* add all these points to one aggregate */
674: ADDAMatSetValues(*rest, addac, 1, &iter_c, addaf, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
675: }
676: } while( ADDAHCiter(dim, lcs_c, lce_c, iter_c.x) );
677: }
679: /* free memory */
680: PetscFree(fgs);
681: PetscFree(fge);
682: PetscFree(iter_c.x);
683: PetscFree(iter_f.x);
684: PetscFree(lcs_c);
685: PetscFree(lce_c);
686: PetscFree(lcs_f);
687: PetscFree(lce_f);
688: PetscFree(one_vec);
689: for(i=0; i<max_agg_size; i++) { /* one x array was allocated for each of the max_agg_size entries above */
690: PetscFree(fine_nodes[i].x);
691: }
692: PetscFree(fine_nodes);
694: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
695: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
696: return(0);
697: }
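A sketch of calling this routine for two existing ADDAs (hypothetical names addac and addaf for the coarse and fine grids): the resulting matrix has one row per coarse node and one column per fine node, so applying it to a fine-grid vector sums each aggregate into the corresponding coarse entry.

  Mat rest;
  ierr = ADDAGetAggregates(addac, addaf, &rest);CHKERRQ(ierr);
  /* e.g. restrict a fine-grid vector: MatMult(rest, xfine, xcoarse) */
  ierr = MatDestroy(rest);CHKERRQ(ierr);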
701: /*@
702: ADDASetRefinement - Sets the refinement factors of the distributed array.
704: Collective on ADDA
706: Input Parameters:
707: + adda - the ADDA object
708: . refine - array with dim entries giving the refinement factor in each dimension
709: - dofrefine - the refinement factor for the dof, usually just 1
711: Level: developer
713: .keywords: distributed array, refinement
714: @*/
715: PetscErrorCode ADDASetRefinement(ADDA adda, PetscInt *refine, PetscInt dofrefine) {
720: PetscMemcpy(adda->refine, refine, adda->dim*sizeof(PetscInt));
721: adda->dofrefine = dofrefine;
722: return(0);
723: }
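Sketch for a hypothetical 3-dimensional ADDA, replacing the default factor of 3 by 2 in every direction while leaving the dof refinement at 1; the refine array is copied, so a local array is fine.

  PetscInt refine[3] = {2, 2, 2};
  ierr = ADDASetRefinement(adda, refine, 1);CHKERRQ(ierr);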
727: /*@
728: ADDAGetCorners - Gets the corners of the local area
730: Collective on ADDA
732: Input Parameter:
733: . adda - the ADDA object
735: Output Parameters:
736: + lcorner - the "lower" corner
737: - ucorner - the "upper" corner
739: Both lcorner and ucorner are allocated by this procedure and will point to an
740: array of size adda->dim. It is the caller's responsibility to free both arrays with PetscFree().
742: Level: beginner
744: .keywords: distributed array, refinement
745: @*/
746: PetscErrorCode ADDAGetCorners(ADDA adda, PetscInt **lcorner, PetscInt **ucorner) {
752: PetscMalloc(adda->dim*sizeof(PetscInt), lcorner);
753: PetscMalloc(adda->dim*sizeof(PetscInt), ucorner);
754: PetscMemcpy(*lcorner, adda->lcs, adda->dim*sizeof(PetscInt));
755: PetscMemcpy(*ucorner, adda->lce, adda->dim*sizeof(PetscInt));
756: return(0);
757: }
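Sketch of querying and releasing the corner arrays (hypothetical names); the locally owned region in direction i is lcorner[i] <= x_i < ucorner[i].

  PetscInt *lcorner, *ucorner;
  ierr = ADDAGetCorners(adda, &lcorner, &ucorner);CHKERRQ(ierr);
  /* iterate the owned region, e.g. with ADDAHCiterStartup()/ADDAHCiter() */
  ierr = PetscFree(lcorner);CHKERRQ(ierr);
  ierr = PetscFree(ucorner);CHKERRQ(ierr);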
761: /*@
762: ADDAGetGhostCorners - Gets the ghost corners of the local area
764: Collective on ADDA
766: Input Parameter:
767: . adda - the ADDA object
769: Output Parameters:
770: + lcorner - the "lower" corner of the ghosted area
771: - ucorner - the "upper" corner of the ghosted area
773: Both lcorner and ucorner are allocated by this procedure and will point to an
774: array of size adda->dim. It is the caller's responsibility to free both arrays with PetscFree().
776: Level: beginner
778: .keywords: distributed array, refinement
779: @*/
780: PetscErrorCode ADDAGetGhostCorners(ADDA adda, PetscInt **lcorner, PetscInt **ucorner) {
786: PetscMalloc(adda->dim*sizeof(PetscInt), lcorner);
787: PetscMalloc(adda->dim*sizeof(PetscInt), ucorner);
788: PetscMemcpy(*lcorner, adda->lgs, adda->dim*sizeof(PetscInt));
789: PetscMemcpy(*ucorner, adda->lge, adda->dim*sizeof(PetscInt));
790: return(0);
791: }
797: /*@
798: ADDAMatSetValues - Inserts or adds a block of values into a matrix. The values
799: are indexed geometrically with the help of the ADDA data structure.
800: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
801: MUST be called after all calls to ADDAMatSetValues() have been completed.
803: Not Collective
805: Input Parameters:
806: + mat - the matrix
807: . addam - the ADDA geometry information for the rows
808: . m - the number of rows
809: . idxm - the row indices, each of them a proper ADDAIdx
810: . addan - the ADDA geometry information for the columns
811: . n - the number of columns
812: . idxn - the column indices, each of them a proper ADDAIdx
813: . v - a logically two-dimensional array of values of size m*n
814: - addv - either ADD_VALUES or INSERT_VALUES, where
815: ADD_VALUES adds values to any existing entries, and
816: INSERT_VALUES replaces existing entries with new values
818: Notes:
819: By default the values, v, are row-oriented and unsorted.
820: See MatSetOption() for other options.
822: Calls to ADDAMatSetValues() (and MatSetValues()) with the INSERT_VALUES and ADD_VALUES
823: options cannot be mixed without intervening calls to the assembly
824: routines.
826: Efficiency Alert:
827: The routine ADDAMatSetValuesBlocked() may offer much better efficiency
828: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
830: Level: beginner
832: Concepts: matrices^putting entries in
834: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), ADDAMatSetValuesBlocked(),
835: InsertMode, INSERT_VALUES, ADD_VALUES
836: @*/
837: PetscErrorCode ADDAMatSetValues(Mat mat, ADDA addam, PetscInt m, const ADDAIdx idxm[],
838: ADDA addan, PetscInt n, const ADDAIdx idxn[],
839: const PetscScalar v[], InsertMode addv) {
841: PetscInt *nodemult;
842: PetscInt i, j;
843: PetscInt *matidxm, *matidxn;
844: PetscInt *x, d;
845: PetscInt idx;
848: /* find correct multiplying factors */
849: PetscMalloc(addam->dim*sizeof(PetscInt), &nodemult);
850: nodemult[addam->dim-1] = 1;
851: for(j=addam->dim-2; j>=0; j--) {
852: nodemult[j] = nodemult[j+1]*(addam->nodes[j+1]);
853: }
854: /* convert each coordinate in idxm to the matrix row index */
855: PetscMalloc(m*sizeof(PetscInt), &matidxm);
856: for(i=0; i<m; i++) {
857: x = idxm[i].x; d = idxm[i].d;
858: idx = 0;
859: for(j=addam->dim-1; j>=0; j--) {
860: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
861: if( addam->periodic[j] ) { /* periodic wraps around */
862: x[j] += addam->nodes[j];
863: } else { /* non-periodic get discarded */
864: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
865: goto endofloop_m;
866: }
867: }
868: if( x[j] >= addam->nodes[j] ) { /* "right", "above", etc. of boundary */
869: if( addam->periodic[j] ) { /* periodic wraps around */
870: x[j] -= addam->nodes[j];
871: } else { /* non-periodic get discarded */
872: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
873: goto endofloop_m;
874: }
875: }
876: idx += x[j]*nodemult[j];
877: }
878: matidxm[i] = idx*(addam->dof) + d;
879: endofloop_m:
880: ;
881: }
882: PetscFree(nodemult);
884: /* find correct multiplying factors */
885: PetscMalloc(addan->dim*sizeof(PetscInt), &nodemult);
886: nodemult[addan->dim-1] = 1;
887: for(j=addan->dim-2; j>=0; j--) {
888: nodemult[j] = nodemult[j+1]*(addan->nodes[j+1]);
889: }
890: /* convert each coordinate in idxn to the matrix column index */
891: PetscMalloc(n*sizeof(PetscInt), &matidxn);
892: for(i=0; i<n; i++) {
893: x = idxn[i].x; d = idxn[i].d;
894: idx = 0;
895: for(j=addan->dim-1; j>=0; j--) {
896: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
897: if( addan->periodic[j] ) { /* periodic wraps around */
898: x[j] += addan->nodes[j];
899: } else { /* non-periodic get discarded */
900: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
901: goto endofloop_n;
902: }
903: }
904: if( x[j] >= addan->nodes[j] ) { /* "right", "above", etc. of boundary */
905: if( addan->periodic[j] ) { /* periodic wraps around */
906: x[j] -= addan->nodes[j];
907: } else { /* non-periodic get discarded */
908: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
909: goto endofloop_n;
910: }
911: }
912: idx += x[j]*nodemult[j];
913: }
914: matidxn[i] = idx*(addan->dof) + d;
915: endofloop_n:
916: ;
917: }
918: /* call original MatSetValues() */
919: MatSetValues(mat, m, matidxm, n, matidxn, v, addv);
920: /* clean up */
921: PetscFree(nodemult);
922: PetscFree(matidxm);
923: PetscFree(matidxn);
924: return(0);
925: }
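A sketch that inserts a single unit diagonal entry for the node at (3,5), dof component 0, of a hypothetical 2-dimensional ADDA adda into a matrix A obtained from ADDAGetMatrix(). ADDAIdx (defined in addaimpl.h) is used here only through its x and d members, exactly as in the routine above; note that for periodic wrap-around the routine may modify the coordinate arrays passed in.

  ADDAIdx     row, col;
  PetscInt    xr[2] = {3, 5}, xc[2] = {3, 5};
  PetscScalar v = 1.0;

  row.x = xr; row.d = 0;
  col.x = xc; col.d = 0;
  ierr = ADDAMatSetValues(A, adda, 1, &row, adda, 1, &col, &v, INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);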