Actual source code: adda.c
petsc-dev 2014-02-02
/*
      Contributed by Arvid Bessen, Columbia University, June 2007

      Extension of DMDA object to any number of dimensions.
*/
#include <../src/dm/impls/adda/addaimpl.h>             /*I "petscdmadda.h" I*/
PetscErrorCode DMDestroy_ADDA(DM dm)
{
  DM_ADDA *dd = (DM_ADDA*)dm->data;

  /* destroy the allocated data */
  PetscFree(dd->nodes);
  PetscFree(dd->procs);
  PetscFree(dd->lcs);
  PetscFree(dd->lce);
  PetscFree(dd->lgs);
  PetscFree(dd->lge);
  PetscFree(dd->refine);

  VecDestroy(&dd->global);
  /* This was originally freed in DMDestroy(), but that prevents reference counting of backend objects */
  PetscFree(dd);
  return(0);
}
PetscErrorCode DMView_ADDA(DM dm, PetscViewer v)
{
  SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP, "Not implemented yet");
  return(0);
}
PetscErrorCode DMCreateGlobalVector_ADDA(DM dm, Vec *vec)
{
  DM_ADDA *dd = (DM_ADDA*)dm->data;

  VecDuplicate(dd->global, vec);
  return(0);
}
PetscErrorCode DMCreateColoring_ADDA(DM dm, ISColoringType ctype,ISColoring *coloring)
{
  SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP, "Not implemented yet");
  return(0);
}
PetscErrorCode DMCreateMatrix_ADDA(DM dm, Mat *mat)
{
  DM_ADDA *dd = (DM_ADDA*)dm->data;

  MatCreate(PetscObjectComm((PetscObject)dm), mat);
  MatSetSizes(*mat, dd->lsize, dd->lsize, PETSC_DECIDE, PETSC_DECIDE);
  MatSetType(*mat, dm->mattype);
  MatSetUp(*mat);
  return(0);
}
/*@
   DMADDAGetMatrixNS - Creates a matrix compatible with two distributed arrays

   Collective on ADDA

   Input Parameters:
+  dm - the distributed array that indexes the rows of the matrix
.  dmc - the distributed array that indexes the columns of the matrix
-  mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
           any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).

   Output Parameter:
.  mat - the empty Jacobian

   Level: beginner

.keywords: distributed array, matrix

.seealso: DMCreateMatrix()
@*/
PetscErrorCode DMADDAGetMatrixNS(DM dm, DM dmc, MatType mtype, Mat *mat)
{
  DM_ADDA *dd  = (DM_ADDA*)dm->data;
  DM_ADDA *ddc = (DM_ADDA*)dmc->data;

  MatCreate(PetscObjectComm((PetscObject)dm), mat);
  MatSetSizes(*mat, dd->lsize, ddc->lsize, PETSC_DECIDE, PETSC_DECIDE);
  MatSetType(*mat, mtype);
  return(0);
}
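/*
   Usage sketch for DMADDAGetMatrixNS(), assuming two already set-up ADDA
   objects dmr (indexing the rows) and dmc (indexing the columns); the names
   dmr, dmc and R are illustrative only. Entries would normally be inserted
   with DMADDAMatSetValues() before the assembly calls.

     Mat            R;
     PetscErrorCode ierr;

     ierr = DMADDAGetMatrixNS(dmr, dmc, MATAIJ, &R);CHKERRQ(ierr);
     ierr = MatAssemblyBegin(R, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(R, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatDestroy(&R);CHKERRQ(ierr);
*/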
PetscErrorCode DMCreateInterpolation_ADDA(DM dm1,DM dm2,Mat *mat,Vec *vec)
{
  SETERRQ(PetscObjectComm((PetscObject)dm1),PETSC_ERR_SUP, "Not implemented yet");
  return(0);
}

PetscErrorCode DMRefine_ADDA(DM dm, MPI_Comm comm, DM *dmf)
{
  SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP, "Not implemented yet");
  return(0);
}
PetscErrorCode DMCoarsen_ADDA(DM dm, MPI_Comm comm,DM *dmc)
{
  PetscInt *nodesc;
  PetscInt dofc;
  PetscInt i;
  DM_ADDA  *dd = (DM_ADDA*)dm->data;

  PetscMalloc1(dd->dim, &nodesc);
  for (i=0; i<dd->dim; i++) {
    nodesc[i] = (dd->nodes[i] % dd->refine[i]) ? dd->nodes[i] / dd->refine[i] + 1 : dd->nodes[i] / dd->refine[i];
  }
  dofc = (dd->dof % dd->dofrefine) ? dd->dof / dd->dofrefine + 1 : dd->dof / dd->dofrefine;
  DMADDACreate(PetscObjectComm((PetscObject)dm), dd->dim, nodesc, dd->procs, dofc, dd->periodic, dmc);
  PetscFree(nodesc);
  /* copy refinement factors */
  DMADDASetRefinement(*dmc, dd->refine, dd->dofrefine);
  return(0);
}
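/*
   Worked example of the coarsening arithmetic above: the coarse grid size in
   each direction is the node count divided by the refinement factor, rounded
   up. For instance, with nodes = {10,10,10} and refine = {3,3,3} the coarsened
   ADDA gets nodesc = {4,4,4}; with dof = 1 and dofrefine = 1 the number of
   degrees of freedom per node stays 1.
*/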
PetscErrorCode DMCreateInjection_ADDA(DM dm1,DM dm2, VecScatter *ctx)
{
  SETERRQ(PetscObjectComm((PetscObject)dm1),PETSC_ERR_SUP, "Not implemented yet");
  return(0);
}
/*@C
   ADDAHCiterStartup - performs the first check for an iteration through a hypercube
   lc, uc, idx all have to be valid arrays of size dim
   This function sets idx to lc and then checks whether the lower corner (lc) is less
   than or equal to the upper corner (uc). If lc "<=" uc in all coordinates, it returns
   PETSC_TRUE, and PETSC_FALSE otherwise.

   Input Parameters:
+  dim - the number of dimensions
.  lc - the "lower" corner
-  uc - the "upper" corner

   Output Parameter:
.  idx - the index that this function increases

   Developer Notes: This routine returns a PetscBool rather than a PETSc error code,
   so errors occurring inside it cannot be propagated to the caller; it does not
   follow the standard PETSc error handling conventions.

   Level: developer
@*/
PetscBool ADDAHCiterStartup(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx)
{
  PetscErrorCode ierr;
  PetscInt       i;

  ierr = PetscMemcpy(idx, lc, sizeof(PetscInt)*dim);
  if (ierr) {
    PetscError(PETSC_COMM_SELF,__LINE__,__FUNCT__,__FILE__,ierr,PETSC_ERROR_REPEAT," ");
    return PETSC_FALSE;
  }
  for (i=0; i<dim; i++) {
    if (lc[i] > uc[i]) return PETSC_FALSE;
  }
  return PETSC_TRUE;
}
/*@C
   ADDAHCiter - iterates through a hypercube
   lc, uc, idx all have to be valid arrays of size dim
   This function returns PETSC_FALSE if idx exceeds uc, and PETSC_TRUE otherwise.
   There are no guarantees on what happens if idx is not in the hypercube
   spanned by lc, uc; this should be checked with ADDAHCiterStartup.

   Use this code as follows:
   if (ADDAHCiterStartup(dim, lc, uc, idx)) {
     do {
       ...
     } while (ADDAHCiter(dim, lc, uc, idx));
   }

   Input Parameters:
+  dim - the number of dimensions
.  lc - the "lower" corner
-  uc - the "upper" corner

   Output Parameter:
.  idx - the index that this function increases

   Level: developer
@*/
PetscBool ADDAHCiter(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx)
{
  PetscInt i;
  for (i=dim-1; i>=0; i--) {
    idx[i] += 1;
    if (uc[i] > idx[i]) {
      return PETSC_TRUE;
    } else {
      idx[i] -= uc[i] - lc[i];
    }
  }
  return PETSC_FALSE;
}
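/*
   Concrete sketch of the iterator pair above: it counts the points of the 2D
   box with lower corner (0,0) and exclusive upper corner (3,2). The loop body
   runs for (0,0),(0,1),(1,0),(1,1),(2,0),(2,1), so count ends up as 6. All
   names are local to this example.

     PetscInt lc[2] = {0, 0};
     PetscInt uc[2] = {3, 2};
     PetscInt idx[2], count = 0;

     if (ADDAHCiterStartup(2, lc, uc, idx)) {
       do {
         count++;
       } while (ADDAHCiter(2, lc, uc, idx));
     }
*/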
PetscErrorCode DMCreateAggregates_ADDA(DM dmc,DM dmf,Mat *rest)
{
  PetscErrorCode ierr=0;
  PetscInt       i;
  PetscInt       dim;
  PetscInt       dofc, doff;
  PetscInt       *lcs_c, *lce_c;
  PetscInt       *lcs_f, *lce_f;
  PetscInt       *fgs, *fge;
  PetscInt       fgdofs, fgdofe;
  ADDAIdx        iter_c, iter_f;
  PetscInt       max_agg_size;
  PetscMPIInt    comm_size;
  ADDAIdx        *fine_nodes;
  PetscInt       fn_idx;
  PetscScalar    *one_vec;
  DM_ADDA        *ddc = (DM_ADDA*)dmc->data;
  DM_ADDA        *ddf = (DM_ADDA*)dmf->data;

  if (ddc->dim != ddf->dim) SETERRQ2(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Dimensions of ADDA do not match %D %D", ddc->dim, ddf->dim);
  /* if (dmc->dof != dmf->dof) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"DOF of ADDA do not match %D %D", dmc->dof, dmf->dof); */
  dim  = ddc->dim;
  dofc = ddc->dof;
  doff = ddf->dof;

  DMADDAGetCorners(dmc, &lcs_c, &lce_c);
  DMADDAGetCorners(dmf, &lcs_f, &lce_f);

  /* compute maximum size of aggregate */
  max_agg_size = 1;
  for (i=0; i<dim; i++) {
    max_agg_size *= ddf->nodes[i] / ddc->nodes[i] + 1;
  }
  max_agg_size *= doff / dofc + 1;

  /* create the matrix that will contain the restriction operator */
  MPI_Comm_size(PetscObjectComm((PetscObject)dmc),&comm_size);
  /* construct matrix */
  if (comm_size == 1) {
    DMADDAGetMatrixNS(dmc, dmf, MATSEQAIJ, rest);
    MatSeqAIJSetPreallocation(*rest, max_agg_size, NULL);
  } else {
    DMADDAGetMatrixNS(dmc, dmf, MATMPIAIJ, rest);
    MatMPIAIJSetPreallocation(*rest, max_agg_size, NULL, max_agg_size, NULL);
  }
  /* store nodes in the fine grid here */
  PetscMalloc(sizeof(ADDAIdx)*max_agg_size, &fine_nodes);
  /* these are the values to set to, a collection of 1's */
  PetscMalloc(sizeof(PetscScalar)*max_agg_size, &one_vec);
  /* initialize */
  for (i=0; i<max_agg_size; i++) {
    PetscMalloc(sizeof(PetscInt)*dim, &(fine_nodes[i].x));
    one_vec[i] = 1.0;
  }

  /* get iterators */
  PetscMalloc(sizeof(PetscInt)*dim, &(iter_c.x));
  PetscMalloc(sizeof(PetscInt)*dim, &(iter_f.x));

  /* the fine grid node corner for each coarse grid node */
  PetscMalloc(sizeof(PetscInt)*dim, &fgs);
  PetscMalloc(sizeof(PetscInt)*dim, &fge);

  /* loop over all coarse nodes */
  PetscMemcpy(iter_c.x, lcs_c, sizeof(PetscInt)*dim);
  if (ADDAHCiterStartup(dim, lcs_c, lce_c, iter_c.x)) {
    do {
      /* find corresponding fine grid nodes */
      for (i=0; i<dim; i++) {
        fgs[i] = iter_c.x[i]*ddf->nodes[i]/ddc->nodes[i];
        fge[i] = PetscMin((iter_c.x[i]+1)*ddf->nodes[i]/ddc->nodes[i], ddf->nodes[i]);
      }
      /* treat all dof of the coarse grid */
      for (iter_c.d=0; iter_c.d<dofc; iter_c.d++) {
        /* find corresponding fine grid dof's */
        fgdofs = iter_c.d*doff/dofc;
        fgdofe = PetscMin((iter_c.d+1)*doff/dofc, doff);
        /* we now know the "box" of all the fine grid nodes that are mapped to one coarse grid node */
        fn_idx = 0;
        /* loop over those corresponding fine grid nodes */
        if (ADDAHCiterStartup(dim, fgs, fge, iter_f.x)) {
          do {
            /* loop over all corresponding fine grid dof */
            for (iter_f.d=fgdofs; iter_f.d<fgdofe; iter_f.d++) {
              PetscMemcpy(fine_nodes[fn_idx].x, iter_f.x, sizeof(PetscInt)*dim);
              fine_nodes[fn_idx].d = iter_f.d;
              fn_idx++;
            }
          } while (ADDAHCiter(dim, fgs, fge, iter_f.x));
        }
        /* add all these points to one aggregate */
        DMADDAMatSetValues(*rest, dmc, 1, &iter_c, dmf, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
      }
    } while (ADDAHCiter(dim, lcs_c, lce_c, iter_c.x));
  }

  /* free memory */
  PetscFree(fgs);
  PetscFree(fge);
  PetscFree(iter_c.x);
  PetscFree(iter_f.x);
  PetscFree(lcs_c);
  PetscFree(lce_c);
  PetscFree(lcs_f);
  PetscFree(lce_f);
  PetscFree(one_vec);
  for (i=0; i<max_agg_size; i++) {
    PetscFree(fine_nodes[i].x);
  }
  PetscFree(fine_nodes);

  MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
  return(0);
}
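/*
   Usage sketch of how this routine is typically reached through the generic DM
   interface, assuming a fine ADDA object dmf whose refinement factors have
   been set (see DMADDASetRefinement() below); dmf, dmc and R are illustrative
   names.

     DM             dmc;
     Mat            R;
     PetscErrorCode ierr;

     ierr = DMCoarsen(dmf, MPI_COMM_NULL, &dmc);CHKERRQ(ierr);
     ierr = DMCreateAggregates(dmc, dmf, &R);CHKERRQ(ierr);
     ierr = MatDestroy(&R);CHKERRQ(ierr);
     ierr = DMDestroy(&dmc);CHKERRQ(ierr);
*/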
/*@
   DMADDASetRefinement - Sets the refinement factors of the distributed arrays.

   Collective on ADDA

   Input Parameters:
+  dm - the ADDA object
.  refine - array of refinement factors, one per dimension
-  dofrefine - the refinement factor for the dof, usually just 1

   Level: developer

.keywords: distributed array, refinement
@*/
PetscErrorCode DMADDASetRefinement(DM dm, PetscInt *refine, PetscInt dofrefine)
{
  DM_ADDA *dd = (DM_ADDA*)dm->data;

  PetscMemcpy(dd->refine, refine, dd->dim*sizeof(PetscInt));
  dd->dofrefine = dofrefine;
  return(0);
}
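/*
   Usage sketch, assuming a 2D ADDA object adda that has already been created;
   the refinement factors chosen here are illustrative.

     PetscInt       refine[2] = {2, 2};
     PetscErrorCode ierr;

     ierr = DMADDASetRefinement(adda, refine, 1);CHKERRQ(ierr);
*/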
/*@
   DMADDAGetCorners - Gets the corners of the local area

   Not Collective

   Input Parameter:
.  dm - the ADDA object

   Output Parameters:
+  lcorner - the "lower" corner
-  ucorner - the "upper" corner

   Both lcorner and ucorner are allocated by this procedure and will point to an
   array of size dd->dim. The caller is responsible for freeing them with PetscFree().

   Level: beginner

.keywords: distributed array, corners
@*/
PetscErrorCode DMADDAGetCorners(DM dm, PetscInt **lcorner, PetscInt **ucorner)
{
  DM_ADDA *dd = (DM_ADDA*)dm->data;

  PetscMalloc1(dd->dim, lcorner);
  PetscMalloc1(dd->dim, ucorner);
  PetscMemcpy(*lcorner, dd->lcs, dd->dim*sizeof(PetscInt));
  PetscMemcpy(*ucorner, dd->lce, dd->dim*sizeof(PetscInt));
  return(0);
}
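/*
   Usage sketch, assuming an already set-up ADDA object adda; the corner arrays
   are owned by the caller afterwards and must be freed.

     PetscInt       *lc, *uc;
     PetscErrorCode ierr;

     ierr = DMADDAGetCorners(adda, &lc, &uc);CHKERRQ(ierr);
     ierr = PetscFree(lc);CHKERRQ(ierr);
     ierr = PetscFree(uc);CHKERRQ(ierr);
*/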
/*@
   DMADDAGetGhostCorners - Gets the ghost corners of the local area

   Not Collective

   Input Parameter:
.  dm - the ADDA object

   Output Parameters:
+  lcorner - the "lower" corner of the ghosted area
-  ucorner - the "upper" corner of the ghosted area

   Both lcorner and ucorner are allocated by this procedure and will point to an
   array of size dd->dim. The caller is responsible for freeing them with PetscFree().

   Level: beginner

.keywords: distributed array, corners, ghost points
@*/
PetscErrorCode DMADDAGetGhostCorners(DM dm, PetscInt **lcorner, PetscInt **ucorner)
{
  DM_ADDA *dd = (DM_ADDA*)dm->data;

  PetscMalloc1(dd->dim, lcorner);
  PetscMalloc1(dd->dim, ucorner);
  PetscMemcpy(*lcorner, dd->lgs, dd->dim*sizeof(PetscInt));
  PetscMemcpy(*ucorner, dd->lge, dd->dim*sizeof(PetscInt));
  return(0);
}
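/*
   Sketch combining the ghost corners with the hypercube iterators defined
   earlier in this file to count the nodes of the ghosted local region; adda is
   an assumed, already set-up 2D ADDA object, and inside the loop idx holds the
   coordinates of one ghosted local node.

     PetscInt       *lgc, *ugc, idx[2], nghost = 0;
     PetscErrorCode ierr;

     ierr = DMADDAGetGhostCorners(adda, &lgc, &ugc);CHKERRQ(ierr);
     if (ADDAHCiterStartup(2, lgc, ugc, idx)) {
       do {
         nghost++;
       } while (ADDAHCiter(2, lgc, ugc, idx));
     }
     ierr = PetscFree(lgc);CHKERRQ(ierr);
     ierr = PetscFree(ugc);CHKERRQ(ierr);
*/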
/*@C
   DMADDAMatSetValues - Inserts or adds a block of values into a matrix. The values
   are indexed geometrically with the help of the ADDA data structure.
   These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
   MUST be called after all calls to DMADDAMatSetValues() have been completed.

   Not Collective

   Input Parameters:
+  mat - the matrix
.  dmm - the ADDA geometry information for the rows
.  m - the number of rows
.  idxm - the row indices, each of them a proper ADDAIdx
.  dmn - the ADDA geometry information for the columns
.  n - the number of columns
.  idxn - the column indices, each of them a proper ADDAIdx
.  v - a logically two-dimensional array of values of size m*n
-  addv - either ADD_VALUES or INSERT_VALUES, where
   ADD_VALUES adds values to any existing entries, and
   INSERT_VALUES replaces existing entries with new values

   Notes:
   By default the values, v, are row-oriented and unsorted.
   See MatSetOption() for other options.

   Calls to DMADDAMatSetValues() (and MatSetValues()) with the INSERT_VALUES and ADD_VALUES
   options cannot be mixed without intervening calls to the assembly
   routines.

   Efficiency Alert:
   The routine ADDAMatSetValuesBlocked() may offer much better efficiency
   for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

   Level: beginner

   Concepts: matrices^putting entries in

.seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), ADDAMatSetValuesBlocked(),
          InsertMode, INSERT_VALUES, ADD_VALUES
@*/
PetscErrorCode DMADDAMatSetValues(Mat mat, DM dmm, PetscInt m, const ADDAIdx idxm[],DM dmn, PetscInt n, const ADDAIdx idxn[],const PetscScalar v[], InsertMode addv)
{
  DM_ADDA  *ddm = (DM_ADDA*)dmm->data;
  DM_ADDA  *ddn = (DM_ADDA*)dmn->data;
  PetscInt *nodemult;
  PetscInt i, j;
  PetscInt *matidxm, *matidxn;
  PetscInt *x, d;
  PetscInt idx;

  /* find correct multiplying factors */
  PetscMalloc1(ddm->dim, &nodemult);

  nodemult[ddm->dim-1] = 1;
  for (j=ddm->dim-2; j>=0; j--) {
    nodemult[j] = nodemult[j+1]*(ddm->nodes[j+1]);
  }
  /* convert each coordinate in idxm to the matrix row index */
  PetscMalloc1(m, &matidxm);
  for (i=0; i<m; i++) {
    x = idxm[i].x; d = idxm[i].d;
    idx = 0;
    for (j=ddm->dim-1; j>=0; j--) {
      if (x[j] < 0) { /* "left", "below", etc. of boundary */
        if (ddm->periodic[j]) { /* periodic wraps around */
          x[j] += ddm->nodes[j];
        } else { /* non-periodic get discarded */
          matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
          goto endofloop_m;
        }
      }
      if (x[j] >= ddm->nodes[j]) { /* "right", "above", etc. of boundary */
        if (ddm->periodic[j]) { /* periodic wraps around */
          x[j] -= ddm->nodes[j];
        } else { /* non-periodic get discarded */
          matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
          goto endofloop_m;
        }
      }
      idx += x[j]*nodemult[j];
    }
    matidxm[i] = idx*(ddm->dof) + d;
endofloop_m:
    ;
  }
  PetscFree(nodemult);

  /* find correct multiplying factors */
  PetscMalloc1(ddn->dim, &nodemult);

  nodemult[ddn->dim-1] = 1;
  for (j=ddn->dim-2; j>=0; j--) {
    nodemult[j] = nodemult[j+1]*(ddn->nodes[j+1]);
  }
  /* convert each coordinate in idxn to the matrix column index */
  PetscMalloc1(n, &matidxn);
  for (i=0; i<n; i++) {
    x = idxn[i].x; d = idxn[i].d;
    idx = 0;
    for (j=ddn->dim-1; j>=0; j--) {
      if (x[j] < 0) { /* "left", "below", etc. of boundary */
        if (ddn->periodic[j]) { /* periodic wraps around */
          x[j] += ddn->nodes[j];
        } else { /* non-periodic get discarded */
          matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
          goto endofloop_n;
        }
      }
      if (x[j] >= ddn->nodes[j]) { /* "right", "above", etc. of boundary */
        if (ddn->periodic[j]) { /* periodic wraps around */
          x[j] -= ddn->nodes[j];
        } else { /* non-periodic get discarded */
          matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
          goto endofloop_n;
        }
      }
      idx += x[j]*nodemult[j];
    }
    matidxn[i] = idx*(ddn->dof) + d;
endofloop_n:
    ;
  }
  /* call original MatSetValues() */
  MatSetValues(mat, m, matidxm, n, matidxn, v, addv);
  /* clean up */
  PetscFree(nodemult);
  PetscFree(matidxm);
  PetscFree(matidxn);
  return(0);
}
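/*
   Usage sketch, assuming a 2D ADDA object adda whose grid contains the node
   with coordinates (1,2). It puts a 1 on the diagonal entry belonging to that
   node and dof component 0; all variable names are illustrative.

     Mat            A;
     ADDAIdx        row;
     PetscInt       coords[2] = {1, 2};
     PetscScalar    one       = 1.0;
     PetscErrorCode ierr;

     ierr  = DMADDAGetMatrixNS(adda, adda, MATAIJ, &A);CHKERRQ(ierr);
     ierr  = MatSetUp(A);CHKERRQ(ierr);
     row.x = coords;
     row.d = 0;
     ierr  = DMADDAMatSetValues(A, adda, 1, &row, adda, 1, &row, &one, INSERT_VALUES);CHKERRQ(ierr);
     ierr  = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr  = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr  = MatDestroy(&A);CHKERRQ(ierr);
*/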
PetscErrorCode DMADDASetParameters(DM dm,PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof,PetscBool *periodic)
{
  PetscMPIInt rank,size;
  MPI_Comm    comm;
  PetscInt    i;
  PetscInt    nodes_total;
  PetscInt    nodesleft;
  PetscInt    procsleft;
  DM_ADDA     *dd = (DM_ADDA*)dm->data;

  PetscObjectGetComm((PetscObject)dm,&comm);
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  /* total number of nodes */
  nodes_total = 1;
  for (i=0; i<dim; i++) nodes_total *= nodes[i];
  dd->dim      = dim;
  dd->dof      = dof;
  dd->periodic = periodic;

  PetscMalloc1(dim, &(dd->nodes));
  PetscMemcpy(dd->nodes, nodes, dim*sizeof(PetscInt));

  /* procs */
  PetscMalloc1(dim, &(dd->procs));
  /* create distribution of nodes to processors */
  if (procs == NULL) {
    procs     = dd->procs;
    nodesleft = nodes_total;
    procsleft = size;
    /* figure out a good way to split the array to several processors */
    for (i=0; i<dim; i++) {
      if (i==dim-1) {
        procs[i] = procsleft;
      } else {
        /* calculate best partition: aim for procs[i] close to
           nodes[i]*(procsleft/nodesleft)^(1/(dim-i)), which splits the remaining
           processors proportionally to the remaining grid, then decrease procs[i]
           until it evenly divides procsleft */
        procs[i] = (PetscInt)(((PetscReal) nodes[i])*PetscPowReal(((PetscReal) procsleft)/((PetscReal) nodesleft),1./((PetscReal)(dim-i)))+0.5);
        if (procs[i]<1) procs[i]=1;
        while (procs[i] > 0) {
          if (procsleft % procs[i]) procs[i]--;
          else break;
        }
        nodesleft /= nodes[i];
        procsleft /= procs[i];
      }
    }
  } else {
    /* user provided the number of processors */
    PetscMemcpy(dd->procs, procs, dim*sizeof(PetscInt));
  }
  return(0);
}
PetscErrorCode DMSetUp_ADDA(DM dm)
{
  PetscInt    s=1; /* stencil width, fixed to 1 at the moment */
  PetscMPIInt rank,size;
  PetscInt    i;
  PetscInt    procsleft;
  PetscInt    procsdimi;
  PetscInt    ranki;
  PetscInt    rpq;
  DM_ADDA     *dd = (DM_ADDA*)dm->data;
  MPI_Comm    comm;
  PetscInt    *nodes,*procs,dim,dof;
  PetscBool   *periodic;

  PetscObjectGetComm((PetscObject)dm,&comm);
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);
  procs    = dd->procs;
  nodes    = dd->nodes;
  dim      = dd->dim;
  dof      = dd->dof;
  periodic = dd->periodic;

  /* check for validity */
  procsleft = 1;
  for (i=0; i<dim; i++) {
    if (nodes[i] < procs[i]) SETERRQ3(comm,PETSC_ERR_ARG_OUTOFRANGE,"Partition in direction %d is too fine! %D nodes, %D processors", i, nodes[i], procs[i]);
    procsleft *= procs[i];
  }
  if (procsleft != size) SETERRQ(comm,PETSC_ERR_PLIB, "Created or was provided with inconsistent distribution of processors");

  /* find out local region */
  PetscMalloc1(dim, &(dd->lcs));
  PetscMalloc1(dim, &(dd->lce));
  procsdimi = size;
  ranki     = rank;
  for (i=0; i<dim; i++) {
    /* what is the number of processors for dimensions i+1, ..., dim-1? */
    procsdimi /= procs[i];
    /* these are all nodes that come before our region */
    rpq        = ranki / procsdimi;
    dd->lcs[i] = rpq * (nodes[i]/procs[i]);
    if (rpq + 1 < procs[i]) {
      dd->lce[i] = (rpq + 1) * (nodes[i]/procs[i]);
    } else {
      /* last one gets all the rest */
      dd->lce[i] = nodes[i];
    }
    ranki = ranki - rpq*procsdimi;
  }

  /* compute local size */
  dd->lsize=1;
  for (i=0; i<dim; i++) {
    dd->lsize *= (dd->lce[i]-dd->lcs[i]);
  }
  dd->lsize *= dof;

  /* find out ghost points */
  PetscMalloc1(dim, &(dd->lgs));
  PetscMalloc1(dim, &(dd->lge));
  for (i=0; i<dim; i++) {
    if (periodic[i]) {
      dd->lgs[i] = dd->lcs[i] - s;
      dd->lge[i] = dd->lce[i] + s;
    } else {
      dd->lgs[i] = PetscMax(dd->lcs[i] - s, 0);
      dd->lge[i] = PetscMin(dd->lce[i] + s, nodes[i]);
    }
  }

  /* compute local size with ghost points */
  dd->lgsize=1;
  for (i=0; i<dim; i++) {
    dd->lgsize *= (dd->lge[i]-dd->lgs[i]);
  }
  dd->lgsize *= dof;

  /* create global and local prototype vector */
  VecCreateMPIWithArray(comm,dd->dof,dd->lsize,PETSC_DECIDE,0,&(dd->global));
#if ADDA_NEEDS_LOCAL_VECTOR
  /* local includes ghost points */
  VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->lgsize,0,&(dd->local));
#endif

  PetscMalloc1(dim, &(dd->refine));
  for (i=0; i<dim; i++) dd->refine[i] = 3;
  dd->dofrefine = 1;
  return(0);
}
PETSC_EXTERN PetscErrorCode DMCreate_ADDA(DM dm)
{
  DM_ADDA *dd;

  PetscNewLog(dm,&dd);
  dm->data = (void*)dd;

  dm->ops->view                = DMView_ADDA;
  dm->ops->createglobalvector  = DMCreateGlobalVector_ADDA;
  dm->ops->getcoloring         = DMCreateColoring_ADDA;
  dm->ops->creatematrix        = DMCreateMatrix_ADDA;
  dm->ops->createinterpolation = DMCreateInterpolation_ADDA;
  dm->ops->refine              = DMRefine_ADDA;
  dm->ops->coarsen             = DMCoarsen_ADDA;
  dm->ops->getinjection        = DMCreateInjection_ADDA;
  dm->ops->getaggregates       = DMCreateAggregates_ADDA;
  dm->ops->setup               = DMSetUp_ADDA;
  dm->ops->destroy             = DMDestroy_ADDA;
  return(0);
}
/*@C
   DMADDACreate - Creates an ADDA object that translates between coordinates
   in a geometric grid of arbitrary dimension and data in a PETSc vector
   distributed on several processors.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  dim - the dimension of the grid
.  nodes - array with dim entries that give the number of nodes in each dimension
.  procs - array with dim entries that give the number of processors in each dimension
           (or NULL if to be determined automatically)
.  dof - number of degrees of freedom per node
-  periodic - array with dim entries; the i-th entry is PETSC_TRUE iff dimension i is periodic

   Output Parameter:
.  dm_p - pointer to the ADDA data structure that is created

   Level: intermediate

@*/
PetscErrorCode DMADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof, PetscBool *periodic,DM *dm_p)
{
  DMCreate(comm,dm_p);
  DMSetType(*dm_p,DMADDA);
  DMADDASetParameters(*dm_p,dim,nodes,procs,dof,periodic);
  DMSetUp(*dm_p);
  return(0);
}
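/*
   A minimal, self-contained usage sketch of the ADDA interface defined in this
   file, intended to be run on a small number of MPI processes. It creates a 2D
   8x8 grid with one degree of freedom per node and non-periodic boundaries,
   builds a matrix and a global vector from the DM, and cleans up. Because
   DMADDASetParameters() stores the periodic pointer directly, the periodic
   array must stay alive as long as the DM does.

     #include <petscdmadda.h>

     int main(int argc, char **argv)
     {
       DM             adda;
       Mat            A;
       Vec            x;
       PetscInt       nodes[2]    = {8, 8};
       PetscBool      periodic[2] = {PETSC_FALSE, PETSC_FALSE};
       PetscErrorCode ierr;

       ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return ierr;
       ierr = DMADDACreate(PETSC_COMM_WORLD, 2, nodes, NULL, 1, periodic, &adda);CHKERRQ(ierr);
       ierr = DMCreateMatrix(adda, &A);CHKERRQ(ierr);
       ierr = DMCreateGlobalVector(adda, &x);CHKERRQ(ierr);
       ierr = VecDestroy(&x);CHKERRQ(ierr);
       ierr = MatDestroy(&A);CHKERRQ(ierr);
       ierr = DMDestroy(&adda);CHKERRQ(ierr);
       ierr = PetscFinalize();
       return ierr;
     }
*/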