Actual source code: pmap.c
/*$Id: pmap.c,v 1.20 2001/03/23 23:21:26 balay Exp $*/

/*
   This file contains routines for the basic map object implementation.
*/

#include "petsc.h"
#include "src/vec/vecimpl.h"
/* Returns the number of entries owned by this process */
int MapGetLocalSize_MPI(Map m,int *n)
{
  *n = m->n;
  return(0);
}

/* Returns the global number of entries across the communicator */
int MapGetSize_MPI(Map m,int *N)
{
  *N = m->N;
  return(0);
}

/* Returns this process's half-open ownership range [rstart,rend) */
int MapGetLocalRange_MPI(Map m,int *rstart,int *rend)
{
  if (rstart) *rstart = m->rstart;
  if (rend)   *rend   = m->rend;
  return(0);
}

/* Returns the array of ownership boundaries for all processes */
int MapGetGlobalRange_MPI(Map m,int *range[])
{
  *range = m->range;
  return(0);
}
/* Decrements the reference count; frees the map when it drops to zero */
int MapDestroy_MPI(Map m)
{
  int ierr;

  if (--m->refct > 0) return(0);
  ierr = PetscFree(m->range);CHKERRQ(ierr);
  PetscLogObjectDestroy(m);
  PetscHeaderDestroy(m);
  return(0);
}
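/*
   Method table for the MPI map: MapCreateMPI() below copies these
   function pointers into m->ops, through which the generic Map
   interface routines dispatch.
*/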
static struct _MapOps DvOps = {MapGetLocalSize_MPI,
                               MapGetSize_MPI,
                               MapGetLocalRange_MPI,
                               MapGetGlobalRange_MPI,
                               MapDestroy_MPI};
/*@C
   MapCreateMPI - Creates a map object.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the MPI communicator to use
.  n - local vector length (or PETSC_DECIDE to have it calculated if N is given)
-  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)

   Output Parameter:
.  mm - the map object

   Suggested by:
   Robert Clay and Alan Williams, developers of ISIS++, Sandia National Laboratories.

   Level: developer

   Concepts: maps^creating

.seealso: MapDestroy(), MapGetLocalSize(), MapGetSize(), MapGetGlobalRange(),
          MapGetLocalRange()

@*/
int MapCreateMPI(MPI_Comm comm,int n,int N,Map *mm)
{
  int ierr,i,rank,size;
  Map m;

  /* fill in whichever of n and N was given as PETSC_DECIDE */
  ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  PetscHeaderCreate(m,_p_Map,struct _MapOps,MAP_COOKIE,0,"Map",comm,MapDestroy,0);
  PetscLogObjectCreate(m);
  PetscLogObjectMemory(m,sizeof(struct _p_Map));
  ierr = PetscMemcpy(m->ops,&DvOps,sizeof(DvOps));CHKERRQ(ierr);
  ierr = PetscMalloc((size+1)*sizeof(int),&m->range);CHKERRQ(ierr);

  /* gather every process's local size, then convert the result to a
     running sum so that range[p] is the first entry owned by process p */
  ierr = MPI_Allgather(&n,1,MPI_INT,m->range+1,1,MPI_INT,comm);CHKERRQ(ierr);
  m->range[0] = 0;
  for (i=2; i<=size; i++) {
    m->range[i] += m->range[i-1];
  }
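  /*
     Worked example (illustrative): with size = 3 and local sizes
     n = 2, 3, 4, the allgather leaves range = {_,2,3,4}; after
     range[0] = 0 and the running sum, range = {0,2,5,9}, so
     process 1 owns entries [2,5).
  */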
  m->rstart = m->range[rank];
  m->rend   = m->range[rank+1];
  m->n      = n;
  m->N      = N;
  *mm       = m;
  return(0);
}
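/*
   Usage sketch (illustrative, not part of the original file): create a
   map over PETSC_COMM_WORLD with a global size of 100, letting PETSc
   choose the local sizes, then query and release it. Assumes PETSc has
   already been initialized with PetscInitialize().

     Map m;
     int ierr,rstart,rend;

     ierr = MapCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,100,&m);CHKERRQ(ierr);
     ierr = MapGetLocalRange(m,&rstart,&rend);CHKERRQ(ierr);
     ierr = MapDestroy(m);CHKERRQ(ierr);
*/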