Actual source code: pvec2.c

/*
     Code for some of the parallel vector primitives.
*/
#include "src/vec/impls/mpi/pvecimpl.h"
#include "src/inline/dot.h"
#include "petscblaslapack.h"

#define do_not_use_ethernet
PetscErrorCode Ethernet_Allreduce(PetscReal *in,PetscReal *out,PetscInt n,MPI_Datatype type,MPI_Op op,MPI_Comm comm)
{
  /* type and op give this routine an MPI_Allreduce-like signature; only summation of MPIU_REAL is implemented */
  PetscMPIInt    rank,size;
  PetscInt       i;
  MPI_Status     status;

  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  /* reduction sweep up the chain: add the partial sums received from the previous rank */
  if (rank) {
    MPI_Recv(out,n,MPIU_REAL,rank-1,837,comm,&status);
    for (i=0; i<n; i++) in[i] += out[i];
  }
  if (rank != size-1) {
    MPI_Send(in,n,MPIU_REAL,rank+1,837,comm);
  }
  /* broadcast sweep back down: the last rank now holds the complete sums */
  if (rank == size-1) {
    for (i=0; i<n; i++) out[i] = in[i];
  } else {
    MPI_Recv(out,n,MPIU_REAL,rank+1,838,comm,&status);
  }
  if (rank) {
    MPI_Send(out,n,MPIU_REAL,rank-1,838,comm);
  }
  return 0;
}
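
/*
   Illustrative sketch (not part of the original source): Ethernet_Allreduce has the same
   effect as MPI_Allreduce with MPI_SUM on a buffer of PetscReal, computed with a simple
   up-and-down chain instead of the library collective.  A hypothetical wrapper macro that
   would let a build switch between the two might look like

     #if defined(use_ethernet)
     #define VecSumAllreduce(in,out,n,comm) Ethernet_Allreduce(in,out,n,MPIU_REAL,MPI_SUM,comm)
     #else
     #define VecSumAllreduce(in,out,n,comm) MPI_Allreduce(in,out,n,MPIU_REAL,MPI_SUM,comm)
     #endif

   Here use_ethernet and VecSumAllreduce are assumed names for illustration only; this file
   defines do_not_use_ethernet above, so the MPI_Allreduce path remains the default.
*/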


PetscErrorCode VecMDot_MPI(PetscInt nv,Vec xin,const Vec y[],PetscScalar *z)
{
  PetscScalar awork[128],*work = awork;

  /* use the stack buffer unless more than 128 results are requested */
  if (nv > 128) {
    PetscMalloc(nv*sizeof(PetscScalar),&work);
  }
  /* local partial inner products, then sum the contributions of all processes */
  VecMDot_Seq(nv,xin,y,work);
  MPI_Allreduce(work,z,nv,MPIU_SCALAR,PetscSum_Op,xin->comm);
  if (nv > 128) {
    PetscFree(work);
  }
  return(0);
}
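
/*
   Worked note (illustration, not original source): on each process VecMDot_Seq() fills
   work[j] with the inner product of xin and y[j] restricted to the locally owned entries,
   and the MPI_Allreduce with PetscSum_Op adds these partial results, so on every rank

       z[j] = sum over processes p of work_p[j],   j = 0,...,nv-1

   which is the full parallel inner product.  The awork[128] stack buffer avoids a malloc
   in the common case of few vectors.  VecMTDot_MPI() below follows the same pattern for
   the transposed (unconjugated) variant.
*/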

PetscErrorCode VecMTDot_MPI(PetscInt nv,Vec xin,const Vec y[],PetscScalar *z)
{
  PetscScalar awork[128],*work = awork;

  /* same pattern as VecMDot_MPI(): local partial results followed by a global sum */
  if (nv > 128) {
    PetscMalloc(nv*sizeof(PetscScalar),&work);
  }
  VecMTDot_Seq(nv,xin,y,work);
  MPI_Allreduce(work,z,nv,MPIU_SCALAR,PetscSum_Op,xin->comm);
  if (nv > 128) {
    PetscFree(work);
  }
  return(0);
}

PetscErrorCode VecNorm_MPI(Vec xin,NormType type,PetscReal *z)
{
  Vec_MPI        *x = (Vec_MPI*)xin->data;
  PetscReal      sum,work = 0.0;
  PetscScalar    *xx = x->array;
  PetscInt       n = xin->n;

  if (type == NORM_2 || type == NORM_FROBENIUS) {
    /* local sum of squares; the square root is taken only after the global reduction */
#if defined(PETSC_HAVE_SLOW_NRM2)
#if defined(PETSC_USE_FORTRAN_KERNEL_NORM)
    fortrannormsqr_(xx,&n,&work);
#elif defined(PETSC_USE_UNROLLED_NORM)
    /* handle the n mod 4 leftover entries first; the fall-through is intentional and
       the n -= 4 in case 1 accounts for this partial pass */
    switch (n & 0x3) {
      case 3: work += PetscRealPart(xx[0]*PetscConj(xx[0])); xx++;
      case 2: work += PetscRealPart(xx[0]*PetscConj(xx[0])); xx++;
      case 1: work += PetscRealPart(xx[0]*PetscConj(xx[0])); xx++; n -= 4;
    }
    while (n>0) {
      work += PetscRealPart(xx[0]*PetscConj(xx[0])+xx[1]*PetscConj(xx[1])+
                            xx[2]*PetscConj(xx[2])+xx[3]*PetscConj(xx[3]));
      xx += 4; n -= 4;
    }
#else
    {PetscInt i; for (i=0; i<n; i++) work += PetscRealPart(xx[i]*PetscConj(xx[i]));}
#endif
#else
    {PetscBLASInt bn = (PetscBLASInt)n, one = 1;
      work  = BLnrm2_(&bn,xx,&one);
      work *= work;
    }
#endif
    MPI_Allreduce(&work,&sum,1,MPIU_REAL,MPI_SUM,xin->comm);
    *z = sqrt(sum);
    PetscLogFlops(2*xin->n);
  } else if (type == NORM_1) {
    /* Find the local sum */
    VecNorm_Seq(xin,NORM_1,&work);
    /* Find the global sum */
    MPI_Allreduce(&work,z,1,MPIU_REAL,MPI_SUM,xin->comm);
  } else if (type == NORM_INFINITY) {
    /* Find the local max */
    VecNorm_Seq(xin,NORM_INFINITY,&work);
    /* Find the global max */
    MPI_Allreduce(&work,z,1,MPIU_REAL,MPI_MAX,xin->comm);
  } else if (type == NORM_1_AND_2) {
    /* reduce the 1-norm and the squared 2-norm together in a single Allreduce */
    PetscReal temp[2];
    VecNorm_Seq(xin,NORM_1,temp);
    VecNorm_Seq(xin,NORM_2,temp+1);
    temp[1] = temp[1]*temp[1];
    MPI_Allreduce(temp,z,2,MPIU_REAL,MPI_SUM,xin->comm);
    z[1] = sqrt(z[1]);
  }
  return(0);
}
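
/*
   Worked note (illustration, not original source): VecNorm_MPI() above reduces local
   quantities so that

       NORM_2:        *z = sqrt( sum_p sum_i |x_i|^2 )
       NORM_1:        *z = sum_p sum_i |x_i|
       NORM_INFINITY: *z = max_p max_i |x_i|
       NORM_1_AND_2:  z[0], z[1] as above, obtained from one MPI_Allreduce of the pair
                      { sum_i |x_i|, sum_i |x_i|^2 } followed by a single square root

   where p runs over the processes and i over the entries each process owns.  Squaring the
   local 2-norm before the reduction is what makes summing the partial results meaningful.
*/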

/*
       These two functions are the MPI reduction operations used for max and min with index.
   The call below to MPI_Op_create() converts the function Vec[Max,Min]_Local() to the
   MPI operator Vec[Max,Min]_Local_Op.
*/
MPI_Op VecMax_Local_Op = 0;
MPI_Op VecMin_Local_Op = 0;
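
/*
   Illustrative sketch (not part of the original source): the MPI_Op_create() registration
   the comment above refers to is not shown in this excerpt.  A minimal version, assuming
   that call site, might read

     PetscErrorCode ierr;
     ierr = MPI_Op_create(VecMax_Local,1,&VecMax_Local_Op);CHKERRQ(ierr);
     ierr = MPI_Op_create(VecMin_Local,1,&VecMin_Local_Op);CHKERRQ(ierr);

   Passing 1 for the second argument marks the operation as commutative, so MPI may combine
   the per-process contributions in any order.
*/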

void VecMax_Local(void *in,void *out,PetscMPIInt *cnt,MPI_Datatype *datatype)
{
  PetscReal *xin = (PetscReal*)in,*xout = (PetscReal*)out;

  PetscFunctionBegin;
  if (*datatype != MPIU_REAL) {
    (*PetscErrorPrintf)("Can only handle MPIU_REAL data types");
    MPI_Abort(MPI_COMM_WORLD,1);
  }
  /* slot 0 carries the value, slot 1 the index where it occurs */
  if (xin[0] > xout[0]) {
    xout[0] = xin[0];
    xout[1] = xin[1];
  }
  PetscStackPop;
  return; /* cannot return a value */
}

void VecMin_Local(void *in,void *out,PetscMPIInt *cnt,MPI_Datatype *datatype)
{
  PetscReal *xin = (PetscReal*)in,*xout = (PetscReal*)out;

  PetscFunctionBegin;
  if (*datatype != MPIU_REAL) {
    (*PetscErrorPrintf)("Can only handle MPIU_REAL data types");
    MPI_Abort(MPI_COMM_WORLD,1);
  }
  /* slot 0 carries the value, slot 1 the index where it occurs */
  if (xin[0] < xout[0]) {
    xout[0] = xin[0];
    xout[1] = xin[1];
  }
  PetscStackPop;
  return; /* cannot return a value */
}

PetscErrorCode VecMax_MPI(Vec xin,PetscInt *idx,PetscReal *z)
{
  PetscReal      work;

  /* Find the local max */
  VecMax_Seq(xin,idx,&work);

  /* Find the global max */
  if (!idx) {
    MPI_Allreduce(&work,z,1,MPIU_REAL,MPI_MAX,xin->comm);
  } else {
    PetscReal work2[2],z2[2];
    PetscInt  rstart;

    /* ship the value together with the global index of where it occurs */
    VecGetOwnershipRange(xin,&rstart,PETSC_NULL);
    work2[0] = work;
    work2[1] = *idx + rstart;
    MPI_Allreduce(work2,z2,2,MPIU_REAL,VecMax_Local_Op,xin->comm);
    *z   = z2[0];
    *idx = (PetscInt)z2[1];
  }
  return(0);
}
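
/*
   Worked note (illustration, not original source): VecMax_MPI() above and VecMin_MPI()
   below pack the local result as a pair of PetscReal values,

       work2[0] = local extremum,   work2[1] = (PetscReal)(local index + rstart),

   so a single length-2 reduction with the custom operator carries both the extremal value
   and the global index of where it occurs.  Storing the index in a PetscReal is exact as
   long as the global index is representable in that floating point type.
*/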

PetscErrorCode VecMin_MPI(Vec xin,PetscInt *idx,PetscReal *z)
{
  PetscReal work;

  /* Find the local min */
  VecMin_Seq(xin,idx,&work);

  /* Find the global min */
  if (!idx) {
    MPI_Allreduce(&work,z,1,MPIU_REAL,MPI_MIN,xin->comm);
  } else {
    PetscReal work2[2],z2[2];
    PetscInt  rstart;

    VecGetOwnershipRange(xin,&rstart,PETSC_NULL);
    work2[0] = work;
    work2[1] = *idx + rstart;
    MPI_Allreduce(work2,z2,2,MPIU_REAL,VecMin_Local_Op,xin->comm);
    *z   = z2[0];
    *idx = (PetscInt)z2[1];
  }
  return(0);
}