Actual source code: axpy.c
/*$Id: axpy.c,v 1.53 2001/03/23 23:22:45 balay Exp $*/

#include "src/mat/matimpl.h"

/*@
   MatAXPY - Computes Y = a*X + Y.

   Collective on Mat

   Input Parameters:
+ X, Y - the matrices
- a - the scalar multiplier

   Contributed by: Matthew Knepley

   Notes:
   Since the current implementation of MatAXPY() uses MatGetRow() to access
   matrix data, efficiency is somewhat limited.

   Level: intermediate

.keywords: matrix, add

.seealso: MatAYPX()
@*/
int MatAXPY(Scalar *a,Mat X,Mat Y)
{
  int    m1,m2,n1,n2,i,*row,start,end,j,ncols,ierr;
  Scalar *val,*vals;

  ierr = MatGetSize(X,&m1,&n1);CHKERRQ(ierr);
  ierr = MatGetSize(Y,&m2,&n2);CHKERRQ(ierr);
  if (m1 != m2 || n1 != n2) SETERRQ4(PETSC_ERR_ARG_SIZ,"Non conforming matrix add: %d %d %d %d",m1,m2,n1,n2);

  if (X->ops->axpy) {
    /* use the matrix type's native implementation when available */
    ierr = (*X->ops->axpy)(a,X,Y);CHKERRQ(ierr);
  } else {
    /* fallback: add the locally owned rows of X into Y with MatGetRow()/MatSetValues() */
    ierr = MatGetOwnershipRange(X,&start,&end);CHKERRQ(ierr);
    if (*a == 1.0) {
      for (i=start; i<end; i++) {
        ierr = MatGetRow(X,i,&ncols,&row,&vals);CHKERRQ(ierr);
        ierr = MatSetValues(Y,1,&i,ncols,row,vals,ADD_VALUES);CHKERRQ(ierr);
        ierr = MatRestoreRow(X,i,&ncols,&row,&vals);CHKERRQ(ierr);
      }
    } else {
      /* scale each row of X by a into a work array before adding it to Y */
      ierr = PetscMalloc((n1+1)*sizeof(Scalar),&vals);CHKERRQ(ierr);
      for (i=start; i<end; i++) {
        ierr = MatGetRow(X,i,&ncols,&row,&val);CHKERRQ(ierr);
        for (j=0; j<ncols; j++) {
          vals[j] = (*a)*val[j];
        }
        ierr = MatSetValues(Y,1,&i,ncols,row,vals,ADD_VALUES);CHKERRQ(ierr);
        ierr = MatRestoreRow(X,i,&ncols,&row,&val);CHKERRQ(ierr);
      }
      ierr = PetscFree(vals);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  return(0);
}
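
/*
   Usage sketch (illustrative; not part of the original axpy.c). Assuming X and Y
   are assembled matrices of identical size and "alpha" is a hypothetical scalar,
   this overwrites Y with alpha*X + Y:

       Scalar alpha = 2.0;
       ierr = MatAXPY(&alpha,X,Y);CHKERRQ(ierr);
*/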

/*@
   MatShift - Computes Y = Y + a I, where a is a scalar and I is the identity matrix.

   Collective on Mat

   Input Parameters:
+ Y - the matrix to shift
- a - the scalar

   Level: intermediate

.keywords: matrix, add, shift

.seealso: MatDiagonalSet()
@*/
int MatShift(Scalar *a,Mat Y)
{
  int i,start,end,ierr;

  if (Y->ops->shift) {
    /* use the matrix type's native implementation when available */
    ierr = (*Y->ops->shift)(a,Y);CHKERRQ(ierr);
  } else {
    /* fallback: add a to each locally owned diagonal entry */
    ierr = MatGetOwnershipRange(Y,&start,&end);CHKERRQ(ierr);
    for (i=start; i<end; i++) {
      ierr = MatSetValues(Y,1,&i,1,&i,a,ADD_VALUES);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  return(0);
}
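
/*
   Usage sketch (illustrative; not part of the original axpy.c). Assuming A is an
   assembled square matrix and "sigma" is a hypothetical scalar, this adds sigma
   to every diagonal entry of A, i.e. A <- A + sigma*I:

       Scalar sigma = 0.5;
       ierr = MatShift(&sigma,A);CHKERRQ(ierr);
*/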

/*@
   MatDiagonalSet - Computes Y = Y + D, where D is a diagonal matrix that is
   represented as a vector, or sets Y[i,i] = D[i] when the InsertMode is
   INSERT_VALUES.

   Collective on Mat and Vec

   Input Parameters:
+ Y - the input matrix
. D - the diagonal matrix, represented as a vector
- is - INSERT_VALUES or ADD_VALUES

   Level: intermediate

.keywords: matrix, add, shift, diagonal

.seealso: MatShift()
@*/
int MatDiagonalSet(Mat Y,Vec D,InsertMode is)
{
  int i,start,end,ierr;

  if (Y->ops->diagonalset) {
    /* use the matrix type's native implementation when available */
    ierr = (*Y->ops->diagonalset)(Y,D,is);CHKERRQ(ierr);
  } else {
    int    vstart,vend;
    Scalar *v;

    /* fallback: insert/add the locally owned vector entries onto the diagonal */
    ierr = VecGetOwnershipRange(D,&vstart,&vend);CHKERRQ(ierr);
    ierr = MatGetOwnershipRange(Y,&start,&end);CHKERRQ(ierr);
    if (vstart != start || vend != end) {
      SETERRQ4(PETSC_ERR_ARG_SIZ,"Vector ownership range not compatible with matrix: %d %d vec %d %d mat",vstart,vend,start,end);
    }
    ierr = VecGetArray(D,&v);CHKERRQ(ierr);
    for (i=start; i<end; i++) {
      ierr = MatSetValues(Y,1,&i,1,&i,v+i-start,is);CHKERRQ(ierr);
    }
    ierr = VecRestoreArray(D,&v);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  return(0);
}
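
/*
   Usage sketch (illustrative; not part of the original axpy.c). Assuming A is an
   assembled matrix and "d" is a vector whose parallel layout matches the row
   ownership of A, this replaces the diagonal of A with the entries of d; using
   ADD_VALUES instead would add d to the existing diagonal:

       ierr = MatDiagonalSet(A,d,INSERT_VALUES);CHKERRQ(ierr);
*/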

/*@
   MatAYPX - Computes Y = X + a*Y.

   Collective on Mat

   Input Parameters:
+ X,Y - the matrices
- a - the scalar multiplier

   Contributed by: Matthew Knepley

   Notes:
   This routine is currently implemented by scaling Y with MatScale() and then
   adding X with MatAXPY().

   Level: intermediate

.keywords: matrix, add

.seealso: MatAXPY()
@*/
int MatAYPX(Scalar *a,Mat X,Mat Y)
{
  Scalar one = 1.0;
  int    mX,mY,nX,nY,ierr;

  ierr = MatGetSize(X,&mX,&nX);CHKERRQ(ierr);
  ierr = MatGetSize(Y,&mY,&nY);CHKERRQ(ierr);
  if (mX != mY || nX != nY) SETERRQ4(PETSC_ERR_ARG_SIZ,"Non conforming matrices: %d %d first %d %d second",mX,nX,mY,nY);

  ierr = MatScale(a,Y);CHKERRQ(ierr);
  ierr = MatAXPY(&one,X,Y);CHKERRQ(ierr);
  return(0);
}
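
/*
   Usage sketch (illustrative; not part of the original axpy.c). Assuming X and Y
   are assembled matrices of identical size and "beta" is a hypothetical scalar,
   this overwrites Y with X + beta*Y:

       Scalar beta = 0.5;
       ierr = MatAYPX(&beta,X,Y);CHKERRQ(ierr);
*/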

/*@
   MatComputeExplicitOperator - Computes the explicit matrix representation of
   an operator.

   Collective on Mat

   Input Parameter:
. inmat - the matrix

   Output Parameter:
. mat - the explicit operator

   Notes:
   This computation is done by applying the operator to the columns of the
   identity matrix.

   Currently, this routine uses a dense matrix format when 1 processor
   is used and a sparse format otherwise. This routine is costly in general,
   and is recommended for use only with relatively small systems.

   Level: advanced

.keywords: Mat, compute, explicit, operator
@*/
int MatComputeExplicitOperator(Mat inmat,Mat *mat)
{
  Vec      in,out;
  int      ierr,i,M,m,size,*rows,start,end;
  MPI_Comm comm;
  Scalar   *array,zero = 0.0,one = 1.0;

  comm = inmat->comm;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* query the operator's layout and create work vectors with the same parallel layout */
  ierr = MatGetLocalSize(inmat,&m,0);CHKERRQ(ierr);
  ierr = MatGetSize(inmat,&M,0);CHKERRQ(ierr);
  ierr = VecCreateMPI(comm,m,M,&in);CHKERRQ(ierr);
  ierr = VecDuplicate(in,&out);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(in,&start,&end);CHKERRQ(ierr);
  ierr = PetscMalloc((m+1)*sizeof(int),&rows);CHKERRQ(ierr);
  for (i=0; i<m; i++) {rows[i] = start + i;}

  /* the result matrix: dense in serial, AIJ in parallel */
  if (size == 1) {
    ierr = MatCreateSeqDense(comm,M,M,PETSC_NULL,mat);CHKERRQ(ierr);
  } else {
    ierr = MatCreateMPIAIJ(comm,m,m,M,M,0,0,0,0,mat);CHKERRQ(ierr);
  }

  /* apply the operator to each column e_i of the identity and store the result
     as column i of the new matrix */
  for (i=0; i<M; i++) {
    ierr = VecSet(&zero,in);CHKERRQ(ierr);
    ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(in);CHKERRQ(ierr);

    ierr = MatMult(inmat,in,out);CHKERRQ(ierr);

    ierr = VecGetArray(out,&array);CHKERRQ(ierr);
    ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecRestoreArray(out,&array);CHKERRQ(ierr);
  }
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecDestroy(out);CHKERRQ(ierr);
  ierr = VecDestroy(in);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return(0);
}
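
/*
   Usage sketch (illustrative; not part of the original axpy.c). Assuming "shell"
   is a (possibly matrix-free) matrix whose action is defined through MatMult(),
   this builds its explicit representation in "expl" and destroys it when done;
   the variable names are hypothetical:

       Mat expl;
       ierr = MatComputeExplicitOperator(shell,&expl);CHKERRQ(ierr);
       ...  use expl, e.g. view or factor it  ...
       ierr = MatDestroy(expl);CHKERRQ(ierr);
*/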