Actual source code: mpiaijviennacl.cxx

petsc-dev 2014-02-02
#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>   /*I "petscmat.h" I*/
#include <../src/mat/impls/aij/seq/seqviennacl/viennaclmatimpl.h>

PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJViennaCL(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;

  PetscLayoutSetUp(B->rmap);
  PetscLayoutSetUp(B->cmap);
  if (!B->preallocated) {
    /* Explicitly create the two MATSEQAIJVIENNACL matrices: the diagonal block b->A and the off-diagonal block b->B. */
    MatCreate(PETSC_COMM_SELF,&b->A);
    MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
    MatSetType(b->A,MATSEQAIJVIENNACL);
    PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
    MatCreate(PETSC_COMM_SELF,&b->B);
    MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
    MatSetType(b->B,MATSEQAIJVIENNACL);
    PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
  }
  MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
  MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
  B->preallocated = PETSC_TRUE;
  return(0);
}

PetscErrorCode  MatGetVecs_MPIAIJViennaCL(Mat mat,Vec *right,Vec *left)
{
  /* Create VECVIENNACL vectors conforming to the column layout (right) and row layout (left) of the matrix. */
  if (right) {
    VecCreate(PetscObjectComm((PetscObject)mat),right);
    VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
    VecSetBlockSize(*right,mat->rmap->bs);
    VecSetType(*right,VECVIENNACL);
    VecSetLayout(*right,mat->cmap);
  }
  if (left) {
    VecCreate(PetscObjectComm((PetscObject)mat),left);
    VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
    VecSetBlockSize(*left,mat->rmap->bs);
    VecSetType(*left,VECVIENNACL);
    VecSetLayout(*left,mat->rmap);
  }
  return(0);
}
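/*
   Usage sketch (illustrative addition, not part of the PETSc source): because the
   type provides a MatGetVecs() implementation, compatible VECVIENNACL work vectors
   can be obtained directly from an assembled matrix.  The helper name below is
   hypothetical.
*/
static PetscErrorCode ExampleMultWithViennaCLVecs(Mat A)
{
  Vec x,y;

  MatGetVecs(A,&x,&y);   /* x conforms to the columns of A, y to its rows */
  VecSet(x,1.0);
  MatMult(A,x,y);        /* y = A*x, computed through ViennaCL */
  VecDestroy(&x);
  VecDestroy(&y);
  return(0);
}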


PetscErrorCode MatDestroy_MPIAIJViennaCL(Mat A)
{
  MatDestroy_MPIAIJ(A);
  return(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJViennaCL(Mat A)
{
  /* Build an ordinary MPIAIJ matrix, then override the pieces that differ for ViennaCL. */
  MatCreate_MPIAIJ(A);
  PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJViennaCL);
  A->ops->getvecs        = MatGetVecs_MPIAIJViennaCL;

  PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJVIENNACL);
  return(0);
}


/*@
   MatCreateAIJViennaCL - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format).  This matrix will ultimately be pushed down
   to GPUs and use the ViennaCL library for calculations.  For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameters d_nz and o_nz (or the arrays d_nnz and o_nnz).  By setting these
   parameters accurately, performance during matrix assembly can be increased
   substantially.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated)
.  n - number of local columns (or PETSC_DECIDE to have it calculated)
.  M - number of global rows (or PETSC_DETERMINE to have it calculated)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated)
.  d_nz - number of nonzeros per row in the DIAGONAL portion of the local submatrix (same for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the DIAGONAL portion of the local submatrix (possibly different for each row) or NULL
.  o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of the local submatrix (same for all local rows)
-  o_nnz - array containing the number of nonzeros in the various rows of the OFF-DIAGONAL portion of the local submatrix (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
   A usage sketch follows this routine.

   Notes:
   If d_nnz (respectively o_nnz) is given then d_nz (respectively o_nz) is ignored.

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage) is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
   Set d_nz=o_nz=PETSC_DEFAULT and d_nnz=o_nnz=NULL for PETSc to control dynamic
   memory allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance; see the users' manual chapter on matrices.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatCreateAIJCUSP(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJVIENNACL, MATAIJVIENNACL
@*/
PetscErrorCode  MatCreateAIJViennaCL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscMPIInt    size;

  MatCreate(comm,A);
  MatSetSizes(*A,m,n,M,N);
  MPI_Comm_size(comm,&size);
  if (size > 1) {
    MatSetType(*A,MATMPIAIJVIENNACL);
    MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
  } else {
    MatSetType(*A,MATSEQAIJVIENNACL);
    MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
  }
  return(0);
}
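/*
   Usage sketch (illustrative addition, not part of the PETSc source): calling
   MatCreateAIJViennaCL() directly.  The global size (1000) and the per-row
   nonzero estimates (5 in the diagonal block, 2 in the off-diagonal block) are
   made-up values, and the helper name is hypothetical; error checking is
   omitted in the same style as the listing above.
*/
static PetscErrorCode ExampleCreateAIJViennaCL(MPI_Comm comm,Mat *A)
{
  /* Let PETSc pick the local row/column split; preallocate 5 nonzeros per row
     in the diagonal block and 2 per row in the off-diagonal block. */
  MatCreateAIJViennaCL(comm,PETSC_DECIDE,PETSC_DECIDE,1000,1000,5,NULL,2,NULL,A);
  /* ... insert entries with MatSetValues(), then assemble ... */
  MatAssemblyBegin(*A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*A,MAT_FINAL_ASSEMBLY);
  return(0);
}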

/*M
   MATAIJVIENNACL - MATMPIAIJVIENNACL = "aijviennacl" = "mpiaijviennacl" - A matrix type to be used for sparse matrices.

   A matrix type (CSR format) whose data resides on GPUs.
   All matrix calculations are performed using the ViennaCL library.

   This matrix type is identical to MATSEQAIJVIENNACL when constructed with a single-process communicator,
   and to MATMPIAIJVIENNACL otherwise.  As a result, MatSeqAIJSetPreallocation() is supported
   for single-process communicators, and MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity, as sketched below.

   Options Database Key:
.  -mat_type mpiaijviennacl - sets the matrix type to "mpiaijviennacl" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateAIJViennaCL(), MATSEQAIJVIENNACL, MatCreateSeqAIJViennaCL()
M*/
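/*
   Usage sketch (illustrative addition, not part of the PETSc source): the
   MatCreate()/MatSetType()/MatSetFromOptions() path, where -mat_type
   mpiaijviennacl (or aijviennacl) can select this type at run time.  Both
   preallocation routines are called, as recommended above; the one that does
   not match the final type is simply ignored.  The sizes and nonzero counts
   are made-up values, and the helper name is hypothetical.
*/
static PetscErrorCode ExampleAIJViennaCLFromOptions(MPI_Comm comm,Mat *A)
{
  MatCreate(comm,A);
  MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,1000,1000);
  MatSetType(*A,MATAIJVIENNACL);                /* default type; -mat_type may override it */
  MatSetFromOptions(*A);
  MatSeqAIJSetPreallocation(*A,5,NULL);         /* takes effect on a single process */
  MatMPIAIJSetPreallocation(*A,5,NULL,2,NULL);  /* takes effect on multiple processes */
  return(0);
}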