#if !defined(__CUSPARSEMATIMPL)
#define __CUSPARSEMATIMPL

#include <petsc/private/cudavecimpl.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#if (CUSPARSE_VER_MAJOR > 10 || (CUSPARSE_VER_MAJOR == 10 && CUSPARSE_VER_MINOR >= 2)) /* According to cuda/10.1.168 on OLCF Summit */
#define CHKERRCUSPARSE(stat) \
do { \
  if (PetscUnlikely(stat)) { \
    const char *name  = cusparseGetErrorName(stat); \
    const char *descr = cusparseGetErrorString(stat); \
    SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d (%s) : %s",(int)stat,name,descr); \
  } \
} while (0)
#else
#define CHKERRCUSPARSE(stat) do {if (PetscUnlikely(stat)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d",(int)stat);} while (0)
#endif
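/* Example use of CHKERRCUSPARSE (a minimal sketch, not compiled as part of
   this header; the function name exampleCusparseCreate is hypothetical).
   Because SETERRQ* returns from the enclosing function, the macro must be
   used inside a function returning PetscErrorCode:

     static PetscErrorCode exampleCusparseCreate(cusparseHandle_t *handle)
     {
       cusparseStatus_t stat;

       PetscFunctionBegin;
       stat = cusparseCreate(handle);CHKERRCUSPARSE(stat);
       PetscFunctionReturn(0);
     }
*/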
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)              cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)               cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m)       cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseCcsrmm((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(j),(k),(cuComplex*)(l),(m),(cuComplex*)(n),(cuComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)          cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)                 cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)              cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                      cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)              cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)               cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m)       cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseZcsrmm((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(j),(k),(cuDoubleComplex*)(l),(m),(cuDoubleComplex*)(n),(cuDoubleComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)          cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)                 cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)              cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                      cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
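/* The cusparse_* macros dispatch to the precision-specific cuSPARSE routine
   while letting callers pass PetscScalar data uniformly. A minimal SpMV
   sketch, y = A*x on a CSR matrix (illustrative only; handle, descr, and the
   device arrays are assumed to be valid and set up elsewhere):

     cusparseStatus_t stat;
     stat = cusparse_csr_spmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,
                              num_rows,num_cols,num_entries,
                              &PETSC_CUSPARSE_ONE,descr,
                              values,row_offsets,column_indices,
                              x,&PETSC_CUSPARSE_ZERO,y);CHKERRCUSPARSE(stat);
*/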
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
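/* Example: filling a CsrMatrix with the 2x2 identity (a minimal sketch, not
   part of this header; assumes a valid CUDA context). The thrust device
   vectors are constructed directly from host arrays:

     CsrMatrix *A = new CsrMatrix;
     A->num_rows    = 2;
     A->num_cols    = 2;
     A->num_entries = 2;
     int         rows[] = {0, 1, 2};   // row offsets, length num_rows+1
     int         cols[] = {0, 1};      // column index of each nonzero
     PetscScalar vals[] = {1.0, 1.0};  // nonzero values
     A->row_offsets    = new THRUSTINTARRAY32(rows, rows+3);
     A->column_indices = new THRUSTINTARRAY32(cols, cols+2);
     A->values         = new THRUSTARRAY(vals, vals+2);
*/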
/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};
/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix: either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;        /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
};
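/* Example: allocating and initializing the device-resident scalars of a
   Mat_SeqAIJCUSPARSEMultStruct (a minimal sketch; CHKERRCUDA is assumed to
   be available from the PETSc error headers):

     Mat_SeqAIJCUSPARSEMultStruct *s = new Mat_SeqAIJCUSPARSEMultStruct;
     cudaError_t err;
     err = cudaMalloc((void**)&s->alpha,    sizeof(PetscScalar));CHKERRCUDA(err);
     err = cudaMalloc((void**)&s->beta_zero,sizeof(PetscScalar));CHKERRCUDA(err);
     err = cudaMalloc((void**)&s->beta_one, sizeof(PetscScalar));CHKERRCUDA(err);
     err = cudaMemcpy(s->alpha,    &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
     err = cudaMemcpy(s->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
     err = cudaMemcpy(s->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
*/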
/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};
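/* Example: applying the row permutation in rpermIndices to gather a permuted
   right-hand side into workVector ahead of the triangular solves (a minimal
   sketch; requires <thrust/gather.h>, barray is assumed to be a device
   pointer to the right-hand side, and A the factored matrix):

     Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
     thrust::device_ptr<const PetscScalar> b = thrust::device_pointer_cast(barray);
     thrust::gather(fs->rpermIndices->begin(), fs->rpermIndices->end(),
                    b, fs->workVector->begin());
*/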
/* This is a larger struct holding all the matrices for an SpMV and SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* row offsets on the GPU in non-compressed-row format; used to convert CSR to CSC */
  PetscInt                     nrows;           /* number of rows of the matrix seen by the GPU */
  MatCUSPARSEStorageFormat     format;          /* the storage format for the matrix on the device */
  cudaStream_t                 stream;          /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;          /* a handle to the cusparse library ... this may not be owned (if we are working in parallel, i.e. multiple GPUs) */
  PetscObjectState             nonzerostate;    /* track the nonzero state to possibly recreate the GPU matrix */
  PetscBool                    transgen;        /* whether or not to generate an explicit transpose for MatMultTranspose operations */
};
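/* The mat field of each Mat_SeqAIJCUSPARSEMultStruct is opaque; format tells
   us how to interpret it. A minimal sketch (assuming A is a MATSEQAIJCUSPARSE
   matrix whose GPU data is current):

     Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
     if (cusp->format == MAT_CUSPARSE_CSR) {
       CsrMatrix *csr = (CsrMatrix*)cusp->mat->mat;             // CSR storage
       PetscInt  nnz  = csr->num_entries;
     } else {
       cusparseHybMat_t hyb = (cusparseHybMat_t)cusp->mat->mat; // ELL or HYB storage
     }
*/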
PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
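/* Example: sharing a user-managed stream and cuSPARSE handle with a matrix
   (a minimal sketch of one plausible call sequence; A is assumed to be a
   MATSEQAIJCUSPARSE matrix and the code to live in a function returning
   PetscErrorCode). The matrix does not take ownership, so the caller clears
   the handle from the matrix before destroying it:

     PetscErrorCode   ierr;
     cudaStream_t     stream;
     cusparseHandle_t handle;
     cudaError_t      cerr;
     cusparseStatus_t stat;

     cerr = cudaStreamCreate(&stream);CHKERRCUDA(cerr);
     stat = cusparseCreate(&handle);CHKERRCUSPARSE(stat);
     stat = cusparseSetStream(handle,stream);CHKERRCUSPARSE(stat);
     ierr = MatCUSPARSESetStream(A,stream);CHKERRQ(ierr);
     ierr = MatCUSPARSESetHandle(A,handle);CHKERRQ(ierr);
     ierr = MatCUSPARSECopyToGPU(A);CHKERRQ(ierr);
     // ... use A in SpMV and triangular solves ...
     ierr = MatCUSPARSEClearHandle(A);CHKERRQ(ierr);
     stat = cusparseDestroy(handle);CHKERRCUSPARSE(stat);
     cerr = cudaStreamDestroy(stream);CHKERRCUDA(cerr);
*/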
#endif