Actual source code: cusparsematimpl.h
#ifndef PETSC_CUSPARSEMATIMPL_H
#define PETSC_CUSPARSEMATIMPL_H

#include <petscpkg_version.h>
#include <../src/vec/vec/impls/seq/cupm/vecseqcupm.hpp>
#include <petsc/private/petsclegacycupmblas.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsrilu02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsric02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsrilu02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsric02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrilu02_bufferSize cusparseScsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseScsrilu02_analysis
#define cusparseXcsrilu02 cusparseScsrilu02
#define cusparseXcsric02_bufferSize cusparseScsric02_bufferSize
#define cusparseXcsric02_analysis cusparseScsric02_analysis
#define cusparseXcsric02 cusparseScsric02
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrilu02_bufferSize cusparseDcsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseDcsrilu02_analysis
#define cusparseXcsrilu02 cusparseDcsrilu02
#define cusparseXcsric02_bufferSize cusparseDcsric02_bufferSize
#define cusparseXcsric02_analysis cusparseDcsric02_analysis
#define cusparseXcsric02 cusparseDcsric02
#endif
#endif
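/* A hedged usage sketch (not actual PETSc code) of the precision-agnostic aliases
   above: ILU(0) through the legacy csrilu02 API follows a buffer-size / analysis /
   factorization sequence; all locals (handle, m, nnz, descr, a, i, j, info, policy)
   are illustrative:

     int   bufSize;
     void *buf;
     PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(handle, m, nnz, descr, a, i, j, info, &bufSize));
     PetscCallCUDA(cudaMalloc(&buf, bufSize));
     PetscCallCUSPARSE(cusparseXcsrilu02_analysis(handle, m, nnz, descr, a, i, j, info, policy, buf));
     PetscCallCUSPARSE(cusparseXcsrilu02(handle, m, nnz, descr, a, i, j, info, policy, buf));
*/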
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
#define csrsvInfo_t csrsv2Info_t
#define cusparseCreateCsrsvInfo cusparseCreateCsrsv2Info
#define cusparseDestroyCsrsvInfo cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseCcsrsv2_bufferSize(a, b, c, d, e, (cuComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseCcsrsv2_analysis(a, b, c, d, e, (const cuComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseCcsrsv2_solve(a, b, c, d, (const cuComplex *)(e), f, (const cuComplex *)(g), h, i, j, (const cuComplex *)(k), (cuComplex *)(l), m, n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseZcsrsv2_bufferSize(a, b, c, d, e, (cuDoubleComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseZcsrsv2_analysis(a, b, c, d, e, (const cuDoubleComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseZcsrsv2_solve(a, b, c, d, (const cuDoubleComplex *)(e), f, (const cuDoubleComplex *)(g), h, i, j, (const cuDoubleComplex *)(k), (cuDoubleComplex *)(l), m, n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize cusparseScsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseScsrsv2_analysis
#define cusparseXcsrsv_solve cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize cusparseDcsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseDcsrsv2_analysis
#define cusparseXcsrsv_solve cusparseDcsrsv2_solve
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
#define csrsvInfo_t cusparseSolveAnalysisInfo_t
#define cusparseCreateCsrsvInfo cusparseCreateSolveAnalysisInfo
#define cusparseDestroyCsrsvInfo cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) cusparseCcsrsv_solve((a), (b), (c), (cuComplex *)(e), (f), (cuComplex *)(g), (h), (i), (j), (cuComplex *)(k), (cuComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseCcsrsv_analysis((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (h), (i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) \
  cusparseZcsrsv_solve((a), (b), (c), (cuDoubleComplex *)(e), (f), (cuDoubleComplex *)(g), (h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseZcsrsv_analysis((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (h), (i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve cusparseScsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseScsrsv_analysis(a, b, c, d, e, f, g, h, i)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve cusparseDcsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseDcsrsv_analysis(a, b, c, d, e, f, g, h, i)
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
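/* The same three-phase pattern applies to the triangular-solve aliases above.
   A hedged sketch under the CUDA >= 9 csrsv2 names (locals illustrative):

     PetscCallCUSPARSE(cusparseXcsrsv_buffsize(handle, op, m, nnz, descr, a, i, j, info, &bufSize));
     PetscCallCUSPARSE(cusparseXcsrsv_analysis(handle, op, m, nnz, descr, a, i, j, info, policy, buf));
     PetscCallCUSPARSE(cusparseXcsrsv_solve(handle, op, m, nnz, &PETSC_CUSPARSE_ONE, descr, a, i, j, info, x, y, policy, buf));
*/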
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgeam2(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseCcsrgeam2_bufferSizeExt(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2_bufferSizeExt(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_R_32F
#define cusparse_csr_spgeam cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_R_64F
#define cusparse_csr_spgeam cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseCcsrmv((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (cuComplex *)(h), (i), (j), (cuComplex *)(k), (cuComplex *)(l), (cuComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) cusparseCcsrmm((a), (b), (c), (d), (e), (f), (cuComplex *)(g), (h), (cuComplex *)(i), (j), (k), (cuComplex *)(l), (m), (cuComplex *)(n), (cuComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseCcsr2csc((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (cuComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseChybmv((a), (b), (cuComplex *)(c), (d), (e), (cuComplex *)(f), (cuComplex *)(g), (cuComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseCcsr2hyb((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseChyb2csr((a), (b), (c), (cuComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgemm(a, b, c, d, e, f, g, h, (cuComplex *)i, j, k, l, m, (cuComplex *)n, o, p, q, (cuComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) cusparseCcsrgeam(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseZcsrmv((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (cuDoubleComplex *)(h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l), (cuDoubleComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
  cusparseZcsrmm((a), (b), (c), (d), (e), (f), (cuDoubleComplex *)(g), (h), (cuDoubleComplex *)(i), (j), (k), (cuDoubleComplex *)(l), (m), (cuDoubleComplex *)(n), (cuDoubleComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseZcsr2csc((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (cuDoubleComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseZhybmv((a), (b), (cuDoubleComplex *)(c), (d), (e), (cuDoubleComplex *)(f), (cuDoubleComplex *)(g), (cuDoubleComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseZcsr2hyb((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseZhyb2csr((a), (b), (c), (cuDoubleComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseZcsrgemm(a, b, c, d, e, f, g, h, (cuDoubleComplex *)i, j, k, l, m, (cuDoubleComplex *)n, o, p, q, (cuDoubleComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) \
  cusparseZcsrgeam(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb cusparseScsr2hyb
#define cusparse_hyb2csr cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb cusparseDcsr2hyb
#define cusparse_hyb2csr cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt          num_rows;
  PetscInt          num_cols;
  PetscInt          num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
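/* Illustrative example (not part of PETSc): the 3x3 matrix
     [1 0 2]
     [0 3 0]
     [4 0 5]
   would be stored with num_rows = num_cols = 3, num_entries = 5, and
     row_offsets    = {0, 2, 3, 5}
     column_indices = {0, 2, 1, 0, 2}
     values         = {1, 2, 3, 4, 5}
*/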
/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t    descr;
  cusparseOperation_t   solveOp;
  CsrMatrix            *csrMat;
  csrsvInfo_t           solveInfo;
  cusparseSolvePolicy_t solvePolicy; /* whether level information is generated and used */
  int                   solveBufferSize;
  void                 *solveBuffer;
  size_t                csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void                 *csr2cscBuffer;
  PetscScalar          *AA_h; /* managed host buffer for moving values to the GPU */
};
/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                   handle;                  /* a handle to the cusparse library */
  PetscInt                           nnz;                     /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  PetscScalar                       *a_band_d;                /* GPU data for banded CSR LU factorization matrix diag(L)=1 */
  int                               *i_band_d;                /* this could be optimized away */
  cudaDeviceProp                     dev_prop;
  PetscBool                          init_dev_prop;
  /* csrilu0/csric0 appeared in cusparse-8.0, but we use them along with cusparseSpSV,
     which first appeared in cusparse-11.5 with cuda-11.3.
  */
  PetscBool factorizeOnDevice; /* Do factorization on device or not */
#if CUSPARSE_VERSION >= 11500
  PetscScalar *csrVal;
  int         *csrRowPtr, *csrColIdx; /* a,i,j of M. Using int since some cusparse APIs only support 32-bit indices */

  /* Mixed mat descriptor types? yes, different cusparse APIs use different types */
  cusparseMatDescr_t   matDescr_M;
  cusparseSpMatDescr_t spMatDescr_L, spMatDescr_U;
  cusparseSpSVDescr_t  spsvDescr_L, spsvDescr_Lt, spsvDescr_U, spsvDescr_Ut;

  cusparseDnVecDescr_t dnVecDescr_X, dnVecDescr_Y;
  PetscScalar         *X, *Y; /* data array of dnVec X and Y */
  /* Mixed size types? yes, CUDA-11.7.0 declared cusparseDcsrilu02_bufferSizeExt(), which returns size_t, but did not implement it! */
  int    factBufferSize_M; /* M ~= LU or LLt */
  size_t spsvBufferSize_L, spsvBufferSize_Lt, spsvBufferSize_U, spsvBufferSize_Ut;
  /* cusparse needs various buffers for factorization and solve of L, U, Lt, or Ut.
     To save memory, we share the factorization buffer with one of spsvBuffer_L/U.
  */
  void *factBuffer_M, *spsvBuffer_L, *spsvBuffer_U, *spsvBuffer_Lt, *spsvBuffer_Ut;

  csrilu02Info_t        ilu0Info_M;
  csric02Info_t         ic0Info_M;
  int                   structural_zero, numerical_zero;
  cusparseSolvePolicy_t policy_M;
  /* In MatSolveTranspose() for ILU0, we use these two flags to do the solve on demand */
  PetscBool createdTransposeSpSVDescr;    /* Have we created SpSV descriptors for Lt, Ut? */
  PetscBool updatedTransposeSpSVAnalysis; /* Have we updated SpSV analysis with the latest L, U values? */

  PetscLogDouble numericFactFlops; /* Estimated FLOPs in ILU0/ICC0 numeric factorization */
#endif
};
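/* A hedged sketch (not verbatim PETSc code) of how the SpSV members above are
   typically driven with the cusparse generic API (11.5+); `fs` is an illustrative
   pointer to a Mat_SeqAIJCUSPARSETriFactors:

     PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
     PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); // once per factorization
     PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L)); // per MatSolve
*/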
struct Mat_CusparseSpMV {
  PetscBool initialized;    /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t    spmvBufferSize; /* since I'm not sure if spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void     *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr, vecYDescr; /* descriptor for the dense vectors in y=op(A)x */
#endif
};
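/* A hedged sketch (illustrative, not verbatim PETSc code) of the caching this
   struct enables: size and allocate the SpMV work buffer once, then reuse it for
   every product with the same operation; `spmv`, `handle`, `op`, `matDescr`, and
   `spmvAlg` are illustrative locals:

     if (!spmv->initialized) {
       PetscCallCUSPARSE(cusparseSpMV_bufferSize(handle, op, &PETSC_CUSPARSE_ONE, matDescr, spmv->vecXDescr, &PETSC_CUSPARSE_ZERO, spmv->vecYDescr, cusparse_scalartype, spmvAlg, &spmv->spmvBufferSize));
       PetscCallCUDA(cudaMalloc(&spmv->spmvBuffer, spmv->spmvBufferSize));
       spmv->initialized = PETSC_TRUE;
     }
     PetscCallCUSPARSE(cusparseSpMV(handle, op, &PETSC_CUSPARSE_ONE, matDescr, spmv->vecXDescr, &PETSC_CUSPARSE_ZERO, spmv->vecYDescr, cusparse_scalartype, spmvAlg, spmv->spmvBuffer));
*/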
/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t  descr;        /* Data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha_one;    /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matDescr;  /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV     cuSpMV[3]; /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL)
  {
    for (int i = 0; i < 3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
/* This is a larger struct holding all the matrices for an SpMV and a transpose SpMV */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a workvector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt                      nrows;          /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat      format;         /* the storage format for the matrix on the device */
  PetscBool                     use_cpu_solve;  /* Use AIJ_Seq (I)LU solve */
  cudaStream_t                  stream;         /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t              handle;         /* a handle to the cusparse library ... this may not be owned (if we're working in parallel i.e. multiGPUs) */
  PetscObjectState              nonzerostate;   /* track nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  size_t               csr2cscBufferSize; /* stuff used to compute the matTranspose above */
  void                *csr2cscBuffer;     /* This is used as a C struct and is calloc'ed by PetscNew() */
  cusparseCsr2CscAlg_t csr2cscAlg;        /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t    spmvAlg;
  cusparseSpMMAlg_t    spmmAlg;
#endif
  THRUSTINTARRAY            *csr2csc_i;
  PetscSplitCSRDataStructure deviceMat; /* Matrix on device for, eg, assembly */

  /* Stuff for basic COO support */
  THRUSTINTARRAY *cooPerm;   /* permutation array that sorts the input COO entries by row and col */
  THRUSTINTARRAY *cooPerm_a; /* ordered array indicating that the i-th nonzero (after sorting) is the j-th unique nonzero */
  /* Stuff for extended COO support */
  PetscBool   use_extended_coo; /* Use extended COO format */
  PetscCount *jmap_d;           /* perm[disp+jmap[i]..disp+jmap[i+1]) gives indices of entries in v[] associated with i-th nonzero of the matrix */
  PetscCount *perm_d;

  Mat_SeqAIJCUSPARSE() : use_extended_coo(PETSC_FALSE), jmap_d(NULL), perm_d(NULL) { }
};
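/* Illustrative example of the extended-COO mapping above (taking disp = 0): if
   the user supplies entries at (row,col) = (1,0), (0,0), (1,0), the assembled
   matrix has two unique nonzeros, (0,0) and (1,0), and
     jmap = {0, 1, 3}, perm = {1, 0, 2}
   so nonzero 0 gets v[1] and nonzero 1 gets v[0] + v[2].
*/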
typedef struct Mat_SeqAIJCUSPARSETriFactors *Mat_SeqAIJCUSPARSETriFactors_p;
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat, PetscCount, PetscInt[], PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat, const PetscScalar[], InsertMode);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat, Mat, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *);
using VecSeq_CUDA = Petsc::vec::cupm::impl::VecSeq_CUPM<Petsc::device::cupm::DeviceType::CUDA>;
static inline bool isCudaMem(const void *data)
{
  using namespace Petsc::device::cupm;
  auto mtype = PETSC_MEMTYPE_HOST;

  PetscFunctionBegin;
  PetscCallAbort(PETSC_COMM_SELF, impl::Interface<DeviceType::CUDA>::PetscCUPMGetMemType(data, &mtype));
  PetscFunctionReturn(PetscMemTypeDevice(mtype));
}
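/* Usage sketch: isCudaMem() lets the backend branch on where a raw pointer
   lives; the names below are hypothetical, not PETSc API:

     if (isCudaMem(b)) launchDeviceKernel(b); // pointer is device-resident
     else              copyToDeviceFirst(b);  // pointer is host-resident
*/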
#endif // PETSC_CUSPARSEMATIMPL_H