#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/mat/impls/dense/seq/dense.h>

#if defined(PETSC_HAVE_MKL_INTEL_ILP64)
  #define MKL_ILP64
#endif
#include <mkl_pardiso.h>

PETSC_EXTERN void PetscSetMKL_PARDISOThreads(int);

/*
 *  Possible MKL PARDISO phases that control the execution of the solver.
 *  For more information, check the MKL PARDISO manual.
 */
#define JOB_ANALYSIS                                                    11
#define JOB_ANALYSIS_NUMERICAL_FACTORIZATION                            12
#define JOB_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 13
#define JOB_NUMERICAL_FACTORIZATION                                     22
#define JOB_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT          23
#define JOB_SOLVE_ITERATIVE_REFINEMENT                                  33
#define JOB_SOLVE_FORWARD_SUBSTITUTION                                  331
#define JOB_SOLVE_DIAGONAL_SUBSTITUTION                                 332
#define JOB_SOLVE_BACKWARD_SUBSTITUTION                                 333
#define JOB_RELEASE_OF_LU_MEMORY                                        0
#define JOB_RELEASE_OF_ALL_MEMORY                                       -1
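/*
   A note on the encoding (per the MKL PARDISO documentation): the two-digit values combine a starting and an
   ending phase (e.g. 13 runs analysis, numerical factorization, and solve), while the three-digit 33x values
   select a single substitution step of the solve.
*/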

#define IPARM_SIZE 64

#if defined(PETSC_USE_64BIT_INDICES)
  #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
    #define INT_TYPE         long long int
    #define MKL_PARDISO      pardiso
    #define MKL_PARDISO_INIT pardisoinit
  #else
    /* this is the case where the MKL BLAS/LAPACK use 32-bit integers but the 64-bit integer version of
       the PARDISO code is used; hence the need for the pardiso_64 interface below */
    #define INT_TYPE         long long int
    #define MKL_PARDISO      pardiso_64
    #define MKL_PARDISO_INIT pardiso_64init
void pardiso_64init(void *pt, INT_TYPE *mtype, INT_TYPE iparm[])
{
  int iparm_copy[IPARM_SIZE], mtype_copy, i;

  mtype_copy = *mtype;
  pardisoinit(pt, &mtype_copy, iparm_copy);
  for (i = 0; i < IPARM_SIZE; i++) iparm[i] = iparm_copy[i];
}
  #endif
#else
  #define INT_TYPE         int
  #define MKL_PARDISO      pardiso
  #define MKL_PARDISO_INIT pardisoinit
#endif

/*
   Internal data structure.
 */
typedef struct {
  /* Configuration vector */
  INT_TYPE iparm[IPARM_SIZE];

  /*
     Internal MKL PARDISO memory location.
     After the first call to MKL PARDISO do not modify pt, as that could cause a serious memory leak.
   */
  void *pt[IPARM_SIZE];

  /* Basic MKL PARDISO info */
  INT_TYPE phase, maxfct, mnum, mtype, n, nrhs, msglvl, err;

  /* Matrix structure */
  void     *a;
  INT_TYPE *ia, *ja;

  /* Number of nonzero elements */
  INT_TYPE nz;

  /* Row permutation vector */
  INT_TYPE *perm;

  /* Indicates whether the matrix preserves its sparse structure. */
  MatStructure matstruc;

  PetscBool needsym;
  PetscBool freeaij;

  /* Schur complement */
  PetscScalar *schur;
  PetscInt     schur_size;
  PetscInt    *schur_idxs;
  PetscScalar *schur_work;
  PetscBLASInt schur_work_size;
  PetscBool    solve_interior;

  /* True if MKL PARDISO functions have been used. */
  PetscBool CleanUp;

  /* Conversion to a format suitable for MKL */
  PetscErrorCode (*Convert)(Mat, PetscBool, MatReuse, PetscBool *, INT_TYPE *, INT_TYPE **, INT_TYPE **, PetscScalar **);
} Mat_MKL_PARDISO;
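/*
   Note on indexing: iparm[k - 1] in this file corresponds to the 1-based parameter iparm(k) of the MKL
   PARDISO documentation, hence spellings such as iparm[12 - 1] below.
*/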

static PetscErrorCode MatMKLPardiso_Convert_seqsbaij(Mat A, PetscBool sym, MatReuse reuse, PetscBool *free, INT_TYPE *nnz, INT_TYPE **r, INT_TYPE **c, PetscScalar **v)
{
  Mat_SeqSBAIJ *aa = (Mat_SeqSBAIJ *)A->data;
  PetscInt      bs = A->rmap->bs, i;

  PetscFunctionBegin;
  PetscCheck(sym, PetscObjectComm((PetscObject)A), PETSC_ERR_PLIB, "This should not happen");
  *v = aa->a;
  if (bs == 1) { /* already in the correct format */
    /* though PetscInt and INT_TYPE are the same size, they are defined differently, so the Intel compiler requires a cast */
    *r    = (INT_TYPE *)aa->i;
    *c    = (INT_TYPE *)aa->j;
    *nnz  = (INT_TYPE)aa->nz;
    *free = PETSC_FALSE;
  } else if (reuse == MAT_INITIAL_MATRIX) {
    PetscInt  m = A->rmap->n, nz = aa->nz;
    PetscInt *row, *col;
    PetscCall(PetscMalloc2(m + 1, &row, nz, &col));
    for (i = 0; i < m + 1; i++) row[i] = aa->i[i] + 1;
    for (i = 0; i < nz; i++) col[i] = aa->j[i] + 1;
    *r    = (INT_TYPE *)row;
    *c    = (INT_TYPE *)col;
    *nnz  = (INT_TYPE)nz;
    *free = PETSC_TRUE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
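/*
   For bs > 1 the row and column arrays are copied and shifted to 1-based (Fortran-style) indexing, which is
   what PARDISO expects when iparm[34] is left at 0 in MatFactorMKL_PARDISOInitialize_Private() below.
*/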

static PetscErrorCode MatMKLPardiso_Convert_seqbaij(Mat A, PetscBool sym, MatReuse reuse, PetscBool *free, INT_TYPE *nnz, INT_TYPE **r, INT_TYPE **c, PetscScalar **v)
{
  Mat_SeqBAIJ *aa = (Mat_SeqBAIJ *)A->data;
  PetscInt     bs = A->rmap->bs, i;

  PetscFunctionBegin;
  if (!sym) {
    *v = aa->a;
    if (bs == 1) { /* already in the correct format */
      /* though PetscInt and INT_TYPE are the same size, they are defined differently, so the Intel compiler requires a cast */
      *r    = (INT_TYPE *)aa->i;
      *c    = (INT_TYPE *)aa->j;
      *nnz  = (INT_TYPE)aa->nz;
      *free = PETSC_FALSE;
      PetscFunctionReturn(PETSC_SUCCESS);
    } else if (reuse == MAT_INITIAL_MATRIX) {
      PetscInt  m = A->rmap->n, nz = aa->nz;
      PetscInt *row, *col;
      PetscCall(PetscMalloc2(m + 1, &row, nz, &col));
      for (i = 0; i < m + 1; i++) row[i] = aa->i[i] + 1;
      for (i = 0; i < nz; i++) col[i] = aa->j[i] + 1;
      *r   = (INT_TYPE *)row;
      *c   = (INT_TYPE *)col;
      *nnz = (INT_TYPE)nz;
    }
    *free = PETSC_TRUE;
  } else {
    SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_PLIB, "This should not happen");
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMKLPardiso_Convert_seqaij(Mat A, PetscBool sym, MatReuse reuse, PetscBool *free, INT_TYPE *nnz, INT_TYPE **r, INT_TYPE **c, PetscScalar **v)
{
  Mat_SeqAIJ  *aa = (Mat_SeqAIJ *)A->data;
  PetscScalar *aav;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJGetArrayRead(A, (const PetscScalar **)&aav));
  if (!sym) { /* already in the correct format */
    *v    = aav;
    *r    = (INT_TYPE *)aa->i;
    *c    = (INT_TYPE *)aa->j;
    *nnz  = (INT_TYPE)aa->nz;
    *free = PETSC_FALSE;
  } else if (reuse == MAT_INITIAL_MATRIX) { /* need to get the triangular part */
    PetscScalar *vals, *vv;
    PetscInt    *row, *col, *jj;
    PetscInt     m = A->rmap->n, nz, i;

    nz = 0;
    for (i = 0; i < m; i++) nz += aa->i[i + 1] - aa->diag[i];
    PetscCall(PetscMalloc2(m + 1, &row, nz, &col));
    PetscCall(PetscMalloc1(nz, &vals));
    jj = col;
    vv = vals;

    row[0] = 0;
    for (i = 0; i < m; i++) {
      PetscInt    *aj = aa->j + aa->diag[i];
      PetscScalar *av = aav + aa->diag[i];
      PetscInt     rl = aa->i[i + 1] - aa->diag[i], j;

      for (j = 0; j < rl; j++) {
        *jj = *aj;
        jj++;
        aj++;
        *vv = *av;
        vv++;
        av++;
      }
      row[i + 1] = row[i] + rl;
    }
    *v    = vals;
    *r    = (INT_TYPE *)row;
    *c    = (INT_TYPE *)col;
    *nnz  = (INT_TYPE)nz;
    *free = PETSC_TRUE;
  } else {
    PetscScalar *vv;
    PetscInt     m = A->rmap->n, i;

    vv = *v;
    for (i = 0; i < m; i++) {
      PetscScalar *av = aav + aa->diag[i];
      PetscInt     rl = aa->i[i + 1] - aa->diag[i], j;
      for (j = 0; j < rl; j++) {
        *vv = *av;
        vv++;
        av++;
      }
    }
    *free = PETSC_TRUE;
  }
  PetscCall(MatSeqAIJRestoreArrayRead(A, (const PetscScalar **)&aav));
  PetscFunctionReturn(PETSC_SUCCESS);
}
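/*
   In the symmetric case only the upper triangular part of each row is extracted: aa->diag[i] indexes the
   diagonal entry of row i in the SeqAIJ storage, so the slice [aa->diag[i], aa->i[i + 1]) is exactly the
   diagonal plus the entries to its right.
*/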

static PetscErrorCode MatMKLPardisoSolveSchur_Private(Mat F, PetscScalar *B, PetscScalar *X)
{
  Mat_MKL_PARDISO     *mpardiso = (Mat_MKL_PARDISO *)F->data;
  Mat                  S, Xmat, Bmat;
  MatFactorSchurStatus schurstatus;

  PetscFunctionBegin;
  PetscCall(MatFactorGetSchurComplement(F, &S, &schurstatus));
  PetscCheck(X != B || schurstatus != MAT_FACTOR_SCHUR_INVERTED, PETSC_COMM_SELF, PETSC_ERR_SUP, "X and B cannot point to the same address");
  PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, mpardiso->schur_size, mpardiso->nrhs, B, &Bmat));
  PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, mpardiso->schur_size, mpardiso->nrhs, X, &Xmat));
  PetscCall(MatSetType(Bmat, ((PetscObject)S)->type_name));
  PetscCall(MatSetType(Xmat, ((PetscObject)S)->type_name));
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
  PetscCall(MatBindToCPU(Xmat, S->boundtocpu));
  PetscCall(MatBindToCPU(Bmat, S->boundtocpu));
#endif

#if defined(PETSC_USE_COMPLEX)
  PetscCheck(mpardiso->iparm[12 - 1] != 1, PetscObjectComm((PetscObject)F), PETSC_ERR_SUP, "Hermitian solve not implemented yet");
#endif

  switch (schurstatus) {
  case MAT_FACTOR_SCHUR_FACTORED:
    if (!mpardiso->iparm[12 - 1]) {
      PetscCall(MatMatSolve(S, Bmat, Xmat));
    } else { /* transpose solve */
      PetscCall(MatMatSolveTranspose(S, Bmat, Xmat));
    }
    break;
  case MAT_FACTOR_SCHUR_INVERTED:
    PetscCall(MatProductCreateWithMat(S, Bmat, NULL, Xmat));
    if (!mpardiso->iparm[12 - 1]) {
      PetscCall(MatProductSetType(Xmat, MATPRODUCT_AB));
    } else { /* transpose solve */
      PetscCall(MatProductSetType(Xmat, MATPRODUCT_AtB));
    }
    PetscCall(MatProductSetFromOptions(Xmat));
    PetscCall(MatProductSymbolic(Xmat));
    PetscCall(MatProductNumeric(Xmat));
    PetscCall(MatProductClear(Xmat));
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)F), PETSC_ERR_SUP, "Unhandled MatFactorSchurStatus %" PetscInt_FMT, F->schur_status);
    break;
  }
  PetscCall(MatFactorRestoreSchurComplement(F, &S, schurstatus));
  PetscCall(MatDestroy(&Bmat));
  PetscCall(MatDestroy(&Xmat));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatFactorSetSchurIS_MKL_PARDISO(Mat F, IS is)
{
  Mat_MKL_PARDISO   *mpardiso = (Mat_MKL_PARDISO *)F->data;
  const PetscScalar *arr;
  const PetscInt    *idxs;
  PetscInt           size, i;
  PetscMPIInt        csize;
  PetscBool          sorted;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)F), &csize));
  PetscCheck(csize <= 1, PETSC_COMM_SELF, PETSC_ERR_SUP, "MKL PARDISO parallel Schur complements not yet supported from PETSc");
  PetscCall(ISSorted(is, &sorted));
  PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_SUP, "IS for MKL PARDISO Schur complements needs to be sorted");
  PetscCall(ISGetLocalSize(is, &size));
  PetscCall(PetscFree(mpardiso->schur_work));
  PetscCall(PetscBLASIntCast(PetscMax(mpardiso->n, 2 * size), &mpardiso->schur_work_size));
  PetscCall(PetscMalloc1(mpardiso->schur_work_size, &mpardiso->schur_work));
  PetscCall(MatDestroy(&F->schur));
  PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, size, size, NULL, &F->schur));
  PetscCall(MatDenseGetArrayRead(F->schur, &arr));
  mpardiso->schur      = (PetscScalar *)arr;
  mpardiso->schur_size = size;
  PetscCall(MatDenseRestoreArrayRead(F->schur, &arr));
  if (mpardiso->mtype == 2) PetscCall(MatSetOption(F->schur, MAT_SPD, PETSC_TRUE));

  PetscCall(PetscFree(mpardiso->schur_idxs));
  PetscCall(PetscMalloc1(size, &mpardiso->schur_idxs));
  PetscCall(PetscArrayzero(mpardiso->perm, mpardiso->n));
  PetscCall(ISGetIndices(is, &idxs));
  PetscCall(PetscArraycpy(mpardiso->schur_idxs, idxs, size));
  for (i = 0; i < size; i++) mpardiso->perm[idxs[i]] = 1;
  PetscCall(ISRestoreIndices(is, &idxs));
  if (size) { /* turn on Schur switch if the set of indices is not empty */
    mpardiso->iparm[36 - 1] = 2;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
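/*
   Note that the perm array does double duty here: entries set to 1 mark the rows/columns that belong to the
   Schur complement, which, per the MKL PARDISO documentation, is how the Schur set is described when
   iparm(36) is nonzero.
*/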

static PetscErrorCode MatDestroy_MKL_PARDISO(Mat A)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;

  PetscFunctionBegin;
  if (mat_mkl_pardiso->CleanUp) {
    mat_mkl_pardiso->phase = JOB_RELEASE_OF_ALL_MEMORY;

    MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, NULL, NULL, NULL, NULL, &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, NULL, NULL,
                &mat_mkl_pardiso->err);
  }
  PetscCall(PetscFree(mat_mkl_pardiso->perm));
  PetscCall(PetscFree(mat_mkl_pardiso->schur_work));
  PetscCall(PetscFree(mat_mkl_pardiso->schur_idxs));
  if (mat_mkl_pardiso->freeaij) {
    PetscCall(PetscFree2(mat_mkl_pardiso->ia, mat_mkl_pardiso->ja));
    if (mat_mkl_pardiso->iparm[34] == 1) PetscCall(PetscFree(mat_mkl_pardiso->a));
  }
  PetscCall(PetscFree(A->data));

  /* clear composed functions */
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorSetSchurIS_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMkl_PardisoSetCntl_C", NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMKLPardisoScatterSchur_Private(Mat_MKL_PARDISO *mpardiso, PetscScalar *whole, PetscScalar *schur, PetscBool reduce)
{
  PetscFunctionBegin;
  if (reduce) { /* data given for the whole matrix */
    PetscInt i, m = 0, p = 0;
    for (i = 0; i < mpardiso->nrhs; i++) {
      PetscInt j;
      for (j = 0; j < mpardiso->schur_size; j++) schur[p + j] = whole[m + mpardiso->schur_idxs[j]];
      m += mpardiso->n;
      p += mpardiso->schur_size;
    }
  } else { /* from Schur to whole */
    PetscInt i, m = 0, p = 0;
    for (i = 0; i < mpardiso->nrhs; i++) {
      PetscInt j;
      for (j = 0; j < mpardiso->schur_size; j++) whole[m + mpardiso->schur_idxs[j]] = schur[p + j];
      m += mpardiso->n;
      p += mpardiso->schur_size;
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
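/*
   Both branches treat whole as nrhs contiguous columns of length n and schur as nrhs contiguous columns of
   length schur_size: reduce = PETSC_TRUE gathers the Schur rows of each column into schur, while reduce =
   PETSC_FALSE scatters them back.
*/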

static PetscErrorCode MatSolve_MKL_PARDISO(Mat A, Vec b, Vec x)
{
  Mat_MKL_PARDISO   *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
  PetscScalar       *xarray;
  const PetscScalar *barray;

  PetscFunctionBegin;
  mat_mkl_pardiso->nrhs = 1;
  PetscCall(VecGetArrayWrite(x, &xarray));
  PetscCall(VecGetArrayRead(b, &barray));

  if (!mat_mkl_pardiso->schur) mat_mkl_pardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
  else mat_mkl_pardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;

  if (barray == xarray) { /* if the two vectors share the same memory */
    PetscScalar *work;
    if (!mat_mkl_pardiso->schur_work) {
      PetscCall(PetscMalloc1(mat_mkl_pardiso->n, &work));
    } else {
      work = mat_mkl_pardiso->schur_work;
    }
    mat_mkl_pardiso->iparm[6 - 1] = 1;
    MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, NULL, &mat_mkl_pardiso->nrhs,
                mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)xarray, (void *)work, &mat_mkl_pardiso->err);
    if (!mat_mkl_pardiso->schur_work) PetscCall(PetscFree(work));
  } else {
    mat_mkl_pardiso->iparm[6 - 1] = 0;
    MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
                &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_pardiso->err);
  }
  PetscCall(VecRestoreArrayRead(b, &barray));

  PetscCheck(mat_mkl_pardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL PARDISO: err=%d. Please check manual", mat_mkl_pardiso->err);

  if (mat_mkl_pardiso->schur) { /* solve Schur complement and expand solution */
    if (!mat_mkl_pardiso->solve_interior) {
      PetscInt shift = mat_mkl_pardiso->schur_size;

      PetscCall(MatFactorFactorizeSchurComplement(A));
      /* if inverted, uses BLAS *MM subroutines, otherwise LAPACK *TRS */
      if (A->schur_status != MAT_FACTOR_SCHUR_INVERTED) shift = 0;

      /* solve Schur complement */
      PetscCall(MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work, PETSC_TRUE));
      PetscCall(MatMKLPardisoSolveSchur_Private(A, mat_mkl_pardiso->schur_work, mat_mkl_pardiso->schur_work + shift));
      PetscCall(MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work + shift, PETSC_FALSE));
    } else { /* if we are solving for the interior problem, any value in barray[schur] forward-substituted to xarray[schur] will be neglected */
      PetscInt i;
      for (i = 0; i < mat_mkl_pardiso->schur_size; i++) xarray[mat_mkl_pardiso->schur_idxs[i]] = 0.;
    }

    /* expansion phase */
    mat_mkl_pardiso->iparm[6 - 1] = 1;
    mat_mkl_pardiso->phase        = JOB_SOLVE_BACKWARD_SUBSTITUTION;
    MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
                &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)xarray, (void *)mat_mkl_pardiso->schur_work, /* according to the specs, the solution vector is always used */
                &mat_mkl_pardiso->err);

    PetscCheck(mat_mkl_pardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL PARDISO: err=%d. Please check manual", mat_mkl_pardiso->err);
    mat_mkl_pardiso->iparm[6 - 1] = 0;
  }
  PetscCall(VecRestoreArrayWrite(x, &xarray));
  mat_mkl_pardiso->CleanUp = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}
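/*
   Summary of the Schur path above: when a Schur complement has been requested, the solve is split into a
   forward substitution (phase 331), a dense solve with the Schur complement, and a backward substitution
   (phase 333); iparm(6) = 1 makes PARDISO write the result in place of the right-hand side.
*/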

static PetscErrorCode MatSolveTranspose_MKL_PARDISO(Mat A, Vec b, Vec x)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
  PetscInt         oiparm12;

  PetscFunctionBegin;
  oiparm12                       = mat_mkl_pardiso->iparm[12 - 1];
  mat_mkl_pardiso->iparm[12 - 1] = 2;
  PetscCall(MatSolve_MKL_PARDISO(A, b, x));
  mat_mkl_pardiso->iparm[12 - 1] = oiparm12;
  PetscFunctionReturn(PETSC_SUCCESS);
}
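/*
   iparm(12) = 2 asks MKL PARDISO to solve with the transposed matrix; the previous value is saved and
   restored so that a later MatSolve() is unaffected.
*/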

static PetscErrorCode MatMatSolve_MKL_PARDISO(Mat A, Mat B, Mat X)
{
  Mat_MKL_PARDISO   *mat_mkl_pardiso = (Mat_MKL_PARDISO *)(A)->data;
  const PetscScalar *barray;
  PetscScalar       *xarray;
  PetscBool          flg;

  PetscFunctionBegin;
  PetscCall(PetscObjectBaseTypeCompare((PetscObject)B, MATSEQDENSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Matrix B must be MATSEQDENSE matrix");
  if (X != B) {
    PetscCall(PetscObjectBaseTypeCompare((PetscObject)X, MATSEQDENSE, &flg));
    PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Matrix X must be MATSEQDENSE matrix");
  }

  PetscCall(MatGetSize(B, NULL, (PetscInt *)&mat_mkl_pardiso->nrhs));

  if (mat_mkl_pardiso->nrhs > 0) {
    PetscCall(MatDenseGetArrayRead(B, &barray));
    PetscCall(MatDenseGetArrayWrite(X, &xarray));

    PetscCheck(barray != xarray, PETSC_COMM_SELF, PETSC_ERR_SUP, "B and X cannot share the same memory location");
    if (!mat_mkl_pardiso->schur) mat_mkl_pardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
    else mat_mkl_pardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;

    MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
                &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_pardiso->err);
    PetscCheck(mat_mkl_pardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL PARDISO: err=%d. Please check manual", mat_mkl_pardiso->err);

    PetscCall(MatDenseRestoreArrayRead(B, &barray));
    if (mat_mkl_pardiso->schur) { /* solve Schur complement and expand solution */
      PetscScalar *o_schur_work = NULL;

      /* solve Schur complement */
      if (!mat_mkl_pardiso->solve_interior) {
        PetscInt shift = mat_mkl_pardiso->schur_size * mat_mkl_pardiso->nrhs, scale;
        PetscInt mem   = mat_mkl_pardiso->n * mat_mkl_pardiso->nrhs;

        PetscCall(MatFactorFactorizeSchurComplement(A));
        /* allocate extra memory if it is needed */
        scale = 1;
        if (A->schur_status == MAT_FACTOR_SCHUR_INVERTED) scale = 2;
        mem *= scale;
        if (mem > mat_mkl_pardiso->schur_work_size) {
          o_schur_work = mat_mkl_pardiso->schur_work;
          PetscCall(PetscMalloc1(mem, &mat_mkl_pardiso->schur_work));
        }
        /* if inverted, uses BLAS *MM subroutines, otherwise LAPACK *TRS */
        if (A->schur_status != MAT_FACTOR_SCHUR_INVERTED) shift = 0;
        PetscCall(MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work, PETSC_TRUE));
        PetscCall(MatMKLPardisoSolveSchur_Private(A, mat_mkl_pardiso->schur_work, mat_mkl_pardiso->schur_work + shift));
        PetscCall(MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work + shift, PETSC_FALSE));
      } else { /* if we are solving for the interior problem, any value in barray[schur,n] forward-substituted to xarray[schur,n] will be neglected */
        PetscInt i, n, m = 0;
        for (n = 0; n < mat_mkl_pardiso->nrhs; n++) {
          for (i = 0; i < mat_mkl_pardiso->schur_size; i++) xarray[mat_mkl_pardiso->schur_idxs[i] + m] = 0.;
          m += mat_mkl_pardiso->n;
        }
      }

      /* expansion phase */
      mat_mkl_pardiso->iparm[6 - 1] = 1;
      mat_mkl_pardiso->phase        = JOB_SOLVE_BACKWARD_SUBSTITUTION;
      MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
                  &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)xarray, (void *)mat_mkl_pardiso->schur_work, /* according to the specs, the solution vector is always used */
                  &mat_mkl_pardiso->err);
      if (o_schur_work) { /* restore original schur_work (minimal size) */
        PetscCall(PetscFree(mat_mkl_pardiso->schur_work));
        mat_mkl_pardiso->schur_work = o_schur_work;
      }
      PetscCheck(mat_mkl_pardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL PARDISO: err=%d. Please check manual", mat_mkl_pardiso->err);
      mat_mkl_pardiso->iparm[6 - 1] = 0;
    }
    PetscCall(MatDenseRestoreArrayWrite(X, &xarray));
  }
  mat_mkl_pardiso->CleanUp = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}
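/*
   The workspace management above: schur_work normally holds max(n, 2 * schur_size) entries (see
   MatFactorSetSchurIS_MKL_PARDISO()), so for many right-hand sides it is temporarily grown to n * nrhs
   entries (twice that when the Schur factor is stored inverted, since input and output columns must then be
   distinct) and the original allocation is restored afterwards.
*/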

static PetscErrorCode MatFactorNumeric_MKL_PARDISO(Mat F, Mat A, const MatFactorInfo *info)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)(F)->data;

  PetscFunctionBegin;
  mat_mkl_pardiso->matstruc = SAME_NONZERO_PATTERN;
  PetscCall((*mat_mkl_pardiso->Convert)(A, mat_mkl_pardiso->needsym, MAT_REUSE_MATRIX, &mat_mkl_pardiso->freeaij, &mat_mkl_pardiso->nz, &mat_mkl_pardiso->ia, &mat_mkl_pardiso->ja, (PetscScalar **)&mat_mkl_pardiso->a));

  mat_mkl_pardiso->phase = JOB_NUMERICAL_FACTORIZATION;
  MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
              &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, NULL, (void *)mat_mkl_pardiso->schur, &mat_mkl_pardiso->err);
  PetscCheck(mat_mkl_pardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL PARDISO: err=%d. Please check manual", mat_mkl_pardiso->err);

  /* report flops: iparm(19) returns the MFLOPS needed for the factorization, hence the 10^6 scaling */
  if (mat_mkl_pardiso->iparm[18] > 0) PetscCall(PetscLogFlops(PetscPowRealInt(10., 6) * mat_mkl_pardiso->iparm[18]));

  if (F->schur) { /* Schur output from PARDISO is in row-major format */
#if defined(PETSC_HAVE_CUDA)
    F->schur->offloadmask = PETSC_OFFLOAD_CPU;
#endif
    PetscCall(MatFactorRestoreSchurComplement(F, NULL, MAT_FACTOR_SCHUR_UNFACTORED));
    PetscCall(MatTranspose(F->schur, MAT_INPLACE_MATRIX, &F->schur));
  }
  mat_mkl_pardiso->matstruc = SAME_NONZERO_PATTERN;
  mat_mkl_pardiso->CleanUp  = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatSetFromOptions_MKL_PARDISO(Mat F, Mat A)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;
  PetscInt         icntl, bs, threads = 1;
  PetscBool        flg;

  PetscFunctionBegin;
  PetscOptionsBegin(PetscObjectComm((PetscObject)F), ((PetscObject)F)->prefix, "MKL_PARDISO Options", "Mat");

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_65", "Suggested number of threads to use within MKL PARDISO", "None", threads, &threads, &flg));
  if (flg) PetscSetMKL_PARDISOThreads((int)threads);

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_66", "Maximum number of factors with identical sparsity structure that must be kept in memory at the same time", "None", mat_mkl_pardiso->maxfct, &icntl, &flg));
  if (flg) mat_mkl_pardiso->maxfct = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_67", "Indicates the actual matrix for the solution phase", "None", mat_mkl_pardiso->mnum, &icntl, &flg));
  if (flg) mat_mkl_pardiso->mnum = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_68", "Message level information", "None", mat_mkl_pardiso->msglvl, &icntl, &flg));
  if (flg) mat_mkl_pardiso->msglvl = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_69", "Defines the matrix type", "None", mat_mkl_pardiso->mtype, &icntl, &flg));
  if (flg) {
    void *pt[IPARM_SIZE];
    mat_mkl_pardiso->mtype = icntl;
    icntl                  = mat_mkl_pardiso->iparm[34];
    bs                     = mat_mkl_pardiso->iparm[36];
    MKL_PARDISO_INIT(pt, &mat_mkl_pardiso->mtype, mat_mkl_pardiso->iparm);
#if defined(PETSC_USE_REAL_SINGLE)
    mat_mkl_pardiso->iparm[27] = 1;
#else
    mat_mkl_pardiso->iparm[27] = 0;
#endif
    mat_mkl_pardiso->iparm[34] = icntl;
    mat_mkl_pardiso->iparm[36] = bs;
  }

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_1", "Use default values (if 0)", "None", mat_mkl_pardiso->iparm[0], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[0] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_2", "Fill-in reducing ordering for the input matrix", "None", mat_mkl_pardiso->iparm[1], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[1] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_4", "Preconditioned CGS/CG", "None", mat_mkl_pardiso->iparm[3], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[3] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_5", "User permutation", "None", mat_mkl_pardiso->iparm[4], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[4] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_6", "Write solution on x", "None", mat_mkl_pardiso->iparm[5], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[5] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_8", "Iterative refinement step", "None", mat_mkl_pardiso->iparm[7], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[7] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_10", "Pivoting perturbation", "None", mat_mkl_pardiso->iparm[9], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[9] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_11", "Scaling vectors", "None", mat_mkl_pardiso->iparm[10], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[10] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_12", "Solve with transposed or conjugate transposed matrix A", "None", mat_mkl_pardiso->iparm[11], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[11] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_13", "Improved accuracy using (non-) symmetric weighted matching", "None", mat_mkl_pardiso->iparm[12], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[12] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_18", "Number of non-zero elements", "None", mat_mkl_pardiso->iparm[17], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[17] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_19", "Report number of floating point operations (0 to disable)", "None", mat_mkl_pardiso->iparm[18], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[18] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_21", "Pivoting for symmetric indefinite matrices", "None", mat_mkl_pardiso->iparm[20], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[20] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_24", "Parallel factorization control", "None", mat_mkl_pardiso->iparm[23], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[23] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_25", "Parallel forward/backward solve control", "None", mat_mkl_pardiso->iparm[24], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[24] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_27", "Matrix checker", "None", mat_mkl_pardiso->iparm[26], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[26] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_31", "Partial solve and computing selected components of the solution vectors", "None", mat_mkl_pardiso->iparm[30], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[30] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_34", "Optimal number of threads for conditional numerical reproducibility (CNR) mode", "None", mat_mkl_pardiso->iparm[33], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[33] = icntl;

  PetscCall(PetscOptionsInt("-mat_mkl_pardiso_60", "Intel MKL PARDISO mode", "None", mat_mkl_pardiso->iparm[59], &icntl, &flg));
  if (flg) mat_mkl_pardiso->iparm[59] = icntl;
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}
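/*
   The option numbering mirrors MatMkl_PardisoSetCntl(): -mat_mkl_pardiso_<n> with n <= 64 maps to the MKL
   parameter iparm(n), while 65-70 are PETSc-side controls (threads, maxfct, mnum, msglvl, mtype, and the
   interior-only solve).
*/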

static PetscErrorCode MatFactorMKL_PARDISOInitialize_Private(Mat A, MatFactorType ftype, Mat_MKL_PARDISO *mat_mkl_pardiso)
{
  PetscInt  i, bs;
  PetscBool match;

  PetscFunctionBegin;
  for (i = 0; i < IPARM_SIZE; i++) mat_mkl_pardiso->iparm[i] = 0;
  for (i = 0; i < IPARM_SIZE; i++) mat_mkl_pardiso->pt[i] = 0;
#if defined(PETSC_USE_REAL_SINGLE)
  mat_mkl_pardiso->iparm[27] = 1;
#else
  mat_mkl_pardiso->iparm[27] = 0;
#endif
  /* Default options for both sym and unsym */
  mat_mkl_pardiso->iparm[0]  = 1;  /* Solver default parameters overridden with those provided in iparm */
  mat_mkl_pardiso->iparm[1]  = 2;  /* Metis reordering */
  mat_mkl_pardiso->iparm[5]  = 0;  /* Write solution into x */
  mat_mkl_pardiso->iparm[7]  = 0;  /* Max number of iterative refinement steps */
  mat_mkl_pardiso->iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
  mat_mkl_pardiso->iparm[18] = -1; /* Output: Mflops for LU factorization */
#if 0
  mat_mkl_pardiso->iparm[23] =  1; /* Parallel factorization control */
#endif
  PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &match, MATSEQBAIJ, MATSEQSBAIJ, ""));
  PetscCall(MatGetBlockSize(A, &bs));
  if (!match || bs == 1) {
    mat_mkl_pardiso->iparm[34] = 1; /* Use C-style (0-based) indexing for the ia and ja arrays */
    mat_mkl_pardiso->n         = A->rmap->N;
  } else {
    mat_mkl_pardiso->iparm[34] = 0; /* Use Fortran-style (1-based) indexing for the ia and ja arrays */
    mat_mkl_pardiso->iparm[36] = bs;
    mat_mkl_pardiso->n         = A->rmap->N / bs;
  }
  mat_mkl_pardiso->iparm[39] = 0; /* Input: matrix/rhs/solution stored on rank-0 */

  mat_mkl_pardiso->CleanUp = PETSC_FALSE;
  mat_mkl_pardiso->maxfct  = 1; /* Maximum number of numerical factorizations. */
  mat_mkl_pardiso->mnum    = 1; /* Which factorization to use. */
  mat_mkl_pardiso->msglvl  = 0; /* 0: do not print; 1: print statistical information to file */
  mat_mkl_pardiso->phase   = -1;
  mat_mkl_pardiso->err     = 0;
  mat_mkl_pardiso->nrhs    = 1;

  if (ftype == MAT_FACTOR_LU) {
    mat_mkl_pardiso->iparm[9]  = 13; /* Perturb the pivot elements with 1E-13 */
    mat_mkl_pardiso->iparm[10] = 1;  /* Use nonsymmetric permutation and scaling MPS */
    mat_mkl_pardiso->iparm[12] = 1;  /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
  } else {
    mat_mkl_pardiso->iparm[9]  = 8; /* Perturb the pivot elements with 1E-8 */
    mat_mkl_pardiso->iparm[10] = 0; /* Do not use nonsymmetric permutation and scaling MPS */
    mat_mkl_pardiso->iparm[12] = 1; /* Switch on Maximum Weighted Matching algorithm */
#if defined(PETSC_USE_DEBUG)
    mat_mkl_pardiso->iparm[26] = 1; /* Matrix checker */
#endif
  }
  PetscCall(PetscCalloc1(A->rmap->N * sizeof(INT_TYPE), &mat_mkl_pardiso->perm));
  mat_mkl_pardiso->schur_size = 0;
  PetscFunctionReturn(PETSC_SUCCESS);
}
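/*
   A worked example of the block-size logic above (hypothetical sizes): a MATSEQBAIJ matrix with N = 300 and
   bs = 3 is handed to PARDISO as n = 100 block rows in 1-based BSR format with iparm(37) = 3, while any
   matrix with bs == 1 is passed as plain 0-based CSR.
*/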

static PetscErrorCode MatFactorSymbolic_AIJMKL_PARDISO_Private(Mat F, Mat A, const MatFactorInfo *info)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;

  PetscFunctionBegin;
  mat_mkl_pardiso->matstruc = DIFFERENT_NONZERO_PATTERN;
  PetscCall(MatSetFromOptions_MKL_PARDISO(F, A));
  /* throw away any previously computed structure */
  if (mat_mkl_pardiso->freeaij) {
    PetscCall(PetscFree2(mat_mkl_pardiso->ia, mat_mkl_pardiso->ja));
    if (mat_mkl_pardiso->iparm[34] == 1) PetscCall(PetscFree(mat_mkl_pardiso->a));
  }
  PetscCall((*mat_mkl_pardiso->Convert)(A, mat_mkl_pardiso->needsym, MAT_INITIAL_MATRIX, &mat_mkl_pardiso->freeaij, &mat_mkl_pardiso->nz, &mat_mkl_pardiso->ia, &mat_mkl_pardiso->ja, (PetscScalar **)&mat_mkl_pardiso->a));
  if (mat_mkl_pardiso->iparm[34] == 1) mat_mkl_pardiso->n = A->rmap->N;
  else mat_mkl_pardiso->n = A->rmap->N / A->rmap->bs;

  mat_mkl_pardiso->phase = JOB_ANALYSIS;

  /* reset flops counting if requested */
  if (mat_mkl_pardiso->iparm[18]) mat_mkl_pardiso->iparm[18] = -1;

  MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
              &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, NULL, NULL, &mat_mkl_pardiso->err);
  PetscCheck(mat_mkl_pardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL PARDISO: err=%d. Please check manual", mat_mkl_pardiso->err);

  mat_mkl_pardiso->CleanUp = PETSC_TRUE;

  if (F->factortype == MAT_FACTOR_LU) F->ops->lufactornumeric = MatFactorNumeric_MKL_PARDISO;
  else F->ops->choleskyfactornumeric = MatFactorNumeric_MKL_PARDISO;

  F->ops->solve          = MatSolve_MKL_PARDISO;
  F->ops->solvetranspose = MatSolveTranspose_MKL_PARDISO;
  F->ops->matsolve       = MatMatSolve_MKL_PARDISO;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatLUFactorSymbolic_AIJMKL_PARDISO(Mat F, Mat A, IS r, IS c, const MatFactorInfo *info)
{
  PetscFunctionBegin;
  PetscCall(MatFactorSymbolic_AIJMKL_PARDISO_Private(F, A, info));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if !defined(PETSC_USE_COMPLEX)
static PetscErrorCode MatGetInertia_MKL_PARDISO(Mat F, PetscInt *nneg, PetscInt *nzero, PetscInt *npos)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;

  PetscFunctionBegin;
  if (nneg) *nneg = mat_mkl_pardiso->iparm[22];
  if (npos) *npos = mat_mkl_pardiso->iparm[21];
  if (nzero) *nzero = F->rmap->N - (mat_mkl_pardiso->iparm[22] + mat_mkl_pardiso->iparm[21]);
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

static PetscErrorCode MatCholeskyFactorSymbolic_AIJMKL_PARDISO(Mat F, Mat A, IS r, const MatFactorInfo *info)
{
  PetscFunctionBegin;
  PetscCall(MatFactorSymbolic_AIJMKL_PARDISO_Private(F, A, info));
  F->ops->getinertia = NULL;
#if !defined(PETSC_USE_COMPLEX)
  F->ops->getinertia = MatGetInertia_MKL_PARDISO;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatView_MKL_PARDISO(Mat A, PetscViewer viewer)
{
  PetscBool         iascii;
  PetscViewerFormat format;
  Mat_MKL_PARDISO  *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
  PetscInt          i;

  PetscFunctionBegin;
  if (A->ops->solve != MatSolve_MKL_PARDISO) PetscFunctionReturn(PETSC_SUCCESS);

  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
  if (iascii) {
    PetscCall(PetscViewerGetFormat(viewer, &format));
    if (format == PETSC_VIEWER_ASCII_INFO) {
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO run parameters:\n"));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO phase:             %d \n", mat_mkl_pardiso->phase));
      for (i = 1; i <= 64; i++) PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO iparm[%d]:     %d \n", i, mat_mkl_pardiso->iparm[i - 1]));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO maxfct:     %d \n", mat_mkl_pardiso->maxfct));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO mnum:     %d \n", mat_mkl_pardiso->mnum));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO mtype:     %d \n", mat_mkl_pardiso->mtype));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO n:     %d \n", mat_mkl_pardiso->n));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO nrhs:     %d \n", mat_mkl_pardiso->nrhs));
      PetscCall(PetscViewerASCIIPrintf(viewer, "MKL PARDISO msglvl:     %d \n", mat_mkl_pardiso->msglvl));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatGetInfo_MKL_PARDISO(Mat A, MatInfoType flag, MatInfo *info)
{
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;

  PetscFunctionBegin;
  info->block_size        = 1.0;
  info->nz_used           = mat_mkl_pardiso->iparm[17];
  info->nz_allocated      = mat_mkl_pardiso->iparm[17];
  info->nz_unneeded       = 0.0;
  info->assemblies        = 0.0;
  info->mallocs           = 0.0;
  info->memory            = 0.0;
  info->fill_ratio_given  = 0;
  info->fill_ratio_needed = 0;
  info->factor_mallocs    = 0;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMkl_PardisoSetCntl_MKL_PARDISO(Mat F, PetscInt icntl, PetscInt ival)
{
  PetscInt         backup, bs;
  Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;

  PetscFunctionBegin;
  if (icntl <= 64) {
    mat_mkl_pardiso->iparm[icntl - 1] = ival;
  } else {
    if (icntl == 65) PetscSetMKL_PARDISOThreads(ival);
    else if (icntl == 66) mat_mkl_pardiso->maxfct = ival;
    else if (icntl == 67) mat_mkl_pardiso->mnum = ival;
    else if (icntl == 68) mat_mkl_pardiso->msglvl = ival;
    else if (icntl == 69) {
      void *pt[IPARM_SIZE];
      backup                 = mat_mkl_pardiso->iparm[34];
      bs                     = mat_mkl_pardiso->iparm[36];
      mat_mkl_pardiso->mtype = ival;
      MKL_PARDISO_INIT(pt, &mat_mkl_pardiso->mtype, mat_mkl_pardiso->iparm);
#if defined(PETSC_USE_REAL_SINGLE)
      mat_mkl_pardiso->iparm[27] = 1;
#else
      mat_mkl_pardiso->iparm[27] = 0;
#endif
      mat_mkl_pardiso->iparm[34] = backup;
      mat_mkl_pardiso->iparm[36] = bs;
    } else if (icntl == 70) mat_mkl_pardiso->solve_interior = (PetscBool) !!ival;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatMkl_PardisoSetCntl - Set MKL PARDISO <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html> parameters

  Logically Collective

  Input Parameters:
+ F     - the factored matrix obtained by calling `MatGetFactor()`
. icntl - index of MKL PARDISO parameter
- ival  - value of MKL PARDISO parameter

  Options Database Key:
. -mat_mkl_pardiso_<icntl> <ival> - change the option numbered icntl to the value ival

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MATSOLVERMKL_PARDISO`, `MatGetFactor()`
@*/
PetscErrorCode MatMkl_PardisoSetCntl(Mat F, PetscInt icntl, PetscInt ival)
{
  PetscFunctionBegin;
  PetscTryMethod(F, "MatMkl_PardisoSetCntl_C", (Mat, PetscInt, PetscInt), (F, icntl, ival));
  PetscFunctionReturn(PETSC_SUCCESS);
}
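/*
   A minimal usage sketch (illustrative only, not compiled here; the matrix A and vectors b, x are
   hypothetical and assumed to be an assembled MATSEQAIJ system). It obtains an MKL PARDISO LU factor,
   suggests 4 threads through control 65, factors, and solves:

     Mat           F;
     MatFactorInfo info;

     PetscCall(MatFactorInfoInitialize(&info));
     PetscCall(MatGetFactor(A, MATSOLVERMKL_PARDISO, MAT_FACTOR_LU, &F));
     PetscCall(MatMkl_PardisoSetCntl(F, 65, 4));
     PetscCall(MatLUFactorSymbolic(F, A, NULL, NULL, &info));
     PetscCall(MatLUFactorNumeric(F, A, &info));
     PetscCall(MatSolve(F, b, x));
     PetscCall(MatDestroy(&F));

   The NULL orderings are assumed acceptable since the symbolic phase above ignores its r and c arguments.
*/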

/*MC
  MATSOLVERMKL_PARDISO -  A matrix type providing direct solvers, LU, for
  `MATSEQAIJ` matrices via the external package MKL PARDISO
  <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2024-0/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>.

  Use `-pc_type lu` `-pc_factor_mat_solver_type mkl_pardiso` to use this direct solver.

  Options Database Keys:
+ -mat_mkl_pardiso_65 - Suggested number of threads to use within MKL PARDISO
. -mat_mkl_pardiso_66 - Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
. -mat_mkl_pardiso_67 - Indicates the actual matrix for the solution phase
. -mat_mkl_pardiso_68 - Message level information, use 1 to get detailed information on the solver options
. -mat_mkl_pardiso_69 - Defines the matrix type. IMPORTANT: When you set this flag, iparm parameters are going to be set to the default ones for the matrix type
. -mat_mkl_pardiso_1  - Use default values
. -mat_mkl_pardiso_2  - Fill-in reducing ordering for the input matrix
. -mat_mkl_pardiso_4  - Preconditioned CGS/CG
. -mat_mkl_pardiso_5  - User permutation
. -mat_mkl_pardiso_6  - Write solution on x
. -mat_mkl_pardiso_8  - Iterative refinement step
. -mat_mkl_pardiso_10 - Pivoting perturbation
. -mat_mkl_pardiso_11 - Scaling vectors
. -mat_mkl_pardiso_12 - Solve with transposed or conjugate transposed matrix A
. -mat_mkl_pardiso_13 - Improved accuracy using (non-) symmetric weighted matching
. -mat_mkl_pardiso_18 - Number of non-zero elements
. -mat_mkl_pardiso_19 - Report number of floating point operations
. -mat_mkl_pardiso_21 - Pivoting for symmetric indefinite matrices
. -mat_mkl_pardiso_24 - Parallel factorization control
. -mat_mkl_pardiso_25 - Parallel forward/backward solve control
. -mat_mkl_pardiso_27 - Matrix checker
. -mat_mkl_pardiso_31 - Partial solve and computing selected components of the solution vectors
. -mat_mkl_pardiso_34 - Optimal number of threads for conditional numerical reproducibility (CNR) mode
- -mat_mkl_pardiso_60 - Intel MKL PARDISO mode

  Level: beginner

  Notes:
  Use `-mat_mkl_pardiso_68 1` to display the number of threads the solver is using. MKL does not provide a way to directly access this
  information.

  For more information on the options, check the MKL PARDISO manual.

.seealso: [](ch_matrices), `Mat`, `MATSEQAIJ`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatMkl_PardisoSetCntl()`, `MATSOLVERMKL_CPARDISO`
M*/
static PetscErrorCode MatFactorGetSolverType_mkl_pardiso(Mat A, MatSolverType *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERMKL_PARDISO;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_EXTERN PetscErrorCode MatGetFactor_aij_mkl_pardiso(Mat A, MatFactorType ftype, Mat *F)
{
  Mat              B;
  Mat_MKL_PARDISO *mat_mkl_pardiso;
  PetscBool        isSeqAIJ, isSeqBAIJ, isSeqSBAIJ;

  PetscFunctionBegin;
  PetscCall(PetscObjectBaseTypeCompare((PetscObject)A, MATSEQAIJ, &isSeqAIJ));
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQBAIJ, &isSeqBAIJ));
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQSBAIJ, &isSeqSBAIJ));
  PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
  PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
  PetscCall(PetscStrallocpy("mkl_pardiso", &((PetscObject)B)->type_name));
  PetscCall(MatSetUp(B));

  PetscCall(PetscNew(&mat_mkl_pardiso));
  B->data = mat_mkl_pardiso;

  PetscCall(MatFactorMKL_PARDISOInitialize_Private(A, ftype, mat_mkl_pardiso));
  if (ftype == MAT_FACTOR_LU) {
    B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMKL_PARDISO;
    B->factortype            = MAT_FACTOR_LU;
    mat_mkl_pardiso->needsym = PETSC_FALSE;
    if (isSeqAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqaij;
    else if (isSeqBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqbaij;
    else {
      PetscCheck(!isSeqSBAIJ, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for MKL PARDISO LU factor with SEQSBAIJ format! Use MAT_FACTOR_CHOLESKY instead");
      SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for MKL PARDISO LU with %s format", ((PetscObject)A)->type_name);
    }
#if defined(PETSC_USE_COMPLEX)
    mat_mkl_pardiso->mtype = 13;
#else
    mat_mkl_pardiso->mtype = 11;
#endif
  } else {
    B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_AIJMKL_PARDISO;
    B->factortype                  = MAT_FACTOR_CHOLESKY;
    if (isSeqAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqaij;
    else if (isSeqBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqbaij;
    else if (isSeqSBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqsbaij;
    else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for PARDISO CHOLESKY with %s format", ((PetscObject)A)->type_name);

    mat_mkl_pardiso->needsym = PETSC_TRUE;
#if !defined(PETSC_USE_COMPLEX)
    if (A->spd == PETSC_BOOL3_TRUE) mat_mkl_pardiso->mtype = 2;
    else mat_mkl_pardiso->mtype = -2;
#else
    mat_mkl_pardiso->mtype = 6;
    PetscCheck(A->hermitian != PETSC_BOOL3_TRUE, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for MKL PARDISO CHOLESKY with Hermitian matrices! Use MAT_FACTOR_LU instead");
#endif
  }
  B->ops->destroy = MatDestroy_MKL_PARDISO;
  B->ops->view    = MatView_MKL_PARDISO;
  B->ops->getinfo = MatGetInfo_MKL_PARDISO;
  B->factortype   = ftype;
  B->assembled    = PETSC_TRUE;

  PetscCall(PetscFree(B->solvertype));
  PetscCall(PetscStrallocpy(MATSOLVERMKL_PARDISO, &B->solvertype));

  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mkl_pardiso));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MKL_PARDISO));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMkl_PardisoSetCntl_C", MatMkl_PardisoSetCntl_MKL_PARDISO));

  *F = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}
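/*
   The mtype values chosen above follow the MKL PARDISO matrix-type table: 11 real nonsymmetric, 13 complex
   nonsymmetric, 2 real symmetric positive definite, -2 real symmetric indefinite, and 6 complex symmetric.
*/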

PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_MKL_Pardiso(void)
{
  PetscFunctionBegin;
  PetscCall(MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_aij_mkl_pardiso));
  PetscCall(MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mkl_pardiso));
  PetscCall(MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQBAIJ, MAT_FACTOR_LU, MatGetFactor_aij_mkl_pardiso));
  PetscCall(MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQSBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mkl_pardiso));
  PetscFunctionReturn(PETSC_SUCCESS);
}