#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
   MatCreateMPIAIJMKL - Creates a sparse parallel matrix whose local
   portions are stored as SEQAIJMKL matrices (a matrix class that inherits
   from SEQAIJ but uses some operations provided by Intel MKL).  The same
   guidelines that apply to MPIAIJ matrices for preallocating the matrix
   storage apply here as well.

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
           This value should be the same as the local size used in creating the
           x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
           (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or NULL, if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e., 'm'.
           For matrices that you plan to factor you must leave room for the diagonal
           entry and put in the entry even if it is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
           submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or NULL, if o_nz is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e., 'm'.

   Output Parameter:
.  A - the matrix

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored.

   The m, n, M, and N parameters specify the size of the matrix and its
   partitioning across processors, while the d_nz, d_nnz, o_nz, and o_nnz
   parameters specify the approximate storage requirements for this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2, ... are the values of the input
   parameter 'm' on each process.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix which is obtained by extracting the part corresponding
   to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
   first row that belongs to the processor, and r2 is the last row belonging
   to this processor. This is a square m x m matrix. The remaining portion of
   the local submatrix (of size m x (N - n)) constitutes the OFF-DIAGONAL portion.
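
   For example (an illustrative partitioning, not a requirement of this routine),
   suppose the matrix is 8x8 and distributed over two processes with m = n = 4 on
   each. On the second process the DIAGONAL portion is the 4x4 block formed by
   rows 4-7 and columns 4-7, while the entries of rows 4-7 in columns 0-3 form
   the OFF-DIAGONAL portion.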

   When calling this routine with a single process communicator, a matrix of
   type SEQAIJMKL is returned.  If a matrix of type MPIAIJMKL is desired
   for this type of communicator, use the construction mechanism:
     MatCreate(...,&A); MatSetType(A,MATMPIAIJMKL); MatMPIAIJSetPreallocation(A,...);
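
   As a rough usage sketch (the local sizes and preallocation values below are
   purely illustrative), a square matrix with 5 rows per process and at most 3
   nonzeros per row in the diagonal block and 2 per row in the off-diagonal block
   could be created with
     MatCreateMPIAIJMKL(PETSC_COMM_WORLD,5,5,PETSC_DETERMINE,PETSC_DETERMINE,3,NULL,2,NULL,&A);
   followed by the usual MatSetValues() and MatAssemblyBegin()/MatAssemblyEnd() calls.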

   Options Database Keys:
.  -mat_aijmkl_no_spmv2 - disables use of the SpMV2 inspector-executor routines

   Level: intermediate

.seealso: MatCreate(), MatCreateSeqAIJMKL(), MatSetValues()
@*/
PetscErrorCode  MatCreateMPIAIJMKL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
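  /* On a communicator with more than one process, build the parallel MPIAIJMKL
     matrix; on a single-process communicator, fall back to the sequential
     SEQAIJMKL type, as documented above. */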
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIAIJMKL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQAIJMKL);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJMKL(Mat,MatType,MatReuse,Mat*);

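/* Preallocation routine for MPIAIJMKL: performs the standard MPIAIJ preallocation
   and then converts the local diagonal (b->A) and off-diagonal (b->B) blocks to
   SEQAIJMKL in place, so that operations on the local blocks can use the Intel MKL
   routines. */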
PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJMKL(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJSetPreallocation_MPIAIJ(B,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJMKL(b->A, MATSEQAIJMKL, MAT_INPLACE_MATRIX, &b->A);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJMKL(b->B, MATSEQAIJMKL, MAT_INPLACE_MATRIX, &b->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

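/* Converts an MPIAIJ matrix to MPIAIJMKL: duplicates the matrix if a new one is
   requested, changes the type name, and composes the MKL-aware preallocation
   routine above, which converts the local blocks when preallocation is performed. */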
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }

  ierr = PetscObjectChangeTypeName((PetscObject) B, MATMPIAIJMKL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJMKL);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}

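/* Constructor invoked when a matrix of type MATMPIAIJMKL is requested: builds a
   plain MPIAIJ matrix and then converts it to MPIAIJMKL in place. */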
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJMKL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatConvert_MPIAIJ_MPIAIJMKL(A,MATMPIAIJMKL,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATAIJMKL - MATAIJMKL = "AIJMKL" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJMKL when constructed with a single process communicator,
   and MATMPIAIJMKL otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.
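
   As a rough sketch of this (the size and preallocation arguments here are
   placeholders, not recommendations), one might write
     MatCreate(comm,&A); MatSetSizes(A,m,n,M,N); MatSetFromOptions(A);
     MatSeqAIJSetPreallocation(A,d_nz,NULL); MatMPIAIJSetPreallocation(A,d_nz,NULL,o_nz,NULL);
   and select this type at run time with the -mat_type aijmkl option.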

   Options Database Keys:
. -mat_type aijmkl - sets the matrix type to "AIJMKL" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateMPIAIJMKL(), MATSEQAIJMKL, MATMPIAIJMKL
M*/