
#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
   MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local
   portions are stored as SEQAIJPERM matrices (a matrix class that inherits
   from SEQAIJ but includes some optimizations to allow more effective
   vectorization).  The same guidelines that apply to MPIAIJ matrices for
   preallocating the matrix storage apply here as well.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
           This value should be the same as the local size used in creating the
           x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or NULL, if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e., 'm'.
           For matrices you plan to factor you must leave room for the diagonal entry
           and set it explicitly, even if its value is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
           submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or NULL, if o_nz is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e., 'm'.

   Output Parameter:
.  A - the matrix

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored.

   The m, n, M, and N parameters specify the size of the matrix and its
   partitioning across processors, while the d_nz, d_nnz, o_nz, and o_nnz
   parameters specify the approximate storage requirements for this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2, ... are the values of the input
   parameter 'm' on each process.
   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix which is obtained by extracting the part corresponding
   to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
   first row that belongs to the processor, and r2 is the last row belonging
   to this processor. This is a square m x m matrix. The remaining portion of
   the local submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

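   For illustration (a concrete example assumed here, not taken from the
   interface definition): suppose a process owns rows and columns 0-2 of a
   6x6 matrix, and its row 1 has nonzeros in columns 0, 2, 4, and 5. Columns
   0 and 2 lie in the DIAGONAL block and columns 4 and 5 in the OFF-DIAGONAL
   block, so d_nnz[1] = 2 and o_nnz[1] = 2 on that process.
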
   If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

   When calling this routine with a single process communicator, a matrix of
   type SEQAIJPERM is returned.  If a matrix of type MPIAIJPERM is desired
   for this type of communicator, use the construction mechanism:
     MatCreate(...,&A); MatSetType(A,MATMPIAIJPERM); MatMPIAIJSetPreallocation(A,...);

   By default, this format uses inodes (identical nodes) when possible.
   We search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

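   Example usage (a sketch only; the local sizes and per-row nonzero counts
   below are illustrative placeholders, not values required by this routine):

     Mat            A;
     PetscErrorCode ierr;
     ierr = MatCreateMPIAIJPERM(PETSC_COMM_WORLD,5,5,PETSC_DETERMINE,PETSC_DETERMINE,3,NULL,2,NULL,&A);CHKERRQ(ierr);
     ierr = MatSetValue(A,0,0,1.0,INSERT_VALUES);CHKERRQ(ierr);
     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatDestroy(&A);CHKERRQ(ierr);
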
   Options Database Keys:
+  -mat_no_inode  - Do not use inodes
-  -mat_inode_limit <limit> - Sets inode limit (max limit=5)

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJPERM(), MatSetValues()
@*/
PetscErrorCode  MatCreateMPIAIJPERM(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIAIJPERM);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQAIJPERM);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJPERM(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
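  /* Perform the usual MPIAIJ preallocation, then convert the local diagonal
     block (b->A) and off-diagonal block (b->B) to SEQAIJPERM in place so that
     both pieces use the permuted, vectorization-friendly storage. */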
  ierr = MatMPIAIJSetPreallocation_MPIAIJ(B,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJPERM(b->A, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->A);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJPERM(b->B, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;

  PetscFunctionBegin;
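  /* For MAT_INITIAL_MATRIX, work on a copy of A; for MAT_INPLACE_MATRIX the
     conversion modifies *newmat (i.e. A itself) directly. */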
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }

  ierr = PetscObjectChangeTypeName((PetscObject) B, MATMPIAIJPERM);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJPERM);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJPERM(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
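  /* Build a standard MPIAIJ matrix first, then convert it in place to
     MPIAIJPERM; the conversion retags the type name and composes the
     PERM-specific preallocation routine. */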
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatConvert_MPIAIJ_MPIAIJPERM(A,MATMPIAIJPERM,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATAIJPERM - MATAIJPERM = "aijperm" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJPERM when constructed with a single process communicator,
   and MATMPIAIJPERM otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity, as sketched below.

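   For example (a sketch only; the sizes and per-row nonzero counts are
   illustrative placeholders):

     Mat            A;
     PetscErrorCode ierr;
     ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,5,5,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
     ierr = MatSetType(A,MATAIJPERM);CHKERRQ(ierr);
     ierr = MatSeqAIJSetPreallocation(A,3,NULL);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,3,NULL,2,NULL);CHKERRQ(ierr);
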
   Options Database Keys:
. -mat_type aijperm - sets the matrix type to "aijperm" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateMPIAIJPERM(), MATSEQAIJPERM, MATMPIAIJPERM
M*/

161