#define PETSCMAT_DLL

/*
  Defines a matrix-vector product for the MATSEQAIJCRL matrix class.
  This class is derived from the MATSEQAIJ class and retains the
  compressed row storage (aka Yale sparse matrix format) but augments
  it with a column-oriented storage that is more efficient for
  matrix-vector products on vector machines.

  CRL stands for constant row length; that is, the same number of columns
  (padded with zeros as needed) is kept for each row of the sparse matrix.
*/
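/*
   A small worked example of the CRL layout (illustrative; not taken from the
   original source).  For an m=3 matrix with rmax=3,

       row 0:  a00   .   a02        2 stored entries
       row 1:   .   a11   .         1 stored entry
       row 2:  a20  a21  a22        3 stored entries

   the stored entries are packed column-major by "slot" j (0 <= j < rmax),
   padded out to rmax entries per row:

       acols = [ a00 a11 a20 | a02  0  a21 |  0   0  a22 ]
       icols = [  0   1   0  |  2   1   1  |  2   1   2  ]

   A padded slot carries an explicit zero value and repeats the column index
   of the previous slot, so the multiply reads a valid x[] entry and adds
   nothing.  Here nz=6 and rmax*m=9, so padding introduces 1 - 6/9 = 33%
   explicit zeros, the quantity reported by the PetscInfo2() call in
   SeqAIJCRL_create_aijcrl() below.
*/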
#include "../src/mat/impls/aij/seq/crl/crl.h"

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqAIJCRL"
PetscErrorCode MatDestroy_SeqAIJCRL(Mat A)
{
  PetscErrorCode ierr;
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL *) A->spptr;

  PetscFunctionBegin;
  /* Free everything in the Mat_AIJCRL data structure. */
  ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
  ierr = PetscFree(aijcrl);CHKERRQ(ierr);
  A->spptr = 0;

  /* Change the type name back to SeqAIJ and call its destructor. */
  ierr = PetscObjectChangeTypeName((PetscObject)A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_AIJCRL"
PetscErrorCode MatDuplicate_AIJCRL(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscFunctionBegin;
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot duplicate AIJCRL matrices yet");
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "SeqAIJCRL_create_aijcrl"
PetscErrorCode SeqAIJCRL_create_aijcrl(Mat A)
{
  Mat_SeqAIJ     *a = (Mat_SeqAIJ *)(A)->data;
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt       m = A->rmap->n;  /* Number of rows in the matrix. */
  PetscInt       *aj = a->j;  /* Column indices from the CSR representation. */
  PetscInt       i, j,rmax = a->rmax,*icols, *ilen = a->ilen;
  MatScalar      *aa = a->a;
  PetscScalar    *acols;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  aijcrl->nz   = a->nz;
  aijcrl->m    = A->rmap->n;
  aijcrl->rmax = rmax;
  ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
  ierr = PetscMalloc2(rmax*m,PetscScalar,&aijcrl->acols,rmax*m,PetscInt,&aijcrl->icols);CHKERRQ(ierr);
  acols = aijcrl->acols;
  icols = aijcrl->icols;
  for (i=0; i<m; i++) {
    for (j=0; j<ilen[i]; j++) { /* copy the stored entries of row i, column-major by slot */
      acols[j*m+i] = *aa++;
      icols[j*m+i] = *aj++;
    }
    for (;j<rmax; j++) { /* pad the remaining slots with explicit zeros */
      acols[j*m+i] = 0.0;
      icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0;  /* handle case where row is EMPTY */
    }
  }
  ierr = PetscInfo2(A,"Percentage of 0's introduced for vectorized multiply %G. Rmax= %D\n",1.0-((double)a->nz)/((double)(rmax*m)),rmax);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);

#undef __FUNCT__
#define __FUNCT__ "MatAssemblyEnd_SeqAIJCRL"
PetscErrorCode MatAssemblyEnd_SeqAIJCRL(Mat A, MatAssemblyType mode)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  a->inode.use = PETSC_FALSE;  /* CRL supplies its own multiply kernel, so the inode kernels are not used */
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
  if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0);

  /* Now build the CRL arrays from the assembled AIJ data. */
  ierr = SeqAIJCRL_create_aijcrl(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#include "../src/mat/impls/aij/seq/crl/ftn-kernels/fmultcrl.h"

#undef __FUNCT__
#define __FUNCT__ "MatMult_AIJCRL"
/*
    Shared by both the sequential and parallel versions of the CRL matrix type,
    MATMPIAIJCRL and MATSEQAIJCRL; the scatter is used only in the parallel version.
*/
PetscErrorCode MatMult_AIJCRL(Mat A,Vec xx,Vec yy)
{
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt       m = aijcrl->m;  /* Number of rows in the matrix. */
  PetscInt       rmax = aijcrl->rmax,*icols = aijcrl->icols;
  PetscScalar    *acols = aijcrl->acols;
  PetscErrorCode ierr;
  PetscScalar    *x,*y;
#if !defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
  PetscInt       i,j,ii;
#endif

#if defined(PETSC_HAVE_PRAGMA_DISJOINT)
#pragma disjoint(*x,*y,*acols)
#endif

  PetscFunctionBegin;
  if (aijcrl->xscat) {
    ierr = VecCopy(xx,aijcrl->xwork);CHKERRQ(ierr);
    /* get remote values needed for local part of multiply */
    ierr = VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    xx = aijcrl->xwork;
  }

  ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
  ierr = VecGetArray(yy,&y);CHKERRQ(ierr);

#if defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
  fortranmultcrl_(&m,&rmax,x,y,icols,acols);
#else

  /* first column */
  for (j=0; j<m; j++) {
    y[j] = acols[j]*x[icols[j]];
  }

  /* other columns */
#if defined(PETSC_HAVE_CRAYC)
#pragma _CRI preferstream
#endif
  for (i=1; i<rmax; i++) {
    ii = i*m;
#if defined(PETSC_HAVE_CRAYC)
#pragma _CRI prefervector
#endif
    for (j=0; j<m; j++) {
      y[j] = y[j] + acols[ii+j]*x[icols[ii+j]];
    }
  }
#if defined(PETSC_HAVE_CRAYC)
#pragma _CRI ivdep
#endif

#endif
  ierr = PetscLogFlops(2.0*aijcrl->nz - m);CHKERRQ(ierr);
  ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
  ierr = VecRestoreArray(yy,&y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
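
/*
   For contrast (an illustrative sketch, not part of the original source): a
   conventional CSR multiply over the same data walks each row through the
   ai/aj/aa arrays,

       for (i=0; i<m; i++) {
         y[i] = 0.0;
         for (k=ai[i]; k<ai[i+1]; k++) y[i] += aa[k]*x[aj[k]];
       }

   so the inner trip count varies from row to row.  The CRL kernel above
   instead runs every inner loop for exactly m iterations with stride-1
   access to acols and icols, which is what permits vectorization across rows.
*/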

/* MatConvert_SeqAIJ_SeqAIJCRL converts a SeqAIJ matrix into a
 * SeqAIJCRL matrix.  This routine is called by the MatCreate_SeqAIJCRL()
 * routine, but can also be used to convert an assembled SeqAIJ matrix
 * into a SeqAIJCRL one. */
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatConvert_SeqAIJ_SeqAIJCRL"
PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_SeqAIJ_SeqAIJCRL(Mat A,const MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;
  Mat_AIJCRL     *aijcrl;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }

  ierr = PetscNewLog(B,Mat_AIJCRL,&aijcrl);CHKERRQ(ierr);
  B->spptr = (void *) aijcrl;

  /* Set function pointers for methods that we inherit from AIJ but override. */
  B->ops->duplicate   = MatDuplicate_AIJCRL;
  B->ops->assemblyend = MatAssemblyEnd_SeqAIJCRL;
  B->ops->destroy     = MatDestroy_SeqAIJCRL;
  B->ops->mult        = MatMult_AIJCRL;

  /* If A has already been assembled, build the CRL arrays now. */
  if (A->assembled) {
    ierr = SeqAIJCRL_create_aijcrl(B);CHKERRQ(ierr);
  }
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCRL);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}
EXTERN_C_END
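
/*
   Usage sketch (illustrative, not part of the original source; A is assumed
   to be an assembled SeqAIJ matrix defined elsewhere):

       Mat B;
       ierr = MatConvert(A,MATSEQAIJCRL,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);

   or, converting A in place as MatCreate_SeqAIJCRL() below does,

       ierr = MatConvert(A,MATSEQAIJCRL,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
*/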

#undef __FUNCT__
#define __FUNCT__ "MatCreateSeqAIJCRL"
/*@C
   MatCreateSeqAIJCRL - Creates a sparse matrix of type SEQAIJCRL.
   This type inherits from AIJ, but stores some additional information
   that is used to allow better vectorization of the matrix-vector
   product.  At the cost of increased storage, the AIJ formatted matrix
   can be copied to a format in which pieces of the matrix are stored
   in ELLPACK format, allowing the vectorized matrix multiply routine
   to use stride-1 memory accesses.  As with the AIJ type, it is
   important to preallocate matrix storage in order to get good
   assembly performance.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or PETSC_NULL

   Output Parameter:
.  A - the matrix

   Notes:
   If nnz is given then nz is ignored

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateMPIAIJPERM(), MatSetValues()
@*/
PetscErrorCode PETSCMAT_DLLEXPORT MatCreateSeqAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCRL);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
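
/*
   Usage sketch (illustrative, not part of the original source; assumes a
   square matrix of order n with at most 3 nonzeros per row, e.g. tridiagonal):

       Mat A;
       ierr = MatCreateSeqAIJCRL(PETSC_COMM_SELF,n,n,3,PETSC_NULL,&A);CHKERRQ(ierr);
       ...set entries with MatSetValues()...
       ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatMult(A,x,y);CHKERRQ(ierr);

   MatAssemblyEnd() triggers MatAssemblyEnd_SeqAIJCRL(), which (re)builds the
   acols/icols arrays, so any later MatSetValues() calls must be followed by
   another assembly before the matrix is applied again.
*/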

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatCreate_SeqAIJCRL"
PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_SeqAIJCRL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCRL(A,MATSEQAIJCRL,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END