xref: /petsc/src/mat/impls/aij/seq/crl/crl.c (revision 7d0a6c19129e7069c8a40e210b34ed62989173db)
1 
2 /*
3   Defines a matrix-vector product for the MATSEQAIJCRL matrix class.
4   This class is derived from the MATSEQAIJ class and retains the
5   compressed row storage (aka Yale sparse matrix format) but augments
6   it with a column oriented storage that is more efficient for
7   matrix vector products on Vector machines.
8 
  CRL stands for constant row length; that is, the same number of columns
  is kept (padded with zeros) for each row of the sparse matrix.
11 */
12 #include "../src/mat/impls/aij/seq/crl/crl.h"
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatDestroy_SeqAIJCRL"
16 PetscErrorCode MatDestroy_SeqAIJCRL(Mat A)
17 {
18   PetscErrorCode ierr;
19   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL *) A->spptr;
20 
21   /* Free everything in the Mat_AIJCRL data structure. */
22   ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
23   ierr = PetscFree(A->spptr);CHKERRQ(ierr);
24 
25   ierr = PetscObjectChangeTypeName( (PetscObject)A, MATSEQAIJ);CHKERRQ(ierr);
26   ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
27   PetscFunctionReturn(0);
28 }
29 
30 PetscErrorCode MatDuplicate_AIJCRL(Mat A, MatDuplicateOption op, Mat *M)
31 {
32   PetscFunctionBegin;
33   SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot duplicate AIJCRL matrices yet");
34   PetscFunctionReturn(0);
35 }
36 
37 #undef __FUNCT__
38 #define __FUNCT__ "SeqAIJCRL_create_aijcrl"
39 PetscErrorCode SeqAIJCRL_create_aijcrl(Mat A)
40 {
41   Mat_SeqAIJ     *a = (Mat_SeqAIJ *)(A)->data;
42   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
43   PetscInt       m = A->rmap->n;  /* Number of rows in the matrix. */
44   PetscInt       *aj = a->j;  /* From the CSR representation; points to the beginning  of each row. */
45   PetscInt       i, j,rmax = a->rmax,*icols, *ilen = a->ilen;
46   MatScalar      *aa = a->a;
47   PetscScalar    *acols;
48   PetscErrorCode ierr;
49 
50   PetscFunctionBegin;
51   aijcrl->nz   = a->nz;
52   aijcrl->m    = A->rmap->n;
53   aijcrl->rmax = rmax;
54   ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
55   ierr = PetscMalloc2(rmax*m,PetscScalar,&aijcrl->acols,rmax*m,PetscInt,&aijcrl->icols);CHKERRQ(ierr);
56   acols = aijcrl->acols;
57   icols = aijcrl->icols;
58   for (i=0; i<m; i++) {
59     for (j=0; j<ilen[i]; j++) {
60       acols[j*m+i] = *aa++;
61       icols[j*m+i] = *aj++;
62     }
63     for (;j<rmax; j++) { /* empty column entries */
64       acols[j*m+i] = 0.0;
65       icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0;  /* handle case where row is EMPTY */
66     }
67   }
68   ierr = PetscInfo2(A,"Percentage of 0's introduced for vectorized multiply %G. Rmax= %D\n",1.0-((double)a->nz)/((double)(rmax*m)),rmax);
69   PetscFunctionReturn(0);
70 }
71 
72 extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
73 
74 #undef __FUNCT__
75 #define __FUNCT__ "MatAssemblyEnd_SeqAIJCRL"
76 PetscErrorCode MatAssemblyEnd_SeqAIJCRL(Mat A, MatAssemblyType mode)
77 {
78   PetscErrorCode ierr;
79   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
80 
81   PetscFunctionBegin;
82   a->inode.use = PETSC_FALSE;
83   ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
84   if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0);
85 
86   /* Now calculate the permutation and grouping information. */
87   ierr = SeqAIJCRL_create_aijcrl(A);CHKERRQ(ierr);
88   PetscFunctionReturn(0);
89 }
90 
91 #include "../src/mat/impls/aij/seq/crl/ftn-kernels/fmultcrl.h"
92 
93 #undef __FUNCT__
94 #define __FUNCT__ "MatMult_AIJCRL"
95 /*
96     Shared by both sequential and parallel versions of CRL matrix: MATMPIAIJCRL and MATSEQAIJCRL
97     - the scatter is used only in the parallel version
98 
99 */
100 PetscErrorCode MatMult_AIJCRL(Mat A,Vec xx,Vec yy)
101 {
102   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
103   PetscInt       m = aijcrl->m;  /* Number of rows in the matrix. */
104   PetscInt       rmax = aijcrl->rmax,*icols = aijcrl->icols;
105   PetscScalar    *acols = aijcrl->acols;
106   PetscErrorCode ierr;
107   PetscScalar    *x,*y;
108 #if !defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
109   PetscInt       i,j,ii;
110 #endif
111 
112 
113 #if defined(PETSC_HAVE_PRAGMA_DISJOINT)
114 #pragma disjoint(*x,*y,*aa)
115 #endif
116 
117   PetscFunctionBegin;
118   if (aijcrl->xscat) {
119     ierr = VecCopy(xx,aijcrl->xwork);CHKERRQ(ierr);
120     /* get remote values needed for local part of multiply */
121     ierr = VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
122     ierr = VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
123     xx = aijcrl->xwork;
124   };
125 
126   ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
127   ierr = VecGetArray(yy,&y);CHKERRQ(ierr);
128 
129 #if defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
130   fortranmultcrl_(&m,&rmax,x,y,icols,acols);
131 #else
132 
133   /* first column */
134   for (j=0; j<m; j++) {
135     y[j] = acols[j]*x[icols[j]];
136   }
137 
138   /* other columns */
139 #if defined(PETSC_HAVE_CRAY_VECTOR)
140 #pragma _CRI preferstream
141 #endif
142   for (i=1; i<rmax; i++) {
143     ii = i*m;
144 #if defined(PETSC_HAVE_CRAY_VECTOR)
145 #pragma _CRI prefervector
146 #endif
147     for (j=0; j<m; j++) {
148       y[j] = y[j] + acols[ii+j]*x[icols[ii+j]];
149     }
150   }
151 #if defined(PETSC_HAVE_CRAY_VECTOR)
152 #pragma _CRI ivdep
153 #endif
154 
155 #endif
156   ierr = PetscLogFlops(2.0*aijcrl->nz - m);CHKERRQ(ierr);
157   ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
158   ierr = VecRestoreArray(yy,&y);CHKERRQ(ierr);
159   PetscFunctionReturn(0);
160 }
161 
162 
163 /* MatConvert_SeqAIJ_SeqAIJCRL converts a SeqAIJ matrix into a
164  * SeqAIJCRL matrix.  This routine is called by the MatCreate_SeqAIJCRL()
165  * routine, but can also be used to convert an assembled SeqAIJ matrix
166  * into a SeqAIJCRL one. */
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatConvert_SeqAIJ_SeqAIJCRL"
/*
   MatConvert_SeqAIJ_SeqAIJCRL - Converts a SeqAIJ matrix to SEQAIJCRL.
   With MAT_INITIAL_MATRIX a duplicate of A is converted; with
   MAT_REUSE_MATRIX the caller's *newmat (typically A itself, as in
   MatCreate_SeqAIJCRL()) is converted in place.
*/
PetscErrorCode  MatConvert_SeqAIJ_SeqAIJCRL(Mat A,const MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;
  Mat_AIJCRL     *aijcrl;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }

  /* Allocate the CRL bookkeeping structure and hang it off spptr;
     its acols/icols arrays are filled by SeqAIJCRL_create_aijcrl(). */
  ierr = PetscNewLog(B,Mat_AIJCRL,&aijcrl);CHKERRQ(ierr);
  B->spptr = (void *) aijcrl;

  /* Set function pointers for methods that we inherit from AIJ but override. */
  B->ops->duplicate   = MatDuplicate_AIJCRL;
  B->ops->assemblyend = MatAssemblyEnd_SeqAIJCRL;
  B->ops->destroy     = MatDestroy_SeqAIJCRL;
  B->ops->mult        = MatMult_AIJCRL;

  /* If A has already been assembled, compute the permutation. */
  if (A->assembled) {
    ierr = SeqAIJCRL_create_aijcrl(B);CHKERRQ(ierr);
  }
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCRL);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}
EXTERN_C_END
199 

#undef __FUNCT__
#define __FUNCT__ "MatCreateSeqAIJCRL"
/*@C
   MatCreateSeqAIJCRL - Creates a sparse matrix of type SEQAIJCRL.
   This type inherits from AIJ, but stores some additional
   information that is used to allow better vectorization of
   the matrix-vector product. At the cost of increased storage, the AIJ formatted
   matrix can be copied to a format in which pieces of the matrix are
   stored in ELLPACK format, allowing the vectorized matrix multiply
   routine to use stride-1 memory accesses.  As with the AIJ type, it is
   important to preallocate matrix storage in order to get good assembly
   performance.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or PETSC_NULL

   Output Parameter:
.  A - the matrix

   Notes:
   If nnz is given then nz is ignored

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateMPIAIJPERM(), MatSetValues()
@*/
PetscErrorCode  MatCreateSeqAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  /* Sequential matrix: local and global dimensions coincide. */
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCRL);CHKERRQ(ierr);
  /* Calls the SeqAIJ preallocation implementation directly, since the
     underlying storage of a SEQAIJCRL matrix is plain SeqAIJ CSR. */
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
247 
248 
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatCreate_SeqAIJCRL"
/*
   MatCreate_SeqAIJCRL - Type constructor registered for MATSEQAIJCRL:
   first builds A as a plain SeqAIJ matrix, then converts it in place
   (MAT_REUSE_MATRIX with &A) to install the CRL data and overridden
   method table.
*/
PetscErrorCode  MatCreate_SeqAIJCRL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCRL(A,MATSEQAIJCRL,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END
262 
263