/* /petsc/src/mat/impls/aij/mpi/mpiviennacl/mpiaijviennacl.cxx (revision bebe2cf65d55febe21a5af8db2bd2e168caaa2e7) */
#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>   /*I "petscmat.h" I*/
#include <../src/mat/impls/aij/seq/seqviennacl/viennaclmatimpl.h>

#undef __FUNCT__
#define __FUNCT__ "MatMPIAIJSetPreallocation_MPIAIJViennaCL"
PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJViennaCL(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
  if (!B->preallocated) {
    /* Explicitly create the two MATSEQAIJVIENNACL matrices: b->A holds the
       diagonal block of the local rows, b->B the off-diagonal block. */
    ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
    ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
    ierr = MatSetType(b->A,MATSEQAIJVIENNACL);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
    ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
    ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
    ierr = MatSetType(b->B,MATSEQAIJVIENNACL);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);
  }
  ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
  B->preallocated = PETSC_TRUE;
  PetscFunctionReturn(0);
}
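
/* A minimal usage sketch (illustrative only; error checking is omitted, and
   the global size 100 and the per-row counts 5 and 2 are assumptions for the
   example).  Callers reach the routine above through the public
   MatMPIAIJSetPreallocation() interface once the matrix type is set:

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
     MatSetType(A,MATMPIAIJVIENNACL);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
     MatDestroy(&A);
*/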

#undef __FUNCT__
#define __FUNCT__ "MatCreateVecs_MPIAIJViennaCL"
PetscErrorCode  MatCreateVecs_MPIAIJViennaCL(Mat mat,Vec *right,Vec *left)
{
  PetscErrorCode ierr;
  PetscInt       rbs,cbs;

  PetscFunctionBegin;
  ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr);
  if (right) {
    /* The right vector conforms to the matrix column layout. */
    ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
    ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr);
    ierr = VecSetType(*right,VECVIENNACL);CHKERRQ(ierr);
    ierr = VecSetLayout(*right,mat->cmap);CHKERRQ(ierr);
  }
  if (left) {
    /* The left vector conforms to the matrix row layout. */
    ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
    ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr);
    ierr = VecSetType(*left,VECVIENNACL);CHKERRQ(ierr);
    ierr = VecSetLayout(*left,mat->rmap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
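
/* A minimal usage sketch (illustrative only; error checking is omitted, and A
   is assumed to be an assembled MATMPIAIJVIENNACL matrix).  MatCreateVecs()
   dispatches to the routine above and yields VECVIENNACL vectors that conform
   to the matrix layouts:

     Vec x,y;
     MatCreateVecs(A,&x,&y);   right vector x, left vector y
     MatMult(A,x,y);           y = A*x, computed with ViennaCL
*/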
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJViennaCL"
PetscErrorCode MatDestroy_MPIAIJViennaCL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* No ViennaCL-specific state to free; delegate to the MPIAIJ destructor. */
  ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreate_MPIAIJViennaCL"
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJViennaCL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr);
  /* Override preallocation, vector creation, and destruction with the ViennaCL variants. */
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJViennaCL);CHKERRQ(ierr);
  A->ops->getvecs = MatCreateVecs_MPIAIJViennaCL;
  A->ops->destroy = MatDestroy_MPIAIJViennaCL;  /* without this, MatDestroy_MPIAIJViennaCL() above is never invoked */

  ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJVIENNACL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
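
/* Illustrative only (error checking omitted; m and n stand for global sizes
   chosen by the caller): besides MatCreateAIJViennaCL() below, this
   constructor is reached through the type system, e.g. from the options
   database:

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m,n);
     MatSetFromOptions(A);                         run with -mat_type mpiaijviennacl
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/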
/*@
   MatCreateAIJViennaCL - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format).  This matrix will ultimately be pushed down
   to GPUs and use the ViennaCL library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameters d_nz and o_nz (or the arrays d_nnz and o_nnz).  By setting these
   parameters accurately, performance during matrix assembly can be increased
   substantially.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
.  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  d_nz - number of nonzeros per row in the diagonal portion of the local submatrix (same for all local rows)
.  d_nnz - array containing the number of nonzeros per row in the diagonal portion of the local submatrix (possibly different for each row), or NULL
.  o_nz - number of nonzeros per row in the off-diagonal portion of the local submatrix (same for all local rows)
-  o_nnz - array containing the number of nonzeros per row in the off-diagonal portion of the local submatrix (possibly different for each row), or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation() or
   MatMPIAIJSetPreallocation()]

   Notes:
   If d_nnz (o_nnz) is given then d_nz (o_nz) is ignored.

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
   Set d_nz=PETSC_DEFAULT, o_nz=PETSC_DEFAULT, d_nnz=NULL, and o_nnz=NULL for PETSc
   to control dynamic memory allocation.  For large problems you MUST preallocate
   memory or you will get TERRIBLE performance; see the users' manual chapter on
   matrices.
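
   Example usage (the global size 100 and the per-row counts 5 and 2 are
   illustrative assumptions, not recommendations):
.vb
   Mat A;
   MatCreateAIJViennaCL(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,5,NULL,2,NULL,&A);
   // set entries with MatSetValues(), then assemble
   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
   MatDestroy(&A);
.ve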

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatCreateAIJCUSP(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJVIENNACL, MATAIJVIENNACL
@*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateAIJViennaCL"
PetscErrorCode  MatCreateAIJViennaCL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* Dispatch on the communicator size: MPI type and preallocation for parallel
     runs, sequential type and preallocation otherwise. */
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIAIJVIENNACL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQAIJVIENNACL);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*M
   MATAIJVIENNACL = "aijviennacl" - MATMPIAIJVIENNACL = "mpiaijviennacl" - A matrix type to be used for sparse matrices.

   A matrix type (CSR format) whose data resides on GPUs.
   All matrix calculations are performed using the ViennaCL library.

   This matrix type is identical to MATSEQAIJVIENNACL when constructed with a single process communicator,
   and MATMPIAIJVIENNACL otherwise.  As a result, MatSeqAIJSetPreallocation() is supported for single
   process communicators, and MatMPIAIJSetPreallocation() is supported for communicators controlling
   multiple processes.  It is recommended that you call both of the above preallocation routines for
   simplicity, as in the sketch below.
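
   For example (error checking omitted; the per-row counts 5 and 2 are
   illustrative assumptions):
.vb
   MatSetType(A,MATAIJVIENNACL);
   MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);   // used on multiple-process communicators
   MatSeqAIJSetPreallocation(A,5,NULL);          // used on single-process communicators
.ve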

   Options Database Key:
.  -mat_type mpiaijviennacl - sets the matrix type to "mpiaijviennacl" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateAIJViennaCL(), MATSEQAIJVIENNACL, MatCreateSeqAIJViennaCL()
M*/