static char help[] = "Test repeated LU factorizations. Used for checking memory leak\n\
  -m <size> : problem size\n\
  -mat_nonsym : use nonsymmetric matrix (default is symmetric)\n\n";

#include <petscmat.h>
int main(int argc,char **args)
{
  Mat            C,F;              /* matrix and its LU factor */
  Vec            x,u,b;            /* approx solution, exact solution, RHS */
  PetscReal      norm;             /* norm of solution error */
  PetscScalar    v,none = -1.0;
  PetscInt       I,J,ldim,low,high,iglobal,Istart,Iend;
  PetscInt       i,j,m = 3,n = 2;
  PetscMPIInt    size,rank;
  PetscBool      mat_nonsymmetric;
  PetscInt       its_max;
  MatFactorInfo  factinfo;
  IS             perm,iperm;

  CHKERRQ(PetscInitialize(&argc,&args,(char*)0,help));
  CHKERRQ(PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL));
  CHKERRMPI(MPI_Comm_rank(PETSC_COMM_WORLD,&rank));
  CHKERRMPI(MPI_Comm_size(PETSC_COMM_WORLD,&size));
  n    = 2*size;
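  /* With n = 2*size the global system has m*n rows, so PETSC_DECIDE assigns each rank a contiguous block of exactly 2*m rows. */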

  /*
     Set flag if we are doing a nonsymmetric problem; the default is symmetric.
  */
  CHKERRQ(PetscOptionsHasName(NULL,NULL,"-mat_nonsym",&mat_nonsymmetric));

  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
  CHKERRQ(MatCreate(PETSC_COMM_WORLD,&C));
  CHKERRQ(MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n));
  CHKERRQ(MatSetFromOptions(C));
  CHKERRQ(MatSetUp(C));
  CHKERRQ(MatGetOwnershipRange(C,&Istart,&Iend));

  /*
     Set matrix entries in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  for (I=Istart; I<Iend; I++) {
    v = -1.0; i = I/n; j = I - i*n;
    if (i>0)   {J = I - n; CHKERRQ(MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES));}
    if (i<m-1) {J = I + n; CHKERRQ(MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES));}
    if (j>0)   {J = I - 1; CHKERRQ(MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES));}
    if (j<n-1) {J = I + 1; CHKERRQ(MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES));}
    v = 4.0; CHKERRQ(MatSetValues(C,1,&I,1,&I,&v,ADD_VALUES));
  }
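  /* The loop above builds the standard 2D 5-point stencil on an m x n grid: row I corresponds to grid point (i,j), with 4 on the diagonal and -1 for each existing north/south/east/west neighbor. */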

  /*
     Make the matrix nonsymmetric if desired
  */
  if (mat_nonsymmetric) {
    for (I=Istart; I<Iend; I++) {
      v = -1.5; i = I/n;
      if (i>1)   {J = I-n-1; CHKERRQ(MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES));}
    }
  } else {
    CHKERRQ(MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE));
    CHKERRQ(MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE));
  }
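  /* These options are hints: PETSc can pick symmetric orderings and solvers without testing symmetry, and MAT_SYMMETRY_ETERNAL promises the property survives later changes to the matrix values. */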

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  CHKERRQ(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
  CHKERRQ(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));

  /*
     Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  CHKERRQ(VecCreate(PETSC_COMM_WORLD,&u));
  CHKERRQ(VecSetSizes(u,PETSC_DECIDE,m*n));
  CHKERRQ(VecSetFromOptions(u));
  CHKERRQ(VecDuplicate(u,&b));
  CHKERRQ(VecDuplicate(b,&x));
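  /* VecDuplicate() copies the type and parallel layout of the template vector, not its values, so b and x inherit u's distribution. */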

  /*
     Currently, all parallel PETSc vectors are partitioned by
     contiguous chunks across the processors.  Determine which
     range of entries is locally owned.
  */
  CHKERRQ(VecGetOwnershipRange(x,&low,&high));

  /*
    Set elements within the exact solution vector in parallel.
     - Each processor needs to insert only elements that it owns
       locally (but any non-local entries will be sent to the
       appropriate processor during vector assembly).
     - Always specify global locations of vector entries.
  */
  CHKERRQ(VecGetLocalSize(x,&ldim));
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    CHKERRQ(VecSetValues(u,1,&iglobal,&v,INSERT_VALUES));
  }
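  /* Each rank fills its owned block of u with the distinct values i + 100*rank, giving a known, rank-dependent exact solution to check against. */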

  /*
     Assemble vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transition,
     by placing code between these two statements.
  */
  CHKERRQ(VecAssemblyBegin(u));
  CHKERRQ(VecAssemblyEnd(u));

  /* Compute right-hand-side vector */
  CHKERRQ(MatMult(C,u,b));

  CHKERRQ(MatGetOrdering(C,MATORDERINGNATURAL,&perm,&iperm));
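  /* MATORDERINGNATURAL yields identity row/column permutations; perm and iperm are passed to the symbolic LU factorization below. */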
  CHKERRQ(MatFactorInfoInitialize(&factinfo)); /* zero factinfo so the factorization does not read uninitialized options */
  its_max = 2000;
  for (i=0; i<its_max; i++) {
    CHKERRQ(MatGetFactor(C,MATSOLVERPETSC,MAT_FACTOR_LU,&F));
    CHKERRQ(MatLUFactorSymbolic(F,C,perm,iperm,&factinfo));
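    /* The inner loop below runs once; its bound can be raised to repeat the numeric factorization on the same symbolic factorization. */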
    for (j=0; j<1; j++) {
      CHKERRQ(MatLUFactorNumeric(F,C,&factinfo));
    }
    CHKERRQ(MatSolve(F,b,x));
    CHKERRQ(MatDestroy(&F));
  }
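  /* Creating and destroying the factor F in every one of the its_max passes is the point of the test: any memory leaked per factorization accumulates 2000 times and becomes easy to spot. */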
  CHKERRQ(ISDestroy(&perm));
  CHKERRQ(ISDestroy(&iperm));

  /* Check the error */
  CHKERRQ(VecAXPY(x,none,u));
  CHKERRQ(VecNorm(x,NORM_2,&norm));
  CHKERRQ(PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g\n",(double)norm));

  /* Free work space. */
  CHKERRQ(VecDestroy(&u));
  CHKERRQ(VecDestroy(&x));
  CHKERRQ(VecDestroy(&b));
  CHKERRQ(MatDestroy(&C));
  CHKERRQ(PetscFinalize());
  return 0;
}