1 /*$Id: nn.c,v 1.13 2001/08/07 03:03:41 balay Exp $*/
2 
3 #include "src/ksp/pc/impls/is/nn/nn.h"
4 
5 /* -------------------------------------------------------------------------- */
6 /*
7    PCSetUp_NN - Prepares for the use of the NN preconditioner
8                     by setting data structures and options.
9 
10    Input Parameter:
11 .  pc - the preconditioner context
12 
13    Application Interface Routine: PCSetUp()
14 
15    Notes:
16    The interface routine PCSetUp() is not usually called directly by
17    the user, but instead is called by PCApply() if necessary.
18 */
19 #undef __FUNCT__
20 #define __FUNCT__ "PCSetUp_NN"
21 static int PCSetUp_NN(PC pc)
22 {
23   int ierr;
24 
25   PetscFunctionBegin;
26   if (!pc->setupcalled) {
27     /* Set up the common "iterative substructuring" (PC_IS) data structures */
28     ierr = PCISSetUp(pc);CHKERRQ(ierr);
29     /* Create the coarse matrix. */
30     ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
31   }
32   PetscFunctionReturn(0);
33 }
34 
35 /* -------------------------------------------------------------------------- */
36 /*
37    PCApply_NN - Applies the NN preconditioner to a vector.
38 
39    Input Parameters:
40 .  pc - the preconditioner context
41 .  r - input vector (global)
42 
43    Output Parameter:
44 .  z - output vector (global)
45 
46    Application Interface Routine: PCApply()
47  */
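/*
   A sketch of the overall operation, in the notation of the step-by-step comments
   below (here B_I^{(i)} denotes the local interior (Dirichlet) solve and M_{NN}^{-1}
   the balanced Neumann-Neumann interface preconditioner applied by
   PCNNApplyInterfacePreconditioner()):

      $ z_B = M_{NN}^{-1} ( r_B - \sum_i \tilde R_i^T A_{BI}^{(i)} B_I^{(i)} r_I^{(i)} ) $
      $ z_I^{(i)} = B_I^{(i)} ( r_I^{(i)} - A_{IB}^{(i)} \tilde R_i z_B ) $
*/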
48 #undef __FUNCT__
49 #define __FUNCT__ "PCApply_NN"
50 static int PCApply_NN(PC pc,Vec r,Vec z)
51 {
52   PC_IS       *pcis = (PC_IS*)(pc->data);
53   int         ierr;
54   PetscScalar m_one = -1.0;
55   Vec         w = pcis->vec1_global;
56 
57   PetscFunctionBegin;
58 
59   /*
60     Dirichlet solvers.
61     Computing $ B_I^{(i)}r_I^{(i)} $ on each processor.
62     The local results are stored in vec2_D.
63   */
64   ierr = VecScatterBegin(r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
65   ierr = VecScatterEnd  (r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
66   ierr = KSPSetRhs(pcis->ksp_D,pcis->vec1_D);CHKERRQ(ierr);
67   ierr = KSPSetSolution(pcis->ksp_D,pcis->vec2_D);CHKERRQ(ierr);
68   ierr = KSPSolve(pcis->ksp_D);CHKERRQ(ierr);
69 
70   /*
71     Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $ .
72     Storing the result in the interface portion of the global vector w.
73   */
74   ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
75   ierr = VecScale(&m_one,pcis->vec1_B);CHKERRQ(ierr);
76   ierr = VecCopy(r,w);CHKERRQ(ierr);
77   ierr = VecScatterBegin(pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
78   ierr = VecScatterEnd  (pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
79 
80   /*
81     Apply the interface preconditioner
82   */
83   ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
84                                           pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);
85 
86   /*
87     Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $
88     The result is stored in vec1_D.
89   */
90   ierr = VecScatterBegin(z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
91   ierr = VecScatterEnd  (z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
92   ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);
93 
94   /*
95     Dirichlet solvers.
96     Computing $ B_I^{(i)}t_I^{(i)} $ and inserting into the interior part of the global
97     vector z the blocks $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $.
98   */
99   ierr = VecScatterBegin(pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
100   ierr = VecScatterEnd  (pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
101   ierr = KSPSetRhs(pcis->ksp_D,pcis->vec1_D);CHKERRQ(ierr);
102   ierr = KSPSetSolution(pcis->ksp_D,pcis->vec2_D);CHKERRQ(ierr);
103   ierr = KSPSolve(pcis->ksp_D);CHKERRQ(ierr);
104   ierr = VecScale(&m_one,pcis->vec2_D);CHKERRQ(ierr);
105   ierr = VecScatterBegin(pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
106   ierr = VecScatterEnd  (pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
107 
108   PetscFunctionReturn(0);
109 }
110 
111 /* -------------------------------------------------------------------------- */
112 /*
113    PCDestroy_NN - Destroys the private context for the NN preconditioner
114    that was created with PCCreate_NN().
115 
116    Input Parameter:
117 .  pc - the preconditioner context
118 
119    Application Interface Routine: PCDestroy()
120 */
121 #undef __FUNCT__
122 #define __FUNCT__ "PCDestroy_NN"
123 static int PCDestroy_NN(PC pc)
124 {
125   PC_NN *pcnn = (PC_NN*)pc->data;
126   int   ierr;
127 
128   PetscFunctionBegin;
129 
130   ierr = PCISDestroy(pc);CHKERRQ(ierr);
131 
132   if (pcnn->coarse_mat)  {ierr = MatDestroy(pcnn->coarse_mat);CHKERRQ(ierr);}
133   if (pcnn->coarse_x)    {ierr = VecDestroy(pcnn->coarse_x);CHKERRQ(ierr);}
134   if (pcnn->coarse_b)    {ierr = VecDestroy(pcnn->coarse_b);CHKERRQ(ierr);}
135   if (pcnn->ksp_coarse)  {ierr = KSPDestroy(pcnn->ksp_coarse);CHKERRQ(ierr);}
136   if (pcnn->DZ_IN) {
137     if (pcnn->DZ_IN[0]) {ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);}
138     ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
139   }
140 
141   /*
142       Free the private data structure that was hanging off the PC
143   */
144   ierr = PetscFree(pcnn);CHKERRQ(ierr);
145   PetscFunctionReturn(0);
146 }
147 
148 /* -------------------------------------------------------------------------- */
149 /*MC
150    PCNN - Balancing Neumann-Neumann for scalar elliptic PDEs.
151 
152    Options Database Keys:
153 +    -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
154                                        (this skips the first coarse grid solve in the preconditioner)
155 -    -pc_nn_turn_off_second_balancing - do not balance the solution of the local Neumann problems
156                                        (this skips the second coarse grid solve in the preconditioner)
157 
158    Level: intermediate
159 
160    Notes: The matrix used with this preconditioner must be of type MATIS
161 
162           Options for the coarse grid KSP and preconditioner can be set with -coarse_ksp_xxx and -coarse_pc_xxx
163 
164    Contributed by Paulo Goldfeld
165 
166 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, MATIS
167 M*/
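/*
   A minimal usage sketch (illustrative only; it assumes a MATIS matrix A and vectors
   b and x that have been created and assembled elsewhere, and follows the KSP calling
   sequence used in this file):

      KSP ksp;
      PC  pc;
      int ierr;
      ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
      ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
      ierr = PCSetType(pc,PCNN);CHKERRQ(ierr);
      ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
      ierr = KSPSetRhs(ksp,b);CHKERRQ(ierr);
      ierr = KSPSetSolution(ksp,x);CHKERRQ(ierr);
      ierr = KSPSolve(ksp);CHKERRQ(ierr);
      ierr = KSPDestroy(ksp);CHKERRQ(ierr);

   or, equivalently, select the preconditioner at run time with -pc_type nn.
*/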
168 EXTERN_C_BEGIN
169 #undef __FUNCT__
170 #define __FUNCT__ "PCCreate_NN"
171 int PCCreate_NN(PC pc)
172 {
173   int   ierr;
174   PC_NN *pcnn;
175 
176   PetscFunctionBegin;
177 
178   /*
179      Creates the private data structure for this preconditioner and
180      attaches it to the PC object.
181   */
182   ierr      = PetscNew(PC_NN,&pcnn);CHKERRQ(ierr);
183   pc->data  = (void*)pcnn;
184 
185   /*
186      Logs the memory usage; this is not needed but allows PETSc to
187      monitor how much memory is being used for various purposes.
188   */
189   PetscLogObjectMemory(pc,sizeof(PC_NN)+sizeof(PC_IS)); /* Is this the right thing to do? */
190 
191   ierr = PCISCreate(pc);CHKERRQ(ierr);
192   pcnn->coarse_mat  = 0;
193   pcnn->coarse_x    = 0;
194   pcnn->coarse_b    = 0;
195   pcnn->ksp_coarse  = 0;
196   pcnn->DZ_IN       = 0;
197 
198   /*
199       Set the pointers for the functions that are provided above.
200       Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
201       are called, they will automatically call these functions.  Note we
202       choose not to provide a couple of these functions since they are
203       not needed.
204   */
205   pc->ops->apply               = PCApply_NN;
206   pc->ops->applytranspose      = 0;
207   pc->ops->setup               = PCSetUp_NN;
208   pc->ops->destroy             = PCDestroy_NN;
209   pc->ops->view                = 0;
210   pc->ops->applyrichardson     = 0;
211   pc->ops->applysymmetricleft  = 0;
212   pc->ops->applysymmetricright = 0;
213 
214   PetscFunctionReturn(0);
215 }
216 EXTERN_C_END
217 
218 
219 /* -------------------------------------------------------------------------- */
220 /*
221    PCNNCreateCoarseMatrix - Creates and assembles the coarse problem (matrix, vectors and KSP) used in the balancing steps.
222 */
223 #undef __FUNCT__
224 #define __FUNCT__ "PCNNCreateCoarseMatrix"
225 int PCNNCreateCoarseMatrix (PC pc)
226 {
227   MPI_Request    *send_request, *recv_request;
228   int            i, j, k, ierr;
229 
230   PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
231   PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */
232 
233   /* aliasing some names */
234   PC_IS*         pcis     = (PC_IS*)(pc->data);
235   PC_NN*         pcnn     = (PC_NN*)pc->data;
236   int            n_neigh  = pcis->n_neigh;
237   int*           neigh    = pcis->neigh;
238   int*           n_shared = pcis->n_shared;
239   int**          shared   = pcis->shared;
240   PetscScalar**  DZ_IN;   /* Must be initialized after memory allocation. */
241 
242   PetscFunctionBegin;
243 
244   /* Allocate memory for mat (the +1 handles the case where n_neigh is zero) */
245   ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);
246 
247   /* Allocate memory for DZ */
248   /* Notice that DZ_OUT[0] is allocated some space that is never used. */
249   /* This is done just so that DZ_OUT and DZ_IN have exactly the same form.  */
250   {
251     int size_of_Z = 0;
252     ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
253     DZ_IN = pcnn->DZ_IN;
254     ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
255     for (i=0; i<n_neigh; i++) {
256       size_of_Z += n_shared[i];
257     }
258     ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
259     ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
260   }
261   for (i=1; i<n_neigh; i++) {
262     DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
263     DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
264   }
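  /*
     Note on the layout: the pointer arithmetic above packs all chunks contiguously in a
     single buffer, so that DZ_IN[0] holds the n_shared[0] values for this subdomain,
     immediately followed by the n_shared[1] values for neigh[1], and so on (likewise for
     DZ_OUT). This is why freeing DZ_IN[0] alone, as done in PCDestroy_NN(), releases the
     whole buffer.
  */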
265 
266   /* Set the values of DZ_OUT, in order to send this info to the neighbours */
267   /* First, set the auxiliary array pcis->work_N. */
268   ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
269   for (i=1; i<n_neigh; i++){
270     for (j=0; j<n_shared[i]; j++) {
271       DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
272     }
273   }
274 
275   /* Non-blocking send/receive of the common-interface chunks of the scaled nullspaces */
276   /* Notice that send_request[] and recv_request[] could have one less element. */
277   /* We make them longer to have request[i] corresponding to neigh[i].          */
278   {
279     int tag;
280     ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
281     ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
282     recv_request = send_request + (n_neigh);
283     for (i=1; i<n_neigh; i++) {
284       ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(send_request[i]));CHKERRQ(ierr);
285       ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(recv_request[i]));CHKERRQ(ierr);
286     }
287   }
288 
289   /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
290   for(j=0; j<n_shared[0]; j++) {
291     DZ_IN[0][j] = pcis->work_N[shared[0][j]];
292   }
293 
294   /* Start computing with local D*Z while communication goes on.    */
295   /* Apply the Schur complement. The result is stored in pcis->vec1_B   */
296   /* (more precisely, pcis->vec1_B holds the interface result) and it   */
297   /* is also scattered to the array pcis->work_N.                       */
298   ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
299                                pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
300 
301   /* Compute the first column, while completing the receiving. */
302   for (i=0; i<n_neigh; i++) {
303     MPI_Status stat;
304     int ind=0;
305     if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
306     mat[ind*n_neigh+0] = 0.0;
307     for (k=0; k<n_shared[ind]; k++) {
308       mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
309     }
310   }
311 
312   /* Compute the remaining columns. */
313   for (j=1; j<n_neigh; j++) {
314     ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
315                                  pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
316     for (i=0; i<n_neigh; i++) {
317       mat[i*n_neigh+j] = 0.0;
318       for (k=0; k<n_shared[i]; k++) {
319         mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
320       }
321     }
322   }
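  /*
     Roughly, the two loops above assemble the local n_neigh x n_neigh block

        $ mat[i*n_neigh+j] = \sum_k DZ_IN[i][k] * (S (D z)_j)[shared[i][k]] $ ,

     i.e., the chunk of the scaled nullspace received from neigh[i] dotted with the local
     Schur complement applied to the chunk associated with neigh[j] (computed by
     PCNNApplySchurToChunk() and left in pcis->work_N). This block is later added into
     rows/columns neigh[] of the global coarse matrix.
  */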
323 
324   /* Complete the sending. */
325   if (n_neigh>1) {
326     MPI_Status *stat;
327     ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
328     ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);
329     ierr = PetscFree(stat);CHKERRQ(ierr);
330   }
331 
332   /* Free the memory for the MPI requests */
333   ierr = PetscFree(send_request);CHKERRQ(ierr);
334 
335   /* Free the memory for DZ_OUT */
336   if (DZ_OUT) {
337     if (DZ_OUT[0]) { ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr); }
338     ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
339   }
340 
341   {
342     int size,n_neigh_m1;
343     ierr = MPI_Comm_size(pc->comm,&size);CHKERRQ(ierr);
344     n_neigh_m1 = (n_neigh) ? n_neigh-1 : 0;
345     /* Create the global coarse vectors (rhs and solution). */
346     ierr = VecCreateMPI(pc->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
347     ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
348     /* Create and set the global coarse matrix. */
349     ierr = MatCreateMPIAIJ(pc->comm,1,1,size,size,1,PETSC_NULL,n_neigh_m1,PETSC_NULL,&(pcnn->coarse_mat));CHKERRQ(ierr);
350     ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
351     ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
352     ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
353   }
354 
355   {
356     int         rank;
357     PetscScalar one = 1.0;
358     IS          is;
359     ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
360     /* "Zero out" rows of not-purely-Neumann subdomains */
361     if (pcis->pure_neumann) {  /* does NOT zero the row; create an empty index set. The reason is that MatZeroRows() is collective. */
362       ierr = ISCreateStride(pc->comm,0,0,0,&is);CHKERRQ(ierr);
363     } else { /* here it DOES zero the row, since it's not a floating subdomain. */
364       ierr = ISCreateStride(pc->comm,1,rank,0,&is);CHKERRQ(ierr);
365     }
366     ierr = MatZeroRows(pcnn->coarse_mat,is,&one);CHKERRQ(ierr);
367     ierr = ISDestroy(is);CHKERRQ(ierr);
368   }
369 
370   /* Create the coarse linear solver context */
371   {
372     PC  pc_ctx, inner_pc;
373     ierr = KSPCreate(pc->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
374     ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
375     ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
376     ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
377     ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
378     ierr = PCRedundantGetPC(pc_ctx,&inner_pc);CHKERRQ(ierr);
379     ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
380     ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"coarse_");CHKERRQ(ierr);
381     ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
382     /* the vectors in the following lines are dummy arguments, just telling the KSP the vector sizes; the values are not used */
383     ierr = KSPSetRhs(pcnn->ksp_coarse,pcnn->coarse_x);CHKERRQ(ierr);
384     ierr = KSPSetSolution(pcnn->ksp_coarse,pcnn->coarse_b);CHKERRQ(ierr);
385     ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
386   }
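  /*
     Because the coarse KSP carries the options prefix "coarse_", its behaviour can be
     changed at run time; for example (an illustrative choice, any KSP/PC option applies):

        -coarse_ksp_type preonly -coarse_pc_type redundant

     The defaults set above are KSPPREONLY with a redundant LU factorization.
  */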
387 
388   /* Free the memory for mat */
389   ierr = PetscFree(mat);CHKERRQ(ierr);
390 
391   /* for DEBUGGING, save the coarse matrix to a file. */
392   {
393     PetscTruth flg;
394     ierr = PetscOptionsHasName(PETSC_NULL,"-save_coarse_matrix",&flg);CHKERRQ(ierr);
395     if (flg) {
396       PetscViewer viewer;
397       ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
398       ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
399       ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
400       ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
401     }
402   }
403 
404   /*  Set the variable pcnn->factor_coarse_rhs. */
405   pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;
406 
407   /* See historical note 02, at the bottom of this file. */
408 
409   PetscFunctionReturn(0);
410 }
411 
412 /* -------------------------------------------------------------------------- */
413 /*
414    PCNNApplySchurToChunk - Applies the local Schur complement to a chunk of values given at a set of local node indices.
415 
416    Input parameters:
417 .  pc - the preconditioner context
418 .  n - size of the chunk
419 .  idx - local indices of the chunk
420 .  chunk - values of the chunk
421 
422    Output parameters:
423 .  array_N - result of Schur complement applied to chunk, scattered to big array
424 .  vec1_B  - result of Schur complement applied to chunk
425 .  vec2_B  - garbage (used as work space)
426 .  vec1_D  - garbage (used as work space)
427 .  vec2_D  - garbage (used as work space)
428 
429 */
430 #undef __FUNCT__
431 #define __FUNCT__ "PCNNApplySchurToChunk"
432 int PCNNApplySchurToChunk(PC pc, int n, int* idx, PetscScalar *chunk, PetscScalar* array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
433 {
434   int   i, ierr;
435   PC_IS *pcis = (PC_IS*)(pc->data);
436 
437   PetscFunctionBegin;
438 
439   ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
440   for (i=0; i<n; i++) { array_N[idx[i]] = chunk[i]; }
441   ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
442   ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
443   ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
444 
445   PetscFunctionReturn(0);
446 }
447 
448 /* -------------------------------------------------------------------------- */
449 /*
450    PCNNApplyInterfacePreconditioner - Apply the interface preconditioner, i.e.,
451                                       the preconditioner for the Schur complement.
452 
453    Input parameter:
454 .  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.
455 
456    Output parameters:
457 .  z - global vector of interior and interface nodes. The values on the interface are the result of
458        the application of the interface preconditioner to the interface part of r. The values on the
459        interior nodes are garbage.
460 .  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
461 .  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
462 .  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
463 .  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
464 .  vec1_D - vector of local interior nodes; returns garbage (used as work space)
465 .  vec2_D - vector of local interior nodes; returns garbage (used as work space)
466 .  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
467 .  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
468 
469 */
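/*
   A sketch of the algorithm (the option names below are the ones tested in the body of
   this routine): unless -pc_nn_turn_off_first_balancing is given, the interface residual
   is first balanced by a coarse solve (PCNNBalancing()); then each subdomain scales by
   the partition-of-unity diagonal D, applies the inverse (or pseudo-inverse, for floating
   subdomains) of its local Schur complement via the Neumann solve in PCISApplyInvSchur(),
   and scales by D again; finally, unless -pc_nn_turn_off_second_balancing is given, the
   result is balanced by a second coarse solve. Roughly,

      $ z_B \approx (balancing) \circ ( \sum_i \tilde R_i^T D_i S_i^{+} D_i \tilde R_i ) \circ (balancing) \; r_B $ .
*/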
470 #undef __FUNCT__
471 #define __FUNCT__ "PCNNApplyInterfacePreconditioner"
472 int PCNNApplyInterfacePreconditioner (PC pc, Vec r, Vec z, PetscScalar* work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,
473                                       Vec vec2_D, Vec vec1_N, Vec vec2_N)
474 {
475   int    ierr;
476   PC_IS* pcis = (PC_IS*)(pc->data);
477 
478   PetscFunctionBegin;
479 
480   /*
481     First balancing step.
482   */
483   {
484     PetscTruth flg;
485     ierr = PetscOptionsHasName(PETSC_NULL,"-pc_nn_turn_off_first_balancing",&flg);CHKERRQ(ierr);
486     if (!flg) {
487       ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
488     } else {
489       ierr = VecCopy(r,z);CHKERRQ(ierr);
490     }
491   }
492 
493   /*
494     Extract the local interface part of z and scale it by D
495   */
496   ierr = VecScatterBegin(z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
497   ierr = VecScatterEnd  (z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
498   ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);
499 
500   /* Neumann Solver */
501   ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);
502 
503   /*
504     Second balancing step.
505   */
506   {
507     PetscTruth flg;
508     ierr = PetscOptionsHasName(PETSC_NULL,"-pc_nn_turn_off_second_balancing",&flg);CHKERRQ(ierr);
509     if (!flg) {
510       ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
511     } else {
512       PetscScalar zero = 0.0;
513       ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);
514       ierr = VecSet(&zero,z);CHKERRQ(ierr);
515       ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
516       ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
517     }
518   }
519 
520   PetscFunctionReturn(0);
521 }
522 
523 /* -------------------------------------------------------------------------- */
524 /*
525    PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
526                    input argument u is provided), or s, as given in equations
527                    (12) and (13), if the input argument u is a null vector.
528                    Notice that the input argument u plays the role of u_i in
529                    equation (14). The equation numbers refer to [Man93].
530 
531    Input Parameters:
532 .  pcnn - NN preconditioner context.
533 .  r - MPI vector of all nodes (interior and interface). It's preserved.
534 .  u - (Optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.
535 
536    Output Parameters:
537 .  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
538 .  vec1_B - Sequential vector of local interface nodes. Workspace.
539 .  vec2_B - Sequential vector of local interface nodes. Workspace.
540 .  vec3_B - (Optional) sequential vector of local interface nodes. Workspace.
541 .  vec1_D - Sequential vector of local interior nodes. Workspace.
542 .  vec2_D - Sequential vector of local interior nodes. Workspace.
543 .  work_N - Array of all local nodes (interior and interface). Workspace.
544 
545 */
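/*
   A sketch of the coarse step common to both cases: a single coarse unknown per subdomain
   (lambda below) is obtained by solving the coarse problem assembled in
   PCNNCreateCoarseMatrix(), with the local right-hand side entry

      $ value = factor_coarse_rhs * \sum_k DZ_IN[0][k] * work_N[shared[0][k]] $ ,

   and the correction lambda times the scaled local nullspace chunk DZ_IN[0] is then
   scattered back into the global vector z (when u is not provided, this correction is
   additionally propagated through the local Schur complement).
*/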
546 #undef __FUNCT__
547 #define __FUNCT__ "PCNNBalancing"
548 int PCNNBalancing (PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,
549                    Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
550 {
551   int            k, ierr;
552   PetscScalar    zero     =  0.0;
553   PetscScalar    m_one    = -1.0;
554   PetscScalar    value;
555   PetscScalar*   lambda;
556   PC_NN*         pcnn     = (PC_NN*)(pc->data);
557   PC_IS*         pcis     = (PC_IS*)(pc->data);
558 
559   PetscFunctionBegin;
560   ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
561 
562   if (u) {
563     if (!vec3_B) { vec3_B = u; }
564     ierr = VecPointwiseMult(pcis->D,u,vec1_B);CHKERRQ(ierr);
565     ierr = VecSet(&zero,z);CHKERRQ(ierr);
566     ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
567     ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
568     ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
569     ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
570     ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
571     ierr = VecScale(&m_one,vec3_B);CHKERRQ(ierr);
572     ierr = VecCopy(r,z);CHKERRQ(ierr);
573     ierr = VecScatterBegin(vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
574     ierr = VecScatterEnd  (vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
575   } else {
576     ierr = VecCopy(r,z);CHKERRQ(ierr);
577   }
578   ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
579   ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
580   ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
581   for (k=0, value=0.0; k<pcis->n_shared[0]; k++) { value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]]; }
582   value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
583   {
584     int rank;
585     ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
586     ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
587     /*
588        Since we are only inserting local values (one value, actually), we do not need the
589        reduction that would tell us there is no data to be moved. Hence we can skip the calls:
590        ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
591        ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
592     */
593   }
594   ierr = KSPSetRhs(pcnn->ksp_coarse,pcnn->coarse_b);CHKERRQ(ierr);
595   ierr = KSPSetSolution(pcnn->ksp_coarse,pcnn->coarse_x);CHKERRQ(ierr);
596   ierr = KSPSolve(pcnn->ksp_coarse);CHKERRQ(ierr);
597   if (!u) { ierr = VecScale(&m_one,pcnn->coarse_x);CHKERRQ(ierr); }
598   ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
599   for (k=0; k<pcis->n_shared[0]; k++) { work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k]; }
600   ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
601   ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
602   ierr = VecSet(&zero,z);CHKERRQ(ierr);
603   ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
604   ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
605   if (!u) {
606     ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
607     ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
608     ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
609     ierr = VecCopy(r,z);CHKERRQ(ierr);
610   }
611   ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
612   ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
613   ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
614 
615   PetscFunctionReturn(0);
616 }
617 
618 #undef __FUNCT__
619 
620 
621 
622 /*  -------   E N D   O F   T H E   C O D E   -------  */
623 /*                                                     */
624 /*  From now on, "footnotes" (or "historical notes").  */
625 /*                                                     */
626 /*  -------------------------------------------------  */
627 
628 
629 #ifdef __HISTORICAL_NOTES___do_not_compile__
630 
631 /* --------------------------------------------------------------------------
632    Historical note 01
633    -------------------------------------------------------------------------- */
634 /*
635    We considered the possibility of an alternative D_i that would still
636    provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
637    The basic principle was still the pseudo-inverse of the counting
638    function; the difference was that we would not count subdomains
639    that do not contribute to the coarse space (i.e., the non-pure-Neumann
640    subdomains).
641 
642    This turned out to be a bad idea:  we would solve trivial Neumann
643    problems in the non-pure-Neumann subdomains, since we would be scaling
644    the balanced residual by zero.
645 */
646 
647     {
648       PetscTruth flg;
649       ierr = PetscOptionsHasName(PETSC_NULL,"-pcnn_new_scaling",&flg);CHKERRQ(ierr);
650       if (flg) {
651         Vec    counter;
652         PetscScalar one=1.0, zero=0.0;
653         ierr = VecDuplicate(pc->vec,&counter);CHKERRQ(ierr);
654         ierr = VecSet(&zero,counter);CHKERRQ(ierr);
655         if (pcnn->pure_neumann) {
656           ierr = VecSet(&one,pcnn->D);CHKERRQ(ierr);
657         } else {
658           ierr = VecSet(&zero,pcnn->D);CHKERRQ(ierr);
659         }
660         ierr = VecScatterBegin(pcnn->D,counter,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
661         ierr = VecScatterEnd  (pcnn->D,counter,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
662         ierr = VecScatterBegin(counter,pcnn->D,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
663         ierr = VecScatterEnd  (counter,pcnn->D,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
664         ierr = VecDestroy(counter);CHKERRQ(ierr);
665         if (pcnn->pure_neumann) {
666           ierr = VecReciprocal(pcnn->D);CHKERRQ(ierr);
667         } else {
668           ierr = VecSet(&zero,pcnn->D);CHKERRQ(ierr);
669         }
670       }
671     }
672 
673 
674 
675 /* --------------------------------------------------------------------------
676    Historical note 02
677    -------------------------------------------------------------------------- */
678 /*
679    We tried an alternative coarse problem, that would eliminate exactly a
680    constant error. Turned out not to improve the overall convergence.
681 */
682 
683   /*  Set the variable pcnn->factor_coarse_rhs. */
684   {
685     PetscTruth flg;
686     ierr = PetscOptionsHasName(PETSC_NULL,"-enforce_preserving_constants",&flg);CHKERRQ(ierr);
687     if (!flg) { pcnn->factor_coarse_rhs = (pcnn->pure_neumann) ? 1.0 : 0.0; }
688     else {
689       PetscScalar zero = 0.0, one = 1.0;
690       ierr = VecSet(&one,pcnn->vec1_B);
691       ierr = ApplySchurComplement(pcnn,pcnn->vec1_B,pcnn->vec2_B,(Vec)0,pcnn->vec1_D,pcnn->vec2_D);CHKERRQ(ierr);
692       ierr = VecSet(&zero,pcnn->vec1_global);CHKERRQ(ierr);
693       ierr = VecScatterBegin(pcnn->vec2_B,pcnn->vec1_global,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
694       ierr = VecScatterEnd  (pcnn->vec2_B,pcnn->vec1_global,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
695       ierr = VecScatterBegin(pcnn->vec1_global,pcnn->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
696       ierr = VecScatterEnd  (pcnn->vec1_global,pcnn->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
697       if (pcnn->pure_neumann) { pcnn->factor_coarse_rhs = 1.0; }
698       else {
699         ierr = ScatterArrayNToVecB(pcnn->work_N,pcnn->vec1_B,INSERT_VALUES,SCATTER_REVERSE,pcnn);CHKERRQ(ierr);
700         for (k=0, pcnn->factor_coarse_rhs=0.0; k<pcnn->n_shared[0]; k++) {
701           pcnn->factor_coarse_rhs += pcnn->work_N[pcnn->shared[0][k]] * pcnn->DZ_IN[0][k];
702         }
703         if (pcnn->factor_coarse_rhs) { pcnn->factor_coarse_rhs = 1.0 / pcnn->factor_coarse_rhs; }
704         else { SETERRQ(1,"Constants cannot be preserved. Remove \"-enforce_preserving_constants\" option."); }
705       }
706     }
707   }
708 
709 #endif /* __HISTORICAL_NOTES___do_not_compile */
710