
#include <../src/ksp/pc/impls/is/nn/nn.h>

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
static PetscErrorCode PCSetUp_NN(PC pc)
{
  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    PetscCall(PCISSetUp(pc,PETSC_TRUE,PETSC_TRUE));
    /* Create the coarse matrix. */
    PetscCall(PCNNCreateCoarseMatrix(pc));
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
+  pc - the preconditioner context
-  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
 */
static PetscErrorCode PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS          *pcis = (PC_IS*)(pc->data);
  PetscScalar    m_one = -1.0;
  Vec            w     = pcis->vec1_global;

  PetscFunctionBegin;
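  /*
    Overall action of this routine, pieced together from the step-by-step
    comments below (a summary sketch; B_I^{(i)} denotes the local Dirichlet
    solve performed with ksp_D):

      z_B = M_NN^{-1} ( r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} B_I^{(j)} r_I^{(j)} )
      z_I^{(i)} = B_I^{(i)} ( r_I^{(i)} - A_{IB}^{(i)} \tilde R_i z_B )

    where M_NN^{-1} is the balanced Neumann-Neumann interface preconditioner
    applied by PCNNApplyInterfacePreconditioner().
  */
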
  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)}r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D
  */
  PetscCall(VecScatterBegin(pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D));

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $.
    Storing the result in the interface portion of the global vector w.
  */
  PetscCall(MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B));
  PetscCall(VecScale(pcis->vec1_B,m_one));
  PetscCall(VecCopy(r,w));
  PetscCall(VecScatterBegin(pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE));

  /*
    Apply the interface preconditioner
  */
  PetscCall(PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,pcis->vec3_D,pcis->vec1_N,pcis->vec2_N));

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $.
    The result is stored in vec1_D.
  */
  PetscCall(VecScatterBegin(pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D));

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)}t_I^{(i)} $ and inserting into the global vector the blocks
    $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $.
  */
  PetscCall(VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE));
  PetscCall(KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D));
  PetscCall(VecScale(pcis->vec2_D,m_one));
  PetscCall(VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE));
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
static PetscErrorCode PCDestroy_NN(PC pc)
{
  PC_NN          *pcnn = (PC_NN*)pc->data;

  PetscFunctionBegin;
  PetscCall(PCISDestroy(pc));

  PetscCall(MatDestroy(&pcnn->coarse_mat));
  PetscCall(VecDestroy(&pcnn->coarse_x));
  PetscCall(VecDestroy(&pcnn->coarse_b));
  PetscCall(KSPDestroy(&pcnn->ksp_coarse));
  if (pcnn->DZ_IN) {
    PetscCall(PetscFree(pcnn->DZ_IN[0]));
    PetscCall(PetscFree(pcnn->DZ_IN));
  }

  /*
      Free the private data structure that was hanging off the PC
  */
  PetscCall(PetscFree(pc->data));
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*MC
   PCNN - Balancing Neumann-Neumann preconditioner for scalar elliptic PDEs.

   Options Database Keys:
+    -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
                                       (this skips the first coarse grid solve in the preconditioner)
.    -pc_nn_turn_off_second_balancing - do not balance the solution obtained from the local Neumann problems
                                       (this skips the second coarse grid solve in the preconditioner)
.    -pc_is_damp_fixed <fact> -
.    -pc_is_remove_nullspace_fixed -
.    -pc_is_set_damping_factor_floating <fact> -
.    -pc_is_not_damp_floating -
-    -pc_is_not_remove_nullspace_floating -

   Level: intermediate

   Notes:
   The matrix used with this preconditioner must be of type MATIS.

   Unlike more 'conventional' Neumann-Neumann preconditioners, this one iterates over ALL the
   degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
   on the subdomains, though in our experience using approximate solvers is slower).

   Options for the coarse grid preconditioner can be set with -nn_coarse_pc_xxx.
   Options for the Dirichlet subproblem preconditioner can be set with -is_localD_pc_xxx.
   Options for the Neumann subproblem preconditioner can be set with -is_localN_pc_xxx.

   Contributed by Paulo Goldfeld

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, MATIS
M*/
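
/*
   A minimal usage sketch (assuming the operator A has already been assembled
   as a MATIS matrix on the communicator comm, and b, x are conforming global
   vectors; these names are illustrative, not part of this file):

     KSP ksp;
     PC  pc;

     PetscCall(KSPCreate(comm,&ksp));
     PetscCall(KSPSetOperators(ksp,A,A));
     PetscCall(KSPGetPC(ksp,&pc));
     PetscCall(PCSetType(pc,PCNN));
     PetscCall(KSPSetFromOptions(ksp));
     PetscCall(KSPSolve(ksp,b,x));

   Equivalently, select the preconditioner at run time with -pc_type nn.
*/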

PETSC_EXTERN PetscErrorCode PCCreate_NN(PC pc)
{
  PC_NN          *pcnn;

  PetscFunctionBegin;
  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  PetscCall(PetscNewLog(pc,&pcnn));
  pc->data = (void*)pcnn;

  PetscCall(PCISCreate(pc));
  pcnn->coarse_mat = NULL;
  pcnn->coarse_x   = NULL;
  pcnn->coarse_b   = NULL;
  pcnn->ksp_coarse = NULL;
  pcnn->DZ_IN      = NULL;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions.  Note we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = NULL;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = NULL;
  pc->ops->applyrichardson     = NULL;
  pc->ops->applysymmetricleft  = NULL;
  pc->ops->applysymmetricright = NULL;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Creates and assembles the coarse matrix, the coarse
                            vectors, and the coarse solver used in the balancing
                            steps of the NN preconditioner.
*/
PetscErrorCode PCNNCreateCoarseMatrix(PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscInt       i, j, k;
  PetscScalar    *mat;     /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar    **DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS       *pcis     = (PC_IS*)(pc->data);
  PC_NN       *pcnn     = (PC_NN*)pc->data;
  PetscInt    n_neigh   = pcis->n_neigh;
  PetscInt    *neigh    = pcis->neigh;
  PetscInt    *n_shared = pcis->n_shared;
  PetscInt    **shared  = pcis->shared;
  PetscScalar **DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  PetscCall(PetscMalloc1(n_neigh*n_neigh+1,&mat));

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same layout. */
  {
    PetscInt size_of_Z = 0;
    PetscCall(PetscMalloc1(n_neigh+1,&pcnn->DZ_IN));
    DZ_IN = pcnn->DZ_IN;
    PetscCall(PetscMalloc1(n_neigh+1,&DZ_OUT));
    for (i=0; i<n_neigh; i++) size_of_Z += n_shared[i];
    PetscCall(PetscMalloc1(size_of_Z+1,&DZ_IN[0]));
    PetscCall(PetscMalloc1(size_of_Z+1,&DZ_OUT[0]));
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }
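  /* Note: DZ_IN and DZ_OUT are ragged arrays backed by a single contiguous
     allocation each: row i starts where row i-1 ends and holds n_shared[i]
     entries, one per interface node shared with neighbour neigh[i]. */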

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  PetscCall(PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc));
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    PetscCall(PetscObjectGetNewTag((PetscObject)pc,&tag));
    PetscCall(PetscMalloc2(n_neigh+1,&send_request,n_neigh+1,&recv_request));
    for (i=1; i<n_neigh; i++) {
      PetscCallMPI(MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(send_request[i])));
      PetscCallMPI(MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(recv_request[i])));
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) DZ_IN[0][j] = pcis->work_N[shared[0][j]];

  /* Start computing with local D*Z while communication goes on.          */
  /* Apply the Schur complement. The result is stored in pcis->vec1_B and */
  /* also scattered to pcis->work_N.                                      */
  PetscCall(PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec1_D,pcis->vec2_D));

  /* Compute the first column, while completing the receiving. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) {PetscCallMPI(MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat)); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
  }

  /* Compute the remaining columns */
  for (j=1; j<n_neigh; j++) {
    PetscCall(PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec1_D,pcis->vec2_D));
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
    }
  }

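  /* At this point mat[] holds this subdomain's contribution to the coarse
     matrix: entry (i,j) couples the scaled nullspace chunks of neigh[i] and
     neigh[j] through the local Schur complement. The ADD_VALUES assembly
     below sums these contributions over all subdomains. */
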
  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    PetscCall(PetscMalloc1(n_neigh-1,&stat));
    PetscCallMPI(MPI_Waitall(n_neigh-1,&(send_request[1]),stat));
    PetscCall(PetscFree(stat));
  }

  /* Free the memory for the MPI requests */
  PetscCall(PetscFree2(send_request,recv_request));

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    PetscCall(PetscFree(DZ_OUT[0]));
    PetscCall(PetscFree(DZ_OUT));
  }

  {
    PetscMPIInt size;
    PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)pc),&size));
    /* Create the global coarse vectors (rhs and solution). */
    PetscCall(VecCreateMPI(PetscObjectComm((PetscObject)pc),1,size,&(pcnn->coarse_b)));
    PetscCall(VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x)));
    /* Create and set the global coarse AIJ matrix. */
    PetscCall(MatCreate(PetscObjectComm((PetscObject)pc),&(pcnn->coarse_mat)));
    PetscCall(MatSetSizes(pcnn->coarse_mat,1,1,size,size));
    PetscCall(MatSetType(pcnn->coarse_mat,MATAIJ));
    PetscCall(MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,NULL));
    PetscCall(MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,NULL,n_neigh,NULL));
    PetscCall(MatSetOption(pcnn->coarse_mat,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE));
    PetscCall(MatSetOption(pcnn->coarse_mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE));
    PetscCall(MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES));
    PetscCall(MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY));
  }
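
  /* The coarse problem has exactly one unknown per subdomain (the coefficient
     of that subdomain's scaled nullspace component D*Z), so its global size
     equals the number of MPI ranks and each rank owns exactly one row. */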

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank));
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero any row; passes an empty row list, because MatZeroRows() is collective. */
      PetscCall(MatZeroRows(pcnn->coarse_mat,0,NULL,one,NULL,NULL));
    } else { /* here it DOES zero the row, since this is not a floating subdomain. */
      PetscInt row = (PetscInt) rank;
      PetscCall(MatZeroRows(pcnn->coarse_mat,1,&row,one,NULL,NULL));
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    PetscCall(KSPCreate(PetscObjectComm((PetscObject)pc),&pcnn->ksp_coarse));
    PetscCall(PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2));
    PetscCall(KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat));
    PetscCall(KSPGetPC(pcnn->ksp_coarse,&pc_ctx));
    PetscCall(PCSetType(pc_ctx,PCREDUNDANT));
    PetscCall(KSPSetType(pcnn->ksp_coarse,KSPPREONLY));
    PetscCall(PCRedundantGetKSP(pc_ctx,&inner_ksp));
    PetscCall(KSPGetPC(inner_ksp,&inner_pc));
    PetscCall(PCSetType(inner_pc,PCLU));
    PetscCall(KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_"));
    PetscCall(KSPSetFromOptions(pcnn->ksp_coarse));
    PetscCall(KSPSetUp(pcnn->ksp_coarse));
  }
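
  /* The coarse solve can be customized at run time through the "nn_coarse_"
     options prefix set above. An illustrative sketch (any KSP/PC options apply):
       -nn_coarse_ksp_type preonly -nn_coarse_pc_type redundant */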

  /* Free the memory for mat */
  PetscCall(PetscFree(mat));

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL,NULL,"-pc_nn_save_coarse_matrix",&flg,NULL));
    if (flg) {
      PetscViewer viewer;
      PetscCall(PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer));
      PetscCall(PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_MATLAB));
      PetscCall(MatView(pcnn->coarse_mat,viewer));
      PetscCall(PetscViewerPopFormat(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }

  /*  Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk - Applies the local Schur complement to a chunk of
                           values given on (a subset of) the local interface.

   Input Parameters:
+  pc - the preconditioner context
.  n - size of chunk
.  idx - indices of chunk
-  chunk - values

   Output Parameters:
+  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B  - result of Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
-  vec2_D  - garbage (used as work space)

*/
PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt *idx, PetscScalar *chunk, PetscScalar *array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  PetscInt       i;
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  PetscCall(PetscArrayzero(array_N, pcis->n));
  for (i=0; i<n; i++) array_N[idx[i]] = chunk[i];
  PetscCall(PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc));
  PetscCall(PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D));
  PetscCall(PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc));
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Applies the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input Parameters:
+  pc - the preconditioner context
-  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output Parameters:
+  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
-  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)

*/
PetscErrorCode PCNNApplyInterfacePreconditioner(PC pc, Vec r, Vec z, PetscScalar *work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D, Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  /*
    First balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL,NULL,"-pc_nn_turn_off_first_balancing",&flg,NULL));
    if (!flg) {
      PetscCall(PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N));
    } else {
      PetscCall(VecCopy(r,z));
    }
  }

  /*
    Extract the local interface part of z and scale it by D
  */
  PetscCall(VecScatterBegin(pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecPointwiseMult(vec2_B,pcis->D,vec1_B));

  /* Neumann Solver */
  PetscCall(PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N));

  /*
    Second balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL,NULL,"-pc_nn_turn_off_second_balancing",&flg,NULL));
    if (!flg) {
      PetscCall(PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N));
    } else {
      PetscCall(VecPointwiseMult(vec2_B,pcis->D,vec1_B));
      PetscCall(VecSet(z,0.0));
      PetscCall(VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
      PetscCall(VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
    }
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is NULL. Notice that
                   the input argument u plays the role of u_i in equation (14).
                   The equation numbers refer to [Man93].

   Input Parameters:
+  pc - the preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It's preserved.
-  u - (optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is NULL.

   Output Parameters:
+  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
.  vec1_B - sequential vector of local interface nodes. Workspace.
.  vec2_B - sequential vector of local interface nodes. Workspace.
.  vec3_B - (optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - sequential vector of local interior nodes. Workspace.
.  vec2_D - sequential vector of local interior nodes. Workspace.
-  work_N - array of all local nodes (interior and interface). Workspace.

*/
PetscErrorCode PCNNBalancing(PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  PetscInt       k;
  PetscScalar    value;
  PetscScalar    *lambda;
  PC_NN          *pcnn = (PC_NN*)(pc->data);
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  PetscCall(PetscLogEventBegin(PC_ApplyCoarse,pc,0,0,0));
  if (u) {
    if (!vec3_B) vec3_B = u;
    PetscCall(VecPointwiseMult(vec1_B,pcis->D,u));
    PetscCall(VecSet(z,0.0));
    PetscCall(VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D));
    PetscCall(VecScale(vec3_B,-1.0));
    PetscCall(VecCopy(r,z));
    PetscCall(VecScatterBegin(pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd  (pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE));
  } else {
    PetscCall(VecCopy(r,z));
  }
  PetscCall(VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc));
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]];
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
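  /* 'value' is this rank's entry of the coarse right-hand side: the inner
     product of the scaled local nullspace chunk DZ_IN[0] with the interface
     part of the (balanced) residual currently held in z. */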
  {
    PetscMPIInt rank;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank));
    PetscCall(VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES));
    /*
       Since we are only inserting local values (one value actually) we don't need to do the
       reduction that tells us there is no data that needs to be moved. Hence we comment out these
       PetscCall(VecAssemblyBegin(pcnn->coarse_b));
       PetscCall(VecAssemblyEnd  (pcnn->coarse_b));
    */
  }
  PetscCall(KSPSolve(pcnn->ksp_coarse,pcnn->coarse_b,pcnn->coarse_x));
  if (!u) PetscCall(VecScale(pcnn->coarse_x,-1.0));
  PetscCall(VecGetArray(pcnn->coarse_x,&lambda));
  for (k=0; k<pcis->n_shared[0]; k++) work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k];
  PetscCall(VecRestoreArray(pcnn->coarse_x,&lambda));
  PetscCall(PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc));
  PetscCall(VecSet(z,0.0));
  PetscCall(VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
  if (!u) {
    PetscCall(VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D));
    PetscCall(VecCopy(r,z));
  }
  PetscCall(VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(PetscLogEventEnd(PC_ApplyCoarse,pc,0,0,0));
  PetscFunctionReturn(0);
}

/*  -------   E N D   O F   T H E   C O D E   -------  */
/*                                                     */
/*  From now on, "footnotes" (or "historical notes").  */
/*                                                     */
/*  -------------------------------------------------  */

/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the not pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/

/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/
597