
#include <../src/ksp/pc/impls/is/nn/nn.h>

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
static PetscErrorCode PCSetUp_NN(PC pc)
{
  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    PetscCall(PCISSetUp(pc,PETSC_TRUE,PETSC_TRUE));
    /* Create the coarse matrix. */
    PetscCall(PCNNCreateCoarseMatrix(pc));
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
.  pc - the preconditioner context
.  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
 */
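/*
   For orientation, a sketch of the action z = M^{-1} r computed below, assembled from the
   inline comments of this routine (same notation as those comments):

     1. Dirichlet solves:          $ z_I^{(i)} = B_I^{(i)} r_I^{(i)} $
     2. Interface residual:        $ w_B = r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} z_I^{(j)} $
     3. Interface preconditioner:  $ z_B $ = result of PCNNApplyInterfacePreconditioner() applied to $ w_B $
     4. Interior correction:       $ z_I^{(i)} = B_I^{(i)} ( r_I^{(i)} - A_{IB}^{(i)} \tilde R_i z_B ) $
*/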
static PetscErrorCode PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS       *pcis = (PC_IS*)(pc->data);
  PetscScalar m_one = -1.0;
  Vec         w     = pcis->vec1_global;

  PetscFunctionBegin;
  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)} r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  PetscCall(VecScatterBegin(pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D));

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $ .
    Storing the result in the interface portion of the global vector w.
  */
  PetscCall(MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B));
  PetscCall(VecScale(pcis->vec1_B,m_one));
  PetscCall(VecCopy(r,w));
  PetscCall(VecScatterBegin(pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE));

  /*
    Apply the interface preconditioner
  */
  PetscCall(PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
                                             pcis->vec3_D,pcis->vec1_N,pcis->vec2_N));

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $ .
    The result is stored in vec1_D.
  */
  PetscCall(VecScatterBegin(pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D));

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)} t_I^{(i)} $ and storing in the global vector the blocks
    $ B_I^{(i)} r_I^{(i)} - B_I^{(i)} t_I^{(i)} $.
  */
  PetscCall(VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE));
  PetscCall(KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D));
  PetscCall(VecScale(pcis->vec2_D,m_one));
  PetscCall(VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE));
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
static PetscErrorCode PCDestroy_NN(PC pc)
{
  PC_NN *pcnn = (PC_NN*)pc->data;

  PetscFunctionBegin;
  PetscCall(PCISDestroy(pc));

  PetscCall(MatDestroy(&pcnn->coarse_mat));
  PetscCall(VecDestroy(&pcnn->coarse_x));
  PetscCall(VecDestroy(&pcnn->coarse_b));
  PetscCall(KSPDestroy(&pcnn->ksp_coarse));
  if (pcnn->DZ_IN) {
    PetscCall(PetscFree(pcnn->DZ_IN[0]));
    PetscCall(PetscFree(pcnn->DZ_IN));
  }

  /*
      Free the private data structure that was hanging off the PC
  */
  PetscCall(PetscFree(pc->data));
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*MC
   PCNN - Balancing Neumann-Neumann preconditioner for scalar elliptic PDEs.

   Options Database Keys:
+    -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
                                       (this skips the first coarse grid solve in the preconditioner)
.    -pc_nn_turn_off_second_balancing - do not balance the solution after solving the local Neumann problems
                                       (this skips the second coarse grid solve in the preconditioner)
.    -pc_is_damp_fixed <fact> -
.    -pc_is_remove_nullspace_fixed -
.    -pc_is_set_damping_factor_floating <fact> -
.    -pc_is_not_damp_floating -
-    -pc_is_not_remove_nullspace_floating -

   Level: intermediate

   Notes:
   The matrix used with this preconditioner must be of type MATIS.

   Unlike more 'conventional' Neumann-Neumann preconditioners, this one iterates over ALL the
   degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
   on the subdomains, though in our experience using approximate solvers is slower).

   Options for the coarse grid preconditioner can be set with -nn_coarse_pc_xxx.
   Options for the Dirichlet subproblem preconditioner can be set with -is_localD_pc_xxx.
   Options for the Neumann subproblem preconditioner can be set with -is_localN_pc_xxx.

   Contributed by Paulo Goldfeld

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, MATIS
M*/
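
/*
   Minimal usage sketch (illustration only, not part of this implementation): the operator must be
   a MATIS matrix, built from the unassembled subdomain (Neumann) matrices, and the preconditioner
   is then selected with PCSetType(). The global size N, the mapping l2gmap, the local matrix Aloc,
   and the vectors b, x below are assumed to be created elsewhere and are placeholders.

     Mat A;
     KSP ksp;
     PC  pc;

     PetscCall(MatCreateIS(PETSC_COMM_WORLD,1,PETSC_DECIDE,PETSC_DECIDE,N,N,l2gmap,l2gmap,&A));
     PetscCall(MatISSetLocalMat(A,Aloc));
     PetscCall(MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY));
     PetscCall(MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY));

     PetscCall(KSPCreate(PETSC_COMM_WORLD,&ksp));
     PetscCall(KSPSetOperators(ksp,A,A));
     PetscCall(KSPGetPC(ksp,&pc));
     PetscCall(PCSetType(pc,PCNN));
     PetscCall(KSPSetFromOptions(ksp));
     PetscCall(KSPSolve(ksp,b,x));

   Typical command line options (using the prefixes documented above), for example:
     -pc_type nn -nn_coarse_pc_type lu -is_localD_pc_type icc -is_localN_pc_type ilu
*/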

PETSC_EXTERN PetscErrorCode PCCreate_NN(PC pc)
{
  PC_NN *pcnn;

  PetscFunctionBegin;
  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  PetscCall(PetscNewLog(pc,&pcnn));
  pc->data = (void*)pcnn;

  PetscCall(PCISCreate(pc));
  pcnn->coarse_mat = NULL;
  pcnn->coarse_x   = NULL;
  pcnn->coarse_b   = NULL;
  pcnn->ksp_coarse = NULL;
  pcnn->DZ_IN      = NULL;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions.  Note we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = NULL;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = NULL;
  pc->ops->applyrichardson     = NULL;
  pc->ops->applysymmetricleft  = NULL;
  pc->ops->applysymmetricright = NULL;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Builds the coarse matrix of the balancing step (one row and
                            one column per subdomain) from the scaled null space pieces D*Z
                            exchanged with the neighbouring subdomains, and sets up the
                            coarse linear solver.
*/
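/*
   A sketch of the local contribution computed below (read off the loops in this routine):
   for each pair of neighbouring subdomains (neigh[i], neigh[j]),

     mat[i*n_neigh+j] = \sum_k DZ_IN[i][k] * ( S (D_j Z_j) )[shared[i][k]]

   i.e. the (D_i Z_i)-weighted trace, on the shared interface nodes, of the local Schur
   complement applied to the neighbour's scaled null space piece D_j Z_j. These contributions
   are then added into the global coarse matrix with MatSetValues(...,ADD_VALUES).
*/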
PetscErrorCode PCNNCreateCoarseMatrix(PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscInt       i, j, k;
  PetscScalar    *mat;     /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar    **DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS       *pcis     = (PC_IS*)(pc->data);
  PC_NN       *pcnn     = (PC_NN*)pc->data;
  PetscInt    n_neigh   = pcis->n_neigh;
  PetscInt    *neigh    = pcis->neigh;
  PetscInt    *n_shared = pcis->n_shared;
  PetscInt    **shared  = pcis->shared;
  PetscScalar **DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  PetscCall(PetscMalloc1(n_neigh*n_neigh+1,&mat));

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same form. */
  {
    PetscInt size_of_Z = 0;
    PetscCall(PetscMalloc1(n_neigh+1,&pcnn->DZ_IN));
    DZ_IN = pcnn->DZ_IN;
    PetscCall(PetscMalloc1(n_neigh+1,&DZ_OUT));
    for (i=0; i<n_neigh; i++) size_of_Z += n_shared[i];
    PetscCall(PetscMalloc1(size_of_Z+1,&DZ_IN[0]));
    PetscCall(PetscMalloc1(size_of_Z+1,&DZ_OUT[0]));
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  PetscCall(PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc));
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    PetscCall(PetscObjectGetNewTag((PetscObject)pc,&tag));
    PetscCall(PetscMalloc2(n_neigh+1,&send_request,n_neigh+1,&recv_request));
    for (i=1; i<n_neigh; i++) {
      PetscCallMPI(MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(send_request[i])));
      PetscCallMPI(MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(recv_request[i])));
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) DZ_IN[0][j] = pcis->work_N[shared[0][j]];

  /* Start computing with local D*Z while communication goes on.      */
  /* Apply the Schur complement. The result is stored in pcis->vec1_B */
  /* and also scattered to pcis->work_N.                              */
  PetscCall(PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec1_D,pcis->vec2_D));

  /* Compute the first column, while completing the receives. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) {PetscCallMPI(MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat)); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
  }

  /* Compute the remaining columns. */
  for (j=1; j<n_neigh; j++) {
    PetscCall(PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec1_D,pcis->vec2_D));
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
    }
  }

  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    PetscCall(PetscMalloc1(n_neigh-1,&stat));
    if (n_neigh-1) PetscCallMPI(MPI_Waitall(n_neigh-1,&(send_request[1]),stat));
    PetscCall(PetscFree(stat));
  }

  /* Free the memory for the MPI requests */
  PetscCall(PetscFree2(send_request,recv_request));

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    PetscCall(PetscFree(DZ_OUT[0]));
    PetscCall(PetscFree(DZ_OUT));
  }

  {
    PetscMPIInt size;
    PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)pc),&size));
    /* Create the global coarse vectors (rhs and solution). */
    PetscCall(VecCreateMPI(PetscObjectComm((PetscObject)pc),1,size,&(pcnn->coarse_b)));
    PetscCall(VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x)));
    /* Create and set the global coarse AIJ matrix. */
    PetscCall(MatCreate(PetscObjectComm((PetscObject)pc),&(pcnn->coarse_mat)));
    PetscCall(MatSetSizes(pcnn->coarse_mat,1,1,size,size));
    PetscCall(MatSetType(pcnn->coarse_mat,MATAIJ));
    PetscCall(MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,NULL));
    PetscCall(MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,NULL,n_neigh,NULL));
    PetscCall(MatSetOption(pcnn->coarse_mat,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE));
    PetscCall(MatSetOption(pcnn->coarse_mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE));
    PetscCall(MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES));
    PetscCall(MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY));
  }

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank));
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero any row; pass an empty list of rows. This is done because MatZeroRows() is collective. */
      PetscCall(MatZeroRows(pcnn->coarse_mat,0,NULL,one,NULL,NULL));
    } else { /* here it DOES zero the row, since this is not a floating subdomain. */
      PetscInt row = (PetscInt) rank;
      PetscCall(MatZeroRows(pcnn->coarse_mat,1,&row,one,NULL,NULL));
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    PetscCall(KSPCreate(PetscObjectComm((PetscObject)pc),&pcnn->ksp_coarse));
    PetscCall(PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2));
    PetscCall(KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat));
    PetscCall(KSPGetPC(pcnn->ksp_coarse,&pc_ctx));
    PetscCall(PCSetType(pc_ctx,PCREDUNDANT));
    PetscCall(KSPSetType(pcnn->ksp_coarse,KSPPREONLY));
    PetscCall(PCRedundantGetKSP(pc_ctx,&inner_ksp));
    PetscCall(KSPGetPC(inner_ksp,&inner_pc));
    PetscCall(PCSetType(inner_pc,PCLU));
    PetscCall(KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_"));
    PetscCall(KSPSetFromOptions(pcnn->ksp_coarse));
    PetscCall(KSPSetUp(pcnn->ksp_coarse));
  }

  /* Free the memory for mat */
  PetscCall(PetscFree(mat));

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL,NULL,"-pc_nn_save_coarse_matrix",&flg,NULL));
    if (flg) {
      PetscViewer viewer;
      PetscCall(PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer));
      PetscCall(PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_MATLAB));
      PetscCall(MatView(pcnn->coarse_mat,viewer));
      PetscCall(PetscViewerPopFormat(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }

  /*  Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk - Applies the local Schur complement to a chunk of values
                           given on (a subset of) the local interface nodes.

   Input parameters:
.  pc - the preconditioner context
.  n - size of chunk
.  idx - indices of chunk
.  chunk - values

   Output parameters:
.  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B  - result of Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
.  vec2_D  - garbage (used as work space)

*/
PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt *idx, PetscScalar *chunk, PetscScalar *array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  PetscInt i;
  PC_IS    *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  PetscCall(PetscArrayzero(array_N, pcis->n));
  for (i=0; i<n; i++) array_N[idx[i]] = chunk[i];
  PetscCall(PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc));
  PetscCall(PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D));
  PetscCall(PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc));
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Apply the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input Parameters:
.  pc - the preconditioner context
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output parameters:
.  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)

*/
PetscErrorCode PCNNApplyInterfacePreconditioner(PC pc, Vec r, Vec z, PetscScalar *work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  PC_IS *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  /*
    First balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL,NULL,"-pc_nn_turn_off_first_balancing",&flg,NULL));
    if (!flg) {
      PetscCall(PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N));
    } else {
      PetscCall(VecCopy(r,z));
    }
  }

  /*
    Extract the local interface part of z and scale it by D
  */
  PetscCall(VecScatterBegin(pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecPointwiseMult(vec2_B,pcis->D,vec1_B));

  /* Neumann Solver */
  PetscCall(PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N));

  /*
    Second balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL,NULL,"-pc_nn_turn_off_second_balancing",&flg,NULL));
    if (!flg) {
      PetscCall(PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N));
    } else {
      PetscCall(VecPointwiseMult(vec2_B,pcis->D,vec1_B));
      PetscCall(VecSet(z,0.0));
      PetscCall(VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
      PetscCall(VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
    }
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93].

   Input Parameters:
.  pc - the NN preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It is preserved.
.  u - (optional) sequential vector of local interface nodes. It is preserved UNLESS vec3_B is null.

   Output Parameters:
.  z - MPI vector of interior and interface nodes. Returns s or z (see the description above).
.  vec1_B - sequential vector of local interface nodes. Workspace.
.  vec2_B - sequential vector of local interface nodes. Workspace.
.  vec3_B - (optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - sequential vector of local interior nodes. Workspace.
.  vec2_D - sequential vector of local interior nodes. Workspace.
.  work_N - array of all local nodes (interior and interface). Workspace.

*/
PetscErrorCode PCNNBalancing(PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  PetscInt    k;
  PetscScalar value;
  PetscScalar *lambda;
  PC_NN       *pcnn = (PC_NN*)(pc->data);
  PC_IS       *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  PetscCall(PetscLogEventBegin(PC_ApplyCoarse,pc,0,0,0));
  if (u) {
    if (!vec3_B) vec3_B = u;
    PetscCall(VecPointwiseMult(vec1_B,pcis->D,u));
    PetscCall(VecSet(z,0.0));
    PetscCall(VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D));
    PetscCall(VecScale(vec3_B,-1.0));
    PetscCall(VecCopy(r,z));
    PetscCall(VecScatterBegin(pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE));
    PetscCall(VecScatterEnd  (pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE));
  } else {
    PetscCall(VecCopy(r,z));
  }
  PetscCall(VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
  PetscCall(PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc));
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]];
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    PetscMPIInt rank;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank));
    PetscCall(VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES));
    /*
       Since we are only inserting local values (one value actually) we don't need to do the
       reduction that tells us there is no data that needs to be moved. Hence we comment out these
       PetscCall(VecAssemblyBegin(pcnn->coarse_b));
       PetscCall(VecAssemblyEnd  (pcnn->coarse_b));
    */
  }
  PetscCall(KSPSolve(pcnn->ksp_coarse,pcnn->coarse_b,pcnn->coarse_x));
  if (!u) PetscCall(VecScale(pcnn->coarse_x,-1.0));
  PetscCall(VecGetArray(pcnn->coarse_x,&lambda));
  for (k=0; k<pcis->n_shared[0]; k++) work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k];
  PetscCall(VecRestoreArray(pcnn->coarse_x,&lambda));
  PetscCall(PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc));
  PetscCall(VecSet(z,0.0));
  PetscCall(VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE));
  if (!u) {
    PetscCall(VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD));
    PetscCall(PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D));
    PetscCall(VecCopy(r,z));
  }
  PetscCall(VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE));
  PetscCall(PetscLogEventEnd(PC_ApplyCoarse,pc,0,0,0));
  PetscFunctionReturn(0);
}

/*  -------   E N D   O F   T H E   C O D E   -------  */
/*                                                     */
/*  From now on, "footnotes" (or "historical notes").  */
/*                                                     */
/*  -------------------------------------------------  */

/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the subdomains that are not pure-Neumann, since we would
   be scaling the balanced residual by zero.
*/

/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/
599