#define PETSCKSP_DLL

#include "../src/ksp/pc/impls/is/nn/nn.h"

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_NN"
static PetscErrorCode PCSetUp_NN(PC pc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up the common "iterative substructuring" data structures (PC_IS) */
    ierr = PCISSetUp(pc);CHKERRQ(ierr);
    /* Create the coarse matrix. */
    ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
.  pc - the preconditioner context
.  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
 */
#undef __FUNCT__
#define __FUNCT__ "PCApply_NN"
static PetscErrorCode PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS          *pcis = (PC_IS*)(pc->data);
  PetscErrorCode ierr;
  PetscScalar    m_one = -1.0;
  Vec            w = pcis->vec1_global;

  PetscFunctionBegin;
  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)}r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  ierr = VecScatterBegin(pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $.
    Storing the result in the interface portion of the global vector w.
  */
  ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecScale(pcis->vec1_B,m_one);CHKERRQ(ierr);
  ierr = VecCopy(r,w);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);

  /*
    Apply the interface preconditioner.
  */
  ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
                                          pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $.
    The result is stored in vec1_D.
  */
  ierr = VecScatterBegin(pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)}t_I^{(i)} $ and inserting into the global vector the blocks
    $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $.
  */
  ierr = VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecScale(pcis->vec2_D,m_one);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
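
/*
   Net effect of PCApply_NN (a sketch reconstructed from the steps above,
   using the notation of the in-line comments):

      z_B       = Q ( r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} B_I^{(j)} r_I^{(j)} )
      z_I^{(i)} = B_I^{(i)} ( r_I^{(i)} - A_{IB}^{(i)} \tilde R_i z_B )

   where Q denotes the balanced Neumann-Neumann interface preconditioner
   applied by PCNNApplyInterfacePreconditioner() and B_I^{(i)} the local
   Dirichlet solve performed with ksp_D.
*/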

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_NN"
static PetscErrorCode PCDestroy_NN(PC pc)
{
  PC_NN          *pcnn = (PC_NN*)pc->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCISDestroy(pc);CHKERRQ(ierr);

  if (pcnn->coarse_mat) {ierr = MatDestroy(pcnn->coarse_mat);CHKERRQ(ierr);}
  if (pcnn->coarse_x)   {ierr = VecDestroy(pcnn->coarse_x);CHKERRQ(ierr);}
  if (pcnn->coarse_b)   {ierr = VecDestroy(pcnn->coarse_b);CHKERRQ(ierr);}
  if (pcnn->ksp_coarse) {ierr = KSPDestroy(pcnn->ksp_coarse);CHKERRQ(ierr);}
  if (pcnn->DZ_IN) {
    ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
  }

  /*
      Free the private data structure that was hanging off the PC.
  */
  ierr = PetscFree(pcnn);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*MC
   PCNN - Balancing Neumann-Neumann preconditioner for scalar elliptic PDEs.

   Options Database Keys:
+    -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
                                       (this skips the first coarse grid solve in the preconditioner)
.    -pc_nn_turn_off_second_balancing - do not balance the solution of the local Neumann problems
                                       (this skips the second coarse grid solve in the preconditioner)
.    -pc_is_damp_fixed <fact> -
.    -pc_is_remove_nullspace_fixed -
.    -pc_is_set_damping_factor_floating <fact> -
.    -pc_is_not_damp_floating -
-    -pc_is_not_remove_nullspace_floating -

   Level: intermediate

   Notes: The matrix used with this preconditioner must be of type MATIS.

          Unlike more 'conventional' Neumann-Neumann preconditioners, this one iterates over ALL the
          degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
          on the subdomains, though in our experience using approximate solvers is slower).

          Options for the coarse grid preconditioner can be set with -nn_coarse_pc_xxx
          Options for the Dirichlet subproblem preconditioner can be set with -is_localD_pc_xxx
          Options for the Neumann subproblem preconditioner can be set with -is_localN_pc_xxx

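          For example, a hypothetical application ./app whose operator is assembled as a MATIS
          matrix could be run with
.vb
     mpiexec -n 4 ./app -ksp_type cg -pc_type nn -nn_coarse_pc_type redundant -is_localD_pc_type icc
.ve
          (the flags after -pc_type nn are optional; they merely illustrate the prefixes above).
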
   Contributed by Paulo Goldfeld

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, MATIS
M*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_NN"
PetscErrorCode  PCCreate_NN(PC pc)
{
  PetscErrorCode ierr;
  PC_NN          *pcnn;

  PetscFunctionBegin;
  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  ierr      = PetscNewLog(pc,PC_NN,&pcnn);CHKERRQ(ierr);
  pc->data  = (void*)pcnn;

  ierr = PCISCreate(pc);CHKERRQ(ierr);
  pcnn->coarse_mat = 0;
  pcnn->coarse_x   = 0;
  pcnn->coarse_b   = 0;
  pcnn->ksp_coarse = 0;
  pcnn->DZ_IN      = 0;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions.  Note we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = 0;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;
  PetscFunctionReturn(0);
}
EXTERN_C_END


/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Assembles the coarse problem (one degree of
                            freedom per subdomain) and creates its solver.
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNCreateCoarseMatrix"
PetscErrorCode PCNNCreateCoarseMatrix(PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscErrorCode ierr;
  PetscInt       i, j, k;
  PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS*         pcis     = (PC_IS*)(pc->data);
  PC_NN*         pcnn     = (PC_NN*)pc->data;
  PetscInt       n_neigh  = pcis->n_neigh;
  PetscInt*      neigh    = pcis->neigh;
  PetscInt*      n_shared = pcis->n_shared;
  PetscInt**     shared   = pcis->shared;
  PetscScalar**  DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is done just so that DZ_OUT and DZ_IN have exactly the same form. */
  {
    PetscInt size_of_Z = 0;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      size_of_Z += n_shared[i];
    }
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }
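
  /*
     Layout sketch (illustration only, no extra logic): DZ_IN and DZ_OUT are
     jagged arrays backed by a single contiguous buffer each. For example,
     with n_neigh = 3 and n_shared = {4,2,3}:

        buffer:  [ x x x x | x x | x x x ]
                   ^         ^     ^
                   DZ_IN[0]  DZ_IN[1]  DZ_IN[2]

     so DZ_IN[i][j] is the value for the j-th node shared with neigh[i]
     (and neigh[0] is this processor itself).
  */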

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) {
    DZ_IN[0][j] = pcis->work_N[shared[0][j]];
  }

  /* Start computing with local D*Z while communication goes on.      */
  /* Apply the Schur complement. The result is returned in            */
  /* pcis->vec1_B and also scattered to pcis->work_N.                 */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receives. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) {
      mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
    }
  }

  /* Compute the remaining columns. */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) {
        mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
      }
    }
  }

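  /*
     What the loops above compute (an interpretation, not extra logic):
     locally, mat[i*n_neigh+j] is the inner product of the D*Z chunk received
     from neigh[i] with the Schur complement applied to the D*Z chunk received
     from neigh[j], i.e. this subdomain's contribution to the entry
     (neigh[i],neigh[j]) of the coarse matrix.
  */
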
  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr);
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }

  {
    PetscMPIInt size;
    ierr = MPI_Comm_size(((PetscObject)pc)->comm,&size);CHKERRQ(ierr);
    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(((PetscObject)pc)->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
    /* Create and set the global coarse AIJ matrix. */
    ierr = MatCreate(((PetscObject)pc)->comm,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetSizes(pcnn->coarse_mat,1,1,size,size);CHKERRQ(ierr);
    ierr = MatSetType(pcnn->coarse_mat,MATAIJ);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

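  /*
     Note on the coarse problem just assembled: it has one degree of freedom
     per subdomain (the matrix is size x size, with one row owned by each
     process), and each process adds its dense n_neigh x n_neigh block 'mat'
     at the global indices neigh[] x neigh[], accumulated with ADD_VALUES.
  */
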
  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" the rows of the non-pure-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero the row; passes an empty index set. The reason is that MatZeroRows() is collective. */
      ierr = MatZeroRows(pcnn->coarse_mat,0,PETSC_NULL,one,0,0);CHKERRQ(ierr);
    } else { /* here it DOES zero the row, since this is not a floating subdomain. */
      PetscInt row = (PetscInt) rank;
      ierr = MatZeroRows(pcnn->coarse_mat,1,&row,one,0,0);CHKERRQ(ierr);
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    ierr = KSPCreate(((PetscObject)pc)->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
    ierr = PCRedundantGetKSP(pc_ctx,&inner_ksp);CHKERRQ(ierr);
    ierr = KSPGetPC(inner_ksp,&inner_pc);CHKERRQ(ierr);
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    /* Set up the coarse solver now, so that its setup cost is not incurred in the first application of the preconditioner. */
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* For DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool  flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_save_coarse_matrix",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
    }
  }

  /* Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk - Applies the local Schur complement to a chunk of
                           an array of local nodes.

   Input Parameters:
.  pc - the preconditioner context
.  n - size of chunk
.  idx - indices of chunk
.  chunk - values

   Output Parameters:
.  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B  - result of Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
.  vec2_D  - garbage (used as work space)

*/
#undef __FUNCT__
#define __FUNCT__ "PCNNApplySchurToChunk"
PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt* idx, PetscScalar *chunk, PetscScalar* array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  PetscErrorCode ierr;
  PetscInt       i;
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
  for (i=0; i<n; i++) { array_N[idx[i]] = chunk[i]; }
  ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
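
/*
   Net effect (a sketch of the routine above): expand (idx,chunk) to a full
   local array, scatter it to the interface vector vec2_B, apply the local
   Schur complement via PCISApplySchur() to obtain vec1_B, and gather the
   interface entries of vec1_B back into array_N.
*/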

/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Applies the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input Parameter:
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output Parameters:
.  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)

*/
#undef __FUNCT__
#define __FUNCT__ "PCNNApplyInterfacePreconditioner"
PetscErrorCode PCNNApplyInterfacePreconditioner (PC pc, Vec r, Vec z, PetscScalar* work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,
                                                 Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  PetscErrorCode ierr;
  PC_IS*         pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  /*
    First balancing step.
  */
  {
    PetscBool  flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_turn_off_first_balancing",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(r,z);CHKERRQ(ierr);
    }
  }

  /*
    Extract the local interface part of z and scale it by D.
  */
  ierr = VecScatterBegin(pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecPointwiseMult(vec2_B,pcis->D,vec1_B);CHKERRQ(ierr);

  /* Neumann Solver */
  ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);

  /*
    Second balancing step.
  */
  {
    PetscBool  flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_turn_off_second_balancing",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecPointwiseMult(vec2_B,pcis->D,vec1_B);CHKERRQ(ierr);
      ierr = VecSet(z,0.0);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
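
/*
   With both balancing steps turned off, the routine above reduces to the
   classical (unbalanced) Neumann-Neumann operator, roughly

      z_B = \sum_i \tilde R_i^T D_i S_i^{+} D_i \tilde R_i r_B ,

   where S_i^{+} is the (possibly approximate) local Neumann solve performed
   by PCISApplyInvSchur() and D_i is the partition-of-unity scaling pcis->D.
   The balancing steps wrap this operator with the coarse corrections computed
   by PCNNBalancing(); see [Man93].
*/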

/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93]
                   (J. Mandel, "Balancing domain decomposition",
                   Comm. Numer. Methods Engrg. 9 (1993), 233-241).

   Input Parameters:
.  pc - the preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It's preserved.
.  u - (optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.

   Output Parameters:
.  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
.  vec1_B - sequential vector of local interface nodes. Workspace.
.  vec2_B - sequential vector of local interface nodes. Workspace.
.  vec3_B - (optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - sequential vector of local interior nodes. Workspace.
.  vec2_D - sequential vector of local interior nodes. Workspace.
.  work_N - array of all local nodes (interior and interface). Workspace.

*/
#undef __FUNCT__
#define __FUNCT__ "PCNNBalancing"
PetscErrorCode PCNNBalancing (PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,
                              Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  PetscErrorCode ierr;
  PetscInt       k;
  PetscScalar    value;
  PetscScalar*   lambda;
  PC_NN*         pcnn = (PC_NN*)(pc->data);
  PC_IS*         pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
  if (u) {
    if (!vec3_B) { vec3_B = u; }
    ierr = VecPointwiseMult(vec1_B,pcis->D,u);CHKERRQ(ierr);
    ierr = VecSet(z,0.0);CHKERRQ(ierr);
    ierr = VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecScale(vec3_B,-1.0);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  } else {
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) { value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]]; }
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    PetscMPIInt rank;
    ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
    ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
    /*
       Since we are only inserting local values (one value, actually), we do not need the
       reduction that tells us there is no data to be moved. Hence the following calls are
       commented out:
       ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
       ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
    */
  }
  ierr = KSPSolve(pcnn->ksp_coarse,pcnn->coarse_b,pcnn->coarse_x);CHKERRQ(ierr);
  if (!u) { ierr = VecScale(pcnn->coarse_x,-1.0);CHKERRQ(ierr); }
  ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  for (k=0; k<pcis->n_shared[0]; k++) { work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k]; }
  ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = VecSet(z,0.0);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  if (!u) {
    ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__



/*  -------   E N D   O F   T H E   C O D E   -------  */
/*                                                     */
/*  From now on, "footnotes" (or "historical notes").  */
/*                                                     */
/*  -------------------------------------------------  */



/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would be solving trivial Neumann
   problems in the non-pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/




/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/

646