/* xref: /petsc/src/ksp/pc/impls/is/nn/nn.c (revision be1d678a52e6eff2808b2fa31ae986cdbf03c9fe) */

#include "src/ksp/pc/impls/is/nn/nn.h"

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_NN"
static PetscErrorCode PCSetUp_NN(PC pc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    ierr = PCISSetUp(pc);CHKERRQ(ierr);
    /* Create the coarse matrix. */
    ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
.  pc - the preconditioner context
.  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
 */
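/*
   Overview of the apply routine (a sketch, written in the notation of the comments below,
   where $ B_I^{(i)} $ denotes the local Dirichlet solve and M_NN the interface preconditioner
   implemented in PCNNApplyInterfacePreconditioner()):

     u_I^{(i)} = B_I^{(i)} r_I^{(i)}                                  (local Dirichlet solves)
     w_B       = r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} u_I^{(j)}     (condense the residual onto the interface)
     z_B       = M_NN^{-1} w_B                                        (balanced Neumann-Neumann step)
     z_I^{(i)} = u_I^{(i)} - B_I^{(i)} A_{IB}^{(i)} \tilde R_i z_B    (interior back-substitution)
*/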
#undef __FUNCT__
#define __FUNCT__ "PCApply_NN"
static PetscErrorCode PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS          *pcis = (PC_IS*)(pc->data);
  PetscErrorCode ierr;
  PetscScalar    m_one = -1.0;
  Vec            w = pcis->vec1_global;

  PetscFunctionBegin;
  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)}r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  ierr = VecScatterBegin(r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $ .
    Storing the result in the interface portion of the global vector w.
  */
  ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecScale(&m_one,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecCopy(r,w);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);

  /*
    Apply the interface preconditioner.
  */
  ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
                                          pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $ .
    The result is stored in vec1_D.
  */
  ierr = VecScatterBegin(z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)}t_I^{(i)} $ and assembling the blocks
    $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $ into the global vector.
  */
  ierr = VecScatterBegin(pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecScale(&m_one,pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_NN"
static PetscErrorCode PCDestroy_NN(PC pc)
{
  PC_NN          *pcnn = (PC_NN*)pc->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCISDestroy(pc);CHKERRQ(ierr);

  if (pcnn->coarse_mat) {ierr = MatDestroy(pcnn->coarse_mat);CHKERRQ(ierr);}
  if (pcnn->coarse_x)   {ierr = VecDestroy(pcnn->coarse_x);CHKERRQ(ierr);}
  if (pcnn->coarse_b)   {ierr = VecDestroy(pcnn->coarse_b);CHKERRQ(ierr);}
  if (pcnn->ksp_coarse) {ierr = KSPDestroy(pcnn->ksp_coarse);CHKERRQ(ierr);}
  if (pcnn->DZ_IN) {
    if (pcnn->DZ_IN[0]) {ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);}
    ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
  }

  /*
      Free the private data structure that was hanging off the PC.
  */
  ierr = PetscFree(pcnn);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*MC
   PCNN - Balancing Neumann-Neumann for scalar elliptic PDEs.

   Options Database Keys:
+    -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
                                       (this skips the first coarse grid solve in the preconditioner)
.    -pc_nn_turn_off_second_balancing - do not balance the solution after solving the local Neumann problems
                                       (this skips the second coarse grid solve in the preconditioner)
.    -pc_is_damp_fixed <fact> -
.    -pc_is_remove_nullspace_fixed -
.    -pc_is_set_damping_factor_floating <fact> -
.    -pc_is_not_damp_floating -
-    -pc_is_not_remove_nullspace_floating -

   Level: intermediate

   Notes: The matrix used with this preconditioner must be of type MATIS.

          Unlike more 'conventional' Neumann-Neumann preconditioners this iterates over ALL the
          degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
          on the subdomains, though in our experience using approximate solvers is slower).

          Options for the coarse grid preconditioner can be set with -nn_coarse_pc_xxx.
          Options for the Dirichlet subproblem preconditioner can be set with -is_localD_pc_xxx.
          Options for the Neumann subproblem preconditioner can be set with -is_localN_pc_xxx.

   Contributed by Paulo Goldfeld

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, MatIS
M*/
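/*
   A minimal usage sketch (not taken from the PETSc examples; it assumes the operator A has
   already been assembled as a MATIS matrix, e.g. with MatCreateIS(), and that ksp, pc, b and x
   have been declared/created by the caller):

     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCNN);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);   /* e.g. -pc_type nn on the command line */
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
*/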
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_NN"
PetscErrorCode PCCreate_NN(PC pc)
{
  PetscErrorCode ierr;
  PC_NN          *pcnn;

  PetscFunctionBegin;
  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  ierr      = PetscNew(PC_NN,&pcnn);CHKERRQ(ierr);
  pc->data  = (void*)pcnn;

  /*
     Log the memory usage; this is not needed but allows PETSc to
     monitor how much memory is being used for various purposes.
  */
  ierr = PetscLogObjectMemory(pc,sizeof(PC_NN)+sizeof(PC_IS));CHKERRQ(ierr); /* Is this the right thing to do? */

  ierr = PCISCreate(pc);CHKERRQ(ierr);
  pcnn->coarse_mat = 0;
  pcnn->coarse_x   = 0;
  pcnn->coarse_b   = 0;
  pcnn->ksp_coarse = 0;
  pcnn->DZ_IN      = 0;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions.  Note we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = 0;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;
  PetscFunctionReturn(0);
}
EXTERN_C_END


/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Builds the (one degree of freedom per subdomain) coarse
                            problem: coarse matrix, coarse right-hand-side and solution
                            vectors, and the coarse linear solver (see the sketch below).
*/
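/*
   A sketch of what is assembled, in the notation used below: on each subdomain the local
   contribution to the coarse matrix is

     mat[i*n_neigh+j] = (D z)_i^T  S^{(local)} (D z)_j ,    i,j = 0,...,n_neigh-1,

   where (D z)_i = DZ_IN[i][] is the piece of the scaled local nullspace D*z seen on the
   interface shared with neighbour neigh[i] (neigh[0] being this subdomain itself), and
   S^{(local)} is the local Schur complement applied by PCNNApplySchurToChunk(). The
   contributions are then assembled with MatSetValues(...,ADD_VALUES) into the coarse
   matrix, which has one row and one column per subdomain.
*/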
#undef __FUNCT__
#define __FUNCT__ "PCNNCreateCoarseMatrix"
PetscErrorCode PCNNCreateCoarseMatrix (PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscErrorCode ierr;
  PetscInt       i, j, k;
  PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS*         pcis     = (PC_IS*)(pc->data);
  PC_NN*         pcnn     = (PC_NN*)pc->data;
  PetscInt       n_neigh  = pcis->n_neigh;
  PetscInt*      neigh    = pcis->neigh;
  PetscInt*      n_shared = pcis->n_shared;
  PetscInt**     shared   = pcis->shared;
  PetscScalar**  DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same layout. */
  {
    PetscInt size_of_Z = 0;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      size_of_Z += n_shared[i];
    }
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) {
    DZ_IN[0][j] = pcis->work_N[shared[0][j]];
  }

  /* Start computing with local D*Z while communication goes on.      */
  /* Apply the Schur complement. The result is stored in pcis->vec1_B */
  /* and also scattered to pcis->work_N.                              */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receives. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) {
      mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
    }
  }

  /* Compute the remaining columns. */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) {
        mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
      }
    }
  }

  /* Complete the sends. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    if (DZ_OUT[0]) { ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr); }
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }

  {
    PetscMPIInt size;
    ierr = MPI_Comm_size(pc->comm,&size);CHKERRQ(ierr);
    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(pc->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
    /* Create and set the global coarse AIJ matrix. */
    ierr = MatCreate(pc->comm,1,1,size,size,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetType(pcnn->coarse_mat,MATAIJ);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    IS          is;
    ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero the row; create an empty index set. The reason is that MatZeroRows() is collective. */
      ierr = ISCreateStride(pc->comm,0,0,0,&is);CHKERRQ(ierr);
    } else { /* here it DOES zero the row, since it's not a floating subdomain. */
      ierr = ISCreateStride(pc->comm,1,rank,0,&is);CHKERRQ(ierr);
    }
    ierr = MatZeroRows(pcnn->coarse_mat,is,&one);CHKERRQ(ierr);
    ierr = ISDestroy(is);CHKERRQ(ierr);
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    ierr = KSPCreate(pc->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
    ierr = PCRedundantGetPC(pc_ctx,&inner_pc);CHKERRQ(ierr);
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-pc_nn_save_coarse_matrix",&flg);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
    }
  }

  /*  Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk - Applies the local Schur complement to a chunk of an
                           interface vector (see the sketch below).

   Input parameters:
.  pc - the preconditioner context
.  n - size of chunk
.  idx - indices of chunk
.  chunk - values

   Output parameters:
.  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B  - result of Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
.  vec2_D  - garbage (used as work space)

*/
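/*
   A sketch of the computation below: the chunk is expanded into the local numbering,

     array_N[idx[i]] = chunk[i],  i = 0,...,n-1   (zero elsewhere),

   scattered onto the interface vector vec2_B, and then

     vec1_B  = S^{(local)} vec2_B,
     array_N = vec1_B scattered back to the local numbering,

   where S^{(local)} is the local Schur complement applied by PCISApplySchur().
*/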
#undef __FUNCT__
#define __FUNCT__ "PCNNApplySchurToChunk"
PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt* idx, PetscScalar *chunk, PetscScalar* array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  PetscErrorCode ierr;
  PetscInt       i;
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
  for (i=0; i<n; i++) { array_N[idx[i]] = chunk[i]; }
  ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Apply the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input parameter:
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output parameters:
.  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)

*/
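/*
   The flow below, in outline (a sketch; the two coarse solves live inside PCNNBalancing()):

     w   = Balance(r)                    (first balancing; skipped with -pc_nn_turn_off_first_balancing)
     v_B = D * w_B                       (scale the interface residual by the partition of unity D)
     u_B = S^{(local)+} v_B              (local Neumann solves, PCISApplyInvSchur())
     z   = Balance(r,u)                  (second balancing; skipped with -pc_nn_turn_off_second_balancing,
                                          in which case z_B = \sum_i \tilde R_i^T D_i u_B^{(i)})
*/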
#undef __FUNCT__
#define __FUNCT__ "PCNNApplyInterfacePreconditioner"
PetscErrorCode PCNNApplyInterfacePreconditioner (PC pc, Vec r, Vec z, PetscScalar* work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,
                                      Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  PetscErrorCode ierr;
  PC_IS*         pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  /*
    First balancing step.
  */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-pc_nn_turn_off_first_balancing",&flg);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(r,z);CHKERRQ(ierr);
    }
  }

  /*
    Extract the local interface part of z and scale it by D.
  */
  ierr = VecScatterBegin(z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);

  /* Neumann Solver */
  ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);

  /*
    Second balancing step.
  */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-pc_nn_turn_off_second_balancing",&flg);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      PetscScalar zero = 0.0;
      ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);
      ierr = VecSet(&zero,z);CHKERRQ(ierr);
      ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93].

   Input Parameters:
.  pc - NN preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It's preserved.
.  u - (Optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.

   Output Parameters:
.  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
.  vec1_B - Sequential vector of local interface nodes. Workspace.
.  vec2_B - Sequential vector of local interface nodes. Workspace.
.  vec3_B - (Optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - Sequential vector of local interior nodes. Workspace.
.  vec2_D - Sequential vector of local interior nodes. Workspace.
.  work_N - Array of all local nodes (interior and interface). Workspace.

*/
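/*
   The coarse solve at the heart of the balancing, in outline (a sketch of the code below):
   each subdomain contributes one entry to the coarse right-hand side,

     b_c[rank] = factor_coarse_rhs * \sum_k DZ_IN[0][k] * (local interface value of z at shared[0][k]),

   the coarse problem coarse_mat * x_c = b_c is solved with ksp_coarse, and the local entry
   lambda = x_c[rank] is used to add lambda * D*z back into z through the global-to-B scatter.
   When u is null, the Schur complement is applied to that correction as well, so that the
   routine returns the balanced residual rather than the balanced solution.
*/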
#undef __FUNCT__
#define __FUNCT__ "PCNNBalancing"
PetscErrorCode PCNNBalancing (PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,
                   Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  PetscErrorCode ierr;
  PetscInt       k;
  PetscScalar    zero     =  0.0;
  PetscScalar    m_one    = -1.0;
  PetscScalar    value;
  PetscScalar*   lambda;
  PC_NN*         pcnn     = (PC_NN*)(pc->data);
  PC_IS*         pcis     = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
  if (u) {
    if (!vec3_B) { vec3_B = u; }
    ierr = VecPointwiseMult(pcis->D,u,vec1_B);CHKERRQ(ierr);
    ierr = VecSet(&zero,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecScale(&m_one,vec3_B);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  } else {
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) { value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]]; }
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    PetscMPIInt rank;
    ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
    ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
    /*
       Since we are only inserting local values (one value actually) we don't need to do the
       reduction that tells us there is no data that needs to be moved. Hence we comment out these
       ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
       ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
    */
  }
  ierr = KSPSolve(pcnn->ksp_coarse,pcnn->coarse_b,pcnn->coarse_x);CHKERRQ(ierr);
  if (!u) { ierr = VecScale(&m_one,pcnn->coarse_x);CHKERRQ(ierr); }
  ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  for (k=0; k<pcis->n_shared[0]; k++) { work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k]; }
  ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = VecSet(&zero,z);CHKERRQ(ierr);
  ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  if (!u) {
    ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__



/*  -------   E N D   O F   T H E   C O D E   -------  */
/*                                                     */
/*  From now on, "footnotes" (or "historical notes").  */
/*                                                     */
/*  -------------------------------------------------  */



/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., the non-pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the non-pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/




/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem, one that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/