/*$Id: nn.c,v 1.13 2001/08/07 03:03:41 balay Exp $*/

#include "src/ksp/pc/impls/is/nn/nn.h"
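/*
   Usage sketch (explanatory note, not part of the implementation below): the NN
   (balancing Neumann-Neumann) preconditioner is normally selected at run time,

       ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
       ierr = PCSetType(pc,PCNN);CHKERRQ(ierr);        (or -pc_type nn)

   and assumes the operator is stored in the unassembled MATIS format, so that
   each process holds its own subdomain (Neumann) matrix.
*/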

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_NN"
static int PCSetUp_NN(PC pc)
{
  int ierr;

  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    ierr = PCISSetUp(pc);CHKERRQ(ierr);
    /* Create the coarse matrix. */
    ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
.  pc - the preconditioner context
.  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
 */
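/*
   Outline (explanatory reading aid, notation as in the comments below):

     1. interior solves:        v_I = B_I^{(i)} r_I^{(i)}
     2. interface residual:     w_B = r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} v_I^{(j)}
     3. interface correction:   z_B = M_S^{-1} w_B   (PCNNApplyInterfacePreconditioner)
     4. interior back-solve:    z_I = v_I^{(i)} - B_I^{(i)} A_{IB}^{(i)} \tilde R_i z_B

   Here M_S^{-1} denotes the balanced Neumann-Neumann preconditioner for the
   Schur complement, applied by PCNNApplyInterfacePreconditioner().
*/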
#undef __FUNCT__
#define __FUNCT__ "PCApply_NN"
static int PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS       *pcis = (PC_IS*)(pc->data);
  int         ierr;
  PetscScalar m_one = -1.0;
  Vec         w = pcis->vec1_global;

  PetscFunctionBegin;

  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)}r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  ierr = VecScatterBegin(r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
  ierr = KSPSetRhs(pcis->ksp_D,pcis->vec1_D);CHKERRQ(ierr);
  ierr = KSPSetSolution(pcis->ksp_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D);CHKERRQ(ierr);

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $.
    Storing the result in the interface portion of the global vector w.
  */
  ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecScale(&m_one,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecCopy(r,w);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);

  /*
    Apply the interface preconditioner.
  */
  ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
                                          pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $.
    The result is stored in vec1_D.
  */
  ierr = VecScatterBegin(z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)}t_I^{(i)} $ and inserting into the global vector the blocks
    $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $.
  */
  ierr = VecScatterBegin(pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = KSPSetRhs(pcis->ksp_D,pcis->vec1_D);CHKERRQ(ierr);
  ierr = KSPSetSolution(pcis->ksp_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D);CHKERRQ(ierr);
  ierr = VecScale(&m_one,pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_NN"
static int PCDestroy_NN(PC pc)
{
  PC_NN *pcnn = (PC_NN*)pc->data;
  int   ierr;

  PetscFunctionBegin;

  ierr = PCISDestroy(pc);CHKERRQ(ierr);

  if (pcnn->coarse_mat)  {ierr = MatDestroy(pcnn->coarse_mat);CHKERRQ(ierr);}
  if (pcnn->coarse_x)    {ierr = VecDestroy(pcnn->coarse_x);CHKERRQ(ierr);}
  if (pcnn->coarse_b)    {ierr = VecDestroy(pcnn->coarse_b);CHKERRQ(ierr);}
  if (pcnn->ksp_coarse)  {ierr = KSPDestroy(pcnn->ksp_coarse);CHKERRQ(ierr);}
  if (pcnn->DZ_IN) {
    if (pcnn->DZ_IN[0]) {ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);}
    ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
  }

  /*
      Free the private data structure that was hanging off the PC
  */
  ierr = PetscFree(pcnn);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_NN - Creates an NN preconditioner context, PC_NN,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_NN"
int PCCreate_NN(PC pc)
{
  int   ierr;
  PC_NN *pcnn;

  PetscFunctionBegin;

  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  ierr      = PetscNew(PC_NN,&pcnn);CHKERRQ(ierr);
  pc->data  = (void*)pcnn;

  /*
     Log the memory usage; this is not needed but allows PETSc to
     monitor how much memory is being used for various purposes.
  */
  PetscLogObjectMemory(pc,sizeof(PC_NN)+sizeof(PC_IS)); /* Is this the right thing to do? */

  ierr = PCISCreate(pc);CHKERRQ(ierr);
  pcnn->coarse_mat = 0;
  pcnn->coarse_x   = 0;
  pcnn->coarse_b   = 0;
  pcnn->ksp_coarse = 0;
  pcnn->DZ_IN      = 0;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions.  Note we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = 0;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;

  PetscFunctionReturn(0);
}
EXTERN_C_END


/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Assembles the coarse problem (one unknown per
   subdomain) used by the balancing steps and sets up the solver for it.
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNCreateCoarseMatrix"
int PCNNCreateCoarseMatrix (PC pc)
{
  MPI_Request    *send_request, *recv_request;
  int            i, j, k, ierr;

  PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS*         pcis     = (PC_IS*)(pc->data);
  PC_NN*         pcnn     = (PC_NN*)pc->data;
  int            n_neigh  = pcis->n_neigh;
  int*           neigh    = pcis->neigh;
  int*           n_shared = pcis->n_shared;
  int**          shared   = pcis->shared;
  PetscScalar**  DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;

  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is done only so that DZ_OUT and DZ_IN have exactly the same layout. */
  {
    int size_of_Z = 0;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      size_of_Z += n_shared[i];
    }
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }
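  /*
     Layout note: DZ_IN[0] and DZ_OUT[0] each point to one contiguous buffer of
     length sum_i n_shared[i]; DZ_IN[i] and DZ_OUT[i] are just offsets into those
     buffers, which is why PCDestroy_NN() frees only the [0] entry and then the
     array of pointers.
  */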

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive of the common-interface chunks of the scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element.        */
  /* We make them longer so that request[i] corresponds to neigh[i].                    */
  {
    int tag;
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) {
    DZ_IN[0][j] = pcis->work_N[shared[0][j]];
  }

  /* Start computing with local D*Z while communication goes on.        */
  /* Apply the Schur complement: the result is stored in pcis->vec1_B   */
  /* and also scattered to pcis->work_N.                                */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receives. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status stat;
    int ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) {
      mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
    }
  }

  /* Compute the remaining columns */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) {
        mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
      }
    }
  }
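  /*
     Explanatory note: at this point mat[i*n_neigh+j] holds this subdomain's
     contribution to the coarse operator, namely the inner product, over the
     interface nodes shared with neigh[i], of neigh[i]'s weighted nullspace
     chunk with the Schur complement applied to neigh[j]'s chunk.
  */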

  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    if (DZ_OUT[0]) { ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr); }
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }

  {
    int size,n_neigh_m1;
    ierr = MPI_Comm_size(pc->comm,&size);CHKERRQ(ierr);
    n_neigh_m1 = (n_neigh) ? n_neigh-1 : 0;
    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(pc->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
    /* Create and set the global coarse matrix. */
    ierr = MatCreateMPIAIJ(pc->comm,1,1,size,size,1,PETSC_NULL,n_neigh_m1,PETSC_NULL,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  {
    int         rank;
    PetscScalar one = 1.0;
    IS          is;
    ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero the row; creates an empty index set, because MatZeroRows() is collective. */
      ierr = ISCreateStride(pc->comm,0,0,0,&is);CHKERRQ(ierr);
    } else { /* here it DOES zero the row, since it's not a floating subdomain. */
      ierr = ISCreateStride(pc->comm,1,rank,0,&is);CHKERRQ(ierr);
    }
    ierr = MatZeroRows(pcnn->coarse_mat,is,&one);CHKERRQ(ierr);
    ierr = ISDestroy(is);CHKERRQ(ierr);
  }
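  /*
     Rationale (explanatory note): only floating (pure Neumann) subdomains
     contribute a nullspace vector to the coarse space.  For the remaining
     subdomains the corresponding coarse row is replaced by a row of the
     identity, and factor_coarse_rhs (set below) zeroes their coarse
     right-hand side, so their coarse unknown is forced to zero.
  */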

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    ierr = KSPCreate(pc->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
    ierr = PCRedundantGetPC(pc_ctx,&inner_pc);CHKERRQ(ierr);
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    /* the vectors in the following lines are dummy arguments, just telling the KSP the vector size. Values are not used */
    ierr = KSPSetRhs(pcnn->ksp_coarse,pcnn->coarse_x);CHKERRQ(ierr);
    ierr = KSPSetSolution(pcnn->ksp_coarse,pcnn->coarse_b);CHKERRQ(ierr);
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-save_coarse_matrix",&flg);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
    }
  }

  /*  Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */

  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk -

   Input Parameters:
.  pc - the preconditioner context
.  n - size of chunk
.  idx - indices of chunk
.  chunk - values

   Output Parameters:
.  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B  - result of Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
.  vec2_D  - garbage (used as work space)

*/
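/*
   In short (explanatory note): the chunk is scattered onto the local interface,
   vec1_B receives the Schur complement applied to it, and array_N receives the
   same result scattered back to local (interior plus interface) numbering.
*/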
#undef __FUNCT__
#define __FUNCT__ "PCNNApplySchurToChunk"
int PCNNApplySchurToChunk(PC pc, int n, int* idx, PetscScalar *chunk, PetscScalar* array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  int   i, ierr;
  PC_IS *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;

  ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
  for (i=0; i<n; i++) { array_N[idx[i]] = chunk[i]; }
  ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Apply the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input Parameter:
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output Parameters:
.  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)

*/
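/*
   Outline (explanatory note): a first coarse "balancing" step is applied to the
   interface residual, then each subdomain performs a scaled local Neumann solve
   (a D-weighted application of the inverse of its local Schur complement), and
   a second balancing step assembles the corrected result.  The options
   -turn_off_first_balancing and -turn_off_second_balancing, checked below,
   skip the corresponding coarse steps, mainly for experimentation.
*/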
#undef __FUNCT__
#define __FUNCT__ "PCNNApplyInterfacePreconditioner"
int PCNNApplyInterfacePreconditioner (PC pc, Vec r, Vec z, PetscScalar* work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,
                                      Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  int    ierr;
  PC_IS* pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;

  /*
    First balancing step.
  */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-turn_off_first_balancing",&flg);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(r,z);CHKERRQ(ierr);
    }
  }

  /*
    Extract the local interface part of z and scale it by D
  */
  ierr = VecScatterBegin(z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);

  /* Neumann Solver */
  ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);

  /*
    Second balancing step.
  */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-turn_off_second_balancing",&flg);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      PetscScalar zero = 0.0;
      ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);
      ierr = VecSet(&zero,z);CHKERRQ(ierr);
      ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    }
  }

  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93].

   Input Parameters:
.  pc - the preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It's preserved.
.  u - (Optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.

   Output Parameters:
.  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
.  vec1_B - Sequential vector of local interface nodes. Workspace.
.  vec2_B - Sequential vector of local interface nodes. Workspace.
.  vec3_B - (Optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - Sequential vector of local interior nodes. Workspace.
.  vec2_D - Sequential vector of local interior nodes. Workspace.
.  work_N - Array of all local nodes (interior and interface). Workspace.

*/
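/*
   Outline (explanatory note): the coarse right-hand side is the inner product
   of the local weighted nullspace chunk DZ_IN[0] with the interface residual,
   scaled by factor_coarse_rhs; the one-unknown-per-subdomain coarse problem is
   then solved with ksp_coarse, and the resulting multiplier is used to add a
   multiple of the weighted nullspace chunk back into z.
*/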
#undef __FUNCT__
#define __FUNCT__ "PCNNBalancing"
int PCNNBalancing (PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,
                   Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  int            k, ierr;
  PetscScalar    zero     =  0.0;
  PetscScalar    m_one    = -1.0;
  PetscScalar    value;
  PetscScalar*   lambda;
  PC_NN*         pcnn     = (PC_NN*)(pc->data);
  PC_IS*         pcis     = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);

  if (u) {
    if (!vec3_B) { vec3_B = u; }
    ierr = VecPointwiseMult(pcis->D,u,vec1_B);CHKERRQ(ierr);
    ierr = VecSet(&zero,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecScale(&m_one,vec3_B);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  } else {
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) { value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]]; }
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    int rank;
    ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
    ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
    /*
       Since we are only inserting local values (one value, actually), we do not
       need the reduction that would tell us there is no off-process data to
       move, so the assembly calls are left commented out:
       ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
       ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
    */
  }
  ierr = KSPSetRhs(pcnn->ksp_coarse,pcnn->coarse_b);CHKERRQ(ierr);
  ierr = KSPSetSolution(pcnn->ksp_coarse,pcnn->coarse_x);CHKERRQ(ierr);
  ierr = KSPSolve(pcnn->ksp_coarse);CHKERRQ(ierr);
  if (!u) { ierr = VecScale(&m_one,pcnn->coarse_x);CHKERRQ(ierr); }
  ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  for (k=0; k<pcis->n_shared[0]; k++) { work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k]; }
  ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = VecSet(&zero,z);CHKERRQ(ierr);
  ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  if (!u) {
    ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__



/*  -------   E N D   O F   T H E   C O D E   -------  */
/*                                                     */
/*  From now on, "footnotes" (or "historical notes").  */
/*                                                     */
/*  -------------------------------------------------  */


#ifdef __HISTORICAL_NOTES___do_not_compile__

/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the non-pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/

    {
      PetscTruth flg;
      ierr = PetscOptionsHasName(PETSC_NULL,"-pcnn_new_scaling",&flg);CHKERRQ(ierr);
      if (flg) {
        Vec    counter;
        PetscScalar one=1.0, zero=0.0;
        ierr = VecDuplicate(pc->vec,&counter);CHKERRQ(ierr);
        ierr = VecSet(&zero,counter);CHKERRQ(ierr);
        if (pcnn->pure_neumann) {
          ierr = VecSet(&one,pcnn->D);CHKERRQ(ierr);
        } else {
          ierr = VecSet(&zero,pcnn->D);CHKERRQ(ierr);
        }
        ierr = VecScatterBegin(pcnn->D,counter,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecScatterEnd  (pcnn->D,counter,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecScatterBegin(counter,pcnn->D,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecScatterEnd  (counter,pcnn->D,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecDestroy(counter);CHKERRQ(ierr);
        if (pcnn->pure_neumann) {
          ierr = VecReciprocal(pcnn->D);CHKERRQ(ierr);
        } else {
          ierr = VecSet(&zero,pcnn->D);CHKERRQ(ierr);
        }
      }
    }



/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem that would eliminate a constant
   error exactly. It turned out not to improve the overall convergence.
*/

  /*  Set the variable pcnn->factor_coarse_rhs. */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-enforce_preserving_constants",&flg);CHKERRQ(ierr);
    if (!flg) { pcnn->factor_coarse_rhs = (pcnn->pure_neumann) ? 1.0 : 0.0; }
    else {
      PetscScalar zero = 0.0, one = 1.0;
      ierr = VecSet(&one,pcnn->vec1_B);
      ierr = ApplySchurComplement(pcnn,pcnn->vec1_B,pcnn->vec2_B,(Vec)0,pcnn->vec1_D,pcnn->vec2_D);CHKERRQ(ierr);
      ierr = VecSet(&zero,pcnn->vec1_global);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcnn->vec2_B,pcnn->vec1_global,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcnn->vec2_B,pcnn->vec1_global,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcnn->vec1_global,pcnn->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcnn->vec1_global,pcnn->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
      if (pcnn->pure_neumann) { pcnn->factor_coarse_rhs = 1.0; }
      else {
        ierr = ScatterArrayNToVecB(pcnn->work_N,pcnn->vec1_B,INSERT_VALUES,SCATTER_REVERSE,pcnn);CHKERRQ(ierr);
        for (k=0, pcnn->factor_coarse_rhs=0.0; k<pcnn->n_shared[0]; k++) {
          pcnn->factor_coarse_rhs += pcnn->work_N[pcnn->shared[0][k]] * pcnn->DZ_IN[0][k];
        }
        if (pcnn->factor_coarse_rhs) { pcnn->factor_coarse_rhs = 1.0 / pcnn->factor_coarse_rhs; }
        else { SETERRQ(1,"Constants cannot be preserved. Remove \"-enforce_preserving_constants\" option."); }
      }
    }
  }

#endif /* __HISTORICAL_NOTES___do_not_compile__ */
701