/*$Id: nn.c,v 1.13 2001/08/07 03:03:41 balay Exp $*/

#include "src/ksp/pc/impls/is/nn/nn.h"

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_NN"
static int PCSetUp_NN(PC pc)
{
  int ierr;

  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    ierr = PCISSetUp(pc);CHKERRQ(ierr);
    /* Create the coarse matrix. */
    ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

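/*
   For orientation, a minimal sketch of how this routine is normally reached;
   'ksp' is a hypothetical KSP whose operator and NN preconditioner have
   already been set (an illustration, not code from this file):

     ierr = KSPSetUp(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp);CHKERRQ(ierr);

   KSPSetUp() calls PCSetUp(), which dispatches to PCSetUp_NN() when the PC
   type is PCNN; if KSPSetUp() is omitted, the same happens on the first solve.
*/
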
/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
.  pc - the preconditioner context
.  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
 */
#undef __FUNCT__
#define __FUNCT__ "PCApply_NN"
static int PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS       *pcis = (PC_IS*)(pc->data);
  int         ierr;
  PetscScalar m_one = -1.0;
  Vec         w = pcis->vec1_global;

  PetscFunctionBegin;

  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)}r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  ierr = VecScatterBegin(r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_D);CHKERRQ(ierr);
  ierr = KSPSetRhs(pcis->ksp_D,pcis->vec1_D);CHKERRQ(ierr);
  ierr = KSPSetSolution(pcis->ksp_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D);CHKERRQ(ierr);

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $.
    Storing the result in the interface portion of the global vector w.
  */
  ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecScale(&m_one,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecCopy(r,w);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);

  /*
    Apply the interface preconditioner.
    Note that vec3_D (not vec2_D) is passed as interior work space, so that
    vec2_D, which still holds $ B_I^{(i)}r_I^{(i)} $, is preserved for the
    final step below.
  */
  ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
                                          pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $.
    The result is stored in vec1_D.
  */
  ierr = VecScatterBegin(z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)}t_I^{(i)} $ and inserting the blocks
    $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $ into the global vector.
  */
  ierr = VecScatterBegin(pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = KSPSetRhs(pcis->ksp_D,pcis->vec1_D);CHKERRQ(ierr);
  ierr = KSPSetSolution(pcis->ksp_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D);CHKERRQ(ierr);
  ierr = VecScale(&m_one,pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_D);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

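/*
   For reference, the overall action of PCApply_NN in the notation of the
   comments above (a reading aid derived from the code, not an authoritative
   statement of the algorithm):

     $ z_B = M_{NN}^{-1} ( r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} B_I^{(j)} r_I^{(j)} ) $
     $ z_I^{(i)} = B_I^{(i)} r_I^{(i)} - B_I^{(i)} A_{IB}^{(i)} \tilde R_i z_B $

   where $ M_{NN}^{-1} $ stands for the interface preconditioner applied by
   PCNNApplyInterfacePreconditioner().
*/
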
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_NN"
static int PCDestroy_NN(PC pc)
{
  PC_NN *pcnn = (PC_NN*)pc->data;
  int   ierr;

  PetscFunctionBegin;

  ierr = PCISDestroy(pc);CHKERRQ(ierr);

  if (pcnn->coarse_mat) {ierr = MatDestroy(pcnn->coarse_mat);CHKERRQ(ierr);}
  if (pcnn->coarse_x)   {ierr = VecDestroy(pcnn->coarse_x);CHKERRQ(ierr);}
  if (pcnn->coarse_b)   {ierr = VecDestroy(pcnn->coarse_b);CHKERRQ(ierr);}
  if (pcnn->ksp_coarse) {ierr = KSPDestroy(pcnn->ksp_coarse);CHKERRQ(ierr);}
  if (pcnn->DZ_IN) {
    if (pcnn->DZ_IN[0]) {ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);}
    ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
  }

  /*
      Free the private data structure that was hanging off the PC
  */
  ierr = PetscFree(pcnn);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_NN - Creates an NN preconditioner context, PC_NN,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_NN"
int PCCreate_NN(PC pc)
{
  int   ierr;
  PC_NN *pcnn;

  PetscFunctionBegin;

  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  ierr     = PetscNew(PC_NN,&pcnn);CHKERRQ(ierr);
  pc->data = (void*)pcnn;

  /*
     Log the memory usage; this is not needed, but it allows PETSc to
     monitor how much memory is being used for various purposes.
  */
  PetscLogObjectMemory(pc,sizeof(PC_NN)+sizeof(PC_IS)); /* Is this the right thing to do? */

  ierr = PCISCreate(pc);CHKERRQ(ierr);
  pcnn->coarse_mat = 0;
  pcnn->coarse_x   = 0;
  pcnn->coarse_b   = 0;
  pcnn->ksp_coarse = 0;
  pcnn->DZ_IN      = 0;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions.  Note that we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = 0;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;

  PetscFunctionReturn(0);
}
EXTERN_C_END
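
/*
   A minimal usage sketch, assuming the operator is of the type expected by
   the iterative-substructuring support code (MATIS); 'A', 'b' and 'x' are
   hypothetical application objects, and the KSPSetRhs()/KSPSetSolution()
   calls follow the API vintage used in this file:

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCNN);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSetRhs(ksp,b);CHKERRQ(ierr);
     ierr = KSPSetSolution(ksp,x);CHKERRQ(ierr);
     ierr = KSPSolve(ksp);CHKERRQ(ierr);
     ierr = KSPDestroy(ksp);CHKERRQ(ierr);

   Equivalently, -pc_type nn can be given on the command line before
   KSPSetFromOptions() is called.
*/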


/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Creates the coarse matrix of the NN preconditioner
   (one row and one column per processor) and sets up the coarse linear solver.
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNCreateCoarseMatrix"
int PCNNCreateCoarseMatrix (PC pc)
{
  MPI_Request    *send_request, *recv_request;
  int            i, j, k, ierr;

  PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS*         pcis     = (PC_IS*)(pc->data);
  PC_NN*         pcnn     = (PC_NN*)pc->data;
  int            n_neigh  = pcis->n_neigh;
  int*           neigh    = pcis->neigh;
  int*           n_shared = pcis->n_shared;
  int**          shared   = pcis->shared;
  PetscScalar**  DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;

  /* Allocate memory for mat (the +1 handles the case in which n_neigh is zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is done so that DZ_OUT and DZ_IN have exactly the same layout. */
  {
    int size_of_Z = 0;
    ierr  = PetscMalloc((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      size_of_Z += n_shared[i];
    }
    ierr = PetscMalloc((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }
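
/*
   The allocation above builds each of DZ_IN and DZ_OUT as one contiguous
   buffer plus an array of row pointers, so that freeing row zero releases
   all the data at once (as done in PCDestroy_NN()). A standalone sketch of
   the same idiom, with hypothetical sizes and plain malloc() for
   illustration:

     double **rows, *buf;
     int    nrows = 3, len[3] = {2,4,1}, total = 2+4+1, i;
     rows    = (double**)malloc(nrows*sizeof(double*));
     buf     = (double*)malloc(total*sizeof(double));
     rows[0] = buf;
     for (i=1; i<nrows; i++) rows[i] = rows[i-1] + len[i-1];
     free(rows[0]);
     free(rows);
*/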

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive of the common-interface chunks of scaled nullspaces. */
  /* Notice that send_request[] and recv_request[] could each have one element less; */
  /* we make them longer so that request[i] corresponds to neigh[i].                  */
  {
    int tag;
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,pc->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) {
    DZ_IN[0][j] = pcis->work_N[shared[0][j]];
  }

  /* Start computing with the local D*Z while the communication goes on. */
  /* Apply the Schur complement. The result is returned in pcis->vec1_B  */
  /* and is also scattered back to pcis->work_N.                         */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receives. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status stat;
    int ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) {
      mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
    }
  }
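
/*
   The loop above overlaps computation with communication: MPI_Waitany()
   returns as soon as any outstanding receive completes, so columns are
   processed in whatever order the messages arrive. A generic sketch of
   the idiom (nmsg, reqs and process() are hypothetical):

     for (i=0; i<nmsg; i++) {
       MPI_Status st;
       int        which;
       MPI_Waitany(nmsg,reqs,&which,&st);
       process(which);
     }
*/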

  /* Compute the remaining columns. */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) {
        mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
      }
    }
  }
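
/*
   A hedged reading of the entries assembled above: with $ S $ the local Schur
   complement applied by PCNNApplySchurToChunk() and $ (DZ)_i $ the scaled
   chunk associated with neighbour i,

     $ mat[i*n_neigh+j] = (DZ)_i^T S (DZ)_j $ ,

   i.e. this subdomain's contribution to a Galerkin-type coarse operator.
*/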

  /* Complete the sends. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    if (DZ_OUT[0]) { ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr); }
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }

  {
    int size,n_neigh_m1;
    ierr = MPI_Comm_size(pc->comm,&size);CHKERRQ(ierr);
    n_neigh_m1 = (n_neigh) ? n_neigh-1 : 0;
    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(pc->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
    /* Create and set the global coarse matrix: one row per processor, with the
       diagonal entry local and at most n_neigh-1 off-diagonal entries. */
    ierr = MatCreateMPIAIJ(pc->comm,1,1,size,size,1,PETSC_NULL,n_neigh_m1,PETSC_NULL,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
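
/*
   MatSetValues() above adds the dense n_neigh x n_neigh block 'mat' (stored
   row-major) at the global rows and columns listed in 'neigh'. A toy instance
   with hypothetical values, n_neigh = 2 and neigh = {3,7}:

     mat[0] is added at (3,3), mat[1] at (3,7),
     mat[2] is added at (7,3), mat[3] at (7,7).
*/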

  {
    int         rank;
    PetscScalar one = 1.0;
    IS          is;
    ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" the rows of the not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero the row; creates an empty index set,
                                  because MatZeroRows() is collective. */
      ierr = ISCreateStride(pc->comm,0,0,0,&is);CHKERRQ(ierr);
    } else { /* here it DOES zero the row, since this is not a floating subdomain. */
      ierr = ISCreateStride(pc->comm,1,rank,0,&is);CHKERRQ(ierr);
    }
    ierr = MatZeroRows(pcnn->coarse_mat,is,&one);CHKERRQ(ierr);
    ierr = ISDestroy(is);CHKERRQ(ierr);
  }

  /* Create the coarse linear solver context */
  {
    PC pc_ctx, inner_pc;
    ierr = KSPCreate(pc->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
    ierr = PCRedundantGetPC(pc_ctx,&inner_pc);CHKERRQ(ierr);
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    /* The vectors in the following two lines are dummy arguments that just tell
       the KSP the vector size; their values are not used. */
    ierr = KSPSetRhs(pcnn->ksp_coarse,pcnn->coarse_b);CHKERRQ(ierr);
    ierr = KSPSetSolution(pcnn->ksp_coarse,pcnn->coarse_x);CHKERRQ(ierr);
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }
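
/*
   Because of the "coarse_" options prefix set above, the coarse solver can be
   reconfigured from the command line without touching this code; a plausible
   example (illustration only):

     -coarse_ksp_type preonly -coarse_pc_type redundant -coarse_ksp_view
*/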

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-save_coarse_matrix",&flg);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);
    }
  }

  /*  Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */

  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk - Applies the Schur complement to a chunk of values
                           given on a subset of the local interface nodes.

   Input Parameters:
.  pc - the preconditioner context
.  n - size of the chunk
.  idx - indices of the chunk
.  chunk - values

   Output Parameters:
.  array_N - result of the Schur complement applied to chunk, scattered to the big array
.  vec1_B  - result of the Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
.  vec2_D  - garbage (used as work space)

*/
#undef __FUNCT__
#define __FUNCT__ "PCNNApplySchurToChunk"
int PCNNApplySchurToChunk(PC pc, int n, int* idx, PetscScalar *chunk, PetscScalar* array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  int   i, ierr;
  PC_IS *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;

  ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
  for (i=0; i<n; i++) { array_N[idx[i]] = chunk[i]; }
  ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
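
/*
   In effect (a hedged summary): with E placing 'chunk' at the positions 'idx'
   of the zeroed local array,

     $ vec1_B = S \, E(chunk) $ ,

   after which vec1_B is scattered back into array_N in the local numbering.
*/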

/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Applies the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input Parameters:
.  pc - the preconditioner context
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output Parameters:
.  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)

*/
#undef __FUNCT__
#define __FUNCT__ "PCNNApplyInterfacePreconditioner"
int PCNNApplyInterfacePreconditioner (PC pc, Vec r, Vec z, PetscScalar* work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,
                                      Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  int    ierr;
  PC_IS* pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;

  /*
    First balancing step.
  */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-turn_off_first_balancing",&flg);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(r,z);CHKERRQ(ierr);
    }
  }

  /*
    Extract the local interface part of z and scale it by D.
  */
  ierr = VecScatterBegin(z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);

  /* Neumann Solver */
  ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);

  /*
    Second balancing step.
  */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-turn_off_second_balancing",&flg);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      PetscScalar zero = 0.0;
      ierr = VecPointwiseMult(pcis->D,vec1_B,vec2_B);CHKERRQ(ierr);
      ierr = VecSet(&zero,z);CHKERRQ(ierr);
      ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    }
  }

  PetscFunctionReturn(0);
}
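
/*
   Schematically (a hedged summary of the steps above, not a formal statement
   of the algorithm):

     $ z_B = Bal_2 ( D \hat S^{-1} D \, Bal_1(r_B) ) $

   where $ \hat S^{-1} $ denotes the local Neumann solves (PCISApplyInvSchur)
   and Bal_1, Bal_2 are the balancing steps carried out by PCNNBalancing();
   either step can be disabled with -turn_off_first_balancing or
   -turn_off_second_balancing.
*/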

/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93].

   Input Parameters:
.  pc - the preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It's preserved.
.  u - (Optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.

   Output Parameters:
.  z - MPI vector of interior and interface nodes. Returns s or z (see the description above).
.  vec1_B - Sequential vector of local interface nodes. Workspace.
.  vec2_B - Sequential vector of local interface nodes. Workspace.
.  vec3_B - (Optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - Sequential vector of local interior nodes. Workspace.
.  vec2_D - Sequential vector of local interior nodes. Workspace.
.  work_N - Array of all local nodes (interior and interface). Workspace.

*/
#undef __FUNCT__
#define __FUNCT__ "PCNNBalancing"
int PCNNBalancing (PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,
                   Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  int            k, ierr;
  PetscScalar    zero     =  0.0;
  PetscScalar    m_one    = -1.0;
  PetscScalar    value;
  PetscScalar*   lambda;
  PC_NN*         pcnn     = (PC_NN*)(pc->data);
  PC_IS*         pcis     = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);

  if (u) {
    if (!vec3_B) { vec3_B = u; }
    ierr = VecPointwiseMult(pcis->D,u,vec1_B);CHKERRQ(ierr);
    ierr = VecSet(&zero,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecScale(&m_one,vec3_B);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (vec3_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  } else {
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) { value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]]; }
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    int rank;
    ierr = MPI_Comm_rank(pc->comm,&rank);CHKERRQ(ierr);
    ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
    /*
       Since we are only inserting local values (one value, actually), we do not need the
       reduction that tells us there is no data to be moved. Hence we comment out these lines:
       ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
       ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
    */
  }
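
  /*
     A hedged reading of the scalar just inserted: each process contributes

       $ b_{rank} = factor\_coarse\_rhs \cdot \sum_k DZ\_IN[0][k] \, work\_N[shared[0][k]] $ ,

     i.e. the locally weighted (scaled-nullspace) residual, so the coarse
     right-hand side has exactly one entry per process.
  */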
  ierr = KSPSetRhs(pcnn->ksp_coarse,pcnn->coarse_b);CHKERRQ(ierr);
  ierr = KSPSetSolution(pcnn->ksp_coarse,pcnn->coarse_x);CHKERRQ(ierr);
  ierr = KSPSolve(pcnn->ksp_coarse);CHKERRQ(ierr);
  if (!u) { ierr = VecScale(&m_one,pcnn->coarse_x);CHKERRQ(ierr); }
  ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  for (k=0; k<pcis->n_shared[0]; k++) { work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k]; }
  ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = VecSet(&zero,z);CHKERRQ(ierr);
  ierr = VecScatterBegin(vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (vec2_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  if (!u) {
    ierr = VecScatterBegin(z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = VecScatterEnd  (z,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pcis->global_to_B);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = VecScatterEnd  (vec1_B,z,ADD_VALUES,SCATTER_REVERSE,pcis->global_to_B);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__



/*  -------   E N D   O F   T H E   C O D E   -------  */
/*                                                     */
/*  From now on, "footnotes" (or "historical notes").  */
/*                                                     */
/*  -------------------------------------------------  */


#ifdef __HISTORICAL_NOTES___do_not_compile__

/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the non-pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/

    {
      PetscTruth flg;
      ierr = PetscOptionsHasName(PETSC_NULL,"-pcnn_new_scaling",&flg);CHKERRQ(ierr);
      if (flg) {
        Vec         counter;
        PetscScalar one=1.0, zero=0.0;
        ierr = VecDuplicate(pc->vec,&counter);CHKERRQ(ierr);
        ierr = VecSet(&zero,counter);CHKERRQ(ierr);
        if (pcnn->pure_neumann) {
          ierr = VecSet(&one,pcnn->D);CHKERRQ(ierr);
        } else {
          ierr = VecSet(&zero,pcnn->D);CHKERRQ(ierr);
        }
        ierr = VecScatterBegin(pcnn->D,counter,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecScatterEnd  (pcnn->D,counter,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecScatterBegin(counter,pcnn->D,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecScatterEnd  (counter,pcnn->D,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
        ierr = VecDestroy(counter);CHKERRQ(ierr);
        if (pcnn->pure_neumann) {
          ierr = VecReciprocal(pcnn->D);CHKERRQ(ierr);
        } else {
          ierr = VecSet(&zero,pcnn->D);CHKERRQ(ierr);
        }
      }
    }



/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/

  /*  Set the variable pcnn->factor_coarse_rhs. */
  {
    PetscTruth flg;
    ierr = PetscOptionsHasName(PETSC_NULL,"-enforce_preserving_constants",&flg);CHKERRQ(ierr);
    if (!flg) { pcnn->factor_coarse_rhs = (pcnn->pure_neumann) ? 1.0 : 0.0; }
    else {
      PetscScalar zero = 0.0, one = 1.0;
      ierr = VecSet(&one,pcnn->vec1_B);CHKERRQ(ierr);
      ierr = ApplySchurComplement(pcnn,pcnn->vec1_B,pcnn->vec2_B,(Vec)0,pcnn->vec1_D,pcnn->vec2_D);CHKERRQ(ierr);
      ierr = VecSet(&zero,pcnn->vec1_global);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcnn->vec2_B,pcnn->vec1_global,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcnn->vec2_B,pcnn->vec1_global,ADD_VALUES,SCATTER_REVERSE,pcnn->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcnn->vec1_global,pcnn->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcnn->vec1_global,pcnn->vec1_B,INSERT_VALUES,SCATTER_FORWARD,pcnn->global_to_B);CHKERRQ(ierr);
      if (pcnn->pure_neumann) { pcnn->factor_coarse_rhs = 1.0; }
      else {
        ierr = ScatterArrayNToVecB(pcnn->work_N,pcnn->vec1_B,INSERT_VALUES,SCATTER_REVERSE,pcnn);CHKERRQ(ierr);
        for (k=0, pcnn->factor_coarse_rhs=0.0; k<pcnn->n_shared[0]; k++) {
          pcnn->factor_coarse_rhs += pcnn->work_N[pcnn->shared[0][k]] * pcnn->DZ_IN[0][k];
        }
        if (pcnn->factor_coarse_rhs) { pcnn->factor_coarse_rhs = 1.0 / pcnn->factor_coarse_rhs; }
        else { SETERRQ(1,"Constants cannot be preserved. Remove the \"-enforce_preserving_constants\" option."); }
      }
    }
  }

#endif /* __HISTORICAL_NOTES___do_not_compile__ */