1 
2 #include <../src/ksp/pc/impls/is/nn/nn.h>
3 
4 /* -------------------------------------------------------------------------- */
5 /*
6    PCSetUp_NN - Prepares for the use of the NN preconditioner
7                     by setting data structures and options.
8 
9    Input Parameter:
10 .  pc - the preconditioner context
11 
12    Application Interface Routine: PCSetUp()
13 
14    Notes:
15    The interface routine PCSetUp() is not usually called directly by
16    the user, but instead is called by PCApply() if necessary.
17 */
18 #undef __FUNCT__
19 #define __FUNCT__ "PCSetUp_NN"
20 static PetscErrorCode PCSetUp_NN(PC pc)
21 {
22   PetscErrorCode ierr;
23 
24   PetscFunctionBegin;
25   if (!pc->setupcalled) {
26     /* Set up all the "iterative substructuring" common block */
27     ierr = PCISSetUp(pc);CHKERRQ(ierr);
28     /* Create the coarse matrix. */
29     ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
30   }
31   PetscFunctionReturn(0);
32 }
33 
34 /* -------------------------------------------------------------------------- */
35 /*
36    PCApply_NN - Applies the NN preconditioner to a vector.
37 
38    Input Parameters:
39 .  pc - the preconditioner context
40 .  r - input vector (global)
41 
42    Output Parameter:
43 .  z - output vector (global)
44 
45    Application Interface Routine: PCApply()
46  */
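/*
   Summary added for clarity (using the notation of the comments below, with
   $ B_I^{(i)} = (A_{II}^{(i)})^{-1} $ denoting the local Dirichlet solve and $ M_{NN}^{-1} $
   the balanced interface preconditioner applied by PCNNApplyInterfacePreconditioner()):
   one application of this routine computes

      $ z_B = M_{NN}^{-1} \left( r_B - \sum_i \tilde R_i^T A_{BI}^{(i)} B_I^{(i)} r_I^{(i)} \right) $
      $ z_I^{(i)} = B_I^{(i)} r_I^{(i)} - B_I^{(i)} A_{IB}^{(i)} \tilde R_i z_B $

   i.e. a local Dirichlet solve, the interface correction, and a second local
   Dirichlet solve that makes the interior residual zero.
*/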
47 #undef __FUNCT__
48 #define __FUNCT__ "PCApply_NN"
49 static PetscErrorCode PCApply_NN(PC pc,Vec r,Vec z)
50 {
51   PC_IS          *pcis = (PC_IS*)(pc->data);
52   PetscErrorCode ierr;
53   PetscScalar    m_one = -1.0;
54   Vec            w     = pcis->vec1_global;
55 
56   PetscFunctionBegin;
57   /*
58     Dirichlet solvers.
59     Computing $ B_I^{(i)}r_I^{(i)} $ (the local Dirichlet solves) on each process.
60     Storing the local results in vec2_D.
61   */
62   ierr = VecScatterBegin(pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
63   ierr = VecScatterEnd  (pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
64   ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
65 
66   /*
67     Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $ .
68     Storing the result in the interface portion of the global vector w.
69   */
70   ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
71   ierr = VecScale(pcis->vec1_B,m_one);CHKERRQ(ierr);
72   ierr = VecCopy(r,w);CHKERRQ(ierr);
73   ierr = VecScatterBegin(pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
74   ierr = VecScatterEnd  (pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
75 
76   /*
77     Apply the interface preconditioner
78   */
79   ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
80                                           pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);
81 
82   /*
83     Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $
84     The result is stored in vec1_D.
85   */
86   ierr = VecScatterBegin(pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
87   ierr = VecScatterEnd  (pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
88   ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);
89 
90   /*
91     Dirichlet solvers.
92     Computing $ B_I^{(i)}t_I^{(i)} $ and storing in the interior part of the global vector the blocks
93     $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $.
94   */
95   ierr = VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
96   ierr = VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
97   ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
98   ierr = VecScale(pcis->vec2_D,m_one);CHKERRQ(ierr);
99   ierr = VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
100   ierr = VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
101   PetscFunctionReturn(0);
102 }
103 
104 /* -------------------------------------------------------------------------- */
105 /*
106    PCDestroy_NN - Destroys the private context for the NN preconditioner
107    that was created with PCCreate_NN().
108 
109    Input Parameter:
110 .  pc - the preconditioner context
111 
112    Application Interface Routine: PCDestroy()
113 */
114 #undef __FUNCT__
115 #define __FUNCT__ "PCDestroy_NN"
116 static PetscErrorCode PCDestroy_NN(PC pc)
117 {
118   PC_NN          *pcnn = (PC_NN*)pc->data;
119   PetscErrorCode ierr;
120 
121   PetscFunctionBegin;
122   ierr = PCISDestroy(pc);CHKERRQ(ierr);
123 
124   ierr = MatDestroy(&pcnn->coarse_mat);CHKERRQ(ierr);
125   ierr = VecDestroy(&pcnn->coarse_x);CHKERRQ(ierr);
126   ierr = VecDestroy(&pcnn->coarse_b);CHKERRQ(ierr);
127   ierr = KSPDestroy(&pcnn->ksp_coarse);CHKERRQ(ierr);
128   if (pcnn->DZ_IN) {
129     ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);
130     ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
131   }
132 
133   /*
134       Free the private data structure that was hanging off the PC
135   */
136   ierr = PetscFree(pc->data);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 /* -------------------------------------------------------------------------- */
141 /*MC
142    PCNN - Balancing Neumann-Neumann for scalar elliptic PDEs.
143 
144    Options Database Keys:
145 +    -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
146                                        (this skips the first coarse grid solve in the preconditioner)
147 .    -pc_nn_turn_off_second_balancing - do not balance the solution after solving the local Neumann problems
148                                        (this skips the second coarse grid solve in the preconditioner)
149 .    -pc_is_damp_fixed <fact> -
150 .    -pc_is_remove_nullspace_fixed -
151 .    -pc_is_set_damping_factor_floating <fact> -
152 .    -pc_is_not_damp_floating -
153 -    -pc_is_not_remove_nullspace_floating -
154 
155    Level: intermediate
156 
157    Notes: The matrix used with this preconditioner must be of type MATIS
158 
159           Unlike more 'conventional' Neumann-Neumann preconditioners this iterates over ALL the
160           degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
161           on the subdomains, though in our experience using approximate solvers is slower).
162 
163           Options for the coarse grid preconditioner can be set with -nn_coarse_pc_xxx
164           Options for the Dirichlet subproblem preconditioner can be set with -is_localD_pc_xxx
165           Options for the Neumann subproblem preconditioner can be set with -is_localN_pc_xxx
166 
167    Contributed by Paulo Goldfeld
168 
169 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC,  MATIS
170 M*/
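/*
   The sketch below is illustrative only (it assumes the user has already assembled
   a matrix A of type MATIS and a right-hand side b on PETSC_COMM_WORLD); it shows
   how PCNN would typically be selected from code. The same effect is obtained from
   the command line with -pc_type nn.
*/
#if 0
  KSP            ksp;
  PC             pc;
  Vec            x;
  PetscErrorCode ierr;

  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); /* A must be of type MATIS */
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCNN);CHKERRQ(ierr);                                 /* or -pc_type nn */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  ierr = VecDuplicate(b,&x);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
#endif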
171 
172 #undef __FUNCT__
173 #define __FUNCT__ "PCCreate_NN"
174 PETSC_EXTERN PetscErrorCode PCCreate_NN(PC pc)
175 {
176   PetscErrorCode ierr;
177   PC_NN          *pcnn;
178 
179   PetscFunctionBegin;
180   /*
181      Create the private data structure for this preconditioner and
182      attach it to the PC object.
183   */
184   ierr     = PetscNewLog(pc,PC_NN,&pcnn);CHKERRQ(ierr);
185   pc->data = (void*)pcnn;
186 
187   ierr             = PCISCreate(pc);CHKERRQ(ierr);
188   pcnn->coarse_mat = 0;
189   pcnn->coarse_x   = 0;
190   pcnn->coarse_b   = 0;
191   pcnn->ksp_coarse = 0;
192   pcnn->DZ_IN      = 0;
193 
194   /*
195       Set the pointers for the functions that are provided above.
196       Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
197       are called, they will automatically call these functions.  Note we
198       choose not to provide a couple of these functions since they are
199       not needed.
200   */
201   pc->ops->apply               = PCApply_NN;
202   pc->ops->applytranspose      = 0;
203   pc->ops->setup               = PCSetUp_NN;
204   pc->ops->destroy             = PCDestroy_NN;
205   pc->ops->view                = 0;
206   pc->ops->applyrichardson     = 0;
207   pc->ops->applysymmetricleft  = 0;
208   pc->ops->applysymmetricright = 0;
209   PetscFunctionReturn(0);
210 }
211 
212 /* -------------------------------------------------------------------------- */
213 /*
214    PCNNCreateCoarseMatrix -
215 */
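/*
   Descriptive note added for clarity (in the notation of the comments in this file):
   on subdomain i this routine assembles the local dense block

      $ \mathrm{mat}[k][j] = (D_k z_k)^T S^{(i)} (D_j z_j), \qquad k, j \in \{ i \} \cup \{ \mbox{neighbours of } i \} $,

   where $ z_j $ is the local null space of subdomain j (the constant vector),
   $ D_j $ the partition-of-unity scaling and $ S^{(i)} $ the local Schur complement;
   each factor $ D_j z_j $ is taken only on the interface nodes shared by subdomains
   i and j (extended by zero elsewhere). Adding these blocks over all subdomains with
   ADD_VALUES produces the coarse (balancing) matrix, which has one row and one
   column per MPI rank.
*/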
216 #undef __FUNCT__
217 #define __FUNCT__ "PCNNCreateCoarseMatrix"
218 PetscErrorCode PCNNCreateCoarseMatrix(PC pc)
219 {
220   MPI_Request    *send_request, *recv_request;
221   PetscErrorCode ierr;
222   PetscInt       i, j, k;
223   PetscScalar    *mat;     /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
224   PetscScalar    **DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */
225 
226   /* aliasing some names */
227   PC_IS       *pcis     = (PC_IS*)(pc->data);
228   PC_NN       *pcnn     = (PC_NN*)pc->data;
229   PetscInt    n_neigh   = pcis->n_neigh;
230   PetscInt    *neigh    = pcis->neigh;
231   PetscInt    *n_shared = pcis->n_shared;
232   PetscInt    **shared  = pcis->shared;
233   PetscScalar **DZ_IN;   /* Must be initialized after memory allocation. */
234 
235   PetscFunctionBegin;
236   /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
237   ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);
238 
239   /* Allocate memory for DZ */
240   /* Notice that DZ_OUT[0] is allocated some space that is never used. */
241   /* This is just so that DZ_OUT and DZ_IN have exactly the same form. */
242   {
243     PetscInt size_of_Z = 0;
244     ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
245     DZ_IN = pcnn->DZ_IN;
246     ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
247     for (i=0; i<n_neigh; i++) size_of_Z += n_shared[i];
248     ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
249     ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
250   }
251   for (i=1; i<n_neigh; i++) {
252     DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
253     DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
254   }
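/*
   The two mallocs above implement a standard "packed" jagged array: one contiguous
   buffer of size_of_Z entries plus an array of row pointers into it, so that
   DZ_IN[i][j] addresses the j-th value shared with neighbour i and the whole thing
   can be released with just two PetscFree() calls. A self-contained sketch of the
   same idiom (hypothetical names, plain malloc for brevity):
*/
#if 0
#include <stdlib.h>

static double **pack_alloc(int nrows,const int *rowlen)
{
  int    i,total = 0;
  double **rows;

  for (i=0; i<nrows; i++) total += rowlen[i];
  rows    = (double**)malloc((nrows+1)*sizeof(double*)); /* +1 mirrors the n_neigh==0 guard above */
  rows[0] = (double*)malloc((total+1)*sizeof(double));   /* one contiguous buffer for all rows     */
  for (i=1; i<nrows; i++) rows[i] = rows[i-1] + rowlen[i-1];
  return rows;                                           /* free(rows[0]); free(rows); frees everything */
}
#endif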
255 
256   /* Set the values of DZ_OUT, in order to send this info to the neighbours */
257   /* First, set the auxiliary array pcis->work_N. */
258   ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
259   for (i=1; i<n_neigh; i++) {
260     for (j=0; j<n_shared[i]; j++) {
261       DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
262     }
263   }
264 
265   /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
266   /* Notice that send_request[] and recv_request[] could have one less element. */
267   /* We make them longer to have request[i] corresponding to neigh[i].          */
268   {
269     PetscMPIInt tag;
270     ierr         = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
271     ierr         = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
272     recv_request = send_request + (n_neigh);
273     for (i=1; i<n_neigh; i++) {
274       ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(send_request[i]));CHKERRQ(ierr);
275       ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(recv_request[i]));CHKERRQ(ierr);
276     }
277   }
278 
279   /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
280   for (j=0; j<n_shared[0]; j++) DZ_IN[0][j] = pcis->work_N[shared[0][j]];
281 
282   /* Start computing with local D*Z while communication goes on.    */
283   /* Apply Schur complement. The result is "stored" in pcis->vec1_B      */
284   /* (more precisely, vec1_B points to the result) and is also           */
285   /* scattered to pcis->work_N.                                          */
286   ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
287                                pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
288 
289   /* Compute the first column, while completing the receiving. */
290   for (i=0; i<n_neigh; i++) {
291     MPI_Status  stat;
292     PetscMPIInt ind=0;
293     if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
294     mat[ind*n_neigh+0] = 0.0;
295     for (k=0; k<n_shared[ind]; k++) mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
296   }
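/*
   The exchange above posts all sends and receives first, does the purely local work
   (the i==0 term), and then uses MPI_Waitany() to process each neighbour's data as
   soon as it arrives. A minimal standalone sketch of that overlap pattern follows
   (a hypothetical toy exchange of one scalar per neighbour, not part of PETSc):
*/
#if 0
#include <mpi.h>
#include <stdio.h>

int main(int argc,char **argv)
{
  int         rank,size,i,nneigh = 0,ind;
  int         neigh[2];
  double      sendval,recvval[2];
  MPI_Request sreq[2],rreq[2];
  MPI_Status  stat;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);

  /* neighbours in a 1-D chain: left and right rank, when they exist */
  if (rank > 0)      neigh[nneigh++] = rank-1;
  if (rank < size-1) neigh[nneigh++] = rank+1;

  sendval = (double)rank;
  for (i=0; i<nneigh; i++) {
    MPI_Isend(&sendval,1,MPI_DOUBLE,neigh[i],0,MPI_COMM_WORLD,&sreq[i]);
    MPI_Irecv(&recvval[i],1,MPI_DOUBLE,neigh[i],0,MPI_COMM_WORLD,&rreq[i]);
  }

  /* ... purely local work would overlap with the communication here ... */

  for (i=0; i<nneigh; i++) {
    MPI_Waitany(nneigh,rreq,&ind,&stat);   /* handle whichever neighbour finished first */
    printf("[%d] got %g from rank %d\n",rank,recvval[ind],neigh[ind]);
  }
  MPI_Waitall(nneigh,sreq,MPI_STATUSES_IGNORE);
  MPI_Finalize();
  return 0;
}
#endif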
297 
298   /* Compute the remaining columns */
299   for (j=1; j<n_neigh; j++) {
300     ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
301                                  pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
302     for (i=0; i<n_neigh; i++) {
303       mat[i*n_neigh+j] = 0.0;
304       for (k=0; k<n_shared[i]; k++) mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
305     }
306   }
307 
308   /* Complete the sending. */
309   if (n_neigh>1) {
310     MPI_Status *stat;
311     ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
312     if (n_neigh-1) {ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);}
313     ierr = PetscFree(stat);CHKERRQ(ierr);
314   }
315 
316   /* Free the memory for the MPI requests */
317   ierr = PetscFree(send_request);CHKERRQ(ierr);
318 
319   /* Free the memory for DZ_OUT */
320   if (DZ_OUT) {
321     ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr);
322     ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
323   }
324 
325   {
326     PetscMPIInt size;
327     ierr = MPI_Comm_size(PetscObjectComm((PetscObject)pc),&size);CHKERRQ(ierr);
328     /* Create the global coarse vectors (rhs and solution). */
329     ierr = VecCreateMPI(PetscObjectComm((PetscObject)pc),1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
330     ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
331     /* Create and set the global coarse AIJ matrix. */
332     ierr = MatCreate(PetscObjectComm((PetscObject)pc),&(pcnn->coarse_mat));CHKERRQ(ierr);
333     ierr = MatSetSizes(pcnn->coarse_mat,1,1,size,size);CHKERRQ(ierr);
334     ierr = MatSetType(pcnn->coarse_mat,MATAIJ);CHKERRQ(ierr);
335     ierr = MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,NULL);CHKERRQ(ierr);
336     ierr = MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,NULL,1,NULL);CHKERRQ(ierr);
337     ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
338     ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
339     ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
340   }
341 
342   {
343     PetscMPIInt rank;
344     PetscScalar one = 1.0;
345     ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank);CHKERRQ(ierr);
346     /* "Zero out" rows of not-purely-Neumann subdomains */
347     if (pcis->pure_neumann) {  /* does NOT zero any row; pass an empty row list. The call is still made because MatZeroRows() is collective. */
348       ierr = MatZeroRows(pcnn->coarse_mat,0,NULL,one,0,0);CHKERRQ(ierr);
349     } else { /* here it DOES zero the row, since it's not a floating subdomain. */
350       PetscInt row = (PetscInt) rank;
351       ierr = MatZeroRows(pcnn->coarse_mat,1,&row,one,0,0);CHKERRQ(ierr);
352     }
353   }
354 
355   /* Create the coarse linear solver context */
356   {
357     PC  pc_ctx, inner_pc;
358     KSP inner_ksp;
359 
360     ierr = KSPCreate(PetscObjectComm((PetscObject)pc),&pcnn->ksp_coarse);CHKERRQ(ierr);
361     ierr = PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2);CHKERRQ(ierr);
362     ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
363     ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
364     ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
365     ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
366     ierr = PCRedundantGetKSP(pc_ctx,&inner_ksp);CHKERRQ(ierr);
367     ierr = KSPGetPC(inner_ksp,&inner_pc);CHKERRQ(ierr);
368     ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
369     ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_");CHKERRQ(ierr);
370     ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
371     /* set up the coarse solver now, so that its setup cost (e.g. the redundant factorization) is incurred during PCSetUp() rather than during the first application of the preconditioner */
372     ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
373   }
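/*
   Because of the "nn_coarse_" options prefix and the PCREDUNDANT/PCLU defaults set
   above, the coarse solve can be reconfigured entirely from the command line; for
   example (an illustrative choice, not a recommendation):

       -nn_coarse_ksp_type gmres -nn_coarse_pc_type jacobi -nn_coarse_ksp_rtol 1e-10

   selects GMRES with Jacobi for the coarse problem instead of the redundant direct solve.
*/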
374 
375   /* Free the memory for mat */
376   ierr = PetscFree(mat);CHKERRQ(ierr);
377 
378   /* for DEBUGGING, save the coarse matrix to a file. */
379   {
380     PetscBool flg = PETSC_FALSE;
381     ierr = PetscOptionsGetBool(NULL,"-pc_nn_save_coarse_matrix",&flg,NULL);CHKERRQ(ierr);
382     if (flg) {
383       PetscViewer viewer;
384       ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
385       ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
386       ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
387       ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
388     }
389   }
390 
391   /*  Set the variable pcnn->factor_coarse_rhs. */
392   pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;
393 
394   /* See historical note 02, at the bottom of this file. */
395   PetscFunctionReturn(0);
396 }
397 
398 /* -------------------------------------------------------------------------- */
399 /*
400    PCNNApplySchurToChunk -
401 
402    Input parameters:
403 .  pc - the preconditioner context
404 .  n - size of chunk
405 .  idx - indices of chunk
406 .  chunk - values
407 
408    Output parameters:
409 .  array_N - result of Schur complement applied to chunk, scattered to big array
410 .  vec1_B  - result of Schur complement applied to chunk
411 .  vec2_B  - garbage (used as work space)
412 .  vec1_D  - garbage (used as work space)
413 .  vec2_D  - garbage (used as work space)
414 
415 */
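/*
   For reference (a note added for clarity): the local operator applied by
   PCISApplySchur() in the body below is the Schur complement of the interior (I)
   unknowns in the local matrix,

      $ S^{(i)} = A_{BB}^{(i)} - A_{BI}^{(i)} (A_{II}^{(i)})^{-1} A_{IB}^{(i)} $ ,

   so this routine returns $ S^{(i)} $ applied to a vector that is zero except on the
   chunk of interface nodes given by idx[].
*/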
416 #undef __FUNCT__
417 #define __FUNCT__ "PCNNApplySchurToChunk"
418 PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt *idx, PetscScalar *chunk, PetscScalar *array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
419 {
420   PetscErrorCode ierr;
421   PetscInt       i;
422   PC_IS          *pcis = (PC_IS*)(pc->data);
423 
424   PetscFunctionBegin;
425   ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
426   for (i=0; i<n; i++) array_N[idx[i]] = chunk[i];
427   ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
428   ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
429   ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
430   PetscFunctionReturn(0);
431 }
432 
433 /* -------------------------------------------------------------------------- */
434 /*
435    PCNNApplyInterfacePreconditioner - Apply the interface preconditioner, i.e.,
436                                       the preconditioner for the Schur complement.
437 
438    Input parameter:
439 .  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.
440 
441    Output parameters:
442 .  z - global vector of interior and interface nodes. The values on the interface are the result of
443        the application of the interface preconditioner to the interface part of r. The values on the
444        interior nodes are garbage.
445 .  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
446 .  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
447 .  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
448 .  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
449 .  vec1_D - vector of local interior nodes; returns garbage (used as work space)
450 .  vec2_D - vector of local interior nodes; returns garbage (used as work space)
451 .  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
452 .  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
453 
454 */
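/*
   In operator form (a sketch of the algorithm below, roughly in the notation of
   [Man93]; here $ Q $ denotes the coarse correction performed by PCNNBalancing() and
   $ S_i^{\dagger} $ the local Neumann solves applied by PCISApplyInvSchur()), the
   balanced Neumann-Neumann preconditioner for the Schur complement $ S $ is

      $ M_{NN}^{-1} = Q + (I - Q S) \left( \sum_i \tilde R_i^T D_i S_i^{\dagger} D_i \tilde R_i \right) (I - S Q) $ ,

   where the factors $ (I - S Q) $ and $ (I - Q S) $ are the first and second
   balancing steps (each can be switched off with the options tested below).
*/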
455 #undef __FUNCT__
456 #define __FUNCT__ "PCNNApplyInterfacePreconditioner"
457 PetscErrorCode PCNNApplyInterfacePreconditioner(PC pc, Vec r, Vec z, PetscScalar *work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D,Vec vec2_D, Vec vec1_N, Vec vec2_N)
458 {
459   PetscErrorCode ierr;
460   PC_IS          *pcis = (PC_IS*)(pc->data);
461 
462   PetscFunctionBegin;
463   /*
464     First balancing step.
465   */
466   {
467     PetscBool flg = PETSC_FALSE;
468     ierr = PetscOptionsGetBool(NULL,"-pc_nn_turn_off_first_balancing",&flg,NULL);CHKERRQ(ierr);
469     if (!flg) {
470       ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
471     } else {
472       ierr = VecCopy(r,z);CHKERRQ(ierr);
473     }
474   }
475 
476   /*
477     Extract the local interface part of z and scale it by D
478   */
479   ierr = VecScatterBegin(pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
480   ierr = VecScatterEnd  (pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
481   ierr = VecPointwiseMult(vec2_B,pcis->D,vec1_B);CHKERRQ(ierr);
482 
483   /* Neumann Solver */
484   ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);
485 
486   /*
487     Second balancing step.
488   */
489   {
490     PetscBool flg = PETSC_FALSE;
491     ierr = PetscOptionsGetBool(NULL,"-pc_turn_off_second_balancing",&flg,NULL);CHKERRQ(ierr);
492     if (!flg) {
493       ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
494     } else {
495       ierr = VecPointwiseMult(vec2_B,pcis->D,vec1_B);CHKERRQ(ierr);
496       ierr = VecSet(z,0.0);CHKERRQ(ierr);
497       ierr = VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
498       ierr = VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
499     }
500   }
501   PetscFunctionReturn(0);
502 }
503 
504 /* -------------------------------------------------------------------------- */
505 /*
506    PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
507                    input argument u is provided), or s, as given in equations
508                    (12) and (13), if the input argument u is a null vector.
509                    Notice that the input argument u plays the role of u_i in
510                    equation (14). The equation numbers refer to [Man93].
511 
512    Input Parameters:
513 .  pc - NN preconditioner context.
514 .  r - MPI vector of all nodes (interior and interface). It's preserved.
515 .  u - (Optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.
516 
517    Output Parameters:
518 .  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
519 .  vec1_B - Sequential vector of local interface nodes. Workspace.
520 .  vec2_B - Sequential vector of local interface nodes. Workspace.
521 .  vec3_B - (Optional) sequential vector of local interface nodes. Workspace.
522 .  vec1_D - Sequential vector of local interior nodes. Workspace.
523 .  vec2_D - Sequential vector of local interior nodes. Workspace.
524 .  work_N - Array of all local nodes (interior and interface). Workspace.
525 
526 */
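/*
   A worked restatement of the two cases handled below (added for clarity; here
   $ Q = R_0^T A_c^{-1} R_0 $ is the coarse correction, the rows of $ R_0 $ are the
   scaled local null spaces $ D_i z_i $ stored in DZ_IN[0], $ A_c $ is the coarse
   matrix built in PCNNCreateCoarseMatrix(), and $ S $ is the assembled Schur
   complement):

      if u is provided:  $ z = y + Q ( r - S y ) $  with  $ y = \sum_i \tilde R_i^T D_i u_i $
      if u is NULL:      $ z = ( I - S Q ) r $

   The coarse right-hand side has one entry per rank,
   $ b_i = \gamma \, (D_i z_i)^T (\cdot)_B^{(i)} $, where $ (\cdot) $ is $ r $ or $ r - S y $
   and $ \gamma $ is pcnn->factor_coarse_rhs (1 for floating subdomains, 0 otherwise).
*/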
527 #undef __FUNCT__
528 #define __FUNCT__ "PCNNBalancing"
529 PetscErrorCode PCNNBalancing(PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B,Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
530 {
531   PetscErrorCode ierr;
532   PetscInt       k;
533   PetscScalar    value;
534   PetscScalar    *lambda;
535   PC_NN          *pcnn = (PC_NN*)(pc->data);
536   PC_IS          *pcis = (PC_IS*)(pc->data);
537 
538   PetscFunctionBegin;
539   ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
540   if (u) {
541     if (!vec3_B) vec3_B = u;
542     ierr = VecPointwiseMult(vec1_B,pcis->D,u);CHKERRQ(ierr);
543     ierr = VecSet(z,0.0);CHKERRQ(ierr);
544     ierr = VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
545     ierr = VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
546     ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
547     ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
548     ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
549     ierr = VecScale(vec3_B,-1.0);CHKERRQ(ierr);
550     ierr = VecCopy(r,z);CHKERRQ(ierr);
551     ierr = VecScatterBegin(pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
552     ierr = VecScatterEnd  (pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
553   } else {
554     ierr = VecCopy(r,z);CHKERRQ(ierr);
555   }
556   ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
557   ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
558   ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
559   for (k=0, value=0.0; k<pcis->n_shared[0]; k++) value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]];
560   value *= pcnn->factor_coarse_rhs;  /* This factor is set in CreateCoarseMatrix(). */
561   {
562     PetscMPIInt rank;
563     ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank);CHKERRQ(ierr);
564     ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
565     /*
566        Since we are only inserting local values (one value actually) we don't need to do the
567        reduction that tells us there is no data that needs to be moved. Hence we skip the calls
568        ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
569        ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
570     */
571   }
572   ierr = KSPSolve(pcnn->ksp_coarse,pcnn->coarse_b,pcnn->coarse_x);CHKERRQ(ierr);
573   if (!u) { ierr = VecScale(pcnn->coarse_x,-1.0);CHKERRQ(ierr); }
574   ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
575   for (k=0; k<pcis->n_shared[0]; k++) work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k];
576   ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
577   ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
578   ierr = VecSet(z,0.0);CHKERRQ(ierr);
579   ierr = VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
580   ierr = VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
581   if (!u) {
582     ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
583     ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
584     ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
585     ierr = VecCopy(r,z);CHKERRQ(ierr);
586   }
587   ierr = VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
588   ierr = VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
589   ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
590   PetscFunctionReturn(0);
591 }
592 
593 #undef __FUNCT__
594 
595 
596 
597 /*  -------   E N D   O F   T H E   C O D E   -------  */
598 /*                                                     */
599 /*  From now on, "footnotes" (or "historical notes").  */
600 /*                                                     */
601 /*  -------------------------------------------------  */
602 
603 
604 
605 /* --------------------------------------------------------------------------
606    Historical note 01
607    -------------------------------------------------------------------------- */
608 /*
609    We considered the possibility of an alternative D_i that would still
610    provide a partition of unity (i.e., $ \sum_i  N_i D_i N_i^T = I $).
611    The basic principle was still the pseudo-inverse of the counting
612    function; the difference was that we would not count subdomains
613    that do not contribute to the coarse space (i.e., not pure-Neumann
614    subdomains).
615 
616    This turned out to be a bad idea:  we would solve trivial Neumann
617    problems in the non-pure-Neumann subdomains, since we would be scaling
618    the balanced residual by zero.
619 */
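/*
   (For concreteness: with the counting-function pseudo-inverse that is actually used,
   an interface node shared by three subdomains gets the weight 1/3 in each of them,
   so the weights of every node sum to one and $ \sum_i N_i D_i N_i^T = I $ holds.)
*/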
620 
621 
622 
623 
624 /* --------------------------------------------------------------------------
625    Historical note 02
626    -------------------------------------------------------------------------- */
627 /*
628    We tried an alternative coarse problem that would eliminate exactly a
629    constant error. It turned out not to improve the overall convergence.
630 */
631 
632 
633