xref: /petsc/src/ts/tutorials/autodiff/adr_ex5adj.cxx (revision 40badf4fbc550ac1f60bd080eaff6de6d55b946d)
1 static char help[] = "Demonstrates adjoint sensitivity analysis for Reaction-Diffusion Equations.\n";
2 
3 /*
4    Concepts: TS^time-dependent nonlinear problems
5    Concepts: Automatic differentiation using ADOL-C
6 */
7 /*
8    REQUIRES configuration of PETSc with option --download-adolc.
9 
10    For documentation on ADOL-C, see
11      $PETSC_ARCH/externalpackages/ADOL-C-2.6.0/ADOL-C/doc/adolc-manual.pdf
12 
13    REQUIRES configuration of PETSc with option --download-colpack
14 
15    For documentation on ColPack, see
16      $PETSC_ARCH/externalpackages/git.colpack/README.md
17 */
18 /* ------------------------------------------------------------------------
19   See ../advection-diffusion-reaction/ex5 for a description of the problem
20   ------------------------------------------------------------------------- */
21 
22 /*
23   Runtime options:
24 
25     Solver:
26       -forwardonly       - Run the forward simulation without adjoint.
27       -implicitform      - Provide IFunction and IJacobian to TS, if not set, RHSFunction and RHSJacobian will be used.
28       -aijpc             - Set the preconditioner matrix to be aij (the Jacobian matrix can be of a different type such as ELL).
29 
30     Jacobian generation:
31       -jacobian_by_hand  - Use the hand-coded Jacobian of ex5.c, rather than generating it automatically.
32       -no_annotation     - Do not annotate ADOL-C active variables. (Should be used alongside -jacobian_by_hand.)
33       -adolc_sparse      - Calculate Jacobian in compressed format, using ADOL-C sparse functionality.
34       -adolc_sparse_view - Print sparsity pattern used by -adolc_sparse option.
35  */
36 /*
37   NOTE: If -adolc_sparse option is used, at least four processors should be used, so that no processor owns boundaries which are
38         identified by the periodic boundary conditions. The number of grid points in both x- and y-directions should be multiples
39         of 5, in order for the 5-point stencil to be cleanly parallelised.
40 */
41 
42 #include <petscdmda.h>
43 #include <petscts.h>
44 #include "adolc-utils/drivers.cxx"
45 #include <adolc/adolc.h>
46 
/* (Passive) field for the two variables: plain scalars, used in untaped evaluations */
typedef struct {
  PetscScalar u,v; /* concentrations of the two reacting species */
} Field;

/* Active field for the two variables: ADOL-C adoubles, used when recording a tape */
typedef struct {
  adouble u,v;
} AField;

/* Application context passed to all callbacks */
typedef struct {
  PetscReal D1,D2,gamma,kappa; /* D1,D2: diffusion coefficients; gamma,kappa: reaction-rate parameters */
  AField    **u_a,**f_a;       /* active (taped) counterparts of the solution and residual fields */
  PetscBool aijpc;             /* if true, use an AIJ preconditioner matrix alongside a (possibly SELL) Jacobian */
  AdolcCtx  *adctx; /* Automatic differentiation support */
} AppCtx;
64 
65 extern PetscErrorCode InitialConditions(DM da,Vec U);
66 extern PetscErrorCode InitializeLambda(DM da,Vec lambda,PetscReal x,PetscReal y);
67 extern PetscErrorCode IFunctionLocalPassive(DMDALocalInfo *info,PetscReal t,Field**u,Field**udot,Field**f,void *ptr);
68 extern PetscErrorCode IFunctionActive(TS ts,PetscReal ftime,Vec U,Vec Udot,Vec F,void *ptr);
69 extern PetscErrorCode RHSFunctionActive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr);
70 extern PetscErrorCode RHSFunctionPassive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr);
71 extern PetscErrorCode IJacobianByHand(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx);
72 extern PetscErrorCode IJacobianAdolc(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx);
73 extern PetscErrorCode RHSJacobianByHand(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx);
74 extern PetscErrorCode RHSJacobianAdolc(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx);
75 
int main(int argc,char **argv)
{
  TS             ts;
  Vec            x,r,xdot;
  PetscErrorCode ierr;
  DM             da;
  AppCtx         appctx;
  AdolcCtx       *adctx;
  Vec            lambda[1];
  PetscBool      forwardonly=PETSC_FALSE,implicitform=PETSC_FALSE,byhand=PETSC_FALSE;
  PetscInt       gxm,gym,i,dofs = 2,ctrl[3] = {0,0,0};
  PetscScalar    **Seed = NULL,**Rec = NULL,*u_vec;
  unsigned int   **JP = NULL;
  ISColoring     iscoloring;

  ierr = PetscInitialize(&argc,&argv,NULL,help);if (ierr) return ierr;
  CHKERRQ(PetscNew(&adctx));
  /* Process runtime options (documented in the header comment of this file) */
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-forwardonly",&forwardonly,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-implicitform",&implicitform,NULL));
  appctx.aijpc = PETSC_FALSE,adctx->no_an = PETSC_FALSE,adctx->sparse = PETSC_FALSE,adctx->sparse_view = PETSC_FALSE;adctx->sparse_view_done = PETSC_FALSE;
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-aijpc",&appctx.aijpc,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-no_annotation",&adctx->no_an,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-jacobian_by_hand",&byhand,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-adolc_sparse",&adctx->sparse,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-adolc_sparse_view",&adctx->sparse_view,NULL));
  /* Physical parameters: D1,D2 multiply the Laplacian terms; gamma,kappa enter the reaction terms */
  appctx.D1    = 8.0e-5;
  appctx.D2    = 4.0e-5;
  appctx.gamma = .024;
  appctx.kappa = .06;
  appctx.adctx = adctx;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create distributed array (DMDA) to manage parallel grid and vectors
  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  CHKERRQ(DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_PERIODIC,DM_BOUNDARY_PERIODIC,DMDA_STENCIL_STAR,65,65,PETSC_DECIDE,PETSC_DECIDE,2,1,NULL,NULL,&da));
  CHKERRQ(DMSetFromOptions(da));
  CHKERRQ(DMSetUp(da));
  CHKERRQ(DMDASetFieldName(da,0,"u"));
  CHKERRQ(DMDASetFieldName(da,1,"v"));

  /*  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Extract global vectors from DMDA; then duplicate for remaining
     vectors that are the same types
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  CHKERRQ(DMCreateGlobalVector(da,&x));
  CHKERRQ(VecDuplicate(x,&r));
  CHKERRQ(VecDuplicate(x,&xdot));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  CHKERRQ(TSCreate(PETSC_COMM_WORLD,&ts));
  CHKERRQ(TSSetType(ts,TSCN));
  CHKERRQ(TSSetDM(ts,da));
  CHKERRQ(TSSetProblemType(ts,TS_NONLINEAR));
  if (!implicitform) {
    CHKERRQ(TSSetRHSFunction(ts,NULL,RHSFunctionPassive,&appctx));
  } else {
    CHKERRQ(DMDATSSetIFunctionLocal(da,INSERT_VALUES,(DMDATSIFunctionLocal)IFunctionLocalPassive,&appctx));
  }

  if (!adctx->no_an) {
    CHKERRQ(DMDAGetGhostCorners(da,NULL,NULL,NULL,&gxm,&gym,NULL));
    adctx->m = dofs*gxm*gym;adctx->n = dofs*gxm*gym; /* Number of dependent and independent variables */

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
       Trace function(s) just once
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    /* The active evaluation records an ADOL-C tape which the Jacobian drivers replay later */
    if (!implicitform) {
      CHKERRQ(RHSFunctionActive(ts,1.0,x,r,&appctx));
    } else {
      CHKERRQ(IFunctionActive(ts,1.0,x,xdot,r,&appctx));
    }

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      In the case where ADOL-C generates the Jacobian in compressed format,
      seed and recovery matrices are required. Since the sparsity structure
      of the Jacobian does not change over the course of the time
      integration, we can save computational effort by only generating
      these objects once.
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    if (adctx->sparse) {
      /*
         Generate sparsity pattern

         One way to do this is to use the Jacobian pattern driver `jac_pat`
         provided by ColPack. Otherwise, it is the responsibility of the user
         to obtain the sparsity pattern.
      */
      /* NOTE(review): u_vec is allocated but never initialized before jac_pat;
         presumably only the tape structure matters for the pattern — confirm */
      CHKERRQ(PetscMalloc1(adctx->n,&u_vec));
      JP = (unsigned int **) malloc(adctx->m*sizeof(unsigned int*));
      jac_pat(1,adctx->m,adctx->n,u_vec,JP,ctrl);
      if (adctx->sparse_view) {
        CHKERRQ(PrintSparsity(MPI_COMM_WORLD,adctx->m,JP));
      }

      /*
        Extract a column colouring

        For problems using DMDA, colourings can be extracted directly, as
        follows. There are also ColPack drivers available. Otherwise, it is the
        responsibility of the user to obtain a suitable colouring.
      */
      CHKERRQ(DMCreateColoring(da,IS_COLORING_LOCAL,&iscoloring));
      /* adctx->p receives the number of colours, i.e. the compressed Jacobian width */
      CHKERRQ(ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&adctx->p,NULL));

      /* Generate seed matrix to propagate through the forward mode of AD */
      CHKERRQ(AdolcMalloc2(adctx->n,adctx->p,&Seed));
      CHKERRQ(GenerateSeedMatrix(iscoloring,Seed));
      CHKERRQ(ISColoringDestroy(&iscoloring));

      /*
        Generate recovery matrix, which is used to recover the Jacobian from
        compressed format */
      CHKERRQ(AdolcMalloc2(adctx->m,adctx->p,&Rec));
      CHKERRQ(GetRecoveryMatrix(Seed,JP,adctx->m,adctx->p,Rec));

      /* Store results and free workspace */
      adctx->Rec = Rec;
      for (i=0;i<adctx->m;i++)
        free(JP[i]);
      free(JP);
      CHKERRQ(PetscFree(u_vec));

    } else {

      /*
        In 'full' Jacobian mode, propagate an identity matrix through the
        forward mode of AD.
      */
      adctx->p = adctx->n;
      CHKERRQ(AdolcMalloc2(adctx->n,adctx->p,&Seed));
      CHKERRQ(Identity(adctx->n,Seed));
    }
    adctx->Seed = Seed;
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set Jacobian
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  if (!implicitform) {
    if (!byhand) {
      CHKERRQ(TSSetRHSJacobian(ts,NULL,NULL,RHSJacobianAdolc,&appctx));
    } else {
      CHKERRQ(TSSetRHSJacobian(ts,NULL,NULL,RHSJacobianByHand,&appctx));
    }
  } else {
    if (appctx.aijpc) {
      Mat A,B;

      /* Jacobian in SELL format, preconditioner in AIJ: exercises mixed-format support */
      CHKERRQ(DMSetMatType(da,MATSELL));
      CHKERRQ(DMCreateMatrix(da,&A));
      CHKERRQ(MatConvert(A,MATAIJ,MAT_INITIAL_MATRIX,&B));
      /* FIXME do we need to change viewer to display matrix in natural ordering as DMCreateMatrix_DA does? */
      if (!byhand) {
        CHKERRQ(TSSetIJacobian(ts,A,B,IJacobianAdolc,&appctx));
      } else {
        CHKERRQ(TSSetIJacobian(ts,A,B,IJacobianByHand,&appctx));
      }
      /* TS holds its own references; drop ours */
      CHKERRQ(MatDestroy(&A));
      CHKERRQ(MatDestroy(&B));
    } else {
      if (!byhand) {
        CHKERRQ(TSSetIJacobian(ts,NULL,NULL,IJacobianAdolc,&appctx));
      } else {
        CHKERRQ(TSSetIJacobian(ts,NULL,NULL,IJacobianByHand,&appctx));
      }
    }
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set initial conditions
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  CHKERRQ(InitialConditions(da,x));
  CHKERRQ(TSSetSolution(ts,x));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Have the TS save its trajectory so that TSAdjointSolve() may be used
    and set solver options
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  if (!forwardonly) {
    CHKERRQ(TSSetSaveTrajectory(ts));
    CHKERRQ(TSSetMaxTime(ts,200.0));
    CHKERRQ(TSSetTimeStep(ts,0.5));
  } else {
    CHKERRQ(TSSetMaxTime(ts,2000.0));
    CHKERRQ(TSSetTimeStep(ts,10));
  }
  CHKERRQ(TSSetExactFinalTime(ts,TS_EXACTFINALTIME_STEPOVER));
  CHKERRQ(TSSetFromOptions(ts));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve ODE system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  CHKERRQ(TSSolve(ts,x));
  if (!forwardonly) {
    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
       Start the Adjoint model
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    CHKERRQ(VecDuplicate(x,&lambda[0]));
    /*   Reset initial conditions for the adjoint integration */
    CHKERRQ(InitializeLambda(da,lambda[0],0.5,0.5));
    CHKERRQ(TSSetCostGradients(ts,1,lambda,NULL));
    CHKERRQ(TSAdjointSolve(ts));
    CHKERRQ(VecDestroy(&lambda[0]));
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  CHKERRQ(VecDestroy(&xdot));
  CHKERRQ(VecDestroy(&r));
  CHKERRQ(VecDestroy(&x));
  CHKERRQ(TSDestroy(&ts));
  if (!adctx->no_an) {
    if (adctx->sparse) CHKERRQ(AdolcFree2(Rec));
    CHKERRQ(AdolcFree2(Seed));
  }
  CHKERRQ(DMDestroy(&da));
  CHKERRQ(PetscFree(adctx));
  ierr = PetscFinalize();
  return ierr;
}
299 
300 PetscErrorCode InitialConditions(DM da,Vec U)
301 {
302   PetscInt       i,j,xs,ys,xm,ym,Mx,My;
303   Field          **u;
304   PetscReal      hx,hy,x,y;
305 
306   PetscFunctionBegin;
307   CHKERRQ(DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE));
308 
309   hx = 2.5/(PetscReal)Mx;
310   hy = 2.5/(PetscReal)My;
311 
312   /*
313      Get pointers to vector data
314   */
315   CHKERRQ(DMDAVecGetArray(da,U,&u));
316 
317   /*
318      Get local grid boundaries
319   */
320   CHKERRQ(DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL));
321 
322   /*
323      Compute function over the locally owned part of the grid
324   */
325   for (j=ys; j<ys+ym; j++) {
326     y = j*hy;
327     for (i=xs; i<xs+xm; i++) {
328       x = i*hx;
329       if (PetscApproximateGTE(x,1.0) && PetscApproximateLTE(x,1.5) && PetscApproximateGTE(y,1.0) && PetscApproximateLTE(y,1.5)) u[j][i].v = PetscPowReal(PetscSinReal(4.0*PETSC_PI*x),2.0)*PetscPowReal(PetscSinReal(4.0*PETSC_PI*y),2.0)/4.0;
330       else u[j][i].v = 0.0;
331 
332       u[j][i].u = 1.0 - 2.0*u[j][i].v;
333     }
334   }
335 
336   /*
337      Restore vectors
338   */
339   CHKERRQ(DMDAVecRestoreArray(da,U,&u));
340   PetscFunctionReturn(0);
341 }
342 
343 PetscErrorCode InitializeLambda(DM da,Vec lambda,PetscReal x,PetscReal y)
344 {
345    PetscInt i,j,Mx,My,xs,ys,xm,ym;
346    Field **l;
347    PetscFunctionBegin;
348 
349    CHKERRQ(DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE));
350    /* locate the global i index for x and j index for y */
351    i = (PetscInt)(x*(Mx-1));
352    j = (PetscInt)(y*(My-1));
353    CHKERRQ(DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL));
354 
355    if (xs <= i && i < xs+xm && ys <= j && j < ys+ym) {
356      /* the i,j vertex is on this process */
357      CHKERRQ(DMDAVecGetArray(da,lambda,&l));
358      l[j][i].u = 1.0;
359      l[j][i].v = 1.0;
360      CHKERRQ(DMDAVecRestoreArray(da,lambda,&l));
361    }
362    PetscFunctionReturn(0);
363 }
364 
365 PetscErrorCode IFunctionLocalPassive(DMDALocalInfo *info,PetscReal t,Field**u,Field**udot,Field**f,void *ptr)
366 {
367   AppCtx         *appctx = (AppCtx*)ptr;
368   PetscInt       i,j,xs,ys,xm,ym;
369   PetscReal      hx,hy,sx,sy;
370   PetscScalar    uc,uxx,uyy,vc,vxx,vyy;
371 
372   PetscFunctionBegin;
373   hx = 2.50/(PetscReal)(info->mx); sx = 1.0/(hx*hx);
374   hy = 2.50/(PetscReal)(info->my); sy = 1.0/(hy*hy);
375 
376   /* Get local grid boundaries */
377   xs = info->xs; xm = info->xm; ys = info->ys; ym = info->ym;
378 
379   /* Compute function over the locally owned part of the grid */
380   for (j=ys; j<ys+ym; j++) {
381     for (i=xs; i<xs+xm; i++) {
382       uc        = u[j][i].u;
383       uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
384       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
385       vc        = u[j][i].v;
386       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
387       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
388       f[j][i].u = udot[j][i].u - appctx->D1*(uxx + uyy) + uc*vc*vc - appctx->gamma*(1.0 - uc);
389       f[j][i].v = udot[j][i].v - appctx->D2*(vxx + vyy) - uc*vc*vc + (appctx->gamma + appctx->kappa)*vc;
390     }
391   }
392   CHKERRQ(PetscLogFlops(16.0*xm*ym));
393   PetscFunctionReturn(0);
394 }
395 
/*
  Active (taped) implicit residual evaluation. Called once from main() so that
  ADOL-C records the computation on tape 1; the IJacobian callback then replays
  the tape to form the Jacobian. The numerical result in F matches
  IFunctionLocalPassive.
*/
PetscErrorCode IFunctionActive(TS ts,PetscReal ftime,Vec U,Vec Udot,Vec F,void *ptr)
{
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  DMDALocalInfo  info;
  Field          **u,**f,**udot;
  Vec            localU;
  PetscInt       i,j,xs,ys,xm,ym,gxs,gys,gxm,gym;
  PetscReal      hx,hy,sx,sy;
  adouble        uc,uxx,uyy,vc,vxx,vyy;
  AField         **f_a,*f_c,**u_a,*u_c;
  PetscScalar    dummy; /* sink for dependents at ghost points (see "Mark dependence" below) */

  PetscFunctionBegin;

  CHKERRQ(TSGetDM(ts,&da));
  CHKERRQ(DMDAGetLocalInfo(da,&info));
  CHKERRQ(DMGetLocalVector(da,&localU));
  /* Grid spacings and inverse-square scalings for the 5-point Laplacian */
  hx = 2.50/(PetscReal)(info.mx); sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(info.my); sy = 1.0/(hy*hy);
  xs = info.xs; xm = info.xm; gxs = info.gxs; gxm = info.gxm;
  ys = info.ys; ym = info.ym; gys = info.gys; gym = info.gym;

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
  CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));

  /*
     Get pointers to vector data
  */
  CHKERRQ(DMDAVecGetArrayRead(da,localU,&u));
  CHKERRQ(DMDAVecGetArray(da,F,&f));
  CHKERRQ(DMDAVecGetArrayRead(da,Udot,&udot));

  /*
    Create contiguous 1-arrays of AFields

    NOTE: Memory for ADOL-C active variables (such as adouble and AField)
          cannot be allocated using PetscMalloc, as this does not call the
          relevant class constructor. Instead, we use the C++ keyword `new`.
  */
  u_c = new AField[info.gxm*info.gym];
  f_c = new AField[info.gxm*info.gym];

  /* Create corresponding 2-arrays of AFields */
  u_a = new AField*[info.gym];
  f_a = new AField*[info.gym];

  /* Align indices between array types to endow 2d array with ghost points */
  CHKERRQ(GiveGhostPoints(da,u_c,&u_a));
  CHKERRQ(GiveGhostPoints(da,f_c,&f_a));

  trace_on(1);  /* Start of active section on tape 1 */

  /*
    Mark independence

    NOTE: Ghost points are marked as independent, in place of the points they represent on
          other processors / on other boundaries.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      u_a[j][i].u <<= u[j][i].u;
      u_a[j][i].v <<= u[j][i].v;
    }
  }

  /* Compute function over the locally owned part of the grid */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc        = u_a[j][i].u;
      uxx       = (-2.0*uc + u_a[j][i-1].u + u_a[j][i+1].u)*sx;
      uyy       = (-2.0*uc + u_a[j-1][i].u + u_a[j+1][i].u)*sy;
      vc        = u_a[j][i].v;
      vxx       = (-2.0*vc + u_a[j][i-1].v + u_a[j][i+1].v)*sx;
      vyy       = (-2.0*vc + u_a[j-1][i].v + u_a[j+1][i].v)*sy;
      f_a[j][i].u = udot[j][i].u - appctx->D1*(uxx + uyy) + uc*vc*vc - appctx->gamma*(1.0 - uc);
      f_a[j][i].v = udot[j][i].v - appctx->D2*(vxx + vyy) - uc*vc*vc + (appctx->gamma + appctx->kappa)*vc;
    }
  }

  /*
    Mark dependence

    NOTE: Marking dependence of dummy variables makes the index notation much simpler when forming
          the Jacobian later.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      if ((i < xs) || (i >= xs+xm) || (j < ys) || (j >= ys+ym)) {
        /* Ghost point: route the output into a throwaway scalar */
        f_a[j][i].u >>= dummy;
        f_a[j][i].v >>= dummy;
      } else {
        f_a[j][i].u >>= f[j][i].u;
        f_a[j][i].v >>= f[j][i].v;
      }
    }
  }
  trace_off();  /* End of active section */
  CHKERRQ(PetscLogFlops(16.0*xm*ym));

  /* Restore vectors */
  CHKERRQ(DMDAVecRestoreArray(da,F,&f));
  CHKERRQ(DMDAVecRestoreArrayRead(da,localU,&u));
  CHKERRQ(DMDAVecRestoreArrayRead(da,Udot,&udot));

  CHKERRQ(DMRestoreLocalVector(da,&localU));

  /* Destroy AFields appropriately */
  /* Undo the ghost-point offset applied by GiveGhostPoints so delete[] sees the original pointers */
  f_a += info.gys;
  u_a += info.gys;
  delete[] f_a;
  delete[] u_a;
  delete[] f_c;
  delete[] u_c;

  PetscFunctionReturn(0);
}
519 
520 PetscErrorCode RHSFunctionPassive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr)
521 {
522   AppCtx         *appctx = (AppCtx*)ptr;
523   DM             da;
524   PetscInt       i,j,xs,ys,xm,ym,Mx,My;
525   PetscReal      hx,hy,sx,sy;
526   PetscScalar    uc,uxx,uyy,vc,vxx,vyy;
527   Field          **u,**f;
528   Vec            localU,localF;
529 
530   PetscFunctionBegin;
531   CHKERRQ(TSGetDM(ts,&da));
532   CHKERRQ(DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE));
533   hx = 2.50/(PetscReal)(Mx);sx = 1.0/(hx*hx);
534   hy = 2.50/(PetscReal)(My);sy = 1.0/(hy*hy);
535   CHKERRQ(DMGetLocalVector(da,&localU));
536   CHKERRQ(DMGetLocalVector(da,&localF));
537 
538   /*
539      Scatter ghost points to local vector,using the 2-step process
540         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
541      By placing code between these two statements, computations can be
542      done while messages are in transition.
543   */
544   CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
545   CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));
546   CHKERRQ(VecZeroEntries(F)); // NOTE (1): See (2) below
547   CHKERRQ(DMGlobalToLocalBegin(da,F,INSERT_VALUES,localF));
548   CHKERRQ(DMGlobalToLocalEnd(da,F,INSERT_VALUES,localF));
549 
550   /*
551      Get pointers to vector data
552   */
553   CHKERRQ(DMDAVecGetArrayRead(da,localU,&u));
554   CHKERRQ(DMDAVecGetArray(da,localF,&f));
555 
556   /*
557      Get local grid boundaries
558   */
559   CHKERRQ(DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL));
560 
561   /*
562      Compute function over the locally owned part of the grid
563   */
564   for (j=ys; j<ys+ym; j++) {
565     for (i=xs; i<xs+xm; i++) {
566       uc        = u[j][i].u;
567       uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
568       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
569       vc        = u[j][i].v;
570       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
571       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
572       f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
573       f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc;
574     }
575   }
576 
577   /*
578      Gather global vector, using the 2-step process
579         DMLocalToGlobalBegin(),DMLocalToGlobalEnd().
580 
581      NOTE (2): We need to use ADD_VALUES if boundaries are not of type DM_BOUNDARY_NONE or
582                DM_BOUNDARY_GHOSTED, meaning we should also zero F before addition (see (1) above).
583   */
584   CHKERRQ(DMLocalToGlobalBegin(da,localF,ADD_VALUES,F));
585   CHKERRQ(DMLocalToGlobalEnd(da,localF,ADD_VALUES,F));
586 
587   /*
588      Restore vectors
589   */
590   CHKERRQ(DMDAVecRestoreArray(da,localF,&f));
591   CHKERRQ(DMDAVecRestoreArrayRead(da,localU,&u));
592   CHKERRQ(DMRestoreLocalVector(da,&localF));
593   CHKERRQ(DMRestoreLocalVector(da,&localU));
594   CHKERRQ(PetscLogFlops(16.0*xm*ym));
595   PetscFunctionReturn(0);
596 }
597 
/*
  Active (taped) RHS evaluation. Called once from main() so ADOL-C records the
  computation on tape 1; RHSJacobianAdolc replays the tape to form the Jacobian.
  The numerical result in F matches RHSFunctionPassive.
*/
PetscErrorCode RHSFunctionActive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr)
{
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  PetscInt       i,j,xs,ys,xm,ym,gxs,gys,gxm,gym,Mx,My;
  PetscReal      hx,hy,sx,sy;
  AField         **f_a,*f_c,**u_a,*u_c;
  adouble        uc,uxx,uyy,vc,vxx,vyy;
  Field          **u,**f;
  Vec            localU,localF;

  PetscFunctionBegin;
  CHKERRQ(TSGetDM(ts,&da));
  CHKERRQ(DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE));
  /* Grid spacings and inverse-square scalings for the 5-point Laplacian */
  hx = 2.50/(PetscReal)(Mx);sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(My);sy = 1.0/(hy*hy);
  CHKERRQ(DMGetLocalVector(da,&localU));
  CHKERRQ(DMGetLocalVector(da,&localF));

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
  CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));
  CHKERRQ(VecZeroEntries(F)); // NOTE (1): See (2) below
  CHKERRQ(DMGlobalToLocalBegin(da,F,INSERT_VALUES,localF));
  CHKERRQ(DMGlobalToLocalEnd(da,F,INSERT_VALUES,localF));

  /*
     Get pointers to vector data
  */
  CHKERRQ(DMDAVecGetArrayRead(da,localU,&u));
  CHKERRQ(DMDAVecGetArray(da,localF,&f));

  /*
     Get local and ghosted grid boundaries
  */
  CHKERRQ(DMDAGetGhostCorners(da,&gxs,&gys,NULL,&gxm,&gym,NULL));
  CHKERRQ(DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL));

  /*
    Create contiguous 1-arrays of AFields

    NOTE: Memory for ADOL-C active variables (such as adouble and AField)
          cannot be allocated using PetscMalloc, as this does not call the
          relevant class constructor. Instead, we use the C++ keyword `new`.
  */
  u_c = new AField[gxm*gym];
  f_c = new AField[gxm*gym];

  /* Create corresponding 2-arrays of AFields */
  u_a = new AField*[gym];
  f_a = new AField*[gym];

  /* Align indices between array types to endow 2d array with ghost points */
  CHKERRQ(GiveGhostPoints(da,u_c,&u_a));
  CHKERRQ(GiveGhostPoints(da,f_c,&f_a));

  /*
     Compute function over the locally owned part of the grid
  */
  trace_on(1);  // ----------------------------------------------- Start of active section

  /*
    Mark independence

    NOTE: Ghost points are marked as independent, in place of the points they represent on
          other processors / on other boundaries.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      u_a[j][i].u <<= u[j][i].u;
      u_a[j][i].v <<= u[j][i].v;
    }
  }

  /*
    Compute function over the locally owned part of the grid
  */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc          = u_a[j][i].u;
      uxx         = (-2.0*uc + u_a[j][i-1].u + u_a[j][i+1].u)*sx;
      uyy         = (-2.0*uc + u_a[j-1][i].u + u_a[j+1][i].u)*sy;
      vc          = u_a[j][i].v;
      vxx         = (-2.0*vc + u_a[j][i-1].v + u_a[j][i+1].v)*sx;
      vyy         = (-2.0*vc + u_a[j-1][i].v + u_a[j+1][i].v)*sy;
      f_a[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
      f_a[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc;
    }
  }
  /*
    Mark dependence

    NOTE: Ghost points are marked as dependent in order to vastly simplify index notation
          during Jacobian assembly.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      f_a[j][i].u >>= f[j][i].u;
      f_a[j][i].v >>= f[j][i].v;
    }
  }
  trace_off();  // ----------------------------------------------- End of active section

  /* Test zeroth order scalar evaluation in ADOL-C gives the same result */
//  if (appctx->adctx->zos) {
//    CHKERRQ(TestZOS2d(da,f,u,appctx)); // FIXME: Why does this give nonzero?
//  }

  /*
     Gather global vector, using the 2-step process
        DMLocalToGlobalBegin(),DMLocalToGlobalEnd().

     NOTE (2): We need to use ADD_VALUES if boundaries are not of type DM_BOUNDARY_NONE or
               DM_BOUNDARY_GHOSTED, meaning we should also zero F before addition (see (1) above).
  */
  CHKERRQ(DMLocalToGlobalBegin(da,localF,ADD_VALUES,F));
  CHKERRQ(DMLocalToGlobalEnd(da,localF,ADD_VALUES,F));

  /*
     Restore vectors
  */
  CHKERRQ(DMDAVecRestoreArray(da,localF,&f));
  CHKERRQ(DMDAVecRestoreArrayRead(da,localU,&u));
  CHKERRQ(DMRestoreLocalVector(da,&localF));
  CHKERRQ(DMRestoreLocalVector(da,&localU));
  CHKERRQ(PetscLogFlops(16.0*xm*ym));

  /* Destroy AFields appropriately */
  /* Undo the ghost-point offset applied by GiveGhostPoints so delete[] sees the original pointers */
  f_a += gys;
  u_a += gys;
  delete[] f_a;
  delete[] u_a;
  delete[] f_c;
  delete[] u_c;

  PetscFunctionReturn(0);
}
740 
741 PetscErrorCode IJacobianAdolc(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx)
742 {
743   AppCtx            *appctx = (AppCtx*)ctx;
744   DM                da;
745   const PetscScalar *u_vec;
746   Vec               localU;
747 
748   PetscFunctionBegin;
749   CHKERRQ(TSGetDM(ts,&da));
750   CHKERRQ(DMGetLocalVector(da,&localU));
751 
752   /*
753      Scatter ghost points to local vector,using the 2-step process
754         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
755      By placing code between these two statements, computations can be
756      done while messages are in transition.
757   */
758   CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
759   CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));
760 
761   /* Get pointers to vector data */
762   CHKERRQ(VecGetArrayRead(localU,&u_vec));
763 
764   /*
765     Compute Jacobian
766   */
767   CHKERRQ(PetscAdolcComputeIJacobianLocalIDMass(1,A,u_vec,a,appctx->adctx));
768 
769   /*
770      Restore vectors
771   */
772   CHKERRQ(VecRestoreArrayRead(localU,&u_vec));
773   CHKERRQ(DMRestoreLocalVector(da,&localU));
774   PetscFunctionReturn(0);
775 }
776 
777 PetscErrorCode IJacobianByHand(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx)
778 {
779   AppCtx         *appctx = (AppCtx*)ctx;     /* user-defined application context */
780   DM             da;
781   PetscInt       i,j,Mx,My,xs,ys,xm,ym;
782   PetscReal      hx,hy,sx,sy;
783   PetscScalar    uc,vc;
784   Field          **u;
785   Vec            localU;
786   MatStencil     stencil[6],rowstencil;
787   PetscScalar    entries[6];
788 
789   PetscFunctionBegin;
790   CHKERRQ(TSGetDM(ts,&da));
791   CHKERRQ(DMGetLocalVector(da,&localU));
792   CHKERRQ(DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE));
793 
794   hx = 2.50/(PetscReal)Mx; sx = 1.0/(hx*hx);
795   hy = 2.50/(PetscReal)My; sy = 1.0/(hy*hy);
796 
797   /*
798      Scatter ghost points to local vector,using the 2-step process
799         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
800      By placing code between these two statements, computations can be
801      done while messages are in transition.
802   */
803   CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
804   CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));
805 
806   /*
807      Get pointers to vector data
808   */
809   CHKERRQ(DMDAVecGetArrayRead(da,localU,&u));
810 
811   /*
812      Get local grid boundaries
813   */
814   CHKERRQ(DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL));
815 
816   stencil[0].k = 0;
817   stencil[1].k = 0;
818   stencil[2].k = 0;
819   stencil[3].k = 0;
820   stencil[4].k = 0;
821   stencil[5].k = 0;
822   rowstencil.k = 0;
823   /*
824      Compute function over the locally owned part of the grid
825   */
826   for (j=ys; j<ys+ym; j++) {
827 
828     stencil[0].j = j-1;
829     stencil[1].j = j+1;
830     stencil[2].j = j;
831     stencil[3].j = j;
832     stencil[4].j = j;
833     stencil[5].j = j;
834     rowstencil.k = 0; rowstencil.j = j;
835     for (i=xs; i<xs+xm; i++) {
836       uc = u[j][i].u;
837       vc = u[j][i].v;
838 
839       /*      uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
840       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
841 
842       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
843       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
844        f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);*/
845 
846       stencil[0].i = i; stencil[0].c = 0; entries[0] = -appctx->D1*sy;
847       stencil[1].i = i; stencil[1].c = 0; entries[1] = -appctx->D1*sy;
848       stencil[2].i = i-1; stencil[2].c = 0; entries[2] = -appctx->D1*sx;
849       stencil[3].i = i+1; stencil[3].c = 0; entries[3] = -appctx->D1*sx;
850       stencil[4].i = i; stencil[4].c = 0; entries[4] = 2.0*appctx->D1*(sx + sy) + vc*vc + appctx->gamma + a;
851       stencil[5].i = i; stencil[5].c = 1; entries[5] = 2.0*uc*vc;
852       rowstencil.i = i; rowstencil.c = 0;
853 
854       CHKERRQ(MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES));
855       if (appctx->aijpc) {
856         CHKERRQ(MatSetValuesStencil(B,1,&rowstencil,6,stencil,entries,INSERT_VALUES));
857       }
858       stencil[0].c = 1; entries[0] = -appctx->D2*sy;
859       stencil[1].c = 1; entries[1] = -appctx->D2*sy;
860       stencil[2].c = 1; entries[2] = -appctx->D2*sx;
861       stencil[3].c = 1; entries[3] = -appctx->D2*sx;
862       stencil[4].c = 1; entries[4] = 2.0*appctx->D2*(sx + sy) - 2.0*uc*vc + appctx->gamma + appctx->kappa + a;
863       stencil[5].c = 0; entries[5] = -vc*vc;
864 
865       CHKERRQ(MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES));
866       if (appctx->aijpc) {
867         CHKERRQ(MatSetValuesStencil(B,1,&rowstencil,6,stencil,entries,INSERT_VALUES));
868       }
869       /* f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc; */
870     }
871   }
872 
873   /*
874      Restore vectors
875   */
876   CHKERRQ(PetscLogFlops(19.0*xm*ym));
877   CHKERRQ(DMDAVecRestoreArrayRead(da,localU,&u));
878   CHKERRQ(DMRestoreLocalVector(da,&localU));
879   CHKERRQ(MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY));
880   CHKERRQ(MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY));
881   CHKERRQ(MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
882   if (appctx->aijpc) {
883     CHKERRQ(MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY));
884     CHKERRQ(MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY));
885     CHKERRQ(MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
886   }
887   PetscFunctionReturn(0);
888 }
889 
890 PetscErrorCode RHSJacobianByHand(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx)
891 {
892   AppCtx         *appctx = (AppCtx*)ctx;     /* user-defined application context */
893   DM             da;
894   PetscInt       i,j,Mx,My,xs,ys,xm,ym;
895   PetscReal      hx,hy,sx,sy;
896   PetscScalar    uc,vc;
897   Field          **u;
898   Vec            localU;
899   MatStencil     stencil[6],rowstencil;
900   PetscScalar    entries[6];
901 
902   PetscFunctionBegin;
903   CHKERRQ(TSGetDM(ts,&da));
904   CHKERRQ(DMGetLocalVector(da,&localU));
905   CHKERRQ(DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE));
906 
907   hx = 2.50/(PetscReal)(Mx); sx = 1.0/(hx*hx);
908   hy = 2.50/(PetscReal)(My); sy = 1.0/(hy*hy);
909 
910   /*
911      Scatter ghost points to local vector,using the 2-step process
912         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
913      By placing code between these two statements, computations can be
914      done while messages are in transition.
915   */
916   CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
917   CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));
918 
919   /*
920      Get pointers to vector data
921   */
922   CHKERRQ(DMDAVecGetArrayRead(da,localU,&u));
923 
924   /*
925      Get local grid boundaries
926   */
927   CHKERRQ(DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL));
928 
929   stencil[0].k = 0;
930   stencil[1].k = 0;
931   stencil[2].k = 0;
932   stencil[3].k = 0;
933   stencil[4].k = 0;
934   stencil[5].k = 0;
935   rowstencil.k = 0;
936 
937   /*
938      Compute function over the locally owned part of the grid
939   */
940   for (j=ys; j<ys+ym; j++) {
941 
942     stencil[0].j = j-1;
943     stencil[1].j = j+1;
944     stencil[2].j = j;
945     stencil[3].j = j;
946     stencil[4].j = j;
947     stencil[5].j = j;
948     rowstencil.k = 0; rowstencil.j = j;
949     for (i=xs; i<xs+xm; i++) {
950       uc = u[j][i].u;
951       vc = u[j][i].v;
952 
953       /*      uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
954       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
955 
956       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
957       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
958        f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);*/
959 
960       stencil[0].i = i; stencil[0].c = 0; entries[0] = appctx->D1*sy;
961       stencil[1].i = i; stencil[1].c = 0; entries[1] = appctx->D1*sy;
962       stencil[2].i = i-1; stencil[2].c = 0; entries[2] = appctx->D1*sx;
963       stencil[3].i = i+1; stencil[3].c = 0; entries[3] = appctx->D1*sx;
964       stencil[4].i = i; stencil[4].c = 0; entries[4] = -2.0*appctx->D1*(sx + sy) - vc*vc - appctx->gamma;
965       stencil[5].i = i; stencil[5].c = 1; entries[5] = -2.0*uc*vc;
966       rowstencil.i = i; rowstencil.c = 0;
967 
968       CHKERRQ(MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES));
969 
970       stencil[0].c = 1; entries[0] = appctx->D2*sy;
971       stencil[1].c = 1; entries[1] = appctx->D2*sy;
972       stencil[2].c = 1; entries[2] = appctx->D2*sx;
973       stencil[3].c = 1; entries[3] = appctx->D2*sx;
974       stencil[4].c = 1; entries[4] = -2.0*appctx->D2*(sx + sy) + 2.0*uc*vc - appctx->gamma - appctx->kappa;
975       stencil[5].c = 0; entries[5] = vc*vc;
976       rowstencil.c = 1;
977 
978       CHKERRQ(MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES));
979       /* f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc; */
980     }
981   }
982 
983   /*
984      Restore vectors
985   */
986   CHKERRQ(PetscLogFlops(19.0*xm*ym));
987   CHKERRQ(DMDAVecRestoreArrayRead(da,localU,&u));
988   CHKERRQ(DMRestoreLocalVector(da,&localU));
989   CHKERRQ(MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY));
990   CHKERRQ(MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY));
991   CHKERRQ(MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
992   if (appctx->aijpc) {
993     CHKERRQ(MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY));
994     CHKERRQ(MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY));
995     CHKERRQ(MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
996   }
997   PetscFunctionReturn(0);
998 }
999 
/*
   RHSJacobianAdolc - RHS Jacobian callback that delegates the Jacobian
   computation to ADOL-C automatic differentiation.

   Input Parameters:
     ts  - the TS context
     t   - current time (unused here)
     U   - global state vector
     ctx - user-defined AppCtx carrying the ADOL-C context (adctx)

   Output Parameters:
     A - Jacobian matrix, filled by PetscAdolcComputeRHSJacobianLocal
     B - preconditioning matrix (unused in this callback)
*/
PetscErrorCode RHSJacobianAdolc(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx)
{
  AppCtx         *appctx = (AppCtx*)ctx;
  DM             da;
  PetscScalar    *u_vec;
  Vec            localU;

  PetscFunctionBegin;
  CHKERRQ(TSGetDM(ts,&da));
  CHKERRQ(DMGetLocalVector(da,&localU));

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  CHKERRQ(DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU));
  CHKERRQ(DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU));

  /* Get pointers to vector data */
  /* NOTE(review): VecGetArray (not VecGetArrayRead) is used, presumably
     because PetscAdolcComputeRHSJacobianLocal takes a non-const pointer;
     the array does not appear to be modified here — confirm before changing. */
  CHKERRQ(VecGetArray(localU,&u_vec));

  /*
    Compute Jacobian via ADOL-C, using the tape recorded in appctx->adctx
  */
  CHKERRQ(PetscAdolcComputeRHSJacobianLocal(1,A,u_vec,appctx->adctx));

  /*
     Restore vectors
  */
  CHKERRQ(VecRestoreArray(localU,&u_vec));
  CHKERRQ(DMRestoreLocalVector(da,&localU));
  PetscFunctionReturn(0);
}
1035 
1036 /*TEST
1037 
1038   build:
1039     requires: double !complex adolc colpack
1040 
1041   test:
1042     suffix: 1
1043     nsize: 1
1044     args: -ts_max_steps 1 -da_grid_x 12 -da_grid_y 12 -snes_test_jacobian
1045     output_file: output/adr_ex5adj_1.out
1046 
1047   test:
1048     suffix: 2
1049     nsize: 1
1050     args: -ts_max_steps 1 -da_grid_x 12 -da_grid_y 12 -snes_test_jacobian -implicitform
1051     output_file: output/adr_ex5adj_2.out
1052 
1053   test:
1054     suffix: 3
1055     nsize: 4
1056     args: -ts_max_steps 10 -da_grid_x 12 -da_grid_y 12 -ts_monitor -ts_adjoint_monitor
1057     output_file: output/adr_ex5adj_3.out
1058 
1059   test:
1060     suffix: 4
1061     nsize: 4
1062     args: -ts_max_steps 10 -da_grid_x 12 -da_grid_y 12 -ts_monitor -ts_adjoint_monitor -implicitform
1063     output_file: output/adr_ex5adj_4.out
1064 
1065   testset:
1066     suffix: 5
1067     nsize: 4
1068     args: -ts_max_steps 10 -da_grid_x 15 -da_grid_y 15 -ts_monitor -ts_adjoint_monitor -adolc_sparse
1069     output_file: output/adr_ex5adj_5.out
1070 
1071   testset:
1072     suffix: 6
1073     nsize: 4
1074     args: -ts_max_steps 10 -da_grid_x 15 -da_grid_y 15 -ts_monitor -ts_adjoint_monitor -adolc_sparse -implicitform
1075     output_file: output/adr_ex5adj_6.out
1076 
1077 TEST*/
1078