xref: /petsc/src/tao/unconstrained/impls/ntl/ntl.c (revision 78e4361a5bf1b9c7dbfc82cbbf1a1a2ebd0efe3a)
1 #include <../src/tao/unconstrained/impls/ntl/ntlimpl.h>
2 
3 #include <petscksp.h>
4 
5 #define NTL_PC_NONE     0
6 #define NTL_PC_AHESS    1
7 #define NTL_PC_BFGS     2
8 #define NTL_PC_PETSC    3
9 #define NTL_PC_TYPES    4
10 
11 #define BFGS_SCALE_AHESS        0
12 #define BFGS_SCALE_BFGS         1
13 #define BFGS_SCALE_TYPES        2
14 
15 #define NTL_INIT_CONSTANT         0
16 #define NTL_INIT_DIRECTION        1
17 #define NTL_INIT_INTERPOLATION    2
18 #define NTL_INIT_TYPES            3
19 
20 #define NTL_UPDATE_REDUCTION      0
21 #define NTL_UPDATE_INTERPOLATION  1
22 #define NTL_UPDATE_TYPES          2
23 
24 static const char *NTL_PC[64] = {"none","ahess","bfgs","petsc"};
25 
26 static const char *BFGS_SCALE[64] = {"ahess","bfgs"};
27 
28 static const char *NTL_INIT[64] = {"constant","direction","interpolation"};
29 
30 static const char *NTL_UPDATE[64] = {"reduction","interpolation"};
31 
32 /* Implements Newton's Method with a trust-region, line-search approach for
33    solving unconstrained minimization problems.  A More'-Thuente line search
34    is used to guarantee that the bfgs preconditioner remains positive
35    definite. */
36 
37 #define NTL_NEWTON              0
38 #define NTL_BFGS                1
39 #define NTL_SCALED_GRADIENT     2
40 #define NTL_GRADIENT            3
41 
42 PetscErrorCode TaoNTLPrecondBFGS(PC BFGSpc, Vec X, Vec Y)
43 {
44   PetscErrorCode ierr;
45   Mat *M;
46 
47   PetscFunctionBegin;
48   ierr = PCShellGetContext(BFGSpc, (void**)&M);CHKERRQ(ierr);
49   ierr = MatSolve(*M, X, Y);CHKERRQ(ierr);
50   PetscFunctionReturn(0);
51 }
52 
53 static PetscErrorCode TaoSolve_NTL(Tao tao)
54 {
55   TAO_NTL                      *tl = (TAO_NTL *)tao->data;
56   KSPType                      ksp_type;
57   PetscBool                    is_nash,is_stcg,is_gltr;
58   KSPConvergedReason           ksp_reason;
59   PC                           pc;
60   TaoLineSearchConvergedReason ls_reason;
61 
62   PetscReal                    fmin, ftrial, prered, actred, kappa, sigma;
63   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
64   PetscReal                    f, fold, gdx, gnorm;
65   PetscReal                    step = 1.0;
66 
67   PetscReal                    delta;
68   PetscReal                    norm_d = 0.0;
69   PetscErrorCode               ierr;
70   PetscInt                     stepType;
71   PetscInt                     its;
72 
73   PetscInt                     bfgsUpdates = 0;
74   PetscInt                     needH;
75 
76   PetscInt                     i_max = 5;
77   PetscInt                     j_max = 1;
78   PetscInt                     i, j, n, N;
79 
80   PetscInt                     tr_reject;
81 
82   PetscFunctionBegin;
83   if (tao->XL || tao->XU || tao->ops->computebounds) {
84     ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by ntl algorithm\n");CHKERRQ(ierr);
85   }
86 
87   ierr = KSPGetType(tao->ksp,&ksp_type);CHKERRQ(ierr);
88   ierr = PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);CHKERRQ(ierr);
89   ierr = PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);CHKERRQ(ierr);
90   ierr = PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);CHKERRQ(ierr);
91   if (!is_nash && !is_stcg && !is_gltr) {
92     SETERRQ(PETSC_COMM_SELF,1,"TAO_NTR requires nash, stcg, or gltr for the KSP");
93   }
94 
95   /* Initialize the radius and modify if it is too large or small */
96   tao->trust = tao->trust0;
97   tao->trust = PetscMax(tao->trust, tl->min_radius);
98   tao->trust = PetscMin(tao->trust, tl->max_radius);
99 
100   if (NTL_PC_BFGS == tl->pc_type && !tl->M) {
101     ierr = VecGetLocalSize(tao->solution,&n);CHKERRQ(ierr);
102     ierr = VecGetSize(tao->solution,&N);CHKERRQ(ierr);
103     ierr = MatCreateLMVMBFGS(((PetscObject)tao)->comm,n,N,&tl->M);CHKERRQ(ierr);
104     ierr = MatLMVMAllocate(tl->M,tao->solution,tao->gradient);CHKERRQ(ierr);
105   }
106 
107   /* Check convergence criteria */
108   ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);
109   ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);
110   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
111   needH = 1;
112 
113   tao->reason = TAO_CONTINUE_ITERATING;
114   ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
115   ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);CHKERRQ(ierr);
116   ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
117   if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);
118 
119   /* Create vectors for the limited memory preconditioner */
120   if ((NTL_PC_BFGS == tl->pc_type) && (BFGS_SCALE_BFGS != tl->bfgs_scale_type)) {
121     if (!tl->Diag) {
122       ierr = VecDuplicate(tao->solution, &tl->Diag);CHKERRQ(ierr);
123     }
124   }
125 
126   /* Modify the preconditioner to use the bfgs approximation */
127   ierr = KSPGetPC(tao->ksp, &pc);CHKERRQ(ierr);
128   switch(tl->pc_type) {
129   case NTL_PC_NONE:
130     ierr = PCSetType(pc, PCNONE);CHKERRQ(ierr);
131     ierr = PCSetFromOptions(pc);CHKERRQ(ierr);
132     break;
133 
134   case NTL_PC_AHESS:
135     ierr = PCSetType(pc, PCJACOBI);CHKERRQ(ierr);
136     ierr = PCSetFromOptions(pc);CHKERRQ(ierr);
137     ierr = PCJacobiSetUseAbs(pc,PETSC_TRUE);CHKERRQ(ierr);
138     break;
139 
140   case NTL_PC_BFGS:
141     ierr = PCSetType(pc, PCSHELL);CHKERRQ(ierr);
142     ierr = PCSetFromOptions(pc);CHKERRQ(ierr);
143     ierr = PCShellSetName(pc, "bfgs");CHKERRQ(ierr);
144     ierr = PCShellSetContext(pc, tl->M);CHKERRQ(ierr);
145     ierr = PCShellSetApply(pc,TaoNTLPrecondBFGS);CHKERRQ(ierr);
146     break;
147 
148   default:
149     /* Use the pc method set by pc_type */
150     break;
151   }
152 
153   /* Initialize trust-region radius */
154   switch(tl->init_type) {
155   case NTL_INIT_CONSTANT:
156     /* Use the initial radius specified */
157     break;
158 
159   case NTL_INIT_INTERPOLATION:
160     /* Use the initial radius specified */
161     max_radius = 0.0;
162 
163     for (j = 0; j < j_max; ++j) {
164       fmin = f;
165       sigma = 0.0;
166 
167       if (needH) {
168         ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
169         needH = 0;
170       }
171 
172       for (i = 0; i < i_max; ++i) {
173         ierr = VecCopy(tao->solution, tl->W);CHKERRQ(ierr);
174         ierr = VecAXPY(tl->W, -tao->trust/gnorm, tao->gradient);CHKERRQ(ierr);
175 
176         ierr = TaoComputeObjective(tao, tl->W, &ftrial);CHKERRQ(ierr);
177         if (PetscIsInfOrNanReal(ftrial)) {
178           tau = tl->gamma1_i;
179         } else {
180           if (ftrial < fmin) {
181             fmin = ftrial;
182             sigma = -tao->trust / gnorm;
183           }
184 
185           ierr = MatMult(tao->hessian, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
186           ierr = VecDot(tao->gradient, tao->stepdirection, &prered);CHKERRQ(ierr);
187 
188           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
189           actred = f - ftrial;
190           if ((PetscAbsScalar(actred) <= tl->epsilon) && (PetscAbsScalar(prered) <= tl->epsilon)) {
191             kappa = 1.0;
192           } else {
193             kappa = actred / prered;
194           }
195 
196           tau_1 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust + (1.0 - tl->theta_i) * prered - actred);
197           tau_2 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust - (1.0 + tl->theta_i) * prered + actred);
198           tau_min = PetscMin(tau_1, tau_2);
199           tau_max = PetscMax(tau_1, tau_2);
200 
201           if (PetscAbsScalar(kappa - 1.0) <= tl->mu1_i) {
202             /* Great agreement */
203             max_radius = PetscMax(max_radius, tao->trust);
204 
205             if (tau_max < 1.0) {
206               tau = tl->gamma3_i;
207             } else if (tau_max > tl->gamma4_i) {
208               tau = tl->gamma4_i;
209             } else if (tau_1 >= 1.0 && tau_1 <= tl->gamma4_i && tau_2 < 1.0) {
210               tau = tau_1;
211             } else if (tau_2 >= 1.0 && tau_2 <= tl->gamma4_i && tau_1 < 1.0) {
212               tau = tau_2;
213             } else {
214               tau = tau_max;
215             }
216           } else if (PetscAbsScalar(kappa - 1.0) <= tl->mu2_i) {
217             /* Good agreement */
218             max_radius = PetscMax(max_radius, tao->trust);
219 
220             if (tau_max < tl->gamma2_i) {
221               tau = tl->gamma2_i;
222             } else if (tau_max > tl->gamma3_i) {
223               tau = tl->gamma3_i;
224             } else {
225               tau = tau_max;
226             }
227           } else {
228             /* Not good agreement */
229             if (tau_min > 1.0) {
230               tau = tl->gamma2_i;
231             } else if (tau_max < tl->gamma1_i) {
232               tau = tl->gamma1_i;
233             } else if ((tau_min < tl->gamma1_i) && (tau_max >= 1.0)) {
234               tau = tl->gamma1_i;
235             } else if ((tau_1 >= tl->gamma1_i) && (tau_1 < 1.0) &&  ((tau_2 < tl->gamma1_i) || (tau_2 >= 1.0))) {
236               tau = tau_1;
237             } else if ((tau_2 >= tl->gamma1_i) && (tau_2 < 1.0) &&  ((tau_1 < tl->gamma1_i) || (tau_2 >= 1.0))) {
238               tau = tau_2;
239             } else {
240               tau = tau_max;
241             }
242           }
243         }
244         tao->trust = tau * tao->trust;
245       }
246 
247       if (fmin < f) {
248         f = fmin;
249         ierr = VecAXPY(tao->solution, sigma, tao->gradient);CHKERRQ(ierr);
250         ierr = TaoComputeGradient(tao, tao->solution, tao->gradient);CHKERRQ(ierr);
251 
252         ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);
253         if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
254         needH = 1;
255 
256         ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
257         ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);CHKERRQ(ierr);
258         ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
259         if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);
260       }
261     }
262     tao->trust = PetscMax(tao->trust, max_radius);
263 
264     /* Modify the radius if it is too large or small */
265     tao->trust = PetscMax(tao->trust, tl->min_radius);
266     tao->trust = PetscMin(tao->trust, tl->max_radius);
267     break;
268 
269   default:
270     /* Norm of the first direction will initialize radius */
271     tao->trust = 0.0;
272     break;
273   }
274 
275   /* Set initial scaling for the BFGS preconditioner
276      This step is done after computing the initial trust-region radius
277      since the function value may have decreased */
278   if (NTL_PC_BFGS == tl->pc_type) {
279     delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
280     ierr = MatLMVMSetJ0Scale(tl->M, delta);CHKERRQ(ierr);
281   }
282 
283   /* Set counter for gradient/reset steps */
284   tl->ntrust = 0;
285   tl->newt = 0;
286   tl->bfgs = 0;
287   tl->sgrad = 0;
288   tl->grad = 0;
289 
290   /* Have not converged; continue with Newton method */
291   while (tao->reason == TAO_CONTINUE_ITERATING) {
292     ++tao->niter;
293     tao->ksp_its=0;
294     /* Compute the Hessian */
295     if (needH) {
296       ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
297     }
298 
299     if (NTL_PC_BFGS == tl->pc_type) {
300       if (BFGS_SCALE_AHESS == tl->bfgs_scale_type) {
301         /* Obtain diagonal for the bfgs preconditioner */
302         ierr = MatGetDiagonal(tao->hessian, tl->Diag);CHKERRQ(ierr);
303         ierr = VecAbs(tl->Diag);CHKERRQ(ierr);
304         ierr = VecReciprocal(tl->Diag);CHKERRQ(ierr);
305         ierr = MatLMVMSetJ0Diag(tl->M, tl->Diag);CHKERRQ(ierr);
306       }
307 
308       /* Update the limited memory preconditioner */
309       ierr = MatLMVMUpdate(tl->M,tao->solution, tao->gradient);CHKERRQ(ierr);
310       ++bfgsUpdates;
311     }
312     ierr = KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);CHKERRQ(ierr);
313     /* Solve the Newton system of equations */
314     ierr = KSPCGSetRadius(tao->ksp,tl->max_radius);CHKERRQ(ierr);
315     ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
316     ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
317     tao->ksp_its+=its;
318     tao->ksp_tot_its+=its;
319     ierr = KSPCGGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
320 
321     if (0.0 == tao->trust) {
322       /* Radius was uninitialized; use the norm of the direction */
323       if (norm_d > 0.0) {
324         tao->trust = norm_d;
325 
326         /* Modify the radius if it is too large or small */
327         tao->trust = PetscMax(tao->trust, tl->min_radius);
328         tao->trust = PetscMin(tao->trust, tl->max_radius);
329       } else {
330         /* The direction was bad; set radius to default value and re-solve
331            the trust-region subproblem to get a direction */
332         tao->trust = tao->trust0;
333 
334         /* Modify the radius if it is too large or small */
335         tao->trust = PetscMax(tao->trust, tl->min_radius);
336         tao->trust = PetscMin(tao->trust, tl->max_radius);
337 
338         ierr = KSPCGSetRadius(tao->ksp,tl->max_radius);CHKERRQ(ierr);
339         ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
340         ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
341         tao->ksp_its+=its;
342         tao->ksp_tot_its+=its;
343         ierr = KSPCGGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
344 
345         if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
346       }
347     }
348 
349     ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
350     ierr = KSPGetConvergedReason(tao->ksp, &ksp_reason);CHKERRQ(ierr);
351     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NTL_PC_BFGS == tl->pc_type) && (bfgsUpdates > 1)) {
352       /* Preconditioner is numerically indefinite; reset the
353          approximate if using BFGS preconditioning. */
354 
355       delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
356       ierr = MatLMVMSetJ0Scale(tl->M, delta);CHKERRQ(ierr);
357       ierr = MatLMVMReset(tl->M, PETSC_FALSE);CHKERRQ(ierr);
358       ierr = MatLMVMUpdate(tl->M, tao->solution, tao->gradient);CHKERRQ(ierr);
359       bfgsUpdates = 1;
360     }
361 
362     /* Check trust-region reduction conditions */
363     tr_reject = 0;
364     if (NTL_UPDATE_REDUCTION == tl->update_type) {
365       /* Get predicted reduction */
366       ierr = KSPCGGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
367       if (prered >= 0.0) {
368         /* The predicted reduction has the wrong sign.  This cannot
369            happen in infinite precision arithmetic.  Step should
370            be rejected! */
371         tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
372         tr_reject = 1;
373       } else {
374         /* Compute trial step and function value */
375         ierr = VecCopy(tao->solution, tl->W);CHKERRQ(ierr);
376         ierr = VecAXPY(tl->W, 1.0, tao->stepdirection);CHKERRQ(ierr);
377         ierr = TaoComputeObjective(tao, tl->W, &ftrial);CHKERRQ(ierr);
378 
379         if (PetscIsInfOrNanReal(ftrial)) {
380           tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
381           tr_reject = 1;
382         } else {
383           /* Compute and actual reduction */
384           actred = f - ftrial;
385           prered = -prered;
386           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
387               (PetscAbsScalar(prered) <= tl->epsilon)) {
388             kappa = 1.0;
389           } else {
390             kappa = actred / prered;
391           }
392 
393           /* Accept of reject the step and update radius */
394           if (kappa < tl->eta1) {
395             /* Reject the step */
396             tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
397             tr_reject = 1;
398           } else {
399             /* Accept the step */
400             if (kappa < tl->eta2) {
401               /* Marginal bad step */
402               tao->trust = tl->alpha2 * PetscMin(tao->trust, norm_d);
403             } else if (kappa < tl->eta3) {
404               /* Reasonable step */
405               tao->trust = tl->alpha3 * tao->trust;
406             } else if (kappa < tl->eta4) {
407               /* Good step */
408               tao->trust = PetscMax(tl->alpha4 * norm_d, tao->trust);
409             } else {
410               /* Very good step */
411               tao->trust = PetscMax(tl->alpha5 * norm_d, tao->trust);
412             }
413           }
414         }
415       }
416     } else {
417       /* Get predicted reduction */
418       ierr = KSPCGGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
419       if (prered >= 0.0) {
420         /* The predicted reduction has the wrong sign.  This cannot
421            happen in infinite precision arithmetic.  Step should
422            be rejected! */
423         tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
424         tr_reject = 1;
425       } else {
426         ierr = VecCopy(tao->solution, tl->W);CHKERRQ(ierr);
427         ierr = VecAXPY(tl->W, 1.0, tao->stepdirection);CHKERRQ(ierr);
428         ierr = TaoComputeObjective(tao, tl->W, &ftrial);CHKERRQ(ierr);
429         if (PetscIsInfOrNanReal(ftrial)) {
430           tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
431           tr_reject = 1;
432         } else {
433           ierr = VecDot(tao->gradient, tao->stepdirection, &gdx);CHKERRQ(ierr);
434 
435           actred = f - ftrial;
436           prered = -prered;
437           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
438               (PetscAbsScalar(prered) <= tl->epsilon)) {
439             kappa = 1.0;
440           } else {
441             kappa = actred / prered;
442           }
443 
444           tau_1 = tl->theta * gdx / (tl->theta * gdx - (1.0 - tl->theta) * prered + actred);
445           tau_2 = tl->theta * gdx / (tl->theta * gdx + (1.0 + tl->theta) * prered - actred);
446           tau_min = PetscMin(tau_1, tau_2);
447           tau_max = PetscMax(tau_1, tau_2);
448 
449           if (kappa >= 1.0 - tl->mu1) {
450             /* Great agreement; accept step and update radius */
451             if (tau_max < 1.0) {
452               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
453             } else if (tau_max > tl->gamma4) {
454               tao->trust = PetscMax(tao->trust, tl->gamma4 * norm_d);
455             } else {
456               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
457             }
458           } else if (kappa >= 1.0 - tl->mu2) {
459             /* Good agreement */
460 
461             if (tau_max < tl->gamma2) {
462               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
463             } else if (tau_max > tl->gamma3) {
464               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
465             } else if (tau_max < 1.0) {
466               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
467             } else {
468               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
469             }
470           } else {
471             /* Not good agreement */
472             if (tau_min > 1.0) {
473               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
474             } else if (tau_max < tl->gamma1) {
475               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
476             } else if ((tau_min < tl->gamma1) && (tau_max >= 1.0)) {
477               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
478             } else if ((tau_1 >= tl->gamma1) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1) || (tau_2 >= 1.0))) {
479               tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
480             } else if ((tau_2 >= tl->gamma1) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1) || (tau_2 >= 1.0))) {
481               tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
482             } else {
483               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
484             }
485             tr_reject = 1;
486           }
487         }
488       }
489     }
490 
491     if (tr_reject) {
492       /* The trust-region constraints rejected the step.  Apply a linesearch.
493          Check for descent direction. */
494       ierr = VecDot(tao->stepdirection, tao->gradient, &gdx);CHKERRQ(ierr);
495       if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
496         /* Newton step is not descent or direction produced Inf or NaN */
497 
498         if (NTL_PC_BFGS != tl->pc_type) {
499           /* We don't have the bfgs matrix around and updated
500              Must use gradient direction in this case */
501           ierr = VecCopy(tao->gradient, tao->stepdirection);CHKERRQ(ierr);
502           ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
503           ++tl->grad;
504           stepType = NTL_GRADIENT;
505         } else {
506           /* Attempt to use the BFGS direction */
507           ierr = MatSolve(tl->M, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
508           ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
509 
510           /* Check for success (descent direction) */
511           ierr = VecDot(tao->stepdirection, tao->gradient, &gdx);CHKERRQ(ierr);
512           if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
513             /* BFGS direction is not descent or direction produced not a number
514                We can assert bfgsUpdates > 1 in this case because
515                the first solve produces the scaled gradient direction,
516                which is guaranteed to be descent */
517 
518             /* Use steepest descent direction (scaled) */
519             delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
520             ierr = MatLMVMSetJ0Scale(tl->M, delta);CHKERRQ(ierr);
521             ierr = MatLMVMReset(tl->M, PETSC_FALSE);CHKERRQ(ierr);
522             ierr = MatLMVMUpdate(tl->M, tao->solution, tao->gradient);CHKERRQ(ierr);
523             ierr = MatSolve(tl->M, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
524             ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
525 
526             bfgsUpdates = 1;
527             ++tl->sgrad;
528             stepType = NTL_SCALED_GRADIENT;
529           } else {
530             if (1 == bfgsUpdates) {
531               /* The first BFGS direction is always the scaled gradient */
532               ++tl->sgrad;
533               stepType = NTL_SCALED_GRADIENT;
534             } else {
535               ++tl->bfgs;
536               stepType = NTL_BFGS;
537             }
538           }
539         }
540       } else {
541         /* Computed Newton step is descent */
542         ++tl->newt;
543         stepType = NTL_NEWTON;
544       }
545 
546       /* Perform the linesearch */
547       fold = f;
548       ierr = VecCopy(tao->solution, tl->Xold);CHKERRQ(ierr);
549       ierr = VecCopy(tao->gradient, tl->Gold);CHKERRQ(ierr);
550 
551       step = 1.0;
552       ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);CHKERRQ(ierr);
553       ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
554 
555       while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NTL_GRADIENT) {      /* Linesearch failed */
556         /* Linesearch failed */
557         f = fold;
558         ierr = VecCopy(tl->Xold, tao->solution);CHKERRQ(ierr);
559         ierr = VecCopy(tl->Gold, tao->gradient);CHKERRQ(ierr);
560 
561         switch(stepType) {
562         case NTL_NEWTON:
563           /* Failed to obtain acceptable iterate with Newton step */
564 
565           if (NTL_PC_BFGS != tl->pc_type) {
566             /* We don't have the bfgs matrix around and being updated
567                Must use gradient direction in this case */
568             ierr = VecCopy(tao->gradient, tao->stepdirection);CHKERRQ(ierr);
569             ++tl->grad;
570             stepType = NTL_GRADIENT;
571           } else {
572             /* Attempt to use the BFGS direction */
573             ierr = MatSolve(tl->M, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
574 
575 
576             /* Check for success (descent direction) */
577             ierr = VecDot(tao->stepdirection, tao->gradient, &gdx);CHKERRQ(ierr);
578             if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
579               /* BFGS direction is not descent or direction produced
580                  not a number.  We can assert bfgsUpdates > 1 in this case
581                  Use steepest descent direction (scaled) */
582 
583               delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
584               ierr = MatLMVMSetJ0Scale(tl->M, delta);CHKERRQ(ierr);
585               ierr = MatLMVMReset(tl->M, PETSC_FALSE);CHKERRQ(ierr);
586               ierr = MatLMVMUpdate(tl->M, tao->solution, tao->gradient);CHKERRQ(ierr);
587               ierr = MatSolve(tl->M, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
588 
589               bfgsUpdates = 1;
590               ++tl->sgrad;
591               stepType = NTL_SCALED_GRADIENT;
592             } else {
593               if (1 == bfgsUpdates) {
594                 /* The first BFGS direction is always the scaled gradient */
595                 ++tl->sgrad;
596                 stepType = NTL_SCALED_GRADIENT;
597               } else {
598                 ++tl->bfgs;
599                 stepType = NTL_BFGS;
600               }
601             }
602           }
603           break;
604 
605         case NTL_BFGS:
606           /* Can only enter if pc_type == NTL_PC_BFGS
607              Failed to obtain acceptable iterate with BFGS step
608              Attempt to use the scaled gradient direction */
609 
610           delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
611           ierr = MatLMVMSetJ0Scale(tl->M, delta);CHKERRQ(ierr);
612           ierr = MatLMVMReset(tl->M, PETSC_FALSE);CHKERRQ(ierr);
613           ierr = MatLMVMUpdate(tl->M, tao->solution, tao->gradient);CHKERRQ(ierr);
614           ierr = MatSolve(tl->M, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
615 
616           bfgsUpdates = 1;
617           ++tl->sgrad;
618           stepType = NTL_SCALED_GRADIENT;
619           break;
620 
621         case NTL_SCALED_GRADIENT:
622           /* Can only enter if pc_type == NTL_PC_BFGS
623              The scaled gradient step did not produce a new iterate;
624              attemp to use the gradient direction.
625              Need to make sure we are not using a different diagonal scaling */
626           ierr = MatLMVMSetJ0Scale(tl->M, 1.0);CHKERRQ(ierr);
627           ierr = MatLMVMReset(tl->M, PETSC_FALSE);CHKERRQ(ierr);
628           ierr = MatLMVMUpdate(tl->M, tao->solution, tao->gradient);CHKERRQ(ierr);
629           ierr = MatSolve(tl->M, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
630 
631           bfgsUpdates = 1;
632           ++tl->grad;
633           stepType = NTL_GRADIENT;
634           break;
635         }
636         ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
637 
638         /* This may be incorrect; linesearch has values for stepmax and stepmin
639            that should be reset. */
640         step = 1.0;
641         ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);CHKERRQ(ierr);
642         ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
643       }
644 
645       if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
646         /* Failed to find an improving point */
647         f = fold;
648         ierr = VecCopy(tl->Xold, tao->solution);CHKERRQ(ierr);
649         ierr = VecCopy(tl->Gold, tao->gradient);CHKERRQ(ierr);
650         tao->trust = 0.0;
651         step = 0.0;
652         tao->reason = TAO_DIVERGED_LS_FAILURE;
653         break;
654       } else if (stepType == NTL_NEWTON) {
655         if (step < tl->nu1) {
656           /* Very bad step taken; reduce radius */
657           tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
658         } else if (step < tl->nu2) {
659           /* Reasonably bad step taken; reduce radius */
660           tao->trust = tl->omega2 * PetscMin(norm_d, tao->trust);
661         } else if (step < tl->nu3) {
662           /* Reasonable step was taken; leave radius alone */
663           if (tl->omega3 < 1.0) {
664             tao->trust = tl->omega3 * PetscMin(norm_d, tao->trust);
665           } else if (tl->omega3 > 1.0) {
666             tao->trust = PetscMax(tl->omega3 * norm_d, tao->trust);
667           }
668         } else if (step < tl->nu4) {
669           /* Full step taken; increase the radius */
670           tao->trust = PetscMax(tl->omega4 * norm_d, tao->trust);
671         } else {
672           /* More than full step taken; increase the radius */
673           tao->trust = PetscMax(tl->omega5 * norm_d, tao->trust);
674         }
675       } else {
676         /* Newton step was not good; reduce the radius */
677         tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
678       }
679     } else {
680       /* Trust-region step is accepted */
681       ierr = VecCopy(tl->W, tao->solution);CHKERRQ(ierr);
682       f = ftrial;
683       ierr = TaoComputeGradient(tao, tao->solution, tao->gradient);CHKERRQ(ierr);
684       ++tl->ntrust;
685     }
686 
687     /* The radius may have been increased; modify if it is too large */
688     tao->trust = PetscMin(tao->trust, tl->max_radius);
689 
690     /* Check for converged */
691     ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);
692     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Not-a-Number");
693     needH = 1;
694 
695     ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
696     ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);CHKERRQ(ierr);
697     ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
698   }
699   PetscFunctionReturn(0);
700 }
701 
702 /* ---------------------------------------------------------- */
703 static PetscErrorCode TaoSetUp_NTL(Tao tao)
704 {
705   TAO_NTL        *tl = (TAO_NTL *)tao->data;
706   PetscErrorCode ierr;
707 
708   PetscFunctionBegin;
709   if (!tao->gradient) {ierr = VecDuplicate(tao->solution, &tao->gradient);CHKERRQ(ierr); }
710   if (!tao->stepdirection) {ierr = VecDuplicate(tao->solution, &tao->stepdirection);CHKERRQ(ierr);}
711   if (!tl->W) { ierr = VecDuplicate(tao->solution, &tl->W);CHKERRQ(ierr);}
712   if (!tl->Xold) { ierr = VecDuplicate(tao->solution, &tl->Xold);CHKERRQ(ierr);}
713   if (!tl->Gold) { ierr = VecDuplicate(tao->solution, &tl->Gold);CHKERRQ(ierr);}
714   tl->Diag = 0;
715   tl->M = 0;
716   PetscFunctionReturn(0);
717 }
718 
719 /*------------------------------------------------------------*/
720 static PetscErrorCode TaoDestroy_NTL(Tao tao)
721 {
722   TAO_NTL        *tl = (TAO_NTL *)tao->data;
723   PetscErrorCode ierr;
724 
725   PetscFunctionBegin;
726   if (tao->setupcalled) {
727     ierr = VecDestroy(&tl->W);CHKERRQ(ierr);
728     ierr = VecDestroy(&tl->Xold);CHKERRQ(ierr);
729     ierr = VecDestroy(&tl->Gold);CHKERRQ(ierr);
730   }
731   ierr = VecDestroy(&tl->Diag);CHKERRQ(ierr);
732   ierr = MatDestroy(&tl->M);CHKERRQ(ierr);
733   ierr = PetscFree(tao->data);CHKERRQ(ierr);
734   PetscFunctionReturn(0);
735 }
736 
737 /*------------------------------------------------------------*/
/* Queries the options database for all NTL tunables, then forwards option
   processing to the attached line search and KSP.  Each PetscOptionsReal call
   uses the current struct value as the displayed default, so values set
   programmatically before this runs are preserved unless overridden. */
static PetscErrorCode TaoSetFromOptions_NTL(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_NTL        *tl = (TAO_NTL *)tao->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"Newton trust region with line search method for unconstrained optimization");CHKERRQ(ierr);
  /* Algorithmic choices: preconditioner type, BFGS scaling, and the radius
     initialization/update strategies (string lists defined at the top of this file) */
  ierr = PetscOptionsEList("-tao_ntl_pc_type", "pc type", "", NTL_PC, NTL_PC_TYPES, NTL_PC[tl->pc_type], &tl->pc_type,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-tao_ntl_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tl->bfgs_scale_type], &tl->bfgs_scale_type,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-tao_ntl_init_type", "radius initialization type", "", NTL_INIT, NTL_INIT_TYPES, NTL_INIT[tl->init_type], &tl->init_type,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-tao_ntl_update_type", "radius update type", "", NTL_UPDATE, NTL_UPDATE_TYPES, NTL_UPDATE[tl->update_type], &tl->update_type,NULL);CHKERRQ(ierr);
  /* Reduction-based radius update: acceptance thresholds (eta) and radius multipliers (alpha) */
  ierr = PetscOptionsReal("-tao_ntl_eta1", "poor steplength; reduce radius", "", tl->eta1, &tl->eta1,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_eta2", "reasonable steplength; leave radius alone", "", tl->eta2, &tl->eta2,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_eta3", "good steplength; increase radius", "", tl->eta3, &tl->eta3,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_eta4", "excellent steplength; greatly increase radius", "", tl->eta4, &tl->eta4,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_alpha1", "", "", tl->alpha1, &tl->alpha1,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_alpha2", "", "", tl->alpha2, &tl->alpha2,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_alpha3", "", "", tl->alpha3, &tl->alpha3,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_alpha4", "", "", tl->alpha4, &tl->alpha4,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_alpha5", "", "", tl->alpha5, &tl->alpha5,NULL);CHKERRQ(ierr);
  /* Steplength-based radius update: thresholds (nu) and multipliers (omega) */
  ierr = PetscOptionsReal("-tao_ntl_nu1", "poor steplength; reduce radius", "", tl->nu1, &tl->nu1,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_nu2", "reasonable steplength; leave radius alone", "", tl->nu2, &tl->nu2,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_nu3", "good steplength; increase radius", "", tl->nu3, &tl->nu3,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_nu4", "excellent steplength; greatly increase radius", "", tl->nu4, &tl->nu4,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_omega1", "", "", tl->omega1, &tl->omega1,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_omega2", "", "", tl->omega2, &tl->omega2,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_omega3", "", "", tl->omega3, &tl->omega3,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_omega4", "", "", tl->omega4, &tl->omega4,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_omega5", "", "", tl->omega5, &tl->omega5,NULL);CHKERRQ(ierr);
  /* Interpolation-based radius *initialization* parameters (the "_i" suffix) */
  ierr = PetscOptionsReal("-tao_ntl_mu1_i", "", "", tl->mu1_i, &tl->mu1_i,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_mu2_i", "", "", tl->mu2_i, &tl->mu2_i,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma1_i", "", "", tl->gamma1_i, &tl->gamma1_i,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma2_i", "", "", tl->gamma2_i, &tl->gamma2_i,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma3_i", "", "", tl->gamma3_i, &tl->gamma3_i,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma4_i", "", "", tl->gamma4_i, &tl->gamma4_i,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_theta_i", "", "", tl->theta_i, &tl->theta_i,NULL);CHKERRQ(ierr);
  /* Interpolation-based radius *update* parameters */
  ierr = PetscOptionsReal("-tao_ntl_mu1", "", "", tl->mu1, &tl->mu1,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_mu2", "", "", tl->mu2, &tl->mu2,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma1", "", "", tl->gamma1, &tl->gamma1,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma2", "", "", tl->gamma2, &tl->gamma2,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma3", "", "", tl->gamma3, &tl->gamma3,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_gamma4", "", "", tl->gamma4, &tl->gamma4,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_theta", "", "", tl->theta, &tl->theta,NULL);CHKERRQ(ierr);
  /* Radius safeguards and the reduction-comparison tolerance */
  ierr = PetscOptionsReal("-tao_ntl_min_radius", "lower bound on initial radius", "", tl->min_radius, &tl->min_radius,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_max_radius", "upper bound on radius", "", tl->max_radius, &tl->max_radius,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_ntl_epsilon", "tolerance used when computing actual and predicted reduction", "", tl->epsilon, &tl->epsilon,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  /* Propagate option processing to the sub-objects owned by this solver */
  ierr = TaoLineSearchSetFromOptions(tao->linesearch);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(tao->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
789 
790 /*------------------------------------------------------------*/
791 static PetscErrorCode TaoView_NTL(Tao tao, PetscViewer viewer)
792 {
793   TAO_NTL        *tl = (TAO_NTL *)tao->data;
794   PetscInt       nrejects;
795   PetscBool      isascii;
796   PetscErrorCode ierr;
797 
798   PetscFunctionBegin;
799   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);CHKERRQ(ierr);
800   if (isascii) {
801     ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
802     if (NTL_PC_BFGS == tl->pc_type && tl->M) {
803       ierr = MatLMVMGetRejectCount(tl->M, &nrejects);CHKERRQ(ierr);
804       ierr = PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects);CHKERRQ(ierr);
805     }
806     ierr = PetscViewerASCIIPrintf(viewer, "Trust-region steps: %D\n", tl->ntrust);CHKERRQ(ierr);
807     ierr = PetscViewerASCIIPrintf(viewer, "Newton search steps: %D\n", tl->newt);CHKERRQ(ierr);
808     ierr = PetscViewerASCIIPrintf(viewer, "BFGS search steps: %D\n", tl->bfgs);CHKERRQ(ierr);
809     ierr = PetscViewerASCIIPrintf(viewer, "Scaled gradient search steps: %D\n", tl->sgrad);CHKERRQ(ierr);
810     ierr = PetscViewerASCIIPrintf(viewer, "Gradient search steps: %D\n", tl->grad);CHKERRQ(ierr);
811     ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
812   }
813   PetscFunctionReturn(0);
814 }
815 
816 /* ---------------------------------------------------------- */
817 /*MC
  TAONTL - Newton's method with trust region and linesearch
819   for unconstrained minimization.
820   At each iteration, the Newton trust region method solves the system for d
821   and performs a line search in the d direction:
822 
823             min_d  .5 dT Hk d + gkT d,  s.t.   ||d|| < Delta_k
824 
825   Options Database Keys:
826 + -tao_ntl_pc_type - "none","ahess","bfgs","petsc"
827 . -tao_ntl_bfgs_scale_type - type of scaling with bfgs pc, "ahess" or "bfgs"
828 . -tao_ntl_init_type - "constant","direction","interpolation"
829 . -tao_ntl_update_type - "reduction","interpolation"
830 . -tao_ntl_min_radius - lower bound on trust region radius
831 . -tao_ntl_max_radius - upper bound on trust region radius
832 . -tao_ntl_epsilon - tolerance for accepting actual / predicted reduction
833 . -tao_ntl_mu1_i - mu1 interpolation init factor
834 . -tao_ntl_mu2_i - mu2 interpolation init factor
835 . -tao_ntl_gamma1_i - gamma1 interpolation init factor
836 . -tao_ntl_gamma2_i - gamma2 interpolation init factor
837 . -tao_ntl_gamma3_i - gamma3 interpolation init factor
838 . -tao_ntl_gamma4_i - gamma4 interpolation init factor
. -tao_ntl_theta_i - theta1 interpolation init factor
840 . -tao_ntl_eta1 - eta1 reduction update factor
841 . -tao_ntl_eta2 - eta2 reduction update factor
842 . -tao_ntl_eta3 - eta3 reduction update factor
843 . -tao_ntl_eta4 - eta4 reduction update factor
844 . -tao_ntl_alpha1 - alpha1 reduction update factor
845 . -tao_ntl_alpha2 - alpha2 reduction update factor
846 . -tao_ntl_alpha3 - alpha3 reduction update factor
847 . -tao_ntl_alpha4 - alpha4 reduction update factor
. -tao_ntl_alpha5 - alpha5 reduction update factor
849 . -tao_ntl_mu1 - mu1 interpolation update
850 . -tao_ntl_mu2 - mu2 interpolation update
. -tao_ntl_gamma1 - gamma1 interpolation update
. -tao_ntl_gamma2 - gamma2 interpolation update
. -tao_ntl_gamma3 - gamma3 interpolation update
854 . -tao_ntl_gamma4 - gamma4 interpolation update
855 - -tao_ntl_theta - theta1 interpolation update
856 
857   Level: beginner
858 M*/
859 
/* Constructor for the TAONTL solver: installs the NTL implementations of the
   Tao operations, allocates and fills the TAO_NTL context with default
   parameter values, and creates the line search (More'-Thuente) and KSP
   (STCG) sub-objects sharing the Tao's communicator and options prefix. */
PETSC_EXTERN PetscErrorCode TaoCreate_NTL(Tao tao)
{
  TAO_NTL        *tl;
  PetscErrorCode ierr;
  const char     *morethuente_type = TAOLINESEARCHMT;

  PetscFunctionBegin;
  ierr = PetscNewLog(tao,&tl);CHKERRQ(ierr);
  tao->ops->setup = TaoSetUp_NTL;
  tao->ops->solve = TaoSolve_NTL;
  tao->ops->view = TaoView_NTL;
  tao->ops->setfromoptions = TaoSetFromOptions_NTL;
  tao->ops->destroy = TaoDestroy_NTL;

  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 50;
  if (!tao->trust0_changed) tao->trust0 = 100.0;

  tao->data = (void*)tl;

  /* Default values for trust-region radius update based on steplength */
  tl->nu1 = 0.25;
  tl->nu2 = 0.50;
  tl->nu3 = 1.00;
  tl->nu4 = 1.25;

  tl->omega1 = 0.25;
  tl->omega2 = 0.50;
  tl->omega3 = 1.00;
  tl->omega4 = 2.00;
  tl->omega5 = 4.00;

  /* Default values for trust-region radius update based on reduction */
  tl->eta1 = 1.0e-4;
  tl->eta2 = 0.25;
  tl->eta3 = 0.50;
  tl->eta4 = 0.90;

  tl->alpha1 = 0.25;
  tl->alpha2 = 0.50;
  tl->alpha3 = 1.00;
  tl->alpha4 = 2.00;
  tl->alpha5 = 4.00;

  /* Default values for trust-region radius update based on interpolation */
  tl->mu1 = 0.10;
  tl->mu2 = 0.50;

  tl->gamma1 = 0.25;
  tl->gamma2 = 0.50;
  tl->gamma3 = 2.00;
  tl->gamma4 = 4.00;

  tl->theta = 0.05;

  /* Default values for trust region initialization based on interpolation */
  tl->mu1_i = 0.35;
  tl->mu2_i = 0.50;

  tl->gamma1_i = 0.0625;
  tl->gamma2_i = 0.5;
  tl->gamma3_i = 2.0;
  tl->gamma4_i = 5.0;

  tl->theta_i = 0.25;

  /* Remaining parameters */
  tl->min_radius = 1.0e-10;
  tl->max_radius = 1.0e10;
  tl->epsilon = 1.0e-6;

  /* Strategy defaults; all are overridable via TaoSetFromOptions_NTL */
  tl->pc_type         = NTL_PC_BFGS;
  tl->bfgs_scale_type = BFGS_SCALE_AHESS;
  tl->init_type       = NTL_INIT_INTERPOLATION;
  tl->update_type     = NTL_UPDATE_REDUCTION;

  /* More'-Thuente line search; per the file header comment it keeps the
     BFGS preconditioner positive definite */
  ierr = TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);CHKERRQ(ierr);
  ierr = TaoLineSearchSetType(tao->linesearch, morethuente_type);CHKERRQ(ierr);
  ierr = TaoLineSearchUseTaoRoutines(tao->linesearch, tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);CHKERRQ(ierr);
  /* Krylov solver for the trust-region subproblem; defaults to Steihaug-Toint CG */
  ierr = KSPCreate(((PetscObject)tao)->comm,&tao->ksp);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1);CHKERRQ(ierr);
  ierr = KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);CHKERRQ(ierr);
  ierr = KSPSetType(tao->ksp,KSPCGSTCG);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
947