#include <petsctaolinesearch.h>
#include <../src/tao/unconstrained/impls/lmvm/lmvm.h>

#define LMVM_STEP_BFGS     0
#define LMVM_STEP_GRAD     1

static PetscErrorCode TaoSolve_LMVM(Tao tao)
{
  TAO_LMVM                     *lmP = (TAO_LMVM *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscInt                     stepType = LMVM_STEP_GRAD, nupdates;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;

  if (tao->XL || tao->XU || tao->ops->computebounds) {
    PetscCall(PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by the LMVM algorithm\n"));
  }

  /*  Check convergence criteria */
  PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient));
  PetscCall(TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm));

  PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm),PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  PetscCall(TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its));
  PetscCall(TaoMonitor(tao,tao->niter,f,gnorm,0.0,step));
  PetscCall((*tao->ops->convergencetest)(tao,tao->cnvP));
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /*  Unless recycling is enabled, zero the step counters and start the LMVM matrix from scratch */
  if (!lmP->recycle) {
    lmP->bfgs = 0;
    lmP->grad = 0;
    PetscCall(MatLMVMReset(lmP->M, PETSC_FALSE));
  }

  /*  Have not converged; continue with Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) PetscCall((*tao->ops->update)(tao, tao->niter, tao->user_update));

    /*  Compute direction */
    if (lmP->H0) {
      PetscCall(MatLMVMSetJ0(lmP->M, lmP->H0));
      stepType = LMVM_STEP_BFGS;
    }
    PetscCall(MatLMVMUpdate(lmP->M,tao->solution,tao->gradient));
    PetscCall(MatSolve(lmP->M, tao->gradient, lmP->D));
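    /* If the approximation has accumulated at least one quasi-Newton update,
       the solve above applied BFGS corrections rather than only the (scaled)
       initial Hessian, so classify the result as a BFGS step */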
    PetscCall(MatLMVMGetUpdateCount(lmP->M, &nupdates));
    if (nupdates > 0) stepType = LMVM_STEP_BFGS;

    /*  Check for success (descent direction) */
    PetscCall(VecDot(lmP->D, tao->gradient, &gdx));
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The direction is not a descent direction, or the solve produced
         Inf or NaN.  We can assert nupdates > 0 in this case because the
         first solve produces the scaled gradient direction, which is
         guaranteed to be a descent direction.

         Fall back to the (scaled) steepest descent direction
      */

      PetscCall(MatLMVMReset(lmP->M, PETSC_FALSE));
      PetscCall(MatLMVMClearJ0(lmP->M));
      PetscCall(MatLMVMUpdate(lmP->M, tao->solution, tao->gradient));
      PetscCall(MatSolve(lmP->M,tao->gradient, lmP->D));

      /* After a reset the direction is a scaled gradient step, which
         cannot contain Inf or NaN, so there is no need to re-check it */
      stepType = LMVM_STEP_GRAD;
    }
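    /* MatSolve computed D = M^{-1} g; negate it so D is a descent direction */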
    PetscCall(VecScale(lmP->D, -1.0));

    /*  Perform the linesearch */
    fold = f;
    PetscCall(VecCopy(tao->solution, lmP->Xold));
    PetscCall(VecCopy(tao->gradient, lmP->Gold));

    PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step,&ls_status));
    PetscCall(TaoAddLineSearchCounts(tao));

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER && (stepType != LMVM_STEP_GRAD)) {
      /*  Reset factors and use scaled gradient step */
      f = fold;
      PetscCall(VecCopy(lmP->Xold, tao->solution));
      PetscCall(VecCopy(lmP->Gold, tao->gradient));

      /*  Failed to obtain acceptable iterate with BFGS step */
      /*  Attempt to use the scaled gradient direction */

      PetscCall(MatLMVMReset(lmP->M, PETSC_FALSE));
      PetscCall(MatLMVMClearJ0(lmP->M));
      PetscCall(MatLMVMUpdate(lmP->M, tao->solution, tao->gradient));
      PetscCall(MatSolve(lmP->M, tao->gradient, lmP->D));

      /* After a reset the direction is a scaled gradient step, which
         cannot contain Inf or NaN, so there is no need to re-check it */
      stepType = LMVM_STEP_GRAD;
      PetscCall(VecScale(lmP->D, -1.0));

      /*  Perform the linesearch */
      PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status));
      PetscCall(TaoAddLineSearchCounts(tao));
    }

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /*  Failed to find an improving point */
      f = fold;
      PetscCall(VecCopy(lmP->Xold, tao->solution));
      PetscCall(VecCopy(lmP->Gold, tao->gradient));
      step = 0.0;
      tao->reason = TAO_DIVERGED_LS_FAILURE;
    } else {
      /* LS found valid step, so tally up step type */
      switch (stepType) {
      case LMVM_STEP_BFGS:
        ++lmP->bfgs;
        break;
      case LMVM_STEP_GRAD:
        ++lmP->grad;
        break;
      default:
        break;
      }
      /*  Compute new gradient norm */
      PetscCall(TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm));
    }

    /* Check convergence */
    tao->niter++;
    PetscCall(TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its));
    PetscCall(TaoMonitor(tao,tao->niter,f,gnorm,0.0,step));
    PetscCall((*tao->ops->convergencetest)(tao,tao->cnvP));
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode TaoSetUp_LMVM(Tao tao)
{
  TAO_LMVM       *lmP = (TAO_LMVM *)tao->data;
  PetscInt       n,N;
  PetscBool      is_set,is_spd;

  PetscFunctionBegin;
  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) PetscCall(VecDuplicate(tao->solution,&tao->gradient));
  if (!tao->stepdirection) PetscCall(VecDuplicate(tao->solution,&tao->stepdirection));
  if (!lmP->D) PetscCall(VecDuplicate(tao->solution,&lmP->D));
  if (!lmP->Xold) PetscCall(VecDuplicate(tao->solution,&lmP->Xold));
  if (!lmP->Gold) PetscCall(VecDuplicate(tao->solution,&lmP->Gold));

  /*  Create matrix for the limited memory approximation */
  PetscCall(VecGetLocalSize(tao->solution,&n));
  PetscCall(VecGetSize(tao->solution,&N));
  PetscCall(MatSetSizes(lmP->M, n, n, N, N));
  PetscCall(MatLMVMAllocate(lmP->M,tao->solution,tao->gradient));
  PetscCall(MatIsSPDKnown(lmP->M, &is_set,&is_spd));
  PetscCheck(is_set && is_spd,PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix is not symmetric positive-definite.");

  /* If the user has provided an initial Hessian approximation H0, install it as J0 for the LMVM matrix */
  if (lmP->H0) PetscCall(MatLMVMSetJ0(lmP->M, lmP->H0));
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_LMVM(Tao tao)
{
  TAO_LMVM       *lmP = (TAO_LMVM *)tao->data;

  PetscFunctionBegin;
  if (tao->setupcalled) {
    PetscCall(VecDestroy(&lmP->Xold));
    PetscCall(VecDestroy(&lmP->Gold));
    PetscCall(VecDestroy(&lmP->D));
  }
  PetscCall(MatDestroy(&lmP->M));
  if (lmP->H0) PetscCall(PetscObjectDereference((PetscObject)lmP->H0));
  PetscCall(PetscFree(tao->data));
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_LMVM(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_LMVM       *lm = (TAO_LMVM *)tao->data;

  PetscFunctionBegin;
  PetscOptionsHeadBegin(PetscOptionsObject,"Limited-memory variable-metric method for unconstrained optimization");
  PetscCall(PetscOptionsBool("-tao_lmvm_recycle","enable recycling of the BFGS matrix between subsequent TaoSolve() calls","",lm->recycle,&lm->recycle,NULL));
  PetscCall(TaoLineSearchSetFromOptions(tao->linesearch));
  PetscCall(MatSetFromOptions(lm->M));
  PetscOptionsHeadEnd();
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoView_LMVM(Tao tao, PetscViewer viewer)
{
  TAO_LMVM       *lm = (TAO_LMVM *)tao->data;
  PetscBool      isascii;
  PetscInt       recycled_its;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
  if (isascii) {
    PetscCall(PetscViewerASCIIPrintf(viewer, "  Gradient steps: %" PetscInt_FMT "\n", lm->grad));
    if (lm->recycle) {
      PetscCall(PetscViewerASCIIPrintf(viewer, "  Recycle: on\n"));
      recycled_its = lm->bfgs + lm->grad;
      PetscCall(PetscViewerASCIIPrintf(viewer, "  Total recycled iterations: %" PetscInt_FMT "\n", recycled_its));
    }
  }
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------- */

/*MC
  TAOLMVM - Limited Memory Variable Metric method is a quasi-Newton
  optimization solver for unconstrained minimization. It solves
  the Newton step
          Hk dk = -gk

  using an approximation Bk in place of Hk, where Bk is composed using
  the BFGS update formula. A More-Thuente line search is then used
  to compute the step length in the direction dk.

  Options Database Keys:
+   -tao_lmvm_recycle - enable recycling LMVM updates between TaoSolve() calls
-   -tao_lmvm_no_scale - (developer) disables diagonal Broyden scaling on the LMVM approximation

  Level: beginner
M*/
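
/* For reference, with sk = x_{k+1} - x_k and yk = g_{k+1} - g_k, the standard
   BFGS update referred to above is

     B_{k+1} = Bk - (Bk sk sk^T Bk)/(sk^T Bk sk) + (yk yk^T)/(yk^T sk)

   A minimal usage sketch, assuming the PETSc 3.17+ Tao interface; here
   FormFunctionGradient and the user context are hypothetical placeholders,
   x is the user's solution Vec, and error handling is abbreviated:

     Tao tao;
     PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
     PetscCall(TaoSetType(tao, TAOLMVM));
     PetscCall(TaoSetSolution(tao, x));
     PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user));
     PetscCall(TaoSetFromOptions(tao));
     PetscCall(TaoSolve(tao));
     PetscCall(TaoDestroy(&tao));
*/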

PETSC_EXTERN PetscErrorCode TaoCreate_LMVM(Tao tao)
{
  TAO_LMVM       *lmP;
  const char     *morethuente_type = TAOLINESEARCHMT;

  PetscFunctionBegin;
  tao->ops->setup = TaoSetUp_LMVM;
  tao->ops->solve = TaoSolve_LMVM;
  tao->ops->view = TaoView_LMVM;
  tao->ops->setfromoptions = TaoSetFromOptions_LMVM;
  tao->ops->destroy = TaoDestroy_LMVM;

  PetscCall(PetscNewLog(tao,&lmP));
  lmP->D = NULL;
  lmP->M = NULL;
  lmP->Xold = NULL;
  lmP->Gold = NULL;
  lmP->H0   = NULL;
  lmP->recycle = PETSC_FALSE;

  tao->data = (void*)lmP;
  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  PetscCall(TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch));
  PetscCall(PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1));
  PetscCall(TaoLineSearchSetType(tao->linesearch,morethuente_type));
  PetscCall(TaoLineSearchUseTaoRoutines(tao->linesearch,tao));
  PetscCall(TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix));

  PetscCall(KSPInitializePackage());
  PetscCall(MatCreate(((PetscObject)tao)->comm, &lmP->M));
  PetscCall(PetscObjectIncrementTabLevel((PetscObject)lmP->M, (PetscObject)tao, 1));
  PetscCall(MatSetType(lmP->M, MATLMVMBFGS));
  PetscCall(MatSetOptionsPrefix(lmP->M, "tao_lmvm_"));
  PetscFunctionReturn(0);
}
276