xref: /petsc/src/tao/bound/impls/bnk/bnls.c (revision 0875260307e97403c513ee2ad2ca062f01ff5a8a)
1 #include <../src/tao/bound/impls/bnk/bnk.h>
2 #include <petscksp.h>
3 
4 /*
5  Implements Newton's Method with a line search approach for
6  solving bound constrained minimization problems.
7 
8  ------------------------------------------------------------
9 
10  x_0 = VecMedian(x_0)
11  f_0, g_0 = TaoComputeObjectiveAndGradient(x_0)
12  pg_0 = VecBoundGradientProjection(g_0)
13  check convergence at pg_0
14  trust = max_radius
15  niter = 0
16 
17  while niter < max_it
18     niter += 1
19     H_k = TaoComputeHessian(x_k)
20     if pc_type == BNK_PC_BFGS
21       add correction to BFGS approx
22       if scale_type == BNK_SCALE_AHESS
23          D = VecMedian(1e-6, abs(diag(H_k)), 1e6)
24          scale BFGS with VecReciprocal(D)
25       end
26     end
27 
 28     if pc_type == BNK_PC_BFGS
29       B_k = BFGS
30     else
31       B_k = VecMedian(1e-6, abs(diag(H_k)), 1e6)
32       B_k = VecReciprocal(B_k)
33     end
34     w = x_k - VecMedian(x_k - 0.001*B_k*g_k)
35     eps = min(eps, norm2(w))
36     determine the active and inactive index sets such that
37       L = {i : (x_k)_i <= l_i + eps && (g_k)_i > 0}
38       U = {i : (x_k)_i >= u_i - eps && (g_k)_i < 0}
39       F = {i : l_i = (x_k)_i = u_i}
40       A = {L + U + F}
41       I = {i : i not in A}
42 
43     generate the reduced system Hr_k dr_k = -gr_k for variables in I
44     if p > 0
45       Hr_k += p*I
46     end
47     if pc_type == BNK_PC_BFGS && scale_type == BNK_SCALE_PHESS
48       D = VecMedian(1e-6, abs(diag(Hr_k)), 1e6)
49       scale BFGS with VecReciprocal(D)
50     end
51     trust = max_radius
52     solve Hr_k dr_k = -gr_k
53     set d_k to (l - x) for variables in L, (u - x) for variables in U, and 0 for variables in F
54 
 55     if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
56       dr_k = -BFGS*gr_k for variables in I
 57       if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
58         reset the BFGS preconditioner
59         calculate scale delta and apply it to BFGS
60         dr_k = -BFGS*gr_k for variables in I
 61         if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
62           dr_k = -gr_k for variables in I
63         end
64       end
65     end
66 
67     x_{k+1}, f_{k+1}, g_{k+1}, ls_failed = TaoBNKPerformLineSearch()
68     if ls_failed
69       f_{k+1} = f_k
70       x_{k+1} = x_k
71       g_{k+1} = g_k
72       pg_{k+1} = pg_k
73       terminate
74     else
75       pg_{k+1} = VecBoundGradientProjection(g_{k+1})
76       count the accepted step type (Newton, BFGS, scaled grad or grad)
77     end
78 
79     check convergence at pg_{k+1}
80  end
81 */
82 
83 static PetscErrorCode TaoSolve_BNLS(Tao tao)
84 {
85   PetscErrorCode               ierr;
86   TAO_BNK                      *bnk = (TAO_BNK *)tao->data;
87   KSPConvergedReason           ksp_reason;
88   TaoLineSearchConvergedReason ls_reason;
89 
90   PetscReal                    resnorm, steplen = 1.0;
91   PetscBool                    cgTerminate, stepAccepted = PETSC_TRUE, shift = PETSC_TRUE;
92   PetscInt                     stepType;
93 
94   PetscFunctionBegin;
95   /* Initialize the preconditioner, KSP solver and trust radius/line search */
96   tao->reason = TAO_CONTINUE_ITERATING;
97   ierr = TaoBNKInitialize(tao, bnk->init_type, &stepAccepted);CHKERRQ(ierr);
98   if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);
99 
100   /* Have not converged; continue with Newton method */
101   while (tao->reason == TAO_CONTINUE_ITERATING) {
102     ++tao->niter;
103     tao->ksp_its=0;
104 
105     /* Take BNCG steps (if enabled) to trade-off Hessian evaluations for more gradient evaluations */
106     ierr = TaoBNKTakeCGSteps(tao, &cgTerminate);CHKERRQ(ierr);
107     if (cgTerminate) {
108       tao->reason = bnk->bncg->reason;
109       PetscFunctionReturn(0);
110     }
111 
112     /* Compute the hessian and update the BFGS preconditioner at the new iterate */
113     if (stepAccepted) {ierr = TaoBNKComputeHessian(tao);CHKERRQ(ierr);}
114 
115     /* Use the common BNK kernel to compute the safeguarded Newton step (for inactive variables only) */
116     ierr = TaoBNKComputeStep(tao, shift, &ksp_reason);CHKERRQ(ierr);
117     ierr = TaoBNKSafeguardStep(tao, ksp_reason, &stepType);CHKERRQ(ierr);
118 
119     /* Store current solution before it changes */
120     bnk->fold = bnk->f;
121     ierr = VecCopy(tao->solution, bnk->Xold);CHKERRQ(ierr);
122     ierr = VecCopy(tao->gradient, bnk->Gold);CHKERRQ(ierr);
123     ierr = VecCopy(bnk->unprojected_gradient, bnk->unprojected_gradient_old);CHKERRQ(ierr);
124 
125     /* Trigger the line search */
126     ierr = TaoBNKPerformLineSearch(tao, stepType, &steplen, &ls_reason);CHKERRQ(ierr);
127 
128     if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
129       /* Failed to find an improving point */
130       stepAccepted = PETSC_FALSE;
131       bnk->f = bnk->fold;
132       ierr = VecCopy(bnk->Xold, tao->solution);CHKERRQ(ierr);
133       ierr = VecCopy(bnk->Gold, tao->gradient);CHKERRQ(ierr);
134       ierr = VecCopy(bnk->unprojected_gradient_old, bnk->unprojected_gradient);CHKERRQ(ierr);
135       steplen = 0.0;
136       tao->reason = TAO_DIVERGED_LS_FAILURE;
137     } else {
138       /* compute the projected gradient */
139       ierr = VecBoundGradientProjection(bnk->unprojected_gradient,tao->solution,tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);
140       ierr = VecNorm(tao->gradient, NORM_2, &bnk->gnorm);CHKERRQ(ierr);
141       if (PetscIsInfOrNanReal(bnk->f) || PetscIsInfOrNanReal(bnk->gnorm)) SETERRQ(PETSC_COMM_SELF, 1, "User provided compute function generated Not-a-Number");
142       /* update the trust radius based on the step length */
143       ierr = TaoBNKUpdateTrustRadius(tao, 0.0, 0.0, BNK_UPDATE_STEP, stepType, &stepAccepted);CHKERRQ(ierr);
144       /* count the accepted step type */
145       ierr = TaoBNKAddStepCounts(tao, stepType);CHKERRQ(ierr);
146     }
147 
148     /*  Check for termination */
149     ierr = VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->Gwork);CHKERRQ(ierr);
150     ierr = VecNorm(bnk->Gwork, NORM_2, &resnorm);CHKERRQ(ierr);
151     ierr = TaoLogConvergenceHistory(tao, bnk->f, resnorm, 0.0, tao->ksp_its);CHKERRQ(ierr);
152     ierr = TaoMonitor(tao, tao->niter, bnk->f, resnorm, 0.0, steplen);CHKERRQ(ierr);
153     ierr = (*tao->ops->convergencetest)(tao, tao->cnvP);CHKERRQ(ierr);
154   }
155   PetscFunctionReturn(0);
156 }
157 
158 /*------------------------------------------------------------*/
159 
160 PETSC_INTERN PetscErrorCode TaoCreate_BNLS(Tao tao)
161 {
162   TAO_BNK        *bnk;
163   PetscErrorCode ierr;
164 
165   PetscFunctionBegin;
166   ierr = TaoCreate_BNK(tao);CHKERRQ(ierr);
167   tao->ops->solve = TaoSolve_BNLS;
168 
169   bnk = (TAO_BNK *)tao->data;
170   bnk->init_type = BNK_INIT_DIRECTION;
171   bnk->update_type = BNK_UPDATE_STEP; /* trust region updates based on line search step length */
172   PetscFunctionReturn(0);
173 }