#include <../src/tao/unconstrained/impls/ntr/ntrimpl.h>

#include <petscksp.h>

/* Trust-region radius initialization strategies (tr->init_type) */
#define NTR_INIT_CONSTANT         0
#define NTR_INIT_DIRECTION        1
#define NTR_INIT_INTERPOLATION    2
#define NTR_INIT_TYPES            3

/* Trust-region radius update strategies (tr->update_type) */
#define NTR_UPDATE_REDUCTION      0
#define NTR_UPDATE_INTERPOLATION  1
#define NTR_UPDATE_TYPES          2

/* Option-name tables for PetscOptionsEList; indexed by the constants above */
static const char *NTR_INIT[64] = {"constant","direction","interpolation"};

static const char *NTR_UPDATE[64] = {"reduction","interpolation"};

/*
   TaoSolve_NTR - Implements Newton's Method with a trust region approach
   for solving unconstrained minimization problems.

   The basic algorithm is taken from MINPACK-2 (dstrn).

   TaoSolve_NTR computes a local minimizer of a twice differentiable function
   f by applying a trust region variant of Newton's method.  At each stage
   of the algorithm, we use the preconditioned conjugate gradient method to
   determine an approximate minimizer of the quadratic equation

        q(s) = <s, Hs + g>

   subject to the trust region constraint

        || s ||_M <= radius,

   where radius is the trust region radius and M is a symmetric positive
   definite matrix (the preconditioner).  Here g is the gradient and H
   is the Hessian matrix.

   Note:  TaoSolve_NTR MUST use the iterative solver KSPNASH, KSPSTCG,
          or KSPGLTR.  Thus, we set KSPNASH, KSPSTCG, or KSPGLTR in this
          routine regardless of what the user may have previously specified.
*/
static PetscErrorCode TaoSolve_NTR(Tao tao)
{
  TAO_NTR            *tr = (TAO_NTR *)tao->data;
  KSPType            ksp_type;
  PetscBool          is_nash,is_stcg,is_gltr,is_bfgs,is_jacobi,is_symmetric,sym_set;
  KSPConvergedReason ksp_reason;
  PC                 pc;
  PetscReal          fmin, ftrial, prered, actred, kappa, sigma, beta;
  PetscReal          tau, tau_1, tau_2, tau_max, tau_min, max_radius;
  PetscReal          f, gnorm;

  PetscReal          norm_d;
  PetscInt           needH;          /* flag: Hessian must be (re)computed before next use */

  PetscInt           i_max = 5;      /* max inner interpolation steps per pass */
  PetscInt           j_max = 1;      /* passes of the radius-initialization interpolation */
  PetscInt           i, j, N, n, its;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    PetscCall(PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by ntr algorithm\n"));
  }

  /* The trust-region subproblem solver must be one of the CG variants that
     supports a radius constraint (see the header comment above). */
  PetscCall(KSPGetType(tao->ksp,&ksp_type));
  PetscCall(PetscStrcmp(ksp_type,KSPNASH,&is_nash));
  PetscCall(PetscStrcmp(ksp_type,KSPSTCG,&is_stcg));
  PetscCall(PetscStrcmp(ksp_type,KSPGLTR,&is_gltr));
  PetscCheck(is_nash || is_stcg || is_gltr,PetscObjectComm((PetscObject)tao),PETSC_ERR_SUP,"TAO_NTR requires nash, stcg, or gltr for the KSP");

  /* Initialize the radius and modify if it is too large or small */
  tao->trust = tao->trust0;
  tao->trust = PetscMax(tao->trust, tr->min_radius);
  tao->trust = PetscMin(tao->trust, tr->max_radius);

  /* Allocate the vectors needed for the BFGS approximation */
  PetscCall(KSPGetPC(tao->ksp, &pc));
  PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs));
  PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi));
  if (is_bfgs) {
    tr->bfgs_pre = pc;
    PetscCall(PCLMVMGetMatLMVM(tr->bfgs_pre, &tr->M));
    PetscCall(VecGetLocalSize(tao->solution, &n));
    PetscCall(VecGetSize(tao->solution, &N));
    PetscCall(MatSetSizes(tr->M, n, n, N, N));
    PetscCall(MatLMVMAllocate(tr->M, tao->solution, tao->gradient));
    /* A non-symmetric LMVM approximation cannot precondition the symmetric
       trust-region CG solvers, so reject it outright. */
    PetscCall(MatIsSymmetricKnown(tr->M, &sym_set, &is_symmetric));
    PetscCheck(sym_set && is_symmetric,PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
  } else if (is_jacobi) PetscCall(PCJacobiSetUseAbs(pc,PETSC_TRUE));

  /* Check convergence criteria */
  PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient));
  PetscCall(TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm));
  PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm),PetscObjectComm((PetscObject)tao),PETSC_ERR_USER,"User provided compute function generated Inf or NaN");
  needH = 1;

  tao->reason = TAO_CONTINUE_ITERATING;
  PetscCall(TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its));
  PetscCall(TaoMonitor(tao,tao->niter,f,gnorm,0.0,1.0));
  PetscCall((*tao->ops->convergencetest)(tao,tao->cnvP));
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Initialize trust-region radius */
  switch (tr->init_type) {
  case NTR_INIT_CONSTANT:
    /* Use the initial radius specified */
    break;

  case NTR_INIT_INTERPOLATION:
    /* Probe the objective along the steepest-descent direction and use an
       interpolation model (MINPACK-2 style) to pick a starting radius. */
    max_radius = 0.0;

    for (j = 0; j < j_max; ++j) {
      fmin = f;
      sigma = 0.0;

      if (needH) {
        PetscCall(TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre));
        needH = 0;
      }

      for (i = 0; i < i_max; ++i) {
        /* Trial point: x - (trust/||g||) g */
        PetscCall(VecCopy(tao->solution, tr->W));
        PetscCall(VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient));
        PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));

        if (PetscIsInfOrNanReal(ftrial)) {
          /* Objective blew up; shrink hard */
          tau = tr->gamma1_i;
        }
        else {
          /* Remember the best point seen so far along -g */
          if (ftrial < fmin) {
            fmin = ftrial;
            sigma = -tao->trust / gnorm;
          }

          /* Predicted reduction of the quadratic model along -g:
             prered = trust*(||g|| - 0.5*trust*gHg/||g||^2) */
          PetscCall(MatMult(tao->hessian, tao->gradient, tao->stepdirection));
          PetscCall(VecDot(tao->gradient, tao->stepdirection, &prered));

          prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
          actred = f - ftrial;
          if ((PetscAbsScalar(actred) <= tr->epsilon) &&
              (PetscAbsScalar(prered) <= tr->epsilon)) {
            /* Both reductions are at noise level; treat agreement as perfect */
            kappa = 1.0;
          }
          else {
            kappa = actred / prered;
          }

          /* Interpolation factors bracketing the new radius */
          tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
          tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
          tau_min = PetscMin(tau_1, tau_2);
          tau_max = PetscMax(tau_1, tau_2);

          if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu1_i) {
            /* Great agreement */
            max_radius = PetscMax(max_radius, tao->trust);

            if (tau_max < 1.0) {
              tau = tr->gamma3_i;
            }
            else if (tau_max > tr->gamma4_i) {
              tau = tr->gamma4_i;
            }
            else {
              tau = tau_max;
            }
          }
          else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu2_i) {
            /* Good agreement */
            max_radius = PetscMax(max_radius, tao->trust);

            if (tau_max < tr->gamma2_i) {
              tau = tr->gamma2_i;
            }
            else if (tau_max > tr->gamma3_i) {
              tau = tr->gamma3_i;
            }
            else {
              tau = tau_max;
            }
          }
          else {
            /* Not good agreement */
            if (tau_min > 1.0) {
              tau = tr->gamma2_i;
            }
            else if (tau_max < tr->gamma1_i) {
              tau = tr->gamma1_i;
            }
            else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
              tau = tr->gamma1_i;
            }
            else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) &&
                     ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
              tau = tau_1;
            }
            /* NOTE(review): the final (tau_2 >= 1.0) below breaks the symmetry
               with the tau_1 branch above; tau_1 >= 1.0 may have been intended.
               Confirm against MINPACK-2 dstrn before changing. */
            else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) &&
                     ((tau_1 < tr->gamma1_i) || (tau_2 >= 1.0))) {
              tau = tau_2;
            }
            else {
              tau = tau_max;
            }
          }
        }
        tao->trust = tau * tao->trust;
      }

      if (fmin < f) {
        /* Accept the best probed point: x <- x + sigma*g (sigma <= 0) */
        f = fmin;
        PetscCall(VecAXPY(tao->solution, sigma, tao->gradient));
        PetscCall(TaoComputeGradient(tao,tao->solution, tao->gradient));

        PetscCall(TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm));
        PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm),PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
        needH = 1;

        PetscCall(TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its));
        PetscCall(TaoMonitor(tao,tao->niter,f,gnorm,0.0,1.0));
        PetscCall((*tao->ops->convergencetest)(tao,tao->cnvP));
        if (tao->reason != TAO_CONTINUE_ITERATING) {
          PetscFunctionReturn(0);
        }
      }
    }
    tao->trust = PetscMax(tao->trust, max_radius);

    /* Modify the radius if it is too large or small */
    tao->trust = PetscMax(tao->trust, tr->min_radius);
    tao->trust = PetscMin(tao->trust, tr->max_radius);
    break;

  default:
    /* Norm of the first direction will initialize radius */
    tao->trust = 0.0;
    break;
  }

  /* Have not converged; continue with Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) PetscCall((*tao->ops->update)(tao, tao->niter, tao->user_update));
    ++tao->niter;
    tao->ksp_its=0;
    /* Compute the Hessian */
    if (needH) {
      PetscCall(TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre));
      needH = 0;
    }

    if (tr->bfgs_pre) {
      /* Update the limited memory preconditioner */
      PetscCall(MatLMVMUpdate(tr->M, tao->solution, tao->gradient));
    }

    /* Inner loop: keep shrinking the radius and re-solving until a step is
       accepted (break) or the convergence test terminates the solve. */
    while (tao->reason == TAO_CONTINUE_ITERATING) {
      PetscCall(KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre));

      /* Solve the trust region subproblem */
      PetscCall(KSPCGSetRadius(tao->ksp,tao->trust));
      PetscCall(KSPSolve(tao->ksp, tao->gradient, tao->stepdirection));
      PetscCall(KSPGetIterationNumber(tao->ksp,&its));
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      PetscCall(KSPCGGetNormD(tao->ksp, &norm_d));

      if (0.0 == tao->trust) {
        /* Radius was uninitialized; use the norm of the direction */
        if (norm_d > 0.0) {
          tao->trust = norm_d;

          /* Modify the radius if it is too large or small */
          tao->trust = PetscMax(tao->trust, tr->min_radius);
          tao->trust = PetscMin(tao->trust, tr->max_radius);
        }
        else {
          /* The direction was bad; set radius to default value and re-solve
             the trust-region subproblem to get a direction */
          tao->trust = tao->trust0;

          /* Modify the radius if it is too large or small */
          tao->trust = PetscMax(tao->trust, tr->min_radius);
          tao->trust = PetscMin(tao->trust, tr->max_radius);

          PetscCall(KSPCGSetRadius(tao->ksp,tao->trust));
          PetscCall(KSPSolve(tao->ksp, tao->gradient, tao->stepdirection));
          PetscCall(KSPGetIterationNumber(tao->ksp,&its));
          tao->ksp_its+=its;
          tao->ksp_tot_its+=its;
          PetscCall(KSPCGGetNormD(tao->ksp, &norm_d));

          PetscCheck(norm_d != 0.0,PetscObjectComm((PetscObject)tao),PETSC_ERR_PLIB, "Initial direction zero");
        }
      }
      /* KSP solved H d = g; negate to get the descent step d = -H^{-1} g */
      PetscCall(VecScale(tao->stepdirection, -1.0));
      PetscCall(KSPGetConvergedReason(tao->ksp, &ksp_reason));
      if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (tr->bfgs_pre)) {
        /* Preconditioner is numerically indefinite; reset the
           approximate if using BFGS preconditioning. */
        PetscCall(MatLMVMReset(tr->M, PETSC_FALSE));
        PetscCall(MatLMVMUpdate(tr->M, tao->solution, tao->gradient));
      }

      if (NTR_UPDATE_REDUCTION == tr->update_type) {
        /* Get predicted reduction */
        PetscCall(KSPCGGetObjFcn(tao->ksp,&prered));
        if (prered >= 0.0) {
          /* The predicted reduction has the wrong sign.  This cannot
             happen in infinite precision arithmetic.  Step should
             be rejected! */
          tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
        }
        else {
          /* Compute trial step and function value */
          PetscCall(VecCopy(tao->solution,tr->W));
          PetscCall(VecAXPY(tr->W, 1.0, tao->stepdirection));
          PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));

          if (PetscIsInfOrNanReal(ftrial)) {
            tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
          } else {
            /* Compute and actual reduction */
            actred = f - ftrial;
            prered = -prered;
            if ((PetscAbsScalar(actred) <= tr->epsilon) &&
                (PetscAbsScalar(prered) <= tr->epsilon)) {
              kappa = 1.0;
            }
            else {
              kappa = actred / prered;
            }

            /* Accept or reject the step and update radius */
            if (kappa < tr->eta1) {
              /* Reject the step */
              tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
            }
            else {
              /* Accept the step */
              if (kappa < tr->eta2) {
                /* Marginal bad step */
                tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
              }
              else if (kappa < tr->eta3) {
                /* Reasonable step */
                tao->trust = tr->alpha3 * tao->trust;
              }
              else if (kappa < tr->eta4) {
                /* Good step */
                tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
              }
              else {
                /* Very good step */
                tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
              }
              /* Step accepted: leave the inner loop with tr->W/ftrial holding
                 the new point and objective value */
              break;
            }
          }
        }
      }
      else {
        /* NTR_UPDATE_INTERPOLATION radius update */
        /* Get predicted reduction */
        PetscCall(KSPCGGetObjFcn(tao->ksp,&prered));
        if (prered >= 0.0) {
          /* The predicted reduction has the wrong sign.  This cannot
             happen in infinite precision arithmetic.  Step should
             be rejected! */
          tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
        }
        else {
          PetscCall(VecCopy(tao->solution, tr->W));
          PetscCall(VecAXPY(tr->W, 1.0, tao->stepdirection));
          PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));
          if (PetscIsInfOrNanReal(ftrial)) {
            tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
          }
          else {
            /* beta = <g, d> is used in the interpolation factors below */
            PetscCall(VecDot(tao->gradient, tao->stepdirection, &beta));
            actred = f - ftrial;
            prered = -prered;
            if ((PetscAbsScalar(actred) <= tr->epsilon) &&
                (PetscAbsScalar(prered) <= tr->epsilon)) {
              kappa = 1.0;
            }
            else {
              kappa = actred / prered;
            }

            tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
            tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
            tau_min = PetscMin(tau_1, tau_2);
            tau_max = PetscMax(tau_1, tau_2);

            if (kappa >= 1.0 - tr->mu1) {
              /* Great agreement; accept step and update radius */
              if (tau_max < 1.0) {
                tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
              }
              else if (tau_max > tr->gamma4) {
                tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
              }
              else {
                tao->trust = PetscMax(tao->trust, tau_max * norm_d);
              }
              break;
            }
            else if (kappa >= 1.0 - tr->mu2) {
              /* Good agreement; accept step and update radius */

              if (tau_max < tr->gamma2) {
                tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
              }
              else if (tau_max > tr->gamma3) {
                tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
              }
              else if (tau_max < 1.0) {
                tao->trust = tau_max * PetscMin(tao->trust, norm_d);
              }
              else {
                tao->trust = PetscMax(tao->trust, tau_max * norm_d);
              }
              break;
            }
            else {
              /* Not good agreement: reject the step, shrink the radius */
              if (tau_min > 1.0) {
                tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
              }
              else if (tau_max < tr->gamma1) {
                tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
                tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) &&
                       ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
                tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
              }
              /* NOTE(review): as in the initialization code above, the final
                 (tau_2 >= 1.0) below may be a typo for (tau_1 >= 1.0) given
                 the symmetry with the tau_1 branch -- confirm before changing. */
              else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) &&
                       ((tau_1 < tr->gamma1) || (tau_2 >= 1.0))) {
                tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
              }
              else {
                tao->trust = tau_max * PetscMin(tao->trust, norm_d);
              }
            }
          }
        }
      }

      /* The step computed was not good and the radius was decreased.
         Monitor the radius to terminate. */
      PetscCall(TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its));
      PetscCall(TaoMonitor(tao,tao->niter,f,gnorm,0.0,tao->trust));
      PetscCall((*tao->ops->convergencetest)(tao,tao->cnvP));
    }

    /* The radius may have been increased; modify if it is too large */
    tao->trust = PetscMin(tao->trust, tr->max_radius);

    if (tao->reason == TAO_CONTINUE_ITERATING) {
      /* Step was accepted (break above): tr->W is the new iterate and
         ftrial its objective value */
      PetscCall(VecCopy(tr->W, tao->solution));
      f = ftrial;
      PetscCall(TaoComputeGradient(tao, tao->solution, tao->gradient));
      PetscCall(TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm));
      PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm),PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
      needH = 1;
      PetscCall(TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its));
      PetscCall(TaoMonitor(tao,tao->niter,f,gnorm,0.0,tao->trust));
      PetscCall((*tao->ops->convergencetest)(tao,tao->cnvP));
    }
  }
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
/* TaoSetUp_NTR - Allocates the work vectors (gradient, step direction,
   trial-point vector W) and clears the BFGS preconditioner handles. */
static PetscErrorCode TaoSetUp_NTR(Tao tao)
{
  TAO_NTR *tr = (TAO_NTR *)tao->data;

  PetscFunctionBegin;
  if (!tao->gradient) PetscCall(VecDuplicate(tao->solution, &tao->gradient));
  if (!tao->stepdirection) PetscCall(VecDuplicate(tao->solution, &tao->stepdirection));
  if (!tr->W) PetscCall(VecDuplicate(tao->solution, &tr->W));

  /* bfgs_pre/M are set lazily in TaoSolve_NTR when the PC is PCLMVM */
  tr->bfgs_pre = NULL;
  tr->M = NULL;
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
/* TaoDestroy_NTR - Frees the work vector, the KSP, and the solver context. */
static PetscErrorCode TaoDestroy_NTR(Tao tao)
{
  TAO_NTR *tr = (TAO_NTR *)tao->data;

  PetscFunctionBegin;
  if (tao->setupcalled) {
    PetscCall(VecDestroy(&tr->W));
  }
  PetscCall(KSPDestroy(&tao->ksp));
  PetscCall(PetscFree(tao->data));
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
/* TaoSetFromOptions_NTR - Processes the -tao_ntr_* options database keys
   (see the TAONTR manual page below) and forwards to the inner KSP. */
static PetscErrorCode TaoSetFromOptions_NTR(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_NTR *tr = (TAO_NTR *)tao->data;

  PetscFunctionBegin;
  PetscOptionsHeadBegin(PetscOptionsObject,"Newton trust region method for unconstrained optimization");
  PetscCall(PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type,NULL));
  PetscCall(PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_eta2", "", "", tr->eta2, &tr->eta2,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_eta3", "", "", tr->eta3, &tr->eta3,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_eta4", "", "", tr->eta4, &tr->eta4,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_alpha1", "", "", tr->alpha1, &tr->alpha1,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_alpha2", "", "", tr->alpha2, &tr->alpha2,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_alpha3", "", "", tr->alpha3, &tr->alpha3,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_alpha4", "", "", tr->alpha4, &tr->alpha4,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_alpha5", "", "", tr->alpha5, &tr->alpha5,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_mu1", "", "", tr->mu1, &tr->mu1,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_mu2", "", "", tr->mu2, &tr->mu2,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma1", "", "", tr->gamma1, &tr->gamma1,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma2", "", "", tr->gamma2, &tr->gamma2,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma3", "", "", tr->gamma3, &tr->gamma3,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma4", "", "", tr->gamma4, &tr->gamma4,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_theta", "", "", tr->theta, &tr->theta,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_mu1_i", "", "", tr->mu1_i, &tr->mu1_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_mu2_i", "", "", tr->mu2_i, &tr->mu2_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma1_i", "", "", tr->gamma1_i, &tr->gamma1_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma2_i", "", "", tr->gamma2_i, &tr->gamma2_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma3_i", "", "", tr->gamma3_i, &tr->gamma3_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_gamma4_i", "", "", tr->gamma4_i, &tr->gamma4_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_theta_i", "", "", tr->theta_i, &tr->theta_i,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius,NULL));
  PetscCall(PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon,NULL));
  PetscOptionsHeadEnd();
  PetscCall(KSPSetFromOptions(tao->ksp));
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
/*MC
  TAONTR - Newton's method with trust region for unconstrained minimization.
  At each iteration, the Newton trust region method solves the system.
  NTR expects a KSP solver with a trust region radius.
            min_d  .5 dT Hk d + gkT d,  s.t.   ||d|| < Delta_k

  Options Database Keys:
+ -tao_ntr_init_type - "constant","direction","interpolation"
. -tao_ntr_update_type - "reduction","interpolation"
. -tao_ntr_min_radius - lower bound on trust region radius
. -tao_ntr_max_radius - upper bound on trust region radius
. -tao_ntr_epsilon - tolerance for accepting actual / predicted reduction
. -tao_ntr_mu1_i - mu1 interpolation init factor
. -tao_ntr_mu2_i - mu2 interpolation init factor
. -tao_ntr_gamma1_i - gamma1 interpolation init factor
. -tao_ntr_gamma2_i - gamma2 interpolation init factor
. -tao_ntr_gamma3_i - gamma3 interpolation init factor
. -tao_ntr_gamma4_i - gamma4 interpolation init factor
. -tao_ntr_theta_i - theta interpolation init factor
. -tao_ntr_eta1 - eta1 reduction update factor
. -tao_ntr_eta2 - eta2 reduction update factor
. -tao_ntr_eta3 - eta3 reduction update factor
. -tao_ntr_eta4 - eta4 reduction update factor
. -tao_ntr_alpha1 - alpha1 reduction update factor
. -tao_ntr_alpha2 - alpha2 reduction update factor
. -tao_ntr_alpha3 - alpha3 reduction update factor
. -tao_ntr_alpha4 - alpha4 reduction update factor
. -tao_ntr_alpha5 - alpha5 reduction update factor
. -tao_ntr_mu1 - mu1 interpolation update
. -tao_ntr_mu2 - mu2 interpolation update
. -tao_ntr_gamma1 - gamma1 interpolation update
. -tao_ntr_gamma2 - gamma2 interpolation update
. -tao_ntr_gamma3 - gamma3 interpolation update
. -tao_ntr_gamma4 - gamma4 interpolation update
- -tao_ntr_theta - theta interpolation update

  Level: beginner
M*/

/* TaoCreate_NTR - Registers the NTR callbacks, installs the default
   trust-region parameters, and creates the inner KSP (default KSPSTCG). */
PETSC_EXTERN PetscErrorCode TaoCreate_NTR(Tao tao)
{
  TAO_NTR *tr;

  PetscFunctionBegin;

  PetscCall(PetscNewLog(tao,&tr));

  tao->ops->setup = TaoSetUp_NTR;
  tao->ops->solve = TaoSolve_NTR;
  tao->ops->setfromoptions = TaoSetFromOptions_NTR;
  tao->ops->destroy = TaoDestroy_NTR;

  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 50;
  if (!tao->trust0_changed) tao->trust0 = 100.0;
  tao->data = (void*)tr;

  /* Standard trust region update parameters */
  tr->eta1 = 1.0e-4;
  tr->eta2 = 0.25;
  tr->eta3 = 0.50;
  tr->eta4 = 0.90;

  tr->alpha1 = 0.25;
  tr->alpha2 = 0.50;
  tr->alpha3 = 1.00;
  tr->alpha4 = 2.00;
  tr->alpha5 = 4.00;

  /* Interpolation trust region update parameters */
  tr->mu1 = 0.10;
  tr->mu2 = 0.50;

  tr->gamma1 = 0.25;
  tr->gamma2 = 0.50;
  tr->gamma3 = 2.00;
  tr->gamma4 = 4.00;

  tr->theta = 0.05;

  /* Interpolation parameters for initialization */
  tr->mu1_i = 0.35;
  tr->mu2_i = 0.50;

  tr->gamma1_i = 0.0625;
  tr->gamma2_i = 0.50;
  tr->gamma3_i = 2.00;
  tr->gamma4_i = 5.00;

  tr->theta_i = 0.25;

  tr->min_radius = 1.0e-10;
  tr->max_radius = 1.0e10;
  tr->epsilon = 1.0e-6;

  tr->init_type = NTR_INIT_INTERPOLATION;
  tr->update_type = NTR_UPDATE_REDUCTION;

  /* Set linear solver to default for trust region */
  PetscCall(KSPCreate(((PetscObject)tao)->comm,&tao->ksp));
  PetscCall(PetscObjectIncrementTabLevel((PetscObject)tao->ksp,(PetscObject)tao,1));
  PetscCall(KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix));
  PetscCall(KSPAppendOptionsPrefix(tao->ksp,"tao_ntr_"));
  PetscCall(KSPSetType(tao->ksp,KSPSTCG));
  PetscFunctionReturn(0);
}