Actual source code: ntl.c

petsc-3.5.4 2015-05-23
  1: #include <../src/tao/matrix/lmvmmat.h>
  2: #include <../src/tao/unconstrained/impls/ntl/ntl.h>

  4: #include <petscksp.h>
  5: #include <petscpc.h>
  6: #include <petsc-private/kspimpl.h>
  7: #include <petsc-private/pcimpl.h>

  9: #define NTL_KSP_NASH    0
 10: #define NTL_KSP_STCG    1
 11: #define NTL_KSP_GLTR    2
 12: #define NTL_KSP_TYPES   3

 14: #define NTL_PC_NONE     0
 15: #define NTL_PC_AHESS    1
 16: #define NTL_PC_BFGS     2
 17: #define NTL_PC_PETSC    3
 18: #define NTL_PC_TYPES    4

 20: #define BFGS_SCALE_AHESS        0
 21: #define BFGS_SCALE_BFGS         1
 22: #define BFGS_SCALE_TYPES        2

 24: #define NTL_INIT_CONSTANT         0
 25: #define NTL_INIT_DIRECTION        1
 26: #define NTL_INIT_INTERPOLATION    2
 27: #define NTL_INIT_TYPES            3

 29: #define NTL_UPDATE_REDUCTION      0
 30: #define NTL_UPDATE_INTERPOLATION  1
 31: #define NTL_UPDATE_TYPES          2

 33: static const char *NTL_KSP[64] = {"nash", "stcg", "gltr"};

 35: static const char *NTL_PC[64] = {"none", "ahess", "bfgs", "petsc"};

 37: static const char *BFGS_SCALE[64] = {"ahess", "bfgs"};

 39: static const char *NTL_INIT[64] = {"constant", "direction", "interpolation"};

 41: static const char *NTL_UPDATE[64] = {"reduction", "interpolation"};

 43: /* Shell preconditioner apply routine wrapping the limited-memory BFGS matrix */

 47: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
 48: {
 50:   Mat            M;

 56:   PCShellGetContext(pc,(void**)&M);
 57:   MatLMVMSolve(M, b, x);
 58:   return(0);
 59: }
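/* This routine has the (PC,Vec,Vec) signature expected by PCShellSetApply().
   TaoSolve_NTL() below attaches the LMVM matrix with PCShellSetContext() and
   registers this function as the shell apply, so each application of the
   preconditioner is a MatLMVMSolve() with the current limited-memory BFGS
   approximation. */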

 61: /* Implements Newton's Method with a trust-region, line-search approach for
 62:    solving unconstrained minimization problems.  A More'-Thuente line search
 63:    is used to guarantee that the bfgs preconditioner remains positive
 64:    definite. */
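/* (The More'-Thuente search satisfies a Wolfe curvature condition, which gives
   y^T s > 0 for each correction pair and therefore keeps the BFGS update, and
   hence the preconditioner, positive definite.) */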

 66: #define NTL_NEWTON              0
 67: #define NTL_BFGS                1
 68: #define NTL_SCALED_GRADIENT     2
 69: #define NTL_GRADIENT            3

 73: static PetscErrorCode TaoSolve_NTL(Tao tao)
 74: {
 75:   TAO_NTL                      *tl = (TAO_NTL *)tao->data;
 76:   PC                           pc;
 77:   KSPConvergedReason           ksp_reason;
 78:   TaoConvergedReason           reason;
 79:   TaoLineSearchConvergedReason ls_reason;

 81:   PetscReal                    fmin, ftrial, prered, actred, kappa, sigma;
 82:   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 83:   PetscReal                    f, fold, gdx, gnorm;
 84:   PetscReal                    step = 1.0;

 86:   PetscReal                    delta;
 87:   PetscReal                    norm_d = 0.0;
 88:   PetscErrorCode               ierr;
 89:   PetscInt                     stepType;
 90:   PetscInt                     iter = 0,its;

 92:   PetscInt                     bfgsUpdates = 0;
 93:   PetscInt                     needH;

 95:   PetscInt                     i_max = 5;
 96:   PetscInt                     j_max = 1;
 97:   PetscInt                     i, j, n, N;

 99:   PetscInt                     tr_reject;

102:   if (tao->XL || tao->XU || tao->ops->computebounds) {
103:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by ntl algorithm\n");
104:   }

106:   /* Initialize trust-region radius */
107:   tao->trust = tao->trust0;

109:   /* Modify the radius if it is too large or small */
110:   tao->trust = PetscMax(tao->trust, tl->min_radius);
111:   tao->trust = PetscMin(tao->trust, tl->max_radius);

113:   if (NTL_PC_BFGS == tl->pc_type && !tl->M) {
114:     VecGetLocalSize(tao->solution,&n);
115:     VecGetSize(tao->solution,&N);
116:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tl->M);
117:     MatLMVMAllocateVectors(tl->M,tao->solution);
118:   }

120:   /* Check convergence criteria */
121:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
122:   VecNorm(tao->gradient, NORM_2, &gnorm);
123:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
124:   needH = 1;

126:   TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);
127:   if (reason != TAO_CONTINUE_ITERATING) return(0);

129:   /* Create vectors for the limited memory preconditioner */
130:   if ((NTL_PC_BFGS == tl->pc_type) && (BFGS_SCALE_BFGS != tl->bfgs_scale_type)) {
131:     if (!tl->Diag) {
132:       VecDuplicate(tao->solution, &tl->Diag);
133:     }
134:   }

136:   /* Modify the linear solver to a conjugate gradient method */
137:   switch(tl->ksp_type) {
138:   case NTL_KSP_NASH:
139:     KSPSetType(tao->ksp, KSPNASH);
140:     if (tao->ksp->ops->setfromoptions) {
141:       (*tao->ksp->ops->setfromoptions)(tao->ksp);
142:     }
143:     break;

145:   case NTL_KSP_STCG:
146:     KSPSetType(tao->ksp, KSPSTCG);
147:     if (tao->ksp->ops->setfromoptions) {
148:       (*tao->ksp->ops->setfromoptions)(tao->ksp);
149:     }
150:     break;

152:   default:
153:     KSPSetType(tao->ksp, KSPGLTR);
154:     if (tao->ksp->ops->setfromoptions) {
155:       (*tao->ksp->ops->setfromoptions)(tao->ksp);
156:     }
157:     break;
158:   }

160:   /* Modify the preconditioner to use the bfgs approximation */
161:   KSPGetPC(tao->ksp, &pc);
162:   switch(tl->pc_type) {
163:   case NTL_PC_NONE:
164:     PCSetType(pc, PCNONE);
165:     if (pc->ops->setfromoptions) {
166:       (*pc->ops->setfromoptions)(pc);
167:     }
168:     break;

170:   case NTL_PC_AHESS:
171:     PCSetType(pc, PCJACOBI);
172:     if (pc->ops->setfromoptions) {
173:       (*pc->ops->setfromoptions)(pc);
174:     }
175:     PCJacobiSetUseAbs(pc);
176:     break;

178:   case NTL_PC_BFGS:
179:     PCSetType(pc, PCSHELL);
180:     if (pc->ops->setfromoptions) {
181:       (*pc->ops->setfromoptions)(pc);
182:     }
183:     PCShellSetName(pc, "bfgs");
184:     PCShellSetContext(pc, tl->M);
185:     PCShellSetApply(pc, MatLMVMSolveShell);
186:     break;

188:   default:
189:     /* Use the pc method set by pc_type */
190:     break;
191:   }

193:   /* Initialize the trust-region radius according to the selected strategy:
194:      constant (trust0), interpolation along the gradient, or first-direction norm. */
195:   switch(tl->init_type) {
196:   case NTL_INIT_CONSTANT:
197:     /* Use the initial radius specified */
198:     break;

200:   case NTL_INIT_INTERPOLATION:
201:     /* Use interpolation to determine the initial radius */
202:     max_radius = 0.0;

204:     for (j = 0; j < j_max; ++j) {
205:       fmin = f;
206:       sigma = 0.0;

208:       if (needH) {
209:         TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
210:         needH = 0;
211:       }

213:       for (i = 0; i < i_max; ++i) {
214:         VecCopy(tao->solution, tl->W);
215:         VecAXPY(tl->W, -tao->trust/gnorm, tao->gradient);

217:         TaoComputeObjective(tao, tl->W, &ftrial);
218:         if (PetscIsInfOrNanReal(ftrial)) {
219:           tau = tl->gamma1_i;
220:         } else {
221:           if (ftrial < fmin) {
222:             fmin = ftrial;
223:             sigma = -tao->trust / gnorm;
224:           }

226:           MatMult(tao->hessian, tao->gradient, tao->stepdirection);
227:           VecDot(tao->gradient, tao->stepdirection, &prered);

229:           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
230:           actred = f - ftrial;
231:           if ((PetscAbsScalar(actred) <= tl->epsilon) && (PetscAbsScalar(prered) <= tl->epsilon)) {
232:             kappa = 1.0;
233:           } else {
234:             kappa = actred / prered;
235:           }

237:           tau_1 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust + (1.0 - tl->theta_i) * prered - actred);
238:           tau_2 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust - (1.0 + tl->theta_i) * prered + actred);
239:           tau_min = PetscMin(tau_1, tau_2);
240:           tau_max = PetscMax(tau_1, tau_2);

242:           if (PetscAbsScalar(kappa - 1.0) <= tl->mu1_i) {
243:             /* Great agreement */
244:             max_radius = PetscMax(max_radius, tao->trust);

246:             if (tau_max < 1.0) {
247:               tau = tl->gamma3_i;
248:             } else if (tau_max > tl->gamma4_i) {
249:               tau = tl->gamma4_i;
250:             } else if (tau_1 >= 1.0 && tau_1 <= tl->gamma4_i && tau_2 < 1.0) {
251:               tau = tau_1;
252:             } else if (tau_2 >= 1.0 && tau_2 <= tl->gamma4_i && tau_1 < 1.0) {
253:               tau = tau_2;
254:             } else {
255:               tau = tau_max;
256:             }
257:           } else if (PetscAbsScalar(kappa - 1.0) <= tl->mu2_i) {
258:             /* Good agreement */
259:             max_radius = PetscMax(max_radius, tao->trust);

261:             if (tau_max < tl->gamma2_i) {
262:               tau = tl->gamma2_i;
263:             } else if (tau_max > tl->gamma3_i) {
264:               tau = tl->gamma3_i;
265:             } else {
266:               tau = tau_max;
267:             }
268:           } else {
269:             /* Not good agreement */
270:             if (tau_min > 1.0) {
271:               tau = tl->gamma2_i;
272:             } else if (tau_max < tl->gamma1_i) {
273:               tau = tl->gamma1_i;
274:             } else if ((tau_min < tl->gamma1_i) && (tau_max >= 1.0)) {
275:               tau = tl->gamma1_i;
276:             } else if ((tau_1 >= tl->gamma1_i) && (tau_1 < 1.0) &&  ((tau_2 < tl->gamma1_i) || (tau_2 >= 1.0))) {
277:               tau = tau_1;
278:             } else if ((tau_2 >= tl->gamma1_i) && (tau_2 < 1.0) &&  ((tau_1 < tl->gamma1_i) || (tau_1 >= 1.0))) {
279:               tau = tau_2;
280:             } else {
281:               tau = tau_max;
282:             }
283:           }
284:         }
285:         tao->trust = tau * tao->trust;
286:       }

288:       if (fmin < f) {
289:         f = fmin;
290:         VecAXPY(tao->solution, sigma, tao->gradient);
291:         TaoComputeGradient(tao, tao->solution, tao->gradient);

293:         VecNorm(tao->gradient, NORM_2, &gnorm);
294:         if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
295:         needH = 1;

297:         TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);
298:         if (reason != TAO_CONTINUE_ITERATING) return(0);
299:       }
300:     }
301:     tao->trust = PetscMax(tao->trust, max_radius);

303:     /* Modify the radius if it is too large or small */
304:     tao->trust = PetscMax(tao->trust, tl->min_radius);
305:     tao->trust = PetscMin(tao->trust, tl->max_radius);
306:     break;

308:   default:
309:     /* Norm of the first direction will initialize radius */
310:     tao->trust = 0.0;
311:     break;
312:   }

314:   /* Set initial scaling for the BFGS preconditioner
315:      This step is done after computing the initial trust-region radius
316:      since the function value may have decreased */
317:   if (NTL_PC_BFGS == tl->pc_type) {
318:     if (f != 0.0) {
319:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
320:     } else {
321:       delta = 2.0 / (gnorm*gnorm);
322:     }
323:     MatLMVMSetDelta(tl->M, delta);
324:   }
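  /* The scaling heuristic above, delta = 2|f|/||g||^2, is the minimizer of a
     one-dimensional quadratic model that passes through f with slope -||g||^2
     and has minimum value zero; it gives the initial BFGS approximation a
     gradient-step scale. */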

326:   /* Set counter for gradient/reset steps */
327:   tl->ntrust = 0;
328:   tl->newt = 0;
329:   tl->bfgs = 0;
330:   tl->sgrad = 0;
331:   tl->grad = 0;

333:   /* Have not converged; continue with Newton method */
334:   while (reason == TAO_CONTINUE_ITERATING) {
335:     ++iter;

337:     /* Compute the Hessian */
338:     if (needH) {
339:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
340:       needH = 0;
341:     }

343:     if (NTL_PC_BFGS == tl->pc_type) {
344:       if (BFGS_SCALE_AHESS == tl->bfgs_scale_type) {
345:         /* Obtain diagonal for the bfgs preconditioner */
346:         MatGetDiagonal(tao->hessian, tl->Diag);
347:         VecAbs(tl->Diag);
348:         VecReciprocal(tl->Diag);
349:         MatLMVMSetScale(tl->M, tl->Diag);
350:       }

352:       /* Update the limited memory preconditioner */
353:       MatLMVMUpdate(tl->M,tao->solution, tao->gradient);
354:       ++bfgsUpdates;
355:     }
356:     KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
357:     /* Solve the Newton system of equations */
358:     if (NTL_KSP_NASH == tl->ksp_type) {
359:       KSPNASHSetRadius(tao->ksp,tl->max_radius);
360:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
361:       KSPGetIterationNumber(tao->ksp,&its);
362:       tao->ksp_its+=its;
363:       KSPNASHGetNormD(tao->ksp, &norm_d);
364:     } else if (NTL_KSP_STCG == tl->ksp_type) {
365:       KSPSTCGSetRadius(tao->ksp,tl->max_radius);
366:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
367:       KSPGetIterationNumber(tao->ksp,&its);
368:       tao->ksp_its+=its;
369:       KSPSTCGGetNormD(tao->ksp, &norm_d);
370:     } else { /* NTL_KSP_GLTR */
371:       KSPGLTRSetRadius(tao->ksp,tl->max_radius);
372:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
373:       KSPGetIterationNumber(tao->ksp,&its);
374:       tao->ksp_its+=its;
375:       KSPGLTRGetNormD(tao->ksp, &norm_d);
376:     }

378:     if (0.0 == tao->trust) {
379:       /* Radius was uninitialized; use the norm of the direction */
380:       if (norm_d > 0.0) {
381:         tao->trust = norm_d;

383:         /* Modify the radius if it is too large or small */
384:         tao->trust = PetscMax(tao->trust, tl->min_radius);
385:         tao->trust = PetscMin(tao->trust, tl->max_radius);
386:       } else {
387:         /* The direction was bad; set radius to default value and re-solve
388:            the trust-region subproblem to get a direction */
389:         tao->trust = tao->trust0;

391:         /* Modify the radius if it is too large or small */
392:         tao->trust = PetscMax(tao->trust, tl->min_radius);
393:         tao->trust = PetscMin(tao->trust, tl->max_radius);

395:         if (NTL_KSP_NASH == tl->ksp_type) {
396:           KSPNASHSetRadius(tao->ksp,tl->max_radius);
397:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
398:           KSPGetIterationNumber(tao->ksp,&its);
399:           tao->ksp_its+=its;
400:           KSPNASHGetNormD(tao->ksp, &norm_d);
401:         } else if (NTL_KSP_STCG == tl->ksp_type) {
402:           KSPSTCGSetRadius(tao->ksp,tl->max_radius);
403:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
404:           KSPGetIterationNumber(tao->ksp,&its);
405:           tao->ksp_its+=its;
406:           KSPSTCGGetNormD(tao->ksp, &norm_d);
407:         } else { /* NTL_KSP_GLTR */
408:           KSPGLTRSetRadius(tao->ksp,tl->max_radius);
409:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
410:           KSPGetIterationNumber(tao->ksp,&its);
411:           tao->ksp_its+=its;
412:           KSPGLTRGetNormD(tao->ksp, &norm_d);
413:         }


416:         if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
417:       }
418:     }

420:     VecScale(tao->stepdirection, -1.0);
421:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
422:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NTL_PC_BFGS == tl->pc_type) && (bfgsUpdates > 1)) {
423:       /* Preconditioner is numerically indefinite; reset the
424:          approximation if using BFGS preconditioning. */

426:       if (f != 0.0) {
427:         delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
428:       } else {
429:         delta = 2.0 / (gnorm*gnorm);
430:       }
431:       MatLMVMSetDelta(tl->M, delta);
432:       MatLMVMReset(tl->M);
433:       MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
434:       bfgsUpdates = 1;
435:     }

437:     /* Check trust-region reduction conditions */
438:     tr_reject = 0;
439:     if (NTL_UPDATE_REDUCTION == tl->update_type) {
440:       /* Get predicted reduction */
441:       if (NTL_KSP_NASH == tl->ksp_type) {
442:         KSPNASHGetObjFcn(tao->ksp,&prered);
443:       } else if (NTL_KSP_STCG == tl->ksp_type) {
444:         KSPSTCGGetObjFcn(tao->ksp,&prered);
445:       } else { /* gltr */
446:         KSPGLTRGetObjFcn(tao->ksp,&prered);
447:       }

449:       if (prered >= 0.0) {
450:         /* The predicted reduction has the wrong sign.  This cannot
451:            happen in infinite precision arithmetic.  Step should
452:            be rejected! */
453:         tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
454:         tr_reject = 1;
455:       } else {
456:         /* Compute trial step and function value */
457:         VecCopy(tao->solution, tl->W);
458:         VecAXPY(tl->W, 1.0, tao->stepdirection);
459:         TaoComputeObjective(tao, tl->W, &ftrial);

461:         if (PetscIsInfOrNanReal(ftrial)) {
462:           tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
463:           tr_reject = 1;
464:         } else {
465:           /* Compute the actual reduction */
466:           actred = f - ftrial;
467:           prered = -prered;
468:           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
469:               (PetscAbsScalar(prered) <= tl->epsilon)) {
470:             kappa = 1.0;
471:           } else {
472:             kappa = actred / prered;
473:           }

475:           /* Accept or reject the step and update radius */
476:           if (kappa < tl->eta1) {
477:             /* Reject the step */
478:             tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
479:             tr_reject = 1;
480:           } else {
481:             /* Accept the step */
482:             if (kappa < tl->eta2) {
483:               /* Marginal bad step */
484:               tao->trust = tl->alpha2 * PetscMin(tao->trust, norm_d);
485:             } else if (kappa < tl->eta3) {
486:               /* Reasonable step */
487:               tao->trust = tl->alpha3 * tao->trust;
488:             } else if (kappa < tl->eta4) {
489:               /* Good step */
490:               tao->trust = PetscMax(tl->alpha4 * norm_d, tao->trust);
491:             } else {
492:               /* Very good step */
493:               tao->trust = PetscMax(tl->alpha5 * norm_d, tao->trust);
494:             }
495:           }
496:         }
497:       }
498:     } else {
499:       /* Get predicted reduction */
500:       if (NTL_KSP_NASH == tl->ksp_type) {
501:         KSPNASHGetObjFcn(tao->ksp,&prered);
502:       } else if (NTL_KSP_STCG == tl->ksp_type) {
503:         KSPSTCGGetObjFcn(tao->ksp,&prered);
504:       } else { /* gltr */
505:         KSPGLTRGetObjFcn(tao->ksp,&prered);
506:       }

508:       if (prered >= 0.0) {
509:         /* The predicted reduction has the wrong sign.  This cannot
510:            happen in infinite precision arithmetic.  Step should
511:            be rejected! */
512:         tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
513:         tr_reject = 1;
514:       } else {
515:         VecCopy(tao->solution, tl->W);
516:         VecAXPY(tl->W, 1.0, tao->stepdirection);
517:         TaoComputeObjective(tao, tl->W, &ftrial);
518:         if (PetscIsInfOrNanReal(ftrial)) {
519:           tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
520:           tr_reject = 1;
521:         } else {
522:           VecDot(tao->gradient, tao->stepdirection, &gdx);

524:           actred = f - ftrial;
525:           prered = -prered;
526:           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
527:               (PetscAbsScalar(prered) <= tl->epsilon)) {
528:             kappa = 1.0;
529:           } else {
530:             kappa = actred / prered;
531:           }

533:           tau_1 = tl->theta * gdx / (tl->theta * gdx - (1.0 - tl->theta) * prered + actred);
534:           tau_2 = tl->theta * gdx / (tl->theta * gdx + (1.0 + tl->theta) * prered - actred);
535:           tau_min = PetscMin(tau_1, tau_2);
536:           tau_max = PetscMax(tau_1, tau_2);

538:           if (kappa >= 1.0 - tl->mu1) {
539:             /* Great agreement; accept step and update radius */
540:             if (tau_max < 1.0) {
541:               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
542:             } else if (tau_max > tl->gamma4) {
543:               tao->trust = PetscMax(tao->trust, tl->gamma4 * norm_d);
544:             } else {
545:               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
546:             }
547:           } else if (kappa >= 1.0 - tl->mu2) {
548:             /* Good agreement */

550:             if (tau_max < tl->gamma2) {
551:               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
552:             } else if (tau_max > tl->gamma3) {
553:               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
554:             } else if (tau_max < 1.0) {
555:               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
556:             } else {
557:               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
558:             }
559:           } else {
560:             /* Not good agreement */
561:             if (tau_min > 1.0) {
562:               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
563:             } else if (tau_max < tl->gamma1) {
564:               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
565:             } else if ((tau_min < tl->gamma1) && (tau_max >= 1.0)) {
566:               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
567:             } else if ((tau_1 >= tl->gamma1) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1) || (tau_2 >= 1.0))) {
568:               tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
569:             } else if ((tau_2 >= tl->gamma1) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1) || (tau_1 >= 1.0))) {
570:               tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
571:             } else {
572:               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
573:             }
574:             tr_reject = 1;
575:           }
576:         }
577:       }
578:     }
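    /* tr_reject now records whether the trial step failed the trust-region
       acceptance test; a rejected step falls back to the line search below,
       otherwise the trial point stored in W is accepted as the new iterate. */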

580:     if (tr_reject) {
581:       /* The trust-region constraints rejected the step.  Apply a linesearch.
582:          Check for descent direction. */
583:       VecDot(tao->stepdirection, tao->gradient, &gdx);
584:       if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
585:         /* Newton step is not a descent direction or produced Inf or NaN */

587:         if (NTL_PC_BFGS != tl->pc_type) {
588:           /* We don't have the BFGS matrix around and updated;
589:              must use the gradient direction in this case */
590:           VecCopy(tao->gradient, tao->stepdirection);
591:           VecScale(tao->stepdirection, -1.0);
592:           ++tl->grad;
593:           stepType = NTL_GRADIENT;
594:         } else {
595:           /* Attempt to use the BFGS direction */
596:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);
597:           VecScale(tao->stepdirection, -1.0);

599:           /* Check for success (descent direction) */
600:           VecDot(tao->stepdirection, tao->gradient, &gdx);
601:           if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
602:             /* BFGS direction is not a descent direction or produced Inf or NaN.
603:                We can assert bfgsUpdates > 1 in this case because
604:                the first solve produces the scaled gradient direction,
605:                which is guaranteed to be descent */

607:             /* Use steepest descent direction (scaled) */
608:             if (f != 0.0) {
609:               delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
610:             } else {
611:               delta = 2.0 / (gnorm*gnorm);
612:             }
613:             MatLMVMSetDelta(tl->M, delta);
614:             MatLMVMReset(tl->M);
615:             MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
616:             MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);
617:             VecScale(tao->stepdirection, -1.0);

619:             bfgsUpdates = 1;
620:             ++tl->sgrad;
621:             stepType = NTL_SCALED_GRADIENT;
622:           } else {
623:             if (1 == bfgsUpdates) {
624:               /* The first BFGS direction is always the scaled gradient */
625:               ++tl->sgrad;
626:               stepType = NTL_SCALED_GRADIENT;
627:             } else {
628:               ++tl->bfgs;
629:               stepType = NTL_BFGS;
630:             }
631:           }
632:         }
633:       } else {
634:         /* Computed Newton step is descent */
635:         ++tl->newt;
636:         stepType = NTL_NEWTON;
637:       }

639:       /* Perform the linesearch */
640:       fold = f;
641:       VecCopy(tao->solution, tl->Xold);
642:       VecCopy(tao->gradient, tl->Gold);

644:       step = 1.0;
645:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
646:       TaoAddLineSearchCounts(tao);

648:       while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NTL_GRADIENT) {
649:         /* Linesearch failed */
650:         f = fold;
651:         VecCopy(tl->Xold, tao->solution);
652:         VecCopy(tl->Gold, tao->gradient);

654:         switch(stepType) {
655:         case NTL_NEWTON:
656:           /* Failed to obtain acceptable iterate with Newton step */

658:           if (NTL_PC_BFGS != tl->pc_type) {
659:             /* We don't have the BFGS matrix around and being updated;
660:                must use the gradient direction in this case */
661:             VecCopy(tao->gradient, tao->stepdirection);
662:             ++tl->grad;
663:             stepType = NTL_GRADIENT;
664:           } else {
665:             /* Attempt to use the BFGS direction */
666:             MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);


669:             /* Check for success (descent direction) */
670:             VecDot(tao->stepdirection, tao->gradient, &gdx);
671:             if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
672:               /* BFGS direction is not a descent direction or produced
673:                  Inf or NaN.  We can assert bfgsUpdates > 1 in this case.
674:                  Use the steepest descent direction (scaled) */

676:               if (f != 0.0) {
677:                 delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
678:               } else {
679:                 delta = 2.0 / (gnorm*gnorm);
680:               }
681:               MatLMVMSetDelta(tl->M, delta);
682:               MatLMVMReset(tl->M);
683:               MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
684:               MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

686:               bfgsUpdates = 1;
687:               ++tl->sgrad;
688:               stepType = NTL_SCALED_GRADIENT;
689:             } else {
690:               if (1 == bfgsUpdates) {
691:                 /* The first BFGS direction is always the scaled gradient */
692:                 ++tl->sgrad;
693:                 stepType = NTL_SCALED_GRADIENT;
694:               } else {
695:                 ++tl->bfgs;
696:                 stepType = NTL_BFGS;
697:               }
698:             }
699:           }
700:           break;

702:         case NTL_BFGS:
703:           /* Can only enter if pc_type == NTL_PC_BFGS
704:              Failed to obtain acceptable iterate with BFGS step
705:              Attempt to use the scaled gradient direction */

707:           if (f != 0.0) {
708:             delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
709:           } else {
710:             delta = 2.0 / (gnorm*gnorm);
711:           }
712:           MatLMVMSetDelta(tl->M, delta);
713:           MatLMVMReset(tl->M);
714:           MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
715:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

717:           bfgsUpdates = 1;
718:           ++tl->sgrad;
719:           stepType = NTL_SCALED_GRADIENT;
720:           break;

722:         case NTL_SCALED_GRADIENT:
723:           /* Can only enter if pc_type == NTL_PC_BFGS
724:              The scaled gradient step did not produce a new iterate;
725:              attempt to use the gradient direction.
726:              Need to make sure we are not using a different diagonal scaling */
727:           MatLMVMSetScale(tl->M, tl->Diag);
728:           MatLMVMSetDelta(tl->M, 1.0);
729:           MatLMVMReset(tl->M);
730:           MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
731:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

733:           bfgsUpdates = 1;
734:           ++tl->grad;
735:           stepType = NTL_GRADIENT;
736:           break;
737:         }
738:         VecScale(tao->stepdirection, -1.0);

740:         /* This may be incorrect; linesearch has values for stepmax and stepmin
741:            that should be reset. */
742:         step = 1.0;
743:         TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
744:         TaoAddLineSearchCounts(tao);
745:       }

747:       if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
748:         /* Failed to find an improving point */
749:         f = fold;
750:         VecCopy(tl->Xold, tao->solution);
751:         VecCopy(tl->Gold, tao->gradient);
752:         tao->trust = 0.0;
753:         step = 0.0;
754:         reason = TAO_DIVERGED_LS_FAILURE;
755:         tao->reason = TAO_DIVERGED_LS_FAILURE;
756:         break;
757:       } else if (stepType == NTL_NEWTON) {
758:         if (step < tl->nu1) {
759:           /* Very bad step taken; reduce radius */
760:           tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
761:         } else if (step < tl->nu2) {
762:           /* Reasonably bad step taken; reduce radius */
763:           tao->trust = tl->omega2 * PetscMin(norm_d, tao->trust);
764:         } else if (step < tl->nu3) {
765:           /* Reasonable step was taken; leave radius alone */
766:           if (tl->omega3 < 1.0) {
767:             tao->trust = tl->omega3 * PetscMin(norm_d, tao->trust);
768:           } else if (tl->omega3 > 1.0) {
769:             tao->trust = PetscMax(tl->omega3 * norm_d, tao->trust);
770:           }
771:         } else if (step < tl->nu4) {
772:           /* Full step taken; increase the radius */
773:           tao->trust = PetscMax(tl->omega4 * norm_d, tao->trust);
774:         } else {
775:           /* More than full step taken; increase the radius */
776:           tao->trust = PetscMax(tl->omega5 * norm_d, tao->trust);
777:         }
778:       } else {
779:         /* Newton step was not good; reduce the radius */
780:         tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
781:       }
782:     } else {
783:       /* Trust-region step is accepted */
784:       VecCopy(tl->W, tao->solution);
785:       f = ftrial;
786:       TaoComputeGradient(tao, tao->solution, tao->gradient);
787:       ++tl->ntrust;
788:     }

790:     /* The radius may have been increased; modify if it is too large */
791:     tao->trust = PetscMin(tao->trust, tl->max_radius);

793:     /* Check for convergence */
794:     VecNorm(tao->gradient, NORM_2, &gnorm);
795:     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
796:     needH = 1;

798:     TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason);
799:   }
800:   return(0);
801: }

803: /* ---------------------------------------------------------- */
806: static PetscErrorCode TaoSetUp_NTL(Tao tao)
807: {
808:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

812:   if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient); }
813:   if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
814:   if (!tl->W) { VecDuplicate(tao->solution, &tl->W);}
815:   if (!tl->Xold) { VecDuplicate(tao->solution, &tl->Xold);}
816:   if (!tl->Gold) { VecDuplicate(tao->solution, &tl->Gold);}
817:   tl->Diag = 0;
818:   tl->M = 0;
819:   return(0);
820: }

822: /*------------------------------------------------------------*/
825: static PetscErrorCode TaoDestroy_NTL(Tao tao)
826: {
827:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

831:   if (tao->setupcalled) {
832:     VecDestroy(&tl->W);
833:     VecDestroy(&tl->Xold);
834:     VecDestroy(&tl->Gold);
835:   }
836:   VecDestroy(&tl->Diag);
837:   MatDestroy(&tl->M);
838:   PetscFree(tao->data);
839:   return(0);
840: }

842: /*------------------------------------------------------------*/
845: static PetscErrorCode TaoSetFromOptions_NTL(Tao tao)
846: {
847:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

851:   PetscOptionsHead("Newton trust region with line search method for unconstrained optimization");
852:   PetscOptionsEList("-tao_ntl_ksp_type", "ksp type", "", NTL_KSP, NTL_KSP_TYPES, NTL_KSP[tl->ksp_type], &tl->ksp_type, 0);
853:   PetscOptionsEList("-tao_ntl_pc_type", "pc type", "", NTL_PC, NTL_PC_TYPES, NTL_PC[tl->pc_type], &tl->pc_type, 0);
854:   PetscOptionsEList("-tao_ntl_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tl->bfgs_scale_type], &tl->bfgs_scale_type, 0);
855:   PetscOptionsEList("-tao_ntl_init_type", "radius initialization type", "", NTL_INIT, NTL_INIT_TYPES, NTL_INIT[tl->init_type], &tl->init_type, 0);
856:   PetscOptionsEList("-tao_ntl_update_type", "radius update type", "", NTL_UPDATE, NTL_UPDATE_TYPES, NTL_UPDATE[tl->update_type], &tl->update_type, 0);
857:   PetscOptionsReal("-tao_ntl_eta1", "poor steplength; reduce radius", "", tl->eta1, &tl->eta1, 0);
858:   PetscOptionsReal("-tao_ntl_eta2", "reasonable steplength; leave radius alone", "", tl->eta2, &tl->eta2, 0);
859:   PetscOptionsReal("-tao_ntl_eta3", "good steplength; increase radius", "", tl->eta3, &tl->eta3, 0);
860:   PetscOptionsReal("-tao_ntl_eta4", "excellent steplength; greatly increase radius", "", tl->eta4, &tl->eta4, 0);
861:   PetscOptionsReal("-tao_ntl_alpha1", "", "", tl->alpha1, &tl->alpha1, 0);
862:   PetscOptionsReal("-tao_ntl_alpha2", "", "", tl->alpha2, &tl->alpha2, 0);
863:   PetscOptionsReal("-tao_ntl_alpha3", "", "", tl->alpha3, &tl->alpha3, 0);
864:   PetscOptionsReal("-tao_ntl_alpha4", "", "", tl->alpha4, &tl->alpha4, 0);
865:   PetscOptionsReal("-tao_ntl_alpha5", "", "", tl->alpha5, &tl->alpha5, 0);
866:   PetscOptionsReal("-tao_ntl_nu1", "poor steplength; reduce radius", "", tl->nu1, &tl->nu1, 0);
867:   PetscOptionsReal("-tao_ntl_nu2", "reasonable steplength; leave radius alone", "", tl->nu2, &tl->nu2, 0);
868:   PetscOptionsReal("-tao_ntl_nu3", "good steplength; increase radius", "", tl->nu3, &tl->nu3, 0);
869:   PetscOptionsReal("-tao_ntl_nu4", "excellent steplength; greatly increase radius", "", tl->nu4, &tl->nu4, 0);
870:   PetscOptionsReal("-tao_ntl_omega1", "", "", tl->omega1, &tl->omega1, 0);
871:   PetscOptionsReal("-tao_ntl_omega2", "", "", tl->omega2, &tl->omega2, 0);
872:   PetscOptionsReal("-tao_ntl_omega3", "", "", tl->omega3, &tl->omega3, 0);
873:   PetscOptionsReal("-tao_ntl_omega4", "", "", tl->omega4, &tl->omega4, 0);
874:   PetscOptionsReal("-tao_ntl_omega5", "", "", tl->omega5, &tl->omega5, 0);
875:   PetscOptionsReal("-tao_ntl_mu1_i", "", "", tl->mu1_i, &tl->mu1_i, 0);
876:   PetscOptionsReal("-tao_ntl_mu2_i", "", "", tl->mu2_i, &tl->mu2_i, 0);
877:   PetscOptionsReal("-tao_ntl_gamma1_i", "", "", tl->gamma1_i, &tl->gamma1_i, 0);
878:   PetscOptionsReal("-tao_ntl_gamma2_i", "", "", tl->gamma2_i, &tl->gamma2_i, 0);
879:   PetscOptionsReal("-tao_ntl_gamma3_i", "", "", tl->gamma3_i, &tl->gamma3_i, 0);
880:   PetscOptionsReal("-tao_ntl_gamma4_i", "", "", tl->gamma4_i, &tl->gamma4_i, 0);
881:   PetscOptionsReal("-tao_ntl_theta_i", "", "", tl->theta_i, &tl->theta_i, 0);
882:   PetscOptionsReal("-tao_ntl_mu1", "", "", tl->mu1, &tl->mu1, 0);
883:   PetscOptionsReal("-tao_ntl_mu2", "", "", tl->mu2, &tl->mu2, 0);
884:   PetscOptionsReal("-tao_ntl_gamma1", "", "", tl->gamma1, &tl->gamma1, 0);
885:   PetscOptionsReal("-tao_ntl_gamma2", "", "", tl->gamma2, &tl->gamma2, 0);
886:   PetscOptionsReal("-tao_ntl_gamma3", "", "", tl->gamma3, &tl->gamma3, 0);
887:   PetscOptionsReal("-tao_ntl_gamma4", "", "", tl->gamma4, &tl->gamma4, 0);
888:   PetscOptionsReal("-tao_ntl_theta", "", "", tl->theta, &tl->theta, 0);
889:   PetscOptionsReal("-tao_ntl_min_radius", "lower bound on initial radius", "", tl->min_radius, &tl->min_radius, 0);
890:   PetscOptionsReal("-tao_ntl_max_radius", "upper bound on radius", "", tl->max_radius, &tl->max_radius, 0);
891:   PetscOptionsReal("-tao_ntl_epsilon", "tolerance used when computing actual and predicted reduction", "", tl->epsilon, &tl->epsilon, 0);
892:   PetscOptionsTail();
893:   TaoLineSearchSetFromOptions(tao->linesearch);
894:   KSPSetFromOptions(tao->ksp);
895:   return(0);
896: }

898: /*------------------------------------------------------------*/
901: static PetscErrorCode TaoView_NTL(Tao tao, PetscViewer viewer)
902: {
903:   TAO_NTL        *tl = (TAO_NTL *)tao->data;
904:   PetscInt       nrejects;
905:   PetscBool      isascii;

909:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
910:   if (isascii) {
911:     PetscViewerASCIIPushTab(viewer);
912:     if (NTL_PC_BFGS == tl->pc_type && tl->M) {
913:       MatLMVMGetRejects(tl->M, &nrejects);
914:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects);
915:     }
916:     PetscViewerASCIIPrintf(viewer, "Trust-region steps: %D\n", tl->ntrust);
917:     PetscViewerASCIIPrintf(viewer, "Newton search steps: %D\n", tl->newt);
918:     PetscViewerASCIIPrintf(viewer, "BFGS search steps: %D\n", tl->bfgs);
919:     PetscViewerASCIIPrintf(viewer, "Scaled gradient search steps: %D\n", tl->sgrad);
920:     PetscViewerASCIIPrintf(viewer, "Gradient search steps: %D\n", tl->grad);
921:     PetscViewerASCIIPopTab(viewer);
922:   }
923:   return(0);
924: }

926: /* ---------------------------------------------------------- */
927: /*MC
928:   TAONTL - Newton's method with trust region and linesearch
929:   for unconstrained minimization.
930:   At each iteration, the Newton trust region method solves the system for d
931:   and performs a line search in the d direction:

933:             min_d  0.5 d^T H_k d + g_k^T d,  s.t.  ||d|| < Delta_k

935:   Options Database Keys:
936: + -tao_ntl_ksp_type - "nash","stcg","gltr"
937: . -tao_ntl_pc_type - "none","ahess","bfgs","petsc"
938: . -tao_ntl_bfgs_scale_type - type of scaling with bfgs pc, "ahess" or "bfgs"
939: . -tao_ntl_init_type - "constant","direction","interpolation"
940: . -tao_ntl_update_type - "reduction","interpolation"
941: . -tao_ntl_min_radius - lower bound on trust region radius
942: . -tao_ntl_max_radius - upper bound on trust region radius
943: . -tao_ntl_epsilon - tolerance for accepting actual / predicted reduction
944: . -tao_ntl_mu1_i - mu1 interpolation init factor
945: . -tao_ntl_mu2_i - mu2 interpolation init factor
946: . -tao_ntl_gamma1_i - gamma1 interpolation init factor
947: . -tao_ntl_gamma2_i - gamma2 interpolation init factor
948: . -tao_ntl_gamma3_i - gamma3 interpolation init factor
949: . -tao_ntl_gamma4_i - gamma4 interpolation init factor
950: . -tao_ntl_theta_i - theta interpolation init factor
951: . -tao_ntl_eta1 - eta1 reduction update factor
952: . -tao_ntl_eta2 - eta2 reduction update factor
953: . -tao_ntl_eta3 - eta3 reduction update factor
954: . -tao_ntl_eta4 - eta4 reduction update factor
955: . -tao_ntl_alpha1 - alpha1 reduction update factor
956: . -tao_ntl_alpha2 - alpha2 reduction update factor
957: . -tao_ntl_alpha3 - alpha3 reduction update factor
958: . -tao_ntl_alpha4 - alpha4 reduction update factor
959: . -tao_ntl_alpha5 - alpha5 reduction update factor
960: . -tao_ntl_mu1 - mu1 interpolation update
961: . -tao_ntl_mu2 - mu2 interpolation update
962: . -tao_ntl_gamma1 - gamma1 interpolation update
963: . -tao_ntl_gamma2 - gamma2 interpolation update
964: . -tao_ntl_gamma3 - gamma3 interpolation update
965: . -tao_ntl_gamma4 - gamma4 interpolation update
966: - -tao_ntl_theta - theta interpolation update

968:   Level: beginner
969: M*/
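A minimal driver sketch (not part of ntl.c, error checking omitted) showing how this solver is typically selected from application code; FormFunctionGradient, FormHessian, the context user, the solution vector x, and the Hessian matrix H are placeholders the application must provide:

  #include <petsctao.h>

  Tao tao;
  TaoCreate(PETSC_COMM_WORLD, &tao);
  TaoSetType(tao, TAONTL);                              /* select this solver */
  TaoSetInitialVector(tao, x);
  TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
  TaoSetHessianRoutine(tao, H, H, FormHessian, &user);
  TaoSetFromOptions(tao);                               /* reads the -tao_ntl_* options listed above */
  TaoSolve(tao);
  TaoDestroy(&tao);

The trust-region variants are then chosen on the command line, for example -tao_ntl_ksp_type gltr -tao_ntl_pc_type bfgs -tao_ntl_init_type interpolation.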

971: EXTERN_C_BEGIN
974: PetscErrorCode TaoCreate_NTL(Tao tao)
975: {
976:   TAO_NTL        *tl;
978:   const char     *morethuente_type = TAOLINESEARCHMT;

981:   PetscNewLog(tao,&tl);
982:   tao->ops->setup = TaoSetUp_NTL;
983:   tao->ops->solve = TaoSolve_NTL;
984:   tao->ops->view = TaoView_NTL;
985:   tao->ops->setfromoptions = TaoSetFromOptions_NTL;
986:   tao->ops->destroy = TaoDestroy_NTL;

988:   tao->max_it = 50;
989: #if defined(PETSC_USE_REAL_SINGLE)
990:   tao->fatol = 1e-5;
991:   tao->frtol = 1e-5;
992: #else
993:   tao->fatol = 1e-10;
994:   tao->frtol = 1e-10;
995: #endif
996:   tao->data = (void*)tl;

998:   tao->trust0 = 100.0;


1001:   /* Default values for trust-region radius update based on steplength */
1002:   tl->nu1 = 0.25;
1003:   tl->nu2 = 0.50;
1004:   tl->nu3 = 1.00;
1005:   tl->nu4 = 1.25;

1007:   tl->omega1 = 0.25;
1008:   tl->omega2 = 0.50;
1009:   tl->omega3 = 1.00;
1010:   tl->omega4 = 2.00;
1011:   tl->omega5 = 4.00;

1013:   /* Default values for trust-region radius update based on reduction */
1014:   tl->eta1 = 1.0e-4;
1015:   tl->eta2 = 0.25;
1016:   tl->eta3 = 0.50;
1017:   tl->eta4 = 0.90;

1019:   tl->alpha1 = 0.25;
1020:   tl->alpha2 = 0.50;
1021:   tl->alpha3 = 1.00;
1022:   tl->alpha4 = 2.00;
1023:   tl->alpha5 = 4.00;

1025:   /* Default values for trust-region radius update based on interpolation */
1026:   tl->mu1 = 0.10;
1027:   tl->mu2 = 0.50;

1029:   tl->gamma1 = 0.25;
1030:   tl->gamma2 = 0.50;
1031:   tl->gamma3 = 2.00;
1032:   tl->gamma4 = 4.00;

1034:   tl->theta = 0.05;

1036:   /* Default values for trust region initialization based on interpolation */
1037:   tl->mu1_i = 0.35;
1038:   tl->mu2_i = 0.50;

1040:   tl->gamma1_i = 0.0625;
1041:   tl->gamma2_i = 0.5;
1042:   tl->gamma3_i = 2.0;
1043:   tl->gamma4_i = 5.0;

1045:   tl->theta_i = 0.25;

1047:   /* Remaining parameters */
1048:   tl->min_radius = 1.0e-10;
1049:   tl->max_radius = 1.0e10;
1050:   tl->epsilon = 1.0e-6;

1052:   tl->ksp_type        = NTL_KSP_STCG;
1053:   tl->pc_type         = NTL_PC_BFGS;
1054:   tl->bfgs_scale_type = BFGS_SCALE_AHESS;
1055:   tl->init_type       = NTL_INIT_INTERPOLATION;
1056:   tl->update_type     = NTL_UPDATE_REDUCTION;

1058:   TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
1059:   TaoLineSearchSetType(tao->linesearch, morethuente_type);
1060:   TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
1061:   KSPCreate(((PetscObject)tao)->comm, &tao->ksp);
1062:   return(0);
1063: }
1064: EXTERN_C_END