ITK 5.2.0 (Insight Toolkit)
itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType > Class Template Reference
#include <itkQuasiNewtonOptimizerv4.h>
Public Member Functions
virtual ::itk::LightObject::Pointer | CreateAnother () const |
virtual void | EstimateNewtonStepOverSubRange (const IndexRangeType &subrange) |
virtual const char * | GetNameOfClass () const |
virtual const DerivativeType & | GetNewtonStep () const |
virtual void | SetMaximumIterationsWithoutProgress (SizeValueType _arg) |
virtual void | SetMaximumNewtonStepSizeInPhysicalUnits (TInternalComputationValueType _arg) |
void | StartOptimization (bool doOnlyInitialization=false) override |
Public Member Functions inherited from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >
virtual void | SetLearningRate (TInternalComputationValueType _arg) |
virtual const TInternalComputationValueType & | GetLearningRate () const |
virtual void | SetMaximumStepSizeInPhysicalUnits (TInternalComputationValueType _arg) |
virtual const TInternalComputationValueType & | GetMaximumStepSizeInPhysicalUnits () const |
virtual void | SetDoEstimateLearningRateAtEachIteration (bool _arg) |
virtual const bool & | GetDoEstimateLearningRateAtEachIteration () const |
virtual void | DoEstimateLearningRateAtEachIterationOn () |
virtual void | DoEstimateLearningRateAtEachIterationOff () |
virtual void | SetDoEstimateLearningRateOnce (bool _arg) |
virtual const bool & | GetDoEstimateLearningRateOnce () const |
virtual void | DoEstimateLearningRateOnceOn () |
virtual void | DoEstimateLearningRateOnceOff () |
virtual void | SetMinimumConvergenceValue (TInternalComputationValueType _arg) |
virtual void | SetConvergenceWindowSize (SizeValueType _arg) |
virtual const TInternalComputationValueType & | GetConvergenceValue () const |
virtual void | SetReturnBestParametersAndValue (bool _arg) |
virtual const bool & | GetReturnBestParametersAndValue () const |
virtual void | ReturnBestParametersAndValueOn () |
virtual void | ReturnBestParametersAndValueOff () |
void | StopOptimization () override |
void | ResumeOptimization () override |
virtual void | EstimateLearningRate () |
Public Member Functions inherited from itk::GradientDescentOptimizerBasev4Template< TInternalComputationValueType >
virtual const DerivativeType & | GetGradient () const |
virtual const StopConditionObjectToObjectOptimizerEnum & | GetStopCondition () const |
void | SetNumberOfIterations (const SizeValueType numberOfIterations) override |
SizeValueType | GetNumberOfIterations () const override |
SizeValueType | GetCurrentIteration () const override |
const StopConditionReturnStringType | GetStopConditionDescription () const override |
virtual void | ModifyGradientByScales () |
virtual void | ModifyGradientByLearningRate () |
Public Member Functions inherited from itk::ObjectToObjectOptimizerBaseTemplate< TInternalComputationValueType >
virtual void | SetMetric (MetricType *_arg) |
virtual MetricType * | GetModifiableMetric () |
virtual const MetricType * | GetMetric () const |
virtual const MeasureType & | GetCurrentMetricValue () const |
virtual const MeasureType & | GetValue () const |
virtual void | SetScales (const ScalesType &scales) |
virtual const ScalesType & | GetScales () const |
virtual const bool & | GetScalesAreIdentity () const |
virtual void | SetWeights (ScalesType _arg) |
virtual const ScalesType & | GetWeights () const |
virtual const bool & | GetWeightsAreIdentity () const |
bool | GetScalesInitialized () const |
virtual void | SetScalesEstimator (ScalesEstimatorType *_arg) |
virtual void | SetDoEstimateScales (bool _arg) |
virtual const bool & | GetDoEstimateScales () const |
virtual void | DoEstimateScalesOn () |
virtual void | DoEstimateScalesOff () |
virtual void | SetNumberOfWorkUnits (ThreadIdType number) |
virtual const ThreadIdType & | GetNumberOfWorkUnits () const |
virtual const ParametersType & | GetCurrentPosition () const |
Public Member Functions inherited from itk::Object
unsigned long | AddObserver (const EventObject &event, Command *) |
unsigned long | AddObserver (const EventObject &event, Command *) const |
unsigned long | AddObserver (const EventObject &event, std::function< void(const EventObject &)> function) const |
virtual void | DebugOff () const |
virtual void | DebugOn () const |
Command * | GetCommand (unsigned long tag) |
bool | GetDebug () const |
MetaDataDictionary & | GetMetaDataDictionary () |
const MetaDataDictionary & | GetMetaDataDictionary () const |
virtual ModifiedTimeType | GetMTime () const |
virtual const TimeStamp & | GetTimeStamp () const |
bool | HasObserver (const EventObject &event) const |
void | InvokeEvent (const EventObject &) |
void | InvokeEvent (const EventObject &) const |
virtual void | Modified () const |
void | Register () const override |
void | RemoveAllObservers () |
void | RemoveObserver (unsigned long tag) |
void | SetDebug (bool debugFlag) const |
void | SetReferenceCount (int) override |
void | UnRegister () const noexcept override |
void | SetMetaDataDictionary (const MetaDataDictionary &rhs) |
void | SetMetaDataDictionary (MetaDataDictionary &&rrhs) |
virtual void | SetObjectName (std::string _arg) |
virtual const std::string & | GetObjectName () const |
Public Member Functions inherited from itk::LightObject
Pointer | Clone () const |
virtual void | Delete () |
virtual int | GetReferenceCount () const |
void | Print (std::ostream &os, Indent indent=0) const |
Static Public Member Functions
static Pointer | New () |
Static Public Member Functions inherited from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >
static Pointer | New () |
Static Public Member Functions inherited from itk::Object
static bool | GetGlobalWarningDisplay () |
static void | GlobalWarningDisplayOff () |
static void | GlobalWarningDisplayOn () |
static Pointer | New () |
static void | SetGlobalWarningDisplay (bool val) |
Static Public Member Functions inherited from itk::LightObject
static void | BreakOnError () |
static Pointer | New () |
Private Attributes
DomainThreader< ThreadedIndexedContainerPartitioner, Self >::Pointer | m_EstimateNewtonStepThreader |
Implement a Quasi-Newton optimizer with BFGS Hessian estimation.
A second-order approximation of the cost function is usually more efficient, since it estimates the descent or ascent direction more precisely. However, computing the Hessian is often expensive or unavailable. Quasi-Newton methods instead estimate a Hessian from the gradients of previous steps. Here a specific Quasi-Newton method, BFGS, is used to compute the Quasi-Newton steps.
The Quasi-Newton method does not always produce a valid step, e.g., when the metric function is not locally convex. In that case, a properly scaled gradient step is used instead.
A helper member object, m_ScalesEstimator, may be set to estimate parameter scales and step scales. A step scale measures the magnitude of a step and is used to compute the learning rate.
When m_ScalesEstimator is set, SetMaximumNewtonStepSizeInPhysicalUnits() may be called to set the maximum step size. If it is not called, m_MaximumNewtonStepSizeInPhysicalUnits defaults to lambda * OptimizerParameterScalesEstimatorTemplate::EstimateMaximumStepSize(), where lambda is in [1,5].
When m_ScalesEstimator is not set, the parameter scales and learning rates default to one, or may be set manually by the user.
Definition at line 60 of file itkQuasiNewtonOptimizerv4.h.
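As a rough usage sketch only: the snippet below wires this optimizer into a typical v4 setup. The metric and scales-estimator types are illustrative choices (any compatible v4 metric and OptimizerParameterScalesEstimatorTemplate subclass would do), and the metric is assumed to be fully configured elsewhere.

#include "itkQuasiNewtonOptimizerv4.h"
#include "itkMeanSquaresImageToImageMetricv4.h"
#include "itkRegistrationParameterScalesFromPhysicalShift.h"
#include "itkImage.h"

int main()
{
  using ImageType = itk::Image<float, 3>;
  using MetricType = itk::MeanSquaresImageToImageMetricv4<ImageType, ImageType>;
  using ScalesEstimatorType = itk::RegistrationParameterScalesFromPhysicalShift<MetricType>;
  using OptimizerType = itk::QuasiNewtonOptimizerv4Template<double>;

  // The metric is assumed to be given its images, transforms, and Initialize() call elsewhere.
  MetricType::Pointer metric = MetricType::New();

  ScalesEstimatorType::Pointer scalesEstimator = ScalesEstimatorType::New();
  scalesEstimator->SetMetric(metric);

  OptimizerType::Pointer optimizer = OptimizerType::New();
  optimizer->SetMetric(metric);
  optimizer->SetScalesEstimator(scalesEstimator);          // parameter scales and step scales estimated automatically
  optimizer->SetNumberOfIterations(100);
  optimizer->SetMaximumIterationsWithoutProgress(10);
  optimizer->SetMaximumNewtonStepSizeInPhysicalUnits(3.0); // optional; otherwise defaults as described above
  optimizer->StartOptimization();

  return 0;
}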
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::ConstPointer = SmartPointer<const Self> |
Definition at line 70 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::DerivativeType = typename Superclass::DerivativeType |
Definition at line 83 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::HessianArrayType = std::vector<HessianType> |
Type for an array of Hessian matrices, for local support.
Definition at line 90 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::HessianType = itk::Array2D<TInternalComputationValueType> |
Type for the Hessian matrix in the Quasi-Newton method.
Definition at line 87 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::IndexRangeType = typename Superclass::IndexRangeType |
Definition at line 84 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::InternalComputationValueType = TInternalComputationValueType |
It should be possible to derive the internal computation type from the class object.
Definition at line 79 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::MeasureType = typename Superclass::MeasureType |
Definition at line 82 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::ParametersType = typename Superclass::ParametersType |
Definition at line 81 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::Pointer = SmartPointer<Self> |
Definition at line 69 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::Self = QuasiNewtonOptimizerv4Template |
Standard class type aliases.
Definition at line 67 of file itkQuasiNewtonOptimizerv4.h.
using itk::QuasiNewtonOptimizerv4Template< TInternalComputationValueType >::Superclass = GradientDescentOptimizerv4Template<TInternalComputationValueType> |
Definition at line 68 of file itkQuasiNewtonOptimizerv4.h.
Member Function Documentation
QuasiNewtonOptimizerv4Template() [protected]
~QuasiNewtonOptimizerv4Template() [override, protected, default]
[override, protected, virtual]
Advance one step using the Quasi-Newton step. When the Newton step is invalid, the gradient step will be used.
Reimplemented from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >.
[protected]
Combine a gradient step with a Newton step. The Newton step will be used when it is valid. Otherwise the gradient step will be used.
[protected, virtual]
Estimate the next Hessian and step with the BFGS method. The details of the method are described at http://en.wikipedia.org/wiki/BFGS_method .
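For background on the linked method (this is the textbook BFGS Hessian update, not a line-by-line transcription of the ITK code): with x_k the parameters at step k, s_k = x_{k+1} - x_k, and y_k = \nabla f(x_{k+1}) - \nabla f(x_k), the Hessian estimate is updated as

\[
H_{k+1} = H_k + \frac{y_k y_k^{\top}}{y_k^{\top} s_k} - \frac{H_k s_k s_k^{\top} H_k}{s_k^{\top} H_k s_k},
\]

and the Quasi-Newton step is then obtained by solving H_{k+1} d_{k+1} = -\nabla f(x_{k+1}).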
CreateAnother() [virtual]
Create an object from an instance, potentially deferring to a factory. This method allows you to create an instance of an object that is exactly the same type as the referring object. This is useful in cases where an object has been cast back to a base class.
Reimplemented from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >.
[protected, virtual]
Estimate a Newton step.
EstimateNewtonStepOverSubRange() [virtual]
Estimate the Quasi-Newton step over a given index range. This function is used in the QuasiNewtonOptimizerv4EstimateNewtonStepThreaderTemplate class.
GetNameOfClass() [virtual]
Run-time type information (and related methods).
Reimplemented from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >.
GetNewtonStep() [virtual]
Get the most recent Newton step.
[protected]
Estimate and apply the learning rate(s) for a combined Newton step. A combined Newton step uses the Newton step by default, and the gradient step when the Newton step is not valid.
The learning rate is less than 1.0 and is restricted by m_MaximumNewtonStepSizeInPhysicalUnits.
New() [static]
Method for creation through the object factory.
[override, protected, virtual]
Store the best value and related parameters.
Reimplemented from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >.
[protected, virtual]
Reset the Hessian to the identity matrix and the Newton step to zeros.
SetMaximumIterationsWithoutProgress() [virtual]
Set the maximum tolerable number of iterations without any progress.
SetMaximumNewtonStepSizeInPhysicalUnits() [virtual]
Set the maximum step size.
When SetScalesEstimator is called by the user, the optimizer will compute learning rates as m_MaximumNewtonStepSizeInPhysicalUnits / m_ScalesEstimator->EstimateStepScale(newtonStep).
If SetMaximumNewtonStepSizeInPhysicalUnits is not called by the user, m_MaximumNewtonStepSizeInPhysicalUnits defaults to lambda * m_ScalesEstimator->EstimateMaximumStepSize(), where EstimateMaximumStepSize returns one voxel spacing and lambda may be in [1,5] according to our experience.
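Taken together, the restriction above amounts to the following sketch; the helper function and variable names are hypothetical illustrations, not part of the ITK API:

#include <algorithm>

// Hypothetical illustration of the learning-rate restriction described above.
// Example: a 2.0 mm maximum Newton step with a 4.0 mm step scale gives a learning rate of 0.5.
double RestrictedLearningRate(double maximumNewtonStepSizeInPhysicalUnits, double stepScale)
{
  const double rate = maximumNewtonStepSizeInPhysicalUnits / stepScale; // stepScale from m_ScalesEstimator->EstimateStepScale(newtonStep)
  return std::min(rate, 1.0);                                           // the applied learning rate never exceeds 1.0
}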
StartOptimization() [override, virtual]
Start and run the optimization.
Reimplemented from itk::GradientDescentOptimizerv4Template< TInternalComputationValueType >.
Member Data Documentation
[protected]
Definition at line 140 of file itkQuasiNewtonOptimizerv4.h.
[protected]
Definition at line 139 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The best value so far and relevant information.
Definition at line 138 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The information about the current step.
Definition at line 130 of file itkQuasiNewtonOptimizerv4.h.
m_EstimateNewtonStepThreader [private]
Threader for Newton step estimation.
Definition at line 205 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The Hessian with local support.
Definition at line 152 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The maximum tolerable number of iterations without any progress.
Definition at line 127 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The maximum Quasi-Newton step size to restrict learning rates.
Definition at line 149 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The Quasi-Newton step.
Definition at line 143 of file itkQuasiNewtonOptimizerv4.h.
[protected]
Valid flag for the Quasi-Newton steps.
Definition at line 155 of file itkQuasiNewtonOptimizerv4.h.
[protected]
Warning message during Quasi-Newton step estimation.
Definition at line 146 of file itkQuasiNewtonOptimizerv4.h.
[protected]
Definition at line 131 of file itkQuasiNewtonOptimizerv4.h.
[protected]
Definition at line 135 of file itkQuasiNewtonOptimizerv4.h.
[protected]
The information about the previous step.
Definition at line 134 of file itkQuasiNewtonOptimizerv4.h.