|
| | Optimizer () |
| constexpr std::string | name () const |
| Integer | gradient_evaluations () const |
| Integer | max_gradient_evaluations () const |
| void | max_gradient_evaluations (const Integer t_gradient_evaluations) |
| Integer | hessian_evaluations () const |
| Integer | max_hessian_evaluations () const |
| void | max_hessian_evaluations (const Integer t_hessian_evaluations) |
| | SolverBase () |
| void | reset_bounds (const Integer n=InputTrait::IsDynamic ? 0 :InputTrait::Dimension) |
| const T & | lower_bound () const |
| const T & | upper_bound () const |
| void | bounds (const T &t_lower_bound, const T &t_upper_bound) |
| constexpr Integer | input_dimension () const |
| constexpr Integer | output_dimension () const |
| Integer | function_evaluations () const |
| void | max_function_evaluations (const Integer t_max_function_evaluations) |
| Integer | iterations () const |
| Integer | max_iterations () const |
| Scalar | alpha () const |
| Integer | relaxations () const |
| Integer | max_relaxations () const |
| Scalar | tolerance () const |
| void | verbose_mode (bool t_verbose) |
| void | enable_verbose_mode () |
| void | disable_verbose_mode () |
| void | damped_mode (bool t_damped) |
| void | enable_damped_mode () |
| void | disable_damped_mode () |
| std::string | task () const |
| bool | converged () const |
| std::ostream & | ostream () const |
| bool | solve (FunctionLambda &&function, const T &x_ini, T &x_sol) |
| bool | rootfind (const FunctionBase< FunctionInput, FunctionOutput, DerivedFunction > &function, const T &x_ini, T &x_sol) |
| bool | optimize (const FunctionBase< FunctionInput, FunctionOutput, DerivedFunction > &function, const T &x_ini, T &x_sol) |
| constexpr std::string | name () const |
|
| template<typename GradientLambda> |
| bool | evaluate_gradient (GradientLambda &&gradient, const Input &x, FirstDerivative &out) |
| template<typename HessianLambda> |
| bool | evaluate_hessian (HessianLambda &&hessian, const Input &x, SecondDerivative &out) |
| template<typename FunctionLambda> |
| bool | solve (FunctionLambda &&function, const Input &x_ini, Output &x_sol) |
| template<typename FunctionLambda, typename GradientLambda> |
| bool | solve (FunctionLambda &&function, GradientLambda &&gradient, const Input &x_ini, Output &x_sol) |
| template<typename FunctionLambda, typename GradientLambda, typename HessianLambda> |
| bool | solve (FunctionLambda &&function, GradientLambda &&gradient, HessianLambda &&hessian, const Input &x_ini, Output &x_sol) |
| Integer | first_derivative_evaluations () const |
| Integer | max_first_derivative_evaluations () const |
| Integer | second_derivative_evaluations () const |
| Integer | max_second_derivative_evaluations () const |
| void | reset_counters () |
| bool | evaluate_function (FunctionLambda &&function, const T &x, TypeTrait< T >::Scalar &out) |
| bool | evaluate_first_derivative (FirstDerivativeLambda &&function, const T &x, FirstDerivative &out) |
| bool | evaluate_second_derivative (SecondDerivativeLambda &&function, const T &x, SecondDerivative &out) |
| bool | damp (FunctionLambda &&function, const T &x_old, const T &function_old, const T &step_old, T &x_new, T &function_new, T &step_new) |
| void | header () |
| void | bottom () |
| void | info (Scalar residuals, const std::string &notes="-") |
template<typename T, typename DerivedSolver>
requires ((TypeTrait<T>::IsScalar || TypeTrait<T>::IsEigen) && (!TypeTrait<T>::IsFixed || TypeTrait<T>::Dimension > 0))
class Optimist::Optimizer::Optimizer< T, DerivedSolver >
Optimizer
This section describes the scalar optimizers implemented in Optimist. The available optimizers are derivative and non-derivative methods. Derivative methods employ the function's derivative to find the minimum with high accuracy, while non-derivative methods approximate the derivative for improved efficiency in certain scenarios.
Here, the solvers are implemented for solving problems of the form
\[ \min_{\mathbf{x}} f(\mathbf{x}) \quad \text{with} \quad f: \mathbb{R}^n \rightarrow \mathbb{R} \text{,}
\]
which consists of finding the minimum of the cost function \(f\) by iteratively updating the current iterate \(\mathbf{x}_k\) until convergence is achieved. The solvers require the cost function \(f\) and its first derivative \(f^{\prime}(\mathbf{x})\) to be provided by the user. Alternatively, the derivative can be approximated numerically using finite differences, depending on the problem's complexity and the user's preference.
- Template Parameters
-
| T | Input type (scalar or Eigen vector). |
| DerivedSolver | Derived solver class. |