|
using | Vector = typename SolverBase<Real, N, 1, DerivedSolver, ForceEigen>::InputType |
using | RowVector = typename SolverBase<Real, N, 1, DerivedSolver, ForceEigen>::FirstDerivativeType |
using | Matrix = typename SolverBase<Real, N, 1, DerivedSolver, ForceEigen>::SecondDerivativeType |
using | FunctionWrapper = typename SolverBase<Real, N, 1, DerivedSolver, ForceEigen>::FunctionWrapper |
using | GradientWrapper = typename SolverBase<Real, N, 1, DerivedSolver, ForceEigen>::FirstDerivativeWrapper |
using | HessianWrapper = typename SolverBase<Real, N, 1, DerivedSolver, ForceEigen>::SecondDerivativeWrapper |
|
| Optimizer () |
std::string | name () const |
Integer | gradient_evaluations () const |
Integer | max_gradient_evaluations () const |
void | max_gradient_evaluations (Integer t_gradient_evaluations) |
Integer | hessian_evaluations () const |
Integer | max_hessian_evaluations () const |
void | max_hessian_evaluations (Integer t_hessian_evaluations) |
| SolverBase () |
const InputType & | lower_bound () const |
const InputType & | upper_bound () const |
void | bounds (const InputType &t_lower_bound, const InputType &t_upper_bound) |
constexpr Integer | input_dimension () const |
constexpr Integer | output_dimension () const |
Integer | function_evaluations () const |
void | max_function_evaluations (Integer t_max_function_evaluations) |
Integer | iterations () const |
Integer | max_iterations () const |
Real | alpha () const |
Integer | relaxations () const |
Integer | max_relaxations () const |
Real | tolerance () const |
void | verbose_mode (bool t_verbose) |
void | enable_verbose_mode () |
void | disable_verbose_mode () |
void | damped_mode (bool t_damped) |
void | enable_damped_mode () |
void | disable_damped_mode () |
std::string | task () const |
bool | converged () const |
const TraceType & | trace () const |
std::ostream & | ostream () const |
bool | solve (FunctionWrapper function, const InputType &x_ini, InputType &x_sol) |
bool | rootfind (FunctionBase< Real, FunInDim, FunOutDim, DerivedFunction, ForceEigen &&FunOutDim==1 > const &function, const InputType &x_ini, InputType &x_sol) |
bool | optimize (FunctionBase< Real, FunInDim, FunOutDim, DerivedFunction, ForceEigen &&FunOutDim==1 > const &function, const InputType &x_ini, InputType &x_sol) |
std::string | name () const |
|
void | evaluate_gradient (GradientWrapper gradient, const Vector &x, Matrix &out) |
void | evaluate_hessian (HessianWrapper hessian, const Vector &x, Matrix &out) |
bool | solve (FunctionWrapper function, Vector const &x_ini, Vector &x_sol) |
bool | solve (FunctionWrapper function, GradientWrapper gradient, Vector const &x_ini, Vector &x_sol) |
bool | solve (FunctionWrapper function, GradientWrapper gradient, HessianWrapper hessian, Vector const &x_ini, Vector &x_sol) |
Integer | first_derivative_evaluations () const |
Integer | max_first_derivative_evaluations () const |
Integer | second_derivative_evaluations () const |
Integer | max_second_derivative_evaluations () const |
void | reset () |
void | evaluate_function (FunctionWrapper function, const InputType &x, OutputType &out) |
void | evaluate_first_derivative (FirstDerivativeWrapper function, const InputType &x, FirstDerivativeType &out) |
void | evaluate_second_derivative (SecondDerivativeWrapper function, const InputType &x, SecondDerivativeType &out) |
void | store_trace (const InputType &x) |
bool | damp (FunctionWrapper function, InputType const &x_old, InputType const &function_old, InputType const &step_old, InputType &x_new, InputType &function_new, InputType &step_new) |
void | header () |
void | bottom () |
void | info (Real residuals, std::string const &notes="-") |
template<typename Real,
Integer N, typename DerivedSolver, bool ForceEigen = false>
class Optimist::Optimizer::Optimizer< Real, N, DerivedSolver, ForceEigen >
Optimizer
This section describes the multi-dimensional optimizers implemented in Optimist. The available optimizers are derivative and non-derivative methods. Derivative methods employ the function's derivatives to find the minimum with high accuracy, while non-derivative methods approximate the derivatives, which can improve efficiency in certain scenarios.
Here, the solvers are implemented for solving problems of the form
\[ \min_{\mathbf{x}} f(\mathbf{x}) \quad \text{with} \quad f: \mathbb{R}^n \rightarrow \mathbb{R} \text{,}
\]
which consists in finding the minimum of the cost function \(f\) by iteratively updating the current iterate \(\mathbf{x}_k\) until convergence is achieved. The solvers require the cost function \(f\) and its gradient \(\nabla f(\mathbf{x})\) to be provided by the user. Alternatively, the gradient can be approximated numerically using finite differences, depending on the problem's complexity and the user's preference.
- Template Parameters
-
N | Dimension of the optimization problem. |
DerivedSolver | Derived solver class. |
ForceEigen | Force the use of Eigen types for input and output. |