Sandals
v0.0.0
A C++ library for ODEs/DAEs integration
|
Class container for the generic implicit, explicit, and diagonally implicit Runge-Kutta methods. More...
#include <RungeKutta.hxx>
Public Types | |
using | System = typename Implicit<N, M>::Pointer |
using | Type = typename Tableau<S>::Type |
using | Time = Eigen::Vector<Real, Eigen::Dynamic> |
Public Member Functions | |
RungeKutta (const RungeKutta &)=delete | |
RungeKutta & | operator= (RungeKutta const &)=delete |
RungeKutta (Tableau< S > const &t_tableau) | |
RungeKutta (Tableau< S > const &t_tableau, System t_system) | |
Type | type () const |
bool | is_erk () const |
bool | is_irk () const |
bool | is_dirk () const |
Tableau< S > & | tableau () |
Tableau< S > const & | tableau () const |
Integer | stages () const |
std::string | name () const |
Integer | order () const |
bool | is_embedded () const |
MatrixS | A () const |
VectorS | b () const |
VectorS | b_embedded () const |
VectorS | c () const |
System | system () |
void | system (System t_system) |
bool | has_system () |
Real | absolute_tolerance () |
void | absolute_tolerance (Real t_absolute_tolerance) |
Real | relative_tolerance () |
void | relative_tolerance (Real t_relative_tolerance) |
Real & | safety_factor () |
void | safety_factor (Real t_safety_factor) |
Real & | min_safety_factor () |
void | min_safety_factor (Real t_min_safety_factor) |
Real & | max_safety_factor () |
void | max_safety_factor (Real t_max_safety_factor) |
Real & | min_step () |
void | min_step (Real t_min_step) |
Integer & | max_substeps () |
void | max_substeps (Integer t_max_substeps) |
bool | adaptive_mode () |
void | adaptive (bool t_adaptive) |
void | enable_adaptive_mode () |
void | disable_adaptive_mode () |
bool | verbose_mode () |
void | verbose_mode (bool t_verbose) |
void | enable_verbose_mode () |
void | disable_verbose_mode () |
bool | reverse_mode () |
void | reverse (bool t_reverse) |
void | enable_reverse_mode () |
void | disable_reverse_mode () |
Real | projection_tolerance () |
void | projection_tolerance (Real t_projection_tolerance) |
Integer & | max_projection_iterations () |
void | max_projection_iterations (Integer t_max_projection_iterations) |
bool | projection () |
void | projection (bool t_projection) |
void | enable_projection () |
void | disable_projection () |
Real | estimate_step (VectorN const &x, VectorN const &x_e, Real h_k) const |
std::string | info () const |
void | info (std::ostream &os) |
bool | erk_explicit_step (VectorN const &x_old, Real t_old, Real h_old, VectorN &x_new, Real &h_new) const |
void | erk_implicit_function (Integer s, VectorN const &x, Real t, Real h, MatrixK const &K, VectorN &fun) const |
void | erk_implicit_jacobian (Integer s, VectorN const &x, Real t, Real h, MatrixK const &K, MatrixN &jac) const |
bool | erk_implicit_step (VectorN const &x_old, Real t_old, Real h_old, VectorN &x_new, Real &h_new) |
void | irk_function (VectorN const &x, Real t, Real h, VectorK const &K, VectorK &fun) const |
void | irk_jacobian (VectorN const &x, Real t, Real h, VectorK const &K, MatrixJ &jac) const |
bool | irk_step (VectorN const &x_old, Real t_old, Real h_old, VectorN &x_new, Real &h_new) |
void | dirk_function (Integer n, VectorN const &x, Real t, Real h, MatrixK const &K, VectorN &fun) const |
void | dirk_jacobian (Integer n, VectorN const &x, Real t, Real h, MatrixK const &K, MatrixN &jac) const |
bool | dirk_step (VectorN const &x_old, Real t_old, Real h_old, VectorN &x_new, Real &h_new) |
bool | step (VectorN const &x_old, Real t_old, Real h_old, VectorN &x_new, Real &h_new) |
bool | advance (VectorN const &x_old, Real t_old, Real h_old, VectorN &x_new, Real &h_new) |
bool | solve (VectorX const &t_mesh, VectorN const &ics, Solution< N, M > &sol) |
bool | adaptive_solve (VectorX const &t_mesh, VectorN const &ics, Solution< N, M > &sol) |
bool | project (VectorN const &x, Real t, VectorN &x_projected) |
bool | project_ics (VectorN const &x, Real t, std::vector< Integer > const &projected_equations, std::vector< Integer > const &projected_invariants, VectorN &x_projected) const |
Real | estimate_order (std::vector< VectorX > const &t_mesh, VectorN const &ics, std::function< MatrixX(VectorX)> &sol) |
Private Types | |
using | VectorK = Eigen::Vector<Real, N*S> |
using | MatrixK = Eigen::Matrix<Real, N, S> |
using | MatrixJ = Eigen::Matrix<Real, N*S, N*S> |
using | VectorP = Eigen::Matrix<Real, N+M, 1> |
using | MatrixP = Eigen::Matrix<Real, N+M, N+M> |
using | NewtonX = Optimist::RootFinder::Newton<N> |
using | NewtonK = Optimist::RootFinder::Newton<N*S> |
using | VectorS = typename Tableau<S>::Vector |
using | MatrixS = typename Tableau<S>::Matrix |
using | VectorN = typename Implicit<N, M>::VectorF |
using | MatrixN = typename Implicit<N, M>::MatrixJF |
using | VectorM = typename Implicit<N, M>::VectorH |
using | MatrixM = typename Implicit<N, M>::MatrixJH |
Private Attributes | |
NewtonX | m_newtonX |
NewtonK | m_newtonK |
Tableau< S > | m_tableau |
System | m_system |
Real | m_absolute_tolerance {EPSILON_HIGH} |
Real | m_relative_tolerance {EPSILON_HIGH} |
Real | m_safety_factor {0.9} |
Real | m_min_safety_factor {0.1} |
Real | m_max_safety_factor {10.0} |
Real | m_min_step {EPSILON_HIGH} |
Integer | m_max_substeps {5} |
bool | m_adaptive {true} |
bool | m_verbose {false} |
bool | m_reverse {false} |
Eigen::FullPivLU< MatrixP > | m_lu |
Real | m_projection_tolerance {EPSILON_HIGH} |
Integer | m_max_projection_iterations {5} |
bool | m_projection {true} |
In this section, we introduce Runge-Kutta methods for solving ordinary differential equations (ODEs) and differential-algebraic equations (DAEs). Such equation systems describe the dynamics of systems and are used to model a wide range of physical phenomena.
ODEs and DAEs can be expressed in several forms. While these forms may be mathematically equivalent and convertible into one another, their numerical properties differ, making some more practical for specific computations. Below are the most common representations:
The implicit form is the most general and is frequently used to represent DAEs, whereas the explicit, semi-explicit, and linear forms are more commonly employed for ODEs. Note that the semi-explicit and the linear forms are a subset of the explicit form. Consequently, the Runge-Kutta methods we implement will be specialized only for the explicit and implicit forms.
Explicit Runge-Kutta (ERK) methods are among the simplest numerical techniques for solving ordinary differential equations. These methods are termed "explicit" because they typically avoid solving nonlinear systems at each step. Instead, the solution is computed directly using function evaluations. However, this holds only if the derivatives of the dynamic system's states, \(\mathbf{x}^\prime\), can be computed directly or via matrix inversion. This is possible when the dynamic system can be expressed in an explicit, semi-explicit, or linear form.
For an explicit system of the form \(\mathbf{x}^\prime = \mathbf{f}(\mathbf{x}, t)\), the ERK method is expressed as
\[ \begin{array}{l} \mathbf{K}_i = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{ij} \mathbf{K}_j, t_k + h_k c_i\right) \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
where \(s\) is the number of stages, \(h_k\) is the step size, \(\mathbf{x}_k\) is the state at time \(t_k\), and \(\mathbf{K}_i\) are the intermediate variables.
For explicit methods, the Runge-Kutta coefficient matrix \(\mathbf{A}\) is strictly lower triangular. Thus, the stages can be computed sequentially as
\[ \mathbf{K}_i = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{i-1} a_{ij} \mathbf{K}_j, t_k + h_k c_i\right) \text{,} \]
and by unrolling the stages, we obtain
\[ \begin{array}{l} \begin{cases} \mathbf{K}_1 = h_k \mathbf{f} \left(\mathbf{x}_k, t_k\right) \\ \mathbf{K}_2 = h_k \mathbf{f} \left(\mathbf{x}_k + a_{21} \mathbf{K}_1, t_k + h_k c_2\right) \\ \vdots \\[-0.5em] \mathbf{K}_s = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s-1} a_{sj} \mathbf{K}_j, t_k + h_k c_s\right) \end{cases} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
For an implicit dynamical system of the form \(\mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0}\), the ERK method becomes
\[ \begin{array}{l} \mathbf{F}_i \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{i-1} a_{ij} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_i}{h_k}, t_k + h_k c_i\right) = \mathbf{0} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
Here, the \(s\)-stage system of equations \(\mathbf{F}_i\) forms a lower triangular system, typically nonlinear in the intermediate variables \(\mathbf{K}_i\). The solution is obtained using forward substitution, where each stage is solved sequentially, i.e.,
\[ \begin{array}{l} \begin{cases} \mathbf{F}_1 \left(\mathbf{x}_k, \mathbf{K}_1, t_k + h_k c_1\right) = \mathbf{0} \\ \mathbf{F}_2 \left(\mathbf{x}_k + a_{21} \mathbf{K}_1, \displaystyle\frac{\mathbf{K}_2}{h_k}, t_k + h_k c_2\right) = \mathbf{0} \\ \vdots \\[-0.5em] \mathbf{F}_s \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s-1} a_{sj} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_s}{h_k}, t_k + h_k c_s\right) = \mathbf{0} \end{cases} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
Notice that the intermediate variables \(\mathbf{K}_i\) are typically computed using Newton's method.
Implicit Runge-Kutta (IRK) methods are more general than ERK methods, as they can handle implicit systems of the form \(\mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0}\). IRK methods are more stable but computationally expensive because they require solving a nonlinear system at each step. A key characteristic of IRK methods is that the Runge-Kutta coefficient matrix \(\mathbf{A}\) is fully populated, meaning that the stages are coupled and must be solved simultaneously.
For an explicit dynamical system of the form \(\mathbf{x}^\prime = \mathbf{f}(\mathbf{x}, t)\), the IRK method is expressed as
\[ \begin{array}{l} \mathbf{K}_i = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{ij} \mathbf{K}_j, t_k + h_k c_i\right) \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{,} \]
Unrolling the stages gives
\[ \begin{array}{l} \begin{cases} \mathbf{K}_1 = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{1j} \mathbf{K}_j, t_k + h_k c_1\right) \\ \mathbf{K}_2 = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{2j} \mathbf{K}_j, t_k + h_k c_2\right) \\[-0.5em] \vdots \\[-0.5em] \mathbf{K}_s = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{sj} \mathbf{K}_j, t_k + h_k c_s\right) \end{cases} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
For an implicit dynamic system of the form \(\mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0}\), the IRK method is expressed as
\[ \begin{array}{l} \mathbf{F}_i \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{ij} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_i}{h_k}, t_k + h_k c_i\right) = \mathbf{0} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
Unrolling the stages, we have
\[ \begin{array}{l} \begin{cases} \mathbf{F}_1 \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{1j} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_1}{h_k}, t_k + h_k c_1\right) = \mathbf{0} \\ \mathbf{F}_2 \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{2j} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_2}{h_k}, t_k + h_k c_2\right) = \mathbf{0} \\[-0.5em] \vdots \\[-0.5em] \mathbf{F}_s \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{sj} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_s}{h_k}, t_k + h_k c_s\right) = \mathbf{0} \end{cases} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
NOTE: In
Sandals
, the implementation of IRK methods is unified for both explicit and implicit dynamical systems. IRK methods inherently require solving nonlinear systems, irrespective of the system's form. Creating a specialized version for explicit systems would not yield significant computational advantages, as the complexity of solving the nonlinear system remains. Consequently, only the IRK method for implicit dynamical systems is implemented.
Diagonally implicit Runge-Kutta (DIRK) methods are a specialized subset of IRK methods. They are characterized by a lower triangular \(\mathbf{A}\)-matrix with at least one nonzero diagonal entry. This structure allows each stage to be solved sequentially rather than simultaneously, making DIRK methods more computationally efficient compared to general IRK methods.
For an explicit dynamic system of the form \(\mathbf{x}^\prime = \mathbf{f}(\mathbf{x}, t)\), the DIRK method is expressed as
\[ \begin{array}{l} \mathbf{K}_i = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{i} a_{ij} \mathbf{K}_j, t_k + h_k c_i\right) \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
When the stages are unrolled, the equations become
\[ \begin{array}{l} \begin{cases} \mathbf{K}_1 = h_k \mathbf{f} \left(\mathbf{x}_k + a_{11} \mathbf{K}_1, t_k + h_k c_1\right) \\ \mathbf{K}_2 = h_k \mathbf{f} \left(\mathbf{x}_k + a_{21} \mathbf{K}_1 + a_{22} \mathbf{K}_2, t_k + h_k c_2\right) \\ \vdots \\[-0.5em] \mathbf{K}_s = h_k \mathbf{f} \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{sj} \mathbf{K}_j, t_k + h_k c_s\right) \end{cases} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
For an implicit dynamic system of the form \(\mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0}\), the DIRK method is written as
\[ \begin{array}{l} \mathbf{F}_i \left( \mathbf{x}_k + \displaystyle\sum_{j=1}^{i} a_{ij} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_i}{h_k}, t_k + h_k c_i \right) = \mathbf{0} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
If the stages are unrolled, we have
\[ \begin{array}{l} \begin{cases} \mathbf{F}_1 \left(\mathbf{x}_k + a_{11} \mathbf{K}_1, \displaystyle\frac{\mathbf{K}_1}{h_k}, t_k + h_k c_1\right) = \mathbf{0} \\ \mathbf{F}_2 \left(\mathbf{x}_k + a_{21} \mathbf{K}_1 + a_{22} \mathbf{K}_2, \displaystyle\frac{\mathbf{K}_2}{h_k}, t_k + h_k c_2\right) = \mathbf{0} \\ \vdots \\[-0.5em] \mathbf{F}_s \left(\mathbf{x}_k + \displaystyle\sum_{j=1}^{s} a_{sj} \mathbf{K}_j, \displaystyle\frac{\mathbf{K}_s}{h_k}, t_k + h_k c_s\right) = \mathbf{0} \end{cases} \\ \mathbf{x}_{k+1} = \mathbf{x}_k + h_k \displaystyle\sum_{i=1}^s b_i \mathbf{K}_i \end{array} \text{.} \]
NOTE: As with IRK methods, the implementation of DIRK methods for explicit and implicit dynamic systems in
Sandals
is unified. Since solving the nonlinear system is inherent to DIRK methods, a specialized version for explicit systems would not significantly reduce computational complexity. Consequently, only the DIRK method for implicit dynamic systems is implemented.
In many applications, the dynamic system's states must satisfy certain constraints, which are typically expressed as invariants. For example, in mechanical systems, the constraints may represent the conservation of energy or momentum. In such cases, the states must be projected onto the manifold defined by the invariants to ensure that the system's dynamics are physically consistent. In Sandals
, the projection is performed using a simple iterative method, where the states are updated until the invariants are satisfied within a specified tolerance. In other words, at each step, the states are adjusted to ensure that the invariants are preserved.
Consider the constrained minimization problem
\[ \underset{\mathbf{x}}{\text{minimize}} \quad \frac{1}{2}\left(\mathbf{x} - \tilde{\mathbf{x}}\right)^2 \quad \text{subject to} \quad \mathbf{h}(\mathbf{x}) = \mathbf{0} \text{,} \]
whose Lagrangian is given by
\[ \mathcal{L}(\mathbf{x}, \boldsymbol{\lambda}) = \frac{1}{2}\left(\mathbf{x} - \tilde{\mathbf{x}}\right)^2 + \boldsymbol{\lambda} \cdot \mathbf{h}(\mathbf{x}) \text{.} \]
The first-order Karush-Kuhn-Tucker conditions for this problem are then
\[ \begin{cases} \mathbf{x} + \mathbf{Jh}_{\mathbf{x}}^\top \boldsymbol{\lambda} = \tilde{\mathbf{x}} \\ \mathbf{h}(\mathbf{x}) = \mathbf{0} \end{cases} \text{.} \]
This system of equations can be solved through the iterative solution of a linear system derived from the Taylor expansion
\[ \begin{cases} \mathbf{x} + \delta\mathbf{x} + \mathbf{Jh}_{\mathbf{x}}^\top(\mathbf{x} + \delta\mathbf{x}, t) \boldsymbol{\lambda} = \tilde{\mathbf{x}} \\ \mathbf{h}(\mathbf{x}) + \mathbf{Jh}_{\mathbf{x}}(\mathbf{x}, t) \delta\mathbf{x} + \mathcal{O}\left(\| \delta\mathbf{x} \|^2\right) = \mathbf{0} \end{cases} \text{,} \]
where \(\mathbf{Jh}_{\mathbf{x}}\) is the Jacobian of the constraint function \(\mathbf{h}(\mathbf{x})\) with respect to the states \(\mathbf{x}\). The linear system can be written in matrix form as
\[ \begin{bmatrix} \mathbf{I} & \mathbf{Jh}_{\mathbf{x}}^\top \\ \mathbf{Jh}_{\mathbf{x}} & \mathbf{0} \end{bmatrix} \begin{bmatrix} \delta\mathbf{x} \\ \boldsymbol{\lambda} \end{bmatrix} = \begin{bmatrix} \tilde{\mathbf{x}} - \mathbf{x} \\ -\mathbf{h}(\mathbf{x}) \end{bmatrix} \text{,} \]
and the update step for the states is then \(\mathbf{x} \leftarrow \mathbf{x} + \delta\mathbf{x}\).
S | The number of stages of the Runge-Kutta method. |
N | The dimension of the ODE/DAE system. |
M | The dimension of the invariants manifold. |
|
private |
Templatized matrix type.
|
private |
Templatized matrix type.
|
private |
Templatized matrix type.
|
private |
Templatized matrix type.
|
private |
Templatized matrix type.
|
private |
Templatized matrix type.
|
private |
Templatized Newton solver for IRK methods.
|
private |
Templatized Newton solver for ERK and DIRK methods.
using Sandals::RungeKutta< S, N, M >::System = typename Implicit<N, M>::Pointer |
Shared pointer to an implicit ODE/DAE system.
using Sandals::RungeKutta< S, N, M >::Time = Eigen::Vector<Real, Eigen::Dynamic> |
Templatized vector type for the independent variable (or time).
using Sandals::RungeKutta< S, N, M >::Type = typename Tableau<S>::Type |
Runge-Kutta type enumeration.
|
private |
Templatized vector type.
|
private |
Templatized vector type.
|
private |
Templatized vector type.
|
private |
Templatized vector type.
|
private |
Templatized vector type.
|
delete |
Deleted copy constructor for the Runge-Kutta class.
|
inline |
Class constructor for the Runge-Kutta method.
[in] | t_tableau | The Tableau reference. |
|
inline |
Class constructor for the Runge-Kutta method.
[in] | t_tableau | The Tableau reference. |
[in] | t_system | The ODE/DAE system shared pointer. |
|
inline |
Get the Butcher tableau matrix \( \mathbf{A} \).
|
inline |
Get the adaptive step absolute tolerance \( \epsilon_{\text{abs}} \).
|
inline |
Set the adaptive step absolute tolerance \( \epsilon_{\text{abs}} \).
[in] | t_absolute_tolerance | The adaptive step absolute tolerance \( \epsilon_{\text{abs}} \). |
|
inline |
Set the adaptive step mode.
[in] | t_adaptive | The adaptive step mode. |
|
inline |
Get the adaptive step mode.
|
inline |
Solve the system and calculate the approximate solution over the suggested independent variable mesh \( \mathbf{t} = \left[ t_1, t_2, \ldots, t_n \right]^\top \), the step size is automatically computed based on the error control method.
[in] | t_mesh | Independent variable (or time) mesh \( \mathbf{t} \). |
[in] | ics | Initial conditions \( \mathbf{x}(t = 0) \). |
[out] | sol | The solution of the system over the mesh of independent variable. |
|
inline |
Advance using a generic integration method for a system of the form \( \mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0} \). The step is automatically selected based on the system properties and the integration method properties. In the advancing step, the system solution is also projected on the manifold \( \mathbf{h}(\mathbf{x}, t) \). Substepping may also be used if the integration step fails with the current step size.
[in] | x_old | States \( \mathbf{x}_k \) at the \( k \)-th step. |
[in] | t_old | Independent variable (or time) \( t_k \) at the \( k \)-th step. |
[in] | h_old | Advancing step \( h_k \) at the \( k \)-th step. |
[out] | x_new | Computed states \( \mathbf{x}_{k+1} \) at the \( (k+1) \)-th step. |
[out] | h_new | The suggested step \( h_{k+1}^\star \) for the next advancing step. |
|
inline |
Get the Butcher tableau weights vector \( \mathbf{b} \).
|
inline |
Get the Butcher tableau embedded weights vector \( \hat{\mathbf{b}} \).
|
inline |
Get the Butcher tableau nodes vector \( \mathbf{c} \).
|
inline |
Compute the residual of the system to be solved, which is given by the values of the system
\[\begin{array}{l} \mathbf{F}_n \left(\mathbf{x} + \displaystyle\sum_{j=1}^n a_{nj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_n}{h}, t + h c_n\right) \end{array} \text{.} \]
where \( \tilde{\mathbf{K}} = h \mathbf{K} \).
[in] | n | Equation index \( n \). |
[in] | x | States \( \mathbf{x} \). |
[in] | t | Independent variable (or time) \( t \). |
[in] | h | Advancing step \( h \). |
[in] | K | Variables \( \tilde{\mathbf{K}} = h \mathbf{K} \) of the system to be solved. |
[out] | fun | The residual of system to be solved. |
|
inline |
Compute the Jacobian of the system of equations
\[\begin{array}{l} \mathbf{F}_n \left(\mathbf{x} + \displaystyle\sum_{j=1}^n a_{nj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_n}{h}, t + h c_n\right) \end{array} \text{.} \]
with respect to the \( \tilde{\mathbf{K}}_n = h \mathbf{K}_n \) variables. The Jacobian matrix of the \( n \)-th equation with respect to the \( \tilde{\mathbf{K}}_n \) variables is given by
\[ \frac{\partial\mathbf{F}_n}{\partial\tilde{\mathbf{K}}_n}\left(\mathbf{x} + \displaystyle \sum_{j=1}^n a_{nj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_n}{h}, t + h c_n \right) = a_{nj} \mathbf{JF}_x + \displaystyle\frac{1}{h}\begin{cases} \mathbf{JF}_{x^{\prime}} & n = j \\ 0 & n \neq j \end{cases} \text{,} \]
for \( j = 1, 2, \ldots, s \).
[in] | n | Equation index \( n \). |
[in] | x | States \( \mathbf{x} \). |
[in] | t | Independent variable (or time) \( t \). |
[in] | h | Advancing step \( h \). |
[in] | K | Variables \( h \mathbf{K} \) of the system to be solved. |
[out] | jac | The Jacobian of system to be solved. |
|
inline |
Compute the new states \( \mathbf{x}_{k+1} \) at the next advancing step \( t_{k+1} = t_k + h_k \) using a diagonally implicit Runge-Kutta method, given an implicit system of the form \( \mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0} \). If the method is embedded, the step for the next advancing step \( h_{k+1}^\star \) is also computed according to the error control method.
[in] | x_old | States \( \mathbf{x}_k \) at the \( k \)-th step. |
[in] | t_old | Independent variable (or time) \( t_k \) at the \( k \)-th step. |
[in] | h_old | Advancing step \( h_k \) at the \( k \)-th step. |
[out] | x_new | Computed states \( \mathbf{x}_{k+1} \) at the \( (k+1) \)-th step. |
[out] | h_new | The suggested step \( h_{k+1}^\star \) for the next advancing step. |
|
inline |
Disable the adaptive step mode.
|
inline |
Disable the projection mode.
|
inline |
Disable the time reverse mode.
|
inline |
Disable the verbose mode.
|
inline |
Enable the adaptive step mode.
|
inline |
Enable the projection mode.
|
inline |
Enable the time reverse mode.
|
inline |
Enable the verbose mode.
|
inline |
Compute the new states \( \mathbf{x}_{k+1} \) at the next advancing step \( t_{k+1} = t_k + h_k \) using an explicit Runge-Kutta method, given an explicit system of the form \(\mathbf{x}^\prime = \mathbf{f}(\mathbf{x}, t) \). If the method is embedded, the step for the next advancing step \( h_{k+1}^\star \) is also computed according to the error control method.
[in] | x_old | States \( \mathbf{x}_k \) at the \( k \)-th step. |
[in] | t_old | Independent variable (or time) \( t_k \) at the \( k \)-th step. |
[in] | h_old | Advancing step \( h_k \) at the \( k \)-th step. |
[out] | x_new | Computed states \( \mathbf{x}_{k+1} \) at the \( (k+1) \)-th step. |
[out] | h_new | The suggested step \( h_{k+1}^\star \) for the next advancing step. |
|
inline |
Compute the residual of the system to be solved, which is given by the values of the system
\[\begin{array}{l} \mathbf{F}_n \left(\mathbf{x} + \displaystyle\sum_{j=1}^{n-1} a_{nj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_n}{h}, t + h c_n\right) \end{array} \text{.} \]
where \( \tilde{\mathbf{K}} = h \mathbf{K} \).
[in] | s | Stage index \( s \). |
[in] | x | States \( \mathbf{x} \). |
[in] | t | Independent variable (or time) \( t \). |
[in] | h | Advancing step \( h \). |
[in] | K | Variables \( \tilde{\mathbf{K}} = h \mathbf{K} \) of the system to be solved. |
[out] | fun | The residual of system to be solved. |
|
inline |
Compute the Jacobian of the system of equations
\[\begin{array}{l} \mathbf{F}_n \left(\mathbf{x} + \displaystyle\sum_{j=1}^{n-1} a_{nj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_n}{h}, t + h c_n\right) \end{array} \text{.} \]
with respect to the \( \tilde{\mathbf{K}}_n = h \mathbf{K}_n \) variables. The Jacobian matrix of the \( n \)-th equation with respect to the \( \tilde{\mathbf{K}}_n \) variables is given by
\[ \frac{\partial\mathbf{F}_n}{\partial\tilde{\mathbf{K}}_n}\left(\mathbf{x} + \displaystyle \sum_{j=1}^{n-1} a_{nj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_n}{h}, t + h c_n \right) = a_{nj} \mathbf{JF}_x + \displaystyle\frac{1}{h}\begin{cases} \mathbf{JF}_{x^{\prime}} & n = j \\ 0 & n \neq j \end{cases} \text{,} \]
for \( j = 1, 2, \ldots, s \).
[in] | s | Stage index \( s \). |
[in] | x | States \( \mathbf{x} \). |
[in] | t | Independent variable (or time) \( t \). |
[in] | h | Advancing step \( h \). |
[in] | K | Variables \( h \mathbf{K} \) of the system to be solved. |
[out] | jac | The Jacobian of system to be solved. |
|
inline |
Compute the new states \( \mathbf{x}_{k+1} \) at the next advancing step \( t_{k+1} = t_k + h_k \) using an explicit Runge-Kutta method, given an implicit system of the form \( \mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0} \). If the method is embedded, the step for the next advancing step \( h_{k+1}^\star \) is also computed according to the error control method.
[in] | x_old | States \( \mathbf{x}_k \) at the \( k \)-th step. |
[in] | t_old | Independent variable (or time) \( t_k \) at the \( k \)-th step. |
[in] | h_old | Advancing step \( h_k \) at the \( k \)-th step. |
[out] | x_new | Computed states \( \mathbf{x}_{k+1} \) at the \( (k+1) \)-th step. |
[out] | h_new | The suggested step \( h_{k+1}^\star \) for the next advancing step. |
|
inline |
Estimate the order of the Runge-Kutta method.
[in] | t_mesh | The vector of time meshes with same initial and final time with fixed step. |
[in] | ics | Initial conditions \( \mathbf{x}(t = 0) \). |
[in] | sol | The analytical solution function. |
|
inline |
Estimate the optimal step size for the next advancing step according to the error control method. The error control method used is based on the local truncation error, which is computed as
\[\varepsilon_{\text{t}} = \max\left(\left| \mathbf{x} - \hat{\mathbf{x}} \right|\right) \]
where \( \mathbf{x} \) is the approximation of the states computed with the higher order method, and \( \hat{\mathbf{x}} \) is the approximation of the states computed with the lower order method. The desired error is computed as
\[\varepsilon_{\text{d}} = \epsilon_{\text{abs}} + \epsilon_{\text{rel}} \max\left(\left| \mathbf{x} \right|, \left| \hat{\mathbf{x}} \right|\right) \]
where \( \epsilon_{\text{abs}} \) is the absolute tolerance and \( \epsilon_{\text{rel}} \) is the relative tolerance. The optimal step size is then estimated as
\[h_{\text{opt}} = h_k \min\left( f_{\max}, \max\left( f_{\min}, f \left( \displaystyle\frac{\varepsilon_{\text{d}}}{\varepsilon_{\text{t}}} \right)^{\frac{1}{q+1}} \right)\right) \]
where \( f \) is the safety factor, \( f_{\max} \) is the maximum safety factor, \( f_{\min} \) is the minimum safety factor, and \( q \) is the order of the embedded method.
[in] | x | States approximation \( \mathbf{x}_{k+1} \). |
[in] | x_e | Embedded method's states approximation \( \hat{\mathbf{x}}_{k+1} \). |
[in] | h_k | Actual advancing step \( h_k \). |
|
inline |
Check if the ODE/DAE system pointer is set.
|
inline |
Print the Runge-Kutta method information to the output stream.
|
inline |
Print the Runge-Kutta method information on a stream.
[in,out] | os | Output stream. |
|
inline |
Compute the residual of the system to be solved, which is given by the values of the system
\[\begin{cases} \mathbf{F}_1 \left(\mathbf{x} + \displaystyle\sum_{j=1}^{s} a_{1j}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_1}{h}, t + h c_1\right) \\ \mathbf{F}_2 \left(\mathbf{x} + \displaystyle\sum_{j=1}^{s} a_{2j}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_2}{h}, t + h c_2\right) \\[-0.5em] \vdots \\[-0.5em] \mathbf{F}_s \left(\mathbf{x} + \displaystyle\sum_{j=1}^{s} a_{sj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_s}{h}, t + h c_s\right) \end{cases} \]
where \( \tilde{\mathbf{K}} = h \mathbf{K} \).
[in] | x | States \( \mathbf{x} \). |
[in] | t | Independent variable (or time) \( t \). |
[in] | h | Advancing step \( h \). |
[in] | K | Variables \( \tilde{\mathbf{K}} = h \mathbf{K} \) of the system to be solved. |
[out] | fun | The residual of system to be solved. |
|
inline |
Compute the Jacobian of the system of equations
\[\begin{cases} \mathbf{F}_1 \left(\mathbf{x} + \displaystyle\sum_{j=1}^{s} a_{1j}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_1}{h}, t + h c_1\right) \\ \mathbf{F}_2 \left(\mathbf{x} + \displaystyle\sum_{j=1}^{s} a_{2j}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_2}{h}, t + h c_2\right) \\[-0.5em] \vdots \\[-0.5em] \mathbf{F}_s \left(\mathbf{x} + \displaystyle\sum_{j=1}^{s} a_{sj}\tilde{\mathbf{K}}_j, \displaystyle\frac{\tilde{\mathbf{K}}_s}{h}, t + h c_s\right) \end{cases} \text{.} \]
with respect to the \( \tilde{\mathbf{K}} = h \mathbf{K} \) variables. The \( ij \)-th Jacobian matrix entry is given by
\[ \frac{\partial\mathbf{F}_i}{\partial\tilde{\mathbf{K}}_j}\left(\mathbf{x} + \displaystyle \sum_{m=1}^s a_{im}\tilde{\mathbf{K}}_m, \displaystyle\frac{\tilde{\mathbf{K}}_i}{h}, t + h c_i \right) = a_{ij} \mathbf{JF}_x + \displaystyle\frac{1}{h}\begin{cases} \mathbf{JF}_{x^{\prime}} & i = j \\ 0 & i \neq j \end{cases} \text{,} \]
for \( i = 1, 2, \ldots, s \) and \( j = 1, 2, \ldots, s \).
[in] | x | States \( \mathbf{x} \). |
[in] | t | Independent variable (or time) \( t \). |
[in] | h | Advancing step \( h \). |
[in] | K | Variables \( h \mathbf{K} \) of the system to be solved. |
[out] | jac | The Jacobian of system to be solved. |
|
inline |
Compute the new states \( \mathbf{x}_{k+1} \) at the next advancing step \( t_{k+1} = t_k + h_k \) using an implicit Runge-Kutta method, given an implicit system of the form \(\mathbf{F}(\mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0} \). If the method is embedded, the step for the next advancing step \( h_{k+1}^\star \) is also computed according to the error control method.
[in] | x_old | States \( \mathbf{x}_k \) at the \( k \)-th step. |
[in] | t_old | Independent variable (or time) \( t_k \) at the \( k \)-th step. |
[in] | h_old | Advancing step \( h_k \) at the \( k \)-th step. |
[out] | x_new | Computed states \( \mathbf{x}_{k+1} \) at the \( (k+1) \)-th step. |
[out] | h_new | The suggested step \( h_{k+1}^\star \) for the next advancing step. |
|
inline |
Check if the method is a diagonally implicit Runge-Kutta (DIRK) method.
|
inline |
Check if the Runge-Kutta method is embedded.
|
inline |
Check if the method is an explicit Runge-Kutta (ERK) method.
|
inline |
Check if the method is an implicit Runge-Kutta (IRK) method.
|
inline |
Get the maximum number of projection iterations.
|
inline |
Set the maximum number of projection iterations.
[in] | t_max_projection_iterations | The maximum number of projection iterations. |
|
inline |
Get the maximum safety factor for adaptive step \( f_{\max} \).
|
inline |
Set the maximum safety factor for adaptive step \( f_{\max} \).
[in] | t_max_safety_factor | The maximum safety factor for adaptive step \( f_{\max} \). |
|
inline |
Get the maximum number of substeps.
|
inline |
Set the maximum number of substeps.
[in] | t_max_substeps | The maximum number of substeps. |
|
inline |
Get the minimum safety factor for adaptive step \( f_{\min} \).
|
inline |
Set the minimum safety factor for adaptive step \( f_{\min} \).
[in] | t_min_safety_factor | The minimum safety factor for adaptive step \( f_{\min} \). |
|
inline |
Get the minimum step for advancing \( h_{\min} \).
|
inline |
Set the minimum step for advancing \( h_{\min} \).
[in] | t_min_step | The minimum step for advancing \( h_{\min} \). |
|
inline |
Get the name of the Runge-Kutta method.
|
delete |
Deleted copy assignment operator for the Runge-Kutta class (copying the integrator is not allowed).
|
inline |
Get the order \( p \) of the Runge-Kutta method.
|
inline |
Project the system solution \( \mathbf{x} \) on the invariants \( \mathbf{h} (\mathbf{x}, t) = \mathbf{0} \).
[in] | x | The initial guess for the states \(\widetilde{\mathbf{x}} \). |
[in] | t | The independent variable (or time) \( t \) at which the states are evaluated. |
[out] | x_projected | The projected states \( \mathbf{x} \) closest to the invariants manifold \( \mathbf{h} (\mathbf{x}, t) = \mathbf{0} \). |
|
inline |
Project the system solution \( \mathbf{x} \) on the invariants \( \mathbf{h} (\mathbf{x}, t) = \mathbf{0} \).
[in] | x | The initial guess for the states \(\widetilde{\mathbf{x}} \). |
[in] | t | The independent variable (or time) \( t \) at which the states are evaluated. |
[in] | projected_equations | The indices of the states to be projected. |
[in] | projected_invariants | The indices of the invariants to be projected. |
[out] | x_projected | The projected states \( \mathbf{x} \) closest to the invariants manifold \( \mathbf{h} (\mathbf{x}, t) = \mathbf{0} \). |
|
inline |
Get projection mode.
|
inline |
Set the projection mode.
[in] | t_projection | The projection mode. |
|
inline |
Get the projection tolerance.
|
inline |
Set the projection tolerance.
[in] | t_projection_tolerance | The projection tolerance. |
|
inline |
Get the adaptive step relative tolerance \( \epsilon_{\text{rel}} \).
|
inline |
Set the adaptive step relative tolerance \( \epsilon_{\text{rel}} \).
[in] | t_relative_tolerance | The adaptive step relative tolerance \( \epsilon_{\text{rel}} \). |
|
inline |
Set the time reverse mode.
[in] | t_reverse | The time reverse mode. |
|
inline |
Get the time reverse mode.
|
inline |
Get the safety factor for adaptive step \( f \).
|
inline |
Set safety factor for adaptive step \( f \).
[in] | t_safety_factor | The safety factor for adaptive step \( f \). |
|
inline |
Solve the system and calculate the approximate solution over the independent variable (or time) mesh \( \mathbf{t} = \left[ t_1, t_2, \ldots, t_n \right]^\top \). The step size is fixed and given by \( h = t_{k+1} - t_k \).
[in] | t_mesh | Independent variable (or time) mesh \( \mathbf{t} \). |
[in] | ics | Initial conditions \( \mathbf{x}(t = 0) \). |
[out] | sol | The solution of the system over the mesh of independent variable. |
|
inline |
Get the stages \( s \) number of the Runge-Kutta method.
|
inline |
Compute a step using a generic integration method for a system of the form \( \mathbf{F}( \mathbf{x}, \mathbf{x}^\prime, t) = \mathbf{0} \). The step is automatically selected based on the system properties and the integration method properties.
[in] | x_old | States \( \mathbf{x}_k \) at the \( k \)-th step. |
[in] | t_old | Independent variable (or time) \( t_k \) at the \( k \)-th step. |
[in] | h_old | Advancing step \( h_k \) at the \( k \)-th step. |
[out] | x_new | Computed states \( \mathbf{x}_{k+1} \) at the \( (k+1) \)-th step. |
[out] | h_new | The suggested step \( h_{k+1}^\star \) for the next advancing step. |
|
inline |
Get the ODE/DAE system pointer.
|
inline |
Set the ODE/DAE system pointer.
[in] | t_system | The ODE/DAE system pointer. |
|
inline |
|
inline |
|
inline |
Get the enumeration type of the Runge-Kutta method.
|
inline |
Get the verbose mode.
|
inline |
Set the verbose mode.
[in] | t_verbose | The verbose mode. |
|
private |
Absolute tolerance for adaptive step \( \epsilon_{\text{abs}} \).
|
private |
Adaptive step mode boolean.
|
private |
LU decomposition for the projection matrix.
|
private |
Maximum number of projection steps.
|
private |
Maximum safety factor for adaptive step \( f_{\max} \).
|
private |
Maximum number of substeps.
|
private |
Minimum safety factor for adaptive step \( f_{\min} \).
|
private |
Minimum step for advancing \( h_{\min} \).
|
private |
Newton solver for IRK methods.
|
private |
Newton solver for ERK and DIRK methods.
|
private |
Projection mode boolean.
|
private |
Projection tolerance \( \epsilon_{\text{proj}} \).
|
private |
Relative tolerance for adaptive step \( \epsilon_{\text{rel}} \).
|
private |
Time reverse mode boolean.
|
private |
Safety factor for adaptive step \( f \).
|
private |
ODE/DAE system object pointer.
|
private |
Butcher tableau of the Runge-Kutta method.
|
private |
Verbose mode boolean.